blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a4d9cd67be8d8087ca60582c8e2dacbdbc3aac6e | 54e0c677471942aa35386e810e7fa54753bbecce | /Assignment 3 submission/preprocess.py | 23b0a97ab08a87dcc6522ff1fa19a31f8c302d1f | [] | no_license | itsuncheng/COMP4901I_Assignments | b27dd86d4e5a438c54c1298478bcc07efe2ea879 | 3bbe115056900515a4fa8fee1973219f9efdf334 | refs/heads/master | 2021-05-16T21:39:59.170470 | 2020-03-27T08:43:26 | 2020-03-27T08:43:26 | 250,479,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,657 | py | import pandas as pd
import re
import numpy as np
import pickle
from collections import Counter
import torch
import torch.utils.data as data
PAD_INDEX = 0
UNK_INDEX = 1
def clean(sent):
    """Normalize a raw review sentence for whitespace tokenization.

    Keeps alphanumerics and basic punctuation, detaches contracted suffixes
    ("'s", "'ve", "n't", ...) as standalone tokens, pads punctuation with
    spaces so a later ``str.split()`` isolates it, and lowercases.

    Args:
        sent: raw sentence string.

    Returns:
        The cleaned, lower-cased sentence (may contain runs of spaces).
    """
    # Drop every character outside the allowed set.
    sent = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", sent)
    # Split contraction suffixes off their stems.
    sent = re.sub(r"\'s", " 's", sent)
    sent = re.sub(r"\'ve", " 've", sent)
    sent = re.sub(r"n\'t", " n't", sent)
    sent = re.sub(r"\'re", " 're", sent)
    sent = re.sub(r"\'d", " 'd", sent)
    sent = re.sub(r"\'ll", " 'll", sent)
    # Surround punctuation with spaces.  BUG FIX: the original replacement
    # templates used " \( ", " \) ", " \? "; since "\(" is not a recognized
    # replacement escape, re.sub kept the backslash and the cleaned text
    # contained literal "\(" / "\)" / "\?" tokens.
    sent = re.sub(r",", " , ", sent)
    sent = re.sub(r"!", " ! ", sent)
    sent = re.sub(r"\(", " ( ", sent)
    sent = re.sub(r"\)", " ) ", sent)
    sent = re.sub(r"\?", " ? ", sent)
    return sent.strip().lower()
class Vocab():
    """Word <-> index vocabulary with occurrence counts.

    Indices 0 and 1 are reserved for the "PAD" and "UNK" tokens.
    """

    def __init__(self):
        # Reserved entries for padding / unknown tokens.
        self.word2index = {"PAD": PAD_INDEX, "UNK": UNK_INDEX}
        self.word2count = {}
        self.index2word = {PAD_INDEX: "PAD", UNK_INDEX: "UNK"}
        self.n_words = 2   # distinct entries, including the two defaults
        self.word_num = 0  # running total of tokens seen

    def index_words(self, sentence):
        """Register every token of an already-tokenized sentence."""
        for token in sentence:
            self.word_num += 1
            if token in self.word2index:
                self.word2count[token] += 1
            else:
                # First occurrence: assign the next free index.
                fresh_id = self.n_words
                self.word2index[token] = fresh_id
                self.index2word[fresh_id] = token
                self.word2count[token] = 1
                self.n_words += 1
def Lang(vocab, file_name):
    """Build the vocabulary from a review CSV and collect corpus statistics.

    Args:
        vocab: a Vocab instance, filled in place via ``index_words``.
        file_name: CSV path with at least "content" and "rating" columns.

    Returns:
        (vocab, statistic) where statistic holds sentence/word counts,
        vocabulary size, the ten most frequent words, length statistics
        and the class (rating) distribution.
    """
    df = pd.read_csv(file_name)

    # Whitespace-tokenize every review, feed the vocabulary, track lengths.
    lengths = []
    for raw in list(df["content"]):
        tokens = str(raw).strip().split()
        vocab.index_words(tokens)
        lengths.append(len(tokens))

    statistic = {
        "sent_num": len(df),
        "word_num": vocab.word_num,
        "vocab_size": vocab.n_words,
        # most_common keeps descending-frequency order.
        "top_ten_words": [w for w, _ in Counter(vocab.word2count).most_common(10)],
        "max_len": max(lengths),
        "avg_len": sum(lengths) / len(lengths),
        "len_std": np.std(lengths),
        "class_distribution": dict(Counter(list(df["rating"]))),
    }
    return vocab, statistic
class Dataset(data.Dataset):
    """Custom data.Dataset compatible with data.DataLoader.

    Wraps ``(ids, token_lists, labels)``; tokens are mapped to vocabulary
    indices lazily, on item access.  ``labels`` is None for the test split.
    """

    def __init__(self, data, vocab):
        self.id, self.X, self.y = data
        self.vocab = vocab
        self.num_total_seqs = len(self.X)
        self.id = torch.LongTensor(self.id)
        # The test split carries no labels.
        if self.y is not None:
            self.y = torch.LongTensor(self.y)

    def __getitem__(self, index):
        """Returns one data pair (source and target)."""
        ind = self.id[index]
        source = torch.LongTensor(self.tokenize(self.X[index]))
        if self.y is None:
            return source, ind
        return source, self.y[index], ind

    def __len__(self):
        return self.num_total_seqs

    def tokenize(self, sentence):
        """Map each token to its vocabulary id, falling back to UNK."""
        lookup = self.vocab.word2index
        return [lookup[word] if word in lookup else UNK_INDEX
                for word in sentence]
def preprocess(filename, max_len=200, test=False):
    """Load one CSV split and return (ids, padded token lists, ratings).

    Each review is cleaned, whitespace-tokenized, then truncated or
    right-padded with the "PAD" token to exactly ``max_len`` tokens.

    Args:
        filename: CSV with "id", "content" and (train/dev only) "rating".
        max_len: fixed sequence length after truncation/padding.
        test: when True the split has no labels; None is returned for them.

    Returns:
        (id_list, content_list, rating_list_or_None)
    """
    df = pd.read_csv(filename)
    id_ = []      # review id
    rating = []   # rating label (stays empty for the test split)
    content = []  # padded review token lists
    for i in range(len(df)):
        id_.append(int(df['id'][i]))
        if not test:
            rating.append(int(df['rating'][i]))
        sentence = clean(str(df['content'][i]).strip())
        sentence = sentence.split()
        sent_len = len(sentence)
        # here we pad the sequence for whole training set, you can also try
        # to do dynamic padding for each batch by customizing a collate_fn
        # function (reported dynamic padding earns 1 bonus point)
        if sent_len > max_len:
            content.append(sentence[:max_len])
        else:
            content.append(sentence + ["PAD"] * (max_len - sent_len))
    if test:
        # BUG FIX: the original evaluated `len(id_) == len(content)` without
        # `assert`, so this sanity check was a silent no-op.
        assert len(id_) == len(content)
        return (id_, content, None)
    else:
        assert len(id_) == len(content) == len(rating)
        return (id_, content, rating)
def get_dataloaders(batch_size, max_len):
    """Build the train/dev/test DataLoaders and report corpus statistics.

    Reads "train.csv", "dev.csv" and "test.csv" from the working directory.

    Returns:
        (train_loader, dev_loader, test_loader, vocab_size)
    """
    # The vocabulary (and all statistics) come from the training split only.
    vocab, statistic = Lang(Vocab(), "train.csv")

    train_set = Dataset(preprocess("train.csv", max_len), vocab)
    dev_set = Dataset(preprocess("dev.csv", max_len), vocab)
    test_set = Dataset(preprocess("test.csv", max_len, test=True), vocab)

    print(statistic)

    make_loader = torch.utils.data.DataLoader
    # Only the training loader shuffles.
    loader_tr = make_loader(dataset=train_set, batch_size=batch_size, shuffle=True)
    loader_dev = make_loader(dataset=dev_set, batch_size=batch_size, shuffle=False)
    loader_test = make_loader(dataset=test_set, batch_size=batch_size, shuffle=False)
    return loader_tr, loader_dev, loader_test, statistic["vocab_size"]
| [
"itsuncheng2000@gmail.com"
] | itsuncheng2000@gmail.com |
b327b6904a68a6fac9133923566f52491e3e7255 | 96db160b6075e49101686eb4947fefb2e0909985 | /Store/views.py | 3cba59847230429be847c64618fcdb291698a251 | [] | no_license | hdforoozan/Restaurant | 7c43b1c89e8edc504a27dac2515313b979069c88 | d9420dc5dcd42bcb6c5952474ef996845ec4381c | refs/heads/master | 2022-12-09T13:38:57.970747 | 2019-09-29T20:45:10 | 2019-09-29T20:45:10 | 208,814,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,784 | py | from datetime import datetime
from django.shortcuts import render
from .models import Store, Employee, Manager
from Food.models import Food
from django.urls import reverse_lazy
from django.views.generic import TemplateView,DetailView,ListView, CreateView,DeleteView,UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from Cart.forms import CartAddFoodForm
from Order.models import Order
from Comment.forms import CommentForm
from Comment.models import Comment
from Food.forms import SearchForm
class HomePageView(TemplateView):
    """Landing page with featured food lists and a search form."""
    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE(review): "most sell" is approximated by a name filter here --
        # presumably placeholder logic; confirm the intended queryset.
        context.update(
            most_sell_foods=Food.objects.filter(name__icontains='p'),
            cheapest_foods=Food.objects.filter(price__lte=10),
            search_form=SearchForm(),
        )
        return context
##############################################################
# Store Model Views
##############################################################
class StoreListView(LoginRequiredMixin, ListView):
    """List every store; the template receives them as ``stores``."""
    model = Store
    context_object_name = 'stores'
class StoreDetailView(LoginRequiredMixin, DetailView):
    """Store page with its available foods, staff and an income summary."""
    model = Store
    context_object_name = 'store'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        store_pk = self.kwargs['pk']
        store = Store.objects.get(id=store_pk)
        context['foods'] = Food.objects.filter(stores=store).filter(run_out=False)
        context['employees'] = Employee.objects.filter(store__id=store_pk)
        # NOTE(review): named "monthly" but this sums every paid order ever,
        # and the store filter is applied in Python rather than in the query.
        context['monthly_income'] = sum(
            order.get_total_cost()
            for order in Order.objects.filter(paid=True)
            if order.store_id == store_pk
        )
        return context
class StoreCreateView(LoginRequiredMixin, CreateView):
    """Create a store from the listed model fields."""
    model = Store
    fields = ['user','manager','foods','branch_num','image','pub_date','address']
class StoreUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing store (subset of fields; no user/pub_date)."""
    model = Store
    fields = ['manager','foods','branch_num','image','address']
    context_object_name = 'store'
    template_name = 'Store/store_update_form.html'
class StoreDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a store, then return to the store list."""
    model = Store
    success_url = reverse_lazy('store-list')
    context_object_name = 'store'
class StoreFoodDetailView(LoginRequiredMixin, DetailView):
    """Detail page for one food inside one store.

    Adds the food, an add-to-cart form, a comment form and the five most
    recent comments annotated with a coarse human-readable age, and stores
    the store id in the session for the cart machinery.
    """
    model = Store
    context_object_name = 'store'
    template_name = 'Store/store_food_detail.html'

    @staticmethod
    def _time_since(created, now):
        """Return a coarse "time ago" string for a comment timestamp."""
        diff = now - created
        if diff.days > 0:
            months = diff.days // 30
            if months > 0:
                return '{} months ago'.format(months)
            weeks = diff.days // 7
            if weeks > 0:
                return '{} weeks ago'.format(weeks)
            return '{} days ago'.format(diff.days)
        hours = diff.seconds // 3600
        if hours > 0:
            return '{} hours ago'.format(hours)
        minutes = (diff.seconds % 3600) // 60
        if minutes > 0:
            return '{} minutes ago'.format(minutes)
        return 'just now'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        store = Store.objects.get(id=self.kwargs['pk'])
        food = Food.objects.filter(stores=store).get(id=self.kwargs['food_id'])
        context['food'] = food
        context['cart_food_form'] = CartAddFoodForm()
        context['comment_form'] = CommentForm()
        comments = Comment.objects.filter(food=food)[:5]
        # FIX: the original round-tripped both timestamps through
        # strftime/strptime (which only truncated microseconds and dropped
        # tzinfo) and called datetime.strptime via the `now` instance.
        # Subtract the datetimes directly, keeping the same truncation and
        # tz-stripping so aware Comment.created values still work.
        now = datetime.now().replace(microsecond=0)
        comment_times = [
            self._time_since(c.created.replace(tzinfo=None, microsecond=0), now)
            for c in comments
        ]
        context['food_comments'] = zip(comments, comment_times)
        self.request.session['store_id'] = store.id
        return context
##############################################################
# Manager Model Views
###############################################################
class ManagerDetailView(LoginRequiredMixin, DetailView):
    """Show a single manager's profile."""
    model = Manager
    context_object_name = 'manager'
class ManagerUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a manager's profile fields."""
    model = Manager
    fields = ['name','address','phone_num','education_degree','image']
    context_object_name = 'manager'
    template_name = 'Store/manager_update_form.html'
class ManagerDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a manager, then return to the store list."""
    model = Manager
    success_url = reverse_lazy('store-list')
    context_object_name = 'manager'
##############################################################
# Employee Model Views
###############################################################
class EmployeeDetailView(LoginRequiredMixin, DetailView):
    """Show one employee, but only if they belong to the store in the URL.

    URL kwargs: ``pk`` is the store id, ``employee_id`` the employee id.
    """
    model = Employee
    context_object_name = 'employee'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        employee = Employee.objects.get(id=self.kwargs['employee_id'])
        # Hide the employee when they are not part of the requested store.
        belongs = employee in Employee.objects.filter(store_id=self.kwargs['pk'])
        context['employee'] = employee if belongs else None
        return context
class EmployeeCreateView(LoginRequiredMixin, CreateView):
    """Create an employee from the listed model fields."""
    model = Employee
    fields = ['store','name','address','phone_num','pub_date','image','position','education_degree','monthly_salary']
class EmployeeUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an employee's profile fields."""
    model = Employee
    fields = ['name','address','phone_num','image','education_degree','position']
    context_object_name = 'employee'
    template_name = 'Store/employee_update_form.html'
class EmployeeDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an employee.

    NOTE(review): 'store-detail' routes normally require a pk argument;
    a bare reverse_lazy here presumably raises NoReverseMatch on redirect --
    verify against the URLconf.
    """
    model = Employee
    success_url = reverse_lazy('store-detail')
    context_object_name = 'employee'
| [
"hdforoozan@gmail.com"
] | hdforoozan@gmail.com |
930242a46d4d476e465877f556f7744c09983f6b | bc1746f9e0ee46a40a8620420155a92cfc613a6d | /django_pythonic_menu/menu.py | 4fa8e62d5c6d59c509d8ba2449d2ae03abbb83f9 | [
"MIT"
] | permissive | YuriMalinov/django-pythonic-menu | 2861c5c448f290fa6d692cb6b7c0330159b8ad29 | 3b82a1bd60f32eee8e5c57fb2c71321ed0b9cda8 | refs/heads/master | 2021-01-17T17:14:47.160297 | 2016-11-13T15:17:09 | 2016-11-13T15:17:09 | 62,589,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,966 | py | import re
from collections import OrderedDict
from importlib import import_module
import six
from django.core.urlresolvers import reverse
from django.utils.six import wraps
class MenuItem:
    """A single node of a menu tree.

    ``route`` may be a URL name (resolved with reverse()), a callable
    ``(request, item) -> url``, or a literal URL prefixed with ``"!"``.
    ``visibility`` is an optional callable ``(request, item) -> bool``.
    Extra keyword arguments are copied verbatim into the dict produced by
    :meth:`build`.
    """

    # Class-wide creation counter: every item (and every Menu subclass via
    # MenuMeta) receives a monotonically increasing _index, used later to
    # sort siblings into declaration order.
    _counter = 0

    def __init__(self, route=None, visibility=None, title=None, **kwargs):
        self.title = title
        self.visibility = visibility
        self.route = route
        self.name = None  # attribute name inside the Menu; set by Menu.prepare()
        self.kwargs = kwargs
        self._index = MenuItem._counter
        MenuItem._counter += 1
        self.items = []  # child MenuItems; filled by Menu.prepare()
        # False means "not computed yet" -- None is a valid cached value.
        self.cached_url = False

    def activate(self, f=None, only=False, before=True):
        """Decorator marking this item active for requests handled by ``f``.

        Works on plain view functions and on class-based views (whose
        ``dispatch`` is wrapped in place).  ``only`` resets previously
        activated items; ``before`` controls whether activation happens
        before or after the wrapped view runs.  Usable both as
        ``@item.activate`` and ``@item.activate(only=True)``.
        """
        def actual_wrap(f):
            if isinstance(f, type):
                # Class-based view: patch its dispatch().
                that = self
                old_dispatch = f.dispatch

                @wraps(old_dispatch)
                def wrapper(self, request, *args, **kwargs):
                    if before:
                        that.activate_for_request(request, only)
                    result = old_dispatch(self, request, *args, **kwargs)
                    if not before:
                        that.activate_for_request(request, only)
                    return result
                f.dispatch = wrapper
                return f
            else:
                # Plain view function.
                @wraps(f)
                def wrapper(request, *args, **kwargs):
                    if before:
                        self.activate_for_request(request, only)
                    result = f(request, *args, **kwargs)
                    if not before:
                        self.activate_for_request(request, only)
                    return result
                return wrapper

        if callable(f):
            # Used without parentheses: f is the view itself.
            return actual_wrap(f)
        else:
            return actual_wrap

    def activate_for_request(self, request, only=False):
        """Record this item in ``request.active_menus`` (a set)."""
        if not hasattr(request, 'active_menus') or only:
            request.active_menus = {self}
        else:
            request.active_menus.add(self)

    # noinspection PyUnresolvedReferences,PyProtectedMember
    def build(self, request):
        """Build this item's nested dict, or return None when not visible.

        ``active`` is True when the item itself was activated for the
        request, and the string ``'subitem'`` when only a descendant was.
        """
        if callable(self.visibility) and not self.visibility(request, self):
            return None
        result = {
            'title': self.title,
            'url': self.make_url(request),
            'items': [],
            'by_name': OrderedDict(),
            'active': hasattr(request, 'active_menus') and self in request.active_menus
        }
        # Arbitrary extra kwargs from the constructor override/extend the dict.
        result.update(self.kwargs)
        for menu_item in self.items:
            item = menu_item.build(request)
            if item is None:
                continue
            if item['active'] and not result['active']:
                result['active'] = 'subitem'
            result['items'].append(item)
            result['by_name'][menu_item.name] = item
        return result

    def make_url(self, request):
        """Resolve the item's URL: cached value, callable, literal or reverse()."""
        if self.cached_url is not False:
            return self.cached_url
        elif self.route is None:
            return None
        elif callable(self.route):
            return self.route(request, self)
        elif self.route.startswith('!'):
            # "!..." marks an already-resolved literal URL.
            return self.route[1:]
        else:
            return reverse(self.route)

    def cache_route(self):
        """Pre-resolve and memoize all non-callable routes, recursively."""
        if not callable(self.route):
            self.cached_url = self.make_url(None)
        for item in self.items:
            item.cache_route()
class MenuMeta(type):
    """Metaclass for Menu: stamps each subclass with a creation index and
    immediately builds its ``root_item`` tree by calling ``prepare()``."""

    # noinspection PyProtectedMember,PyUnresolvedReferences
    def __init__(cls, what, bases=None, dict=None):
        super(MenuMeta, cls).__init__(what, bases, dict)
        # Share MenuItem's counter so nested Menu classes sort in
        # declaration order alongside plain MenuItem attributes.
        cls._cls_index = MenuItem._counter
        MenuItem._counter += 1
        cls.prepare()
class Menu(six.with_metaclass(MenuMeta)):
    """Declarative menu: subclass and assign MenuItems / nested Menu classes.

    MenuMeta invokes :meth:`prepare` at class-creation time, collecting the
    class attributes into ``root_item`` (a MenuItem tree).  An inner ``Meta``
    class supplies keyword arguments for the root item.
    """

    root_item = None

    # noinspection PyUnresolvedReferences,PyProtectedMember,PyProtectedMember
    @classmethod
    def prepare(cls):
        """Collect MenuItem attributes and nested Menus into ``root_item``."""
        menu_items = []
        for name, field in cls.__dict__.items():
            if name.startswith('__') or name == 'root_item':
                continue
            menu_item = None
            if isinstance(field, MenuItem):
                field.name = name
                # Derive a title from the attribute name when none was given.
                if field.title is None:
                    field.title = cls.make_title(name)
                menu_item = field
            elif isinstance(field, type) and issubclass(field, Menu):
                # A nested Menu class becomes a submenu.
                field.prepare()
                menu_item = field.root_item
            if menu_item:
                menu_items.append(menu_item)
        kwargs = {}
        if hasattr(cls, 'Meta'):
            # Non-dunder Meta attributes become root-item kwargs.
            for cls_name, cls_field in cls.Meta.__dict__.items():
                if not cls_name.startswith('__'):
                    kwargs[cls_name] = cls_field
        if 'title' not in kwargs:
            kwargs['title'] = cls.make_title(cls.__name__)
        cls.root_item = root_item = MenuItem(**kwargs)
        # Root inherits the class' creation index so sibling submenus sort
        # in declaration order alongside plain items.
        root_item._index = cls._cls_index
        menu_items.sort(key=lambda item: item._index)
        root_item.items = menu_items

    @classmethod
    def activate(cls, f=None, only=False):
        """Shortcut for ``root_item.activate`` (see MenuItem.activate)."""
        return cls.root_item.activate(f, only)

    @classmethod
    def build(cls, request):
        """Build the whole menu dict for *request*."""
        if cls.root_item is None:
            raise ValueError("root_item is None. Did you forget to call prepare()?")
        return cls.root_item.build(request)

    @classmethod
    def cache_routes(cls):
        """Pre-resolve every static route in the tree."""
        cls.root_item.cache_route()

    # Matches each uppercase letter, for CamelCase -> spaced-title conversion.
    _uppercase_re = re.compile('([A-Z])')

    @classmethod
    def make_title(cls, name):
        """Derive a human-readable title from a CamelCase/snake_case name."""
        name = cls._uppercase_re.sub(' \\1', name)
        name = name.replace('_', ' ')
        parts = name.split(' ')
        result = ' '.join(p.capitalize() for p in parts).strip()
        return result
def build_menu(request, class_or_name):
    """Build a menu dict from a Menu subclass or its dotted import path."""
    if isinstance(class_or_name, type) and issubclass(class_or_name, Menu):
        menu_cls = class_or_name
    else:
        # Dotted path: import the module and fetch the class by name.
        module_path, cls_name = class_or_name.rsplit('.', 1)
        menu_cls = getattr(import_module(module_path), cls_name)
    return menu_cls.build(request)
| [
"yurik.m@gmail.com"
] | yurik.m@gmail.com |
7a65e54865e002c36b16fb0dc338699d283732c9 | 80b489a53f7f211a09920affa5998a0724d83e71 | /webapp/venv/lib/python2.7/site-packages/alembic/ddl/mysql.py | 96f42f382d97e602f22fcb1113185859ced6fda1 | [
"MIT"
] | permissive | mustafatasdemir/apkinspector | d9ec9d85da5e0014edaf0d98119561bf3f87dffc | 1bd0d044b7daef4efda21c985393f8d73722a074 | refs/heads/master | 2016-09-06T03:25:49.212735 | 2014-12-12T03:52:56 | 2014-12-12T03:52:56 | 24,392,752 | 3 | 0 | null | 2014-10-28T03:57:43 | 2014-09-23T23:10:55 | Java | UTF-8 | Python | false | false | 8,123 | py | from sqlalchemy.ext.compiler import compiles
from sqlalchemy import types as sqltypes
from sqlalchemy import schema
from ..compat import string_types
from .. import util
from .impl import DefaultImpl
from .base import ColumnNullable, ColumnName, ColumnDefault, \
ColumnType, AlterColumn, format_column_name, \
format_server_default
from .base import alter_table
class MySQLImpl(DefaultImpl):
    """Alembic DDL implementation for the MySQL dialect.

    MySQL cannot alter a single column aspect in isolation: a rename issues
    CHANGE, other modifications issue MODIFY (both restating the full column
    spec), and a bare default change uses ALTER COLUMN ... SET/DROP DEFAULT.
    """
    __dialect__ = 'mysql'

    transactional_ddl = False  # MySQL DDL autocommits

    def alter_column(self, table_name, column_name,
                     nullable=None,
                     server_default=False,
                     name=None,
                     type_=None,
                     schema=None,
                     autoincrement=None,
                     existing_type=None,
                     existing_server_default=None,
                     existing_nullable=None,
                     existing_autoincrement=None
                     ):
        # A rename requires CHANGE, which must restate the whole definition;
        # fall back to existing_* values for anything the caller didn't set.
        if name is not None:
            self._exec(
                MySQLChangeColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        # No rename, but nullability/type/autoincrement changed: MODIFY.
        elif nullable is not None or \
                type_ is not None or \
                autoincrement is not None:
            self._exec(
                MySQLModifyColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name if name is not None else column_name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        # Only the server default changed: plain ALTER COLUMN.
        elif server_default is not False:
            self._exec(
                MySQLAlterDefault(
                    table_name, column_name, server_default,
                    schema=schema,
                )
            )

    def correct_for_autogen_constraints(self, conn_unique_constraints,
                                        conn_indexes,
                                        metadata_unique_constraints,
                                        metadata_indexes):
        """Drop MySQL's implicit FK-column indexes from autogenerate diffs."""
        removed = set()
        for idx in list(conn_indexes):
            # MySQL puts implicit indexes on FK columns, even if
            # composite and even if MyISAM, so can't check this too easily
            if idx.name == idx.columns.keys()[0]:
                conn_indexes.remove(idx)
                removed.add(idx.name)

        # then remove indexes from the "metadata_indexes"
        # that we've removed from reflected, otherwise they come out
        # as adds (see #202)
        for idx in list(metadata_indexes):
            if idx.name in removed:
                metadata_indexes.remove(idx)
class MySQLAlterDefault(AlterColumn):
    """ALTER TABLE ... ALTER COLUMN ... SET/DROP DEFAULT for MySQL."""

    def __init__(self, name, column_name, default, schema=None):
        # NOTE: the super() call is anchored at AlterColumn on purpose --
        # it skips AlterColumn.__init__ and calls its base class directly,
        # presumably because AlterColumn's own __init__ sets attributes
        # this construct replaces; confirm against the .base module.
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.default = default
class MySQLChangeColumn(AlterColumn):
    """ALTER TABLE ... CHANGE: rename and/or redefine a column.

    MySQL requires the complete column definition here, hence the
    mandatory ``type_``.
    """

    def __init__(self, name, column_name, schema=None,
                 newname=None,
                 type_=None,
                 nullable=None,
                 default=False,
                 autoincrement=None):
        # Anchored at AlterColumn intentionally: skips AlterColumn.__init__
        # (same pattern as MySQLAlterDefault above).
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.nullable = nullable
        self.newname = newname
        self.default = default
        self.autoincrement = autoincrement
        if type_ is None:
            raise util.CommandError(
                "All MySQL CHANGE/MODIFY COLUMN operations "
                "require the existing type."
            )

        self.type_ = sqltypes.to_instance(type_)
class MySQLModifyColumn(MySQLChangeColumn):
    # Shares CHANGE's attributes; only the rendered keyword (MODIFY) differs.
    pass
# MySQL cannot alter nullability/name/default/type individually; intercept
# these generic constructs so they fail loudly instead of emitting bad DDL.
@compiles(ColumnNullable, 'mysql')
@compiles(ColumnName, 'mysql')
@compiles(ColumnDefault, 'mysql')
@compiles(ColumnType, 'mysql')
def _mysql_doesnt_support_individual(element, compiler, **kw):
    raise NotImplementedError(
        "Individual alter column constructs not supported by MySQL"
    )
@compiles(MySQLAlterDefault, "mysql")
def _mysql_alter_default(element, compiler, **kw):
    """Render ALTER COLUMN ... SET DEFAULT / DROP DEFAULT."""
    if element.default is not None:
        default_clause = "SET DEFAULT %s" % format_server_default(
            compiler, element.default)
    else:
        default_clause = "DROP DEFAULT"
    return "%s ALTER COLUMN %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        default_clause
    )
@compiles(MySQLModifyColumn, "mysql")
def _mysql_modify_column(element, compiler, **kw):
    """Render ALTER TABLE ... MODIFY <col> <full column spec>."""
    colspec = _mysql_colspec(
        compiler,
        nullable=element.nullable,
        server_default=element.default,
        type_=element.type_,
        autoincrement=element.autoincrement
    )
    return "%s MODIFY %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        colspec,
    )
@compiles(MySQLChangeColumn, "mysql")
def _mysql_change_column(element, compiler, **kw):
    """Render ALTER TABLE ... CHANGE <old> <new> <full column spec>."""
    colspec = _mysql_colspec(
        compiler,
        nullable=element.nullable,
        server_default=element.default,
        type_=element.type_,
        autoincrement=element.autoincrement
    )
    return "%s CHANGE %s %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        format_column_name(compiler, element.newname),
        colspec,
    )
def _render_value(compiler, expr):
    """Render a server default: quote plain strings, compile SQL expressions."""
    if isinstance(expr, string_types):
        return "'%s'" % expr
    return compiler.sql_compiler.process(expr)
def _mysql_colspec(compiler, nullable, server_default, type_,
autoincrement):
spec = "%s %s" % (
compiler.dialect.type_compiler.process(type_),
"NULL" if nullable else "NOT NULL"
)
if autoincrement:
spec += " AUTO_INCREMENT"
if server_default is not False and server_default is not None:
spec += " DEFAULT %s" % _render_value(compiler, server_default)
return spec
@compiles(schema.DropConstraint, "mysql")
def _mysql_drop_constraint(element, compiler, **kw):
    """Redefine SQLAlchemy's drop constraint to
    raise errors for invalid constraint type."""
    constraint = element.element
    # FK / PK / UNIQUE each have a concrete DROP syntax in MySQL.
    if isinstance(constraint, (schema.ForeignKeyConstraint,
                               schema.PrimaryKeyConstraint,
                               schema.UniqueConstraint)
                  ):
        return compiler.visit_drop_constraint(element, **kw)
    elif isinstance(constraint, schema.CheckConstraint):
        raise NotImplementedError(
            "MySQL does not support CHECK constraints.")
    else:
        # MySQL has no generic "DROP CONSTRAINT" clause.
        raise NotImplementedError(
            "No generic 'DROP CONSTRAINT' in MySQL - "
            "please specify constraint type")
"mustafa.tasdemir@hotmail.com.tr"
] | mustafa.tasdemir@hotmail.com.tr |
7c9e10c88fe57e659271b8670b20e26f8d2ecefe | 911e7c7ba3ff999eb58275595d934dee968d8f80 | /trunk/agent/src/main/python/ambari_agent/ZooKeeperCommunicator.py | 576a4a4b458c22c4da1e845e50d351cc59c69c8a | [
"Apache-2.0"
] | permissive | sreev/ambari | 1bce266ed0d318af3ebe50a7bd6b378083612330 | 48b92a5aa58debe7824f4337a97dfa8a8bb07f71 | refs/heads/master | 2021-01-10T21:44:10.844859 | 2012-04-19T19:20:28 | 2012-04-19T19:20:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,890 | py | #!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import logging.handlers
import signal
import simplejson
import socket
import sys, traceback
import time
import threading
import zookeeper
from optparse import OptionParser
from Runner import Runner
logger = logging.getLogger()
options = None  # populated by OptionParser in main()
args = None     # positional CLI args from main()

# World-accessible ZooKeeper ACL (perms 0x1f = all permissions).
ZOOKEEPER_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
timeout = 10            # seconds to sleep while in safe mode
connected = False
conn_cv = threading.Condition()  # guards connection/auth handshakes
session_time = 100000   # ZooKeeper session timeout, milliseconds
class ZooKeeperCommunicator(threading.Thread):
hmsZkPrefix = "/clusters"
def __init__(self, zkservers, credential):
threading.Thread.__init__(self)
logger.debug('Initializing ZooKeeperCommunicator thread.')
zookeeper.set_debug_level(zookeeper.LOG_LEVEL_DEBUG)
self.zh = None
self.zkservers = zkservers
self.lock = threading.Lock()
self.acl = [ZOOKEEPER_OPEN_ACL_UNSAFE]
self.safeMode = True
self.credential = credential
def auth_callback(self, zh, result_code):
conn_cv.acquire()
conn_cv.notify()
conn_cv.release()
def conn_callback(self, zh, *args):
conn_cv.acquire()
conn_cv.notify()
conn_cv.release()
def start(self):
conn_cv.acquire()
self.zh = zookeeper.init(self.zkservers, self.conn_callback, session_time)
conn_cv.wait()
conn_cv.release()
if self.credential!=None:
print "credential: "+self.credential
conn_cv.acquire()
zookeeper.add_auth(self.zh, "digest", self.credential, self.auth_callback)
conn_cv.wait()
conn_cv.release()
logger.info("ZooKeeper connection established.")
def __del__(self):
zookeeper.close(self.zh)
def locate(self):
hostname = socket.gethostname()
try:
children = sorted(zookeeper.get_children(self.zh, self.hmsZkPrefix))
for child in children:
znode = self.hmsZkPrefix + '/' + child + '/' + hostname
if zookeeper.exists(self.zh, znode, None)!=None:
self.znode = znode
self.actionNode = znode + '/action'
self.statusNode = '/status'
stat, acl = zookeeper.get_acl(self.zh, self.statusNode)
self.acl = acl
if zookeeper.OK == self.aget():
self.safeMode = False
break
except:
self.safeMode = True
if self.safeMode != False:
logger.warn("Can not locate " + hostname + " in zookeeper, sleep " + str(timeout) + " seconds.")
if self.lock.locked():
self.lock.release()
def update(self, zh, node, object):
buffer = simplejson.dumps(object)
if zookeeper.exists(zh, node, None) != None:
zookeeper.delete(zh, node, 0)
zookeeper.create(zh, node, buffer, self.acl, 0)
def enqueue(self, zh, node, object):
buffer = simplejson.dumps(object)
zookeeper.create(zh, node, buffer, self.acl, zookeeper.SEQUENCE)
def launch(self, zh, workLogNode, actionNode, statusNode):
state = {}
data = zookeeper.get(zh, actionNode, 0)
jsonp = simplejson.loads(data[0])
state['cmdPath'] = jsonp['cmdPath']
state['actionPath'] = actionNode
state['actionId'] = jsonp['actionId']
state['host'] = self.znode
state['status']='STARTING'
self.update(zh, workLogNode, state)
logger.info("Launch: "+simplejson.dumps(jsonp))
dispatcher = Runner()
try:
result = dispatcher.run(jsonp)
logger.info("Result: "+simplejson.dumps(result))
if "exit_code" in result and result['exit_code']==0:
state['status']='SUCCEEDED'
else:
state['status']='FAILED'
except:
logger.exception('Execution error: '+actionNode)
state['status']='FAILED'
self.update(zh, workLogNode, state)
self.enqueue(zh, statusNode, state)
def aget(self):
return zookeeper.aget_children(self.zh, self.actionNode, self.queue_watcher, self.queue_callback)
def queue_watcher(self, zh, event, state, path):
if zookeeper.OK != self.aget():
logger.error('Fail to monitor action queue for: '+self.actionNode)
self.safeMode = True
def queue_callback(self, zh, rc, data):
if zookeeper.OK == rc:
try:
for child in sorted(data):
action = self.actionNode + '/' + child
workLog = self.actionNode + '/' + child + '/worklog'
statusLog = self.statusNode + '/status-'
""" Launch the task if the task has not been executed """
if zookeeper.exists(zh, workLog, None) == None:
self.launch(zh, workLog, action, statusLog)
else:
""" If task has been previous launched, check for partial execution """
buffer = zookeeper.get(zh, workLog, 0)
state = simplejson.loads(buffer[0])
""" If task is incompleted in execution, launch again """
if 'status' in state and state['status'] == 'STARTING':
logger.info('Relaunch '+child)
self.launch(zh, workLog, action, statusLog)
else:
""" If the task has been launched, and completed, update status queue """
if zookeeper.exists(zh, statusLog, None) == None:
logger.info('Update status.')
self.update(zh, statusLog, state)
except NoNodeException, err:
""" Skip no node exception """
except Exception, err:
logger.exception(err)
else:
if zookeeper.NONODE == rc:
self.safeMode = True
if self.lock.locked():
self.lock.release()
def run(self):
self.locate()
while True:
try:
self.lock.acquire()
if self.safeMode == True:
time.sleep(timeout)
zookeeper.close(self.zh)
conn_cv.acquire()
self.zh = zookeeper.init(self.zkservers, self.conn_callback, session_time)
conn_cv.wait()
conn_cv.release()
self.locate()
if self.safeMode == False:
if zookeeper.OK != zookeeper.aget_children(self.zh, self.actionNode, self.queue_watcher, None):
logger.error('Fail to monitor action queue for: '+self.actionNode+', activate safe mode.')
self.safeMode = True
except Exception, err:
logger.exception(err)
def main(argv=None):
    """Parse command line options, start the communicator and wait on it."""
    # Allow Ctrl-C
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    parser = OptionParser()
    parser.add_option('-v', '--verbose',
        dest='verbose',
        default=False,
        action='store_true',
        help='Verbose logging. (default: %default)')
    parser.add_option('--zkservers',
        dest='zkservers',
        default='localhost:2181',
        help='Comma-separated list of host:port pairs. (default: %default)')

    global options
    global args
    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    logger.info('Starting Zookeeper python example: %s' % ' '.join(sys.argv))

    # BUG FIX: previously `ZooKeeperCommunicator("localhost:2181")` -- it
    # omitted the required `credential` argument (TypeError at runtime) and
    # ignored the --zkservers option parsed just above.
    zc = ZooKeeperCommunicator(options.zkservers, None)
    zc.start()
    zc.join()
| [
"sree_at_chess@yahoo.com"
] | sree_at_chess@yahoo.com |
08e27a11dfdd901859ea13372fe5cc91ccfe4f91 | 19a55c290de75ef32f790b96091eeb7a96703ec7 | /tasks/urls.py | 288e95106c5a36f4b38c17b300077d10576d3c58 | [] | no_license | vyshak-sukumaran/ToDo_Project | 49e7603d938d0401fc2d6c6caff8bd6bcf1f4ac1 | 7d368322d59793e42bcf5c00484d36fbf12240d0 | refs/heads/master | 2023-06-28T23:07:13.783844 | 2021-07-31T07:43:58 | 2021-07-31T07:43:58 | 391,287,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from django.urls import path, include
from . import views
# URL routes for the tasks app: index page plus per-task update/delete views.
# The task primary key is captured as a string path parameter named "pk".
urlpatterns = [
    path('', views.index, name="index"),
    path('update/<str:pk>/', views.update, name='update'),
    path('delete/<str:pk>/', views.delete, name='delete'),
]
| [
"vyshakvyshu791@gmail.com"
] | vyshakvyshu791@gmail.com |
86438607b68f0230a38d395d4d3e9ad3da451323 | d68c6fd11a0a034348b5ec4cfd3304048a01e0a8 | /python/docop-hello-world/main.py | c44270d657fe9c00768af56e3c9630cd0e9279c6 | [] | no_license | wfelipe3/KnowBag | 043570ea1fad101d416b49bca04f621661ed159b | 0cf10e8a57076b68dc3ce9327869496027285c96 | refs/heads/master | 2021-01-24T08:15:30.437703 | 2018-02-12T03:32:49 | 2018-02-12T03:32:49 | 26,071,840 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """Naval Fate.
Usage:
naval_fate.py ship new <name>...
naval_fate.py ship <name> move <x> <y> [--speed=<kn>]
naval_fate.py ship shoot <x> <y>
naval_fate.py mine (set|remove) <x> <y> [--moored | --drifting]
naval_fate.py (-h | --help)
naval_fate.py --version
Options:
-h --help Show this screen.
--version Show version.
--speed=<kn> Speed in knots [default: 10].
--moored Moored (anchored) mine.
--drifting Drifting mine.
"""
from docopt import docopt
from subprocess import call
import subprocess
def execute(command):
    """Run *command* (a list of program arguments) and return its stdout.

    Returns the captured standard output as bytes.

    Bug fix: the previous version drained the pipe with readlines() and
    readline() before calling communicate(), so the returned value was
    always empty; reading the stream exactly once via communicate() fixes
    that and also removes the leftover debug prints.
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE)
    out, err = process.communicate()
    return out
def test_method():
    """Print a fixed demo message (shows the module was executed)."""
    message = "this is a test"
    print(message)
if __name__ == '__main__':
    # Parse the usage pattern in the module docstring into an argument dict.
    arguments = docopt(__doc__, version='Naval Fate 2.0')
    print(arguments)
    # Demo: run `ls -la` and show what execute() captured.
    res = execute(['ls', '-la'])
    print("{res} is res".format(res = res))
    test_method()
| [
"feliperojas@iMac-de-Felipe.local"
] | feliperojas@iMac-de-Felipe.local |
23b612e9499c69a6a90e61eb101c7b66f1b8f63d | 627bb6e86d174e1929439cb8326862cdf261201b | /node-1-srte-bsid-90001-to-node-4.py | c803851e231f71a6af8b264f79a35c83dba4a889 | [] | no_license | chrissembiring/sr-bootcamp | 2e7b24c4222b1cfef85a2e6b19a520e7ff0bfe9d | dfc198fadf1b0e1bb99089ee25feec35a3b056a0 | refs/heads/master | 2021-10-21T22:33:30.838919 | 2019-03-07T01:00:19 | 2019-03-07T01:00:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | import getpass
import sys
import telnetlib
# Python 2 script: telnet to an IOS-XR node and configure an SR-TE policy
# (binding SID 90001 towards node 4) via an explicit segment list.
HOST = "198.18.1.41"
user = raw_input("Enter your telnet username: ")  # raw_input: Python 2 only
password = getpass.getpass()  # prompt without echoing the password
tn = telnetlib.Telnet(HOST)
tn.read_until("Username: ")
tn.write(user + "\n")
if password:
    tn.read_until("Password: ")
    tn.write(password + "\n")
# Enter configuration mode and define explicit segment list 1-7-3-2-4.
tn.write("conf t\n")
tn.write("segment-routing\n")
tn.write("traffic-eng\n")
tn.write("segment-list 1-7-3-2-4\n")
tn.write("index 10 mpls label 16007\n")
tn.write("index 20 mpls label 16002\n")
tn.write("index 30 mpls label 16004\n")
# Policy towards node 4 (1.1.1.4): binding SID 90001, color 4, one explicit
# candidate path at preference 100.
tn.write("policy node-4\n")
tn.write("binding-sid mpls 90001\n")
tn.write("color 4 end-point ipv4 1.1.1.4\n")
tn.write("candidate-paths\n")
tn.write("preference 100\n")
tn.write("explicit segment-list 1-7-3-2-4\n")
tn.write("commit\n")
tn.write("end\n")
# Show the resulting policy configuration, then close the session.
tn.write("show run segment-routing | in policy \n")
tn.write("exit\n")
print tn.read_all() | [
"noreply@github.com"
] | noreply@github.com |
077b5d30d14d285cab485c2c132910e626e14fc2 | dac8ec1bca7b390dac4aaedf94be0445cec03b66 | /lab04/run_NB.py | 53834b60d91845a6d924d88d71b14cd56d826d5d | [] | no_license | ldakir/Machine-Learning | 49333f641f745bcc523d6842fdc5e1c5db449d16 | 033a38459cac0d0c212f88a84ddcc6b772ec13dc | refs/heads/master | 2020-09-02T12:10:02.310934 | 2019-11-02T21:50:19 | 2019-11-02T21:50:19 | 219,218,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | """
Top level comment: be sure to include the purpose/contents of this file
as well as the author(s)
"""
import util
from Partition import *
from NaiveBayes import *
import numpy as np
def main():
    """Train a Naive Bayes model on the training ARFF file and report its
    accuracy and confusion matrix on the test ARFF file."""
    opts = util.parse_args()
    train_partition = util.read_arff(opts.train_filename)
    test_partition = util.read_arff(opts.test_filename)
    # Fit the model on the training partition.
    model = NaiveBayes(train_partition)
    # Rows are true labels, columns are predicted labels.
    num_labels = len(test_partition.labels)
    confusion_matrix = np.zeros((num_labels, num_labels))
    num_correct = 0
    for example in test_partition.data:
        predicted = model.classify(example.features)
        actual = example.label
        confusion_matrix[actual][predicted] += 1
        if predicted == actual:
            num_correct += 1
    print('Accuracy: '+ str(round(num_correct/test_partition.n,6)) +' ('+str(num_correct) + ' out of ' + str(test_partition.n) +' correct)')
    print(confusion_matrix)
main()
| [
"39743074+ldakir@users.noreply.github.com"
] | 39743074+ldakir@users.noreply.github.com |
17fef6c5d241acb0b7bb102fad34566c88da3eff | ce5ce3764e75774c0b7eab47893987b9f311b1b9 | /.history/moments/views_20210527215915.py | aeb3ce256b14aff202e4f75fe445d9d3152b2c1b | [] | no_license | iSecloud/django-wechat | 410fb8a23b50dc2343c2f0167bbae560bf6e9694 | faaff9bb7f9454a63b2f8411d3577169b470baad | refs/heads/main | 2023-05-15T06:53:16.252422 | 2021-06-07T14:00:35 | 2021-06-07T14:00:35 | 374,685,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the homepage template for the site root."""
    template_name = 'homepage.html'
    return render(request, template_name)
"869820505@qq.com"
] | 869820505@qq.com |
007455755e4df954816604e5b84ea3665446662f | bc900ac54e870c5150f1cf64a1196037c54835ce | /Main.py | 9d440ad2e8b1d45884e8a288feb45b10fddf7d26 | [] | no_license | ZeCanelha/AED_PROJECTO1 | 2065c12beb5047e80025f710c05e58b5dac53985 | 051991e4b2779d816139f520d4798d34ca94ff27 | refs/heads/master | 2021-01-19T21:15:05.700968 | 2017-03-26T22:14:48 | 2017-03-26T22:14:48 | 82,474,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | import csv
import DoubleNode as DoubleNode
import DoubleLinkedList as DLList
import Functions as F
import re as split
#TODO Criar um dicionario com os pares de Pais / Sigla
_par_sigla_pais = {};
def create_ddlist(data):
    """Interactive menu over a doubly linked list of country records.

    Each node holds the data of one country; the user can search, insert,
    edit, remove and print entries until option 5 returns to the main menu.
    NOTE(review): this is Python 2 code — input() evaluates the typed text
    and raw_input() reads raw strings.
    """
    DList = DLList.DoubleLinkedList()
    Function = F.Functions(1,DList,_par_sigla_pais)
    """ Cada no vai ter informacao sobre um pais """
    # Load every row but the last into the list.
    for i in range(len(data)-1):
        DList.add_end(data[i])
    while(1):
        print("\t\tDouble Linked Lists\n\n\t1.Search Element\n\t2.Insert Element\n\t3.Edit Element\n\t4.Remove Element\n\t5.Main Menu\n\t6.Print")
        opt = input()
        """ Search """
        if ( opt == 1 ):
            print("Country Name or Country Code:")
            response = raw_input()
            print("Index: %s\n" % Function.search(response))
        """ Insert: Vai dar pra inserir tudo """
        if ( opt == 2):
            # Inserting a new country means inserting country + code plus an
            # empty year array 1960-2016 | 46 positions.
            print("1. Insert new country\n")
            print("2. Insert new data on existent countries\n")
            new_opt = input()
            if ( new_opt == 1 ):
                print("Country name: ")
                country = raw_input()
                print("Sigla: ")
                sig = raw_input()
                print(Function.inser_new_country(country,sig))
            if ( new_opt == 2 ):
                print("Country: ")
                country = raw_input()
                print("Year: ")
                year = input()
                print("Data: ")
                data = input()
                Function.insert(country,year,data)
        """ Edit: Penso que seja so editar funcoes em determinados anos """
        if ( opt == 3):
            print("Country: ")
            country = raw_input()
            print("Year: ")
            year = input()
            print("Data: ")
            data = input()
            print(Function.edit(country,year,data))
        """ Remove: Remove deve ser como insert, remover tudo """
        if ( opt == 4):
            print("1.Remove Country")
            print("2.Remove information")
            opt1 = input()
            if ( opt1 == 1 ):
                print("Country: ")
                country = raw_input()
                print(Function.remove_country(country))
                DList.print_list()
            if ( opt1 == 2 ):
                print("Country: ")
                country = raw_input()
                print("Year: ")
                year = input()
                print(Function.remove(country,year))
        if opt == 6:
            DList.print_list()
        if ( opt == 5):
            break
def read_csv_files():
    """Read 'dados.csv' (';'-separated) and return all rows as field lists.

    Side effect: fills the module-level _par_sigla_pais mapping with
    row[0] -> row[1] pairs (country / code — confirm against the data file).

    Improvements: drops the redundant manual index counter (the freshly
    appended row is used directly) and no longer shadows the builtin `list`.
    """
    data_list = []
    with open('dados.csv', 'r') as fp:
        csvreader = csv.reader(fp, delimiter=";", quotechar='"')
        for row in csvreader:
            # Re-split on '|' exactly like the original code, so fields that
            # themselves contain '|' are broken apart the same way.
            fields = ('|'.join(row)).split('|')
            data_list.append(fields)
            _par_sigla_pais[fields[0]] = fields[1]
    # Return all the file's information as a list of rows.
    return data_list
def init():
    """Show the top-level menu in a loop until the user chooses Exit."""
    data = read_csv_files()
    while True:
        print("\t\tMenu\n\n\t1.Double Linked Lists\n\t2.Exit")
        choice = input()
        if choice == 1:
            create_ddlist(data)
        elif choice == 2:
            break
if __name__ == '__main__':
    init()
| [
"josemc@student.dei.uc.pt"
] | josemc@student.dei.uc.pt |
fed2ff0bff98961ef7a53472b2ccc62403a7594b | 34f5db6ceda8719b22e45feba790ab97574fef39 | /书评主题分析/lianxi.py | 8bcc0ff94fb1e54d8a8e1f534dfa5b2947a335e3 | [] | no_license | Limjumy/sanguozhi-python | 15c4783e9477bc5ee20ca816d6d4a8083e851aaf | 2995e9c8851420338ad2ab6b48266216b157dddc | refs/heads/master | 2020-12-02T05:24:59.365261 | 2017-07-11T14:28:36 | 2017-07-11T14:28:36 | 96,902,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | #!/usr/bin/python
# encoding:utf-8
# Collect every line of the long-review text file into the single inner list.
# NOTE(review): the name `list` shadows the builtin; kept unchanged so any
# code importing this module keeps working.
list=[[]]
filename = "E:\\python_project\\爬虫\\长评论\\1025998.txt"
# Bug fix: use a context manager so the file handle is closed afterwards.
with open(filename, 'r') as fopen:  # 'r' means read-only
    for eachLine in fopen:
        list[-1].append(eachLine)  # append each raw line (newline included)
| [
"ju@lingju.ac.cn"
] | ju@lingju.ac.cn |
e866fc987d259075f3a82088c41e177dd80b1e7a | c5e3c87b73a3d8473eb1f184f7f85cbda9281a94 | /Client/runner.py | aaad71c2c21616a751be16f3c3a2b8335f416c0a | [] | no_license | itaybou/SHA1-Cracker-Server-Client-Python3 | 8c752db249baa546104648d4fa6dae472c60c279 | 47c48a64f2e81105f48ec6ca94a7ff1ff409dc53 | refs/heads/master | 2020-12-07T07:28:37.552890 | 2020-01-08T22:26:44 | 2020-01-08T22:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | import udp_client as client
def main():
    """Create the UDP client and start it."""
    udp = client.UDPClient()
    udp.start()
    return
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("Exited")
| [
"noreply@github.com"
] | noreply@github.com |
8d1dcda3139a9d6e5d1dcd75a2e85017e18a0a4a | 78c3082e9082b5b50435805723ae00a58ca88e30 | /03.AI알고리즘 소스코드/venv/Lib/site-packages/caffe2/python/operator_test/flatten_op_test.py | ba5fce81296a516900f9cabf049c0c697338ce54 | [] | no_license | jinStar-kimmy/algorithm | 26c1bc456d5319578110f3d56f8bd19122356603 | 59ae8afd8d133f59a6b8d8cee76790fd9dfe1ff7 | refs/heads/master | 2023-08-28T13:16:45.690232 | 2021-10-20T08:23:46 | 2021-10-20T08:23:46 | 419,217,105 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 960 | py |
from hypothesis import given
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFlatten(hu.HypothesisTestCase):
    """Property-based test for the caffe2 Flatten operator."""
    @given(X=hu.tensor(min_dim=2, max_dim=4),
           **hu.gcs)
    def test_flatten(self, X, gc, dc):
        # Flatten accepts any axis in [0, ndim]; exercise every value.
        for axis in range(X.ndim + 1):
            op = core.CreateOperator(
                "Flatten",
                ["X"],
                ["Y"],
                axis=axis)
            def flatten_ref(X):
                # Reference result: dims before `axis` collapse into the
                # outer size, the remaining dims into the inner size.
                shape = X.shape
                outer = np.prod(shape[:axis]).astype(int)
                inner = np.prod(shape[axis:]).astype(int)
                return np.copy(X).reshape(outer, inner),
            self.assertReferenceChecks(gc, op, [X], flatten_ref)
            # Check over multiple devices
            self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
    # Allow running this test module directly.
    import unittest
    unittest.main()
| [
"gudwls3126@gmail.com"
] | gudwls3126@gmail.com |
b1e1f5704c991a7ae0995afbf5c7841193d0579b | 104252de9f8ac93aa4c052a955773cfadd55d481 | /PYTHON_PART_TWO/Part6_Modules_and_Packages/Part6_module_third_C_way_my.py | 60d4cc80716d44d96cdd3e7b39c43cbc37da5bcc | [] | no_license | TomaszPrysak/Full_Stack_Developer_PYTHON_PART | a03bf808f5012736dc46810cc049615d52b08622 | 31cf0888cdfd285b4d29416683aa7243a9778fdd | refs/heads/master | 2022-06-18T01:18:12.753292 | 2022-05-29T11:11:20 | 2022-05-29T11:11:20 | 166,885,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Plik funkcji, klas bądź innych obiektów jako moduł bądź pakiet z którego będziemy importować całą zawartośc bądź poszczególne funkcje, klasy bądź inne obiekty.
def trzeciSposobIC():
    """Print a message confirming the module was imported the 'third C' way.

    Called from the script that imports this file as a module.
    """
    message = "Zostałem zaimportowany przez TRZECI C sposób"
    print(message)
ulubioneZwierzatka = ("psy", "foczki") # krtotka zdeklarowana w pliku będącym modułem importowanym, krotka ta będzie wywoływana z poziomu pliku pythonowskiego w którym jest importowany ten plik jako moduł
| [
"prysak.tomasz@gmail.com"
] | prysak.tomasz@gmail.com |
82f8969ed3e1b7093084a9af42e14ad369a4af19 | 6809cda579a7c1c88872f566d65f665c2dff20bb | /research-v10/verify-kb-2.py | e729c90888e8431c8193a7409ef0df0f2a33dcfb | [] | no_license | hellojixian/stock-dummy | edb3e7447e26ec3e0481c938fcf8f72063d6c850 | 06b352ba3d78ac419e7672b0e6ec630f6f461ae8 | refs/heads/master | 2020-06-15T09:11:33.401689 | 2019-11-05T15:46:43 | 2019-11-05T15:46:43 | 195,256,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py | #!/usr/bin/env python3
import datetime
import pandas as pd
import math, sys, os
from lib.jqdata import *
from lib.func import *
# Back-test of a simple daily stock-selection rule over a labeled dataset.
filename = 'data/dataset-labeled-min.csv'
np.random.seed(0)
dataset = pd.read_csv(filename,index_col=0)
# One entry per distinct trading day (index of the per-day security counts).
trading_dates = dataset['security'].groupby(dataset.index).count().index.tolist()
print('Trading data loaded')
total_profit = 1
profits,temp = [],[]
skip_days = 8
history = pd.DataFrame()  # NOTE(review): never filled, so buy_history.csv is empty
for trading_date in trading_dates:
    date_i = trading_dates.index(trading_date)
    # if date_i>412:break
    subset = dataset[dataset.index==trading_date]
    total = subset.shape[0]
    # Keep stocks whose prev_0 change is in [-4, 4.75] and that traded
    # (open != close); column semantics presumed from names — confirm.
    query = "(prev_0<=4.75 & prev_0>=-4) and (open!=close)"
    subset = subset[subset.eval(query)]
    factors = ['money','prev_changes_7']
    rs = subset
    # Take the 20 smallest by 'money', then the 6 smallest by 'prev_changes_7'.
    rs = rs.sort_values(by=[factors[0]],ascending=True)
    rs = rs[:20]
    rs = rs.sort_values(by=[factors[1]],ascending=True)
    rs = rs[:6]
    rs = rs[['security','close',factors[1],'prev_1','prev_0','fu_1']]
    if rs.shape[0]>4 :
        print("="*120,'\n',rs,'\n',"="*120)
        # Day profit: mean next-day change ('fu_1') of the selected stocks.
        profit = rs['fu_1'].mean()
        profits.append({'id':date_i,'date':trading_date,'profit':profit})
        temp.append(profit)
        # While in a "skip" window the profit is recorded but not compounded.
        if skip_days>0:
            skip_days-=1
        else:
            total_profit = total_profit*(1+(profit/100))
        print("{:06}\t{}\t Profit: {:.2f}%\t Total: {:.2f}%\t skip:{}\t secs:{:.2f}\n".format(
            date_i,trading_date,profit,total_profit*100, skip_days, total))
        if skip_days==0:
            # Heuristic cool-downs triggered by recent profit patterns.
            if np.sum(temp[-2:])>=11: skip_days = 3
            if temp[-1]<=0 and temp[-2]>=0 and temp[-3]<=0 and temp[-4]>=0 and temp[-5]>=0: skip_days = 1
            if temp[-1]<=0 and temp[-2]<=0 and temp[-3]>=0 and temp[-4]<=0 and temp[-5]<=0: skip_days = 1
            if temp[-1]<=0 and temp[-2]<=0 and temp[-3]<=0 and temp[-4]<=0 and temp[-5]>=0: skip_days = 1
profits = pd.DataFrame(profits)
profits.to_csv('profit_changes.csv')
history.to_csv('buy_history.csv')
| [
"hellojixian@gmail.com"
] | hellojixian@gmail.com |
b6a695509f4c932fce5594d2924313a6581f08bd | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_ORGS/NPM/node/deps/v8/tools/release/test_scripts.py | 7cf5d141301c575c3186e2488597b510374b586d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"SunPro"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 34,860 | py | #!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import os
import shutil
import tempfile
import traceback
import unittest
import auto_push
from auto_push import LastReleaseBailout
import auto_roll
import common_includes
from common_includes import *
import create_release
from create_release import *
import merge_to_branch
from merge_to_branch import MergeToBranch
from auto_tag import AutoTag
import roll_merge
from roll_merge import RollMerge
# Fake configuration handed to the release scripts under test; all paths
# point at throw-away temp locations. "DEFAULT_CWD" is filled in per test
# by ScriptTest.setUp().
TEST_CONFIG = {
  "DEFAULT_CWD": None,
  "BRANCHNAME": "test-prepare-push",
  "PERSISTFILE_BASENAME": "/tmp/test-create-releases-tempfile",
  "PATCH_FILE": "/tmp/test-v8-create-releases-tempfile-tempfile-patch",
  "COMMITMSG_FILE": "/tmp/test-v8-create-releases-tempfile-commitmsg",
  "CHROMIUM": "/tmp/test-create-releases-tempfile-chromium",
  "SETTINGS_LOCATION": None,
  "ALREADY_MERGING_SENTINEL_FILE":
      "/tmp/test-merge-to-branch-tempfile-already-merging",
  "TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
}
# Default author/reviewer flags shared by the auto-push/auto-roll tests.
AUTO_PUSH_ARGS = [
  "-a", "author@chromium.org",
  "-r", "reviewer@chromium.org",
]
class ToplevelTest(unittest.TestCase):
  """Tests for module-level helpers that need no mocked environment."""

  def testSaniniziteVersionTags(self):
    # NOTE: the method name keeps its historical typo so external test
    # filters continue to match. The deprecated assertEquals alias was
    # replaced by assertEqual.
    self.assertEqual("4.8.230", SanitizeVersionTag("4.8.230"))
    self.assertEqual("4.8.230", SanitizeVersionTag("tags/4.8.230"))
    self.assertEqual(None, SanitizeVersionTag("candidate"))

  def testNormalizeVersionTags(self):
    # Tags with and without the "tags/" prefix; non-version entries such as
    # "candidates" are dropped. (`input` was renamed to avoid shadowing the
    # builtin.)
    raw_tags = ["4.8.230",
                "tags/4.8.230",
                "tags/4.8.224.1",
                "4.8.224.1",
                "4.8.223.1",
                "tags/4.8.223",
                "tags/4.8.231",
                "candidates"]
    expected = ["4.8.230",
                "4.8.230",
                "4.8.224.1",
                "4.8.224.1",
                "4.8.223.1",
                "4.8.223",
                "4.8.231",
                ]
    self.assertEqual(expected, NormalizeVersionTags(raw_tags))
def Cmd(*args, **kwargs):
  """Build the expectation dict for one mocked shell command.

  The last positional argument is the canned return value; the optional
  "cb" and "cwd" keyword arguments are recorded so SimpleMock.Call can
  match the actual invocation against this expectation.
  """
  expectation = {"name": "command"}
  expectation["args"] = args
  expectation["ret"] = args[-1]
  expectation["cb"] = kwargs.get("cb")
  expectation["cwd"] = kwargs.get("cwd", TEST_CONFIG["DEFAULT_CWD"])
  return expectation
def RL(text, cb=None):
  """Build the expectation dict for one mocked readline() call."""
  return dict(name="readline", args=[], ret=text, cb=cb, cwd=None)
def URL(*args, **kwargs):
  """Build the expectation dict for one mocked readurl() call.

  All but the last positional argument describe the expected request; the
  last one is the canned response.
  """
  request, response = args[:-1], args[-1]
  return {
    "name": "readurl",
    "args": request,
    "ret": response,
    "cb": kwargs.get("cb"),
    "cwd": None,
  }
class SimpleMock(object):
  """Scripted mock: a test installs a 'recipe' of expectation dicts (built
  by Cmd/RL/URL) and every Call() consumes and checks the next entry."""
  def __init__(self):
    # _recipe: sequence of expectation dicts; _index: last consumed entry.
    self._recipe = []
    self._index = -1
  def Expect(self, recipe):
    """Install the list of expected calls for the current test."""
    self._recipe = recipe
  def Call(self, name, *args, **kwargs):  # pragma: no cover
    """Match one actual call against the next expectation and return its
    canned result (or raise it, if the canned result is an exception)."""
    self._index += 1
    try:
      expected_call = self._recipe[self._index]
    except IndexError:
      raise NoRetryException("Calling %s %s" % (name, " ".join(args)))
    if not isinstance(expected_call, dict):
      raise NoRetryException("Found wrong expectation type for %s %s" %
                             (name, " ".join(args)))
    if expected_call["name"] != name:
      raise NoRetryException("Expected action: %s %s - Actual: %s" %
                             (expected_call["name"], expected_call["args"], name))
    # Check if the given working directory matches the expected one.
    if expected_call["cwd"] != kwargs.get("cwd"):
      raise NoRetryException("Expected cwd: %s in %s %s - Actual: %s" %
                             (expected_call["cwd"],
                              expected_call["name"],
                              expected_call["args"],
                              kwargs.get("cwd")))
    # The number of arguments in the expectation must match the actual
    # arguments.
    if len(args) > len(expected_call['args']):
      raise NoRetryException("When calling %s with arguments, the "
          "expectations must consist of at least as many arguments." %
          name)
    # Compare expected and actual arguments.
    for (expected_arg, actual_arg) in zip(expected_call['args'], args):
      if expected_arg != actual_arg:
        raise NoRetryException("Expected: %s - Actual: %s" %
                               (expected_arg, actual_arg))
    # The expected call contains an optional callback for checking the context
    # at the time of the call.
    if expected_call['cb']:
      try:
        expected_call['cb']()
      except:
        tb = traceback.format_exc()
        raise NoRetryException("Caught exception from callback: %s" % tb)
    # If the return value is an exception, raise it instead of returning.
    if isinstance(expected_call['ret'], Exception):
      raise expected_call['ret']
    return expected_call['ret']
  def AssertFinished(self):  # pragma: no cover
    """Fail if the test consumed fewer calls than the recipe contains."""
    if self._index < len(self._recipe) -1:
      raise NoRetryException("Called mock too seldom: %d vs. %d" %
                             (self._index, len(self._recipe)))
class ScriptTest(unittest.TestCase):
def MakeEmptyTempFile(self):
handle, name = tempfile.mkstemp()
os.close(handle)
self._tmp_files.append(name)
return name
def MakeEmptyTempDirectory(self):
name = tempfile.mkdtemp()
self._tmp_files.append(name)
return name
def WriteFakeVersionFile(self, major=3, minor=22, build=4, patch=0):
version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
if not os.path.exists(os.path.dirname(version_file)):
os.makedirs(os.path.dirname(version_file))
with open(version_file, "w") as f:
f.write(" // Some line...\n")
f.write("\n")
f.write("#define V8_MAJOR_VERSION %s\n" % major)
f.write("#define V8_MINOR_VERSION %s\n" % minor)
f.write("#define V8_BUILD_NUMBER %s\n" % build)
f.write("#define V8_PATCH_LEVEL %s\n" % patch)
f.write(" // Some line...\n")
f.write("#define V8_IS_CANDIDATE_VERSION 0\n")
def WriteFakeWatchlistsFile(self):
watchlists_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], WATCHLISTS_FILE)
if not os.path.exists(os.path.dirname(watchlists_file)):
os.makedirs(os.path.dirname(watchlists_file))
with open(watchlists_file, "w") as f:
content = """
'merges': [
# Only enabled on branches created with tools/release/create_release.py
# 'v8-merges@googlegroups.com',
],
"""
f.write(content)
  def MakeStep(self):
    """Convenience wrapper: build a generic Step bound to the test config,
    the shared state dict and this test object as side-effect handler."""
    options = ScriptsBase(TEST_CONFIG, self, self._state).MakeOptions([])
    return MakeStep(step_class=Step, state=self._state,
                    config=TEST_CONFIG, side_effect_handler=self,
                    options=options)
def RunStep(self, script=CreateRelease, step_class=Step, args=None):
"""Convenience wrapper."""
args = args if args is not None else ["-m", "-a=author", "-r=reviewer", ]
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
def Call(self, fun, *args, **kwargs):
print("Calling %s with %s and %s" % (str(fun), str(args), str(kwargs)))
def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
print("%s %s" % (cmd, args))
print("in %s" % cwd)
return self._mock.Call("command", cmd + " " + args, cwd=cwd)
  def ReadLine(self):
    # Side-effect handler hook: stdin reads are served by the mock recipe.
    return self._mock.Call("readline")
def ReadURL(self, url, params):
if params is not None:
return self._mock.Call("readurl", url, params)
else:
return self._mock.Call("readurl", url)
  def Sleep(self, seconds):
    # Side-effect handler hook: never actually sleep in tests.
    pass
  def GetUTCStamp(self):
    # Side-effect handler hook: deterministic timestamp for tests.
    return "1000000"
  def Expect(self, *args):
    """Convenience wrapper: install the expectation recipe on the mock."""
    self._mock.Expect(*args)
  def setUp(self):
    # Fresh mock, temp-file bookkeeping and script state for every test;
    # the fake work dir is recreated so tests cannot leak files.
    self._mock = SimpleMock()
    self._tmp_files = []
    self._state = {}
    TEST_CONFIG["DEFAULT_CWD"] = self.MakeEmptyTempDirectory()
def tearDown(self):
if os.path.exists(TEST_CONFIG["PERSISTFILE_BASENAME"]):
shutil.rmtree(TEST_CONFIG["PERSISTFILE_BASENAME"])
# Clean up temps. Doesn't work automatically.
for name in self._tmp_files:
if os.path.isfile(name):
os.remove(name)
if os.path.isdir(name):
shutil.rmtree(name)
self._mock.AssertFinished()
def testGitMock(self):
self.Expect([Cmd("git --version", "git version 1.2.3"),
Cmd("git dummy", "")])
self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
self.assertEquals("", self.MakeStep().Git("dummy"))
  def testCommonPrepareDefault(self):
    """Answering 'Y' lets PrepareBranch delete a stale work branch."""
    self.Expect([
      Cmd("git status -s -uno", ""),
      Cmd("git checkout -f origin/master", ""),
      Cmd("git fetch", ""),
      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
      RL("Y"),
      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
    ])
    self.MakeStep().CommonPrepare()
    self.MakeStep().PrepareBranch()
  def testCommonPrepareNoConfirm(self):
    """Answering 'n' to the branch-deletion prompt aborts PrepareBranch."""
    self.Expect([
      Cmd("git status -s -uno", ""),
      Cmd("git checkout -f origin/master", ""),
      Cmd("git fetch", ""),
      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
      RL("n"),
    ])
    self.MakeStep().CommonPrepare()
    self.assertRaises(Exception, self.MakeStep().PrepareBranch)
  def testCommonPrepareDeleteBranchFailure(self):
    """A failing 'git branch -D' makes PrepareBranch raise."""
    self.Expect([
      Cmd("git status -s -uno", ""),
      Cmd("git checkout -f origin/master", ""),
      Cmd("git fetch", ""),
      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
      RL("Y"),
      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], None),
    ])
    self.MakeStep().CommonPrepare()
    self.assertRaises(Exception, self.MakeStep().PrepareBranch)
  def testInitialEnvironmentChecks(self):
    """The environment check requires a .git entry and a usable $EDITOR."""
    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
    os.environ["EDITOR"] = "vi"
    self.Expect([
      Cmd("which vi", "/usr/bin/vi"),
    ])
    self.MakeStep().InitialEnvironmentChecks(TEST_CONFIG["DEFAULT_CWD"])
  def testTagTimeout(self):
    """TagRevision raises when the tag never shows up on the remote
    (four fetch/log attempts, all returning nothing)."""
    self.Expect([
      Cmd("git fetch", ""),
      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
      Cmd("git fetch", ""),
      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
      Cmd("git fetch", ""),
      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
      Cmd("git fetch", ""),
      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
    ])
    args = ["--branch", "candidates", "ab12345"]
    self._state["version"] = "tag_name"
    self._state["commit_title"] = "Title"
    self.assertRaises(Exception,
                      lambda: self.RunStep(RollMerge, TagRevision, args))
def testReadAndPersistVersion(self):
self.WriteFakeVersionFile(build=5)
step = self.MakeStep()
step.ReadAndPersistVersion()
self.assertEquals("3", step["major"])
self.assertEquals("22", step["minor"])
self.assertEquals("5", step["build"])
self.assertEquals("0", step["patch"])
  def testRegex(self):
    """Sanity checks for the regex helpers used to massage changelog text."""
    self.assertEqual("(issue 321)",
                     re.sub(r"BUG=v8:(.*)$", r"(issue \1)", "BUG=v8:321"))
    self.assertEqual("(Chromium issue 321)",
                     re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", "BUG=321"))
    # MSub normalizes tabs and leading/trailing whitespace in changelogs.
    cl = "  too little\n\ttab\ttab\n    too much\n  trailing  "
    cl = MSub(r"\t", r"        ", cl)
    cl = MSub(r"^ {1,7}([^ ])", r"        \1", cl)
    cl = MSub(r"^ {9,80}([^ ])", r"        \1", cl)
    cl = MSub(r" +$", r"", cl)
    self.assertEqual("        too little\n"
                     "        tab     tab\n"
                     "        too much\n"
                     "        trailing", cl)
    # Lookbehind keeps the whitespace run while replacing the number.
    self.assertEqual("//\n#define V8_BUILD_NUMBER  3\n",
                     MSub(r"(?<=#define V8_BUILD_NUMBER)(?P<space>\s+)\d*$",
                          r"\g<space>3",
                          "//\n#define V8_BUILD_NUMBER  321\n"))
TAGS = """
4425.0
0.0.0.0
3.9.6
3.22.4
test_tag
"""
# Version as tag: 3.22.4.0. Version on master: 3.22.6.
# Make sure that the latest version is 3.22.6.0.
def testIncrementVersion(self):
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
])
self.RunStep(CreateRelease, IncrementVersion)
self.assertEquals("3", self._state["new_major"])
self.assertEquals("22", self._state["new_minor"])
self.assertEquals("7", self._state["new_build"])
self.assertEquals("0", self._state["new_patch"])
  def testBootstrapper(self):
    """Without a default work dir the script bootstraps a V8 checkout."""
    work_dir = self.MakeEmptyTempDirectory()
    class FakeScript(ScriptsBase):
      def _Steps(self):
        return []
    # Use the test configuration without the fake testing default work dir.
    fake_config = dict(TEST_CONFIG)
    del(fake_config["DEFAULT_CWD"])
    self.Expect([
      Cmd("fetch v8", "", cwd=work_dir),
    ])
    FakeScript(fake_config, self).Run(["--work-dir", work_dir])
def testCreateRelease(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
# The version file on master has build level 5.
self.WriteFakeVersionFile(build=5)
commit_msg = """Version 3.22.5
TBR=reviewer@chromium.org"""
def CheckVersionCommit():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
self.assertEquals(commit_msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
self.assertTrue(
re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
expectations = [
Cmd("git fetch origin +refs/heads/*:refs/heads/*", ""),
Cmd("git checkout -f origin/master", "", cb=self.WriteFakeWatchlistsFile),
Cmd("git branch", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
Cmd("git log -1 --format=%H release_hash^", "abc3\n"),
Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
Cmd("git push origin push_hash:refs/heads/3.22.5", ""),
Cmd("git reset --hard origin/master", ""),
Cmd("git new-branch work-branch --upstream origin/3.22.5", ""),
Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git cl upload --send-mail "
"-f --bypass-hooks --no-autocc --message-file "
"\"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
Cmd("git cl land --bypass-hooks -f", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
"\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
Cmd("git tag 3.22.5 hsh_to_tag", ""),
Cmd("git push origin refs/tags/3.22.5:refs/tags/3.22.5", ""),
Cmd("git checkout -f origin/master", ""),
Cmd("git branch", "* master\n work-branch\n"),
Cmd("git branch -D work-branch", ""),
Cmd("git gc", ""),
]
self.Expect(expectations)
args = ["-a", "author@chromium.org",
"-r", "reviewer@chromium.org",
"--revision", "push_hash"]
CreateRelease(TEST_CONFIG, self).Run(args)
# Note: The version file is on build number 5 again in the end of this test
# since the git command that merges to master is mocked out.
# Check for correct content of the WATCHLISTS file
watchlists_content = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"],
WATCHLISTS_FILE))
expected_watchlists_content = """
'merges': [
# Only enabled on branches created with tools/release/create_release.py
'v8-merges@googlegroups.com',
],
"""
self.assertEqual(watchlists_content, expected_watchlists_content)
C_V8_22624_LOG = """V8 CL.
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22624 123
"""
C_V8_123455_LOG = """V8 CL.
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123455 123
"""
C_V8_123456_LOG = """V8 CL.
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
"""
ROLL_COMMIT_MSG = """Update V8 to version 3.22.4.
Summary of changes available at:
https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
Please follow these instructions for assigning/CC'ing issues:
https://v8.dev/docs/triage-issues
Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
This only works with a Google account.
CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_optional_gpu_tests_rel
R=reviewer@chromium.org"""
# Snippet from the original DEPS file.
FAKE_DEPS = """
vars = {
"v8_revision": "last_roll_hsh",
}
deps = {
"src/v8":
(Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
Var("v8_revision"),
}
"""
  def testChromiumRollUpToDate(self):
    """Roller exits early with state 'up_to_date' when Chromium already
    pins the newest V8 release (both describe as 3.22.4 below)."""
    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
    json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
    chrome_dir = TEST_CONFIG["CHROMIUM"]
    self.Expect([
      Cmd("git fetch origin", ""),
      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
      Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir),
      Cmd("git describe --tags last_roll_hsh", "3.22.4"),
      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
      Cmd("git rev-list --max-age=395200 --tags",
          "bad_tag\nroll_hsh\nhash_123"),
      # bad_tag yields no version and must be skipped by the roller.
      Cmd("git describe --tags bad_tag", ""),
      Cmd("git describe --tags roll_hsh", "3.22.4"),
      Cmd("git describe --tags hash_123", "3.22.3"),
      Cmd("git describe --tags roll_hsh", "3.22.4"),
      Cmd("git describe --tags hash_123", "3.22.3"),
    ])
    result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
        AUTO_PUSH_ARGS + [
        "-c", TEST_CONFIG["CHROMIUM"],
        "--json-output", json_output_file])
    self.assertEquals(0, result)
    json_output = json.loads(FileToText(json_output_file))
    self.assertEquals("up_to_date", json_output["monitoring_state"])
  def testChromiumRoll(self):
    """Happy path: a newer release (3.22.4) exists, so the roller creates a
    work branch, rewrites the DEPS pin and uploads a CL; final monitoring
    state is 'success'."""
    # Setup fake directory structures.
    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
    json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
    TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
    chrome_dir = TEST_CONFIG["CHROMIUM"]
    os.makedirs(os.path.join(chrome_dir, "v8"))

    # Callback for the mocked "gclient setdep": simulate the DEPS rewrite.
    def WriteDeps():
      TextToFile("Some line\n \"v8_revision\": \"22624\",\n some line",
                 os.path.join(chrome_dir, "DEPS"))

    expectations = [
      Cmd("git fetch origin", ""),
      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
      Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir),
      Cmd("git describe --tags last_roll_hsh", "3.22.3.1"),
      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
      Cmd("git rev-list --max-age=395200 --tags",
          "bad_tag\nroll_hsh\nhash_123"),
      Cmd("git describe --tags bad_tag", ""),
      Cmd("git describe --tags roll_hsh", "3.22.4"),
      Cmd("git describe --tags hash_123", "3.22.3"),
      Cmd("git describe --tags roll_hsh", "3.22.4"),
      Cmd("git log -1 --format=%s roll_hsh", "Version 3.22.4\n"),
      Cmd("git describe --tags roll_hsh", "3.22.4"),
      Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
      # Work inside the fake Chromium checkout from here on.
      Cmd("git status -s -uno", "", cwd=chrome_dir),
      Cmd("git checkout -f main", "", cwd=chrome_dir),
      Cmd("git branch", "", cwd=chrome_dir),
      Cmd("git pull", "", cwd=chrome_dir),
      Cmd("git fetch origin", ""),
      Cmd("git new-branch work-branch", "", cwd=chrome_dir),
      Cmd("gclient setdep -r src/v8@roll_hsh", "", cb=WriteDeps,
          cwd=chrome_dir),
      Cmd(("git commit -am \"%s\" "
           "--author \"author@chromium.org <author@chromium.org>\"" %
           self.ROLL_COMMIT_MSG),
          "", cwd=chrome_dir),
      Cmd("git cl upload --send-mail -f "
          "--cq-dry-run --set-bot-commit --bypass-hooks", "",
          cwd=chrome_dir),
      Cmd("git checkout -f main", "", cwd=chrome_dir),
      Cmd("git branch -D work-branch", "", cwd=chrome_dir),
    ]
    self.Expect(expectations)

    args = ["-a", "author@chromium.org", "-c", chrome_dir,
            "-r", "reviewer@chromium.org", "--json-output", json_output_file]
    auto_roll.AutoRoll(TEST_CONFIG, self).Run(args)

    # The WriteDeps callback must have landed the new revision in DEPS.
    deps = FileToText(os.path.join(chrome_dir, "DEPS"))
    self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))

    json_output = json.loads(FileToText(json_output_file))
    self.assertEquals("success", json_output["monitoring_state"])
  def testCheckLastPushRecently(self):
    """Auto-push bails out (returns 0 at LastReleaseBailout) when no commits
    landed between the last release base and the candidate."""
    self.Expect([
      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
      Cmd("git tag", self.TAGS),
      Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
      Cmd("git log -1 --format=%s release_hash",
          "Version 3.22.4 (based on abc3)\n"),
      # Empty log output: nothing new since abc3 -> bail out.
      Cmd("git log --format=%H abc3..abc123", "\n"),
    ])
    self._state["candidate"] = "abc123"
    self.assertEquals(0, self.RunStep(
        auto_push.AutoPush, LastReleaseBailout, AUTO_PUSH_ARGS))
  def testAutoPush(self):
    """Auto-push picks the lkgr revision and persists it as the next
    release candidate in the state file."""
    self.Expect([
      Cmd("git fetch", ""),
      Cmd("git fetch origin +refs/heads/lkgr:refs/heads/lkgr", ""),
      Cmd("git show-ref -s refs/heads/lkgr", "abc123\n"),
      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
      Cmd("git tag", self.TAGS),
      Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
      Cmd("git log -1 --format=%s release_hash",
          "Version 3.22.4 (based on abc3)\n"),
      # Non-empty log: there is something to push.
      Cmd("git log --format=%H abc3..abc123", "some_stuff\n"),
    ])
    auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
    state = json.loads(FileToText("%s-state.json"
                                  % TEST_CONFIG["PERSISTFILE_BASENAME"]))
    self.assertEquals("abc123", state["candidate"])
  def testRollMerge(self):
    """End-to-end cherry-pick merge onto the candidates branch, including
    version bump, tagging, and restart/recovery after a simulated git
    outage (the Cmd returning None aborts the first run)."""
    TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
    self.WriteFakeVersionFile(build=5)
    os.environ["EDITOR"] = "vi"
    extra_patch = self.MakeEmptyTempFile()

    # Callback factory checking the temporary patch file content.
    def VerifyPatch(patch):
      return lambda: self.assertEquals(patch,
          FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))

    # Expected commit message; titles are ordered by the script, BUG lines
    # from all merged commits are merged and sorted.
    msg = """Version 3.22.5.1 (cherry-pick)
Merged ab12345
Merged ab23456
Merged ab34567
Merged ab45678
Merged ab56789
Title4
Title2
Title3
Title1
Revert "Something"
BUG=123,234,345,456,567,v8:123
"""

    # Callback for "git cl land": verify commit message and version bump.
    def VerifyLand():
      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
      self.assertEquals(msg, commit)
      version = FileToText(
          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
      self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
      self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
      self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+1", version))
      self.assertTrue(
          re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))

    self.Expect([
      Cmd("git status -s -uno", ""),
      Cmd("git checkout -f origin/master", ""),
      Cmd("git fetch", ""),
      Cmd("git branch", " branch1\n* branch2\n"),
      Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
          TEST_CONFIG["BRANCHNAME"], ""),
      # Port discovery: which master commits port the requested patches.
      Cmd(("git log --format=%H --grep=\"Port ab12345\" "
           "--reverse origin/master"),
          "ab45678\nab23456"),
      Cmd("git log -1 --format=%s ab45678", "Title1"),
      Cmd("git log -1 --format=%s ab23456", "Title2"),
      Cmd(("git log --format=%H --grep=\"Port ab23456\" "
           "--reverse origin/master"),
          ""),
      Cmd(("git log --format=%H --grep=\"Port ab34567\" "
           "--reverse origin/master"),
          "ab56789"),
      Cmd("git log -1 --format=%s ab56789", "Title3"),
      RL("Y"),  # Automatically add corresponding ports (ab34567, ab56789)?
      # Simulate git being down which stops the script.
      Cmd("git log -1 --format=%s ab12345", None),
      # Restart script in the failing step.
      Cmd("git log -1 --format=%s ab12345", "Title4"),
      Cmd("git log -1 --format=%s ab23456", "Title2"),
      Cmd("git log -1 --format=%s ab34567", "Title3"),
      Cmd("git log -1 --format=%s ab45678", "Title1"),
      Cmd("git log -1 --format=%s ab56789", "Revert \"Something\""),
      # BUG lines in various formats are collected from each commit.
      Cmd("git log -1 ab12345", "Title4\nBUG=123\nBUG=234"),
      Cmd("git log -1 ab23456", "Title2\n BUG = v8:123,345"),
      Cmd("git log -1 ab34567", "Title3\nBUG=567, 456"),
      Cmd("git log -1 ab45678", "Title1\nBUG="),
      Cmd("git log -1 ab56789", "Revert \"Something\"\nBUG=none"),
      # Each commit's patch is extracted and applied in turn.
      Cmd("git log -1 -p ab12345", "patch4"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch4")),
      Cmd("git log -1 -p ab23456", "patch2"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch2")),
      Cmd("git log -1 -p ab34567", "patch3"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch3")),
      Cmd("git log -1 -p ab45678", "patch1"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch1")),
      Cmd("git log -1 -p ab56789", "patch5\n"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch5\n")),
      Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
      RL("Y"),  # Automatically increment patch level?
      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
      RL("reviewer@chromium.org"),  # V8 reviewer.
      Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
          "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
      Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
      RL("LGTM"),  # Enter LGTM for V8 CL.
      Cmd("git cl presubmit", "Presubmit successfull\n"),
      Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
          cb=VerifyLand),
      # Tagging: first lookup misses, second finds the landed commit.
      Cmd("git fetch", ""),
      Cmd("git log -1 --format=%H --grep=\""
          "Version 3.22.5.1 (cherry-pick)"
          "\" refs/remotes/origin/candidates",
          ""),
      Cmd("git fetch", ""),
      Cmd("git log -1 --format=%H --grep=\""
          "Version 3.22.5.1 (cherry-pick)"
          "\" refs/remotes/origin/candidates",
          "hsh_to_tag"),
      Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
      Cmd("git push origin refs/tags/3.22.5.1:refs/tags/3.22.5.1", ""),
      Cmd("git checkout -f origin/master", ""),
      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
    ])

    # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the
    # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567.
    args = ["-f", "-p", extra_patch, "--branch", "candidates",
            "ab12345", "ab23456", "ab34567"]

    # The first run of the script stops because of git being down.
    self.assertRaises(GitFailedException,
                      lambda: RollMerge(TEST_CONFIG, self).Run(args))

    # Test that state recovery after restarting the script works.
    args += ["-s", "4"]
    RollMerge(TEST_CONFIG, self).Run(args)
  def testMergeToBranch(self):
    """Like testRollMerge but for merge_to_branch.py: squashed 'Merged:'
    message format, no automatic version bump or tagging, and the same
    git-outage restart/recovery path."""
    TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
    self.WriteFakeVersionFile(build=5)
    os.environ["EDITOR"] = "vi"
    extra_patch = self.MakeEmptyTempFile()

    # Callback factory checking the temporary patch file content.
    def VerifyPatch(patch):
      return lambda: self.assertEquals(patch,
          FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))

    # NOTE(review): info_msg is not referenced below; kept as documentation
    # of the script's informational output.
    info_msg = ("NOTE: This script will no longer automatically "
        "update include/v8-version.h "
        "and create a tag. This is done automatically by the autotag bot. "
        "Please call the merge_to_branch.py with --help for more information.")

    msg = """Merged: Squashed multiple commits.
Merged: Title4
Revision: ab12345
Merged: Title2
Revision: ab23456
Merged: Title3
Revision: ab34567
Merged: Title1
Revision: ab45678
Merged: Revert \"Something\"
Revision: ab56789
BUG=123,234,345,456,567,v8:123
NOTRY=true
NOPRESUBMIT=true
NOTREECHECKS=true
"""

    # Callback for "git cl land": only the commit message is checked here.
    def VerifyLand():
      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
      self.assertEquals(msg, commit)

    self.Expect([
      Cmd("git status -s -uno", ""),
      Cmd("git checkout -f origin/master", ""),
      Cmd("git fetch", ""),
      Cmd("git branch", " branch1\n* branch2\n"),
      Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
          TEST_CONFIG["BRANCHNAME"], ""),
      # Port discovery; grep is anchored ("^[Pp]ort ...") for this script.
      Cmd(("git log --format=%H --grep=\"^[Pp]ort ab12345\" "
           "--reverse origin/master"),
          "ab45678\nab23456"),
      Cmd("git log -1 --format=%s ab45678", "Title1"),
      Cmd("git log -1 --format=%s ab23456", "Title2"),
      Cmd(("git log --format=%H --grep=\"^[Pp]ort ab23456\" "
           "--reverse origin/master"),
          ""),
      Cmd(("git log --format=%H --grep=\"^[Pp]ort ab34567\" "
           "--reverse origin/master"),
          "ab56789"),
      Cmd("git log -1 --format=%s ab56789", "Title3"),
      RL("Y"),  # Automatically add corresponding ports (ab34567, ab56789)?
      # Simulate git being down which stops the script.
      Cmd("git log -1 --format=%s ab12345", None),
      # Restart script in the failing step.
      Cmd("git log -1 --format=%s ab12345", "Title4"),
      Cmd("git log -1 --format=%s ab23456", "Title2"),
      Cmd("git log -1 --format=%s ab34567", "Title3"),
      Cmd("git log -1 --format=%s ab45678", "Title1"),
      Cmd("git log -1 --format=%s ab56789", "Revert \"Something\""),
      # Both BUG= and the newer "Bug:" footer formats are parsed.
      Cmd("git log -1 ab12345", "Title4\nBUG=123\nBUG=234"),
      Cmd("git log -1 ab23456", "Title2\n BUG = v8:123,345"),
      Cmd("git log -1 ab34567", "Title3\nBug: 567, 456,345"),
      Cmd("git log -1 ab45678", "Title1\nBug:"),
      Cmd("git log -1 ab56789", "Revert \"Something\"\nBUG=none"),
      Cmd("git log -1 -p ab12345", "patch4"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch4")),
      Cmd("git log -1 -p ab23456", "patch2"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch2")),
      Cmd("git log -1 -p ab34567", "patch3"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch3")),
      Cmd("git log -1 -p ab45678", "patch1"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch1")),
      Cmd("git log -1 -p ab56789", "patch5\n"),
      Cmd(("git apply --index --reject \"%s\"" %
           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
          "", cb=VerifyPatch("patch5\n")),
      Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
      RL("reviewer@chromium.org"),  # V8 reviewer.
      Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
          "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
      Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
      RL("LGTM"),  # Enter LGTM for V8 CL.
      Cmd("git cl presubmit", "Presubmit successfull\n"),
      Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
          cb=VerifyLand),
      Cmd("git checkout -f origin/master", ""),
      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
    ])

    # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the
    # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567.
    args = ["-f", "-p", extra_patch, "--branch", "candidates",
            "ab12345", "ab23456", "ab34567"]

    # The first run of the script stops because of git being down.
    self.assertRaises(GitFailedException,
                      lambda: MergeToBranch(TEST_CONFIG, self).Run(args))

    # Test that state recovery after restarting the script works.
    args += ["-s", "4"]
    MergeToBranch(TEST_CONFIG, self).Run(args)
# Standard unittest entry point when the file is run directly.
if __name__ == '__main__':
  unittest.main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
2432a572bd371ad31357a644beb118c7a6652907 | 9c3584757cda097128e6916a5490056263d038a1 | /cv/urls.py | b4a4aa8a59791c0d56489205762ada1f5cf5f7b1 | [] | no_license | jackcorsi/bridging-coursework | 0921a5e2b7dbba304895022e95707475d176fb36 | a1e5e2e42b3cdcc241356fefc2a53d47320c7e04 | refs/heads/master | 2022-12-12T11:35:28.713567 | 2020-08-31T22:06:28 | 2020-08-31T22:06:28 | 290,881,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from django.urls import path
from . import views

# Route the app root ('') to the cv view; reversible by the name 'cv'.
urlpatterns = [
    path('', views.cv, name='cv'),
]
"jackcorsiwarren@gmail.com"
] | jackcorsiwarren@gmail.com |
a161fd86ce0916197d8943f40b551acd0ba600bc | 50f0d33b12778f911fe16a4e18d0659936b9086b | /0x05-python-exceptions/4-list_division.py | e67e5211367f6871f31a26fa72ddb8ede0d0caa0 | [] | no_license | monicajoa/holbertonschool-higher_level_programming | 4f4eaa7aa2cad1642e7aed54663cb30eb92e1b4f | 451d20174144ad96fa726a4389c7aae72abf2495 | refs/heads/master | 2022-12-18T00:35:00.682624 | 2020-09-25T05:14:57 | 2020-09-25T05:14:57 | 259,479,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | #!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
    """Divide two lists element by element.

    Produces a list of ``list_length`` quotients; any element that cannot
    be computed (zero divisor, missing index, non-numeric value) becomes 0
    after printing a short diagnostic message.
    """
    quotients = []
    for index in range(list_length):
        quotient = 0
        try:
            quotient = my_list_1[index] / my_list_2[index]
        except ZeroDivisionError:
            print("division by 0")
        except IndexError:
            print("out of range")
        except TypeError:
            print("wrong type")
        finally:
            # Always record a value so the result keeps list_length entries.
            quotients.append(quotient)
    return quotients
| [
"mnortiz.ortiz@gmail.com"
] | mnortiz.ortiz@gmail.com |
466c8bce8b26bf008d0bbd460978c463b959cc2e | d7dad757bd7e0d8447fe2e9d5b7efc1ace39385a | /Rod/handmodelling_ciss17/skeleton/SkeletonPolar.py | e4163320b29411b156293bbdfa203d4183546b31 | [] | no_license | eth-ait/ComputationalInteraction17 | cc28e523fd672f2851451ac3d80d5f44b28d6252 | eb34454007b8eefc1992cb1a2d94d6623e5863eb | refs/heads/master | 2021-12-14T18:34:52.388135 | 2017-06-16T14:45:56 | 2017-06-16T14:45:56 | 91,554,497 | 25 | 18 | null | null | null | null | UTF-8 | Python | false | false | 2,721 | py | import numpy as np
import Skeleton
class SkeletonPolar(Skeleton.Skeleton):
    """Skeleton whose non-origin joints are stored as spherical offsets.

    Each node keeps three column indices (v1, v2, v3) into a data row;
    for non-origin nodes these are read as (azimuth, inclination, radius)
    relative to the parent, for the origin as absolute x/y/z.

    NOTE(review): uses Python 2 constructs (``xrange``, ``dict.itervalues``)
    and relies on ``node_map``/``labels``/``label_map``/``origin_name``
    presumably provided by the ``Skeleton`` base class -- confirm there.
    """

    class SkeletonNode:
        # One joint of the skeleton tree.
        def __init__(self, label, v1, v2, v3, isOrigin):
            self.v1 = v1
            self.v2 = v2
            self.v3 = v3
            self.label = label
            self.isOrigin = isOrigin
            self.links = []  # child joints

        def add_link(self, node):
            # Attach a child joint to this node.
            self.links.append(node)

        def set_coordinates(self, x, y, z):
            # Cache Cartesian coordinates on the node.
            self.x = x
            self.y = y
            self.z = z

    def __init__(self, columns, features):
        Skeleton.Skeleton.__init__(self, columns, features)
        self.links = None

    def traverse_set_coordinates(self, root, data_row):
        """Recursively convert each child's spherical offset to Cartesian
        x/y/z, anchored at the parent's position."""
        for node in root.links:
            # Spherical-to-Cartesian with v1=azimuth, v2=inclination,
            # v3=radius; the offset is added to the parent's coordinates.
            node.x = root.x + data_row[node.v3] * np.cos(data_row[node.v1]) * np.sin(data_row[node.v2])
            node.y = root.y + data_row[node.v3] * np.sin(data_row[node.v1]) * np.sin(data_row[node.v2])
            node.z = root.z + data_row[node.v3] * np.cos(data_row[node.v2])
            self.traverse_set_coordinates(node, data_row)

    def get_xyz_coordinates(self, data):
        """Return a numpy array with one row per data row.

        NOTE(review): the returned values are read from the label columns of
        ``data`` (via ``label_map``); the spherical traversal only updates
        the node objects as a side effect -- confirm this is intended.
        """
        coords = []
        rows, cols = data.shape
        root = self.node_map[self.origin_name]
        for row in xrange(rows):
            # The origin's columns hold absolute coordinates.
            root.x = data.values[row, root.v1]
            root.y = data.values[row, root.v2]
            root.z = data.values[row, root.v3]
            self.traverse_set_coordinates(root, data.values[row, :])
            coords_row = []
            for label in self.labels:
                coords_row.append(data.values[row, self.label_map[label.lower()]])
            coords.append(coords_row)
        return np.array(coords)

    def plot(self, ax, data, showJoints=False, c='k', cmap=None, lw=1, alpha=0.25):
        """Draw every pose in ``data`` as line segments on 3D axes ``ax``.

        When ``cmap`` is given, each row gets its own color sampled evenly
        along the colormap; otherwise the fixed color ``c`` is used.
        """
        rows, cols = data.shape
        root = self.node_map[self.origin_name]
        cs = None
        if cmap != None:
            # One RGB triple per row, sampled along the colormap.
            cs = [cmap(v)[0:3] for v in np.linspace(0, 1, rows)]
        for row in xrange(rows):
            if cs != None:
                c = cs[row]
            root.x = data.values[row, root.v1]
            root.y = data.values[row, root.v2]
            root.z = data.values[row, root.v3]
            self.traverse_set_coordinates(root, data.values[row, :])
            #ax.scatter([root.x], [root.y], [root.z], c='r', s=128)
            for node in self.node_map.itervalues():
                for node2 in node.links:
                    ax.plot([node.x, node2.x], [node.y, node2.y], [node.z, node2.z], c=c, lw=lw, alpha=alpha)
                if showJoints:
                    ax.scatter([node.x], [node.y], [node.z], c=c, lw=lw, alpha=alpha)
"seon.wook@swook.net"
] | seon.wook@swook.net |
0ad9c543040c66b73a4c0063a4834e93bf347cb7 | 19bcb4784f2ddda66d5ccf9eb268c45baf1f122c | /python/nn/results/get_results_aggr.py | 21dc15f59a6a51107391466207eeb449a8b19102 | [
"MIT"
] | permissive | PeterJackNaylor/AutomaticWSI | bb76f9983479b1a1a6d7ad089eb9bb098da91136 | a26f3d8efff005dcf2d1a14705785579ce5484c8 | refs/heads/master | 2023-09-04T09:12:48.946814 | 2023-08-30T09:24:17 | 2023-08-30T09:24:17 | 226,664,370 | 1 | 1 | MIT | 2020-03-19T10:49:47 | 2019-12-08T12:30:52 | Python | UTF-8 | Python | false | false | 1,695 | py |
import os
from glob import glob
import pandas as pd
def get_options():
import argparse
parser = argparse.ArgumentParser(
description='takes a folder with ')
parser.add_argument('--path', required=True,
metavar="str", type=str,
help='folder where the result files can be found')
parser.add_argument('--name', required=True,
metavar="str", type=str,
help='name of the output')
args = parser.parse_args()
return args
def fres(st):
return st.split('at_res_')[1].split('___be')[0]
def fmodel(st):
return st.split('_for_')[0]
def fy(st):
return st.split('_for_')[1].split('_at_')[0]
def ftype(st):
return st.split('___best')[1]
def main():
options = get_options()
files = glob(os.path.join(options.path, '*best.csv'))
stats = pd.DataFrame()
for f in files:
table = pd.read_csv(f)
table = table.drop('Unnamed: 0', axis=1)
table['counts'] = table.shape[0]
table['mean'] = table.shape[0]
col = os.path.basename(f).split('.')[0]
stats[col + "mean"] = table.mean()
stats[col + "Std.Dev"] = table.std()
# stats[col + "Var"] = table.var()
stats = stats.T
stats['res'] = stats.apply(lambda x: fres(x.name), axis=1)
stats['model'] = stats.apply(lambda x: fmodel(x.name), axis=1)
stats['y'] = stats.apply(lambda x: fy(x.name), axis=1)
stats['type'] = stats.apply(lambda x: ftype(x.name), axis=1)
import pdb; pdb.set_trace()
stats = stats.set_index(['y', 'model', 'res', 'type'])
stats.to_csv(options.name)
if __name__ == '__main__':
main()
| [
"peter.naylor@mines-paristech.fr"
] | peter.naylor@mines-paristech.fr |
a95b8d425b5111a57fc1dd383f4c49c52955e818 | 91b32d0bbc563055751185221ef5f62a894944fa | /main.py | 129c12381a9ffe445aa865f192c3f7644fb201c9 | [] | no_license | frugs/allin-bnetprofile | 122a5bf87007d3b693a19c855a71f8aaedd8569e | 4563a582deb0f00df91ed96b15674653fe23ceb5 | refs/heads/master | 2020-03-29T23:22:38.065581 | 2018-09-26T18:08:48 | 2018-09-26T18:08:48 | 150,469,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from werkzeug.serving import run_simple
from bnetprofile import app
def main():
    """Serve the bnetprofile WSGI app on localhost:5001 over ad-hoc HTTPS.

    threaded=True handles concurrent requests; ssl_context='adhoc' makes
    werkzeug generate a throwaway self-signed certificate at startup.
    """
    run_simple("localhost", 5001, app, threaded=True, ssl_context='adhoc')
# Start the development server only when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"hugowainwright@fb.com"
] | hugowainwright@fb.com |
544802223d9e7154436d2675abc13fe6d6e844ec | fbeecc935e05bcd47fc23f96a3bc8fc94f676f6c | /webGatherer/yahoo/wc.py | ffdc0f2eaea589aefcaa7fe127d14ea6a3d55899 | [] | no_license | eox03y/works | 097b98241dc07843870cbab9b7fc3d640e6a6627 | 8bedbb7eedb57f91cde068454116d3aae1b71f8e | refs/heads/master | 2021-01-24T05:16:27.770666 | 2013-08-24T06:29:28 | 2013-08-24T06:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
sys.exit()
f = open(sys.argv[1])
cnt = 0
for line in f:
cnt += 1
print "%d lines" % (cnt)
| [
"handol@gmail.com"
] | handol@gmail.com |
ec8c9468d457da5877f08cb25edc9fc0c224c492 | b8ceebeebfeed12b653e1b4d526e85973faecbb5 | /scheduler/factories.py | 893e9c1891f514133481027f2526eea7065cba97 | [
"MIT"
] | permissive | a3r0d7n4m1k/YACS | 2678589c4a07eb9f17d80e60d543226ab570428d | 6bdb2299905f6321be5de788f16a9464a70a4206 | refs/heads/master | 2020-06-23T12:40:39.823658 | 2015-11-09T01:26:17 | 2015-11-09T01:26:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import factory
from scheduler import models
class SavedSelectionFactory(factory.Factory):
    """factory_boy factory producing SavedSelection instances for tests."""
    # Old-style factory_boy target declaration (pre-2.4 ``class Meta``).
    FACTORY_FOR = models.SavedSelection
    # Default empty selections; presumably factory_boy evaluates these per
    # build -- NOTE(review): confirm the lists are not shared across builds.
    internal_section_ids = []
    internal_blocked_times = []
| [
"jeff@jeffhui.net"
] | jeff@jeffhui.net |
b0db839d40c84990671e88503e7afe1b8d93b056 | d3022ceb672f2ef0d2985fbb31f2f086aef4d40f | /Principal.py | 2589448f2f1fcfd2236624337292088eba6bc226 | [] | no_license | BorjaMSLS/PFCRepo | 434b53a59239651113898e37022216e02ce4ae01 | 6d9b68847100dfe35d7f967580e36cacab43a953 | refs/heads/master | 2020-06-03T13:48:00.748486 | 2017-06-12T20:04:34 | 2017-06-12T20:04:34 | 94,132,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | import sys, os
def main(argv=None):
    """Check that the input file 'Orden.txt' exists in the current working
    directory before parsing (Python 2 script; messages are in Spanish).

    Exits the process via sys.exit() when the file is missing; argv is
    currently unused.
    """
    fname = "Orden.txt"
    try:
        if os.path.isfile(os.path.join(os.getcwd(),fname)):
            print "fichero existe. Vamos a parsear"
        else:
            print "fichero noexiste"
            # SystemExit is not a subclass of Exception, so it propagates
            # past the handler below.
            sys.exit()
    except Exception, e:  # Python 2 'except ..., e' syntax
        print "An exception has occured", e
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| [
"borjams85@gmail.com"
] | borjams85@gmail.com |
32b8393a60a17cb8d5d3a614d581aae9fcb466f1 | a7f16c95f973905e880ad4dc277fbba890486654 | /wildlifecompliance/migrations/0283_auto_20190814_1036.py | dcfe076119285e3560ffaf749bd7547dd1ce0fd5 | [
"Apache-2.0"
] | permissive | dbca-wa/wildlifecompliance | 9e98e9c093aeb25dbb7ff8d107be47e29bcd05e1 | cb12ad9ea1171f10b5297cdb7e1eb6ea484e633d | refs/heads/master | 2023-08-08T14:37:05.824428 | 2023-07-31T02:57:23 | 2023-07-31T02:57:23 | 232,276,030 | 1 | 17 | NOASSERTION | 2023-07-31T02:57:24 | 2020-01-07T08:12:53 | Python | UTF-8 | Python | false | false | 772 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-08-14 02:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: relink the approval document.

    Removes the 'log_entry' field from InspectionTypeApprovalDocument and
    instead points InspectionType at its approval document via a nullable
    ForeignKey (cascade on delete).
    """

    dependencies = [
        ('wildlifecompliance', '0282_auto_20190813_1820'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inspectiontypeapprovaldocument',
            name='log_entry',
        ),
        migrations.AddField(
            model_name='inspectiontype',
            name='approval_document',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inspection_type', to='wildlifecompliance.InspectionTypeApprovalDocument'),
        ),
    ]
| [
"brendan.blackford@dbca.wa.gov.au"
] | brendan.blackford@dbca.wa.gov.au |
16dac5684e2692e62c1899cf8f289c0eefbf87d6 | a707cedea077872dbf5af48c1a5b1886e667732f | /src/wn-geoip/src/urllib3/contrib/socks.py | 8219ff104c80d8e5bb7e7c6e51a5dc6a10cf691f | [
"CC-BY-4.0"
] | permissive | osg-cat/virtual-school-2021 | 373850043c10f66f002df1e241110f3a5b2c91e2 | 403450fd6668aae609e4b85a4814a4f7f94677b3 | refs/heads/main | 2023-07-11T13:52:53.609399 | 2021-08-13T17:11:54 | 2021-08-13T17:11:54 | 357,324,855 | 0 | 0 | CC-BY-4.0 | 2021-04-12T20:06:17 | 2021-04-12T20:06:16 | null | UTF-8 | Python | false | false | 7,028 | py | # -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
- SOCKS4A (``proxy_url='socks4a://...``)
- SOCKS4 (``proxy_url='socks4://...``)
- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
- SOCKS5 with local DNS (``proxy_url='socks5://...``)
- Usernames and passwords for the SOCKS proxy
.. note::
It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
your ``proxy_url`` to ensure that DNS resolution is done from the remote
server instead of client-side when connecting to a domain name.
SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
supports IPv4, IPv6, and domain names.
When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
will be sent as the ``userid`` section of the SOCKS request::
proxy_url="socks4a://<userid>@proxy-host"
When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
of the ``proxy_url`` will be sent as the username/password to authenticate
with the proxy::
proxy_url="socks5h://<username>:<password>@proxy-host"
"""
from __future__ import absolute_import
try:
    # PySocks is an optional dependency; the rest of this module calls
    # socks.create_connection / socks.PROXY_TYPE_* and the ImportError
    # fallback below exists exactly for its absence. This line had been
    # stripped to ``pass`` in this copy, leaving ``socks`` undefined and
    # the except branch unreachable (restored to match upstream urllib3).
    import socks
except ImportError:
    import warnings

    from ..exceptions import DependencyWarning

    warnings.warn(
        (
            "SOCKS support in urllib3 requires the installation of optional "
            "dependencies: specifically, PySocks. For more information, see "
            "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies"
        ),
        DependencyWarning,
    )
    raise
from socket import error as SocketError, timeout as SocketTimeout
from ..connection import HTTPConnection, HTTPSConnection
from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
    """
    A plain-text HTTP connection that connects via a SOCKS proxy.
    """

    def __init__(self, *args, **kwargs):
        # Per-connection SOCKS settings injected by SOCKSProxyManager
        # through connection_pool_kw (see "_socks_options" there).
        self._socks_options = kwargs.pop("_socks_options")
        super(SOCKSConnection, self).__init__(*args, **kwargs)

    def _new_conn(self):
        """
        Establish a new connection via the SOCKS proxy.

        Returns the proxied socket, translating PySocks/socket failures
        into urllib3's ConnectTimeoutError / NewConnectionError.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw["source_address"] = self.source_address

        if self.socket_options:
            extra_kw["socket_options"] = self.socket_options

        try:
            conn = socks.create_connection(
                (self.host, self.port),
                proxy_type=self._socks_options["socks_version"],
                proxy_addr=self._socks_options["proxy_host"],
                proxy_port=self._socks_options["proxy_port"],
                proxy_username=self._socks_options["username"],
                proxy_password=self._socks_options["password"],
                proxy_rdns=self._socks_options["rdns"],
                timeout=self.timeout,
                **extra_kw
            )

        except SocketTimeout:
            raise ConnectTimeoutError(
                self,
                "Connection to %s timed out. (connect timeout=%s)"
                % (self.host, self.timeout),
            )

        except socks.ProxyError as e:
            # This is fragile as hell, but it seems to be the only way to raise
            # useful errors here.
            if e.socket_err:
                error = e.socket_err
                if isinstance(error, SocketTimeout):
                    raise ConnectTimeoutError(
                        self,
                        "Connection to %s timed out. (connect timeout=%s)"
                        % (self.host, self.timeout),
                    )
                else:
                    raise NewConnectionError(
                        self, "Failed to establish a new connection: %s" % error
                    )
            else:
                raise NewConnectionError(
                    self, "Failed to establish a new connection: %s" % e
                )

        except SocketError as e:  # Defensive: PySocks should catch all these.
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e
            )

        return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
    # TLS handling comes entirely from HTTPSConnection; SOCKS tunnelling
    # from SOCKSConnection. No extra behavior is needed.
    pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
    # Pool that hands out SOCKS-tunnelled plain-HTTP connections.
    ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
    # Pool that hands out SOCKS-tunnelled HTTPS connections.
    ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
    """
    A version of the urllib3 ProxyManager that routes connections via the
    defined SOCKS proxy.

    The proxy_url scheme selects the SOCKS version and whether DNS is
    resolved remotely (socks4a / socks5h) or locally (socks4 / socks5).
    """

    pool_classes_by_scheme = {
        "http": SOCKSHTTPConnectionPool,
        "https": SOCKSHTTPSConnectionPool,
    }

    def __init__(
        self,
        proxy_url,
        username=None,
        password=None,
        num_pools=10,
        headers=None,
        **connection_pool_kw
    ):
        parsed = parse_url(proxy_url)

        # Credentials may also be embedded in the URL ("user:pass@host");
        # explicit username/password arguments take precedence.
        if username is None and password is None and parsed.auth is not None:
            split = parsed.auth.split(":")
            if len(split) == 2:
                username, password = split

        # Map the scheme to a PySocks proxy type; the 'h'/'a' suffix turns
        # on remote DNS resolution (rdns).
        if parsed.scheme == "socks5":
            socks_version = socks.PROXY_TYPE_SOCKS5
            rdns = False
        elif parsed.scheme == "socks5h":
            socks_version = socks.PROXY_TYPE_SOCKS5
            rdns = True
        elif parsed.scheme == "socks4":
            socks_version = socks.PROXY_TYPE_SOCKS4
            rdns = False
        elif parsed.scheme == "socks4a":
            socks_version = socks.PROXY_TYPE_SOCKS4
            rdns = True
        else:
            raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)

        self.proxy_url = proxy_url

        # Forwarded to every SOCKSConnection via the pool's keyword args.
        socks_options = {
            "socks_version": socks_version,
            "proxy_host": parsed.host,
            "proxy_port": parsed.port,
            "username": username,
            "password": password,
            "rdns": rdns,
        }
        connection_pool_kw["_socks_options"] = socks_options

        super(SOCKSProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw
        )

        self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
| [
"clock@wisc.edu"
] | clock@wisc.edu |
d888ad9e63f68a9365129a57f809de089621a5e0 | 63aec28ae387493c049978a429b837f3d835e711 | /learn_python_asyncio/coroutines/example09.py | 24cee8a3263927285bd4888e296462d02aa15559 | [
"MIT"
] | permissive | erictapia/devstacklab | 83c538813c3d0c18509eba98601536b3d8c465ea | 2997a620c3f4d29c3a526d561ec0cfb4ba0cd6b4 | refs/heads/master | 2023-04-14T22:13:15.722186 | 2021-04-26T15:28:45 | 2021-04-26T15:28:45 | 335,650,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | import asyncio
import time
from typing import Callable, Coroutine
import httpx
addr = "https://langa.pl/crawl"
todo = set()
# Removed recursive call and instead creates a task
async def crawl2(prefix: str, url: str = "") -> None:
url = url or prefix
client = httpx.AsyncClient()
try:
response = await client.get(url)
finally:
await client.aclose()
for line in response.text.splitlines():
if line.startswith(prefix):
todo.add(line)
asyncio.create_task(crawl2(prefix, line), name=line)
todo.discard(url)
async def progress(url: str, algo: Callable[..., Coroutine]) -> None:
    """Kick off *algo* on *url* in the background and print crawl progress
    twice a second until the shared ``todo`` set drains, then report the
    elapsed wall-clock time."""
    # The task only makes progress while this coroutine awaits.
    asyncio.create_task(algo(url), name=url)
    todo.add(url)
    started = time.time()
    while todo:
        pending = ", ".join(sorted(todo))
        # Show the count plus the tail of the pending-URL listing.
        print(f"{len(todo)}: {pending[-38:]}")
        await asyncio.sleep(0.5)
    elapsed = int(time.time() - started)
    print(f"Took {elapsed} seconds")
# Drive the crawl and the progress reporter on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(progress(addr, crawl2))
"erictapia1@gmail.com"
] | erictapia1@gmail.com |
9ae75ffc5555390fa040f606e7a4f857ff2fd1fd | bff7a1feb29c339ae93ae56865928f8b53e16c41 | /spotify-playlist-generator.py | fca5248f25c07db041605fab030c8c25845fe0e5 | [] | no_license | imanishbarnwal/mlh-local-hack-day | c8c036582b9e09b0e69c07302b38594161cf2301 | 606a9b578ed0d768c78c0220b1c05e2bf2e18d0b | refs/heads/main | 2023-02-19T16:51:49.824763 | 2021-01-18T07:01:37 | 2021-01-18T07:01:37 | 328,614,282 | 2 | 2 | null | 2021-01-13T05:25:13 | 2021-01-11T09:37:21 | Jupyter Notebook | UTF-8 | Python | false | false | 4,237 | py | import base64
import datetime
from urllib.parse import urlencode
import requests
class SpotifyAPI(object):
    """Minimal Spotify Web API client using the Client Credentials flow.

    Acquires and refreshes an access token automatically, and exposes
    helpers for album/artist lookup and catalog search.
    """
    access_token = None
    access_token_expires = datetime.datetime.now()
    access_token_did_expire = True
    client_id = None
    client_secret = None
    token_url = "https://accounts.spotify.com/api/token"

    def __init__(self, client_id, client_secret, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.client_id = client_id
        self.client_secret = client_secret

    def get_client_credentials(self):
        """
        Returns a base64 encoded string of ``client_id:client_secret``.

        Raises an ``Exception`` if either credential is unset.
        """
        client_id = self.client_id
        client_secret = self.client_secret
        if client_secret is None or client_id is None:
            raise Exception("You must set client_id and client_secret")
        client_creds = f"{client_id}:{client_secret}"
        client_creds_b64 = base64.b64encode(client_creds.encode())
        return client_creds_b64.decode()

    def get_token_headers(self):
        """HTTP Basic auth header for the token endpoint."""
        client_creds_b64 = self.get_client_credentials()
        return {
            "Authorization": f"Basic {client_creds_b64}"
        }

    def get_token_data(self):
        """Form body for the Client Credentials grant."""
        return {
            "grant_type": "client_credentials"
        }

    def perform_auth(self):
        """Request a new access token and cache it with its expiry time."""
        token_url = self.token_url
        token_data = self.get_token_data()
        token_headers = self.get_token_headers()
        r = requests.post(token_url, data=token_data, headers=token_headers)
        # range() excludes its stop value, so range(200, 300) covers every
        # 2xx status; the original range(200, 299) wrongly rejected 299.
        if r.status_code not in range(200, 300):
            raise Exception("Could not authenticate client.")
        data = r.json()
        now = datetime.datetime.now()
        access_token = data['access_token']
        expires_in = data['expires_in']  # seconds
        expires = now + datetime.timedelta(seconds=expires_in)
        self.access_token = access_token
        self.access_token_expires = expires
        self.access_token_did_expire = expires < now
        return True

    def get_access_token(self):
        """Return a valid access token, re-authenticating when expired or unset."""
        token = self.access_token
        expires = self.access_token_expires
        now = datetime.datetime.now()
        if expires < now:
            self.perform_auth()
            return self.get_access_token()
        elif token is None:
            self.perform_auth()
            return self.get_access_token()
        return token

    def get_resource_header(self):
        """Bearer-auth headers for regular API calls."""
        access_token = self.get_access_token()
        headers = {
            "Authorization": f"Bearer {access_token}"
        }
        return headers

    def get_resource(self, lookup_id, resource_type='albums', version='v1'):
        """GET a single resource by id; returns {} on any non-2xx response."""
        endpoint = f"https://api.spotify.com/{version}/{resource_type}/{lookup_id}"
        headers = self.get_resource_header()
        r = requests.get(endpoint, headers=headers)
        if r.status_code not in range(200, 300):  # every 2xx is a success
            return {}
        return r.json()

    def get_album(self, _id):
        """Look up an album by its Spotify id."""
        return self.get_resource(_id, resource_type='albums')

    def get_artist(self, _id):
        """Look up an artist by its Spotify id."""
        return self.get_resource(_id, resource_type='artists')

    def base_search(self, query_params):
        """Run a raw search with pre-encoded query params; {} on failure."""
        headers = self.get_resource_header()
        endpoint = "https://api.spotify.com/v1/search"
        lookup_url = f"{endpoint}?{query_params}"
        r = requests.get(lookup_url, headers=headers)
        if r.status_code not in range(200, 300):  # every 2xx is a success
            return {}
        return r.json()

    def search(self, query=None, operator=None, operator_query=None, search_type='artist'):
        """Search the catalog.

        ``query`` may be a string or a dict of field:value pairs; an optional
        OR/NOT ``operator`` with ``operator_query`` extends the query string.
        """
        if query is None:
            raise Exception("A query is required")
        if isinstance(query, dict):
            query = " ".join([f"{k}:{v}" for k, v in query.items()])
        if operator is not None and operator_query is not None:
            if operator.lower() == "or" or operator.lower() == "not":
                operator = operator.upper()
                if isinstance(operator_query, str):
                    query = f"{query} {operator} {operator_query}"
        query_params = urlencode({"q": query, "type": search_type.lower()})
        print(query_params)
        return self.base_search(query_params)
| [
"noreply@github.com"
] | noreply@github.com |
7415de76e89f8f4dfe95d6ebb44104381c925582 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/operating_system_version_operator_type.py | 9b61e85a5eec3edf7a897381b9d6f3c4f647b52d | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'OperatingSystemVersionOperatorTypeEnum',
},
)
class OperatingSystemVersionOperatorTypeEnum(proto.Message):
    r"""Container for enum describing the type of OS operators. """

    class OperatingSystemVersionOperatorType(proto.Enum):
        r"""The type of operating system version."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        EQUALS_TO = 2
        # NOTE(review): value 3 is skipped in this generated manifest —
        # presumably reserved upstream; confirm against the proto definition.
        GREATER_THAN_EQUALS_TO = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
b771ea29dec1c4fcd94859238ac6f6fb1411b4ff | b6b866f60297eef3d9fc67dbb547502d5603e896 | /lab/StructuredCode/ids/IO.py | 8bc7d4bb1df0e541c72305d1c4eef363f1efd366 | [] | no_license | RKDSOne/ids | a4ffa823fb7ffa795127cfff4402cbd6a3c4b32f | f509d75c3ced1b3d2bfe306358787f9e748c3e91 | refs/heads/master | 2021-06-01T02:21:18.024383 | 2016-06-29T02:24:30 | 2016-06-29T02:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | import json
import numpy as np
import pandas as pd
import os
class DataReader:
    """Reads CSV datasets described by a JSON config file.

    The config maps ``"dpath"`` to the root data directory. A dataset named
    ``foo`` lives at ``<dpath>/foo/foo.data`` (a name like ``foo-bar`` lives
    at ``<dpath>/foo/foo-bar.data``), with an optional companion
    ``...-test.data`` file. The last CSV column holds labels of the form
    ``<letter><number>``; the leading character is stripped and the rest
    parsed as a float.
    """

    def __init__(s, confpath="conf.json"):
        # Context manager closes the config handle promptly; the original
        # `json.load(open(...))` leaked the file object.
        with open(confpath) as conf_file:
            s.conf = json.load(conf_file)

    def read(s, dataset, sep_label=True):
        """Load ``dataset``.

        Returns
        -------
        sep_label=True : ``(has_test, X, y)`` or ``(has_test, X, y, tstX, tsty)``
        sep_label=False: ``(has_test, data)`` or ``(has_test, data, tst_data)``
        where ``has_test`` tells whether a ``...-test.data`` file exists.
        """
        def filepath(dname):
            # "foo-bar" lives in directory "foo"; plain names in their own dir.
            if dname.find('-') != -1:
                return '/{0}/{1}.data'.format(dname[:dname.find('-')], dname)
            return '/{0}/{0}.data'.format(dname)

        def parse_labels(column):
            # Labels look like "c3"; drop the leading tag character. A list
            # comprehension replaces the py2-style `map`, which under Python 3
            # makes np.array produce an unusable 0-d object array.
            return np.array([float(label[1:]) for label in column]).astype('float64')

        fpath = s.conf["dpath"] + filepath(dataset)
        tst_fpath = fpath[:-5] + '-test' + fpath[-5:]
        has_test = os.path.exists(tst_fpath)
        if sep_label:
            df = pd.read_csv(fpath)
            # .iloc replaces the long-removed pandas .ix indexer.
            X = np.array(df.iloc[:, :-1]).astype('float64')
            y = parse_labels(df.iloc[:, -1])
            if has_test:
                df = pd.read_csv(tst_fpath)
                tstX = np.array(df.iloc[:, :-1]).astype('float64')
                tsty = parse_labels(df.iloc[:, -1])
                return has_test, X, y, tstX, tsty
            return has_test, X, y
        else:
            df = pd.read_csv(fpath)
            dat = np.array(df)
            dat[:, -1] = parse_labels(dat[:, -1])
            if has_test:
                df = pd.read_csv(tst_fpath)
                tst_dat = np.array(df)
                tst_dat[:, -1] = parse_labels(tst_dat[:, -1])
                return has_test, dat.astype('float64'), tst_dat.astype('float64')
            return has_test, dat.astype('float64')
| [
"tianfu.d.he@gmail.com"
] | tianfu.d.he@gmail.com |
7ac63f93b0f13c2433e4559bcd2a6ac18cc46ebd | f22b9fbf2daf735390d46e60246335ded34306ee | /Getting started With Python 3._/poss2.py | fb4245063aff4e2745e418c031614fafc75ae835 | [] | no_license | gfobiyatechnical/2k19-With-Python- | 38497aa7bf111baec696817274e60c670df21868 | c2310d0b918525d48b84e77ec5b31d16fa95e213 | refs/heads/master | 2020-04-15T03:10:51.102486 | 2019-04-29T07:48:56 | 2019-04-29T07:48:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | name = input("enter your name")
print(name + str(sharma))
| [
"noreply@github.com"
] | noreply@github.com |
c3535fbb041dc439a9b9f5b1c995eecdee0e1942 | bc82de9237a6aa28fd7623a27b35c02ae8416702 | /allennlp/semparse/type_declarations/type_declaration.py | 47683656260ae90fc79dfd7e462348f8ee762724 | [
"Apache-2.0"
] | permissive | Snnzhao/GrailQA | 78190a8a5bae934c07f4035786f658ef4764c510 | e89e66380402e51bac56f59c7d24d4400bcd11b6 | refs/heads/main | 2023-04-26T19:49:21.683922 | 2021-04-11T09:40:34 | 2021-04-11T09:40:34 | 370,937,323 | 1 | 0 | Apache-2.0 | 2021-05-26T07:00:21 | 2021-05-26T07:00:20 | null | UTF-8 | Python | false | false | 36,337 | py | """
This module defines some classes that are generally useful for defining a type system for a new
domain. We inherit the type logic in ``nltk.sem.logic`` and add some functionality on top of it
here. There are two main improvements:
1) Firstly, we allow defining multiple basic types with their own names (see ``NamedBasicType``).
2) Secondly, we allow defining function types that have placeholders in them (see
``PlaceholderType``).
We also extend NLTK's ``LogicParser`` to define a ``DynamicTypeLogicParser`` that knows how to deal
with the two improvements above.
"""
from typing import Dict, List, Optional, Set, Tuple, Union
from collections import defaultdict
import itertools
from overrides import overrides
from nltk.sem.logic import Expression, ApplicationExpression, ConstantExpression, LogicParser, Variable
from nltk.sem.logic import Type, BasicType, ComplexType as NltkComplexType, ANY_TYPE
from allennlp.common.util import START_SYMBOL
class ComplexType(NltkComplexType):
    """
    In NLTK, a ``ComplexType`` is a curried function type. The currying makes
    it awkward to ask "what does this function finally return?" or "what are
    all of its arguments?", so we layer ``return_type`` and ``argument_types``
    helpers on top of NLTK's implementation.
    """
    def return_type(self) -> Type:
        """
        Walks the chain of nested ``ComplexType``s and returns the final
        (basic) return type. Higher-order functions that themselves return
        functions must override this.
        """
        current = self.second
        while isinstance(current, ComplexType):
            current = current.second
        return current

    def argument_types(self) -> List[Type]:
        """
        Collects the ``.first`` of every nested ``ComplexType`` until the
        remaining type is no longer complex. Higher-order functions must
        override this.
        """
        collected = []
        remaining = self
        while isinstance(remaining, ComplexType):
            collected.append(remaining.first)
            remaining = remaining.second
        return collected

    def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
        """
        Replaces every instance of ``ANY_TYPE`` inside this complex type with
        each of the given basic types, returning all combinations.
        """
        return [self.__class__(first_sub, second_sub)
                for first_sub in substitute_any_type(self.first, basic_types)
                for second_sub in substitute_any_type(self.second, basic_types)]
class HigherOrderType(ComplexType):
    """
    A higher-order function: a ``ComplexType`` that returns a function after
    consuming ``num_arguments`` arguments. ``return_type`` and
    ``argument_types`` are overridden so they stop descending after exactly
    that many levels of currying.

    Parameters
    ----------
    num_arguments : ``int``
        Number of nested ``ComplexType`` levels to traverse before the
        remaining ``.second`` counts as the return type.
    first : ``Type``
        Passed to NLTK's ComplexType.
    second : ``Type``
        Passed to NLTK's ComplexType.
    """
    def __init__(self, num_arguments: int, first: Type, second: Type) -> None:
        super().__init__(first, second)
        self.num_arguments = num_arguments

    @overrides
    def return_type(self) -> Type:
        # Descend exactly num_arguments times from self.
        current = self
        for _ in range(self.num_arguments):
            current = current.second
        return current

    @overrides
    def argument_types(self) -> List[Type]:
        # Collect exactly num_arguments argument slots.
        collected: List[Type] = []
        remaining = self
        for _ in range(self.num_arguments):
            collected.append(remaining.first)
            remaining = remaining.second
        return collected
class NamedBasicType(BasicType):
    """
    A ``BasicType`` carrying an explicit name. Type resolution also compares
    the output of ``__str__``, so basic types with different representations
    do not resolve against each other.

    Parameters
    ----------
    string_rep : ``str``
        String representation of the type.
    """
    def __init__(self, string_rep) -> None:
        self._string_rep = string_rep

    def __str__(self):
        # The start symbol is rendered verbatim; every other name is
        # abbreviated to the first character of its lower-cased form, which
        # caps the number of distinct basic types at 26.
        if self._string_rep == START_SYMBOL:
            return START_SYMBOL
        return self._string_rep.lower()[0]

    def str(self):
        return self._string_rep
class MultiMatchNamedBasicType(NamedBasicType):
    """
    A ``NamedBasicType`` that additionally matches any type in a supplied
    list. ``matches`` is overridden to consult that list after the normal
    name-based check.

    Parameters
    ----------
    string_rep : ``str``
        String representation of the type, passed to the super class.
    types_to_match : ``List[BasicType]``
        Types this type should also match with.
    """
    def __init__(self,
                 string_rep,
                 types_to_match: List[BasicType]) -> None:
        super().__init__(string_rep)
        self.types_to_match = set(types_to_match)

    @overrides
    def matches(self, other):
        # Fall back to set membership only when the base check fails.
        if super().matches(other):
            return True
        return other in self.types_to_match
class PlaceholderType(ComplexType):
    """
    Abstract base for ``ComplexType``s containing placeholders (reverse, and,
    or, argmax, ...), whose type resolution is context sensitive.

    NLTK's ANY_TYPE is not a true wildcard: once resolved to a concrete type,
    the variable is pinned to it, so a symbol that must bind to different
    types at different call sites (e.g. a ``count`` applying to both rows and
    cells) would fail resolution. Placeholder types behave as genuine wild
    cards instead.

    Subclasses must:

    1) Override ``_signature`` — the string form shown in action sequences
       (it does not affect type inference or checking).
    2) Override ``resolve`` to perform context-sensitive resolution (see its
       docstring).
    3) Override ``get_application_type``, which yields the return type when
       this type is applied to an argument of a given type (e.g. a reverse
       type applied to ``<a,b>`` should yield ``<b,a>``).
    """
    _signature: str = None

    @overrides
    def resolve(self, other: Type) -> Optional[Type]:
        """
        Central to type inference/checking: reconcile what we know of this
        type with what the context expects (``other``), returning a possibly
        more specific type, or ``None`` on contradiction.

        Example: resolving ``<?, d>`` (takes anything, returns ``d``) against
        the contextual expectation ``<e, ?>`` succeeds and yields ``<e, d>``;
        resolving ``<?, d>`` against ``<?, e>`` fails and yields ``None``.
        A successful resolution does not imply type equality (one side may be
        ANY_TYPE), so subclasses explicitly resolve in both directions.
        """
        raise NotImplementedError

    def get_application_type(self, argument_type: Type) -> Type:
        """
        Resulting type when this type is applied, as a function, to an
        argument of ``argument_type``.
        """
        raise NotImplementedError

    @overrides
    def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
        """
        Placeholders interact badly with blind substitution, so the superclass
        implementation is deliberately disabled here; each subclass must think
        through the correct behavior and implement it itself.
        """
        raise NotImplementedError

    @overrides
    def __eq__(self, other) -> bool:
        return self.__class__ == other.__class__

    @overrides
    def matches(self, other) -> bool:
        # self == ANY_TYPE = True iff self.first == ANY_TYPE and self.second == ANY_TYPE.
        return self == other or self == ANY_TYPE or other == ANY_TYPE

    @overrides
    def __str__(self):
        # Unresolved placeholders render as "?" rather than their signature.
        return str(ANY_TYPE) if self == ANY_TYPE else self._signature

    @overrides
    def str(self):
        return ANY_TYPE.str() if self == ANY_TYPE else self._signature

    __hash__ = ComplexType.__hash__
class UnaryOpType(PlaceholderType):
    """
    A ``PlaceholderType`` for unary operations: takes an argument of any type
    and returns an expression of the same type (``identity`` is one example).
    Signature: ``<#1,#1>``.

    Parameters
    ----------
    allowed_substitutions : ``Set[BasicType]``, optional (default=None)
        If given, restricts which basic types :func:`substitute_any_type` may
        substitute; ``None`` allows all basic types.
    signature : ``str``, optional (default='<#1,#1>')
        Signature string shown in action sequences; adjust it when the
        allowed substitutions are restricted.
    """
    def __init__(self,
                 type_: BasicType = ANY_TYPE,
                 allowed_substitutions: Set[BasicType] = None,
                 signature: str = '<#1,#1>') -> None:
        super().__init__(type_, type_)
        self._allowed_substitutions = allowed_substitutions
        self._signature = signature

    @overrides
    def resolve(self, other) -> Optional[Type]:
        """See ``PlaceholderType.resolve``"""
        if not isinstance(other, NltkComplexType):
            return None
        # Argument and return slot must resolve against each other, both ways.
        resolved_arg = other.first.resolve(other.second)
        if not resolved_arg:
            return None
        if not other.second.resolve(resolved_arg):
            return None
        return UnaryOpType(resolved_arg, self._allowed_substitutions, self._signature)

    @overrides
    def get_application_type(self, argument_type: Type) -> Type:
        # Identity on types: output type mirrors the argument type.
        return argument_type

    @overrides
    def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
        if self.first != ANY_TYPE:
            return [self]
        candidates = self._allowed_substitutions if self._allowed_substitutions else basic_types
        return [UnaryOpType(candidate, self._allowed_substitutions, self._signature)
                for candidate in candidates]
class BinaryOpType(PlaceholderType):
    """
    A ``PlaceholderType`` for binary operations: takes two arguments of the
    same type and returns that type (``+``, ``-``, ``and``, ``or``).
    Signature: ``<#1,<#1,#1>>``.

    Parameters
    ----------
    allowed_substitutions : ``Set[BasicType]``, optional (default=None)
        If given, restricts which basic types :func:`substitute_any_type` may
        substitute; ``None`` allows all basic types.
    signature : ``str``, optional (default='<#1,<#1,#1>>')
        Signature string shown in action sequences; adjust it when the
        allowed substitutions are restricted.
    """
    def __init__(self,
                 type_: BasicType = ANY_TYPE,
                 allowed_substitutions: Set[BasicType] = None,
                 signature: str = '<#1,<#1,#1>>') -> None:
        super().__init__(type_, ComplexType(type_, type_))
        self._allowed_substitutions = allowed_substitutions
        self._signature = signature

    @overrides
    def resolve(self, other: Type) -> Optional[Type]:
        """See ``PlaceholderType.resolve``"""
        if not isinstance(other, NltkComplexType):
            return None
        if not isinstance(other.second, NltkComplexType):
            return None
        # Unify all three slots (both arguments and the return) pairwise.
        unified = other.first.resolve(other.second.first)
        if unified is None:
            return None
        unified = unified.resolve(other.second.second)
        if not unified:
            return None
        if not other.second.resolve(ComplexType(unified, unified)):
            return None
        return BinaryOpType(unified, self._allowed_substitutions, self._signature)

    @overrides
    def get_application_type(self, argument_type: Type) -> Type:
        # Partially applied: one argument consumed yields <arg, arg>.
        return ComplexType(argument_type, argument_type)

    @overrides
    def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
        if self.first != ANY_TYPE:
            return [self]
        candidates = self._allowed_substitutions if self._allowed_substitutions else basic_types
        return [BinaryOpType(candidate, self._allowed_substitutions, self._signature)
                for candidate in candidates]
class TypedConstantExpression(ConstantExpression):
    # pylint: disable=abstract-method
    """
    NLTK assumes every constant has ``EntityType`` (e). This subclass accepts
    a default type at construction time and uses it in ``_set_type`` whenever
    no more specific type is requested.
    """
    def __init__(self, variable, default_type: Type) -> None:
        super().__init__(variable)
        self._default_type = default_type

    @overrides
    def _set_type(self, other_type=ANY_TYPE, signature=None) -> None:
        # Fall back to the constructor-supplied default only for ANY_TYPE.
        effective_type = self._default_type if other_type == ANY_TYPE else other_type
        super()._set_type(effective_type, signature)
class DynamicTypeApplicationExpression(ApplicationExpression):
    """
    NLTK's ``ApplicationExpression`` (which represents function applications like P(x)) has two
    limitations, which we overcome by inheriting from ``ApplicationExpression`` and overriding two
    methods.

    Firstly, ``ApplicationExpression`` does not handle the case where P's type involves
    placeholders (R, V, !=, etc.), which are special cases because their return types depend on the
    type of their arguments (x). We override the property ``type`` to redefine the type of the
    application.

    Secondly, NLTK's variables only bind to entities, and thus the variable types are 'e' by
    default. We get around this issue by replacing x with X, whose initial type is ANY_TYPE, and
    later gets resolved based on the type signature of the function whose scope the variable
    appears in. This variable binding operation is implemented by overriding ``_set_type`` below.
    """
    def __init__(self, function: Expression, argument: Expression, variables_with_placeholders: Set[str]) -> None:
        super(DynamicTypeApplicationExpression, self).__init__(function, argument)
        # Names (strings) of functions whose types are PlaceholderTypes; used
        # by the `type` property to decide when placeholder logic applies.
        self._variables_with_placeholders = variables_with_placeholders

    @property
    def type(self):
        # This gets called when the tree is being built by ``LogicParser.parse``. So, we do not
        # have access to the type signatures yet. Thus, we need to look at the name of the function
        # to return the type.
        if not str(self.function) in self._variables_with_placeholders:
            return super(DynamicTypeApplicationExpression, self).type
        if self.function.type == ANY_TYPE:
            return ANY_TYPE
        # Placeholder types compute their return type from the argument type.
        argument_type = self.argument.type
        return self.function.type.get_application_type(argument_type)

    def _set_type(self, other_type: Type = ANY_TYPE, signature=None) -> None:
        """
        We override this method to do just one thing on top of ``ApplicationExpression._set_type``.
        In lambda expressions of the form /x F(x), where the function is F and the argument is x,
        we can use the type of F to infer the type of x. That is, if F is of type <a, b>, we can
        resolve the type of x against a. We do this as the additional step after setting the type
        of F(x).

        So why does NLTK not already do this? NLTK assumes all variables (x) are of type entity
        (e). So it does not have to resolve the type of x anymore. However, this would cause type
        inference failures in our case since x can bind to rows, numbers or cells, each of which
        has a different type. To deal with this issue, we made X of type ANY_TYPE. Also, LambdaDCS
        (and some other languages) contain a var function that indicate the usage of variables
        within lambda functions. We map var to V, and made it of type <#1, #1>. We cannot leave X
        as ANY_TYPE because that would propagate up the tree. We need to set its type when we have
        the information about F. Hence this method. Note that the language may or may not contain
        the var function. We deal with both cases below.
        """
        super(DynamicTypeApplicationExpression, self)._set_type(other_type, signature)
        # TODO(pradeep): Assuming the mapping of "var" function is "V". Do something better.
        if isinstance(self.argument, ApplicationExpression) and str(self.argument.function) == "V":
            # Case 1: the language wraps variables in var(...): bind the inner
            # variable's type to F's argument type.
            # pylint: disable=protected-access
            self.argument.argument._set_type(self.function.type.first)
        if str(self.argument) == "X" and str(self.function) != "V":
            # Case 2: a bare variable X (no var wrapper): bind it directly.
            # pylint: disable=protected-access
            self.argument._set_type(self.function.type.first)
class DynamicTypeLogicParser(LogicParser):
    """
    ``DynamicTypeLogicParser`` is a ``LogicParser`` that can deal with ``NamedBasicType`` and
    ``PlaceholderType`` appropriately. Our extension here does two things differently.

    Firstly, we should handle constants of different types. We do this by passing a dict of format
    ``{name_prefix: type}`` to the constructor. For example, your domain has entities of types
    unicorns and elves, and you have an entity "Phil" of type unicorn, and "Bob" of type "elf". The
    names of the two entities should then be "unicorn:phil" and "elf:bob" respectively.

    Secondly, since we defined a new kind of ``ApplicationExpression`` above, the ``LogicParser``
    should be able to create this new kind of expression.
    """
    def __init__(self,
                 type_check: bool = True,
                 constant_type_prefixes: Dict[str, BasicType] = None,
                 type_signatures: Dict[str, Type] = None) -> None:
        super().__init__(type_check)
        self._constant_type_prefixes = constant_type_prefixes or {}
        # Guard the default: the original iterated `type_signatures.items()`
        # directly, which raised AttributeError when the argument was left
        # as its documented default of None.
        type_signatures = type_signatures or {}
        self._variables_with_placeholders = {name for name, type_ in type_signatures.items()
                                             if isinstance(type_, PlaceholderType)}

    @overrides
    def make_ApplicationExpression(self, function, argument):
        # Our subclass handles placeholder types during application.
        return DynamicTypeApplicationExpression(function, argument, self._variables_with_placeholders)

    @overrides
    def make_VariableExpression(self, name):
        # Names like "unicorn:phil" are typed constants; the prefix selects
        # the default type from `constant_type_prefixes`.
        if ":" in name:
            prefix = name.split(":")[0]
            if prefix in self._constant_type_prefixes:
                return TypedConstantExpression(Variable(name), self._constant_type_prefixes[prefix])
            else:
                raise RuntimeError(f"Unknown prefix: {prefix}. Did you forget to pass it to the constructor?")
        return super().make_VariableExpression(name)

    def __eq__(self, other):
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented
class NameMapper:
    """
    The ``LogicParser`` we use has some naming conventions for functions (i.e. they should start
    with an upper case letter, and the remaining characters can only be digits). This means that we
    have to internally represent functions with unintuitive names. This class will automatically
    give unique names following the convention, and populate central mappings with these names. If
    for some reason you need to manually define the alias, you can do so by passing an alias to
    `map_name_with_signature`.

    Parameters
    ----------
    language_has_lambda : ``bool`` (optional, default=False)
        If your language has lambda functions, the word "lambda" needs to be in the name mapping,
        mapped to the alias "\\". NLTK understands this symbol, and it doesn't need a type
        signature for it. Setting this flag to True adds the mapping to `name_mapping`.
    alias_prefix : ``str`` (optional, default="F")
        The one letter prefix used for all aliases. You do not need to specify it if you have only
        one instance of this class for your language. If not, you can specify a different prefix
        for each name mapping you use for your language.
    """
    def __init__(self,
                 language_has_lambda: bool = False,
                 alias_prefix: str = "F") -> None:
        self.name_mapping: Dict[str, str] = {}
        if language_has_lambda:
            self.name_mapping["lambda"] = "\\"
        self.type_signatures: Dict[str, Type] = {}
        # Validate with an explicit exception rather than `assert`, which is
        # silently stripped when Python runs with the -O flag.
        if not (len(alias_prefix) == 1 and alias_prefix.isalpha()):
            raise ValueError(f"Invalid alias prefix: {alias_prefix}"
                             "Needs to be a single upper case character.")
        self._alias_prefix = alias_prefix.upper()
        self._name_counter = 0

    def map_name_with_signature(self,
                                name: str,
                                signature: Type,
                                alias: str = None) -> None:
        """Register ``name`` with ``signature``, minting an alias if needed.

        Re-registering with the same signature is a no-op; re-registering
        with a different signature raises ``RuntimeError``.
        """
        if name in self.name_mapping:
            alias = self.name_mapping[name]
            old_signature = self.type_signatures[alias]
            if old_signature != signature:
                raise RuntimeError(f"{name} already added with signature {old_signature}. "
                                   f"Cannot add it again with {signature}!")
        else:
            alias = alias or f"{self._alias_prefix}{self._name_counter}"
            self._name_counter += 1
            self.name_mapping[name] = alias
            self.type_signatures[alias] = signature

    def get_alias(self, name: str) -> str:
        """Return the alias for ``name``; raises ``RuntimeError`` if unmapped."""
        if name not in self.name_mapping:
            raise RuntimeError(f"Unmapped name: {name}")
        return self.name_mapping[name]

    def get_signature(self, name: str) -> Type:
        """Return the registered type signature for ``name``."""
        alias = self.get_alias(name)
        return self.type_signatures[alias]
def substitute_any_type(type_: Type, basic_types: Set[BasicType]) -> List[Type]:
    """
    Takes a type and a set of basic types, and substitutes all instances of ANY_TYPE with all
    possible basic types and returns a list with all possible combinations. Note that this
    substitution is unconstrained. That is, If you have a type with placeholders, <#1,#1> for
    example, this may substitute the placeholders with different basic types. In that case, you'd
    want to use ``_substitute_placeholder_type`` instead.
    """
    if type_ == ANY_TYPE:
        return list(basic_types)
    # Checked only after the ANY_TYPE case: the wildcard is itself a (special)
    # type and must not fall through to the plain-basic-type branch.
    if isinstance(type_, BasicType):
        return [type_]
    # If we've made it this far, we have a ComplexType, and we can just call
    # `type_.substitute_any_type()`.
    return type_.substitute_any_type(basic_types)
def _make_production_string(source: Type, target: Union[List[Type], Type]) -> str:
    # Grammar productions are rendered as "<source> -> <target>".
    return "{} -> {}".format(source, target)
def _get_complex_type_production(complex_type: ComplexType,
                                 multi_match_mapping: Dict[Type, List[Type]]) -> List[Tuple[Type, str]]:
    """
    Given a placeholder-free complex type, produce the grammar rules whose application
    yields that type's return value. Each production applies ``complex_type`` to all of
    its arguments at once, e.g. for ``<e,<r,<d,r>>>`` we get ``r -> [<e,<r,<d,r>>>, e, r, d]``.

    ``MultiMatchNamedBasicTypes`` in the return or argument positions are expanded: each
    such type is replaced by every type it may match (taken from ``multi_match_mapping``
    when present, falling back to the type's own ``types_to_match``), and the cartesian
    product of all expansions is emitted. With no multi-match types involved, the result
    is a single ``(return_type, production)`` tuple. We assume the multi-match type itself
    never appears as an argument in a production, so e.g. ``('d', 'd -> [<a,<<b,c>,d>>, a,
    <b,c>]')`` is intentionally absent when ``a`` is a multi-match type.
    """
    def _expand(candidate: Type) -> List[Type]:
        # Expand a multi-match type into the concrete types it may stand for;
        # any other type expands only to itself.
        if isinstance(candidate, MultiMatchNamedBasicType):
            if candidate in multi_match_mapping:
                return list(multi_match_mapping[candidate])
            return list(candidate.types_to_match)
        return [candidate]

    return_type = complex_type.return_type()
    argument_choices = [_expand(argument) for argument in complex_type.argument_types()]
    productions: List[Tuple[Type, str]] = []
    for head in _expand(return_type):
        for chosen_arguments in itertools.product(*argument_choices):
            # Note: the production string keeps the *unexpanded* return type.
            rule = _make_production_string(return_type,
                                           [complex_type] + list(chosen_arguments))
            productions.append((head, rule))
    return productions
def get_valid_actions(name_mapping: Dict[str, str],
                      type_signatures: Dict[str, Type],
                      basic_types: Set[Type],
                      multi_match_mapping: Dict[Type, List[Type]] = None,
                      valid_starting_types: Set[Type] = None,
                      num_nested_lambdas: int = 0) -> Dict[str, List[str]]:
    """
    Generates all the valid actions starting from each non-terminal. For terminals of a specific
    type, we simply add a production from the type to the terminal. For all terminal `functions`,
    we additionally add a rule that allows their return type to be generated from an application of
    the function. For example, the function ``<e,<r,<d,r>>>``, which takes three arguments and
    returns an ``r`` would generate a the production rule ``r -> [<e,<r,<d,r>>>, e, r, d]``.

    For functions that do not contain ANY_TYPE or placeholder types, this is straight-forward.
    When there are ANY_TYPES or placeholders, we substitute the ANY_TYPE with all possible basic
    types, and then produce a similar rule. For example, the identity function, with type
    ``<#1,#1>`` and basic types ``e`` and ``r``, would produce the rules ``e -> [<#1,#1>, e]`` and
    ``r -> [<#1,#1>, r]``.

    We additionally add a valid action from the start symbol to all ``valid_starting_types``.

    Parameters
    ----------
    name_mapping : ``Dict[str, str]``
        The mapping of names that appear in your logical form languages to their aliases for NLTK.
        If you are getting all valid actions for a type declaration, this can be the
        ``COMMON_NAME_MAPPING``.
    type_signatures : ``Dict[str, Type]``
        The mapping from name aliases to their types. If you are getting all valid actions for a
        type declaration, this can be the ``COMMON_TYPE_SIGNATURE``.
    basic_types : ``Set[Type]``
        Set of all basic types in the type declaration.
    multi_match_mapping : ``Dict[Type, List[Type]]`` (optional)
        A mapping from `MultiMatchNamedBasicTypes` to the types they can match. This may be
        different from the type's ``types_to_match`` field based on the context. While building action
        sequences that lead to complex types with ``MultiMatchNamedBasicTypes``, if a type does not
        occur in this mapping, the default set of ``types_to_match`` for that type will be used.
    valid_starting_types : ``Set[Type]``, optional
        These are the valid starting types for your grammar; e.g., what types are we allowed to
        parse expressions into? We will add a "START -> TYPE" rule for each of these types. If
        this is ``None``, we default to using ``basic_types``.
    num_nested_lambdas : ``int`` (optional)
        Does the language used permit lambda expressions? And if so, how many nested lambdas do we
        need to worry about? We'll add rules like "<r,d> -> ['lambda x', d]" for all complex
        types, where the variable is determined by the number of nestings. We currently only
        permit up to three levels of nesting, just for ease of implementation.
    """
    # Sets (not lists) so duplicate productions from repeated substitutions collapse.
    valid_actions: Dict[str, Set[str]] = defaultdict(set)
    valid_starting_types = valid_starting_types or basic_types
    # "START -> type" rules for every allowed top-level parse type.
    for type_ in valid_starting_types:
        valid_actions[str(START_TYPE)].add(_make_production_string(START_TYPE, type_))
    complex_types = set()
    for name, alias in name_mapping.items():
        # Lambda functions and variables associated with them get produced in specific contexts. So
        # we do not add them to ``valid_actions`` here, and let ``GrammarState`` deal with it.
        # ``var`` is a special function that some languages (like LambdaDCS) use within lambda
        # functions to indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``)
        # We do not have to produce this function outside the scope of lambda. Even within lambdas,
        # it is a lot easier to not do it, and let the action sequence to logical form transformation
        # logic add it to the output logical forms instead.
        if name in ["lambda", "var", "x", "y", "z"]:
            continue
        name_type = type_signatures[alias]
        # Type to terminal productions.
        for substituted_type in substitute_any_type(name_type, basic_types):
            valid_actions[str(substituted_type)].add(_make_production_string(substituted_type, name))
        # Keeping track of complex types.
        if isinstance(name_type, ComplexType) and name_type != ANY_TYPE:
            complex_types.add(name_type)
    # Function-application rules: each (ANY_TYPE-substituted) complex type yields
    # productions from its return type, expanding multi-match types as needed.
    for complex_type in complex_types:
        for substituted_type in substitute_any_type(complex_type, basic_types):
            for head, production in _get_complex_type_production(substituted_type,
                                                                 multi_match_mapping or {}):
                valid_actions[str(head)].add(production)
    # We can produce complex types with a lambda expression, though we'll leave out
    # placeholder types for now.
    for i in range(num_nested_lambdas):
        # Nesting level i uses variable 'x', 'y', 'z', ... in order.
        lambda_var = chr(ord('x') + i)
        # We'll only allow lambdas to be functions that take and return basic types as their
        # arguments, for now. Also, we're doing this for all possible complex types where
        # the first and second types are basic types. So we may be overgenerating a bit.
        for first_type in basic_types:
            for second_type in basic_types:
                key = ComplexType(first_type, second_type)
                production_string = _make_production_string(key, ['lambda ' + lambda_var, second_type])
                valid_actions[str(key)].add(production_string)
    # Sort each production set so the output is deterministic.
    valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
    return valid_action_strings
# The grammar's root non-terminal: the framework-wide START_SYMBOL modeled as a
# basic type so "START -> <type>" rules fit the same machinery as other rules.
START_TYPE = NamedBasicType(START_SYMBOL)
# TODO(mattg): We're hard-coding three lambda variables here. This isn't a great way to do
# this; it's just something that works for now, that we can fix later if / when it's needed.
# If you allow for more than three nested lambdas, or if you want to use different lambda
# variable names, you'll have to change this somehow.
LAMBDA_VARIABLES = {'x', 'y', 'z'}
def is_nonterminal(production: str) -> bool:
    """Return True if ``production`` is a non-terminal (a type) in the grammar.

    Terminals are: the comparison operators ``<=`` and ``<``, Freebase
    constants (``fb:`` prefix), any multi-character name, lambda variables,
    and upper-cased single characters. Strings starting with ``<`` (complex
    types like ``<e,t>``) and single lower-cased characters are non-terminals.
    """
    # TODO(pradeep): This is pretty specific to the assumptions made in converting types to
    # strings (e.g., that we're only using the first letter for types, lowercased).
    # TODO(pradeep): Also we simply check the surface forms here, and this works for
    # wikitables and nlvr. We should ideally let the individual type declarations define their own
    # variants of this method.
    if not production:
        # Robustness fix: the previous implementation indexed production[0]
        # and raised IndexError on an empty string.
        return False
    if production in ['<=', '<']:
        # Some grammars (including the wikitables grammar) have "less than" and "less than or
        # equal to" functions that are terminals. We don't want to treat those like our
        # "<t,d>" types.
        return False
    if production[0] == '<':
        return True
    if production.startswith('fb:'):
        return False
    if len(production) > 1 or production in LAMBDA_VARIABLES:
        return False
    return production[0].islower()
| [
"gu.826@cse-cnc196739s.coeit.osu.edu"
] | gu.826@cse-cnc196739s.coeit.osu.edu |
7942307b39359f8d6f113c7197dbd8984a6e6eab | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/service_endpoint/policy/_list.py | 89e3a49e6bdaa393add44d973ce1ee228c59f2ef | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 102,260 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network service-endpoint policy list",
)
class List(AAZCommand):
    """List service endpoint policies.

    :example: List service endpoint policies.
        az network service-endpoint policy list --resource-group MyResourceGroup
    """
    # NOTE(review): generated by aaz-dev-tools (see file header); hand edits
    # are lost on regeneration. The metadata below maps this command onto the
    # subscription-wide and resource-group-scoped Microsoft.Network list
    # endpoints, both at api-version 2021-08-01.
    _aaz_info = {
        "version": "2021-08-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/providers/microsoft.network/serviceendpointpolicies", "2021-08-01"],
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/serviceendpointpolicies", "2021-08-01"],
        ]
    }
    def _handler(self, command_args):
        # Bind arguments via the base class, then return a pager that drives
        # _execute_operations per page and shapes each page with _output.
        super()._handler(command_args)
        return self.build_paging(self._execute_operations, self._output)
    # Class-level cache for the argument schema built below.
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build the CLI argument schema once and cache it on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        # --resource-group is optional: when omitted, the subscription-wide
        # list operation runs instead (see _execute_operations).
        _args_schema.resource_group = AAZResourceGroupNameArg()
        return cls._args_schema
    def _execute_operations(self):
        self.pre_operations()
        # Given a subscription id, exactly one branch fires: the by-resource-group
        # operation when --resource-group was supplied, otherwise the
        # subscription-wide one.
        condition_0 = has_value(self.ctx.args.resource_group) and has_value(self.ctx.subscription_id)
        condition_1 = has_value(self.ctx.subscription_id) and has_value(self.ctx.args.resource_group) is not True
        if condition_0:
            self.ServiceEndpointPoliciesListByResourceGroup(ctx=self.ctx)()
        if condition_1:
            self.ServiceEndpointPoliciesList(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        # Extension hook: runs before the HTTP operations.
        pass
    @register_callback
    def post_operations(self):
        # Extension hook: runs after the HTTP operations.
        pass
    def _output(self, *args, **kwargs):
        # Return one page of results plus the nextLink the pager uses to
        # continue the enumeration.
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
        return result, next_link
    class ServiceEndpointPoliciesListByResourceGroup(AAZHttpOperation):
        # GET serviceEndpointPolicies scoped to a single resource group.
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            # Any non-200 response is routed to the framework error handler.
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies",
                **self.url_parameters
            )
        @property
        def method(self):
            return "GET"
        @property
        def error_format(self):
            return "ODataV4Format"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2021-08-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def on_200(self, session):
            # Deserialize the page into ctx.vars.instance for _output to read.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # Lazily build (and cache) the response schema: a paged list of
            # ServiceEndpointPolicy objects plus a nextLink.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
                flags={"read_only": True},
            )
            _schema_on_200.value = AAZListType()
            value = cls._schema_on_200.value
            value.Element = AAZObjectType()
            _ListHelper._build_schema_service_endpoint_policy_read(value.Element)
            return cls._schema_on_200
    class ServiceEndpointPoliciesList(AAZHttpOperation):
        # GET serviceEndpointPolicies across the whole subscription.
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies",
                **self.url_parameters
            )
        @property
        def method(self):
            return "GET"
        @property
        def error_format(self):
            return "ODataV4Format"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2021-08-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def on_200(self, session):
            # Deserialize the page into ctx.vars.instance for _output to read.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # Same paged response shape as the by-resource-group operation.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
                flags={"read_only": True},
            )
            _schema_on_200.value = AAZListType()
            value = cls._schema_on_200.value
            value.Element = AAZObjectType()
            _ListHelper._build_schema_service_endpoint_policy_read(value.Element)
            return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
    # Lazily-built, class-level cache for the ApplicationSecurityGroup schema.
    _schema_application_security_group_read = None
    @classmethod
    def _build_schema_application_security_group_read(cls, _schema):
        """Wire the shared ApplicationSecurityGroup read-schema into ``_schema``.

        The schema is built once on first call and cached on the class; every
        call (including the first) points the caller's fields at the cached
        sub-schemas.
        """
        if cls._schema_application_security_group_read is not None:
            _schema.etag = cls._schema_application_security_group_read.etag
            _schema.id = cls._schema_application_security_group_read.id
            _schema.location = cls._schema_application_security_group_read.location
            _schema.name = cls._schema_application_security_group_read.name
            _schema.properties = cls._schema_application_security_group_read.properties
            _schema.tags = cls._schema_application_security_group_read.tags
            _schema.type = cls._schema_application_security_group_read.type
            return
        cls._schema_application_security_group_read = _schema_application_security_group_read = AAZObjectType()
        application_security_group_read = _schema_application_security_group_read
        application_security_group_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        application_security_group_read.id = AAZStrType()
        application_security_group_read.location = AAZStrType()
        application_security_group_read.name = AAZStrType(
            flags={"read_only": True},
        )
        application_security_group_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        application_security_group_read.tags = AAZDictType()
        application_security_group_read.type = AAZStrType(
            flags={"read_only": True},
        )
        properties = _schema_application_security_group_read.properties
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.resource_guid = AAZStrType(
            serialized_name="resourceGuid",
            flags={"read_only": True},
        )
        tags = _schema_application_security_group_read.tags
        tags.Element = AAZStrType()
        # First call falls through to the same field wiring as the cached path.
        _schema.etag = cls._schema_application_security_group_read.etag
        _schema.id = cls._schema_application_security_group_read.id
        _schema.location = cls._schema_application_security_group_read.location
        _schema.name = cls._schema_application_security_group_read.name
        _schema.properties = cls._schema_application_security_group_read.properties
        _schema.tags = cls._schema_application_security_group_read.tags
        _schema.type = cls._schema_application_security_group_read.type
    # Lazily-built, class-level cache for the ExtendedLocation schema.
    _schema_extended_location_read = None
    @classmethod
    def _build_schema_extended_location_read(cls, _schema):
        """Wire the shared ExtendedLocation read-schema (name + type) into ``_schema``."""
        if cls._schema_extended_location_read is not None:
            _schema.name = cls._schema_extended_location_read.name
            _schema.type = cls._schema_extended_location_read.type
            return
        cls._schema_extended_location_read = _schema_extended_location_read = AAZObjectType()
        extended_location_read = _schema_extended_location_read
        extended_location_read.name = AAZStrType()
        extended_location_read.type = AAZStrType()
        _schema.name = cls._schema_extended_location_read.name
        _schema.type = cls._schema_extended_location_read.type
    # Lazily-built, class-level cache for the FrontendIPConfiguration schema.
    _schema_frontend_ip_configuration_read = None
    @classmethod
    def _build_schema_frontend_ip_configuration_read(cls, _schema):
        """Wire the shared FrontendIPConfiguration read-schema into ``_schema``.

        Built once and cached on the class; nested resource references
        (gateway load balancer, NAT pools/rules, public IP, subnet, ...) are
        delegated to the sibling ``_build_schema_*_read`` helpers.
        """
        if cls._schema_frontend_ip_configuration_read is not None:
            _schema.etag = cls._schema_frontend_ip_configuration_read.etag
            _schema.id = cls._schema_frontend_ip_configuration_read.id
            _schema.name = cls._schema_frontend_ip_configuration_read.name
            _schema.properties = cls._schema_frontend_ip_configuration_read.properties
            _schema.type = cls._schema_frontend_ip_configuration_read.type
            _schema.zones = cls._schema_frontend_ip_configuration_read.zones
            return
        cls._schema_frontend_ip_configuration_read = _schema_frontend_ip_configuration_read = AAZObjectType()
        frontend_ip_configuration_read = _schema_frontend_ip_configuration_read
        frontend_ip_configuration_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        frontend_ip_configuration_read.id = AAZStrType()
        frontend_ip_configuration_read.name = AAZStrType()
        frontend_ip_configuration_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        frontend_ip_configuration_read.type = AAZStrType(
            flags={"read_only": True},
        )
        frontend_ip_configuration_read.zones = AAZListType()
        properties = _schema_frontend_ip_configuration_read.properties
        properties.gateway_load_balancer = AAZObjectType(
            serialized_name="gatewayLoadBalancer",
        )
        cls._build_schema_sub_resource_read(properties.gateway_load_balancer)
        properties.inbound_nat_pools = AAZListType(
            serialized_name="inboundNatPools",
            flags={"read_only": True},
        )
        properties.inbound_nat_rules = AAZListType(
            serialized_name="inboundNatRules",
            flags={"read_only": True},
        )
        properties.load_balancing_rules = AAZListType(
            serialized_name="loadBalancingRules",
            flags={"read_only": True},
        )
        properties.outbound_rules = AAZListType(
            serialized_name="outboundRules",
            flags={"read_only": True},
        )
        properties.private_ip_address = AAZStrType(
            serialized_name="privateIPAddress",
        )
        properties.private_ip_address_version = AAZStrType(
            serialized_name="privateIPAddressVersion",
        )
        properties.private_ip_allocation_method = AAZStrType(
            serialized_name="privateIPAllocationMethod",
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.public_ip_address = AAZObjectType(
            serialized_name="publicIPAddress",
        )
        cls._build_schema_public_ip_address_read(properties.public_ip_address)
        properties.public_ip_prefix = AAZObjectType(
            serialized_name="publicIPPrefix",
        )
        cls._build_schema_sub_resource_read(properties.public_ip_prefix)
        properties.subnet = AAZObjectType()
        cls._build_schema_subnet_read(properties.subnet)
        # List elements for the read-only reference collections are plain
        # SubResource objects (id-only references).
        inbound_nat_pools = _schema_frontend_ip_configuration_read.properties.inbound_nat_pools
        inbound_nat_pools.Element = AAZObjectType()
        cls._build_schema_sub_resource_read(inbound_nat_pools.Element)
        inbound_nat_rules = _schema_frontend_ip_configuration_read.properties.inbound_nat_rules
        inbound_nat_rules.Element = AAZObjectType()
        cls._build_schema_sub_resource_read(inbound_nat_rules.Element)
        load_balancing_rules = _schema_frontend_ip_configuration_read.properties.load_balancing_rules
        load_balancing_rules.Element = AAZObjectType()
        cls._build_schema_sub_resource_read(load_balancing_rules.Element)
        outbound_rules = _schema_frontend_ip_configuration_read.properties.outbound_rules
        outbound_rules.Element = AAZObjectType()
        cls._build_schema_sub_resource_read(outbound_rules.Element)
        zones = _schema_frontend_ip_configuration_read.zones
        zones.Element = AAZStrType()
        _schema.etag = cls._schema_frontend_ip_configuration_read.etag
        _schema.id = cls._schema_frontend_ip_configuration_read.id
        _schema.name = cls._schema_frontend_ip_configuration_read.name
        _schema.properties = cls._schema_frontend_ip_configuration_read.properties
        _schema.type = cls._schema_frontend_ip_configuration_read.type
        _schema.zones = cls._schema_frontend_ip_configuration_read.zones
    # Lazily-built, class-level cache for the IPConfiguration schema.
    _schema_ip_configuration_read = None
    @classmethod
    def _build_schema_ip_configuration_read(cls, _schema):
        """Wire the shared IPConfiguration read-schema into ``_schema``.

        Built once and cached on the class; the nested public IP address and
        subnet schemas are delegated to the sibling builder helpers.
        """
        if cls._schema_ip_configuration_read is not None:
            _schema.etag = cls._schema_ip_configuration_read.etag
            _schema.id = cls._schema_ip_configuration_read.id
            _schema.name = cls._schema_ip_configuration_read.name
            _schema.properties = cls._schema_ip_configuration_read.properties
            return
        cls._schema_ip_configuration_read = _schema_ip_configuration_read = AAZObjectType()
        ip_configuration_read = _schema_ip_configuration_read
        ip_configuration_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        ip_configuration_read.id = AAZStrType()
        ip_configuration_read.name = AAZStrType()
        ip_configuration_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        properties = _schema_ip_configuration_read.properties
        properties.private_ip_address = AAZStrType(
            serialized_name="privateIPAddress",
        )
        properties.private_ip_allocation_method = AAZStrType(
            serialized_name="privateIPAllocationMethod",
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.public_ip_address = AAZObjectType(
            serialized_name="publicIPAddress",
        )
        cls._build_schema_public_ip_address_read(properties.public_ip_address)
        properties.subnet = AAZObjectType()
        cls._build_schema_subnet_read(properties.subnet)
        _schema.etag = cls._schema_ip_configuration_read.etag
        _schema.id = cls._schema_ip_configuration_read.id
        _schema.name = cls._schema_ip_configuration_read.name
        _schema.properties = cls._schema_ip_configuration_read.properties
_schema_network_interface_ip_configuration_read = None
@classmethod
def _build_schema_network_interface_ip_configuration_read(cls, _schema):
if cls._schema_network_interface_ip_configuration_read is not None:
_schema.etag = cls._schema_network_interface_ip_configuration_read.etag
_schema.id = cls._schema_network_interface_ip_configuration_read.id
_schema.name = cls._schema_network_interface_ip_configuration_read.name
_schema.properties = cls._schema_network_interface_ip_configuration_read.properties
_schema.type = cls._schema_network_interface_ip_configuration_read.type
return
cls._schema_network_interface_ip_configuration_read = _schema_network_interface_ip_configuration_read = AAZObjectType()
network_interface_ip_configuration_read = _schema_network_interface_ip_configuration_read
network_interface_ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
network_interface_ip_configuration_read.id = AAZStrType()
network_interface_ip_configuration_read.name = AAZStrType()
network_interface_ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_interface_ip_configuration_read.type = AAZStrType()
properties = _schema_network_interface_ip_configuration_read.properties
properties.application_gateway_backend_address_pools = AAZListType(
serialized_name="applicationGatewayBackendAddressPools",
)
properties.application_security_groups = AAZListType(
serialized_name="applicationSecurityGroups",
)
properties.gateway_load_balancer = AAZObjectType(
serialized_name="gatewayLoadBalancer",
)
cls._build_schema_sub_resource_read(properties.gateway_load_balancer)
properties.load_balancer_backend_address_pools = AAZListType(
serialized_name="loadBalancerBackendAddressPools",
)
properties.load_balancer_inbound_nat_rules = AAZListType(
serialized_name="loadBalancerInboundNatRules",
)
properties.primary = AAZBoolType()
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_address_version = AAZStrType(
serialized_name="privateIPAddressVersion",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.private_link_connection_properties = AAZObjectType(
serialized_name="privateLinkConnectionProperties",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.public_ip_address)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
properties.virtual_network_taps = AAZListType(
serialized_name="virtualNetworkTaps",
)
application_gateway_backend_address_pools = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools
application_gateway_backend_address_pools.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties
properties.backend_addresses = AAZListType(
serialized_name="backendAddresses",
)
properties.backend_ip_configurations = AAZListType(
serialized_name="backendIPConfigurations",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
backend_addresses = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties.backend_addresses
backend_addresses.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties.backend_addresses.Element
_element.fqdn = AAZStrType()
_element.ip_address = AAZStrType(
serialized_name="ipAddress",
)
backend_ip_configurations = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties.backend_ip_configurations
backend_ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(backend_ip_configurations.Element)
application_security_groups = _schema_network_interface_ip_configuration_read.properties.application_security_groups
application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(application_security_groups.Element)
load_balancer_backend_address_pools = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools
load_balancer_backend_address_pools.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties
properties.backend_ip_configurations = AAZListType(
serialized_name="backendIPConfigurations",
flags={"read_only": True},
)
properties.drain_period_in_seconds = AAZIntType(
serialized_name="drainPeriodInSeconds",
)
properties.inbound_nat_rules = AAZListType(
serialized_name="inboundNatRules",
flags={"read_only": True},
)
properties.load_balancer_backend_addresses = AAZListType(
serialized_name="loadBalancerBackendAddresses",
)
properties.load_balancing_rules = AAZListType(
serialized_name="loadBalancingRules",
flags={"read_only": True},
)
properties.location = AAZStrType()
properties.outbound_rule = AAZObjectType(
serialized_name="outboundRule",
)
cls._build_schema_sub_resource_read(properties.outbound_rule)
properties.outbound_rules = AAZListType(
serialized_name="outboundRules",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.tunnel_interfaces = AAZListType(
serialized_name="tunnelInterfaces",
)
backend_ip_configurations = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.backend_ip_configurations
backend_ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(backend_ip_configurations.Element)
inbound_nat_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.inbound_nat_rules
inbound_nat_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(inbound_nat_rules.Element)
load_balancer_backend_addresses = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses
load_balancer_backend_addresses.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties
properties.admin_state = AAZStrType(
serialized_name="adminState",
)
properties.inbound_nat_rules_port_mapping = AAZListType(
serialized_name="inboundNatRulesPortMapping",
flags={"read_only": True},
)
properties.ip_address = AAZStrType(
serialized_name="ipAddress",
)
properties.load_balancer_frontend_ip_configuration = AAZObjectType(
serialized_name="loadBalancerFrontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.load_balancer_frontend_ip_configuration)
properties.network_interface_ip_configuration = AAZObjectType(
serialized_name="networkInterfaceIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.network_interface_ip_configuration)
properties.subnet = AAZObjectType()
cls._build_schema_sub_resource_read(properties.subnet)
properties.virtual_network = AAZObjectType(
serialized_name="virtualNetwork",
)
cls._build_schema_sub_resource_read(properties.virtual_network)
inbound_nat_rules_port_mapping = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties.inbound_nat_rules_port_mapping
inbound_nat_rules_port_mapping.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties.inbound_nat_rules_port_mapping.Element
_element.backend_port = AAZIntType(
serialized_name="backendPort",
)
_element.frontend_port = AAZIntType(
serialized_name="frontendPort",
)
_element.inbound_nat_rule_name = AAZStrType(
serialized_name="inboundNatRuleName",
)
load_balancing_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancing_rules
load_balancing_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(load_balancing_rules.Element)
outbound_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.outbound_rules
outbound_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(outbound_rules.Element)
tunnel_interfaces = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.tunnel_interfaces
tunnel_interfaces.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.tunnel_interfaces.Element
_element.identifier = AAZIntType()
_element.port = AAZIntType()
_element.protocol = AAZStrType()
_element.type = AAZStrType()
load_balancer_inbound_nat_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules
load_balancer_inbound_nat_rules.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules.Element.properties
properties.backend_address_pool = AAZObjectType(
serialized_name="backendAddressPool",
)
cls._build_schema_sub_resource_read(properties.backend_address_pool)
properties.backend_ip_configuration = AAZObjectType(
serialized_name="backendIPConfiguration",
)
cls._build_schema_network_interface_ip_configuration_read(properties.backend_ip_configuration)
properties.backend_port = AAZIntType(
serialized_name="backendPort",
)
properties.enable_floating_ip = AAZBoolType(
serialized_name="enableFloatingIP",
)
properties.enable_tcp_reset = AAZBoolType(
serialized_name="enableTcpReset",
)
properties.frontend_ip_configuration = AAZObjectType(
serialized_name="frontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.frontend_ip_configuration)
properties.frontend_port = AAZIntType(
serialized_name="frontendPort",
)
properties.frontend_port_range_end = AAZIntType(
serialized_name="frontendPortRangeEnd",
)
properties.frontend_port_range_start = AAZIntType(
serialized_name="frontendPortRangeStart",
)
properties.idle_timeout_in_minutes = AAZIntType(
serialized_name="idleTimeoutInMinutes",
)
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
private_link_connection_properties = _schema_network_interface_ip_configuration_read.properties.private_link_connection_properties
private_link_connection_properties.fqdns = AAZListType(
flags={"read_only": True},
)
private_link_connection_properties.group_id = AAZStrType(
serialized_name="groupId",
flags={"read_only": True},
)
private_link_connection_properties.required_member_name = AAZStrType(
serialized_name="requiredMemberName",
flags={"read_only": True},
)
fqdns = _schema_network_interface_ip_configuration_read.properties.private_link_connection_properties.fqdns
fqdns.Element = AAZStrType()
virtual_network_taps = _schema_network_interface_ip_configuration_read.properties.virtual_network_taps
virtual_network_taps.Element = AAZObjectType()
cls._build_schema_virtual_network_tap_read(virtual_network_taps.Element)
_schema.etag = cls._schema_network_interface_ip_configuration_read.etag
_schema.id = cls._schema_network_interface_ip_configuration_read.id
_schema.name = cls._schema_network_interface_ip_configuration_read.name
_schema.properties = cls._schema_network_interface_ip_configuration_read.properties
_schema.type = cls._schema_network_interface_ip_configuration_read.type
    # Cache slot for the shared NetworkInterfaceTapConfiguration read schema.
    _schema_network_interface_tap_configuration_read = None
    @classmethod
    def _build_schema_network_interface_tap_configuration_read(cls, _schema):
        """Attach the shared NetworkInterfaceTapConfiguration read schema to *_schema*.

        The schema is built once and cached on the class; every call copies
        the cached field objects onto the target *_schema*.
        """
        if cls._schema_network_interface_tap_configuration_read is not None:
            # Fast path: schema already built, just share its fields.
            _schema.etag = cls._schema_network_interface_tap_configuration_read.etag
            _schema.id = cls._schema_network_interface_tap_configuration_read.id
            _schema.name = cls._schema_network_interface_tap_configuration_read.name
            _schema.properties = cls._schema_network_interface_tap_configuration_read.properties
            _schema.type = cls._schema_network_interface_tap_configuration_read.type
            return
        # Register the new schema in the class cache before populating it so
        # that re-entrant builder calls see it (the builders are mutually recursive).
        cls._schema_network_interface_tap_configuration_read = _schema_network_interface_tap_configuration_read = AAZObjectType()
        network_interface_tap_configuration_read = _schema_network_interface_tap_configuration_read
        network_interface_tap_configuration_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        network_interface_tap_configuration_read.id = AAZStrType()
        network_interface_tap_configuration_read.name = AAZStrType()
        network_interface_tap_configuration_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        network_interface_tap_configuration_read.type = AAZStrType(
            flags={"read_only": True},
        )
        properties = _schema_network_interface_tap_configuration_read.properties
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.virtual_network_tap = AAZObjectType(
            serialized_name="virtualNetworkTap",
        )
        cls._build_schema_virtual_network_tap_read(properties.virtual_network_tap)
        # Copy the freshly built field objects onto the caller's schema.
        _schema.etag = cls._schema_network_interface_tap_configuration_read.etag
        _schema.id = cls._schema_network_interface_tap_configuration_read.id
        _schema.name = cls._schema_network_interface_tap_configuration_read.name
        _schema.properties = cls._schema_network_interface_tap_configuration_read.properties
        _schema.type = cls._schema_network_interface_tap_configuration_read.type
    # Cache slot for the shared NetworkInterface read schema.
    _schema_network_interface_read = None
    @classmethod
    def _build_schema_network_interface_read(cls, _schema):
        """Attach the shared NetworkInterface read schema to *_schema*.

        Builds the schema on first use and caches it on the class. The cache
        is registered before the nested fields are populated so that the
        recursive references terminate: this schema references itself (via
        network_interfaces elements) and the network-security-group schema,
        which in turn references this one.
        """
        if cls._schema_network_interface_read is not None:
            # Fast path: schema already built, just share its fields.
            _schema.etag = cls._schema_network_interface_read.etag
            _schema.extended_location = cls._schema_network_interface_read.extended_location
            _schema.id = cls._schema_network_interface_read.id
            _schema.location = cls._schema_network_interface_read.location
            _schema.name = cls._schema_network_interface_read.name
            _schema.properties = cls._schema_network_interface_read.properties
            _schema.tags = cls._schema_network_interface_read.tags
            _schema.type = cls._schema_network_interface_read.type
            return
        # Register in the cache first (see docstring), then populate.
        cls._schema_network_interface_read = _schema_network_interface_read = AAZObjectType()
        network_interface_read = _schema_network_interface_read
        network_interface_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        network_interface_read.extended_location = AAZObjectType(
            serialized_name="extendedLocation",
        )
        cls._build_schema_extended_location_read(network_interface_read.extended_location)
        network_interface_read.id = AAZStrType()
        network_interface_read.location = AAZStrType()
        network_interface_read.name = AAZStrType(
            flags={"read_only": True},
        )
        network_interface_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        network_interface_read.tags = AAZDictType()
        network_interface_read.type = AAZStrType(
            flags={"read_only": True},
        )
        # NetworkInterface.properties
        properties = _schema_network_interface_read.properties
        properties.auxiliary_mode = AAZStrType(
            serialized_name="auxiliaryMode",
        )
        properties.dns_settings = AAZObjectType(
            serialized_name="dnsSettings",
        )
        properties.dscp_configuration = AAZObjectType(
            serialized_name="dscpConfiguration",
        )
        cls._build_schema_sub_resource_read(properties.dscp_configuration)
        properties.enable_accelerated_networking = AAZBoolType(
            serialized_name="enableAcceleratedNetworking",
        )
        properties.enable_ip_forwarding = AAZBoolType(
            serialized_name="enableIPForwarding",
        )
        properties.hosted_workloads = AAZListType(
            serialized_name="hostedWorkloads",
            flags={"read_only": True},
        )
        properties.ip_configurations = AAZListType(
            serialized_name="ipConfigurations",
        )
        properties.mac_address = AAZStrType(
            serialized_name="macAddress",
            flags={"read_only": True},
        )
        properties.migration_phase = AAZStrType(
            serialized_name="migrationPhase",
        )
        properties.network_security_group = AAZObjectType(
            serialized_name="networkSecurityGroup",
        )
        cls._build_schema_network_security_group_read(properties.network_security_group)
        properties.nic_type = AAZStrType(
            serialized_name="nicType",
        )
        properties.primary = AAZBoolType(
            flags={"read_only": True},
        )
        properties.private_endpoint = AAZObjectType(
            serialized_name="privateEndpoint",
        )
        cls._build_schema_private_endpoint_read(properties.private_endpoint)
        properties.private_link_service = AAZObjectType(
            serialized_name="privateLinkService",
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.resource_guid = AAZStrType(
            serialized_name="resourceGuid",
            flags={"read_only": True},
        )
        properties.tap_configurations = AAZListType(
            serialized_name="tapConfigurations",
            flags={"read_only": True},
        )
        properties.virtual_machine = AAZObjectType(
            serialized_name="virtualMachine",
        )
        cls._build_schema_sub_resource_read(properties.virtual_machine)
        properties.vnet_encryption_supported = AAZBoolType(
            serialized_name="vnetEncryptionSupported",
            flags={"read_only": True},
        )
        properties.workload_type = AAZStrType(
            serialized_name="workloadType",
        )
        # properties.dnsSettings (NetworkInterfaceDnsSettings)
        dns_settings = _schema_network_interface_read.properties.dns_settings
        dns_settings.applied_dns_servers = AAZListType(
            serialized_name="appliedDnsServers",
            flags={"read_only": True},
        )
        dns_settings.dns_servers = AAZListType(
            serialized_name="dnsServers",
        )
        dns_settings.internal_dns_name_label = AAZStrType(
            serialized_name="internalDnsNameLabel",
        )
        dns_settings.internal_domain_name_suffix = AAZStrType(
            serialized_name="internalDomainNameSuffix",
            flags={"read_only": True},
        )
        dns_settings.internal_fqdn = AAZStrType(
            serialized_name="internalFqdn",
            flags={"read_only": True},
        )
        applied_dns_servers = _schema_network_interface_read.properties.dns_settings.applied_dns_servers
        applied_dns_servers.Element = AAZStrType()
        dns_servers = _schema_network_interface_read.properties.dns_settings.dns_servers
        dns_servers.Element = AAZStrType()
        hosted_workloads = _schema_network_interface_read.properties.hosted_workloads
        hosted_workloads.Element = AAZStrType()
        ip_configurations = _schema_network_interface_read.properties.ip_configurations
        ip_configurations.Element = AAZObjectType()
        cls._build_schema_network_interface_ip_configuration_read(ip_configurations.Element)
        # properties.privateLinkService (PrivateLinkService)
        private_link_service = _schema_network_interface_read.properties.private_link_service
        private_link_service.etag = AAZStrType(
            flags={"read_only": True},
        )
        private_link_service.extended_location = AAZObjectType(
            serialized_name="extendedLocation",
        )
        cls._build_schema_extended_location_read(private_link_service.extended_location)
        private_link_service.id = AAZStrType()
        private_link_service.location = AAZStrType()
        private_link_service.name = AAZStrType(
            flags={"read_only": True},
        )
        private_link_service.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        private_link_service.tags = AAZDictType()
        private_link_service.type = AAZStrType(
            flags={"read_only": True},
        )
        # NOTE: `properties` is rebound here to the nested private_link_service properties.
        properties = _schema_network_interface_read.properties.private_link_service.properties
        properties.alias = AAZStrType(
            flags={"read_only": True},
        )
        properties.auto_approval = AAZObjectType(
            serialized_name="autoApproval",
        )
        properties.enable_proxy_protocol = AAZBoolType(
            serialized_name="enableProxyProtocol",
        )
        properties.fqdns = AAZListType()
        properties.ip_configurations = AAZListType(
            serialized_name="ipConfigurations",
        )
        properties.load_balancer_frontend_ip_configurations = AAZListType(
            serialized_name="loadBalancerFrontendIpConfigurations",
        )
        properties.network_interfaces = AAZListType(
            serialized_name="networkInterfaces",
            flags={"read_only": True},
        )
        properties.private_endpoint_connections = AAZListType(
            serialized_name="privateEndpointConnections",
            flags={"read_only": True},
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.visibility = AAZObjectType()
        auto_approval = _schema_network_interface_read.properties.private_link_service.properties.auto_approval
        auto_approval.subscriptions = AAZListType()
        subscriptions = _schema_network_interface_read.properties.private_link_service.properties.auto_approval.subscriptions
        subscriptions.Element = AAZStrType()
        fqdns = _schema_network_interface_read.properties.private_link_service.properties.fqdns
        fqdns.Element = AAZStrType()
        ip_configurations = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations
        ip_configurations.Element = AAZObjectType()
        _element = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations.Element
        _element.etag = AAZStrType(
            flags={"read_only": True},
        )
        _element.id = AAZStrType()
        _element.name = AAZStrType()
        _element.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        _element.type = AAZStrType(
            flags={"read_only": True},
        )
        properties = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations.Element.properties
        properties.primary = AAZBoolType()
        properties.private_ip_address = AAZStrType(
            serialized_name="privateIPAddress",
        )
        properties.private_ip_address_version = AAZStrType(
            serialized_name="privateIPAddressVersion",
        )
        properties.private_ip_allocation_method = AAZStrType(
            serialized_name="privateIPAllocationMethod",
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.subnet = AAZObjectType()
        cls._build_schema_subnet_read(properties.subnet)
        load_balancer_frontend_ip_configurations = _schema_network_interface_read.properties.private_link_service.properties.load_balancer_frontend_ip_configurations
        load_balancer_frontend_ip_configurations.Element = AAZObjectType()
        cls._build_schema_frontend_ip_configuration_read(load_balancer_frontend_ip_configurations.Element)
        network_interfaces = _schema_network_interface_read.properties.private_link_service.properties.network_interfaces
        network_interfaces.Element = AAZObjectType()
        # Self-recursive reference; terminates because the cache was set above.
        cls._build_schema_network_interface_read(network_interfaces.Element)
        private_endpoint_connections = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections
        private_endpoint_connections.Element = AAZObjectType()
        _element = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections.Element
        _element.etag = AAZStrType(
            flags={"read_only": True},
        )
        _element.id = AAZStrType()
        _element.name = AAZStrType()
        _element.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        _element.type = AAZStrType(
            flags={"read_only": True},
        )
        properties = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections.Element.properties
        properties.link_identifier = AAZStrType(
            serialized_name="linkIdentifier",
            flags={"read_only": True},
        )
        properties.private_endpoint = AAZObjectType(
            serialized_name="privateEndpoint",
        )
        cls._build_schema_private_endpoint_read(properties.private_endpoint)
        properties.private_link_service_connection_state = AAZObjectType(
            serialized_name="privateLinkServiceConnectionState",
        )
        cls._build_schema_private_link_service_connection_state_read(properties.private_link_service_connection_state)
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        visibility = _schema_network_interface_read.properties.private_link_service.properties.visibility
        visibility.subscriptions = AAZListType()
        subscriptions = _schema_network_interface_read.properties.private_link_service.properties.visibility.subscriptions
        subscriptions.Element = AAZStrType()
        tags = _schema_network_interface_read.properties.private_link_service.tags
        tags.Element = AAZStrType()
        tap_configurations = _schema_network_interface_read.properties.tap_configurations
        tap_configurations.Element = AAZObjectType()
        cls._build_schema_network_interface_tap_configuration_read(tap_configurations.Element)
        tags = _schema_network_interface_read.tags
        tags.Element = AAZStrType()
        # Copy the freshly built field objects onto the caller's schema.
        _schema.etag = cls._schema_network_interface_read.etag
        _schema.extended_location = cls._schema_network_interface_read.extended_location
        _schema.id = cls._schema_network_interface_read.id
        _schema.location = cls._schema_network_interface_read.location
        _schema.name = cls._schema_network_interface_read.name
        _schema.properties = cls._schema_network_interface_read.properties
        _schema.tags = cls._schema_network_interface_read.tags
        _schema.type = cls._schema_network_interface_read.type
    # Cache slot for the shared NetworkSecurityGroup read schema.
    _schema_network_security_group_read = None
    @classmethod
    def _build_schema_network_security_group_read(cls, _schema):
        """Attach the shared NetworkSecurityGroup read schema to *_schema*.

        Built once and cached on the class. The cache is registered before
        population because this schema and the network-interface schema
        reference each other.
        """
        if cls._schema_network_security_group_read is not None:
            # Fast path: schema already built, just share its fields.
            _schema.etag = cls._schema_network_security_group_read.etag
            _schema.id = cls._schema_network_security_group_read.id
            _schema.location = cls._schema_network_security_group_read.location
            _schema.name = cls._schema_network_security_group_read.name
            _schema.properties = cls._schema_network_security_group_read.properties
            _schema.tags = cls._schema_network_security_group_read.tags
            _schema.type = cls._schema_network_security_group_read.type
            return
        # Register in the cache first (see docstring), then populate.
        cls._schema_network_security_group_read = _schema_network_security_group_read = AAZObjectType()
        network_security_group_read = _schema_network_security_group_read
        network_security_group_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        network_security_group_read.id = AAZStrType()
        network_security_group_read.location = AAZStrType()
        network_security_group_read.name = AAZStrType(
            flags={"read_only": True},
        )
        network_security_group_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        network_security_group_read.tags = AAZDictType()
        network_security_group_read.type = AAZStrType(
            flags={"read_only": True},
        )
        # NetworkSecurityGroup.properties
        properties = _schema_network_security_group_read.properties
        properties.default_security_rules = AAZListType(
            serialized_name="defaultSecurityRules",
            flags={"read_only": True},
        )
        properties.flow_logs = AAZListType(
            serialized_name="flowLogs",
            flags={"read_only": True},
        )
        properties.network_interfaces = AAZListType(
            serialized_name="networkInterfaces",
            flags={"read_only": True},
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.resource_guid = AAZStrType(
            serialized_name="resourceGuid",
            flags={"read_only": True},
        )
        properties.security_rules = AAZListType(
            serialized_name="securityRules",
        )
        properties.subnets = AAZListType(
            flags={"read_only": True},
        )
        default_security_rules = _schema_network_security_group_read.properties.default_security_rules
        default_security_rules.Element = AAZObjectType()
        cls._build_schema_security_rule_read(default_security_rules.Element)
        # properties.flowLogs element (FlowLog)
        flow_logs = _schema_network_security_group_read.properties.flow_logs
        flow_logs.Element = AAZObjectType()
        _element = _schema_network_security_group_read.properties.flow_logs.Element
        _element.etag = AAZStrType(
            flags={"read_only": True},
        )
        _element.id = AAZStrType()
        _element.location = AAZStrType()
        _element.name = AAZStrType(
            flags={"read_only": True},
        )
        _element.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        _element.tags = AAZDictType()
        _element.type = AAZStrType(
            flags={"read_only": True},
        )
        # NOTE: `properties` is rebound here to the FlowLog element properties.
        properties = _schema_network_security_group_read.properties.flow_logs.Element.properties
        properties.enabled = AAZBoolType()
        properties.flow_analytics_configuration = AAZObjectType(
            serialized_name="flowAnalyticsConfiguration",
        )
        properties.format = AAZObjectType()
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.retention_policy = AAZObjectType(
            serialized_name="retentionPolicy",
        )
        properties.storage_id = AAZStrType(
            serialized_name="storageId",
            flags={"required": True},
        )
        properties.target_resource_guid = AAZStrType(
            serialized_name="targetResourceGuid",
            flags={"read_only": True},
        )
        properties.target_resource_id = AAZStrType(
            serialized_name="targetResourceId",
            flags={"required": True},
        )
        flow_analytics_configuration = _schema_network_security_group_read.properties.flow_logs.Element.properties.flow_analytics_configuration
        flow_analytics_configuration.network_watcher_flow_analytics_configuration = AAZObjectType(
            serialized_name="networkWatcherFlowAnalyticsConfiguration",
        )
        network_watcher_flow_analytics_configuration = _schema_network_security_group_read.properties.flow_logs.Element.properties.flow_analytics_configuration.network_watcher_flow_analytics_configuration
        network_watcher_flow_analytics_configuration.enabled = AAZBoolType()
        network_watcher_flow_analytics_configuration.traffic_analytics_interval = AAZIntType(
            serialized_name="trafficAnalyticsInterval",
        )
        network_watcher_flow_analytics_configuration.workspace_id = AAZStrType(
            serialized_name="workspaceId",
        )
        network_watcher_flow_analytics_configuration.workspace_region = AAZStrType(
            serialized_name="workspaceRegion",
        )
        network_watcher_flow_analytics_configuration.workspace_resource_id = AAZStrType(
            serialized_name="workspaceResourceId",
        )
        format = _schema_network_security_group_read.properties.flow_logs.Element.properties.format
        format.type = AAZStrType()
        format.version = AAZIntType()
        retention_policy = _schema_network_security_group_read.properties.flow_logs.Element.properties.retention_policy
        retention_policy.days = AAZIntType()
        retention_policy.enabled = AAZBoolType()
        tags = _schema_network_security_group_read.properties.flow_logs.Element.tags
        tags.Element = AAZStrType()
        network_interfaces = _schema_network_security_group_read.properties.network_interfaces
        network_interfaces.Element = AAZObjectType()
        # Mutually recursive with the network-interface builder; terminates via its cache.
        cls._build_schema_network_interface_read(network_interfaces.Element)
        security_rules = _schema_network_security_group_read.properties.security_rules
        security_rules.Element = AAZObjectType()
        cls._build_schema_security_rule_read(security_rules.Element)
        subnets = _schema_network_security_group_read.properties.subnets
        subnets.Element = AAZObjectType()
        cls._build_schema_subnet_read(subnets.Element)
        tags = _schema_network_security_group_read.tags
        tags.Element = AAZStrType()
        # Copy the freshly built field objects onto the caller's schema.
        _schema.etag = cls._schema_network_security_group_read.etag
        _schema.id = cls._schema_network_security_group_read.id
        _schema.location = cls._schema_network_security_group_read.location
        _schema.name = cls._schema_network_security_group_read.name
        _schema.properties = cls._schema_network_security_group_read.properties
        _schema.tags = cls._schema_network_security_group_read.tags
        _schema.type = cls._schema_network_security_group_read.type
    # Cache slot for the shared PrivateEndpoint read schema.
    _schema_private_endpoint_read = None
    @classmethod
    def _build_schema_private_endpoint_read(cls, _schema):
        """Attach the shared PrivateEndpoint read schema to *_schema*.

        Built once and cached on the class. The cache is registered before
        population because this schema participates in the mutually recursive
        builder web (e.g. it references the network-interface schema, which
        references private endpoints back).
        """
        if cls._schema_private_endpoint_read is not None:
            # Fast path: schema already built, just share its fields.
            _schema.etag = cls._schema_private_endpoint_read.etag
            _schema.extended_location = cls._schema_private_endpoint_read.extended_location
            _schema.id = cls._schema_private_endpoint_read.id
            _schema.location = cls._schema_private_endpoint_read.location
            _schema.name = cls._schema_private_endpoint_read.name
            _schema.properties = cls._schema_private_endpoint_read.properties
            _schema.tags = cls._schema_private_endpoint_read.tags
            _schema.type = cls._schema_private_endpoint_read.type
            return
        # Register in the cache first (see docstring), then populate.
        cls._schema_private_endpoint_read = _schema_private_endpoint_read = AAZObjectType()
        private_endpoint_read = _schema_private_endpoint_read
        private_endpoint_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        private_endpoint_read.extended_location = AAZObjectType(
            serialized_name="extendedLocation",
        )
        cls._build_schema_extended_location_read(private_endpoint_read.extended_location)
        private_endpoint_read.id = AAZStrType()
        private_endpoint_read.location = AAZStrType()
        private_endpoint_read.name = AAZStrType(
            flags={"read_only": True},
        )
        private_endpoint_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        private_endpoint_read.tags = AAZDictType()
        private_endpoint_read.type = AAZStrType(
            flags={"read_only": True},
        )
        # PrivateEndpoint.properties
        properties = _schema_private_endpoint_read.properties
        properties.application_security_groups = AAZListType(
            serialized_name="applicationSecurityGroups",
        )
        properties.custom_dns_configs = AAZListType(
            serialized_name="customDnsConfigs",
        )
        properties.custom_network_interface_name = AAZStrType(
            serialized_name="customNetworkInterfaceName",
        )
        properties.ip_configurations = AAZListType(
            serialized_name="ipConfigurations",
        )
        properties.manual_private_link_service_connections = AAZListType(
            serialized_name="manualPrivateLinkServiceConnections",
        )
        properties.network_interfaces = AAZListType(
            serialized_name="networkInterfaces",
            flags={"read_only": True},
        )
        properties.private_link_service_connections = AAZListType(
            serialized_name="privateLinkServiceConnections",
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.subnet = AAZObjectType()
        cls._build_schema_subnet_read(properties.subnet)
        application_security_groups = _schema_private_endpoint_read.properties.application_security_groups
        application_security_groups.Element = AAZObjectType()
        cls._build_schema_application_security_group_read(application_security_groups.Element)
        custom_dns_configs = _schema_private_endpoint_read.properties.custom_dns_configs
        custom_dns_configs.Element = AAZObjectType()
        _element = _schema_private_endpoint_read.properties.custom_dns_configs.Element
        _element.fqdn = AAZStrType()
        _element.ip_addresses = AAZListType(
            serialized_name="ipAddresses",
        )
        ip_addresses = _schema_private_endpoint_read.properties.custom_dns_configs.Element.ip_addresses
        ip_addresses.Element = AAZStrType()
        # properties.ipConfigurations element
        ip_configurations = _schema_private_endpoint_read.properties.ip_configurations
        ip_configurations.Element = AAZObjectType()
        _element = _schema_private_endpoint_read.properties.ip_configurations.Element
        _element.etag = AAZStrType(
            flags={"read_only": True},
        )
        _element.name = AAZStrType()
        _element.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        _element.type = AAZStrType(
            flags={"read_only": True},
        )
        # NOTE: `properties` is rebound here to the ipConfigurations element properties.
        properties = _schema_private_endpoint_read.properties.ip_configurations.Element.properties
        properties.group_id = AAZStrType(
            serialized_name="groupId",
        )
        properties.member_name = AAZStrType(
            serialized_name="memberName",
        )
        properties.private_ip_address = AAZStrType(
            serialized_name="privateIPAddress",
        )
        manual_private_link_service_connections = _schema_private_endpoint_read.properties.manual_private_link_service_connections
        manual_private_link_service_connections.Element = AAZObjectType()
        cls._build_schema_private_link_service_connection_read(manual_private_link_service_connections.Element)
        network_interfaces = _schema_private_endpoint_read.properties.network_interfaces
        network_interfaces.Element = AAZObjectType()
        cls._build_schema_network_interface_read(network_interfaces.Element)
        private_link_service_connections = _schema_private_endpoint_read.properties.private_link_service_connections
        private_link_service_connections.Element = AAZObjectType()
        cls._build_schema_private_link_service_connection_read(private_link_service_connections.Element)
        tags = _schema_private_endpoint_read.tags
        tags.Element = AAZStrType()
        # Copy the freshly built field objects onto the caller's schema.
        _schema.etag = cls._schema_private_endpoint_read.etag
        _schema.extended_location = cls._schema_private_endpoint_read.extended_location
        _schema.id = cls._schema_private_endpoint_read.id
        _schema.location = cls._schema_private_endpoint_read.location
        _schema.name = cls._schema_private_endpoint_read.name
        _schema.properties = cls._schema_private_endpoint_read.properties
        _schema.tags = cls._schema_private_endpoint_read.tags
        _schema.type = cls._schema_private_endpoint_read.type
_schema_private_link_service_connection_state_read = None
@classmethod
def _build_schema_private_link_service_connection_state_read(cls, _schema):
if cls._schema_private_link_service_connection_state_read is not None:
_schema.actions_required = cls._schema_private_link_service_connection_state_read.actions_required
_schema.description = cls._schema_private_link_service_connection_state_read.description
_schema.status = cls._schema_private_link_service_connection_state_read.status
return
cls._schema_private_link_service_connection_state_read = _schema_private_link_service_connection_state_read = AAZObjectType()
private_link_service_connection_state_read = _schema_private_link_service_connection_state_read
private_link_service_connection_state_read.actions_required = AAZStrType(
serialized_name="actionsRequired",
)
private_link_service_connection_state_read.description = AAZStrType()
private_link_service_connection_state_read.status = AAZStrType()
_schema.actions_required = cls._schema_private_link_service_connection_state_read.actions_required
_schema.description = cls._schema_private_link_service_connection_state_read.description
_schema.status = cls._schema_private_link_service_connection_state_read.status
    # Cache slot for the shared PrivateLinkServiceConnection read schema.
    _schema_private_link_service_connection_read = None
    @classmethod
    def _build_schema_private_link_service_connection_read(cls, _schema):
        """Attach the shared PrivateLinkServiceConnection read schema to *_schema*.

        Built once and cached on the class; the cache is registered before
        population, following the pattern used by the mutually recursive
        schema builders in this class.
        """
        if cls._schema_private_link_service_connection_read is not None:
            # Fast path: schema already built, just share its fields.
            _schema.etag = cls._schema_private_link_service_connection_read.etag
            _schema.id = cls._schema_private_link_service_connection_read.id
            _schema.name = cls._schema_private_link_service_connection_read.name
            _schema.properties = cls._schema_private_link_service_connection_read.properties
            _schema.type = cls._schema_private_link_service_connection_read.type
            return
        # Register in the cache first, then populate.
        cls._schema_private_link_service_connection_read = _schema_private_link_service_connection_read = AAZObjectType()
        private_link_service_connection_read = _schema_private_link_service_connection_read
        private_link_service_connection_read.etag = AAZStrType(
            flags={"read_only": True},
        )
        private_link_service_connection_read.id = AAZStrType()
        private_link_service_connection_read.name = AAZStrType()
        private_link_service_connection_read.properties = AAZObjectType(
            flags={"client_flatten": True},
        )
        private_link_service_connection_read.type = AAZStrType(
            flags={"read_only": True},
        )
        properties = _schema_private_link_service_connection_read.properties
        properties.group_ids = AAZListType(
            serialized_name="groupIds",
        )
        properties.private_link_service_connection_state = AAZObjectType(
            serialized_name="privateLinkServiceConnectionState",
        )
        cls._build_schema_private_link_service_connection_state_read(properties.private_link_service_connection_state)
        properties.private_link_service_id = AAZStrType(
            serialized_name="privateLinkServiceId",
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.request_message = AAZStrType(
            serialized_name="requestMessage",
        )
        group_ids = _schema_private_link_service_connection_read.properties.group_ids
        group_ids.Element = AAZStrType()
        # Copy the freshly built field objects onto the caller's schema.
        _schema.etag = cls._schema_private_link_service_connection_read.etag
        _schema.id = cls._schema_private_link_service_connection_read.id
        _schema.name = cls._schema_private_link_service_connection_read.name
        _schema.properties = cls._schema_private_link_service_connection_read.properties
        _schema.type = cls._schema_private_link_service_connection_read.type
# Class-level cache for the PublicIPAddress read schema; built lazily below.
_schema_public_ip_address_read = None

@classmethod
def _build_schema_public_ip_address_read(cls, _schema):
    """Wire the shared PublicIPAddress read schema onto *_schema*.

    The schema is built once per class and cached in
    ``cls._schema_public_ip_address_read`` so that every reference to a
    PublicIPAddress in the response reuses a single type definition.  The
    schema is recursive: ``linked_public_ip_address`` and
    ``service_public_ip_address`` below re-enter this builder.
    """
    # Cache hit: just re-point the caller's fields at the cached schema.
    if cls._schema_public_ip_address_read is not None:
        _schema.etag = cls._schema_public_ip_address_read.etag
        _schema.extended_location = cls._schema_public_ip_address_read.extended_location
        _schema.id = cls._schema_public_ip_address_read.id
        _schema.location = cls._schema_public_ip_address_read.location
        _schema.name = cls._schema_public_ip_address_read.name
        _schema.properties = cls._schema_public_ip_address_read.properties
        _schema.sku = cls._schema_public_ip_address_read.sku
        _schema.tags = cls._schema_public_ip_address_read.tags
        _schema.type = cls._schema_public_ip_address_read.type
        _schema.zones = cls._schema_public_ip_address_read.zones
        return

    # Install the cache *before* populating fields so the recursive
    # builder calls further down terminate via the cache-hit branch above.
    cls._schema_public_ip_address_read = _schema_public_ip_address_read = AAZObjectType()

    public_ip_address_read = _schema_public_ip_address_read
    public_ip_address_read.etag = AAZStrType(
        flags={"read_only": True},
    )
    public_ip_address_read.extended_location = AAZObjectType(
        serialized_name="extendedLocation",
    )
    cls._build_schema_extended_location_read(public_ip_address_read.extended_location)
    public_ip_address_read.id = AAZStrType()
    public_ip_address_read.location = AAZStrType()
    public_ip_address_read.name = AAZStrType(
        flags={"read_only": True},
    )
    public_ip_address_read.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    public_ip_address_read.sku = AAZObjectType()
    public_ip_address_read.tags = AAZDictType()
    public_ip_address_read.type = AAZStrType(
        flags={"read_only": True},
    )
    public_ip_address_read.zones = AAZListType()

    properties = _schema_public_ip_address_read.properties
    properties.ddos_settings = AAZObjectType(
        serialized_name="ddosSettings",
    )
    properties.delete_option = AAZStrType(
        serialized_name="deleteOption",
    )
    properties.dns_settings = AAZObjectType(
        serialized_name="dnsSettings",
    )
    properties.idle_timeout_in_minutes = AAZIntType(
        serialized_name="idleTimeoutInMinutes",
    )
    properties.ip_address = AAZStrType(
        serialized_name="ipAddress",
    )
    properties.ip_configuration = AAZObjectType(
        serialized_name="ipConfiguration",
    )
    cls._build_schema_ip_configuration_read(properties.ip_configuration)
    properties.ip_tags = AAZListType(
        serialized_name="ipTags",
    )
    properties.linked_public_ip_address = AAZObjectType(
        serialized_name="linkedPublicIPAddress",
    )
    # Recursive reference back into this builder (terminates via cache).
    cls._build_schema_public_ip_address_read(properties.linked_public_ip_address)
    properties.migration_phase = AAZStrType(
        serialized_name="migrationPhase",
    )
    properties.nat_gateway = AAZObjectType(
        serialized_name="natGateway",
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.public_ip_address_version = AAZStrType(
        serialized_name="publicIPAddressVersion",
    )
    properties.public_ip_allocation_method = AAZStrType(
        serialized_name="publicIPAllocationMethod",
    )
    properties.public_ip_prefix = AAZObjectType(
        serialized_name="publicIPPrefix",
    )
    cls._build_schema_sub_resource_read(properties.public_ip_prefix)
    properties.resource_guid = AAZStrType(
        serialized_name="resourceGuid",
        flags={"read_only": True},
    )
    properties.service_public_ip_address = AAZObjectType(
        serialized_name="servicePublicIPAddress",
    )
    # Second recursive self-reference.
    cls._build_schema_public_ip_address_read(properties.service_public_ip_address)

    ddos_settings = _schema_public_ip_address_read.properties.ddos_settings
    ddos_settings.ddos_custom_policy = AAZObjectType(
        serialized_name="ddosCustomPolicy",
    )
    cls._build_schema_sub_resource_read(ddos_settings.ddos_custom_policy)
    ddos_settings.protected_ip = AAZBoolType(
        serialized_name="protectedIP",
    )
    ddos_settings.protection_coverage = AAZStrType(
        serialized_name="protectionCoverage",
    )

    dns_settings = _schema_public_ip_address_read.properties.dns_settings
    dns_settings.domain_name_label = AAZStrType(
        serialized_name="domainNameLabel",
    )
    dns_settings.fqdn = AAZStrType()
    dns_settings.reverse_fqdn = AAZStrType(
        serialized_name="reverseFqdn",
    )

    ip_tags = _schema_public_ip_address_read.properties.ip_tags
    ip_tags.Element = AAZObjectType()

    _element = _schema_public_ip_address_read.properties.ip_tags.Element
    _element.ip_tag_type = AAZStrType(
        serialized_name="ipTagType",
    )
    _element.tag = AAZStrType()

    nat_gateway = _schema_public_ip_address_read.properties.nat_gateway
    nat_gateway.etag = AAZStrType(
        flags={"read_only": True},
    )
    nat_gateway.id = AAZStrType()
    nat_gateway.location = AAZStrType()
    nat_gateway.name = AAZStrType(
        flags={"read_only": True},
    )
    nat_gateway.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    nat_gateway.sku = AAZObjectType()
    nat_gateway.tags = AAZDictType()
    nat_gateway.type = AAZStrType(
        flags={"read_only": True},
    )
    nat_gateway.zones = AAZListType()

    properties = _schema_public_ip_address_read.properties.nat_gateway.properties
    properties.idle_timeout_in_minutes = AAZIntType(
        serialized_name="idleTimeoutInMinutes",
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.public_ip_addresses = AAZListType(
        serialized_name="publicIpAddresses",
    )
    properties.public_ip_prefixes = AAZListType(
        serialized_name="publicIpPrefixes",
    )
    properties.resource_guid = AAZStrType(
        serialized_name="resourceGuid",
        flags={"read_only": True},
    )
    properties.subnets = AAZListType(
        flags={"read_only": True},
    )

    public_ip_addresses = _schema_public_ip_address_read.properties.nat_gateway.properties.public_ip_addresses
    public_ip_addresses.Element = AAZObjectType()
    cls._build_schema_sub_resource_read(public_ip_addresses.Element)

    public_ip_prefixes = _schema_public_ip_address_read.properties.nat_gateway.properties.public_ip_prefixes
    public_ip_prefixes.Element = AAZObjectType()
    cls._build_schema_sub_resource_read(public_ip_prefixes.Element)

    subnets = _schema_public_ip_address_read.properties.nat_gateway.properties.subnets
    subnets.Element = AAZObjectType()
    cls._build_schema_sub_resource_read(subnets.Element)

    sku = _schema_public_ip_address_read.properties.nat_gateway.sku
    sku.name = AAZStrType()

    tags = _schema_public_ip_address_read.properties.nat_gateway.tags
    tags.Element = AAZStrType()

    zones = _schema_public_ip_address_read.properties.nat_gateway.zones
    zones.Element = AAZStrType()

    sku = _schema_public_ip_address_read.sku
    sku.name = AAZStrType()
    sku.tier = AAZStrType()

    tags = _schema_public_ip_address_read.tags
    tags.Element = AAZStrType()

    zones = _schema_public_ip_address_read.zones
    zones.Element = AAZStrType()

    # Expose the freshly built schema's fields through the caller's placeholder.
    _schema.etag = cls._schema_public_ip_address_read.etag
    _schema.extended_location = cls._schema_public_ip_address_read.extended_location
    _schema.id = cls._schema_public_ip_address_read.id
    _schema.location = cls._schema_public_ip_address_read.location
    _schema.name = cls._schema_public_ip_address_read.name
    _schema.properties = cls._schema_public_ip_address_read.properties
    _schema.sku = cls._schema_public_ip_address_read.sku
    _schema.tags = cls._schema_public_ip_address_read.tags
    _schema.type = cls._schema_public_ip_address_read.type
    _schema.zones = cls._schema_public_ip_address_read.zones
# Class-level cache for the SecurityRule read schema; built lazily below.
_schema_security_rule_read = None

@classmethod
def _build_schema_security_rule_read(cls, _schema):
    """Wire the shared SecurityRule read schema onto *_schema*.

    Built once per class and cached in ``cls._schema_security_rule_read``
    so repeated references reuse one type definition.
    """
    # Cache hit: re-point the caller's fields at the cached schema.
    if cls._schema_security_rule_read is not None:
        _schema.etag = cls._schema_security_rule_read.etag
        _schema.id = cls._schema_security_rule_read.id
        _schema.name = cls._schema_security_rule_read.name
        _schema.properties = cls._schema_security_rule_read.properties
        _schema.type = cls._schema_security_rule_read.type
        return

    # Install the cache before populating fields (matches the other builders).
    cls._schema_security_rule_read = _schema_security_rule_read = AAZObjectType()

    security_rule_read = _schema_security_rule_read
    security_rule_read.etag = AAZStrType(
        flags={"read_only": True},
    )
    security_rule_read.id = AAZStrType()
    security_rule_read.name = AAZStrType()
    security_rule_read.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    security_rule_read.type = AAZStrType()

    properties = _schema_security_rule_read.properties
    properties.access = AAZStrType(
        flags={"required": True},
    )
    properties.description = AAZStrType()
    properties.destination_address_prefix = AAZStrType(
        serialized_name="destinationAddressPrefix",
    )
    properties.destination_address_prefixes = AAZListType(
        serialized_name="destinationAddressPrefixes",
    )
    properties.destination_application_security_groups = AAZListType(
        serialized_name="destinationApplicationSecurityGroups",
    )
    properties.destination_port_range = AAZStrType(
        serialized_name="destinationPortRange",
    )
    properties.destination_port_ranges = AAZListType(
        serialized_name="destinationPortRanges",
    )
    properties.direction = AAZStrType(
        flags={"required": True},
    )
    properties.priority = AAZIntType()
    properties.protocol = AAZStrType(
        flags={"required": True},
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.source_address_prefix = AAZStrType(
        serialized_name="sourceAddressPrefix",
    )
    properties.source_address_prefixes = AAZListType(
        serialized_name="sourceAddressPrefixes",
    )
    properties.source_application_security_groups = AAZListType(
        serialized_name="sourceApplicationSecurityGroups",
    )
    properties.source_port_range = AAZStrType(
        serialized_name="sourcePortRange",
    )
    properties.source_port_ranges = AAZListType(
        serialized_name="sourcePortRanges",
    )

    destination_address_prefixes = _schema_security_rule_read.properties.destination_address_prefixes
    destination_address_prefixes.Element = AAZStrType()

    destination_application_security_groups = _schema_security_rule_read.properties.destination_application_security_groups
    destination_application_security_groups.Element = AAZObjectType()
    cls._build_schema_application_security_group_read(destination_application_security_groups.Element)

    destination_port_ranges = _schema_security_rule_read.properties.destination_port_ranges
    destination_port_ranges.Element = AAZStrType()

    source_address_prefixes = _schema_security_rule_read.properties.source_address_prefixes
    source_address_prefixes.Element = AAZStrType()

    source_application_security_groups = _schema_security_rule_read.properties.source_application_security_groups
    source_application_security_groups.Element = AAZObjectType()
    cls._build_schema_application_security_group_read(source_application_security_groups.Element)

    source_port_ranges = _schema_security_rule_read.properties.source_port_ranges
    source_port_ranges.Element = AAZStrType()

    # Expose the freshly built schema's fields through the caller's placeholder.
    _schema.etag = cls._schema_security_rule_read.etag
    _schema.id = cls._schema_security_rule_read.id
    _schema.name = cls._schema_security_rule_read.name
    _schema.properties = cls._schema_security_rule_read.properties
    _schema.type = cls._schema_security_rule_read.type
# Class-level cache for the ServiceEndpointPolicy read schema; built lazily below.
_schema_service_endpoint_policy_read = None

@classmethod
def _build_schema_service_endpoint_policy_read(cls, _schema):
    """Wire the shared ServiceEndpointPolicy read schema onto *_schema*.

    Built once per class and cached in
    ``cls._schema_service_endpoint_policy_read``.  This schema is mutually
    recursive with the subnet schema (``subnets`` below calls
    ``_build_schema_subnet_read``, which refers back to this builder), so
    the cache must be installed before the fields are populated.
    """
    # Cache hit: re-point the caller's fields at the cached schema.
    if cls._schema_service_endpoint_policy_read is not None:
        _schema.etag = cls._schema_service_endpoint_policy_read.etag
        _schema.id = cls._schema_service_endpoint_policy_read.id
        _schema.kind = cls._schema_service_endpoint_policy_read.kind
        _schema.location = cls._schema_service_endpoint_policy_read.location
        _schema.name = cls._schema_service_endpoint_policy_read.name
        _schema.properties = cls._schema_service_endpoint_policy_read.properties
        _schema.tags = cls._schema_service_endpoint_policy_read.tags
        _schema.type = cls._schema_service_endpoint_policy_read.type
        return

    # Install the cache before populating fields so mutual recursion terminates.
    cls._schema_service_endpoint_policy_read = _schema_service_endpoint_policy_read = AAZObjectType()

    service_endpoint_policy_read = _schema_service_endpoint_policy_read
    service_endpoint_policy_read.etag = AAZStrType(
        flags={"read_only": True},
    )
    service_endpoint_policy_read.id = AAZStrType()
    service_endpoint_policy_read.kind = AAZStrType(
        flags={"read_only": True},
    )
    service_endpoint_policy_read.location = AAZStrType()
    service_endpoint_policy_read.name = AAZStrType(
        flags={"read_only": True},
    )
    service_endpoint_policy_read.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    service_endpoint_policy_read.tags = AAZDictType()
    service_endpoint_policy_read.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = _schema_service_endpoint_policy_read.properties
    properties.contextual_service_endpoint_policies = AAZListType(
        serialized_name="contextualServiceEndpointPolicies",
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.resource_guid = AAZStrType(
        serialized_name="resourceGuid",
        flags={"read_only": True},
    )
    properties.service_alias = AAZStrType(
        serialized_name="serviceAlias",
    )
    properties.service_endpoint_policy_definitions = AAZListType(
        serialized_name="serviceEndpointPolicyDefinitions",
    )
    properties.subnets = AAZListType(
        flags={"read_only": True},
    )

    contextual_service_endpoint_policies = _schema_service_endpoint_policy_read.properties.contextual_service_endpoint_policies
    contextual_service_endpoint_policies.Element = AAZStrType()

    service_endpoint_policy_definitions = _schema_service_endpoint_policy_read.properties.service_endpoint_policy_definitions
    service_endpoint_policy_definitions.Element = AAZObjectType()

    _element = _schema_service_endpoint_policy_read.properties.service_endpoint_policy_definitions.Element
    _element.etag = AAZStrType(
        flags={"read_only": True},
    )
    _element.id = AAZStrType()
    _element.name = AAZStrType()
    _element.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    _element.type = AAZStrType()

    properties = _schema_service_endpoint_policy_read.properties.service_endpoint_policy_definitions.Element.properties
    properties.description = AAZStrType()
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.service = AAZStrType()
    properties.service_resources = AAZListType(
        serialized_name="serviceResources",
    )

    service_resources = _schema_service_endpoint_policy_read.properties.service_endpoint_policy_definitions.Element.properties.service_resources
    service_resources.Element = AAZStrType()

    subnets = _schema_service_endpoint_policy_read.properties.subnets
    subnets.Element = AAZObjectType()
    # Mutually recursive reference to the subnet schema.
    cls._build_schema_subnet_read(subnets.Element)

    tags = _schema_service_endpoint_policy_read.tags
    tags.Element = AAZStrType()

    # Expose the freshly built schema's fields through the caller's placeholder.
    _schema.etag = cls._schema_service_endpoint_policy_read.etag
    _schema.id = cls._schema_service_endpoint_policy_read.id
    _schema.kind = cls._schema_service_endpoint_policy_read.kind
    _schema.location = cls._schema_service_endpoint_policy_read.location
    _schema.name = cls._schema_service_endpoint_policy_read.name
    _schema.properties = cls._schema_service_endpoint_policy_read.properties
    _schema.tags = cls._schema_service_endpoint_policy_read.tags
    _schema.type = cls._schema_service_endpoint_policy_read.type
# Class-level cache for the SubResource read schema; built lazily below.
_schema_sub_resource_read = None

@classmethod
def _build_schema_sub_resource_read(cls, _schema):
    """Wire the shared SubResource read schema (a bare resource ``id``)
    onto *_schema*, building and caching the schema on first use."""
    if cls._schema_sub_resource_read is None:
        # First call: publish the cache, then populate it.
        cls._schema_sub_resource_read = schema = AAZObjectType()
        schema.id = AAZStrType()
    _schema.id = cls._schema_sub_resource_read.id
# Class-level cache for the Subnet read schema; built lazily below.
_schema_subnet_read = None

@classmethod
def _build_schema_subnet_read(cls, _schema):
    """Wire the shared Subnet read schema onto *_schema*.

    Built once per class and cached in ``cls._schema_subnet_read``.  The
    schema is recursive: ``ip_configuration_profiles`` and the route
    table's ``subnets`` list below re-enter this builder, and
    ``service_endpoint_policies`` refers to a schema that refers back to
    subnets.  The cache must therefore be installed before the fields are
    populated.
    """
    # Cache hit: re-point the caller's fields at the cached schema.
    if cls._schema_subnet_read is not None:
        _schema.etag = cls._schema_subnet_read.etag
        _schema.id = cls._schema_subnet_read.id
        _schema.name = cls._schema_subnet_read.name
        _schema.properties = cls._schema_subnet_read.properties
        _schema.type = cls._schema_subnet_read.type
        return

    # Install the cache before populating fields so recursion terminates.
    cls._schema_subnet_read = _schema_subnet_read = AAZObjectType()

    subnet_read = _schema_subnet_read
    subnet_read.etag = AAZStrType(
        flags={"read_only": True},
    )
    subnet_read.id = AAZStrType()
    subnet_read.name = AAZStrType()
    subnet_read.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    subnet_read.type = AAZStrType()

    properties = _schema_subnet_read.properties
    properties.address_prefix = AAZStrType(
        serialized_name="addressPrefix",
    )
    properties.address_prefixes = AAZListType(
        serialized_name="addressPrefixes",
    )
    properties.application_gateway_ip_configurations = AAZListType(
        serialized_name="applicationGatewayIpConfigurations",
    )
    properties.delegations = AAZListType()
    properties.ip_allocations = AAZListType(
        serialized_name="ipAllocations",
    )
    properties.ip_configuration_profiles = AAZListType(
        serialized_name="ipConfigurationProfiles",
        flags={"read_only": True},
    )
    properties.ip_configurations = AAZListType(
        serialized_name="ipConfigurations",
        flags={"read_only": True},
    )
    properties.nat_gateway = AAZObjectType(
        serialized_name="natGateway",
    )
    cls._build_schema_sub_resource_read(properties.nat_gateway)
    properties.network_security_group = AAZObjectType(
        serialized_name="networkSecurityGroup",
    )
    cls._build_schema_network_security_group_read(properties.network_security_group)
    properties.private_endpoint_network_policies = AAZStrType(
        serialized_name="privateEndpointNetworkPolicies",
    )
    properties.private_endpoints = AAZListType(
        serialized_name="privateEndpoints",
        flags={"read_only": True},
    )
    properties.private_link_service_network_policies = AAZStrType(
        serialized_name="privateLinkServiceNetworkPolicies",
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.purpose = AAZStrType(
        flags={"read_only": True},
    )
    properties.resource_navigation_links = AAZListType(
        serialized_name="resourceNavigationLinks",
        flags={"read_only": True},
    )
    properties.route_table = AAZObjectType(
        serialized_name="routeTable",
    )
    properties.service_association_links = AAZListType(
        serialized_name="serviceAssociationLinks",
        flags={"read_only": True},
    )
    properties.service_endpoint_policies = AAZListType(
        serialized_name="serviceEndpointPolicies",
    )
    properties.service_endpoints = AAZListType(
        serialized_name="serviceEndpoints",
    )

    address_prefixes = _schema_subnet_read.properties.address_prefixes
    address_prefixes.Element = AAZStrType()

    application_gateway_ip_configurations = _schema_subnet_read.properties.application_gateway_ip_configurations
    application_gateway_ip_configurations.Element = AAZObjectType()

    _element = _schema_subnet_read.properties.application_gateway_ip_configurations.Element
    _element.etag = AAZStrType(
        flags={"read_only": True},
    )
    _element.id = AAZStrType()
    _element.name = AAZStrType()
    _element.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    _element.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = _schema_subnet_read.properties.application_gateway_ip_configurations.Element.properties
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.subnet = AAZObjectType()
    cls._build_schema_sub_resource_read(properties.subnet)

    delegations = _schema_subnet_read.properties.delegations
    delegations.Element = AAZObjectType()

    _element = _schema_subnet_read.properties.delegations.Element
    _element.etag = AAZStrType(
        flags={"read_only": True},
    )
    _element.id = AAZStrType()
    _element.name = AAZStrType()
    _element.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    _element.type = AAZStrType()

    properties = _schema_subnet_read.properties.delegations.Element.properties
    properties.actions = AAZListType(
        flags={"read_only": True},
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.service_name = AAZStrType(
        serialized_name="serviceName",
    )

    actions = _schema_subnet_read.properties.delegations.Element.properties.actions
    actions.Element = AAZStrType()

    ip_allocations = _schema_subnet_read.properties.ip_allocations
    ip_allocations.Element = AAZObjectType()
    cls._build_schema_sub_resource_read(ip_allocations.Element)

    ip_configuration_profiles = _schema_subnet_read.properties.ip_configuration_profiles
    ip_configuration_profiles.Element = AAZObjectType()

    _element = _schema_subnet_read.properties.ip_configuration_profiles.Element
    _element.etag = AAZStrType(
        flags={"read_only": True},
    )
    _element.id = AAZStrType()
    _element.name = AAZStrType()
    _element.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    _element.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = _schema_subnet_read.properties.ip_configuration_profiles.Element.properties
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.subnet = AAZObjectType()
    # Recursive reference back into this builder (terminates via cache).
    cls._build_schema_subnet_read(properties.subnet)

    ip_configurations = _schema_subnet_read.properties.ip_configurations
    ip_configurations.Element = AAZObjectType()
    cls._build_schema_ip_configuration_read(ip_configurations.Element)

    private_endpoints = _schema_subnet_read.properties.private_endpoints
    private_endpoints.Element = AAZObjectType()
    cls._build_schema_private_endpoint_read(private_endpoints.Element)

    resource_navigation_links = _schema_subnet_read.properties.resource_navigation_links
    resource_navigation_links.Element = AAZObjectType()

    _element = _schema_subnet_read.properties.resource_navigation_links.Element
    _element.etag = AAZStrType(
        flags={"read_only": True},
    )
    _element.id = AAZStrType(
        flags={"read_only": True},
    )
    _element.name = AAZStrType()
    _element.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    _element.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = _schema_subnet_read.properties.resource_navigation_links.Element.properties
    properties.link = AAZStrType()
    properties.linked_resource_type = AAZStrType(
        serialized_name="linkedResourceType",
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )

    route_table = _schema_subnet_read.properties.route_table
    route_table.etag = AAZStrType(
        flags={"read_only": True},
    )
    route_table.id = AAZStrType()
    route_table.location = AAZStrType()
    route_table.name = AAZStrType(
        flags={"read_only": True},
    )
    route_table.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    route_table.tags = AAZDictType()
    route_table.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = _schema_subnet_read.properties.route_table.properties
    properties.disable_bgp_route_propagation = AAZBoolType(
        serialized_name="disableBgpRoutePropagation",
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.resource_guid = AAZStrType(
        serialized_name="resourceGuid",
        flags={"read_only": True},
    )
    properties.routes = AAZListType()
    properties.subnets = AAZListType(
        flags={"read_only": True},
    )

    routes = _schema_subnet_read.properties.route_table.properties.routes
    routes.Element = AAZObjectType()

    _element = _schema_subnet_read.properties.route_table.properties.routes.Element
    _element.etag = AAZStrType(
        flags={"read_only": True},
    )
    _element.id = AAZStrType()
    _element.name = AAZStrType()
    _element.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    _element.type = AAZStrType()

    properties = _schema_subnet_read.properties.route_table.properties.routes.Element.properties
    properties.address_prefix = AAZStrType(
        serialized_name="addressPrefix",
    )
    properties.has_bgp_override = AAZBoolType(
        serialized_name="hasBgpOverride",
    )
    properties.next_hop_ip_address = AAZStrType(
        serialized_name="nextHopIpAddress",
    )
    properties.next_hop_type = AAZStrType(
        serialized_name="nextHopType",
        flags={"required": True},
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )

    subnets = _schema_subnet_read.properties.route_table.properties.subnets
    subnets.Element = AAZObjectType()
    # Recursive reference back into this builder (terminates via cache).
    cls._build_schema_subnet_read(subnets.Element)

    tags = _schema_subnet_read.properties.route_table.tags
    tags.Element = AAZStrType()

    service_association_links = _schema_subnet_read.properties.service_association_links
    service_association_links.Element = AAZObjectType()

    _element = _schema_subnet_read.properties.service_association_links.Element
    _element.etag = AAZStrType(
        flags={"read_only": True},
    )
    _element.id = AAZStrType()
    _element.name = AAZStrType()
    _element.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    _element.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = _schema_subnet_read.properties.service_association_links.Element.properties
    properties.allow_delete = AAZBoolType(
        serialized_name="allowDelete",
    )
    properties.link = AAZStrType()
    properties.linked_resource_type = AAZStrType(
        serialized_name="linkedResourceType",
    )
    properties.locations = AAZListType()
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )

    locations = _schema_subnet_read.properties.service_association_links.Element.properties.locations
    locations.Element = AAZStrType()

    service_endpoint_policies = _schema_subnet_read.properties.service_endpoint_policies
    service_endpoint_policies.Element = AAZObjectType()
    # Mutually recursive: the policy schema refers back to subnets.
    cls._build_schema_service_endpoint_policy_read(service_endpoint_policies.Element)

    service_endpoints = _schema_subnet_read.properties.service_endpoints
    service_endpoints.Element = AAZObjectType()

    _element = _schema_subnet_read.properties.service_endpoints.Element
    _element.locations = AAZListType()
    _element.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    _element.service = AAZStrType()

    locations = _schema_subnet_read.properties.service_endpoints.Element.locations
    locations.Element = AAZStrType()

    # Expose the freshly built schema's fields through the caller's placeholder.
    _schema.etag = cls._schema_subnet_read.etag
    _schema.id = cls._schema_subnet_read.id
    _schema.name = cls._schema_subnet_read.name
    _schema.properties = cls._schema_subnet_read.properties
    _schema.type = cls._schema_subnet_read.type
# Class-level cache for the VirtualNetworkTap read schema; built lazily below.
_schema_virtual_network_tap_read = None

@classmethod
def _build_schema_virtual_network_tap_read(cls, _schema):
    """Wire the shared VirtualNetworkTap read schema onto *_schema*.

    Built once per class and cached in
    ``cls._schema_virtual_network_tap_read`` so repeated references reuse
    one type definition.
    """
    # Cache hit: re-point the caller's fields at the cached schema.
    if cls._schema_virtual_network_tap_read is not None:
        _schema.etag = cls._schema_virtual_network_tap_read.etag
        _schema.id = cls._schema_virtual_network_tap_read.id
        _schema.location = cls._schema_virtual_network_tap_read.location
        _schema.name = cls._schema_virtual_network_tap_read.name
        _schema.properties = cls._schema_virtual_network_tap_read.properties
        _schema.tags = cls._schema_virtual_network_tap_read.tags
        _schema.type = cls._schema_virtual_network_tap_read.type
        return

    # Install the cache before populating fields (matches the other builders).
    cls._schema_virtual_network_tap_read = _schema_virtual_network_tap_read = AAZObjectType()

    virtual_network_tap_read = _schema_virtual_network_tap_read
    virtual_network_tap_read.etag = AAZStrType(
        flags={"read_only": True},
    )
    virtual_network_tap_read.id = AAZStrType()
    virtual_network_tap_read.location = AAZStrType()
    virtual_network_tap_read.name = AAZStrType(
        flags={"read_only": True},
    )
    virtual_network_tap_read.properties = AAZObjectType(
        flags={"client_flatten": True},
    )
    virtual_network_tap_read.tags = AAZDictType()
    virtual_network_tap_read.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = _schema_virtual_network_tap_read.properties
    properties.destination_load_balancer_front_end_ip_configuration = AAZObjectType(
        serialized_name="destinationLoadBalancerFrontEndIPConfiguration",
    )
    cls._build_schema_frontend_ip_configuration_read(properties.destination_load_balancer_front_end_ip_configuration)
    properties.destination_network_interface_ip_configuration = AAZObjectType(
        serialized_name="destinationNetworkInterfaceIPConfiguration",
    )
    cls._build_schema_network_interface_ip_configuration_read(properties.destination_network_interface_ip_configuration)
    properties.destination_port = AAZIntType(
        serialized_name="destinationPort",
    )
    properties.network_interface_tap_configurations = AAZListType(
        serialized_name="networkInterfaceTapConfigurations",
        flags={"read_only": True},
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )
    properties.resource_guid = AAZStrType(
        serialized_name="resourceGuid",
        flags={"read_only": True},
    )

    network_interface_tap_configurations = _schema_virtual_network_tap_read.properties.network_interface_tap_configurations
    network_interface_tap_configurations.Element = AAZObjectType()
    cls._build_schema_network_interface_tap_configuration_read(network_interface_tap_configurations.Element)

    tags = _schema_virtual_network_tap_read.tags
    tags.Element = AAZStrType()

    # Expose the freshly built schema's fields through the caller's placeholder.
    _schema.etag = cls._schema_virtual_network_tap_read.etag
    _schema.id = cls._schema_virtual_network_tap_read.id
    _schema.location = cls._schema_virtual_network_tap_read.location
    _schema.name = cls._schema_virtual_network_tap_read.name
    _schema.properties = cls._schema_virtual_network_tap_read.properties
    _schema.tags = cls._schema_virtual_network_tap_read.tags
    _schema.type = cls._schema_virtual_network_tap_read.type
# Public surface of this module: only the "List" command class.
__all__ = ["List"]
| [
"noreply@github.com"
] | noreply@github.com |
aa8a4755acbc4c4f51caf1502ace59a767713b17 | 4c5f84772962f0c9c89b990df9e1933f7ff8d406 | /prospb/urls.py | 98621bd76cf441fb249aa4a11805297c5d143aa2 | [] | no_license | lucazartu/prospb | 0fe2804da812371006256b4c4c7dd4671536b39c | 645c30fe23643ec598047678535871e2e5d327a8 | refs/heads/master | 2020-07-20T18:56:25.244621 | 2016-08-22T22:52:35 | 2016-08-22T22:52:35 | 66,313,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | """prospb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from core import views
# Site-level URL routes, matched top to bottom against the request path.
urlpatterns = [
    url(r'^$', views.index, name='home'),  # landing page
    # NOTE(review): the prefixes below are not anchored with a trailing '$',
    # so any path beginning with the prefix matches -- confirm intended.
    url(r'^cursos/', views.cursos, name='cursos'),
    url(r'^contato/', views.contato, name='contato'),
    url(r'^admin/', admin.site.urls),  # Django admin site
]
| [
"laqcs@cin.ufpe.br"
] | laqcs@cin.ufpe.br |
37dccfd79428f9a1109efb012e45f8792f7bc5f1 | 441a3063db206ce6dc5691d3722cfa2e7f498b35 | /panel/api/user/urls.py | fece51a925f601e2564100329aa08ce90eee80e1 | [
"MIT"
] | permissive | rrosajp/vigilio | 5014b7e07f6c2f0a886596e27a8758a55075b6b6 | d21bf4f9d39e5dcde5d7c21476d8650e914c3c66 | refs/heads/master | 2023-04-23T19:12:06.413788 | 2021-05-04T12:54:57 | 2021-05-04T12:54:57 | 352,330,016 | 1 | 0 | MIT | 2021-05-04T17:55:58 | 2021-03-28T12:59:37 | Python | UTF-8 | Python | false | false | 193 | py | from django.urls import path
from . import views
# URL routes for the user API app.
urlpatterns = [
    path(
        "change-password",
        views.ChangePasswordEndpoint.as_view(),  # class-based view entry point
        name="change_password",  # lookup name for reverse() / {% url %}
    ),
]
| [
"zortoporto@gmail.com"
] | zortoporto@gmail.com |
af9a0a783f94d08519d1f0e3125f060e8ec4cb96 | 8761902ce8b75afcf116c84615216fab4e009f5d | /setup.py | 658c8e26333eeffd1e44427b3e7a8d2364e1ef90 | [] | no_license | phanhongan/scheduler | 5796978d17761d87bbce635b077c5a6bca4c0b83 | f2f23ea6bb4ebdda3458e6c4b1718411a4dd9af4 | refs/heads/master | 2020-03-18T21:21:59.551883 | 2018-05-29T10:04:46 | 2018-05-29T10:04:46 | 135,276,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from setuptools import setup
# Packaging metadata for the "scheduler" distribution.
setup(name='scheduler',
      version='0.1',
      description='Scheduler with timezone support',
      url='http://github.com/phanhongan/scheduler',
      author='Phan Hong An',
      license='MIT',
      packages=['scheduler'],  # single top-level package to install
      keywords=[
          'scheduler', 'schedule', 'scheduling', 'timezone'
      ],
      zip_safe=False  # do not install as a zipped egg
      )
| [
"phanhongan@gmail.com"
] | phanhongan@gmail.com |
983c6da847e1914aa65d3aba6a6534f774a4e39f | 615855e8218fa182cd8f14480695de51c3eb0cb4 | /ga/app/knapsackproblem/deapCustomize/algorithms.py | 348ca66dafe1803d6ec1157480f9b9ca9d7d28a2 | [] | no_license | sandy230207/GA-for-Knapsack-Problem | 586b6a8391bb8bbb520e960e4efadc1de8eafd7f | 31ff14be848a405faf63a6c12b88809f886b0924 | refs/heads/master | 2022-10-20T01:55:37.404787 | 2020-01-07T16:56:44 | 2020-01-07T16:56:44 | 232,328,678 | 0 | 1 | null | 2022-10-01T09:08:53 | 2020-01-07T13:18:48 | Python | UTF-8 | Python | false | false | 23,153 | py | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`algorithms` module is intended to contain some specific algorithms
in order to execute very common evolutionary algorithms. The method used here
are more for convenience than reference as the implementation of every
evolutionary algorithm may vary infinitely. Most of the algorithms in this
module use operators registered in the toolbox. Generally, the keyword used are
:meth:`mate` for crossover, :meth:`mutate` for mutation, :meth:`~deap.select`
for selection and :meth:`evaluate` for evaluation.
You are encouraged to write your own algorithms in order to make them do what
you really want them to do.
"""
import random
from collections import defaultdict
from . import tools
def varAnd(population, toolbox, cxpb, mutpb):
"""Part of an evolutionary algorithm applying only the variation part
(crossover **and** mutation). The modified individuals have their
fitness invalidated. The individuals are cloned so returned population is
independent of the input population.
:param population: A list of individuals to vary.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:returns: A list of varied individuals that are independent of their
parents.
The variation goes as follow. First, the parental population
:math:`P_\mathrm{p}` is duplicated using the :meth:`toolbox.clone` method
and the result is put into the offspring population :math:`P_\mathrm{o}`. A
first loop over :math:`P_\mathrm{o}` is executed to mate pairs of
consecutive individuals. According to the crossover probability *cxpb*, the
individuals :math:`\mathbf{x}_i` and :math:`\mathbf{x}_{i+1}` are mated
using the :meth:`toolbox.mate` method. The resulting children
:math:`\mathbf{y}_i` and :math:`\mathbf{y}_{i+1}` replace their respective
parents in :math:`P_\mathrm{o}`. A second loop over the resulting
:math:`P_\mathrm{o}` is executed to mutate every individual with a
probability *mutpb*. When an individual is mutated it replaces its not
mutated version in :math:`P_\mathrm{o}`. The resulting :math:`P_\mathrm{o}`
is returned.
This variation is named *And* beceause of its propention to apply both
crossover and mutation on the individuals. Note that both operators are
not applied systematicaly, the resulting individuals can be generated from
crossover only, mutation only, crossover and mutation, and reproduction
according to the given probabilities. Both probabilities should be in
:math:`[0, 1]`.
"""
offspring = [toolbox.clone(ind) for ind in population]
# Apply crossover and mutation on the offspring
for i in range(1, len(offspring), 2):
if random.random() < cxpb:
offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1],
offspring[i])
del offspring[i - 1].fitness.values, offspring[i].fitness.values
for i in range(len(offspring)):
if random.random() < mutpb:
offspring[i], = toolbox.mutate(offspring[i])
del offspring[i].fitness.values
return offspring
def eaSimple(population, toolbox, cxpb, mutpb, ngen, stats=None,
halloffame=None, verbose=__debug__):
"""This algorithm reproduce the simplest evolutionary algorithm as
presented in chapter 7 of [Back2000]_.
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:param ngen: The number of generation.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution
The algorithm takes in a population and evolves it in place using the
:meth:`varAnd` method. It returns the optimized population and a
:class:`~deap.tools.Logbook` with the statistics of the evolution. The
logbook will contain the generation number, the number of evalutions for
each generation and the statistics if a :class:`~deap.tools.Statistics` is
given as argument. The *cxpb* and *mutpb* arguments are passed to the
:func:`varAnd` function. The pseudocode goes as follow ::
evaluate(population)
for g in range(ngen):
population = select(population, len(population))
offspring = varAnd(population, toolbox, cxpb, mutpb)
evaluate(offspring)
population = offspring
As stated in the pseudocode above, the algorithm goes as follow. First, it
evaluates the individuals with an invalid fitness. Second, it enters the
generational loop where the selection procedure is applied to entirely
replace the parental population. The 1:1 replacement ratio of this
algorithm **requires** the selection procedure to be stochastic and to
select multiple times the same individual, for example,
:func:`~deap.tools.selTournament` and :func:`~deap.tools.selRoulette`.
Third, it applies the :func:`varAnd` function to produce the next
generation population. Fourth, it evaluates the new individuals and
compute the statistics on this population. Finally, when *ngen*
generations are done, the algorithm returns a tuple with the final
population and a :class:`~deap.tools.Logbook` of the evolution.
.. note::
Using a non-stochastic selection method will result in no selection as
the operator selects *n* individuals from a pool of *n*.
This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox.
.. [Back2000] Back, Fogel and Michalewicz, "Evolutionary Computation 1 :
Basic Algorithms and Operators", 2000.
"""
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
# Begin the generational process
for gen in range(1, ngen + 1):
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = varAnd(offspring, toolbox, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
return population, logbook
def varOr(population, toolbox, lambda_, cxpb, mutpb):
"""Part of an evolutionary algorithm applying only the variation part
(crossover, mutation **or** reproduction). The modified individuals have
their fitness invalidated. The individuals are cloned so returned
population is independent of the input population.
:param population: A list of individuals to vary.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param lambda\_: The number of children to produce
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:returns: The final population.
The variation goes as follow. On each of the *lambda_* iteration, it
selects one of the three operations; crossover, mutation or reproduction.
In the case of a crossover, two individuals are selected at random from
the parental population :math:`P_\mathrm{p}`, those individuals are cloned
using the :meth:`toolbox.clone` method and then mated using the
:meth:`toolbox.mate` method. Only the first child is appended to the
offspring population :math:`P_\mathrm{o}`, the second child is discarded.
In the case of a mutation, one individual is selected at random from
:math:`P_\mathrm{p}`, it is cloned and then mutated using using the
:meth:`toolbox.mutate` method. The resulting mutant is appended to
:math:`P_\mathrm{o}`. In the case of a reproduction, one individual is
selected at random from :math:`P_\mathrm{p}`, cloned and appended to
:math:`P_\mathrm{o}`.
This variation is named *Or* beceause an offspring will never result from
both operations crossover and mutation. The sum of both probabilities
shall be in :math:`[0, 1]`, the reproduction probability is
1 - *cxpb* - *mutpb*.
"""
assert (cxpb + mutpb) <= 1.0, (
"The sum of the crossover and mutation probabilities must be smaller "
"or equal to 1.0.")
offspring = []
for _ in range(lambda_):
op_choice = random.random()
if op_choice < cxpb: # Apply crossover
ind1, ind2 = list(map(toolbox.clone, random.sample(population, 2)))
ind1, ind2 = toolbox.mate(ind1, ind2)
del ind1.fitness.values
offspring.append(ind1)
elif op_choice < cxpb + mutpb: # Apply mutation
ind = toolbox.clone(random.choice(population))
ind, = toolbox.mutate(ind)
del ind.fitness.values
offspring.append(ind)
else: # Apply reproduction
offspring.append(random.choice(population))
return offspring
def eaMuPlusLambda(population, toolbox, mu, lambda_, cxpb, mutpb, ngen,
stats=None, halloffame=None, verbose=__debug__):
"""This is the :math:`(\mu + \lambda)` evolutionary algorithm.
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param mu: The number of individuals to select for the next generation.
:param lambda\_: The number of children to produce at each generation.
:param cxpb: The probability that an offspring is produced by crossover.
:param mutpb: The probability that an offspring is produced by mutation.
:param ngen: The number of generation.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution.
The algorithm takes in a population and evolves it in place using the
:func:`varOr` function. It returns the optimized population and a
:class:`~deap.tools.Logbook` with the statistics of the evolution. The
logbook will contain the generation number, the number of evalutions for
each generation and the statistics if a :class:`~deap.tools.Statistics` is
given as argument. The *cxpb* and *mutpb* arguments are passed to the
:func:`varOr` function. The pseudocode goes as follow ::
evaluate(population)
for g in range(ngen):
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
evaluate(offspring)
population = select(population + offspring, mu)
First, the individuals having an invalid fitness are evaluated. Second,
the evolutionary loop begins by producing *lambda_* offspring from the
population, the offspring are generated by the :func:`varOr` function. The
offspring are then evaluated and the next generation population is
selected from both the offspring **and** the population. Finally, when
*ngen* generations are done, the algorithm returns a tuple with the final
population and a :class:`~deap.tools.Logbook` of the evolution.
This function expects :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox. This algorithm uses the :func:`varOr`
variation.
"""
genlog = defaultdict(dict)
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
# Begin the generational process
for gen in range(1, ngen + 1):
# Vary the population
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
#add invalid_ind to log
genlog['invalid_ind'][gen] = invalid_ind
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Select the next generation population
population[:] = toolbox.select(population + offspring, mu)
#add population to log
genlog['population'][gen] = population[:]
# Update the statistics with the new population
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
return population, logbook, genlog
def eaMuCommaLambda(population, toolbox, mu, lambda_, cxpb, mutpb, ngen,
stats=None, halloffame=None, verbose=__debug__):
"""This is the :math:`(\mu~,~\lambda)` evolutionary algorithm.
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param mu: The number of individuals to select for the next generation.
:param lambda\_: The number of children to produce at each generation.
:param cxpb: The probability that an offspring is produced by crossover.
:param mutpb: The probability that an offspring is produced by mutation.
:param ngen: The number of generation.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution
The algorithm takes in a population and evolves it in place using the
:func:`varOr` function. It returns the optimized population and a
:class:`~deap.tools.Logbook` with the statistics of the evolution. The
logbook will contain the generation number, the number of evalutions for
each generation and the statistics if a :class:`~deap.tools.Statistics` is
given as argument. The *cxpb* and *mutpb* arguments are passed to the
:func:`varOr` function. The pseudocode goes as follow ::
evaluate(population)
for g in range(ngen):
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
evaluate(offspring)
population = select(offspring, mu)
First, the individuals having an invalid fitness are evaluated. Second,
the evolutionary loop begins by producing *lambda_* offspring from the
population, the offspring are generated by the :func:`varOr` function. The
offspring are then evaluated and the next generation population is
selected from **only** the offspring. Finally, when
*ngen* generations are done, the algorithm returns a tuple with the final
population and a :class:`~deap.tools.Logbook` of the evolution.
.. note::
Care must be taken when the lambda:mu ratio is 1 to 1 as a
non-stochastic selection will result in no selection at all as the
operator selects *lambda* individuals from a pool of *mu*.
This function expects :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox. This algorithm uses the :func:`varOr`
variation.
"""
assert lambda_ >= mu, "lambda must be greater or equal to mu."
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
# Begin the generational process
for gen in range(1, ngen + 1):
# Vary the population
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Select the next generation population
population[:] = toolbox.select(offspring, mu)
# Update the statistics with the new population
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
return population, logbook
def eaGenerateUpdate(toolbox, ngen, halloffame=None, stats=None,
verbose=__debug__):
"""This is algorithm implements the ask-tell model proposed in
[Colette2010]_, where ask is called `generate` and tell is called `update`.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param ngen: The number of generation.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution
The algorithm generates the individuals using the :func:`toolbox.generate`
function and updates the generation method with the :func:`toolbox.update`
function. It returns the optimized population and a
:class:`~deap.tools.Logbook` with the statistics of the evolution. The
logbook will contain the generation number, the number of evalutions for
each generation and the statistics if a :class:`~deap.tools.Statistics` is
given as argument. The pseudocode goes as follow ::
for g in range(ngen):
population = toolbox.generate()
evaluate(population)
toolbox.update(population)
.. [Colette2010] Collette, Y., N. Hansen, G. Pujol, D. Salazar Aponte and
R. Le Riche (2010). On Object-Oriented Programming of Optimizers -
Examples in Scilab. In P. Breitkopf and R. F. Coelho, eds.:
Multidisciplinary Design Optimization in Computational Mechanics,
Wiley, pp. 527-565;
"""
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
for gen in range(ngen):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
# Update the strategy with the evaluated individuals
toolbox.update(population)
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(population), **record)
if verbose:
print(logbook.stream)
return population, logbook
| [
"sandy230207@gmail.com"
] | sandy230207@gmail.com |
f7c7c421831b9eeb7c1aeb2c8bd83ab719ec2177 | 7c0fa2655c7a6010e5d8b797273a8bc6b88ef1de | /apps/goods/urls.py | 5daca772e8c367b7ae30f74a8dadce6decbfa481 | [] | no_license | chenxiaoyu2/OrderMall | 54ee02e40dff9fafcf96e6742e2158537f004f18 | e7a2595c1074d545f1f17016f71312f5da03c2cc | refs/heads/master | 2022-12-11T14:41:12.834239 | 2019-04-16T12:03:17 | 2019-04-16T12:03:17 | 179,245,447 | 2 | 0 | null | 2022-12-08T04:59:26 | 2019-04-03T08:29:31 | HTML | UTF-8 | Python | false | false | 388 | py | from django.conf.urls import url
from apps.goods.views import IndexView, DetailView, ListView
urlpatterns = [
url(r'^index$', IndexView.as_view(), name='index'), # 首页
url(r'^goods/(?P<goods_id>\d+)$', DetailView.as_view(), name='detail'), # 详情页
url(r'^list/(?P<type_id>\d+)/(?P<page>\d+)$', ListView.as_view(), name='list'), # 列表页
]
| [
"1480456073@qq.com"
] | 1480456073@qq.com |
0b29e0bdb53c18affd0a1891b1bdceb44d34336b | 1139234b2bb7f868c15310719125e693cfcde718 | /cricket_portal/cricket_portal/urls.py | 60c91c224c76b57fd914eeb5c7ac3e31c563ed19 | [] | no_license | pfirefly/cricket-portal | 6d7ff31deb00dcf47b84757e6339a26d4b15e087 | b52238fdcc05fe37ddfa4f921998542544d31f9e | refs/heads/master | 2020-04-23T11:10:19.891578 | 2019-02-17T13:57:22 | 2019-02-17T13:57:22 | 171,127,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | """cricket_portal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('match.urls'))
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"noreply@github.com"
] | noreply@github.com |
6458e807ae35ddfbbbd23abd2b56fca869e9cf33 | 04aefaaaefde30ba596fe69ae6391fd2635de994 | /CaC Python/EjerciciosPy1/Ej3.py | 06eeb5be5e90ff23d9baa3e08a736cc35a867f4d | [] | no_license | 35sebastian/Proyecto_Python_1 | 6a7256bdda5e8df2c78a4cf5bc08c6d211c5deae | 07cc7ec5d08eb5064b8ab084cec4121575d0ddab | refs/heads/master | 2023-02-07T13:27:38.962597 | 2023-01-29T13:02:53 | 2023-01-29T13:02:53 | 242,755,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | nombre = input("Ingrese su nombre: ")
edad = int(input("Ingrese su edad: "))
print("Nombre: ", nombre,"\nEdad: ", edad) | [
"vegaa.sebastian@gmail.com"
] | vegaa.sebastian@gmail.com |
4f8360cb9656c65b9ab0af1060d4f523bca4959f | 6a95b330e1beec08b917ff45eccfd6be3fd4629f | /kubernetes/client/models/v1_namespace_status.py | 523e7e43d3f44960d90c934e8371361de7fc1cc0 | [
"Apache-2.0"
] | permissive | TokkoLabs/client-python | f4a83d6540e64861b59e322c951380a670578d7f | f1ad9c6889105d8510472606c98f8d3807f82020 | refs/heads/master | 2023-07-14T01:36:46.152341 | 2017-12-21T21:32:11 | 2017-12-21T21:32:11 | 115,042,671 | 0 | 0 | Apache-2.0 | 2021-08-06T03:29:17 | 2017-12-21T20:05:15 | Python | UTF-8 | Python | false | false | 3,306 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NamespaceStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'phase': 'str'
}
attribute_map = {
'phase': 'phase'
}
def __init__(self, phase=None):
"""
V1NamespaceStatus - a model defined in Swagger
"""
self._phase = None
self.discriminator = None
if phase is not None:
self.phase = phase
@property
def phase(self):
"""
Gets the phase of this V1NamespaceStatus.
Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases
:return: The phase of this V1NamespaceStatus.
:rtype: str
"""
return self._phase
@phase.setter
def phase(self, phase):
"""
Sets the phase of this V1NamespaceStatus.
Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases
:param phase: The phase of this V1NamespaceStatus.
:type: str
"""
self._phase = phase
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NamespaceStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
7e7f5a718ac8033167bc5a225a645a38c8c3650a | e5dd21ac1305257fe163995f951cbbfbf3051fd7 | /Cracking_the_Coding_Interview/8-6.py | c8c86de2f9e0015f6f18220cf9120789b84d7d12 | [] | no_license | QI1002/exampool | d3d3cdad040e4a861b934122ef12e059f7d6cd01 | 08800f78482f9fd9d6641c3eabc5880e69782f42 | refs/heads/master | 2021-01-02T09:35:15.283632 | 2020-10-13T14:54:25 | 2020-10-13T14:54:25 | 99,178,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py |
def paintBox(point, box):
x = point[0]
y = point[1]
if (x < 0 or x >= len(box[0])):
return
if (y < 0 or y >= len(box)):
return
if (box[y][x] == 1):
return
box[y][x] = 1
paintBox((x-1,y), box)
paintBox((x+1,y), box)
paintBox((x,y-1), box)
paintBox((x,y+1), box)
box = []
m = 4
n = 6
for i in range(n):
box.append([])
for j in range(m):
box[i].append(0)
point = (2,3)
paintBox(point, box)
print(box) | [
"alanchang544@gmail.com"
] | alanchang544@gmail.com |
897cf0c437285e8773f49a6e7cb7f12530950287 | 491f9ca49bbb275c99248134c604da9fb43ee9fe | /MD_analysis/process_radius_of_gyration.py | 2d4077af75475dccee4e3c7ab1dad1d1e233f511 | [] | no_license | KineOdegardHanssen/PhD-subprojects | 9ef0facf7da4b2a80b4bea9c890aa04f0ddcfd1a | c275539689b53b94cbb85c0fdb3cea5885fc40e9 | refs/heads/Windows | 2023-06-08T13:32:15.179813 | 2023-06-05T08:40:10 | 2023-06-05T08:40:10 | 195,783,664 | 2 | 0 | null | 2020-08-18T14:42:21 | 2019-07-08T09:49:14 | Python | UTF-8 | Python | false | false | 6,058 | py | import matplotlib.pyplot as plt # To plot
from scipy.optimize import curve_fit
import numpy as np
import random
import math
import time
start_time = time.process_time()
M = 9
N = 101
kangle = 20
kbond = 200
Kangle = kangle
Kbond = kbond
#factors = [0.1,1,10,100,250]
#charges = [0,-1,-5,-10]
#spacings = [1,2,3,4,5,10,40,100]
N = 101
spacing = 1
gridspacing = spacing
spacings = [1,2,3,4,5,6,7,8,10,15,40,100]#[1,2,3,4,5,8,10,15,40,100]
#spacings = [40]
#dielectrics = [1,2,10,100] # For lg = 2 nm
dielectrics = [1,2,10,50,100,1000]
#lengths = [21,61,101,141]
#lensp = [1,3,5,7]
#krfacs = [0.01, 0.05, 0.10, 0.50, 1.00]
#kangles = [20, 100, 200, 1000, 2000]
wallenergies = [1.042]
charge = -1
T = 310
spacesims = False
dielsims = True
wallsims = False
if spacesims == True:
Nsp = len(spacings)
outfilename = 'table_radgyrs_chaingrid_quadratic_M%iN%i_Langevin_wall%.3f_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_varyspacing.txt' % (M,N,wallenergy,kangle,kbond,charge,T)
outfile = open(outfilename,'w')
outfile.write('begin{table}\n\centering\n\caption{}\n begin{tabular}{r|c|c|c|c|c|c|c|c|c|c}\nSpacing/Chain & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & Average \ \ \n\hline\n')
if dielsims == True:
Nsp = len(dielectrics)
outfilename = 'table_radgyrs_chaingrid_quadratic_M%iN%i_Langevin_gridspacing%i_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_varydielectric.txt' % (M,N,gridspacing,Kangle,Kbond,charge,T)
outfile = open(outfilename,'w')
outfile.write('begin{table}\n\centering\n\caption{}\n begin{tabular}{r|c|c|c|c|c|c|c|c|c|c}\nDielectric/Chain & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & Average \ \ \n\hline\n')
if wallsims == True:
Nsp = len(wallenergies)
outfilename = 'table_radgyrs_chaingrid_quadratic_M%iN%i_Langevin_gridspacing%i_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_varywallenergy.txt' % (M,N,gridspacing,Kangle,Kbond,charge,T)
outfile = open(outfilename,'w')
outfile.write('begin{table}\n\centering\n\caption{}\n begin{tabular}{r|c|c|c|c|c|c|c|c|c|c}\n $\epsilon_w$/Chain & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & Average \ \ \n\hline\n')
totalaverage = np.zeros(Nsp)
totalrms = np.zeros(Nsp)
for i in range(Nsp):
if spacesims == True:
spacing = spacings[i]
outfile.write('%i' % spacing)
infilename = 'log.radgyrs_chaingrid_quadratic_M%iN%i_gridspacing%i_Langevin_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge' % (M,N,spacing,kangle,kbond)+str(charge)+'_T%i_theta0is180_twofirst_are_fixed' % T
if dielsims == True:
dielectric = dielectrics[i]
outfile.write('%i' % dielectric)
infilename = 'log.chaingrid_quadratic_M%iN%i_gridspacing%i_Langevin_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_dielectric%i_T%i_theta0is180_twofirst_are_fixed' % (M,N,gridspacing,Kangle,Kbond,charge,dielectric,T)
if wallsims == True:
wallenergy = wallenergies[i]
outfile.write('%.3f' % wallenergy)
infilename = 'log.chaingrid_quadratic_M%iN%i_gridspacing%i_Langevin_wall%.3f_Kangle%i_Kbond%i_debye_kappa1_debyecutoff3_charge%i_T%i_theta0is180_twofirst_are_fixed_with_rgyr' % (M,N,spacing,wallenergy,kangle,kbond,charge,T)
# Readying arrays:
radgyr_average = np.zeros(M)
radgyr_stdv = np.zeros(M)
# This is really not the optimal solution:
allradgyrs_vals = []
allradgyrs_inds = []
infile = open(infilename,'r')
lines = infile.readlines()
#print('infilename:', infilename)
# Finding the mean and rms:
# Finding the mean:
starter1 = 0
starter2 = 0
counter = 0
for line in lines:
words = line.split()
#print('words=',words)
#print('starter1:', starter1, '; starter2:', starter2)
if len(words)>2:
if words[1]=='Run' and words[2]=='and':
# Finding the line: ####################### Run and write to file #########################################
starter1 = 1
#print('First mark hit')
#if starter1==1:
# print(words)
if starter1==1 and starter2==1:
# Test if we should break:
if len(words)>0:
if words[0]=='WARNING:' or words[0]=='Loop':
break
#print('Starting to read data')
if len(words)==12 or len(words)==18:
#print('I am in')
if len(words)==12:
addon = 3
else:
addon = 9
for j in range(M):
#print(words)
thisvalue = float(words[j+addon])
radgyr_average[j] += thisvalue
allradgyrs_vals.append(thisvalue)
allradgyrs_inds.append(j)
counter+=1
if starter1==1 and starter2==0:
if len(words)>0:
if words[0]=='Step':
starter2=1
#print('Second mark hit')
infile.close()
radgyr_average /= counter
totalaverage[i] = np.mean(radgyr_average)
# Finding the rms:
for j in range(len(allradgyrs_vals)):
chain = allradgyrs_inds[j]
val = allradgyrs_vals[j]
radgyr_stdv[chain] += (radgyr_average[chain]-val)**2
totalrms[i] = (totalaverage[i]-val)**2
totalrms[i] = np.sqrt(totalrms[i]/(counter-1))
for j in range(M):
radgyr_stdv[j] = np.sqrt(radgyr_stdv[j]/(counter-1))
outfile.write(' & %.3f$\pm$%.3f' % (radgyr_average[j], radgyr_stdv[j]))
outfile.write(' & %.4f$\pm$%.4f \ \ \n' % (totalaverage[i], totalrms[i]))
outfile.write('\end{tabular}\n\label{table:radgyrs_chain_and_total_something}\n\end{table}')
outfile.close()
| [
"noreply@github.com"
] | noreply@github.com |
ebfaa95f77f169b9ea0772633ca3dcff5322622b | 82f9100372c224f9e23acb38082d05c41f5cee66 | /front_end/chq_form.py | 81d1c1939610896c91ed0fded8104e6901e4c65b | [] | no_license | johnstembridge/DT | 207ed86ebc87e89fea84637cb0321d8beb78bd58 | 4516725d41728f7f27a1f06211eba980830c77ee | refs/heads/master | 2023-06-22T12:16:52.765924 | 2023-06-14T13:26:27 | 2023-06-14T13:26:27 | 187,260,532 | 0 | 0 | null | 2023-02-15T21:24:26 | 2019-05-17T18:02:28 | Python | UTF-8 | Python | false | false | 832 | py | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from front_end.form_helpers import ReadOnlyWidget
from back_end.interface import get_member
class RenewalChequeForm(FlaskForm):
    """Form used to record/confirm a membership renewal paid by cheque.

    Fields are populated from the member record and then typically rendered
    read-only via :meth:`make_readonly`.
    """
    dt_number = StringField(label='DT number')
    amount = StringField(label='Renewal amount')
    description = StringField(label='Renewal')
    submit = SubmitField(label='Save')

    def populate(self, member_id, upgrade, downgrade):
        """Pre-fill the form from the member's record and last payment.

        :param member_id: id passed to get_member()
        :param upgrade: forwarded to long_membership_type()
        :param downgrade: forwarded to long_membership_type()
        """
        member = get_member(member_id)
        payment = member.last_payment()
        # Bug fix: assign to the field's .data (as dt_number/amount do below)
        # instead of replacing the StringField object with a plain string,
        # which broke rendering/validation of the description field.
        self.description.data = member.long_membership_type(upgrade=upgrade, downgrade=downgrade)
        self.dt_number.data = member.dt_number()
        self.amount.data = payment.amount

    def make_readonly(self, field):
        """Swap the named field's widget so it renders as read-only."""
        prop = getattr(self, field)
        setattr(prop, 'widget', ReadOnlyWidget())
| [
"john.stembridge@gmail.com"
] | john.stembridge@gmail.com |
3fb98eb68b61ad9968065ebf555fe75c62f98c16 | bdc6edf8d54623f0afe6d1fe6770ef7bd029d315 | /karatsuba_log int multi.py | 5f11af077cdfccaaf5782b4405900f82c669ae8f | [] | no_license | KeyurAsodariya111/Python | 8a79124677b37cfc89d0b8d25b87275d13efd2f3 | a795014f82995d9b5f07d37ba3c9d45b541199b4 | refs/heads/main | 2023-05-31T16:32:13.846214 | 2021-07-10T08:39:23 | 2021-07-10T08:39:23 | 384,649,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 3 10:09:13 2021
@author: Keyur Asodariya
Time com:- O(n^1.585)
"""
# Read the two integers to be multiplied from standard input.
X=int(input("Enter number 1 :"))
Y=int(input("Enter number 2 :"))
def karatsuba(X,Y):
    """Multiply two integers with Karatsuba's divide-and-conquer scheme.

    Runs in O(n^1.585) for n-digit operands: each level replaces four
    half-size multiplications with three.
    """
    # Small operands: fall back to the hardware multiply.  (Any negative
    # operand is < 10 and is therefore handled exactly here as well.)
    if X < 10 or Y < 10:
        return X * Y
    digits = max(len(str(X)), len(str(Y)))
    digits -= digits % 2          # force an even split point
    half = 10 ** (digits // 2)
    hi_x, lo_x = divmod(X, half)
    hi_y, lo_y = divmod(Y, half)
    top = karatsuba(hi_x, hi_y)
    bottom = karatsuba(lo_x, lo_y)
    # (hi_x + lo_x)(hi_y + lo_y) - top - bottom == hi_x*lo_y + lo_x*hi_y
    middle = karatsuba(hi_x + lo_x, hi_y + lo_y) - top - bottom
    return top * half * half + middle * half + bottom
print(karatsuba(X,Y)) | [
"noreply@github.com"
] | noreply@github.com |
5b1037113dd6a8d2a100c6474c1598111cef66cd | be8c339e7ea80aaf5e28c31d7fbcc42e160dc2dd | /examples/lotka-volterra.py | 1b5afdfd88ade35c955ef60f2fd2730bd38e7cd3 | [] | no_license | carterian8/timediffeq | aea765fab8458fc65e4b77bf253a39fab53f56cb | de72ed5c3031a56b5c2f04436958217080b0c18b | refs/heads/master | 2020-05-18T15:46:59.345102 | 2019-05-13T01:57:45 | 2019-05-13T01:57:45 | 184,507,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from scipy import integrate
import tensorflow as tf
import timediffeq
def lotka_volterra(X, t=0, a = 1., b = 0.1, c = 1.5, d = 0.75):
    """Return the growth rates [d(rabbits)/dt, d(foxes)/dt].

    Classic Lotka-Volterra dynamics with X = [prey, predator]; `t` is unused
    but required by scipy.integrate.odeint's callback signature.
    """
    prey, predator = X[0], X[1]
    rabbit_rate = a * prey - b * prey * predator
    fox_rate = -c * predator + d * b * prey * predator
    return np.array([rabbit_rate, fox_rate])
if __name__ == '__main__':
    # Integrate the ODE on a fixed time grid to get the ground-truth signal.
    total_time_pts = 1000
    t = np.linspace(0, 15, total_time_pts) # time
    y0 = np.array([10, 5]) # initials conditions: 10 rabbits and 5 foxes
    outputs, info_dict = integrate.odeint(
        func=lotka_volterra,
        y0=y0,
        t=t,
        rtol=1e-6,
        atol=1e-6,
        full_output=True,
    )
    # Generate noisy samples from ode outputs
    batch_sz = 4000
    val_batch_sz = 1000
    sample_sz = int(total_time_pts / 4)
    noise_std = 0.5
    samps = []
    t0s = []
    # Each sample is a window of sample_sz consecutive time points whose
    # start index is drawn uniformly (via a one-hot multinomial) from the
    # middle of the trajectory, then corrupted with Gaussian noise.
    for _ in range(batch_sz + val_batch_sz):
        t0_idx = npr.multinomial(
            1, [1. / (total_time_pts - 2. * sample_sz)] * (total_time_pts - int(2 * sample_sz))
        )
        t0_idx = np.argmax(t0_idx) + sample_sz
        t0s.append(t0_idx)
        samp = outputs.T[:, t0_idx:t0_idx + sample_sz].copy()
        samp += npr.randn(*samp.shape) * noise_std
        samps.append(np.transpose(samp))
    samps = np.stack(samps, axis=0)
    # Plot for visualization
    rabbits, foxes = outputs.T
    # NOTE(review): npr.randint would be the usual way to pick an index;
    # uniform(low=0, high=batch_sz-1) truncated by int() never selects the
    # last training sample — confirm intended.
    samp_idx = int(npr.uniform(low=0, high=batch_sz-1))
    noisy_rabbits = samps[samp_idx, :, 0]
    noisy_foxes = samps[samp_idx, :, 1]
    samp_ts = t[t0s[samp_idx]:t0s[samp_idx]+sample_sz]
    f1 = plt.figure()
    plt.plot(t, rabbits, 'r-', label='Rabbits')
    plt.plot(t, foxes , 'b-', label='Foxes')
    plt.scatter(samp_ts, noisy_rabbits, s=1.0, label='Noisy Rabbits')
    plt.scatter(samp_ts, noisy_foxes, c='g', s=1.0, label='Noisy Foxes')
    plt.grid()
    plt.legend(loc='best')
    plt.xlabel('time')
    plt.ylabel('population')
    f1.savefig('rabbits_and_foxes_1.png', dpi=500)
    # Turn the toy data into a tf.data.Dataset
    # All windows share the same relative time stamps t[:sample_sz],
    # repeated per sample (column-major reshape keeps rows identical).
    samp_ts = np.reshape(
        np.repeat(t[:sample_sz], samps.shape[0]), (batch_sz+val_batch_sz, sample_sz), order="F"
    )
    dataset = tf.data.Dataset.from_tensor_slices(
        (tf.cast(samps, dtype=tf.float32), tf.cast(samp_ts, dtype=tf.float32))
    )
    dataset = dataset.batch(batch_sz)
    # Create a basic LatentODEVae model. This includes an RNN encoder, dense ODE model,
    # and a dense decoder.
    latent_dim = 4
    nhidden = 20
    rnn_nhidden = 25
    obs_dim = 2
    model = timediffeq.BasicLatentODEVae(
        samps.shape,
        obs_dim,
        latent_dim=latent_dim,
        ode_Dh=nhidden,
        encoder_Dh=rnn_nhidden,
        decoder_Dh=nhidden
    )
    # Train the model
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-2)
    timediffeq.train(dataset, model, optimizer, epochs=10)
| [
"carterian8@gmail.com"
] | carterian8@gmail.com |
9f9a31f2fd7bdf297ce237c0e0bd5ca5475fafae | 60ff143cf313c0d134a17f1a20847d201caa28dc | /use_dictionary_to_count_word.py | 4762da5f7a3b4079b2d7deb8cfe27eb91629bb8e | [] | no_license | jensonhupython/reviews-analytics | db5afd829b70aed6fc68697f28bf3fd39da66ef6 | b0a933f316bba82205994740ab58fffd01f57446 | refs/heads/master | 2023-05-24T14:15:32.835160 | 2021-06-20T14:57:44 | 2021-06-20T14:57:44 | 295,876,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | # 利用 dictionary 做一個功能
# 功能: 可以計數某個字出現在整個留言中, 可以讓使用者輸入
import time

data = []
count = 0
# Read every review line into memory, reporting progress every 1000 lines.
with open('reviews.txt', 'r') as f:
    for line in f:
        data.append(line)
        count += 1
        if count % 1000 == 0:
            print(len(data))
#print('檔案讀取完畢, 總共有', len(data), '筆資料')
print(data[0])

start_time = time.time()
word_count = {}
# Tally how many times each whitespace-separated word appears.
for each_msg in data:
    words = each_msg.split(' ')
    for word in words:
        if word in word_count:
            word_count[word] += 1
        else:
            word_count[word] = 1  # add the new key to the dictionary
    # NOTE(review): indentation reconstructed (original whitespace was lost).
    # As placed, these two debug statements stop the tally after the FIRST
    # review, so word_count only reflects one line — confirm placement.
    print(words)
    break

# Print only extremely frequent words (threshold 10,000,000).
for key in word_count:
    #print(key, word_count[key])
    if word_count[key] > 10000000:
        print(key, word_count[key])
#print(word_count)
end_time = time.time()
print('花了', end_time - start_time, 'second')
print(len(word_count))
# Assumes 'Allen' occurs in the corpus — raises KeyError otherwise.
print(word_count['Allen'])

# Interactive lookup loop: type a word to get its count, 'q' to quit.
print('歡迎使用查找功能, 離開請輸入 q')
while True:
    user_word = input('請問你想查什麼字: ')
    if user_word == 'q':
        break;
    else:
        if user_word in word_count:
            print(user_word, '出現過的次數為: ', word_count[user_word])
        else:
            print(user_word, '沒有出現過!!')
print('感謝使用!!') | [
"jenson7899@gmail.com"
] | jenson7899@gmail.com |
6f51f3f9e059212cd21614b13cb96e4fca3ba3ce | 135ccc63bf78ba89f3bab1373414b8be66b5218d | /utdallasiia/wsgi.py | 0e31743ee5952db87c5477d92fdc49e225841e7d | [] | no_license | oddrationale/utdallasiia.com | 0b58d9ead02e50fa8899b4b045585557c67f34b7 | 9ce35fbdb4fee39b72ccc9daa528f5bff25df418 | refs/heads/master | 2016-09-05T20:44:33.843578 | 2013-05-03T17:48:53 | 2013-05-03T17:48:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | """
WSGI config for utdallasiia project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Point Django at this project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "utdallasiia.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"oddrationale@gmail.com"
] | oddrationale@gmail.com |
0ffe4bc3ddbf2d4c37f626ced16302e1c0753d97 | d00aec723a30e5dff05218fceeecf14e986131c3 | /src/bot/output/offense_output.py | 8f2eba13f3997491bc5eb7754f846c9d1efeb96c | [
"MIT"
] | permissive | xKynn/discord-pob | 422e2c1f42035ba292ee303cf9957f48fd47adfb | 8c1175974fa69de4a4a143487dab3ca94ce8ceed | refs/heads/master | 2020-03-25T04:19:50.908268 | 2018-07-14T09:54:09 | 2018-07-14T09:54:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | from src.bot.consts.thresholds import OutputThresholds
from src.bot.util import build_checker
from src.models import Skill
def calc_max(comparison_dps: list):
    """
    Return the largest positive value among the candidates, rounded to two
    decimal places.

    Entries that are None (stat not present) or non-positive are ignored;
    when nothing qualifies the result is 0 — matching the previous manual
    loop, which started from 0 and skipped falsy entries.

    :param comparison_dps: iterable of numbers and/or None
    :return: rounded maximum, or 0
    """
    # `default=0` keeps the empty/all-None case identical to the old loop;
    # the local no longer shadows the builtin `max`.
    best = max((dps for dps in comparison_dps if dps and dps > 0), default=0)
    return round(best, 2)
def show_avg_damage(active_skill: 'Skill') -> bool:
    """
    Determine whether average damage should be shown instead of DPS
    (useful for mines, traps and storm-style skills whose per-use damage is
    more meaningful than hits per second).

    :param active_skill: the build's active Skill, or None/falsy
    :return: True when any linked gem name contains mine/trap/firestorm/
             "ice storm"; False otherwise (including when no skill is set)
    """
    # Fix: previously a falsy active_skill fell through without `show_avg`
    # ever being assigned, contradicting the declared bool return.
    if not active_skill:
        return False
    gem_names = [gem.get_name().lower() for gem in active_skill.gems]
    return any(
        "mine" in name or "trap" in name
        or "firestorm" in name or "ice storm" in name
        for name in gem_names
    )
def get_damage_output(build, avg, dps):
    """Render the offensive stats section (AVG or DPS, crit, accuracy).

    :param build: Build instance
    :param avg: best average-damage candidate (from calc_max)
    :param dps: best DPS candidate (from calc_max)
    :return: formatted markdown string
    """
    parts = []
    speed = build.get_stat('Player', 'Speed')
    minion_speed = build.get_stat('Minion', 'Speed')
    # Mine/trap/storm skills are judged by average damage; otherwise show
    # the larger of the two headline numbers as DPS.
    if show_avg_damage(build.get_active_skill()) or avg > dps:
        parts.append("**AVG**: {avg:,.0f}\n".format(
            avg=avg))
    else:
        # Prefer the minion's attack speed when it exists and is at least
        # the player's own.
        if minion_speed and minion_speed >= speed:
            shown_speed = minion_speed
        else:
            shown_speed = speed
        # NOTE(review): the `min` fallback below is the *builtin* function —
        # a falsy shown_speed would embed "<built-in function min>" in the
        # message.  Behaviour preserved as-is; confirm intended fallback.
        parts.append("**DPS**: {dps:,.0f} @ {speed}/s\n".format(
            dps=dps,
            speed=round(shown_speed, 2) if shown_speed else min))
    crit_chance = build.get_stat('Player', 'CritChance')
    crit_multi = build.get_stat('Player', 'CritMultiplier')
    if crit_chance and crit_chance > OutputThresholds.CRIT_CHANCE.value:
        parts.append("**Crit**: Chance {crit_chance:,.2f}% | Multiplier: {crit_multi:,.0f}%\n".format(
            crit_chance=crit_chance,
            crit_multi=crit_multi * 100 if crit_multi else 150))
    acc = build.get_stat('Player', 'HitChance')
    if acc and acc < OutputThresholds.ACCURACY.value:
        parts.append("**Hit Chance**: {:.2f}%".format(acc))
    return "".join(parts)
def get_support_outptut(build):
    """Summarise a support build by its aura and curse counts.

    (The misspelled name is preserved: existing callers reference it.)
    """
    template = "Auras: {auras}, Curses: {curses}"
    return template.format(auras=build.aura_count, curses=build.curse_count)
def get_offense(build):
    """
    Build the damage/support section of the output.

    Picks the best DPS and average-damage candidates across player and
    minion stats, then delegates to the support or offense renderer.

    :param build: Build instance
    :return: tuple of (section title, section body) strings
    """
    dps_candidates = [
        build.get_stat('Player', 'TotalDPS'),
        build.get_stat('Player', 'WithPoisonDPS'),
        build.get_stat('Minion', 'TotalDPS'),
        build.get_stat('Minion', 'WithPoisonDPS'),
    ]
    avg_candidates = [build.get_stat('Player', 'WithPoisonAverageDamage')]
    dps = calc_max(dps_candidates)
    avg = calc_max(avg_candidates)
    if build_checker.is_support(build, dps, avg):
        return "Support", get_support_outptut(build)
    return "Offense", get_damage_output(build, avg, dps)
| [
"fabian.widmann@uni-ulm.de"
] | fabian.widmann@uni-ulm.de |
1d49ed1332e56d7a4116b959a1697f345d699931 | 31704accb3f1d49fbe3092c35ea054fa6c0bdf37 | /Models/Baselines/baselines.py | be4fd70b9c22b786da492e1aa6a93712a433fe1e | [] | no_license | tommasoc80/germeval-rug | eaf21e6882b419e2ef5222c50b7e049b827ff15c | 125ff4eb4f421fbd7b5e6ab1d91eac36d61fd2b0 | refs/heads/master | 2020-03-27T03:13:00.904594 | 2018-10-11T14:14:15 | 2018-10-11T14:14:15 | 145,846,700 | 1 | 1 | null | 2018-10-11T14:14:16 | 2018-08-23T11:52:28 | Python | UTF-8 | Python | false | false | 3,463 | py | '''
Baseline systems for Germeval 2018 Shared Task
1) Most freq. class
2) Tfidf-weighted (word) unigram + linear SVM
Input corpus file as command-line argument
'''
import argparse
import statistics as stats
from sklearn.dummy import DummyClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
def read_corpus(corpus_file, binary=True):
    '''Read tweets and labels from a tab-separated corpus file.

    Each line is ``<tweet>\\t<binary label>\\t<fine-grained label>``.
    With binary=True the 2-class OTHER/OFFENSE labels are returned,
    otherwise the 4-class OTHER/PROFANITY/INSULT/ABUSE labels.
    Raises IndexError for any malformed row.
    '''
    tweets, labels = [], []
    with open(corpus_file, 'r', encoding='utf-8') as infile:
        for raw_line in infile:
            fields = raw_line.strip().split('\t')
            if len(fields) != 3:
                # Guard against rows with missing labels.
                raise IndexError('Missing data for tweet "%s"' % fields[0])
            tweets.append(fields[0])
            labels.append(fields[1] if binary else fields[2])
    return tweets, labels
def evaluate(Ygold, Yguess):
    '''Print accuracy, per-class precision/recall/F-score, the macro
    F-score and the confusion matrix for a set of predictions.'''
    separator = '-' * 50
    print(separator)
    print("Accuracy:", accuracy_score(Ygold, Yguess))
    print(separator)
    print("Precision, recall and F-score per class:")
    # Ygold is a plain list while Yguess is a numpy array, hence tolist().
    class_labels = sorted(set(Ygold + Yguess.tolist()))
    scores = precision_recall_fscore_support(Ygold, Yguess, labels=class_labels)
    print('{:10s} {:>10s} {:>10s} {:>10s}'.format("", "Precision", "Recall", "F-score"))
    for label, prec, rec, fscore in zip(class_labels, scores[0], scores[1], scores[2]):
        print("{0:10f} {1:10f} {2:10f}".format(prec, rec, fscore) if False else
              "{0:10s} {1:10f} {2:10f} {3:10f}".format(label, prec, rec, fscore))
    print(separator)
    print("Average (macro) F-score:", stats.mean(scores[2]))
    print(separator)
    print('Confusion matrix:')
    print('Labels:', class_labels)
    print(confusion_matrix(Ygold, Yguess, labels=class_labels))
    print()
if __name__ == '__main__':
    train_dat = '../../Data/germeval.ensemble.train.txt'
    test_dat = '../../Data/germeval.ensemble.test.txt'

    print('Loading data...')
    # Use the fine-grained 4-class labels for both splits.
    Xtrain, Ytrain = read_corpus(train_dat, binary=False)
    Xtest, Ytest = read_corpus(test_dat, binary=False)

    # Most-frequent-class baseline: the vectoriser only satisfies the
    # Pipeline interface — DummyClassifier ignores the features entirely.
    classifer_mfc = Pipeline([('vec', CountVectorizer()),
                              ('classify', DummyClassifier(strategy='most_frequent', random_state=0))])
    # An SVM baseline (TfidfVectorizer + linear SVC) was scaffolded here but
    # is currently disabled.

    print('Fitting models...')
    classifer_mfc.fit(Xtrain, Ytrain)

    print('Predicting...')
    Yguess_mfc = classifer_mfc.predict(Xtest)

    print('Results for most frequent class baseline:')
    evaluate(Ytest, Yguess_mfc)
    print()
| [
"xiaoyubai2002@yahoo.de"
] | xiaoyubai2002@yahoo.de |
2bdca8a8bf20ae934daa0612cf186baca66bacb7 | c37e6d0770c31a78f315bd4a46c96158ccc13485 | /IDE/AsmIDE/migrations/0010_alter_tests_starting_time.py | 1ddd6c8774b2e16f7f2c9990f9a48f9dc32845f5 | [] | no_license | Scorpion197/8086-IDE | 22f8b3ae0831bc5ecd8ff5643564eb8b970855cc | 165eb2f9ff60ae17f61c077a637a0c123f52e718 | refs/heads/main | 2023-05-21T00:09:53.426535 | 2021-06-15T01:42:54 | 2021-06-15T01:42:54 | 355,016,027 | 2 | 0 | null | 2021-04-24T20:14:04 | 2021-04-06T01:03:57 | JavaScript | UTF-8 | Python | false | false | 509 | py | # Generated by Django 3.2 on 2021-04-24 01:07
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
# Auto-generated migration (manage.py makemigrations); once applied to any
# shared environment its contents should not be edited by hand.
class Migration(migrations.Migration):

    dependencies = [
        ('AsmIDE', '0009_alter_tests_starting_time'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tests',
            name='starting_time',
            # NOTE(review): the default is frozen at the moment
            # makemigrations ran (2021-04-24 01:07 UTC).  If a "current
            # time" default was intended, a callable such as timezone.now
            # should be used on the model — confirm.
            field=models.DateTimeField(default=datetime.datetime(2021, 4, 24, 1, 7, 45, 110539, tzinfo=utc)),
        ),
    ]
| [
"doudou.gaw@gmail.com"
] | doudou.gaw@gmail.com |
2b6fd7d1a8889319ee9306ac006162dc199fd10e | aae970b6020f1ae416d3b1c8ad581027c1389d2c | /venv/Scripts/easy_install-3.7-script.py | 62f792f8fa9e0418d04dba7e62fdf69a75208c29 | [] | no_license | saffist3r/DecryptXOR | 3f5b5a37c3f84cf44fea87e3b19688680dfcc8a1 | e417acfd3d8eec42bb0c5ba1feb1b879a9dc6558 | refs/heads/master | 2022-11-20T13:45:06.369448 | 2020-07-26T17:32:11 | 2020-07-26T17:32:11 | 282,700,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | #!"C:\Users\Safwen Ladhari\PycharmProjects\PythonTests\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# Auto-generated console-script shim written by setuptools at install time;
# do not hand-edit — it is regenerated whenever the package is reinstalled.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"axelsaff@gmail.com"
] | axelsaff@gmail.com |
fbe5bcce893c65d0de9fbe54faebde4410ae5695 | 5ac726f23d0490d3d43819578cca590b62d0ff02 | /wise_intern/Interviews/admin.py | 259b7df34ca5d091e9f4021328f27e7c247f6b8f | [] | no_license | youssriaboelseod/Software-HumanResources | 52ab324bf43479d8bea20690f71690615c68ef25 | 821fdf195915af3f6b6ec16ef2fb6d9f70d986f7 | refs/heads/master | 2022-12-28T01:29:52.310176 | 2020-10-10T05:05:22 | 2020-10-10T05:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from django.contrib import admin
from .models import Interview
# Register your models here.
admin.site.register(Interview) | [
"bhatnagar.aman1998@gmail.com"
] | bhatnagar.aman1998@gmail.com |
6ba2eaaf0441bd9cbe54bd95bd0f0810f655a902 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02999/s283934284.py | 6cd487c30f50e638f9be2239e26427ad2bcd0a27 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | x,a = list(map(int,input().strip().split()))
# AtCoder: score 10 points when the ball flew at least `a` metres, else 0.
# (Replaces the verbose s=(0) / if / s=(10) sequence with the idiomatic
# conditional expression; behaviour is unchanged.)
s = 10 if x >= a else 0
print(s) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d558ad7aff4e520653b7da7b5f2cf5eb3c8f7089 | f98c12b0fa1449ede5c53d4fa11ad14274a0390c | /homework_2/question4.py | c13cc75d006a8ab6e27e77a78128073a81805b12 | [] | no_license | doodles526/CS344 | 49cc20551c89a948336eb15a87208dc58b5df8ce | 02f2b37bbcc7d42d70a611b1e74557c566c2f6f6 | refs/heads/master | 2021-01-19T18:29:49.880665 | 2014-09-01T06:45:49 | 2014-09-01T06:45:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | # Josh Deare
# dearej@onid.orst.edu
# CS311-400
# Homework 2
import math
import sys
# Sentinel stored in the sieve for numbers proven composite.
COMPOSITE = 3


def primes_to_n(scan_to):
    """
    Return (count, primes) for every prime in [2, scan_to).

    Sieve-of-Eratosthenes variant: multiples of each prime up to
    sqrt(scan_to) are marked composite, then the unmarked remainder of the
    range is collected directly.
    """
    marks = [None] * scan_to
    found = []
    candidate = 2
    # Sieving phase: only primes below sqrt(scan_to) need their multiples
    # marked.
    while candidate < math.sqrt(scan_to):
        # Skip past anything already proven composite.
        while marks[candidate] == COMPOSITE and candidate < scan_to:
            candidate += 1
        # Mark every multiple 2p, 3p, ... that falls inside the scan range.
        for multiple in range(2 * candidate, scan_to, candidate):
            marks[multiple] = COMPOSITE
        found.append(candidate)
        candidate += 1
    # Collection phase: everything unmarked from here up is prime.
    for value in range(candidate, scan_to):
        if marks[value] != COMPOSITE:
            found.append(value)
    return (len(found), found)
def get_nth_prime(n):
    """
    Returns the nth prime number
    """
    # Prime Number Theorem estimate p_n ~ n*ln(n), inflated by a factor of
    # 1.5 and offset by 3 so the sieve window is guaranteed to contain the
    # nth prime even for small n.
    upper_to_test = int(n * 1.5 * math.log(n) + 3)
    # primes_to_n returns (count, primes); n-1 converts to a 0-based index.
    return primes_to_n(upper_to_test)[1][n - 1]
def main(argv):
    """Print the nth prime, where n is the first command-line argument."""
    # Parenthesised print is valid and equivalent for a single argument
    # under both Python 2 and Python 3, so the script is no longer tied to
    # a Python 2 runtime (the original used the py2 print statement).
    print(get_nth_prime(int(argv[0])))


if __name__ == "__main__":
    # call main with cli args (drop the script name)
    main(sys.argv[1:])
| [
"dearej@onid.orst.edu"
] | dearej@onid.orst.edu |
3433c235216a86e3fa5e9cfba839c2bfad220340 | 5bf4dddf4ec67115a604b42bb43691bcb8853106 | /tga/ugc/management/commands/bot.py | 21a945ab3f41fcd0156dacc5f097bc9bd88a6601 | [] | no_license | xislam/bot-werca | e6f4f829442c01700b0f44142749192dd6232bf6 | d826944460c1449e425f54373fe90927d575d832 | refs/heads/main | 2023-01-05T10:55:58.667394 | 2020-10-06T14:14:55 | 2020-10-06T14:14:55 | 301,629,851 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,653 | py | from abc import ABC
from aiogram.utils import executor
from django.core.management.base import BaseCommand
from ugc.management.commands.text import text1, text2, text3, text5, text7, text6, text8, text9, text10, text11, \
text12, text13, text14, \
text15, text16, text17, text18, text19, text20, text21, text22, text4, text23, text24
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.types import ReplyKeyboardMarkup, KeyboardButton
# NOTE(security): the Telegram bot token is hard-coded and committed to the
# repository — it should be revoked and loaded from an environment variable
# or Django settings instead of living in source control.
bot = Bot(token='1396506839:AAGC8G-3J_4xHHcYZsyuw-uha8zTvpZ68yc')
dp = Dispatcher(bot)
# /start entry point: greet the user and show the top-level reply keyboard.
@dp.message_handler(commands=['start'])
async def process_start_command(message: types.Message):
    await message.reply(text2, reply_markup=markup1)
# Кнопки после старта
new_button1 = KeyboardButton('Начать')
new_button2 = KeyboardButton('Партнерам')
new_button3 = KeyboardButton('Помощь')
# "Начать" button: reply with the intro text and the main-menu keyboard.
# NOTE(review): every handler in this module is named `new_1`; aiogram
# registers the coroutine at decoration time so dispatch still works, but
# the duplicated name makes tracebacks and logs ambiguous — consider giving
# each handler a unique name.
@dp.message_handler(text='Начать')
async def new_1(message: types.Message):
    await message.reply(text1, reply_markup=markup2)
@dp.message_handler(text='Партнерам')
async def new_1(message: types.Message):
await message.reply('🤝 Уважаемый партнер! Вероятно. у Вас накопились вопросы ❓\n\
Тогда поищи ответы здесь 👇', reply_markup=markup3)
@dp.message_handler(text='Помощь')
async def new_1(message: types.Message):
await message.reply('❓ ❓❓ Остались вопросы? Нужна помощь в активации смарт-контракта?\n\
Обратитесь с вопросом менеджеру, либо пишите в общий чат, где вам обязательно помогут.'
, reply_markup=markup4)
markup1 = ReplyKeyboardMarkup(resize_keyboard=True).row(new_button1).row(new_button2, new_button3)
# Кнопки "Начать"
start_buttons1 = KeyboardButton('Активировать')
start_buttons2 = KeyboardButton('Доходность')
start_buttons3 = KeyboardButton('О проекте FairPact')
start_buttons4 = KeyboardButton('_НАЗАД_')
@dp.message_handler(text='Активировать')
async def new_1(message: types.Message):
await message.reply(text3, reply_markup=markup5)
@dp.message_handler(text='Доходность')
async def new_1(message: types.Message):
await message.reply(text1, reply_markup=markup6)
@dp.message_handler(text='О проекте FairPact')
async def new_1(message: types.Message):
await message.reply(text12, reply_markup=markup7)
@dp.message_handler(text='_НАЗАД_')
async def new_1(message: types.Message):
await message.reply(text1, reply_markup=markup1)
markup2 = ReplyKeyboardMarkup(resize_keyboard=True).row(start_buttons1, start_buttons2).row(start_buttons3,
start_buttons4)
# Кнопки "Паркнерам"
for_partners_buttons1 = KeyboardButton('О компании WERCA')
for_partners_buttons2 = KeyboardButton('Ваш ID')
for_partners_buttons3 = KeyboardButton('_НАЗАД')
@dp.message_handler(text='О компании WERCA')
async def new_1(message: types.Message):
await message.reply(text19, reply_markup=markup12)
@dp.message_handler(text='Ваш ID')
async def new_1(message: types.Message):
await message.reply(text13, reply_markup=markup13)
@dp.message_handler(text='_НАЗАД')
async def new_1(message: types.Message):
await message.reply(text1, reply_markup=markup1)
markup3 = ReplyKeyboardMarkup(resize_keyboard=True).row(for_partners_buttons1,
for_partners_buttons2).row(for_partners_buttons3)
# Кнопки "Помощь"
help_buttons1 = KeyboardButton('Задать вопрос')
help_buttons2 = KeyboardButton('Контакты')
help_buttons3 = KeyboardButton('Назад_')
@dp.message_handler(text='Задать вопрос')
async def new_1(message: types.Message):
await message.reply('❗️ Ждем Вашего вопроса. Напиши, и мы ответим 👇 @bot_questions_fairpact_bot')
@dp.message_handler(text='Контакты')
async def new_1(message: types.Message):
await message.reply('👉В общем чате Вам помогут с активацией смарт-контракта и проконсультируют по любым вопросам')
@dp.message_handler(text='Назад_')
async def new_1(message: types.Message):
await message.reply(text1, reply_markup=markup1)
markup4 = ReplyKeyboardMarkup(resize_keyboard=True).row(help_buttons1, help_buttons2).row(help_buttons3)
# Кнопки "Активировать"
activate_buttons1 = KeyboardButton('Установить')
activate_buttons2 = KeyboardButton('Купить ETH')
activate_buttons3 = KeyboardButton('Вызов смарт-контракта')
activate_buttons4 = KeyboardButton('Назад.')
@dp.message_handler(text='Установить')
async def new_1(message: types.Message):
await message.reply(text23, reply_markup=markup8)
@dp.message_handler(text='Купить ETH')
async def new_1(message: types.Message):
await message.reply(text5)
@dp.message_handler(text='Вызов смарт-контракта')
async def new_1(message: types.Message):
await message.reply(text6, reply_markup=markup9)
@dp.message_handler(text='Назад.')
async def new_1(message: types.Message):
await message.reply(text1, reply_markup=markup2)
markup5 = ReplyKeyboardMarkup(resize_keyboard=True).add(activate_buttons1, activate_buttons2).row(activate_buttons3,
activate_buttons4)
# Кнопки "Доходность"
profitability_buttons1 = KeyboardButton('Маркетинг')
profitability_buttons2 = KeyboardButton('Бонусная программа')
profitability_buttons3 = KeyboardButton('*-Назад')
@dp.message_handler(text='Маркетинг')
async def new_1(message: types.Message):
await message.reply(text24, reply_markup=markup10)
@dp.message_handler(text='Бонусная программа')
async def new_1(message: types.Message):
await message.reply(text13, reply_markup=markup11)
@dp.message_handler(text='*-Назад')
async def new_1(message: types.Message):
await message.reply('ok', reply_markup=markup2)
markup6 = ReplyKeyboardMarkup(resize_keyboard=True).row(profitability_buttons1,
profitability_buttons2).row(profitability_buttons3)
# Кнопки "О проекте FairPact"
about_FairPact1 = KeyboardButton('О смарт-контракте')
about_FairPact2 = KeyboardButton('О маркетинге')
about_FairPact3 = KeyboardButton('О блокчейне')
about_FairPact4 = KeyboardButton('О создателе')
about_FairPact5 = KeyboardButton('Назад:')
@dp.message_handler(text='О смарт-контракте')
async def new_1(message: types.Message):
await message.reply(text9)
@dp.message_handler(text='О маркетинге')
async def new_1(message: types.Message):
await message.reply(text11)
@dp.message_handler(text='О блокчейне')
async def new_1(message: types.Message):
await message.reply(text10)
@dp.message_handler(text='О создателе')
async def new_1(message: types.Message):
await message.reply(text18)
@dp.message_handler(text='Назад:')
async def new_1(message: types.Message):
await message.reply('ok', reply_markup=markup2)
markup7 = ReplyKeyboardMarkup(resize_keyboard=True).row(about_FairPact1, about_FairPact2).row(about_FairPact3,
about_FairPact4).row(about_FairPact5)
# Кнопки "установить"
install_1 = KeyboardButton('На пк')
install_2 = KeyboardButton('На телефоне')
install_3 = KeyboardButton('- Назад')
@dp.message_handler(text='На пк')
async def new_1(message: types.Message):
await message.reply(text7)
@dp.message_handler(text='На телефоне')
async def new_1(message: types.Message):
await message.reply(text4)
@dp.message_handler(text='- Назад')
async def new_1(message: types.Message):
await message.reply('ok', reply_markup=markup5)
markup8 = ReplyKeyboardMarkup(resize_keyboard=True).row(install_1, install_2).row(install_3)
# Кнопки "Вызов смарт-контракта"
Calling_a_smart_contract_1 = KeyboardButton('На ПК (Metamask)')
Calling_a_smart_contract_2 = KeyboardButton('На телефоне (Android, IOS)')
Calling_a_smart_contract_3 = KeyboardButton('=-назад')
@dp.message_handler(text='На ПК (Metamask)')
async def new_1(message: types.Message):
await message.reply(text7)
@dp.message_handler(text='На телефоне (Android, IOS)')
async def new_1(message: types.Message):
await message.reply(text8)
@dp.message_handler(text='=-назад')
async def new_1(message: types.Message):
await message.reply("ок", reply_markup=markup5)
markup9 = ReplyKeyboardMarkup(resize_keyboard=True).row(Calling_a_smart_contract_1,
Calling_a_smart_contract_2).row(Calling_a_smart_contract_1)
# Кнопки "Маркетинг"
Marketing_1 = KeyboardButton('1 уровень')
Marketing_2 = KeyboardButton('2 уровень')
Marketing_3 = KeyboardButton('3 уровень')
Marketing_4 = KeyboardButton('4 уровень')
Marketing_5 = KeyboardButton('^НАЗАД')
@dp.message_handler(text='1 уровень')
async def new_1(message: types.Message):
await message.reply('1️⃣ УРОВЕНЬ'
'✅ Стоимость – 0.2 ETH\n\
✅ Прибыль от продажи первого уровня – 0.32 ETH\n\
✅ Кол-во партнеров – 2 человека')
@dp.message_handler(text='2 уровень')
async def new_1(message: types.Message):
await message.reply('2️⃣ УРОВЕНЬ\n\
✅ Стоимость – 0.3 ETH\n\
✅ Прибыль от продажи второго уровня – 0.96 ETH\n\
✅ Кол-во партнеров – 4 человека')
@dp.message_handler(text='3 уровень')
async def new_1(message: types.Message):
await message.reply('3️⃣ УРОВЕНЬ\n\
\n\
✅ Стоимость – 0.5 ETH\n\
✅ Прибыль от продажи третьего уровня – 3.2 ETH\n\
✅ Кол-во партнеров - 8 человек')
@dp.message_handler(text='4 уровень')
async def new_1(message: types.Message):
await message.reply('4️⃣ УРОВЕНЬ\n\
✅ Стоимость входа – 1 ETH\n\
✅ Прибыль от продажи четвертого уровня – 12.8 ETH\n\
✅ Кол-во партнеров - 16 человек')
@dp.message_handler(text='^НАЗАД')
async def new_1(message: types.Message):
await message.reply(text1, reply_markup=markup6)
markup10 = ReplyKeyboardMarkup(resize_keyboard=True).row(Marketing_1, Marketing_2).row(Marketing_3,
Marketing_4).row(Marketing_5)
# Кнопки "Бонусная программа"
bonus_program_1 = KeyboardButton('от 15%')
bonus_program_2 = KeyboardButton('от 20%')
bonus_program_3 = KeyboardButton('от 25%')
bonus_program_4 = KeyboardButton('от 30%')
bonus_program_5 = KeyboardButton('назад-*')
@dp.message_handler(text='от 15%')
async def new_1(message: types.Message):
await message.reply(text14)
@dp.message_handler(text='от 20%')
async def new_1(message: types.Message):
await message.reply(text15)
@dp.message_handler(text='от 25%')
async def new_1(message: types.Message):
await message.reply(text16)
@dp.message_handler(text='от 30%')
async def new_1(message: types.Message):
await message.reply(text17)
@dp.message_handler(text='назад-*')
async def new_1(message: types.Message):
await message.reply('ок', reply_markup=markup6)
markup11 = ReplyKeyboardMarkup(resize_keyboard=True).row(bonus_program_1, bonus_program_2).row(bonus_program_3,
bonus_program_4).row(bonus_program_5)
# Кнопки "Ваш ID"
id_1 = KeyboardButton('Как найти свой ID в структуре?')
id_2 = KeyboardButton('Как проходит покупка следующего уровня?')
id_3 = KeyboardButton('Почему')
id_4 = KeyboardButton('Что')
id_5 = KeyboardButton('Где')
id_6 = KeyboardButton('Когда')
id_7 = KeyboardButton('Назад!')
markup13 = ReplyKeyboardMarkup(resize_keyboard=True).row(id_1, id_2).row(id_3, id_4).row(id_5, id_6).row(id_7)
@dp.message_handler(text='Как найти свой ID в структуре?')
async def new_1(message: types.Message):
await message.reply('Как найти свой ID в структуре\n\
1.Переходите по ссылке 👉 https://fairpact.ru/structure/\n\
2. Кликаете слева вверху на "CHANGE ID"\n\
3. В поле вбиваете 1685 - центральный ID ветки')
@dp.message_handler(text='Как проходит покупка следующего уровня?')
async def new_1(message: types.Message):
await message.reply('text')
@dp.message_handler(text='Почему')
async def new_1(message: types.Message):
await message.reply('🔹Почему я не могу найти свой ID на структуре ❓\n\
\n\
🔸 По всей вероятности, из-за большого количества партнеров и небольшого размера схемы. Но есть выход:\n\
👉 Закройте одну из веток ID 1689 слева и ID 1690 справа от Вас. Схема станет больше. Попробуйте свернуть несколько «ног", и станет еще больше и лучше видна.')
@dp.message_handler(text='Что')
async def new_1(message: types.Message):
await message.reply('🔹 Что произойдет, если я не куплю уровень ❓\n\
\n\
🔸 Тогда деньги, которые Вы должны были бы получить от своих партнеров, уйдут выше. Однако, это не значит, что вы совсем потеряли свою прибыль. Быстрее повышайте уровень, и остальные переливы вновь будут поступать к Вам.\n\
\n\
❗️Очень важно быть подключенным к чату в Telegram, чтобы не пропустить новости, в том числе и о поднятии уровней.\n\
👉Для присоединения зайдите в раздел КОНТАКТЫ')
@dp.message_handler(text='Где')
async def new_1(message: types.Message):
await message.reply('🔹 Где я могу посмотреть, есть ли партнеры под моим ID ❓\n\
🔸 Вы можете посмотреть свою структуру на сайте проекта по адресу 👉 https://fairpact.ru/structure/')
@dp.message_handler(text='Когда')
async def new_1(message: types.Message):
await message.reply('🔹Когда необходимо поднимать уровень? Как я об этом узнаю ❓\n\
🔸Согласно нашей стратегии, уровни поднимаются одновременно всеми партнерами в ряду. О необходимости поднять уровень Вам сообщат менеджеры нашего проекта.\n\
❗️Важно быть участником основного Тelеgram канала и следить за сообщениями от менеджеров проекта. \n\
👉Проверьте, подписаны ли Вы на канал? Для этого зайдите в раздел КОНТАКТЫ')
@dp.message_handler(text='Назад!')
async def new_1(message: types.Message):
await message.reply(text1, reply_markup=markup3)
# Кнопки "О компании WERCA"
werca_1 = KeyboardButton('Наши проекты')
werca_2 = KeyboardButton('Работа')
werca_3 = KeyboardButton('Команда')
werca_4 = KeyboardButton('Наши соцсети')
werca_5 = KeyboardButton('*Назад*')
markup12 = ReplyKeyboardMarkup(resize_keyboard=True).row(werca_1, werca_2).row(werca_3, werca_4).row(werca_5)
@dp.message_handler(text='Наши проекты')
async def new_1(message: types.Message):
await message.reply(text20)
@dp.message_handler(text='Работа')
async def new_1(message: types.Message):
await message.reply(text21)
@dp.message_handler(text='Команда')
async def new_1(message: types.Message):
await message.reply(text22)
@dp.message_handler(text='Наши соцсети')
async def new_1(message: types.Message):
await message.reply(text22)
@dp.message_handler(text='*Назад*')
async def new_1(message: types.Message):
await message.reply('ok', reply_markup=markup3)
class Command(BaseCommand):
help = 'Телеграм-бот'
def handle(self, *args, **options):
executor.start_polling(dp)
| [
"nurdinovislam4@gmail.com"
] | nurdinovislam4@gmail.com |
5ce998a8d321fbb8c92ffc3515a595137019c013 | 222d525f50f0c955ba6e8af0b41a9bd9c04d99a9 | /venv/Lib/site-packages/pandas/_version.py | 560c9c69332156b1bd4362ce45c1cffa515f362a | [] | no_license | Sakthi-zebra/Rest_RCI | 943c4652a239808b71d0d2ba5c28acca7435cf68 | 9a716860986a3d1fafee70f0c6339810fce152f2 | refs/heads/master | 2021-01-05T16:44:41.126142 | 2020-02-17T10:22:16 | 2020-02-17T10:22:16 | 241,074,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # This file was generated by 'versioneer.py' (0.15) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
from warnings import catch_warnings
with catch_warnings(record=True):
import json
import sys
version_json = '''
{
"dirty": false,
"error": null,
"full-revisionid": "29d6b0232aab9576afa896ff5bab0b994760495a",
"version": "1.0.1"
}
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
| [
"sm1279@zebra.com"
] | sm1279@zebra.com |
1cae0928c21b5d35d8bbd79dddccb421f2bc0d15 | 58d8b2b9a1ece8cc06a2997f731a17f22edacbf0 | /cipher/plugins/hearing/routes.py | ce25307ce4abb5ebe981279cecefcdb2f641aba0 | [] | no_license | valorun/CIPHER | 2c67a7b6eee1f8a593c07cb594dd922f805a468e | e7d1aef66470477d5788c2dc11c7370284a6bdf4 | refs/heads/master | 2023-07-24T04:05:54.000846 | 2022-07-24T09:36:37 | 2022-07-24T09:36:37 | 137,410,300 | 0 | 0 | null | 2023-07-18T21:53:21 | 2018-06-14T21:15:53 | Python | UTF-8 | Python | false | false | 2,705 | py | import json
import re
import threading
from flask import Flask, session, request, jsonify
from . import hearing
from .model import Intent, chat_queue
from cipher.model import Sequence, db, resources
from cipher.security import login_required
from cipher.core.actions import CUSTOM_ACTIONS
@hearing.route('/hearing')
@login_required
def hearing_page():
sequences = Sequence.query.all()
intents = Intent.query.all()
return hearing.render_page('hearing.html', sequences=sequences, intents=intents, chat_records=list(chat_queue))
@hearing.route('/save_intent', methods=['POST'])
@login_required
def save_intent():
"""
Save an intent in the database.
"""
intent = request.json.get('intent')
action_name = request.json.get('action_name')
seq_id = request.json.get('sequence_id')
if not intent or ' ' in intent:
return jsonify("Un nom d'intention ne doit pas être vide ou contenir d'espace."), 400
if (not action_name and not seq_id) or (action_name and seq_id):
return jsonify("Vous devez choisir une séquence OU une action."), 400
if action_name and ' ' in action_name:
return jsonify("Un nom de d'action ne doit pas contenir d'espace."), 400
if Intent.query.filter_by(intent=intent).first() is not None:
return jsonify("Une intention portant le même nom existe déjà."), 400
hearing.log.info("Saving intent '%s'", intent)
db_intent = Intent(intent=intent, sequence_id=seq_id, enabled=True)
db.session.merge(db_intent)
db.session.commit()
return jsonify("L'intention '" + intent + "' a été sauvegardée avec succès."), 200
@hearing.route('/enable_intent', methods=['POST'])
@login_required
def enable_intent():
intent = request.json.get('intent')
value = request.json.get('value')
if not intent or ' ' in intent:
return jsonify("Une intention ne doit pas être vide ou contenir d'espace."), 400
hearing.log.info("Updating intent '" + intent + "'")
db_intent = Intent.query.filter_by(intent=intent).first()
db_intent.enabled = value
db.session.commit()
return jsonify("L'état de l'intention '" + intent + "' a été modifié."), 200
@hearing.route('/delete_intent', methods=['POST'])
@login_required
def delete_intent():
intent = request.json.get('intent')
if not intent or ' ' in intent:
return jsonify("Une intention ne doit pas être vide ou contenir d'espace."), 400
hearing.log.info("Deleting intent '" + intent + "'")
db_intent = Intent.query.filter_by(intent=intent).first()
db.session.delete(db_intent)
db.session.commit()
return jsonify("L'intention '" + intent + "' a été supprimée avec succès."), 200
| [
"ronan@collier-famille.fr"
] | ronan@collier-famille.fr |
290cccac3244c8f49b7fe30dc928990ec75a0610 | b7ba02a29b10c449a8e405063c5eede32c36f0c8 | /doc/conf.py | 31ed7b46162d1b1b5bd2fbd7c00247768fd3b1bc | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Paebbels/pyHTTPRequestRouter | 96e32756ef4ef0e538f8990cef99719eac5dad0d | 10592ecdd9fd57bc04d218a7cdbb050d7ae38cc5 | refs/heads/master | 2021-06-22T14:25:37.250642 | 2020-01-02T00:57:08 | 2020-01-02T00:57:08 | 214,282,531 | 0 | 0 | NOASSERTION | 2021-04-28T22:09:05 | 2019-10-10T20:46:33 | Python | UTF-8 | Python | false | false | 4,408 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
#sys.path.insert(0, os.path.abspath('../pyHTTPRequestRouter'))
#sys.path.insert(0, os.path.abspath('_extensions'))
#sys.path.insert(0, os.path.abspath('_themes/sphinx_rtd_theme'))
# -- Project information -----------------------------------------------------
project = 'pyHTTPRequestRouter'
copyright = '2017-2019, Patrick Lehmann'
author = 'Patrick Lehmann'
# The full version, including alpha/beta/rc tags
release = 'v0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# Sphinx theme
"sphinx_rtd_theme",
# Standard Sphinx extensions
"sphinx.ext.autodoc",
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
# SphinxContrib extensions
# Other extensions
# 'DocumentMember',
# local extensions (patched)
# local extensions
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store"
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# ==============================================================================
# Sphinx.Ext.InterSphinx
# ==============================================================================
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
# 'pyFlags': ('http://pyFlags.readthedocs.io/en/latest', None),
'pyExceptions': ('http://pyExceptions.readthedocs.io/en/latest', None),
'pyAttributes': ('http://pyAttributes.readthedocs.io/en/latest', None),
'pyGenericPath': ('http://pyGenericPath.readthedocs.io/en/latest', None),
'pyHTTPInterface': ('http://pyHTTPInterface.readthedocs.io/en/latest', None),
}
# ==============================================================================
# Sphinx.Ext.AutoDoc
# ==============================================================================
# see: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration
autodoc_member_order = "bysource" # alphabetical, groupwise, bysource
# ==============================================================================
# Sphinx.Ext.ExtLinks
# ==============================================================================
extlinks = {
'issue': ('https://github.com/Paebbels/pyHTTPRequestRouter/issues/%s', 'issue #'),
'pull': ('https://github.com/Paebbels/pyHTTPRequestRouter/pull/%s', 'pull request #'),
'src': ('https://github.com/Paebbels/pyHTTPRequestRouter/blob/master/pyHTTPRequestRouter/%s?ts=2', None),
# 'test': ('https://github.com/Paebbels/pyHTTPRequestRouter/blob/master/test/%s?ts=2', None)
}
# ==============================================================================
# Sphinx.Ext.Graphviz
# ==============================================================================
graphviz_output_format = "svg"
| [
"Patrick.Lehmann@plc2.de"
] | Patrick.Lehmann@plc2.de |
062f297cae347ab07eaa95341a3d6d4e5870644e | 161e7693f8e0dccdaff940fc5b3fcde3b2a54b7c | /lesson5_inheritance.py | 988de798d20db2f1a884dca8ce585e96d5d92f4c | [] | no_license | tanweer123/Python | 4d759e8b07057fe2248d087c822e88a874238d05 | af7d736484dccb3a72f17cbb56e6da87a78ba3e2 | refs/heads/master | 2020-04-16T19:59:09.910136 | 2019-03-10T11:20:35 | 2019-03-10T11:20:35 | 165,881,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | class Parent:
def __init__(self):
print("This is parent class")
def parentfunc(self):
print("This is parent method")
def func(self):
print("calling from parent class")
p = Parent()
class Child(Parent):
def __init__(self):
print("This is child class")
def childfunc(self):
print("This is child function")
def func(self):
print("Calling from child func")
c = Child()
c.childfunc()
c.func()
| [
"alam.tanweer153@gmail.com"
] | alam.tanweer153@gmail.com |
efddcc33bba411d8e270ab601367317c336c97a1 | 3e0a928b4e92ddd51421970de0bab7e787780e24 | /assignments/assignment3/number1.py | 785851163c4b6449eca436a87b6ff613344953ff | [] | no_license | mjohn239/530-390-13 | cc7ac27a7d449954cf8739de21fc23c0daddf894 | 78171c55fbc5092556fa7af3918bd9b222d80572 | refs/heads/master | 2021-01-18T17:14:39.108564 | 2016-01-22T04:15:53 | 2016-01-22T04:15:53 | 49,161,182 | 0 | 0 | null | 2016-01-06T20:58:30 | 2016-01-06T20:58:30 | null | UTF-8 | Python | false | false | 1,261 | py | #!/usr/bin/env python
import numpy as np
import random
import time
# selection sort
def selectionsort(A, n):
for i in range(n):
m = A[i]
mj = i
for j in range(i,n):
if A[j] < m:
m = A[j]
mj = j
A[mj] = A[i]
A[i] = m
# merge sort (entry point)
def mergesort(A,n):
if n > 1:
m = round(0.5 * n)
mergesort(A[0:m],m)
mergesort(A[m:], n-m)
merge(A,n,m)
# merge
def merge(A,n,m):
B = np.empty(n)
i = 0 # first half index
j = m # second half index
for k in range(n):
if j == n:
B[k] = A[i]
i = i + 1
elif i == m:
B[k] = A[j]
j = j + 1
elif A[j] < A[i]:
B[k] = A[j]
j = j + 1
else:
B[k] = A[i]
i = i + 1
for i in range(n):
A[i] = B[i]
# compare sorting algorithms
def comparesort(N):
A = np.random.rand(N)
A2 = np.array(A)
t0 = time.time()
selectionsort(A,N)
t1 = time.time()
mergesort(A2,N)
t2 = time.time()
print(t1 - t0, t2 - t1)
# compare times for different sizes N
comparesort(100)
# selection sort time = 0.001244, merge sort time = 0.001454
comparesort(1000)
# selection sort time = 0.123083, merge sort time = 0.018130
comparesort(10000)
# selection sort time = 12.2384, merge sort time = 0.218801
| [
"mjohn239@jhu.edu"
] | mjohn239@jhu.edu |
57c06678cfe9cd58a767af3192995b70794be68a | 63ddf960d66b380933b9294c7993d3bfba158b5d | /flask_website/__init__.py | b0394be6e60d9c69c0194d5f1a70ee23dd5decaf | [] | no_license | ujansen/Flask-Blog | 1b1a0e8c5588b8bfaa90c812d1501d75474b1cc7 | 7389416e714790b32289766aa776516017b29109 | refs/heads/main | 2023-01-11T17:29:34.793206 | 2020-11-18T13:13:11 | 2020-11-18T13:13:11 | 313,937,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from flask_website.config import Config
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'info'
mail = Mail()
def create_app(config_class = Config):
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
from flask_website.users.routes import users
from flask_website.posts.routes import posts
from flask_website.main.routes import main
app.register_blueprint(users)
app.register_blueprint(posts)
app.register_blueprint(main)
return app | [
"noreply@github.com"
] | noreply@github.com |
ca6d3a98f3998152b7b9dda41f3a1cd0d6f4985b | cab2cae48f1dee4a9b8c6b3cdb2d5406ebc5e72f | /dbConfig.py | 92fef633530172299a0a6f48fc0668d49ddf4111 | [] | no_license | CMUChimpsLab/python_static_analyzer | 4961fcb92150bbe390b7aca4ace8a2dc4e9df7b7 | 7c562f62a39a3392814467f8d7d20bee6ea5883b | refs/heads/master | 2021-01-20T13:48:58.447211 | 2018-06-12T14:52:07 | 2018-06-12T14:52:07 | 15,215,146 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | #Do not commit login info to github !!
#There is security risk to leak the ip address of our mongodb server
HOSTNAME = "xxx.xxx.xxx.xxx"
USERNAME = "xxxxx"
PASSWORD = "xxxxxxxxx"
| [
"lsupperx@gmail.com"
] | lsupperx@gmail.com |
2f03740dce263ba7d322d9175165a19213c55b8e | e5859e6766bc9a76e84312adad7c10d575e735c9 | /SOURCE/Current-Development-Code/swift/swift/oracle_plus/utility.py | 4afc222fbdfa28a77d2e6ebdcb6714b1bcc67c3f | [
"Apache-2.0"
] | permissive | gayana06/Thesis | 8e142d8c6697c18c5c5f99a27fafbc6dfd2b0f96 | e287fdf6b963e113c23e09fc6564e64aa9dad45a | refs/heads/master | 2021-01-10T11:21:17.700390 | 2015-05-23T21:38:34 | 2015-05-23T21:38:34 | 36,142,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | __author__ = 'Gayana'
from datetime import datetime
from swift.oracle_plus.config import is_adaptive_mode,log_path,PERFORMANCE_PATH,QUORUM_MAP_PATH,CASE_ID,PROCESSED_LOG_PATH,SENT_LOG_PATH,ENABLE_LOGS
def Is_Adaptive_Mode():
return is_adaptive_mode
def get_current_datetime():
return str(datetime.now())
def log_sent_messages(message):
if ENABLE_LOGS:
with open(SENT_LOG_PATH, "a") as tran_file:
tran_file.write("Message = "+ str(message)+" --- at--- "+get_current_datetime()+"\n")
def log_processed_messages(message):
if ENABLE_LOGS:
with open(PROCESSED_LOG_PATH, "a") as tran_file:
tran_file.write("Message = "+ str(message)+" --- at--- "+get_current_datetime()+"\n")
def log_query(qrery):
with open("/home/ubuntu/ml_q.txt", "a") as tran_file:
tran_file.write(qrery+"\n")
def log_oracle_plus(message):
if ENABLE_LOGS:
with open(log_path, "a") as tran_file:
tran_file.write("Message = "+ str(message)+" --- at--- "+get_current_datetime()+"\n")
def log_performace(read, write,read_count,write_count,read_avg_duration,write_avg_duration,replied_get,replied_put,write_quorum):
with open(PERFORMANCE_PATH, "a") as tran_file:
tran_file.write("--------------------------------------------------------ZKZ\n")
#tran_file.write("||CASE-ID ="+str(CASE_ID)+"|| --- at--- "+get_current_datetime()+"\n")
#tran_file.write("||write_quorum ="+ str(write_quorum)+"|| --- at--- "+get_current_datetime()+"\n")
#tran_file.write("||received_gets ="+ str(read_count)+"|| --- at--- "+get_current_datetime()+"\n")
#tran_file.write("||received_puts ="+ str(write_count)+"|| --- at--- "+get_current_datetime()+"\n")
#tran_file.write("||get_avg_latency ="+ str(read_avg_duration)+"|| --- at--- "+get_current_datetime()+"\n")
#tran_file.write("||put_avg_latency ="+ str(write_avg_duration)+"|| --- at--- "+get_current_datetime()+"\n")
#tran_file.write("||replied_gets ="+ str(replied_get)+"|| --- at--- "+get_current_datetime()+"\n")
#tran_file.write("||replied_puts ="+ str(replied_put)+"|| --- at--- "+get_current_datetime()+"\n")
tran_file.write("Read Tpt="+ str(read)+" Write Tpt="+str(write)+"|| --- at--- "+get_current_datetime()+"\n")
tran_file.write("ZKZ--------------------------------------------------------\n\n")
def log_quorum_map(message):
#if ENABLE_LOGS:
with open(QUORUM_MAP_PATH, "a") as tran_file:
tran_file.write("--------------------------------------------------------\n")
tran_file.write("Quorum Map = "+ str(message)+" --- at--- "+get_current_datetime()+"\n")
tran_file.write("--------------------------------------------------------\n\n") | [
"gayanachandrasekara@gmail.com"
] | gayanachandrasekara@gmail.com |
1f2b03d1b4506d5cf644d64178ff55f7ec29716a | 97e1c6bf888cf701dd64b03b7667ff96e1ca9d46 | /Docker-Django/app/env/lib/python3.8/site-packages/ode_composer/dictionary_builder.py | 00441add9ede35f9373f45e95539b8e611716942 | [] | no_license | samumartinf/docker-django-ode | 4bd8093926e69fb33fe8b8a59e00216baf0478fd | 3dd4ee2a23c0cccadf3c355788b8ecc1c8145412 | refs/heads/main | 2023-01-22T10:16:17.124097 | 2020-12-03T20:59:44 | 2020-12-03T20:59:44 | 314,359,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,555 | py | import numpy as np
from typing import List, Dict
from .util import MultiVariableFunction
from itertools import combinations_with_replacement
from sympy import latex
class DictionaryBuilder(object):
def __init__(self, dict_fcns: List[str]):
self.dict_fcns: List[MultiVariableFunction] = list()
self.regressor_mtx = None
for d_f in dict_fcns:
self.add_dict_fcn(d_f)
def add_dict_fcn(self, d_f: str):
dict_fcn = MultiVariableFunction.create_function(
rhs_fcn=d_f, parameters={}, weight=1.0
)
self.dict_fcns.append(dict_fcn)
def evaluate_dict(self, input_data: Dict) -> np.ndarray:
"""Evaluates the symbolic expressions stored in the dictionary with input data.
The evaluated dictionary, referred to as regressor matrix attribute, is returned."""
reg_mtx = []
for idx, d_fcn in enumerate(self.dict_fcns):
reg_mtx.append(
d_fcn.evaluate_function(measurement_data=input_data)
)
# each dictionary function's weight gets a parameter name
d_fcn.constant_name = f"p{idx+1}"
self.regressor_mtx = np.transpose(np.vstack(reg_mtx))
return self.regressor_mtx
@classmethod
def from_mak_generator(
cls, number_of_states: int, max_order: int = 2, number_of_inputs=0
):
"""Build a dictionary with massaction kinetic terms.
based on the number of states and the maximum order (or chemical complex size)
this function generates all the possible polynomial terms.
>>> db = DictionaryBuilder.from_mak_generator(number_of_states=2, max_order=2)
>>> str(db)
'x1*x1 | x1*x2 | x2*x2'
>>> db = DictionaryBuilder.from_mak_generator(number_of_states=2, max_order=2, number_of_inputs=1)
>>> str(db)
'x1*x1 | x1*x2 | u1*x1 | x2*x2 | u1*x2 | u1*u1'
Args:
number_of_states: number of states the model has, e.g. two means (x_1,x_2)
max_order: the maximum number of states in polynomial term, e.g. max_order is three means x_1*x_2^2, in a two state system
number_of_inputs: that are added to the dictionary function (as massaction kinetics terms)
Returns: DictionaryBuilder object
"""
if number_of_states < 1:
raise ValueError("Model has to have at least non-state")
if max_order < 1:
raise ValueError("The max_order has to be at least one")
if number_of_inputs < 0:
raise ValueError("The number of inputs cannot be negative")
states = []
for s in range(1, number_of_states + 1):
states.append(f"x{s}")
if number_of_inputs != 0:
for i in range(1, number_of_inputs + 1):
states.append(f"u{i}")
comb = combinations_with_replacement(states, max_order)
mak_dictionary = []
for c in comb:
mak_dictionary.append("*".join(c))
return cls(dict_fcns=mak_dictionary)
@classmethod
def from_hill_generator(
cls, state_variable, Km_range, cooperativity_range
):
term_list = []
for Km in Km_range:
for n in cooperativity_range:
term_list.append(
f"{state_variable}^{n}/({Km} + {state_variable}^{n})"
)
return cls(dict_fcns=term_list)
@classmethod
def from_dict_fcns(cls, dict_fcn):
# TODO change the __init__ to accept MultiVariableFunction as a dictionary
instance = cls(dict_fcns=[])
instance.dict_fcns = dict_fcn
return instance
def __str__(self):
"""Returns the string representation of dictionary functions"""
return " | ".join([str(df) for df in self.dict_fcns])
def print_dictionary(self, latex_format=False):
ss = []
line_width = 10
max_width = max(
[len(str(df.symbolic_expression)) for df in self.dict_fcns]
)
s = []
s2 = []
if latex_format:
sep = " & "
new_line = "\\\ "
columns = "|c" * line_width + "|"
preamble = (
"\\begin{center}\\begin{tabular}{ %s } \\hline\n" % columns
)
postamble = "\n \\end{tabular}\\end{center}"
else:
sep = " | "
new_line = "\n"
preamble = ""
postamble = ""
for idx, df in enumerate(self.dict_fcns):
s.append(f"{df.constant_name:<{max_width}}")
s2.append(f"${str(latex(df.symbolic_expression)):<{max_width}}$")
if (idx + 1) % line_width == 0:
ss.append(s)
ss.append(s2)
s = []
s2 = []
else:
ss.append(s)
ss.append(s2)
print(preamble)
for idx, row in enumerate(ss):
if (idx + 1) % 2 == 0:
hline = "\hline"
else:
hline = ""
print(sep.join(row) + new_line + hline)
print(postamble)
def __add__(self, other):
"""Adds to DictionaryBuilder instances together"""
if len(self.dict_fcns) == 0 or len(other.dict_fcns) == 0:
raise ValueError("Dictionary cannot be empty!")
# TODO ZAT: change it to chain or extend
return DictionaryBuilder.from_dict_fcns(
self.dict_fcns + other.dict_fcns
)
def __len__(self):
return len(self.dict_fcns)
| [
"samumartinfrias@gmail.com"
] | samumartinfrias@gmail.com |
f101d81ec1eb6d47cd3d9ad9f6b421ad77b7f6ee | 7195f0cc2615c094d1a9c187a67948dfb2bbff25 | /accounts/urls.py | 23fa5bf31a145f3988094db9c90fdf78e848c77b | [] | no_license | dhirajkumar2020/makemytrip | 7ceae2c60175efc5e78d4ffe309d71eeeb36e31a | 7ef571b65117a57215d2d30d9f9c13420fa185e0 | refs/heads/master | 2020-09-17T00:09:04.716126 | 2019-11-26T08:08:31 | 2019-11-26T08:08:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path("register", views.register,name="register"),
path("login", views.login,name="login"),
path("logout", views.logout,name="logout"),
]
| [
"dhiraj.kumar@nescode.com"
] | dhiraj.kumar@nescode.com |
8c1958a090bf6f112c785c4943e5b48ff31e4d78 | e278302c3faf1ef0686de771313e1a16256791ff | /csdn_blog_data/x.py | 19e0d1c92320434369e63498b95bd092c3c184a6 | [] | no_license | rflin/python-spider | 45ea56b70251589daf157ad3f37b2485150f6321 | 61aa27423e457340169ed337cc47777131f1300f | refs/heads/master | 2021-09-04T16:19:19.785711 | 2018-01-20T07:46:52 | 2018-01-20T07:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import urllib.request
import re,os
import time
import matplotlib.pyplot as plt
name = 'gl486546'
CSDN_blog_url = 'http://blog.csdn.net/' + name
def get_maxlistNum():
rps = urllib.request.urlopen(CSDN_blog_url)
html = rps.read().decode('utf-8')
x = re.findall(r'/article/list/([0-9]+)">尾页</a>',html)
if x:
return int(x[0])
print('not found\n')
return 1
def fetch_data():
num = get_maxlistNum()
prefix_url = 'http://blog.csdn.net/'+ name +'/article/list/'
mylist = []
for i in range(1,num+1):
url = prefix_url + str(i)
rps = urllib.request.urlopen(url)
html = rps.read().decode('utf-8')
x = re.findall(r'<span class="link_title"><a href="/.+/article/details/(.+)">\s*([\s\S]*?)\s*</a>[\s\S]+?title="阅读次数">阅读</a>\((.+)\)',html)
for e in x:
li = list(e);
li[1] = re.sub(r'<.+?>(.+)</.+?>\s+',' ',li[1])
mylist.append(li)
file_name = time.strftime('%Y-%m-%d-%H',time.localtime(time.time())) + '.txt'
mylist.sort(key=lambda x:x[0])
print(len(mylist))
with open(file_name,'w',encoding='utf-8') as f:
for e in mylist:
f.write(str(e)+'\n')
os.startfile(file_name)
fetch_data()
| [
"noreply@github.com"
] | noreply@github.com |
af952cafe9b7c8cd011f7a54506bc85501ad2fe1 | 933bf4d1915983ec36589a75635ecbf5b153a943 | /Chapter 03/FileCode.py | b1717c5c10884b47cb7587622b2e0fa8fbb7b4a7 | [
"MIT"
] | permissive | smartdong/PythonPractise | f3ccaf709dbf0f7c93710842e6d494563fdac558 | e1fe421b24d7ec8b26d5e34f70f2692ce825e967 | refs/heads/master | 2021-01-18T22:13:06.703189 | 2016-11-12T02:54:22 | 2016-11-12T02:54:22 | 69,237,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | #file = open("data2.text", "w")
#file.write("Hello Python\n")
#file.write("This is a line\n")
#file.close()
#
#text_lines = [
# "Chapter 3\n",
# "Sample text data file\n",
# "This is the third line of text\n",
# "The fourth line looks like this\n",
# "Edit the file with any text editor\n" ]
#
#file = open("data.txt", "w")
#file.writelines(text_lines)
#file.close()
#
#file = open("data.txt", "r")
#all_data = file.readlines()
#print(all_data)
#file.close()
#
#print("Lines: ", len(all_data))
#for line in all_data:
# print(line.strip())
import struct
file = open("binary.dat", "wb")
for n in range(1000):
data = struct.pack("i", n)
file.write(data)
file.close()
file = open("binary.dat", "rb")
size = struct.calcsize("i")
bytes_read = file.read(size)
while bytes_read:
value = struct.unpack("i", bytes_read)
value = value[0]
print(value,end=" ")
bytes_read = file.read(size)
file.close()
| [
"yangxudongsmart@gmail.com"
] | yangxudongsmart@gmail.com |
bca46c19a4f931e51af14f0a270d2dabae403505 | 1f24127dd3647976bc713efa68cdfcacdb8f84d1 | /products.py | 9146a23f0558d92338cc8845681f31f0c2a00643 | [] | no_license | rexlinster/PYLN | f38a792eb82ddfb76a1003c58327f6ea4d8056b6 | cb74507eed0277fdd91a45d1d5cd24530c4c5f26 | refs/heads/main | 2023-03-01T08:29:04.358222 | 2021-02-09T07:15:21 | 2021-02-09T07:15:21 | 325,210,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | products = []
product = ''
while product != 'q' :
product = input("你要買啥? ")
if product == "q" :
break
price = float(input("價格是? "))
amount = float(input("數量是? "))
total = str(price * amount)
products.append([product,total])
with open("product.csv","w") as f :
f.write('產品,價格\n')
for p in products :
f.write("%s\n" %p)
print(products) | [
"rexlinster@gmail.com"
] | rexlinster@gmail.com |
9652f01eca6829045b4e09504f32936418c55ce6 | 51a80d87fd9f009d8b97573288d6ebd0b420b995 | /blahblah/scribble.py | a02548edc7528ac110dd0856d04c0b764824e178 | [] | no_license | kim-ellen821/mycoding | 02858509c9bf0892d81d73f01e838c762644c3cf | 20c03c529d07f0a776fd909a60cabaca3a8fcc34 | refs/heads/master | 2023-08-15T08:06:50.872463 | 2021-04-01T09:19:43 | 2021-10-13T13:02:25 | 353,621,725 | 0 | 0 | null | 2021-04-01T09:19:44 | 2021-04-01T08:00:52 | Python | UTF-8 | Python | false | false | 1,099 | py | # #2
# h ,w = 4, 4
# blocks = [[0,0], [3,1], [1,0], [0,0], [2,2], [2,3], [3,1]]
# map = [0] * 100001
# answer = []
# def solution(h, w, blocks):
# check = dict()
# for i in range(len(blocks)):
# s = str(blocks[i][0]) + str(blocks[i][1])
# print(s)
# if s not in check:
# map[blocks[i][1]] += 1
# check[s] = True
# answer.append(w-map[0])
# for i in range(1, h):
# answer.append(w-map[i] + answer[i-1])
# print(answer)
# solution(h, w, blocks)
#1316
#print(ord('a'), ord('z'))
# MAX = int(1e9)
# coins = list(map(int, input().split()))
# amount = int(input())
# dp = [MAX] * (amount + 1)
# dp[0] = 0
# for coin in coins:
# for i in range(coin, amount + 1):
# dp[i] = min(dp[i], dp[i-coin]+1)
# if dp[amount]==MAX:
# print(-1)
# else:
# print(dp[amount])
N, A = map(int, input().split())
val = ""
for i in range(A) :
C, K = input().split()
K = int(K)
val += C * K
for i in range(len(val)) :
if (i + 1) % N == 0 :
print(val[i])
else :
print(val[i], end="")
print() | [
"ellenkim821@gmail.com"
] | ellenkim821@gmail.com |
e6fc546651d2205d4808a4a327045054eda8451d | 7db0883137d119565540f2d071638c4016f39213 | /Note/Project_Play/BaiduBaike/SpiderMan.py | eb7543c12a969940ffb61d81059fa69f378fe5f0 | [] | no_license | PhilHuang-d/python--- | cf22a4cc00d4beaaf75ef7ca87a4c5d31a9d5efe | 152c18f51838ce652b79a0cd24765b1a1c237eee | refs/heads/master | 2021-09-13T05:32:53.754865 | 2018-04-25T13:36:40 | 2018-04-25T13:36:40 | 108,812,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | #coding:utf-8
import DataOutput
import HTMLDownloader
import URLManger
import HtmlParser
class SpiderMan(object):
def __init__(self):
self.manager = URLManger.UrlManager()
self.downloader = HTMLDownloader.HtmlDownloader()
self.parser = HtmlParser.HhmlParser()
self.output = DataOutput.DataOutput()
def crawl(self,root_url):
#添加url入口
self.manager.add_new_url(root_url)
while(self.manager.has_new_url() and self.manager.old_url_size() < 100):
try:
#从url管理器获取新的url
new_url = self.manager.add_new_url()
#html下载器下载网页
html = self.downloader.download(new_url)
#html解析器抽取网页数据
new_urls,data = self.parser.parser(new_url,html)
#将抽取的url添加到url管理器中
self.manager.add_new_urls(new_url)
#数据存储器存储文件
self.output.store_data(data)
print("已经抓取%s个链接")%self.manager.old_url_size()
except Exception,e:
print("抓取失败")
self.output.output_html()
if __name__=="__main__":
spider_man = SpiderMan()
spider_man.crawl("http://baike.baidu.com/view/284853.htm") | [
"32259595+PhilHuang-d@users.noreply.github.com"
] | 32259595+PhilHuang-d@users.noreply.github.com |
61d46884b1498f13a9058ea7eb7389bdad18882e | 0626d664ac306066b3f97914a407cdc618afcce7 | /Setup.py | 1b1932c39f2727c7f08c8ddbd1c5672b81d6ff5c | [
"MIT"
] | permissive | kobejn/Roland-GP-8 | 5b9e5e6c5c2868db11bd1354c6381128da91c929 | 558d1cb547b99b44beb517371f1fc171017e1b31 | refs/heads/main | 2023-09-04T23:26:56.069456 | 2021-10-18T13:45:58 | 2021-10-18T13:45:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | #!/usr/bin/env python
from distutils.core import setup
setup(name='Roland-Gp-8',
version='1.0',
description='Library for inspecting and modifying Roland Gp-8 sysex dump format.',
author='F.G. Robertson',
author_email='me@grantrobertson.com',
url='https://www.github.com/grobertson/Roland-GP-8/',
packages=['RolandGp8'],
)
| [
"me@grantrobertson.com"
] | me@grantrobertson.com |
22788a768aeb91f8b074cef2f964bf056ddc7185 | b27d0135accc27594a7f60efc824960fd53a8fe1 | /check-application.py | 26db8f9b732b2ee2ac4bd28ac40d669a33d9141f | [] | no_license | hendrasaputra8/belajar-py | 61dd89842d24ec955e6b4be86793d36392c289a9 | 1cac8e82a579acdec55fa015df7eefcde6563c0c | refs/heads/master | 2020-04-15T09:44:04.636663 | 2019-01-21T04:37:28 | 2019-01-21T04:37:28 | 164,563,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | import subprocess
rc = subprocess.call(['which', 'htop'])
if rc == 0:
print 'htop installed!'
else:
print 'htop missing in path!'
subprocess.call('/root/scripts/install.sh') # --> path bash bisa taro dimana pun
| [
"hendrasaputra@hendrasaputra.local"
] | hendrasaputra@hendrasaputra.local |
99510d519de2a5ec319d4e7e6655ff6fd749a0c3 | 9b6f26c9ce7771bdfd23e02850824ba0d609e0d1 | /src/deprecated/restore_relative_word_freq.py | 13056f409b303ec34705009bbdaa5d461eb59ed9 | [] | no_license | ReinaKousaka/core | adb182daf04399e946551422ccf1b91d97352b26 | 33a3fa38ad4dcdd54ff583da15dcd67c99ad9701 | refs/heads/master | 2023-04-14T02:39:42.038400 | 2021-04-27T13:54:35 | 2021-04-27T13:54:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,200 | py | import os
from twitterDownloader import TwitterTweetDownloader, TwitterFriendsDownloader
import pymongo
import datetime
from word_frequency import WordFrequency, daterange
from pymongo import MongoClient
from collections import Counter
ds_location = 'mongodb://localhost:27017'
word_frequency = WordFrequency(os.getcwd() + "/../General/ds-init-config.yaml")
client = MongoClient(ds_location)
# get global word freq vector from Thomas's database
db = client['globalData']
collection = db['wordCount']
result = collection.find()
global_word_freq_vector = Counter()
for doc in result:
for word in doc:
if word != '_id':
global_word_freq_vector[word] = doc[word]
# get list of david madras followers from Thomas's code
friend_downloader = TwitterFriendsDownloader(os.getcwd() + "/../General/ds-init-config.yaml")
madras_following_users = friend_downloader.get_friends_by_screen_name("david_madras", 400)
# based on Thomas's user database, calculate the user word frequencies and store in my format
db = client['productionFunction']
collection = db['users']
users_in_db = []
for user_handle in madras_following_users:
result = collection.find({'handle': user_handle})
for doc in result:
if doc['start'] == datetime.datetime(2018, 9, 1, 0, 0, 0) and\
doc['end'] == datetime.datetime(2019, 9, 1, 0, 0, 0) and user_handle == doc['handle']:
words = []
# for tweet_text in doc['tweets']:
# processed_text_list = word_frequency._process_tweet_text(tweet_text)
# words.extend(processed_text_list)
for tweet_text in doc['retweets']:
processed_text_list = word_frequency._process_tweet_text(tweet_text[0])
words.extend(processed_text_list)
# get word frequency
user_word_freq_vector = word_frequency._get_word_frequency_vector(words)
# store in db
word_freq_db = client['WordFreq-Retweets']
user_word_freq_collection = word_freq_db['UserWordFreq']
user_word_freq_collection.insert_one({
'User': user_handle,
'UserWordFreqVector': user_word_freq_vector
})
users_in_db.append(user_handle)
# calculate user relative word freq
word_freq_db = client['WordFreq-Retweets']
user_word_freq_collection = word_freq_db['UserWordFreq']
result = user_word_freq_collection.find()
for doc in result:
user_handle = doc['User']
# get user word freq
user_word_freq_vector = doc['UserWordFreqVector']
user_words_not_in_global = []
relative_word_freq = {}
for word in user_word_freq_vector:
user_word_count = user_word_freq_vector[word]
if user_word_count >= 3:
if word in global_word_freq_vector:
global_word_count = global_word_freq_vector[word]
relative_word_freq[word] = user_word_count / global_word_count
else:
user_words_not_in_global.append(word)
user_word_freq_collection = word_freq_db['UserRelativeWordFreq']
user_word_freq_collection.insert_one({
"User": user_handle,
"RelativeWordFrequency": relative_word_freq,
"UserWordsNotInGlobal": user_words_not_in_global
})
# store the list of users from which we've gotten our relative user freq
db = client['WordFreq-Retweets']
collection = db['Users']
collection.insert_one({
'Users': users_in_db,
'InitialUsersSet': madras_following_users
})
# word_frequency = WordFrequency(os.getcwd() + "/../General/ds-init-config.yaml")
# print(word_frequency._process_tweet_text("thatve thatss yeas"))
#-------------------------------DEBUG INFO--------------------------------------
db = client['globalData']
collection = db['wordCount']
result = collection.find()
global_word_freq_vector = Counter()
for doc in result:
for word in doc:
if word != '_id':
global_word_freq_vector[word] = doc[word]
# f = open('debug2', 'w')
# word_freq_db = client['WordFreq-Test2']
# user_relative_word_freq_collection = word_freq_db['UserRelativeWordFreq']
# user_word_freq_collection = word_freq_db["UserWordFreq"]
# user_to_rwf = {}
# for doc in user_relative_word_freq_collection.find({'User': "JFutoma"}):
# user = doc['User']
# # orig_rwf_vector = Counter(doc['RelativeWordFrequency'])
# dao = NewAlgoClusteringMongoDAO()
# orig_rwf_vector = dao.get_rwf()[user]
# words_not_in_wf_vector = doc['UserWordsNotInGlobal']
# word_count_doc = user_word_freq_collection.find({"User": user})[0]
# user_word_frequency = word_count_doc['UserWordFreqVector']
# # handle words in global wf vector
# for word in words_not_in_wf_vector:
# user_word_count = user_word_frequency[word]
# global_count = global_word_freq_vector[word] if word in global_word_freq_vector else 0
# # print([doc['UserWordFreqVector'][word] for doc in user_word_freq_collection.find() if word in doc['UserWordFreqVector']])
# local_community_count = sum([doc['UserWordFreqVector'][word] for doc in user_word_freq_collection.find() if word in doc['UserWordFreqVector']])
# total_user_word_count = sum([user_word_frequency[word] for word in user_word_frequency])
# user_wf_length = len(user_word_frequency)
# # gwf_len
# rwf = orig_rwf_vector[word]
# f.write("Word: {}\nUser Word Count: {}\nGlobal Word Count: {}\nLocal Community Word Count: {}\nRWF: {}\n\n".format(word, user_word_count, global_count, local_community_count, rwf))
word_freq_db = client['WordFreq-Test2']
user_relative_word_freq_collection = word_freq_db['UserRelativeWordFreq']
user_word_freq_collection = word_freq_db["UserWordFreq"]
new_rwf_collection = word_freq_db['UserRWF']
cache = {}
total_global_count = sum([global_word_freq_vector[word] for word in global_word_freq_vector])
user_to_wcv = {}
for doc in user_relative_word_freq_collection.find():
user = doc['User']
word_count_doc = user_word_freq_collection.find({"User": user})[0]
user_word_count = word_count_doc['UserWordFreqVector']
user_to_wcv[user] = user_word_count
for doc in user_relative_word_freq_collection.find():
user = doc['User']
old_rwf_vector = Counter(doc['RelativeWordFrequency'])
new_rwf_vector = Counter(doc['RelativeWordFrequency'])
words_not_in_wf_vector = doc['UserWordsNotInGlobal']
word_count_doc = user_word_freq_collection.find({"User": user})[0]
user_word_frequency = word_count_doc['UserWordFreqVector']
# need to recompute rwf value
for word in user_word_frequency:
"""
uwf = user_count/total_user_count
gwf = global_count/total_global_count
rwf = uwf/gwf
"""
user_count = user_word_frequency[word]
global_count = global_word_freq_vector[word] if word in global_word_freq_vector else 0
# print([doc['UserWordFreqVector'][word] for doc in user_word_freq_collection.find() if word in doc['UserWordFreqVector']])
total_user_count = sum([user_word_frequency[word] for word in user_word_frequency])
if global_count == 0:
# we can cache local community count
if word not in cache:
# local_community_count = sum([Counter(doc['UserWordFreqVector'])[word] for doc in user_word_freq_collection.find() if word in Counter(doc['UserWordFreqVector'])])
local_community_count = 0
for user_ in user_to_wcv:
u_word_counter = user_to_wcv[user_]
if word in u_word_counter:
local_community_count += u_word_counter[word]
cache[word] = local_community_count
else:
local_community_count = cache[word]
global_count = local_community_count
uwf = user_count / total_user_count
gwf = global_count / total_global_count
rwf = uwf / gwf
new_rwf_vector[word] = rwf
new_rwf_collection.insert_one({
"User": user,
"RelativeWordFrequency": new_rwf_vector,
"UserWordsNotInGlobal": words_not_in_wf_vector
})
| [
"kailong.huang@mail.utoronto.ca"
] | kailong.huang@mail.utoronto.ca |
c72280c1eeaae1f109b80dd090e27597795b4d21 | 8b4fa99b046341d13c1522853fe7a4961043b2b4 | /nutr/migrations/0008_auto_20170603_2037.py | b5d1bf2559667dc720700c6858374a1813100d3e | [] | no_license | mesas998/epa7658577 | 228c3229ab0900942c7b36f47409c159b4017bd8 | 6e3ede9866e8ece5cc9972b73b9e17163c09ef48 | refs/heads/master | 2021-01-25T07:48:43.715268 | 2017-06-12T16:48:18 | 2017-06-12T16:48:18 | 93,660,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-03 20:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nutr', '0007_auto_20170603_2023'),
]
operations = [
migrations.AlterField(
model_name='datsrcln',
name='ndb_no',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='datsrcln',
name='nutr_no',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nutr.NutrDef'),
),
]
| [
"michael.sweeney303@gmail.com"
] | michael.sweeney303@gmail.com |
41e2743e524fdd8b6b96c7526e848d2add4d2802 | 854181e839968c7e9fbc350a880b0c9d1f70dc3a | /search_online.py | 0a2b569e865e2d8782c1a0bed052f5bc2c00c789 | [] | no_license | JiangChSo/2020-Huya-Program-Technical-Challenge | 479f3dec0020e412c088ae322cfe0359cec3db5f | 4b99ac05c405c10b924e429cde631d94a2f0debe | refs/heads/master | 2023-05-23T23:27:15.454882 | 2021-06-13T03:17:28 | 2021-06-13T03:17:28 | 287,489,597 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | import requests
from bs4 import BeautifulSoup
import random
import time
import csv
import jieba.analyse
import jieba
import pypinyin
# 不带声调的(style=pypinyin.NORMAL)
def pinyin(word):
s = ''
for i in pypinyin.pinyin(word, style=pypinyin.NORMAL):
s += ''.join(i)
return s
list_content = []
list_t = []
#代理
def get_header():
header1 = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36"
}
header2 = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3868.400"
}
header3 = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
header_list = [header1, header2, header3]
index = random.randint(0, 1)
return header_list[index]
pass
# 从微博网站获取数据
def get_data(wordlist):
#每一次爬取开始标记为0 并且重新计入表头['id', 'TopKeywords', 'context'] 不影响模糊匹配查询
j = 0
fp = open('weibo_search_online.csv', 'a', encoding='utf_8_sig', newline='')
writer = csv.writer(fp) # 获取文件“写笔”
headers = ['id', 'TopKeywords', 'context']
for i in range(10):
headers.append('keyword' + str(i + 1))
writer.writerow(headers) # 写入一行记录
for word in wordlist:
print("正在爬取词:"+word)
url = "https://s.weibo.com/weibo?q="+ word +"&wvr=6&b=1&Refer=SWeibo_box"
newcontent = requests.get(url=url, headers=get_header())
newcontent.encoding = "utf-8"
soup_list = BeautifulSoup(newcontent.text, 'html.parser')
for news in soup_list.find_all("p", class_="txt"):
j = j + 1
# print(str(j)) 调试查看进度
context = news.text
# print(context)
kWords = jieba.analyse.extract_tags(context, topK=10, withWeight=False, allowPOS=('n'))
#print(news)
topical_subject = word
#print(topical_subject)
values = [j, topical_subject, context]
for kword in kWords:
kword = pinyin(kword) # 将关键词转换为拼音
values.append(kword)
writer.writerow(values)
fp.close()
# print(list_content)
#print("end")
#可直接在列表中添加词语或者句子,即将对应的搜索数据存入
'''
前三个search均为匹配则执行search_online.py中的extendcsv
'''
def extendcsv(string):
kWords = jieba.analyse.extract_tags(string, topK=10, withWeight=False, allowPOS=('n'))
get_data(kWords)
print("爬取结束") | [
"954237332@qq.com"
] | 954237332@qq.com |
916ef8e5c0cb8e838afb8b7fd007599ff40dee2b | 8b0593977cf5162ec511229199f9cd024e874f46 | /mysite/settings.py | b738ff7cd86bd272d53d2c1fa3bc0c8cc4085a92 | [] | no_license | anaismarquina/my-first-blog | 292eae55709031850657a49c550b949fc522f523 | 0c6c3b6f92635343b0e3dbd00c397946191bf2fd | refs/heads/master | 2020-04-08T12:50:09.599853 | 2018-12-01T22:45:17 | 2018-12-01T22:45:17 | 139,491,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!km&qui2(h6j1-6ucscs_-ycrd#&z*q=@fqtp3f9*ly$2yrrrv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'anaismarquinar.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Chile/Continental'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"anaismarquinar@gmail.com"
] | anaismarquinar@gmail.com |
c8e16252fb1427ba3ea9dd2778e24d30b756a85c | 738806f165e448b6ca0b7f6324da60a5bceea294 | /pset6.2/caesar.py | 3560e5f8c277a213b3af61e3a4df072c7137d89f | [] | no_license | rajdeep-biswas/allofc50-iguess | fa704968f21c79e5d0e7163a924090954f47e6d2 | 0285fdf59a82348895246dcd37e607634d7281a6 | refs/heads/master | 2022-12-13T02:05:04.720441 | 2020-09-07T14:39:35 | 2020-09-07T14:39:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | from sys import argv
if len(argv) != 2:
exit("invalid input")
key = int(argv[1])
arr = input("plaintext: ")
print("ciphertext: ", end="")
for i in range(len(arr)):
if ord(arr[i]) >= ord('a') and ord(arr[i]) <= ord('z'):
print(chr(97 + ((ord(arr[i]) - 97 + key) % 26)), end="");
elif ord(arr[i]) >= ord('A') and ord(arr[i]) <= ord('Z'):
print(chr(65 + ((ord(arr[i]) - 65 + key) % 26)), end="");
else:
print(arr[i], end="")
print() | [
"rajdeep.biswas@sap.com"
] | rajdeep.biswas@sap.com |
36fbcc1dbeea8d7452334d443e9dcc52f980366a | bf0d62460cbfb9291dffda76825ec06bfdbe8c98 | /higher_moments/test_update.py | 50d0312f13a0d76e26c31a5a14cd7c85496b2a17 | [] | no_license | contagon/deepfilters | 5cdabc9f7cddbb5f8750553d0040701b6e78b14a | 97be59d0896d7dfa0a618f9649ae634b455d3e63 | refs/heads/master | 2023-02-19T02:10:07.519107 | 2021-01-12T02:09:54 | 2021-01-12T02:09:54 | 307,907,212 | 1 | 0 | null | 2020-12-16T19:35:26 | 2020-10-28T04:34:17 | Python | UTF-8 | Python | false | false | 1,296 | py | import torch
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from networks import *
import sys
# load data
n = 9400
data = OdometryData("exact_data_4.pkl", split=8000)
all = slice(None,None)
# make models
n_m = 3+6+10+15
pnn = Network(n_m, n_m-3, 64, 12, *data.train_predict(all)).cuda()
x1, x2, y = data.train_update(all)
x = torch.cat([(x1[:,:,0,:]+x1[:,:,1,:])/2, x2], 2)
unn = Network(n_m-3+2, n_m, 64, 12, x, y).cuda()
# restore weights
models = torch.load(sys.argv[1])
pnn.load_state_dict(models['pnn'])
unn.load_state_dict(models['unn'])
# push through model
innov_train, moments_train, y_train = data.train_update(n)
y_model = torch.zeros_like(y_train)
for i in range(innov_train.shape[1]):
mask = ~torch.isnan(innov_train[:,i,:]).byte().any(axis=1).bool().detach()
x_train = torch.cat([innov_train[:,i,:], moments_train], 1)
y_model[mask] += unn(x_train[mask], norm_out=False)
i = 45
print("OG", x_train[i])
print("Expected", y_train[i])
print("Got", y_model[i])
s_pf = y_train.cpu().detach().numpy()
s_nn = y_model.cpu().detach().numpy()
t = np.arange(200)
fig, axs = plt.subplots(3,3)
axs = axs.flatten()
for i, ax in enumerate(axs):
ax.plot(t, s_pf[:,i], label="PF")
ax.plot(t, s_nn[:,i], label="NN")
plt.legend()
plt.show() | [
"contagon6@gmail.com"
] | contagon6@gmail.com |
ed6631bc58999a9ae4ed3f03dec516046371b6bd | 1dbe05265fd89f5983eafd74681cb4d80540b02d | /Gps_poller.py | 6ee68900b12b9ec47bc77131b513a636a47adec7 | [] | no_license | GwilymNewton/MoonPi | 2d15576a2ee4b50d25a546f68064185ffe86ddb7 | c038829a63384e707b5ca98d94cc6d232fd46b8e | refs/heads/master | 2020-04-11T07:53:30.543672 | 2015-03-06T20:36:17 | 2015-03-06T20:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | #! /usr/bin/python
# Author: Gwilym Newton
from gps import **
import time
import threading
class GpsPoller(threading.Thread):
gpsd = None #seting the global variable
polling_time = 5
def __init__(self):
threading.Thread.__init__(self)
global gpsd #bring it in scope
gpsd = gps(mode=WATCH_ENABLE) #starting the stream of info
self.current_value = None
self.running = True #setting the thread running to true
def run(self):
global gpsd
while gpsp.running:
time.sleep(5)
gpsd.next() #this will continue to loop and grab EACH set of gpsd info to clear the buffer
def get_GPS_alt()
return gpsd.fix.altitude
def get_GPS_lat()
return gpsd.fix.latitude
def get_GPS_long()
return gpsd.fix.longitude
def get_GPS_climb()
return gpsd.fix.climb
def get_GPS_speed()
return gpsd.fix.speed
def get_polling_time()
return polling_time
def set_polling_ti(set)
polling_time=set
def stop_poller() :
print "\nKilling Thread..."
self.running = False
self.join() # wait for the thread to finish what it's doing
print "Done.\nExiting." | [
"gwilym.newton@dur.ac.uk"
] | gwilym.newton@dur.ac.uk |
03aad395ebb9bef895b83c7e331bb6ec2a9c6a54 | 7393f7e49aa65e4aa8dd0efb1d4fa82aa706fa0e | /environment.py | 766826e4095e1a6462d779d868b0ad6fb0f62e80 | [] | no_license | hrabit64/Maze_DQN_reinforce_learning | 0759086d679981a5c8560458657465cf83146d00 | b816916a9fbb8403802f8111c2574c2f8bb84f87 | refs/heads/master | 2023-06-24T05:42:18.342534 | 2021-07-19T08:04:44 | 2021-07-19T08:04:44 | 387,384,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,947 | py | import tkinter as tk
import numpy as np
import time
import pyautogui
from PIL import Image
class Env(tk.Tk):
def __init__(self, render_speed=0,width = 10,height = 10):
super(Env, self).__init__()
self.render_speed = render_speed
self.epi = 0
self.steps = 1
self.width = width
self.height = height
self.action_space = [0,1,2,3]
self.action_size = len(self.action_space)
self.player_loc = [1,1]
#빈 곳 0
#벽 1
#캐릭터 2
#목표 3
#함정 4
self.game_board = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 0, 0, 0, 4, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 0, 4, 1],
[1, 0, 1, 0, 4, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 3, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
self.reward = 0
self.images = self._load_images()
self.canvas = self._build_canvas()
def _load_images(self):
character = tk.PhotoImage(file = "./sprite/character.png")
trap = tk.PhotoImage(file = "./sprite/box.png")
mark = tk.PhotoImage(file ="./sprite/mark.png")
block = tk.PhotoImage(file ="./sprite/block.png")
trap_with_character = tk.PhotoImage(file ="./sprite/box and mark.png")
mark_with_character = tk.PhotoImage(file ="./sprite/character and mark.png")
return block, character, mark, trap, trap_with_character,mark_with_character
def _build_canvas(self):
pixel = 32
canvas = tk.Canvas(self,bg = 'black',height = pixel*self.height+50,width = pixel*self.width)
# for i in range(0, pixel*self.height, 32):
# x0, y0, x1, y1 = i, 0, i, pixel*self.height
# canvas.create_line(x0, y0, x1, y1,fill = "white")
#
# for j in range(0, pixel*self.height, 32):
# x0, y0, x1, y1 = 0, j, pixel*self.height, j
# canvas.create_line(x0, y0, x1, y1,fill = "white")
for j in range(self.height):
for i in range(self.width):
k = self.game_board[j][i]
x = i*32
y = j*32
if k == 1:
canvas.create_image(x, y, anchor="nw", image=self.images[0])
elif k == 2:
canvas.create_image(x, y, anchor="nw", image=self.images[1])
elif k == 3:
canvas.create_image(x, y, anchor="nw", image=self.images[2])
elif k == 4:
canvas.create_image(x, y, anchor="nw", image=self.images[3])
elif k == 5:
canvas.create_image(x, y, anchor="nw", image=self.images[4])
elif k == 6:
canvas.create_image(x, y, anchor="nw", image=self.images[5])
texts = str(self.epi)+"episodes"
canvas.create_text(175,330,text = texts,font=('Helvetica',10),fill = "white")
canvas.pack()
return canvas
def reset(self,epi):
self.epi = epi
self.steps = 1
self.action_space = [0, 1, 2, 3]
self.action_size = len(self.action_space)
# 빈 곳 0
# 벽 1
# 캐릭터 2
# 목표 3
# 함정 4
self.game_board = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 0, 0, 0, 4, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 0, 4, 1],
[1, 0, 1, 0, 4, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 3, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
self.reward = 0
self.player_loc = [1, 1]
self.canvas.destroy()
self.canvas = self._build_canvas()
self.render()
def render(self):
time.sleep(self.render_speed)
self.update()
def set_up(self,dir):
player_x = self.player_loc[0]
player_y = self.player_loc[1]
# 0 1 2 3
#"l", "u", "d", "r"
# 빈 곳 0
# 벽 1
# 캐릭터 2
# 목표 3
# 함정 4
# 함정과 겹친 캐릭터 5
# 목표와 겹친 캐릭터 6
before_x = player_x
before_y = player_y
if dir == 0:
after_x = player_x - 1
after_y = player_y
elif dir == 1:
after_x = player_x + 1
after_y = player_y
elif dir == 2:
after_x = player_x
after_y = player_y + 1
elif dir == 3:
after_x = player_x
after_y = player_y - 1
# check mark
if self.game_board[after_y][after_x] == 3:
self.game_board[after_y][after_x] = 6
self.game_board[before_y][before_x] = 0
self.reward += 10
self.player_loc = [after_x, after_y]
return True
# check wall
elif self.game_board[after_y][after_x] == 1:
self.reward -= 5
# check trap
elif self.game_board[after_y][after_x] == 4:
self.game_board[after_y][after_x] = 5
self.game_board[before_y][before_x] = 0
self.player_loc = [after_x, after_y]
self.reward -= 5
elif self.game_board[before_y][before_x] == 5:
self.game_board[after_y][after_x] = 2
self.game_board[before_y][before_x] = 4
self.player_loc = [after_x, after_y]
else:
self.game_board[after_y][after_x] = 2
self.game_board[before_y][before_x] = 0
self.player_loc = [after_x, after_y]
return False
def get_state(self):
self.takeScreenshot()
img = Image.open('state.png')
img = img.convert("L")
img = img.resize((324//4, 324//4))
img.save('test.png')
data = np.asarray(img)
data = np.resize(data,(81,81,1))
data = data.astype(float) / 255
return data
def takeScreenshot(self):
x, y = self.canvas.winfo_rootx(), self.canvas.winfo_rooty()
w, h = self.canvas.winfo_width(), self.canvas.winfo_height()-50
pyautogui.screenshot('state.png', region=(x, y, w, h))
def step(self,action):
done = self.set_up(action)
self.steps += 1
self.canvas.destroy()
self.canvas = self._build_canvas()
self.render()
return self.get_state(), self.reward , done
| [
"hzser123@gmail.com"
] | hzser123@gmail.com |
0afd72284979030a577a6afb50f3516967bc088b | 7e8ea701c075f06025fcdf2df39a7ac81e2d35f6 | /consulting/models/inventory.py | d5316ce419257e89e0670fa0d21212a5383cad55 | [] | no_license | goryfigment/consulting | 945dde852624f855d8bc5a4098182cb6eca3a043 | d084b205ea1d0bc184e3f49dc2abab245964e9a7 | refs/heads/master | 2021-06-23T06:13:24.454143 | 2019-12-12T01:35:27 | 2019-12-12T01:35:27 | 225,676,883 | 0 | 0 | null | 2021-06-10T22:21:21 | 2019-12-03T17:21:13 | CSS | UTF-8 | Python | false | false | 7,075 | py | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class AuthGroup(models.Model):
name = models.CharField(unique=True, max_length=80)
class Meta:
managed = False
db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
group_id = models.IntegerField()
permission_id = models.IntegerField()
class Meta:
managed = False
db_table = 'auth_group_permissions'
unique_together = (('group_id', 'permission_id'),)
class AuthPermission(models.Model):
name = models.CharField(max_length=255)
content_type_id = models.IntegerField()
codename = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'auth_permission'
unique_together = (('content_type_id', 'codename'),)
class AuthUser(models.Model):
password = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.IntegerField()
username = models.CharField(unique=True, max_length=150)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.CharField(max_length=254)
is_staff = models.IntegerField()
is_active = models.IntegerField()
date_joined = models.DateTimeField()
class Meta:
managed = False
db_table = 'auth_user'
class AuthUserGroups(models.Model):
user_id = models.IntegerField()
group_id = models.IntegerField()
class Meta:
managed = False
db_table = 'auth_user_groups'
unique_together = (('user_id', 'group_id'),)
class AuthUserUserPermissions(models.Model):
user_id = models.IntegerField()
permission_id = models.IntegerField()
class Meta:
managed = False
db_table = 'auth_user_user_permissions'
unique_together = (('user_id', 'permission_id'),)
class Boss(models.Model):
business_id = models.IntegerField(unique=True)
settings_id = models.IntegerField(unique=True)
class Meta:
managed = False
db_table = 'boss'
class Business(models.Model):
name = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'business'
class BusinessStores(models.Model):
business_id = models.IntegerField()
store_id = models.IntegerField()
class Meta:
managed = False
db_table = 'business_stores'
unique_together = (('business_id', 'store_id'),)
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField()
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
action_flag = models.SmallIntegerField()
change_message = models.TextField()
content_type_id = models.IntegerField(blank=True, null=True)
user_id = models.IntegerField()
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
class ItemLog(models.Model):
action = models.CharField(max_length=255)
operation = models.CharField(max_length=255)
item_name = models.CharField(max_length=255)
change = models.CharField(max_length=255)
previous_value = models.CharField(max_length=255)
date = models.IntegerField()
details = models.TextField() # This field type is a guess.
business_id = models.IntegerField(blank=True, null=True)
store_id = models.IntegerField(blank=True, null=True)
user_id = models.IntegerField()
class Meta:
managed = False
db_table = 'item_log'
class Settings(models.Model):
start_time = models.IntegerField()
date_range = models.CharField(max_length=15)
ip_address = models.CharField(max_length=100)
header = models.TextField() # This field type is a guess.
footer = models.TextField() # This field type is a guess.
class Meta:
managed = False
db_table = 'settings'
class Store(models.Model):
name = models.CharField(max_length=100)
tax = models.CharField(max_length=12)
link_columns = models.TextField() # This field type is a guess.
include_columns = models.TextField() # This field type is a guess.
columns = models.TextField() # This field type is a guess.
picture_column = models.CharField(max_length=100)
inventory = models.TextField() # This field type is a guess.
order_by = models.CharField(max_length=100)
reverse = models.IntegerField()
transaction_filter = models.TextField() # This field type is a guess.
class Meta:
managed = False
db_table = 'store'
class Transaction(models.Model):
items = models.TextField() # This field type is a guess.
payment_type = models.CharField(max_length=255)
tax = models.CharField(max_length=12)
subtotal = models.CharField(max_length=255)
memo = models.CharField(max_length=255)
date = models.IntegerField()
boss_id = models.IntegerField()
seller_id = models.IntegerField()
store_id = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'transaction'
class InventoryUser(models.Model):
    # Account row mirroring Django auth fields (password, last_login,
    # is_staff, is_superuser) plus password-reset bookkeeping.
    password = models.CharField(max_length=128)
    last_login = models.DateTimeField(blank=True, null=True)
    email = models.CharField(unique=True, max_length=255, blank=True, null=True)
    username = models.CharField(unique=True, max_length=15)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    reset_link = models.CharField(max_length=255, blank=True, null=True)
    reset_date = models.IntegerField(blank=True, null=True)
    is_staff = models.IntegerField()
    is_superuser = models.IntegerField()
    boss_id = models.IntegerField(unique=True, blank=True, null=True)
    class Meta:
        managed = False  # legacy table: schema is owned outside Django migrations
        db_table = 'user'
| [
"goryfigment@gmail.com"
] | goryfigment@gmail.com |
ab7703e60847420b5edf20bb002a401ece929af8 | 1db25f324f7b247724a0ec9348cb4f0bd250f2ed | /draw_img.py | e1c8675603a77812b39dc7b726367a9c944a4b5f | [
"MIT"
] | permissive | Animmus/GGanalysis | 135a0381e35437f3f6fb0be2b7330ad35a7668b1 | 68fa70d56e3d5247098c2248b193ff78a9071746 | refs/heads/main | 2023-07-05T07:56:53.937318 | 2021-08-21T14:58:04 | 2021-08-21T14:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,831 | py | from GGanalysislib.UpItem.Up5starCharacter import Up5starCharacter
import GGanalysislib
import matplotlib.cm as cm
import numpy as np
# matplot colormap https://matplotlib.org/stable/tutorials/colors/colormaps.html
# Global switches shared by every chart drawn below.
img_dpi = 300  # output resolution for saved figures
save_img = 1  # 1 = write each chart to disk
show_img = 0  # 1 = display each chart interactively
en_switch = 0  # 1 = render titles/annotations in English instead of Chinese
a = GGanalysislib.DrawTransCDF()  # shared plot helper, reconfigured per chart
text_model = "采用bilibili.com/read/cv10468091模型\n"
if en_switch:
    text_model = "Gacha model is from www.bilibili.com/read/cv10468091\n"
    a.xlabel = "Probability"
    a.ylabel = "Wishes"
    a.auther = "@一棵平衡树OneBST"
    a.en_switch = 1
from matplotlib.font_manager import FontProperties # font manager
a.mark_font = FontProperties(fname=r"./fonts/SourceHanSansSC-Bold.otf", size=8)
# Character event wish: rate-up 5-star character.
calc_obj = GGanalysislib.Up5starCharacter()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 7
a.item_type = "Character"
a.line_colors = cm.Blues(np.linspace(0.5, 0.9, a.item_num))
a.img_name = "UpCharacter5"
a.img_title = "原神UP五星角色抽取概率"
text_note = "本算例中UP物品均不在常驻祈愿中\n"
text_up_expectation = "获取一个特定UP物品的期望为"+str(round(calc_obj.reference_upitem_expectation,2))+"抽\n"
text_up_badcase = "获取一个特定UP物品最多需要"+str(2*calc_obj.pity_pos)+"抽\n"
if en_switch:
    a.img_title = "Probability of getting rate up 5* character\nin character event wish"
    text_note = "Suppose none of the rate up items are in permanent wish\n"
    text_up_expectation = "The expected wishes of obtaining one specific rate up item is "+str(round(calc_obj.reference_upitem_expectation,2))+"\n"
    text_up_badcase = "You need at most "+str(2*calc_obj.pity_pos)+" wishes to get one specific rate up item\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.img_dpi = img_dpi
a.save_img = save_img # whether to save the image
a.show_img = show_img # whether to show the image
# 1150 appears to be the x-axis extent (number of wishes) -- TODO confirm in DrawTransCDF
a.plot_img(calc_obj.get_distribution(a.item_num, 1150, 0).cumsum(axis=1))
# Character event wish: rate-up 4-star character.
calc_obj = GGanalysislib.Up4starCharacter()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 7
a.item_type = "Character"
a.line_colors = cm.Purples(np.linspace(0.5, 0.9, a.item_num))
a.img_name = "UpCharacter4"
a.img_title = "原神三UP四星角色抽取特定角色概率"
text_note = "本算例中UP物品均不在常驻祈愿中\n绘图曲线忽略五星与四星耦合情况\n"
temp_obj = GGanalysislib.PityGacha()
# print(type(temp_obj.weapon_4star_pity()))
coupling_4star_p = GGanalysislib.calc_coupling_p(temp_obj.common_5star_pity(), temp_obj.common_4star_pity())
up_multi = (2-calc_obj.up_rate) * calc_obj.up_type
text_up_expectation = "考虑耦合时获取一个特定UP物品的期望为"+str(round(up_multi/coupling_4star_p,2))+"抽\n"
text_up_badcase = "无法保证在有限抽数内必能获得特定物品\n"
if en_switch:
    a.img_title = "Probability of getting rate up 4* character\nin character event wish"
    text_note = "Suppose none of the rate up items are in permanent wish\n"
    text_up_expectation = "The expected wishes of obtaining one specific rate up item is "+str(round(up_multi/coupling_4star_p,2))+"\n"
    text_up_badcase = "*Attention\nCan't guarantee that certain items will be obtained within limited wishes\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.img_dpi = img_dpi
a.save_img = save_img # whether to save the image
a.show_img = show_img # whether to show the image
a.plot_img(calc_obj.get_distribution(a.item_num, 600, 0).cumsum(axis=1))
# Weapon event wish: 5-star weapon WITHOUT Epitomized Path.
calc_obj = GGanalysislib.Up5starWeaponOld()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 5
a.item_type = "Weapon"
# temp_color is only used by the commented-out alternative palette below.
temp_color = np.ones((a.item_num, 4), dtype=float)/2
# a.line_colors = (temp_color+cm.Reds(np.linspace(0.5, 0.9, a.item_num)))/2
a.line_colors = (cm.Greys(np.linspace(0.5, 0.9, a.item_num))+cm.Reds(np.linspace(0.5, 0.9, a.item_num)))/2
# print(np.shape(a.line_colors))
a.img_name = "UpWeapon5Old"
a.img_title = "原神双UP五星武器不定轨抽取特定武器概率"
text_note = "本算例中UP物品均不在常驻祈愿中\n"
text_up_expectation = "获取一个特定UP物品的期望为"+str(round(calc_obj.reference_upitem_expectation,2))+"抽\n"
text_up_badcase = "无法保证在有限抽数内必能获得特定物品\n"
if en_switch:
    a.img_title = "Probability of getting rate up 5* weapon\n(without Epitomized Path)"
    text_note = "Suppose none of the rate up items are in permanent wish\n"
    text_up_expectation = "The expected wishes of obtaining one specific rate up item is "+str(round(calc_obj.reference_upitem_expectation,2))+"\n"
    text_up_badcase = "*Attention\nCan't guarantee that certain items will be obtained within limited wishes\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.img_dpi = img_dpi
a.save_img = save_img # whether to save the image
a.show_img = show_img # whether to show the image
a.plot_img(calc_obj.get_distribution(a.item_num, 1600, 0).cumsum(axis=1))
# Weapon event wish: 5-star weapon WITH Epitomized Path.
calc_obj = GGanalysislib.Up5starWeaponEP()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 5
a.item_type = "Weapon"
a.line_colors = cm.Reds(np.linspace(0.5, 0.9, a.item_num))
a.img_name = "UpWeapon5EP"
a.img_title = "原神双UP五星武器定轨抽取特定武器概率"
text_note = "本算例中UP物品均不在常驻祈愿中\n"
text_up_expectation = "获取一个特定UP物品的期望为"+str(round(calc_obj.reference_upitem_expectation,2))+"抽\n"
text_up_badcase = "获取一个特定UP物品最多需要"+str(3*calc_obj.pity_pos)+"抽\n"
if en_switch:
    a.img_title = "Probability of getting rate up 5* weapon\n(with Epitomized Path)"
    text_note = "Suppose none of the rate up items are in permanent wish\n"
    text_up_expectation = "The expected wishes of obtaining one specific rate up item is "+str(round(calc_obj.reference_upitem_expectation,2))+"\n"
    text_up_badcase = "You need at most "+str(3*calc_obj.pity_pos)+" wishes to get one specific rate up item\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.img_dpi = img_dpi
a.save_img = save_img # whether to save the image
a.show_img = show_img # whether to show the image
a.plot_img(calc_obj.get_distribution(a.item_num, 1050, 0).cumsum(axis=1))
# Weapon event wish: rate-up 4-star weapon.
calc_obj = GGanalysislib.Up4starWeapon()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 5
a.item_type = "Weapon"
a.line_colors = cm.Oranges(np.linspace(0.5, 0.9, a.item_num))
a.img_name = "UpWeapon4"
a.img_title = "原神五UP四星武器抽取特定武器概率"
text_note = "本算例中UP物品均不在常驻祈愿中\n绘图曲线忽略五星与四星耦合情况\n"
temp_obj = GGanalysislib.PityGacha()
up_multi = (2-calc_obj.up_rate) * calc_obj.up_type
coupling_4star_p = GGanalysislib.calc_coupling_p(temp_obj.weapon_5star_pity(), temp_obj.weapon_4star_pity())
text_up_expectation = "考虑耦合时获取一个特定UP物品的期望为"+str(round(up_multi/coupling_4star_p,2))+"抽\n"
text_up_badcase = "无法保证在有限抽数内必能获得特定物品\n"
if en_switch:
    a.img_title = "Probability of getting rate up 4* weapon\nin weapon event wish"
    text_note = "Suppose none of the rate up items are in permanent wish\n"
    text_up_expectation = "The expected wishes of obtaining one specific rate up item is "+str(round(up_multi/coupling_4star_p,2))+"\n"
    text_up_badcase = "*Attention\nCan't guarantee that certain items will be obtained within limited wishes\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.img_dpi = img_dpi
a.save_img = save_img # whether to save the image
a.show_img = show_img # whether to show the image
a.plot_img(calc_obj.get_distribution(a.item_num, 600, 0).cumsum(axis=1))
'''
# 常驻祈愿抽五星角色
calc_obj = GGanalysislib.Stander5StarCharacter()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 7
a.item_type = "Character"
a.line_colors = cm.PuRd(np.linspace(0.5, 1, a.item_num))
a.img_name = "StanderCharacter5"
a.img_title = "原神常驻祈愿抽取特定五星角色概率"
text_note = "本算例中常驻祈愿中有"+str(calc_obj.stander_num)+"种五星角色\n"
temp_obj = GGanalysislib.PityGacha()
text_up_expectation = "获取一个五星物品的期望为"+str(round(calc_obj.item_expectation,2))+"抽\n"
text_up_badcase = "无法保证在有限抽数内必能获得特定物品\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.x_bias_num = -7.6 # x方向偏移量
a.img_dpi = img_dpi
a.save_img = save_img # 是否保存图片
a.show_img = show_img # 是否显示图片
a.plot_img(calc_obj.get_distribution(a.item_num, 10100, 0).cumsum(axis=1))
# 常驻祈愿抽四星角色
calc_obj = GGanalysislib.Stander4StarCharacter()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 7
a.item_type = "Character"
a.line_colors = cm.RdPu(np.linspace(0.5, 1, a.item_num))
a.img_name = "StanderCharacter4"
a.img_title = "原神常驻祈愿抽取特定四星角色概率"
text_note = "本算例中常驻祈愿中有"+str(calc_obj.stander_num)+"种四星角色\n"
temp_obj = GGanalysislib.PityGacha()
coupling_4star_p = GGanalysislib.calc_coupling_p(temp_obj.weapon_5star_pity(), temp_obj.weapon_4star_pity())
text_up_expectation = "获取一个四星物品的期望为"+str(round(1/coupling_4star_p,2))+"抽\n"
text_up_badcase = "无法保证在有限抽数内必能获得特定物品\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.x_bias_num = -7.6 # x方向偏移量
a.img_dpi = img_dpi
a.save_img = save_img # 是否保存图片
a.show_img = show_img # 是否显示图片
a.plot_img(calc_obj.get_distribution(a.item_num, 5000, 0).cumsum(axis=1))
# 常驻祈愿抽五星武器
calc_obj = GGanalysislib.Stander5StarWeapon()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 5
a.item_type = "Weapon"
a.line_colors = cm.OrRd(np.linspace(0.4, 1, a.item_num))
a.img_name = "StanderWeapon5"
a.img_title = "原神常驻祈愿抽取特定五星武器概率"
text_note = "本算例中常驻祈愿中有"+str(calc_obj.stander_num)+"种五星武器\n"
temp_obj = GGanalysislib.PityGacha()
text_up_expectation = "获取一个五星物品的期望为"+str(round(calc_obj.item_expectation,2))+"抽\n"
text_up_badcase = "无法保证在有限抽数内必能获得特定物品\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.x_bias_num = -7.6 # x方向偏移量
a.img_dpi = img_dpi
a.save_img = save_img # 是否保存图片
a.show_img = show_img # 是否显示图片
a.plot_img(calc_obj.get_distribution(a.item_num, 16000, 0).cumsum(axis=1))
# 常驻祈愿抽四星武器
calc_obj = GGanalysislib.Stander4StarWeapon()
# a = GGanalysislib.DrawTransCDF()
a.item_num = 5
a.item_type = "Weapon"
a.line_colors = cm.YlOrRd(np.linspace(0.3, 1, a.item_num))
a.img_name = "StanderWeapon4"
a.img_title = "原神常驻祈愿抽取特定四星武器概率"
text_note = "本算例中常驻祈愿中有"+str(calc_obj.stander_num)+"种四星武器\n"
temp_obj = GGanalysislib.PityGacha()
coupling_4star_p = GGanalysislib.calc_coupling_p(temp_obj.weapon_5star_pity(), temp_obj.weapon_4star_pity())
text_up_expectation = "获取一个四星物品的期望为"+str(round(1/coupling_4star_p,2))+"抽\n"
text_up_badcase = "无法保证在有限抽数内必能获得特定物品\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.x_bias_num = -7.6 # x方向偏移量
a.img_dpi = img_dpi
a.save_img = save_img # 是否保存图片
a.show_img = show_img # 是否显示图片
a.plot_img(calc_obj.get_distribution(a.item_num, 3900, 0).cumsum(axis=1))
# 常驻祈愿抽齐五星角色
calc_obj = GGanalysislib.Stander5StarCharacter()
calc_obj.collect_all = 1
# a = GGanalysislib.DrawTransCDF()
a.item_num = 5
a.item_type = "Type"
a.line_colors = cm.PuRd(np.linspace(0.5, 1, a.item_num))
a.img_name = "GetAllStanderCharacter5"
a.img_title = "原神集齐常驻祈愿五星角色概率"
text_note = "本算例中常驻祈愿中有"+str(calc_obj.stander_num)+"种五星角色\n"
text_up_expectation = ""
text_up_badcase = "无法保证在有限抽数内必能集齐\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.x_bias_num = -7.6 # x方向偏移量
a.mid_bias_num = -7
a.img_dpi = img_dpi
a.save_img = save_img # 是否保存图片
a.show_img = show_img # 是否显示图片
a.plot_img(calc_obj.get_distribution(a.item_num, 4100, 0).cumsum(axis=1))
# 常驻祈愿抽齐五星武器
calc_obj = GGanalysislib.Stander5StarWeapon()
calc_obj.collect_all = 1
# a = GGanalysislib.DrawTransCDF()
a.item_num = 10
a.total_item_types = 10 # 物品种类
a.item_type = "Type"
a.fig_size_y = 12
a.line_colors = cm.OrRd(np.linspace(0.4, 1, a.item_num))
a.img_name = "GetAllStanderWeapon5"
a.img_title = "原神集齐常驻祈愿五星武器概率"
text_note = "本算例中常驻祈愿中有"+str(calc_obj.stander_num)+"种五星武器\n"
text_up_expectation = ""
text_up_badcase = "无法保证在有限抽数内必能集齐\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.x_bias_num = -7.6 # x方向偏移量
a.mid_bias_num = -7
a.img_dpi = img_dpi
a.save_img = save_img # 是否保存图片
a.show_img = show_img # 是否显示图片
a.plot_img(calc_obj.get_distribution(a.item_num, 9500, 0).cumsum(axis=1))
# 常驻祈愿抽齐某类四星
from GGanalysislib.StanderItem.Stander4Star import Stander4Star
calc_obj = Stander4Star()
calc_obj.collect_all = 1
calc_obj.stander_num = 18 # 计算时物品种类
# a = GGanalysislib.DrawTransCDF()
a.item_num = 18
a.total_item_types = 18 # 画图物品种类
a.fig_size_y = 20
a.item_type = "Type"
a.line_colors = cm.PuRd(np.linspace(0.5, 1, a.item_num))
a.img_name = "GetAllStander4"
a.img_title = "原神集齐常驻祈愿某类四星概率"
text_note = "本算例中常驻祈愿中有"+str(calc_obj.stander_num)+"种本类物品\n"
text_up_expectation = ""
text_up_badcase = "无法保证在有限抽数内必能集齐\n"
a.img_description = text_model+text_note+text_up_expectation+text_up_badcase
a.x_bias_num = -7.6 # x方向偏移量
a.y_bias_num = 0.2
a.mid_bias_num = -7
a.img_dpi = img_dpi
a.save_img = save_img # 是否保存图片
a.show_img = show_img # 是否显示图片
a.plot_img(calc_obj.get_distribution(a.item_num, 1500, 0).cumsum(axis=1))
''' | [
"onebst@foxmail.com"
] | onebst@foxmail.com |
4ab2d1b2588ed24e75a8fd18060032ba278250ad | bd792a49f21d901f14165993d45e114e4df60340 | /venv/bin/futurize | 2a1ba3df470dca8741f0b0ce6790bd1dcee24bdc | [] | no_license | Cynthrial/Serial-read | 82d0883ad4de01fe48e58523d2d0f4bcf97b3835 | eb706094b95b09cfc8870bff0f6385d04d807996 | refs/heads/master | 2020-04-13T13:37:32.380790 | 2018-12-27T02:35:04 | 2018-12-27T02:35:04 | 163,236,705 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 417 | #!/root/PycharmProjects/Serial_read/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','futurize'
__requires__ = 'future==0.17.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.17.1', 'console_scripts', 'futurize')()
)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain | |
6cdd33b225194a437ad5ed9ae0a18e40503fdc34 | f98e5eae4a8edea219949e58b94f41e20ca93cae | /schools/cert_issuer/models.py | 13dbb60b8ca4be33c5d64f82abea11ac94b12393 | [] | no_license | BlockCerts-Swust/block_certs_back_end | ec6117060a83f7f9eea0c5f8506bda9d89c27b53 | 7c518666c497cf885ab7eaa18ef387dd166692c4 | refs/heads/master | 2022-12-22T07:16:23.434977 | 2020-05-29T05:32:16 | 2020-05-29T05:32:16 | 244,879,049 | 1 | 0 | null | 2022-12-08T04:00:51 | 2020-03-04T11:06:33 | Python | UTF-8 | Python | false | false | 2,658 | py | from abc import abstractmethod
from schools.cert_issuer.config import ESTIMATE_NUM_INPUTS
class BatchHandler(object):
    """Coordinates issuing one batch of certificates end to end."""
    def __init__(self, secret_manager, certificate_handler, merkle_tree):
        # Collaborators: signing secrets, per-certificate logic, and the
        # merkle tree that anchors the whole batch.
        self.certificate_handler = certificate_handler
        self.secret_manager = secret_manager
        self.merkle_tree = merkle_tree
    @abstractmethod
    def pre_batch_actions(self, config):
        """Hook run before the batch is issued."""
        pass
    @abstractmethod
    def post_batch_actions(self, config):
        """Hook run after the batch is issued."""
        pass
    def set_certificates_in_batch(self, certificates_to_issue):
        """Remember the certificates this batch will issue."""
        self.certificates_to_issue = certificates_to_issue
class CertificateHandler(object):
    """Abstract per-certificate operations used while issuing a batch."""
    @abstractmethod
    def validate_certificate(self, certificate_metadata):
        """Check the certificate is well formed before issuing."""
        pass
    @abstractmethod
    def sign_certificate(self, signer, certificate_metadata):
        """Sign the certificate using *signer*."""
        pass
    @abstractmethod
    def get_byte_array_to_issue(self, certificate_metadata):
        """Return the bytes to be hashed into the merkle tree."""
        pass
    @abstractmethod
    def add_proof(self, certificate_metadata, merkle_proof):
        """Attach *merkle_proof* to the certificate."""
        pass
class ServiceProviderConnector(object):
    """Abstract gateway to a blockchain service provider."""
    @abstractmethod
    def get_balance(self, address):
        """Return the spendable balance for *address*."""
        pass

    # Fix: broadcast_tx was the only interface method in this module missing
    # the @abstractmethod marker; add it for consistency with its siblings.
    # (The class does not use ABCMeta, so the marker is documentation-only
    # and cannot break existing subclasses or instantiation.)
    @abstractmethod
    def broadcast_tx(self, tx):
        """Broadcast the signed transaction *tx* to the network."""
        pass
class Signer(object):
    """
    Abstraction for a component that can sign.
    """
    def __init__(self):
        pass
    @abstractmethod
    def sign_message(self, wif, message_to_sign):
        """Sign an arbitrary message with the key encoded in *wif*."""
        pass
    @abstractmethod
    def sign_transaction(self, wif, transaction_to_sign):
        """Sign a blockchain transaction with the key encoded in *wif*."""
        pass
class SecretManager(object):
    """Holds the signing key (wif) and delegates signing to a Signer."""
    def __init__(self, signer):
        self.signer = signer
        self.wif = None  # presumably populated by start() in subclasses -- TODO confirm
    @abstractmethod
    def start(self):
        """Acquire the secret key material."""
        pass
    @abstractmethod
    def stop(self):
        """Release the secret key material."""
        pass
    def sign_message(self, message_to_sign):
        """Sign *message_to_sign* with the currently loaded wif."""
        return self.signer.sign_message(self.wif, message_to_sign)
    def sign_transaction(self, transaction_to_sign):
        """Sign *transaction_to_sign* with the currently loaded wif."""
        return self.signer.sign_transaction(self.wif, transaction_to_sign)
class TransactionHandler(object):
    """Abstract interface for anchoring a batch on a blockchain."""
    @abstractmethod
    def ensure_balance(self):
        """Verify the issuing address can pay for the transaction."""
        pass
    @abstractmethod
    def issue_transaction(self, blockchain_bytes):
        """Publish *blockchain_bytes* on chain; returns an identifier."""
        pass
class MockTransactionHandler(TransactionHandler):
    """Test double: skips balance checks and fakes the on-chain issue."""
    def ensure_balance(self):
        # No-op: a mock has no funds to verify.
        pass
    def issue_transaction(self, op_return_bytes):
        # Returns a fixed marker string instead of a transaction id.
        return 'This has not been issued on a blockchain and is for testing only'
class TransactionCreator(object):
    """Abstract builder for the transaction that carries a batch."""
    @abstractmethod
    def estimate_cost_for_certificate_batch(self, tx_cost_constants, num_inputs=ESTIMATE_NUM_INPUTS):
        # ESTIMATE_NUM_INPUTS is a default imported from cert_issuer config.
        pass
    @abstractmethod
    def create_transaction(self, tx_cost_constants, issuing_address, inputs, op_return_value):
        pass
| [
"2482003411@qq.com"
] | 2482003411@qq.com |
28d939d002a35bc02c16215e3fe153a32445d91b | 8726a58628e1d6c8e6e8cba0bb67de80bad72a51 | /wizard/create_data_template.py | 5cfc1efe6cda7e3d4c77f4974c579767e7e5f7b0 | [] | no_license | cgsoftware/jasper_reports | 54a612a44cd94963794b16ab4266026b233b8ba4 | 207bdea1b8738dff88260f4ea76da8b627e05375 | refs/heads/master | 2021-01-10T19:33:22.921656 | 2011-09-06T15:10:39 | 2011-09-06T15:10:39 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 5,766 | py | # encoding: iso-8859-15
from xml.dom.minidom import getDOMImplementation
import wizard
import pooler
import base64
import osv
import string
from tools.translate import _
view_form_start = """<?xml version="1.0"?>
<form string="Create Data Template">
<group colspan="2">
<field name="model"/>
<field name="depth"/>
</group>
</form>"""
view_fields_start = {
'model': { 'string': 'Model', 'type': 'many2one', 'relation': 'ir.model', 'required': True },
'depth': { 'string':'Depth', 'type':'integer', 'required': True },
}
view_form_end = """<?xml version="1.0"?>
<form string="Create Data Template">
<group colspan="2">
<field name="model"/>
<field name="data" filename="filename"/>
<field name="filename" invisible="1"/>
</group>
</form>"""
view_fields_end = {
'model': { 'string': 'Model', 'type': 'char', 'readonly': True },
'data': { 'string': 'XML', 'type': 'binary', 'relation': 'ir.model', 'readonly': True },
'filename': { 'string': 'File Name', 'type': 'char' },
}
src_chars = """àáäâÀÁÄÂèéëêÈÉËÊìíïîÌÍÏÎòóöôÒÓÖÔùúüûÙÚÜÛçñºª·¤ '"()/*-+?¿!&$[]{}@#`'^:;<>=~%,\\"""
src_chars = unicode( src_chars, 'iso-8859-1' )
dst_chars = """aaaaAAAAeeeeEEEEiiiiIIIIooooOOOOuuuuUUUUcnoa_e________________________________"""
dst_chars = unicode( dst_chars, 'iso-8859-1' )
class create_data_template(wizard.interface):
    """OpenERP wizard: build a sample XML data template for a chosen model.

    The wizard has two steps: pick a model and recursion depth ('init'),
    then download the generated XML ('create'). Python 2 / old OpenERP API.
    """
    def _action_start(self, cr, uid, data, context):
        """Provide defaults for the first wizard form."""
        res = {
            'depth': 1
        }
        return res
    def normalize(self, text):
        """Return *text* as a UTF-8 byte string (Python 2 semantics)."""
        if isinstance( text, unicode ):
            text = text.encode('utf-8')
        return text
    def unaccent(self, text):
        """Replace accented/special characters using the src/dst tables
        defined at module level, strip leading/trailing underscores and
        return a UTF-8 encoded byte string."""
        if isinstance( text, str ):
            text = unicode( text, 'utf-8' )
        output = text
        for c in xrange(len(src_chars)):
            output = output.replace( src_chars[c], dst_chars[c] )
        return output.strip('_').encode( 'utf-8' )
    def generate_xml(self, cr, uid, context, pool, modelName, parentNode, document, depth, first_call):
        """Append sample <field> nodes for *modelName* under *parentNode*.

        Recurses into relational fields until *depth* is exhausted; on the
        first call also adds Attachments and User relations.
        """
        # First of all add "id" field
        fieldNode = document.createElement('id')
        parentNode.appendChild( fieldNode )
        valueNode = document.createTextNode( '1' )
        fieldNode.appendChild( valueNode )
        # 'en_US' is treated as the default language (no translation lookup).
        language = context.get('lang')
        if language == 'en_US':
            language = False
        # Then add all fields in alphabetical order
        model = pool.get(modelName)
        fields = model._columns.keys()
        fields.sort()
        for field in fields:
            name = False
            if language:
                # Obtain field string for user's language.
                name = pool.get('ir.translation')._get_source(cr, uid, modelName + ',' + field, 'field', language)
                #name = self.unaccent( name )
                #name = self.normalize( name )
                #help = pool.get('ir.translation')._get_source(cr, uid, modelName + ',' + field, 'help', language)
                #help = self.normalize( help )
            if not name:
                # If there's no description in user's language, use default (english) one.
                name = pool.get(modelName)._columns[field].string
                #help = pool.get(modelName)._columns[field].help
            if name:
                name = self.unaccent( name )
                # After unaccent the name might result in an empty string
                if name:
                    name = '%s-%s' % (self.unaccent( name ), field )
                else:
                    name = field
            # NOTE(review): if 'name' is still falsy here (field with no
            # string), createElement receives False -- TODO confirm intended.
            fieldNode = document.createElement( name )
            #if name:
                #fieldNode.setAttribute( 'name', name )
            #if help:
                #fieldNode.setAttribute( 'help', help )
            parentNode.appendChild( fieldNode )
            fieldType = model._columns[field]._type
            if fieldType in ('many2one','one2many','many2many'):
                # Relational field: recurse one level instead of a sample value.
                if depth <= 1:
                    continue
                newName = model._columns[field]._obj
                self.generate_xml(cr, uid, context, pool, newName, fieldNode, document, depth-1, False)
                continue
            # Scalar field: emit a recognizable placeholder value per type.
            if fieldType == 'float':
                value = '12345.67'
            elif fieldType == 'integer':
                value = '12345'
            elif fieldType == 'date':
                value = '2009-12-31 00:00:00'
            elif fieldType == 'time':
                value = '12:34:56'
            elif fieldType == 'datetime':
                value = '2009-12-31 12:34:56'
            else:
                value = field
            valueNode = document.createTextNode( value )
            fieldNode.appendChild( valueNode )
        if depth > 1 and modelName != 'Attachments':
            # Create relation with attachments
            fieldNode = document.createElement( '%s-Attachments' % _('Attachments') )
            parentNode.appendChild( fieldNode )
            self.generate_xml(cr, uid, context, pool, 'ir.attachment', fieldNode, document, depth-1, False)
        if first_call:
            # Create relation with user
            fieldNode = document.createElement( '%s-User' % _('User') )
            parentNode.appendChild( fieldNode )
            self.generate_xml(cr, uid, context, pool, 'res.users', fieldNode, document, depth-1, False)
    def _action_create_xml(self, cr, uid, data, context):
        """Build the XML document and return it base64-encoded for download."""
        pool = pooler.get_pool(cr.dbname)
        form = data['form']
        values = pool.get('ir.model').read(cr, uid, form['model'], ['name','model'], context)
        name = values['name']
        model = values['model']
        document = getDOMImplementation().createDocument(None, 'data', None)
        topNode = document.documentElement
        recordNode = document.createElement('record')
        topNode.appendChild( recordNode )
        self.generate_xml( cr, uid, context, pool, model, recordNode, document, form['depth'], True )
        topNode.toxml()
        res = {
            'model': name,
            'data': base64.encodestring( topNode.toxml() ),
            'filename': 'jasper.xml',
        }
        return res
    # Wizard state machine: 'init' collects model/depth, 'create' shows the result.
    states = {
        'init': {
            'actions': [_action_start],
            'result': {
                'type': 'form',
                'arch': view_form_start,
                'fields': view_fields_start,
                'state': [('end','Cancel','gtk-cancel'),('create','Create','gtk-ok')]
            }
        },
        'create': {
            'actions': [_action_create_xml],
            'result': {
                'type': 'form',
                'arch': view_form_end,
                'fields': view_fields_end,
                'state': [('end','Accept','gtk-ok')]
            }
        }
    }
# Register the wizard under its technical name.
create_data_template('jasper_create_data_template')
| [
"g.dalo@cgsoftware.it"
] | g.dalo@cgsoftware.it |
ae562bd8e340a7cb6fbd929fb318d10161565a0f | bbc2c112c910ff670842f3e911c816bace918a99 | /BookFormProject/BookFormProject/settings.py | 0b20d6039b3745b618837e33b7e0f8db89e7c831 | [] | no_license | dhanyamc97/django_Projects | d73367ce49fdc47205b13b9d85ed4e743f93f7f4 | 78211043bdca92e2f47c25b8a8675b92732d2ae7 | refs/heads/main | 2023-01-09T20:51:09.066316 | 2020-11-08T17:16:30 | 2020-11-08T17:16:30 | 311,112,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | """
Django settings for BookFormProject project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Project-level template folder; wired into TEMPLATES['DIRS'] below.
TEMPLATE_DIR = Path.joinpath(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'hd@!lg)l_ezm+bkxih@isp9+1n0wbw6*drjhp9+3sgs!(nr3m^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Books',  # project app with the book form
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BookFormProject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'BookFormProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"dhanyamc97@gmail.com"
] | dhanyamc97@gmail.com |
7132a6efe79998e97391fede2615e22427a1242a | 5ffed81ced523b6e417b4e48d20380b6f16f8f42 | /pre_exam/vacantion.py | 10a4829eea56d31166c28138daf1f4126ed1418f | [] | no_license | Nikoletazl/Basics-Python | 0f3f095bd51f9546c681e3cdd268232de88749ab | 17aef1b95814f13a02053681aae3e617e56f2fe6 | refs/heads/main | 2023-08-14T15:48:48.450249 | 2021-10-08T15:02:35 | 2021-10-08T15:02:35 | 415,027,622 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | count_people = int(input())
# Remaining inputs: nights stayed, discount cards bought, bus tickets used.
count_nights = int(input())
count_cards = int(input())
count_tickets = int(input())
# Cost for a single person: 20 per night, 1.60 per card, 6 per ticket.
per_person_cost = count_nights * 20 + count_cards * 1.60 + count_tickets * 6
# Whole group pays the per-person total, plus a 25% surcharge on top.
base_group_cost = per_person_cost * count_people
final_cost = base_group_cost + 0.25 * base_group_cost
print(f"{final_cost:.2f}")
"noreply@github.com"
] | noreply@github.com |
72e353a6734d1e9117e2b2a4dba5f5cb7d955d5b | 73081ee1fdf0dad38ae98afa8bb74a31a594a436 | /observer_hub/video.py | d4b03715223f5caf9865acd53326bfccaf31daf6 | [
"Apache-2.0"
] | permissive | carrier-io/observer-hub | 8b5328e9a3795e020589bc845247b95201a5212e | a79daf743d1279da29e8fb0688895ad63d0f10f3 | refs/heads/master | 2023-07-19T00:36:21.061184 | 2021-09-09T15:19:25 | 2021-09-09T15:19:25 | 285,288,482 | 0 | 0 | Apache-2.0 | 2021-02-19T08:33:37 | 2020-08-05T12:57:43 | Python | UTF-8 | Python | false | false | 1,221 | py | import json
import os
import subprocess
import tempfile
from time import time
from requests import get
from observer_hub.constants import VIDEO_PATH
from observer_hub.util import logger
def start_video_recording(video_host):
    """Kick off recording on *video_host* and return how many whole
    seconds the start request itself took."""
    began = time()
    start_recording(video_host)
    return int(time() - began)
def start_recording(host):
    """Tell the recorder service on *host* to begin capturing video."""
    url = f'http://{host}/record/start'
    get(url)
def stop_recording(host):
    """Fetch the finished recording from *host* and persist it to disk.

    Returns a ``(folder, path)`` pair: a fresh temp directory under
    VIDEO_PATH and the mp4 file written inside it.
    """
    logger.info("Stop recording...")
    os.makedirs(VIDEO_PATH, exist_ok=True)
    recording = get(f'http://{host}/record/stop').content
    target_dir = tempfile.mkdtemp(dir=VIDEO_PATH)
    target_file = os.path.join(target_dir, "video.mp4")
    with open(target_file, 'wb') as out:
        out.write(recording)
    logger.info(f"Video file {target_file}")
    return target_dir, target_file
def get_video_length(file_path):
    """Return the duration of a media file in milliseconds.

    Shells out to ffprobe (must be on PATH) and reads the container
    metadata as JSON.

    Fixes over the previous version: ffprobe failures are now reported as
    a subprocess.CalledProcessError instead of surfacing later as a
    cryptic JSON decode error, and stderr is no longer merged into the
    stream that gets parsed as JSON.
    """
    command = [
        "ffprobe",
        "-loglevel", "quiet",
        "-print_format", "json",
        "-show_format",
        file_path
    ]
    result = subprocess.run(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, check=True)
    metadata = json.loads(result.stdout)
    return int(float(metadata['format']['duration']) * 1000)
return int(float(json.loads(out)['format']['duration']) * 1000)
| [
"semen4ik20@gmail.com"
] | semen4ik20@gmail.com |
3d6e47d4efb78b657a60852b6505cf0266099e8e | 1369e6a623b0c8d2fcd9ced21756ea0b79373adf | /blog/admin.py | 703ff05eedd523f7a1feac63c1dab7edd8d5227e | [] | no_license | sunshane6726/Practice-App | ed3be2b85272bb3a1d701c9a9b902130c411220a | a6f0763d643be2dbb33f400a0b3f356b6c63f5eb | refs/heads/master | 2023-04-27T01:34:30.809318 | 2019-07-05T12:04:32 | 2019-07-05T12:04:32 | 193,915,683 | 0 | 0 | null | 2022-04-22T21:40:15 | 2019-06-26T13:59:31 | JavaScript | UTF-8 | Python | false | false | 171 | py | from django.contrib import admin
from .models import Blog
# Register your models here.
# Expose the Blog model in the Django admin with the default ModelAdmin.
admin.site.register(Blog)
# Skill only improves when I actually write the code myself. | [
"sunshane6726@gmail.com"
] | sunshane6726@gmail.com |
958e6cc962347253a2d2217e8fb7795f660d2001 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Mkt3yqQMsw9e3Jmjq_4.py | 02def150323524f87c2640e1f03e1201eb361dcd | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,137 | py | """
Write a function which, given a permutation of `[0, 1, ..., n-1]` ( `n>0`)
represented by a shuffled list, returns the permutation in _disjoint cycle
form_ as a set of tuples.
A **permutation** is a particular (re)ordering of a set of objects. For
example, `[1,3,0,2]` is a permutation on the 4 objects `[0,1,2,3]`. In this
problem, we represent permutations on `n` objects as lists containing the
numbers in `list(range(n)) == [0, ..., n-1]`.
A **cycle** or **cyclic permutation** is a particular kind of permutation
whereby all elements are sent to one another in a cyclic fashion. In this
problem, we represent cycles as tuples.
* For example, the permutation `[1,2,3,0]` is a cyclic permutation of `[0,1,2,3]` because it can be made from `[0,1,2,3]` by applying the mapping `{0:1, 1:2, 2:3, 3:0}`, which maps elements in the _cycle_ `0➞1➞2➞3➞0`. We represent this cycle by the tuple `(0,1,2,3)`, where each element gets sent to the one on the right, and the last is sent to the first.
* The cycles `(0,1,2,3)`, `(1,2,3,0)`, `(2,3,0,1)` and `(3,0,1,2)` all represent the same cycle; namely `0➞1➞2➞3➞0` . We always choose the cycle to have the lowest element first: `(0,1,2,3)`.
Finally, any permutation can be written in **disjoint cycle form** , or as an
unordered set of cyclic permutations. _Disjoint_ means none of the cycles have
any elements in common. This form is unique up to the order of the cycles and
up to the cycle representation.
  * The permutation `[0,1,3,2,4,5]` can be written as `(2,3)`—since 2 and 3 are swapped—and so the disjoint cycle form is `{(2,3)}`.
* `[1,0,3,2]` is the mapping `{0:1, 1:0, 2:3, 3:2}` and has disjoint cycle form`{(0, 1), (2, 3)}` .
Your function takes a list (the permutation) and returns a set of tuples (the
set of cyclic permutations).
### Examples
disjoint_cycle_form([1, 0]) ➞ {(0, 1)}
# 0 and 1 are swapped, but lowest is listed first.
disjoint_cycle_form([0, 1, 2, 3]) ➞ set()
# Permutation is already in order.
disjoint_cycle_form([0, 1, 3, 2]) ➞ {(2, 3)}
disjoint_cycle_form([1, 0, 3, 2]) ➞ {(0, 1), (2, 3)}
# or {(2, 3), (0, 1)}; the cycle order in a set doesn't matter.
disjoint_cycle_form([1, 3, 0, 2]) ➞ {(0, 1, 3, 2)}
### Notes
Look up "disjoint cycle notation" or "cycle decomposition" for more
information about permutations. This is the kind of thing you learn in a first
course in Group Theory. Note that the given permutations will always have at
least one element (the only such permutation is `[0]`), and a permutation of
length `n` will always contain the elements of `range(n)` (that is, `0` to
`n-1` inclusive).
"""
def cycles(perm):
    """Decompose the permutation *perm* into its cycles.

    Returns a list of lists, one per cycle (fixed points included as
    length-1 cycles).  Each cycle starts at its smallest element and
    cycles are emitted in increasing order of that element, as the
    problem statement requires.  The previous implementation used
    ``set.pop()``, whose ordering is implementation-defined, so the
    "lowest element first" guarantee only held by accident on CPython.
    """
    remaining = set(perm)
    result = []
    while remaining:
        # Start each cycle at the smallest untouched element.
        start = min(remaining)
        remaining.remove(start)
        cycle = [start]
        nxt = perm[start]
        while nxt in remaining:
            remaining.remove(nxt)
            cycle.append(nxt)
            nxt = perm[nxt]
        result.append(cycle)
    return result
def disjoint_cycle_form(perm):
    """Return the disjoint cycle form of *perm* as a set of tuples.

    Fixed points (cycles of length 1) are omitted, so the identity
    permutation maps to the empty set.
    """
    return {tuple(cycle) for cycle in cycles(perm) if len(cycle) > 1}
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
ae9733baa5acdbba8e89173c3ab1ad64124800fc | 5213322410b843325d2e50576d397695b2aea2f7 | /proxy/pay.py | 4bceec220a74add591984a66d1c4f8a5dd6b2d0b | [] | no_license | cash2one/Python-In-Action | 854be36627031cc4da3b65e37c7f0e52069af2e0 | bbf5cb090e07e22245089ed3df1284e13c11115a | refs/heads/master | 2020-03-10T21:26:46.534010 | 2018-01-19T13:51:57 | 2018-01-19T13:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | import requests
import pymongo
import time
# MongoDB connection used to persist the proxy IPs fetched below.
client = pymongo.MongoClient('localhost', 27017)
proxy = client['proxy']  # database
pay = proxy['pay']  # collection holding one document per fetched IP
# Poll the data5u proxy API every 0.5 s forever, storing each returned IP
# in MongoDB and echoing it to stdout.
# NOTE(review): the order token is hard-coded in the URL; move it to
# configuration if this script is ever shared.
# The two statements that previously followed this infinite loop were
# unreachable duplicates and have been removed.
API_URL = ('http://api.ip.data5u.com/dynamic/get.html'
           '?order=59f3d1290512f231f0acd124242fe932&sep=2')
while True:
    time.sleep(0.5)
    response = requests.get(API_URL)
    ip = response.text.strip()
    pay.insert_one({'ip': ip})
    print(ip)
| [
"2808581543@qq.com"
] | 2808581543@qq.com |
dd8a77fd38ee84af2b84e6bcf6ee5c31652a8548 | 1aec7f1ff9b25bb5e0065bdef34b6773acb1c15a | /PMS/asgi.py | dc743121b9e31e71ee9e7fe7925752145430aefe | [] | no_license | macjade/pms | 35b53cbdcd7e809fad334675ff3b6a8d52108b65 | 7dd46a60d53209ba0ecf73f3b5d3231beaa8639e | refs/heads/master | 2023-07-18T07:51:41.633869 | 2021-08-07T19:23:55 | 2021-08-07T19:23:55 | 393,447,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
ASGI config for PMS project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PMS.settings')
# Module-level ASGI callable that ASGI servers (daphne/uvicorn) import.
application = get_asgi_application()
| [
"jamesdaniel00700@gmail.com"
] | jamesdaniel00700@gmail.com |
aebf3cbd105f56502484732cbb959833a049352b | 6bce631b869a8717eed29eae186688a7fdb7f5c8 | /venv/Lib/site-packages/test/test_stock_price.py | 4ce7f9435eb697392c8e98c7711ab22e0976e446 | [] | no_license | singhd3101/CS5100-Stock-Market-Prediction | 6d43bd39633dd80bb1141dc550302874a5bc0939 | 2804a6270a05155e168d0f2518bcd97f1c9bcb3e | refs/heads/master | 2020-11-26T03:56:02.613630 | 2019-12-19T02:22:13 | 2019-12-19T02:22:13 | 228,958,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import intrinio_sdk
from intrinio_sdk.models.stock_price import StockPrice # noqa: E501
from intrinio_sdk.rest import ApiException
class TestStockPrice(unittest.TestCase):
    """StockPrice unit test stubs"""
    def setUp(self):
        # No fixtures are needed for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testStockPrice(self):
        """Test StockPrice"""
        # FIXME: construct object with mandatory attributes with example values
        # model = intrinio_sdk.models.stock_price.StockPrice()  # noqa: E501
        pass
# Allow running this test module directly: `python test_stock_price.py`.
if __name__ == '__main__':
    unittest.main()
| [
"singh3101div@gmail.com"
] | singh3101div@gmail.com |
289cae1e6468e6b0e277c0821edbf38c7de80a92 | 5fa9678b4c37847352ae43af107653ca39d4e2d5 | /chodzenie.py | 1ccb7675db14370dd8c31d0c6c8d9db75410a33d | [] | no_license | wunderbarDeveloper/Gierka | 68e39671cae80261c36ba922bb30ac91cef303de | dbd711566d165300b84d4036478ee0aeb9e4afab | refs/heads/master | 2020-04-29T18:26:07.418685 | 2019-06-16T20:05:10 | 2019-06-16T20:05:10 | 176,324,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,231 | py | import os
import pygame
pygame.init()
SIZE = WIDTH, HEIGHT = 600, 600  # window dimensions in pixels
BACKGROUND_COLOR = pygame.Color('blue')
FPS = 60  # frame-rate cap for the main loop
screen = pygame.display.set_mode(SIZE)
clock = pygame.time.Clock()  # used to throttle the loop and measure dt
def load_images(path):
    """Load every file in directory *path* as a pygame surface.

    Directory entries are sorted so that animation frames always load in
    a deterministic order — ``os.listdir`` returns entries in arbitrary,
    filesystem-dependent order, which previously could scramble the
    animation sequence.
    """
    images = []
    for file_name in sorted(os.listdir(path)):
        # os.path.join handles the path separator portably.
        images.append(pygame.image.load(os.path.join(path, file_name)))
    return images
class AnimatedSprite(pygame.sprite.Sprite):
    """Sprite that cycles through animation frames and moves by velocity.

    Two animation strategies are provided: time-based (advance one frame
    every ``animation_time`` seconds) and frame-based (advance one frame
    every ``animation_frames`` ticks).  The group hook ``update`` uses
    the time-based strategy.
    """

    def __init__(self, position, images):
        """Create the sprite at *position* with right-facing *images*."""
        super(AnimatedSprite, self).__init__()
        size = (32, 32)  # sprite size in pixels
        self.rect = pygame.Rect(position, size)
        self.images = images
        self.images_right = images
        # Pre-compute horizontally mirrored frames for leftward movement.
        self.images_left = [pygame.transform.flip(image, True, False) for image in images]
        self.index = 0
        self.image = images[self.index]
        self.velocity = pygame.math.Vector2(0, 0)
        self.animation_time = 0.04   # seconds per frame (time-based mode)
        self.current_time = 0
        self.animation_frames = 6    # ticks per frame (frame-based mode)
        self.current_frame = 0

    def _face_velocity(self):
        """Pick the right- or left-facing frame set from velocity.x.

        (Extracted helper: this logic was duplicated verbatim in both
        update methods.)
        """
        if self.velocity.x > 0:
            self.images = self.images_right
        elif self.velocity.x < 0:
            self.images = self.images_left

    def update_time_dependent(self, dt):
        """Advance the animation by *dt* seconds and move the sprite."""
        self._face_velocity()
        self.current_time += dt
        if self.current_time >= self.animation_time:
            self.current_time = 0
            self.index = (self.index + 1) % len(self.images)
            self.image = self.images[self.index]
        self.rect.move_ip(*self.velocity)

    def update_frame_dependent(self):
        """Advance the animation by one tick and move the sprite."""
        self._face_velocity()
        self.current_frame += 1
        if self.current_frame >= self.animation_frames:
            self.current_frame = 0
            self.index = (self.index + 1) % len(self.images)
            self.image = self.images[self.index]
        self.rect.move_ip(*self.velocity)

    def update(self, dt):
        """Pygame sprite-group hook; delegates to the time-based update."""
        self.update_time_dependent(dt)
def main(images_path=r'E:\Pobrane\gierka\chodzenie'):
    """Run the demo: arrow keys move an animated sprite around the window.

    ``images_path`` is the directory containing the animation frames.
    It defaults to the original hard-coded location so existing callers
    are unaffected, but can now be pointed elsewhere.
    """
    images = load_images(path=images_path)
    player = AnimatedSprite(position=(5, 5), images=images)
    all_sprites = pygame.sprite.Group(player)
    running = True
    while running:
        dt = clock.tick(FPS) / 1000  # seconds elapsed since last frame
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                # Arrow keys set a constant velocity in that direction.
                if event.key == pygame.K_RIGHT:
                    player.velocity.x = 4
                elif event.key == pygame.K_LEFT:
                    player.velocity.x = -4
                elif event.key == pygame.K_DOWN:
                    player.velocity.y = 4
                elif event.key == pygame.K_UP:
                    player.velocity.y = -4
            elif event.type == pygame.KEYUP:
                # Releasing either key of an axis stops motion on that axis.
                if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
                    player.velocity.x = 0
                elif event.key == pygame.K_DOWN or event.key == pygame.K_UP:
                    player.velocity.y = 0
        all_sprites.update(dt)
        screen.fill(BACKGROUND_COLOR)
        all_sprites.draw(screen)
        pygame.display.update()
if __name__ == '__main__':
main() | [
"Lukasso2015@gmail.com"
] | Lukasso2015@gmail.com |
7b80af63ba4e31916ceecbe57cc05ff3750f51ae | 85bc90e520580d2e4c186f910fc3e0396ee934f7 | /schedule/models.py | d9b47093d0a456ece85a67412d2b60bfd0a392a7 | [] | no_license | ulchm/picks_server | c6abb32220ee18bb1d72398680139b354a092042 | 1f87296538d69b51f2bef75f009e35363d04a5b8 | refs/heads/master | 2021-01-10T02:02:12.993034 | 2016-01-03T14:31:47 | 2016-01-03T14:31:47 | 48,917,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,367 | py | from django.db import models
class League(models.Model):
    """A sports league that seasons and games are scheduled under."""
    name = models.CharField(max_length=255)
    short_name = models.CharField(max_length=10)
    # Presumably a soft-delete flag (rows are deactivated, not deleted) —
    # confirm against the querysets that filter on it.
    is_active=models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
class Season(models.Model):
    """A dated span of play belonging to a single league."""
    name = models.CharField(max_length=255, help_text="The name of the season.")
    start_date = models.DateField(help_text="The first day of the season.")
    end_date = models.DateField(help_text="The last day of the season")
    league = models.ForeignKey('League', related_name="seasons")
    # Presumably a soft-delete/active flag, mirroring League.is_active.
    is_active=models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
class Team(models.Model):
    """A team that plays home and away games (see Game.home_team/away_team)."""
    name = models.CharField(max_length=255, help_text="The name of the team, without the city")
    short_name = models.CharField(max_length=3, help_text="The short name / code for the team. 3 Letters.")
    city = models.CharField(max_length=255, blank=True, null=True, help_text="The city the team is from")
    logo = models.FileField(upload_to="team_logos", blank=True, null=True, help_text="SVG Vector Image logo")
    is_active = models.BooleanField(default=True)
    def __unicode__(self):
        # With a city: "City Name"; without: "SHT: Name" (different formats
        # by design — teams lacking a city fall back to the short code).
        if self.city:
            return "%s %s" % (self.city, self.name)
        return "%s: %s" % (self.short_name, self.name)
    def get_active_away_record(self):
        # Stub: always returns a zeroed record until the lookup is written.
        #TODO: Look up current leagues / seasons and return active win, loss and ties as a dictionary
        return {'wins': 0,
                'losses': 0,
                'overtime_losses': 0}
    def get_active_home_record(self):
        # Stub: duplicate of get_active_away_record until implemented.
        #TODO: Look up current leagues / seasons and return active win, loss and ties as a dictionary
        return {'wins': 0,
                'losses': 0,
                'overtime_losses': 0}
class Game(models.Model):
    """A single scheduled game between two teams within a season."""
    season = models.ForeignKey('Season', related_name="games")
    home_team = models.ForeignKey('Team', related_name="home_games")
    away_team = models.ForeignKey('Team', related_name="away_games")
    starts_at = models.DateTimeField()
    # True while the game is live.
    is_playing = models.BooleanField(default=False)
    # Scores are null until the game has started / been recorded.
    home_score = models.IntegerField(blank=True, null=True)
    away_score = models.IntegerField(blank=True, null=True)
    period = models.CharField(max_length=255, blank=True, null=True)
    # Result flags, populated once the game ends.
    is_final = models.BooleanField(default=False)
    was_shootout = models.BooleanField(default=False)
    was_overtime = models.BooleanField(default=False)
    # Denormalized winner/loser references; null until the game is final.
    winning_team = models.ForeignKey('Team', related_name="games_won", blank=True, null=True)
    losing_team = models.ForeignKey('Team', related_name="games_lost", blank=True, null=True)
    def __unicode__(self):
        return "%s @ %s - %s" % (self.away_team, self.home_team, self.starts_at)
    def get_season_team_history(self):
        # Stub: always returns a zeroed record until the lookup is written.
        #TODO: Look up games in active system and history between these teams, building list of home / away wins and losses
        return {
            "home_wins": 0,
            "away_wins": 0,
            "home_losses": 0,
            "away_losses": 0
        }
    def get_all_team_history(self):
        # Stub: all-time variant of get_season_team_history; not implemented.
        #TODO: Look up all games in system and return how many games the home team and away team have won / lost
        return {
            "home_wins": 0,
            "away_wins": 0,
            "home_losses": 0,
            "away_losses": 0
        }
| [
"mike@norcode.com"
] | mike@norcode.com |
60b566ffa37d4b8a664568c5090b7a037600e88b | 32c31618ed94d78a3654ddd267632691784663c5 | /fedlab_benchmarks/fedavg/scale/mnist-cnn/mnist_partition.py | a1510a6de907316bb55d38c8f49201faea366612 | [
"Apache-2.0"
] | permissive | roy-ch/FedLab-benchmarks | 2a1f5833b8a519dda76e5f75f9214f02bd556d7a | 725f6453c114a12741b6e7277e96788949576ea6 | refs/heads/master | 2023-08-25T18:42:52.165193 | 2021-10-11T08:31:45 | 2021-10-11T08:31:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from fedlab.utils.functional import save_dict
from fedlab.utils.dataset.slicing import noniid_slicing, random_slicing
import torchvision
# Location of the MNIST data (downloaded on first use).
root = '../../../../datasets/data/mnist/'
trainset = torchvision.datasets.MNIST(root=root, train=True, download=True)
# Non-IID partition: 100 clients drawing from 200 shards.
data_indices = noniid_slicing(trainset, num_clients=100, num_shards=200)
save_dict(data_indices, "mnist_noniid.pkl")
# IID partition: uniformly random split over the same 100 clients.
data_indices = random_slicing(trainset, num_clients=100)
save_dict(data_indices, "mnist_iid.pkl")
| [
"928255708@qq.com"
] | 928255708@qq.com |
147317683cc08fca306d68aa4d573fceacbb5a7b | f51c0d7c19936997076fb94c6b39b6dfbc2c7578 | /basic_states/gesture_states/src/gesture_states/search_wave_sm.py | 7c1be08aac6619c6e5c3c87e80716f31f5a66e7b | [] | no_license | changlongzj/robocup2014 | 16dd0bf82adc87bd3d64df443cbab8ad839505c0 | 505f84b60fa685619cb711a778c2514fe440feec | refs/heads/master | 2021-01-18T08:38:39.329312 | 2014-06-06T14:44:37 | 2014-06-06T14:44:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | #! /usr/bin/env python
"""
Created on 28/05/14
@author: Chang Long Zhu Jin
@mail: changlongzj@gmail.com
"""
import rospy
import smach
import tf
import tf.transformations as TT
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped, Pose, PointStamped, Point, Quaternion
from util_states.topic_reader import topic_reader
from pal_vision_msgs.msg import Gesture
import numpy
import copy
from speech_states.say import text_to_say
from util_states.math_utils import *
from gesture_states.wave_detection_sm import WaveDetection
from manipulation_states.move_head_form import move_head_form
GESTURE_TOPIC = '/head_mount_xtion/gestures'
final_frame_id = 'base_link'  # frame the detected wave position is expressed in
PUB_1_TOPIC = '/given_pose'
PUB_2_TOPIC = '/transformed_pose'
# Number of pan positions swept while searching (mid_left, center, mid_right).
NUMBER_OF_HEAD_POSES = 3
class prepare_move_head(smach.State):
    """Compute the next head pan pose for the wave-search sweep.

    Each successful call advances ``loop_iterations`` through the pose
    sequence mid_left -> center -> mid_right; once NUMBER_OF_HEAD_POSES
    poses have been tried the state returns 'end_searching' and resets
    the counter for the next sweep.
    """
    def __init__(self):
        rospy.loginfo("Entering loop_test")
        smach.State.__init__(self, outcomes=['succeeded','aborted', 'preempted', 'end_searching'],
                             input_keys=['loop_iterations', 'head_left_right', 'head_up_down'],
                             output_keys=['head_left_right', 'head_up_down', 'standard_error', 'loop_iterations'])

    def execute(self, userdata):
        # Sweep exhausted: reset the counter so the next search starts over.
        if userdata.loop_iterations == NUMBER_OF_HEAD_POSES:
            userdata.loop_iterations = 0
            return 'end_searching'

        rospy.loginfo(userdata.loop_iterations)
        userdata.standard_error = 'OK'
        # Map the iteration counter to a symbolic pan position.
        # (A dead numeric assignment to head_left_right was removed here;
        # every branch below overwrote it immediately.)
        if userdata.loop_iterations == 0:
            userdata.head_left_right = 'mid_left'
        elif userdata.loop_iterations == 1:
            userdata.head_left_right = 'center'
        elif userdata.loop_iterations == 2:
            userdata.head_left_right = 'mid_right'
        userdata.head_up_down = 'normal'
        userdata.loop_iterations = userdata.loop_iterations + 1
        return 'succeeded'
class Search_Wave_SM(smach.StateMachine):
    """
    Search_Wave_SM: This SM searches for any 'wave' gesture around a room,
    as the robot will move its head from left to right.
    If any gesture is detected, then it will stop and register the position XYZ of the gesture.

    Input Keys:
        None
    Output Keys:
        @key wave_position: A PointStamped point referenced to /base_link
        @key wave_yaw_degree: the yaw in degrees for the robot to turn.
        @key standard_error: A base error to inform.
    Required Parameters:
        None
    Outcomes:
        'succeeded' : Found a person
        'aborted' : something went wrong
        'end_searching' : No one is found, so searching is cancelled
    """
    # NOTE(review): head_position is currently unused in this constructor —
    # confirm whether it should be forwarded to move_head_form.
    def __init__(self, head_position=None):
        smach.StateMachine.__init__(self, outcomes=['succeeded', 'preempted', 'aborted', 'end_searching'],
                                    input_keys=[],
                                    output_keys=['wave_position', 'wave_yaw_degree','standard_error'])

        with self:
            # Shared userdata defaults for the sweep.
            self.userdata.loop_iterations = 0
            self.userdata.wave_position = None
            self.userdata.wave_yaw_degree = None

            # 1) Pick the next head pose; 'end_searching' when the sweep ends.
            smach.StateMachine.add(
                'Move_head_prepare',
                prepare_move_head(),
                transitions={'succeeded': 'move_head', 'aborted': 'aborted',
                             'preempted': 'preempted', 'end_searching':'end_searching'})

            # 2) Physically move the head to the chosen pose.
            smach.StateMachine.add(
                'move_head',
                move_head_form(head_up_down='normal'),
                transitions={'succeeded': 'Say_Searching',
                             'preempted':'Say_Searching',
                             'aborted':'aborted'})

            # 3) Announce the search out loud.
            smach.StateMachine.add(
                'Say_Searching',
                text_to_say("I am searching, let's see"),
                transitions={'succeeded':'wave_recognition', 'aborted':'wave_recognition', 'preempted':'wave_recognition'})

            # 4) Look for a wave; on failure loop back to the next pose.
            smach.StateMachine.add(
                'wave_recognition',
                WaveDetection(),
                transitions={'succeeded': 'succeeded', 'aborted': 'Move_head_prepare',
                             'preempted': 'preempted'})
def main():
    """Standalone test entry: run Search_Wave_SM inside a trivial wrapper SM."""
    rospy.loginfo('Wave Detection Node')
    rospy.init_node('wave_detection_node')
    sm = smach.StateMachine(outcomes=['succeeded', 'preempted', 'aborted', 'end_searching'])
    with sm:
        smach.StateMachine.add(
            'gesture_state',
            Search_Wave_SM(head_position='down'),
            transitions={'succeeded': 'succeeded','preempted':'preempted', 'aborted':'aborted', 'end_searching':'end_searching'})
    sm.execute()
    rospy.spin()
# Run the node when this file is executed directly.
if __name__=='__main__':
    main()
| [
"sam.pfeiffer@pal-robotics.com"
] | sam.pfeiffer@pal-robotics.com |
9650486d3c6c5ba56d45045f7903fdcfc4194666 | caaf7723580684886559dedba9a0cfa19036243d | /led_stepper_scan.py | cc1a7797598b5c6cf966e7d924197614e5a642c4 | [] | no_license | mike-fang/led_micro | 27214b5d9e67abd3dbc85c2962be13bb82c83723 | c08105b1cd84836fed2dea11074e1d47d13f099a | refs/heads/master | 2022-11-28T10:46:09.647242 | 2020-08-02T19:44:22 | 2020-08-02T19:44:22 | 275,946,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from capture_msi import init_rb
from stepper_control import Stepper
from elp_usb_cam import ELP_Camera
import numpy as np
import time
rb = init_rb()  # relay/LED board controller from capture_msi
# Map LED colour code -> board channel index.
# NOTE(review): led_ch is defined but not used below — presumably kept for
# interactive use or later scan code; confirm before removing.
led_ch = {
    'g' : 1,
    'b' : 2,
    'w' : 5,
    'r' : 6,
    'o' : 7
    }
# Turn all 8 channels on.
state = np.ones(8)
rb.set_state(state)
| [
"1michaelfang@gmail.com"
] | 1michaelfang@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.