max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
agents/imitation/network.py | kadn/carla-imitation | 0 | 12763751 | import numpy as np
import tensorflow as tf
def weight_ones(shape, name):
    """Return a tf.Variable of the given shape filled with the constant 1.0."""
    initial = tf.constant(1.0, shape=shape, name=name)
    return tf.Variable(initial)
def weight_xavi_init(shape, name):
    """Create a Xavier-initialised weight variable of the given shape.

    Uses tf.get_variable, so under TF1 variable scoping an existing variable
    with the same name would be reused rather than re-created.
    """
    initial = tf.get_variable(name=name, shape=shape,
                              initializer=tf.contrib.layers.xavier_initializer())
    return initial
def bias_variable(shape, name):
    """Return a bias tf.Variable initialised to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape, name=name)
    return tf.Variable(initial)
class Network(object):
    """Layer-by-layer builder for the imitation-learning CNN (TF1 graph mode).

    Each layer type keeps its own counter so repeated calls produce uniquely
    named variables/ops; created conv weights and intermediate feature
    tensors are recorded in dicts for later inspection.
    """

    def __init__(self, train_state):
        """ We put a few counters to see how many times we called each function """
        # train_state: scalar bool placeholder; the *_block helpers use it in
        # tf.cond to enable dropout only at training time.
        self._count_conv = 0
        self._count_pool = 0
        self._count_bn = 0
        self._count_dropouts = 0
        self._count_activations = 0
        self._count_fc = 0
        self._count_lstm = 0
        self._count_soft_max = 0
        self._conv_kernels = []
        self._conv_strides = []
        self._weights = {}   # name -> conv weight Variable
        self._features = {}  # name -> intermediate feature tensor
        self._train_state = train_state

    """ Our conv is currently using bias """
    def conv(self, x, kernel_size, stride, output_size, padding_in='SAME'):
        """2-D convolution with a square Xavier-initialised kernel plus bias."""
        self._count_conv += 1
        filters_in = x.get_shape()[-1]
        shape = [kernel_size, kernel_size, filters_in, output_size]
        weights = weight_xavi_init(shape, 'W_c_' + str(self._count_conv))
        bias = bias_variable([output_size], name='B_c_' + str(self._count_conv))
        self._weights['W_conv' + str(self._count_conv)] = weights
        self._conv_kernels.append(kernel_size)
        self._conv_strides.append(stride)
        conv_res = tf.add(tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding=padding_in,
                                       name='conv2d_' + str(self._count_conv)), bias,
                          name='add_' + str(self._count_conv))
        # Feature key is 0-based (counter was already incremented above).
        self._features['conv_block' + str(self._count_conv - 1)] = conv_res
        return conv_res

    def max_pool(self, x, ksize=3, stride=2):
        """SAME-padded max pooling over spatial dims."""
        self._count_pool += 1
        return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1],
                              padding='SAME', name='max_pool' + str(self._count_pool))

    def bn(self, x):
        """Batch normalisation.

        NOTE(review): is_training=False means the moving statistics are used
        even during training — confirm this is intended.
        """
        self._count_bn += 1
        return tf.contrib.layers.batch_norm(x, is_training=False,
                                            updates_collections=None, scope='bn' + str(self._count_bn))

    def activation(self, x):
        """ReLU activation."""
        self._count_activations += 1
        return tf.nn.relu(x, name='relu' + str(self._count_activations))

    def dropout(self, x, prob=1):
        """Dropout; in TF1 `prob` is the *keep* probability, not the drop rate."""
        print ("Dropout", self._count_dropouts)
        self._count_dropouts += 1
        output = tf.nn.dropout(x, prob,
                               name='dropout' + str(self._count_dropouts))
        return output

    def fc(self, x, output_size):
        """Fully connected layer: x @ W + b with Xavier-initialised W."""
        self._count_fc += 1
        filters_in = x.get_shape()[-1]
        shape = [filters_in, output_size]
        weights = weight_xavi_init(shape, 'W_f_' + str(self._count_fc))
        bias = bias_variable([output_size], name='B_f_' + str(self._count_fc))
        return tf.nn.xw_plus_b(x, weights, bias, name='fc_' + str(self._count_fc))

    def conv_block(self, x, kernel_size, stride, output_size, padding_in='SAME', dropout_prob=None):
        """conv -> batch norm -> (train-only dropout) -> ReLU."""
        print (" === Conv", self._count_conv, " : ", kernel_size, stride, output_size)
        with tf.name_scope("conv_block" + str(self._count_conv)):
            x = self.conv(x, kernel_size, stride, output_size, padding_in=padding_in)
            x = self.bn(x)
            if dropout_prob is not None:
                # Apply dropout only when the train_state placeholder is True.
                x = tf.cond(self._train_state,
                            true_fn=lambda: self.dropout(x, dropout_prob),
                            false_fn=lambda: x)
            return self.activation(x)

    def fc_block(self, x, output_size, dropout_prob=None):
        """fully connected -> (train-only dropout) -> ReLU."""
        print (" === FC", self._count_fc, " : ", output_size)
        with tf.name_scope("fc" + str(self._count_fc + 1)):
            x = self.fc(x, output_size)
            if dropout_prob is not None:
                x = tf.cond(self._train_state,
                            true_fn=lambda: self.dropout(x, dropout_prob),
                            false_fn=lambda: x)
            # NOTE(review): self.fc already incremented _count_fc, so this key
            # uses count+2 relative to the name_scope above — confirm intended.
            self._features['fc_block' + str(self._count_fc + 1)] = x
            return self.activation(x)

    def get_weigths_dict(self):
        # (sic) misspelled name kept for compatibility with existing callers.
        return self._weights

    def get_feat_tensors_dict(self):
        return self._features
def sometimes(img, prob, fn):
    """Apply `fn` to each batch element independently with probability `prob`.

    Note: `fn(img)` is evaluated for the whole batch regardless; tf.where then
    selects, per batch element, the augmented or the original image.
    """
    mask = tf.less(tf.random_uniform([tf.shape(img)[0]]), prob)
    return tf.where(mask, x=fn(img), y=img)
def additive_gaussian_noise(img, loc, scale, per_channel):
    """Add Gaussian noise whose stddev is drawn uniformly from `scale`.

    With probability `per_channel` (decided per batch element) noise is
    sampled independently for every channel; otherwise a single-channel noise
    plane is broadcast across all channels.
    """
    std = tf.random_uniform([], scale[0], scale[1])
    shp = tf.shape(img)
    noise_per_channel = tf.random_normal(shp,
                                         mean=loc, stddev=std)
    # One channel of noise, broadcast over the channel axis on addition.
    noise_all_channel = tf.random_normal([shp[0], shp[1], shp[2], 1],
                                         mean=loc, stddev=std)
    mask = tf.less(tf.random_uniform([shp[0]]), per_channel)
    return tf.where(mask,
                    x=img + noise_per_channel,
                    y=img + noise_all_channel)
def random_multiply(img, lower, upper, per_channel):
    """Scale the image by a random factor drawn uniformly from [lower, upper].

    With probability `per_channel` (per batch element) each channel gets its
    own factor; otherwise one scalar factor is applied to all channels.
    """
    shp = tf.shape(img)
    factor_per_channel = tf.random_uniform([1,1,1,shp[3]], lower, upper)
    factor_all_channel = tf.random_uniform([], lower, upper)
    mask = tf.less(tf.random_uniform([shp[0]]), per_channel)
    return tf.where(mask,
                    x=img * factor_per_channel,
                    y=img * factor_all_channel)
def augment_image(img):
    """Random photometric augmentation pipeline for a batch of RGB images.

    Each step fires independently per batch element with its own probability.
    The brightness delta (40) and noise scale (0.05*255) assume pixel values
    in [0, 255] — TODO confirm against the input pipeline.
    """
    img = sometimes(img, 0.2,
                    lambda image: tf.image.random_hue(image, max_delta=0.05))
    img = sometimes(img, 0.2,
                    lambda image: tf.image.random_saturation(image, lower=0.5, upper=2))
    img = sometimes(img, 0.2,
                    lambda image: tf.image.random_contrast(image, lower=0.3, upper=1.7))
    img = sometimes(img, 0.5,
                    lambda image: tf.image.random_brightness(image, max_delta=40))
    img = sometimes(img, 0.5,
                    lambda image: random_multiply(image, lower=0.3, upper=2.0, per_channel=0.2))
    img = sometimes(img, 0.3,
                    lambda image: additive_gaussian_noise(image, loc=0,
                                                          scale=(0, 0.05*255), per_channel=0.5))
    return img
def multi_channel(inp_img):
    """Augment a 30-channel input as ten independent 3-channel (RGB) frames.

    Each consecutive channel triple is run through augment_image separately,
    then the augmented frames are re-concatenated along the channel axis.
    """
    frames = [augment_image(inp_img[:, :, :, 3 * idx:3 * idx + 3])
              for idx in range(10)]
    return tf.concat(frames, 3)
def make_network():
    """Build the full TF1 graph: 8 conv blocks + FC head fused with speed.

    Returns a dict with the loss tensor, the train_state placeholder, the
    input placeholders, the label placeholder, and the output tensors.
    Input: 88x200 images with 30 channels (10 stacked RGB frames).
    """
    inp_img = tf.placeholder(tf.float32, shape=[None, 88, 200, 30], name='input_image')
    inp_speed = tf.placeholder(tf.float32, shape=[None, 1], name='input_speed')
    target_control = tf.placeholder(tf.float32, shape=[None, 3], name='target_control')
    #target_command = tf.placeholder(tf.float32, shape=[None, 4], name='target_command')
    train_state = tf.placeholder(tf.bool, shape=[], name='train_state')
    # Image augmentation is applied only when training.
    oup_img = tf.cond(train_state,
                      true_fn=lambda: multi_channel(inp_img),
                      false_fn=lambda: inp_img)
    network_manager = Network(train_state)
    # Convolutional trunk.
    xc = network_manager.conv_block(oup_img, 5, 2, 32, padding_in='VALID')
    print (xc)
    xc = network_manager.conv_block(xc, 3, 1, 32, padding_in='VALID')
    print (xc)
    xc = network_manager.conv_block(xc, 3, 2, 64, padding_in='VALID')
    print (xc)
    xc = network_manager.conv_block(xc, 3, 1, 64, padding_in='VALID')
    print (xc)
    xc = network_manager.conv_block(xc, 3, 2, 128, padding_in='VALID')
    print (xc)
    xc = network_manager.conv_block(xc, 3, 1, 128, padding_in='VALID')
    print (xc)
    xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID')
    print (xc)
    xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID')
    print (xc)
    # Flatten spatial dims for the fully connected head.
    x = tf.reshape(xc, [-1, int(np.prod(xc.get_shape()[1:]))], name='reshape')
    print (x)
    x = network_manager.fc_block(x, 512, dropout_prob=0.7)
    print (x)
    x = network_manager.fc_block(x, 512, dropout_prob=0.7)
    # Separate small MLP for the measured speed input.
    with tf.name_scope("Speed"):
        speed = network_manager.fc_block(inp_speed, 128, dropout_prob=0.5)
        speed = network_manager.fc_block(speed, 128, dropout_prob=0.5)
    # Fuse image and speed features, then regress the 3 control values
    # (presumably steer/gas/brake — see the commented-out branch config).
    j = tf.concat([x, speed], 1)
    j = network_manager.fc_block(j, 512, dropout_prob=0.5)
    control_out = network_manager.fc_block(j, 256, dropout_prob=0.5)
    control_out = network_manager.fc_block(control_out, 256)
    control_out = network_manager.fc(control_out, 3)
    # Plain MSE loss over the 3 control outputs.
    loss = tf.reduce_mean(tf.square(tf.subtract(control_out, target_control)))
    tf.summary.scalar('loss', loss)
    '''
    branch_config = [["Steer", "Gas", "Brake"], ["Steer", "Gas", "Brake"], \
                     ["Steer", "Gas", "Brake"], ["Steer", "Gas", "Brake"]]
    branches = []
    losses = []
    for i in range(0, len(branch_config)):
        with tf.name_scope("Branch_" + str(i)):
            branch_output = network_manager.fc_block(j, 256, dropout_prob=0.5)
            branch_output = network_manager.fc_block(branch_output, 256)
            branches.append(network_manager.fc(branch_output, len(branch_config[i])))
            losses.append(tf.square(tf.subtract(branches[i], target_control)))
            print (branch_output)
    losses = tf.convert_to_tensor(losses)
    losses = tf.reduce_mean(tf.transpose(losses, [1, 2, 0]), axis=1) * target_command;
    loss = tf.reduce_sum(losses)
    '''
    return {'loss': loss,
            'train_state': train_state,
            'inputs': [inp_img, inp_speed],
            'labels': [target_control],
            'outputs': [control_out, oup_img]}
| 2.84375 | 3 |
tools/frontend/alpha/utils/logging.py | Ren-Research/maestro | 0 | 12763752 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import builtins
import decimal
import functools
import logging
import os
import sys
from .comm import is_master_process as is_master_proc
def _suppress_print():
"""
Suppresses printing from the current process.
"""
def print_pass(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
pass
builtins.print = print_pass
def setup_logging(save_path, mode='a'):
    """
    Configure logging for multi-process training: only the master process
    emits log output; printing is suppressed in every other process.
    """
    master = is_master_proc()
    if master:
        # Drop any handlers installed before this call.
        logging.root.handlers = []
    else:
        _suppress_print()
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.propagate = False
    console_formatter = logging.Formatter(
        "[%(asctime)s]: %(message)s",
        datefmt="%m/%d %H:%M:%S",
    )
    file_formatter = logging.Formatter("%(message)s")
    if master:
        console = logging.StreamHandler(stream=sys.stdout)
        console.setLevel(logging.DEBUG)
        console.setFormatter(console_formatter)
        root.addHandler(console)
        if save_path is not None:
            file_handler = logging.FileHandler(save_path, mode=mode)
            file_handler.setLevel(logging.DEBUG)
            file_handler.setFormatter(file_formatter)
            root.addHandler(file_handler)
def get_logger(name):
    """
    Retrieve the logger with the specified name or, if name is None, return a
    logger which is the root logger of the hierarchy.
    Args:
        name (string): name of the logger.
    """
    logger = logging.getLogger(name)
    return logger
| 2.34375 | 2 |
auditwheel/policy/__init__.py | andreysmelter/auditwheel | 0 | 12763753 | <reponame>andreysmelter/auditwheel
import sys
import json
import platform as _platform_module
from typing import Optional
from os.path import join, dirname, abspath
import logging
logger = logging.getLogger(__name__)
# Map sys.platform values onto short OS identifiers.
_sys_map = {'linux2': 'linux',
            'linux': 'linux',
            'darwin': 'osx',
            'win32': 'win',
            'openbsd5': 'openbsd'}
# Linux machine names that are not x86 and keep their literal arch tag.
non_x86_linux_machines = {'armv6l', 'armv7l', 'ppc64le'}
platform = _sys_map.get(sys.platform, 'unknown')
linkage = _platform_module.architecture()[1]
# https://docs.python.org/3/library/platform.html#platform.architecture
# Pointer width of this interpreter (64 or 32).
bits = 8 * (8 if sys.maxsize > 2 ** 32 else 4)
# manylinux tag -> list of plain linux tags it may replace.
_PLATFORM_REPLACEMENT_MAP = {
    'manylinux1_x86_64': ['linux_x86_64'],
    'manylinux2010_x86_64': ['linux_x86_64'],
    'manylinux1_i686': ['linux_i686'],
    'manylinux2010_i686': ['linux_i686'],
}
# XXX: this could be weakened. The show command _could_ run on OS X or
# Windows probably, but there's not much reason to inspect foreign package
# that won't run on the platform.
if platform != 'linux':
    logger.critical('Error: This tool only supports Linux')
    sys.exit(1)
def get_arch_name():
    """Return the architecture tag for the current machine/interpreter."""
    machine = _platform_module.machine()
    if machine in non_x86_linux_machines:
        # Non-x86 machines keep their literal name (e.g. 'armv7l').
        return machine
    return {64: 'x86_64', 32: 'i686'}[bits]
_ARCH_NAME = get_arch_name()
# Load the policy definitions shipped next to this module and specialise each
# generic policy name with the current architecture suffix.
with open(join(dirname(abspath(__file__)), 'policy.json')) as f:
    _POLICIES = json.load(f)
for p in _POLICIES:
    p['name'] = p['name'] + '_' + _ARCH_NAME
POLICY_PRIORITY_HIGHEST = max(p['priority'] for p in _POLICIES)
POLICY_PRIORITY_LOWEST = min(p['priority'] for p in _POLICIES)
def load_policies():
    """Return the architecture-specialised policy dicts loaded at import."""
    return _POLICIES
def _load_policy_schema():
    """Load the JSON schema used to validate policy.json."""
    schema_path = join(dirname(abspath(__file__)), 'policy-schema.json')
    with open(schema_path) as f:
        return json.load(f)
def get_policy_name(priority: int) -> Optional[str]:
    """Map a policy priority to its (unique) policy name, or None if unknown."""
    names = [policy['name'] for policy in _POLICIES
             if policy['priority'] == priority]
    if not names:
        return None
    if len(names) > 1:
        raise RuntimeError('Internal error. priorities should be unique')
    return names[0]
def get_priority_by_name(name: str) -> Optional[int]:
    """Map a policy name to its (unique) priority, or None if unknown."""
    matches = [p['priority'] for p in _POLICIES if p['name'] == name]
    if len(matches) == 0:
        return None
    if len(matches) > 1:
        raise RuntimeError('Internal error. Policies should be unique.')
    return matches[0]
def get_replace_platforms(name: str):
    """Extract platform tag replacement rules from policy

    >>> get_replace_platforms('linux_x86_64')
    []
    >>> get_replace_platforms('linux_i686')
    []
    >>> get_replace_platforms('manylinux1_x86_64')
    ['linux_x86_64']
    >>> get_replace_platforms('manylinux1_i686')
    ['linux_i686']
    """
    rules = _PLATFORM_REPLACEMENT_MAP.get(name)
    return [] if rules is None else rules
# Imported at the bottom, after the module-level globals above exist —
# presumably to avoid a circular import; TODO confirm.
from .external_references import lddtree_external_references
from .versioned_symbols import versioned_symbols_policy
# Public API of this package.
__all__ = ['lddtree_external_references', 'versioned_symbols_policy',
           'load_policies', 'POLICY_PRIORITY_HIGHEST',
           'POLICY_PRIORITY_LOWEST']
| 2.375 | 2 |
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pacman/model/abstract_classes/virtual_partitioned_vertex.py | Roboy/LSM_SpiNNaker_MyoArm | 2 | 12763754 | <filename>src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pacman/model/abstract_classes/virtual_partitioned_vertex.py
from pacman.model.partitioned_graph.partitioned_vertex import PartitionedVertex
from pacman.model.constraints.placer_constraints\
.placer_chip_and_core_constraint import PlacerChipAndCoreConstraint
class VirtualPartitionedVertex(PartitionedVertex):
    """A partitioned vertex that lives on a *virtual* chip reached through a
    SpiNNaker link, rather than on a physical chip of the machine.
    """

    def __init__(self, resources_required, label, spinnaker_link_id,
                 constraints=None):
        PartitionedVertex.__init__(
            self, resources_required, label, constraints=constraints)
        self._spinnaker_link_id = spinnaker_link_id
        # All chip coordinates stay unset until
        # set_virtual_chip_coordinates() is called.
        self._virtual_chip_x = None
        self._virtual_chip_y = None
        self._real_chip_x = None
        self._real_chip_y = None
        self._real_link = None

    @property
    def virtual_chip_x(self):
        """x-coordinate of the virtual chip (None until assigned)."""
        return self._virtual_chip_x

    @property
    def virtual_chip_y(self):
        """y-coordinate of the virtual chip (None until assigned)."""
        return self._virtual_chip_y

    @property
    def real_chip_x(self):
        """x-coordinate of the physical chip the link attaches to."""
        return self._real_chip_x

    @property
    def real_chip_y(self):
        """y-coordinate of the physical chip the link attaches to."""
        return self._real_chip_y

    @property
    def real_link(self):
        """The physical link used to reach the virtual chip."""
        return self._real_link

    def set_virtual_chip_coordinates(
            self, virtual_chip_x, virtual_chip_y, real_chip_x, real_chip_y,
            real_link):
        """Record the virtual/real chip mapping and pin placement of this
        vertex to the virtual chip via a PlacerChipAndCoreConstraint.
        """
        self._virtual_chip_x = virtual_chip_x
        self._virtual_chip_y = virtual_chip_y
        self._real_chip_x = real_chip_x
        self._real_chip_y = real_chip_y
        self._real_link = real_link
        constraint = PlacerChipAndCoreConstraint(
            self._virtual_chip_x, self._virtual_chip_y)
        self.add_constraint(constraint)

    @property
    def spinnaker_link_id(self):
        """ The id of the spinnaker link being used
        """
        return self._spinnaker_link_id
| 2.421875 | 2 |
Functions/saral_function.py | Ena-Sharma/Meraki_Solution | 0 | 12763755 | <gh_stars>0
def ask_question():
    """Print the quiz question followed by its four (anonymised) options."""
    print("Who is the founder of Facebook?")
    options = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
    print("\n".join(options))
# Show the question once, then repeat it 100 more times.
# (Idiom fix: the original used a manual while-loop counter; for/range is
# the idiomatic way to repeat a call a fixed number of times.)
ask_question()
for _ in range(100):
    ask_question()
def say_hello(name):
    """Print a two-line Hinglish greeting for *name*."""
    for parts in (("Hello ", name), ("Aap kaise ho?",)):
        print(*parts)

say_hello("jai")
def add_number(num1, num2):
    """Announce and print the sum of the two arguments."""
    print("hum2 numbers ko add krenge")
    total = num1 + num2
    print(total)

add_number(112, 3)
varx = 10
vary = 20
add_number(varx, vary)
def say_lang(name, language):
    """Greet *name* in the requested language; silent for unknown languages."""
    greetings = {
        "punjabi": "sat sri akaal",
        "hindi": "Namestye",
        "English": "good morning",
    }
    greeting = greetings.get(language)
    if greeting is not None:
        print(greeting, name)

say_lang("rishabh", "hindi")
say_lang("jai", "English")
def print_lines(name, position):
    """Print a two-line self-introduction for *name* at *position*."""
    print(f"mera naam {name!s} hai")
    print(f"mein {position!s} ka co-founder hu")

print_lines("jai", "Navgurkul")
def add_numbers(number1, number2):
    """Print a sentence containing the sum of the two numbers."""
    total = number1 + number2
    print(number1, "aur", number2, " ka sum:-", total)

add_numbers(56, 12)
num1 = [15, 20, 30]
num2 = [20, 30, 40]

def add_num_list(num1, num2):
    """Return a list of the element-wise sums of the two input lists."""
    sums = []
    for idx in range(len(num1)):
        sums.append(num1[idx] + num2[idx])
    return sums

print(add_num_list(num1, num2))
num1 = [15, 20, 30, 2, 6, 18, 10, 3, 75]
num2 = [20, 30, 40, 6, 19, 24, 12, 3, 87]

def add_num_list(num1, num2):
    """For each index, print whether *both* elements are even or not."""
    for idx in range(len(num1)):
        both_even = num1[idx] % 2 == 0 and num2[idx] % 2 == 0
        print("even hai" if both_even else "odd hai")

(add_num_list(num1, num2))
def add_numbers_print(number_x, number_y):
    """Return the sum of the two arguments."""
    return number_x + number_y

sum4 = add_numbers_print(4, 5)
print(sum4)
print(type(sum4))
def calculator(numx, numy, operator):
    """Apply the named arithmetic operation to the two operands.

    Supported operators: "add", "subtract", "multiply", "divide".
    Returns None (implicitly) for any other operator string.
    """
    if operator == "add":
        return numx + numy
    if operator == "subtract":
        return numx - numy
    if operator == "multiply":
        return numx * numy
    if operator == "divide":
        return numx / numy
# Interactive driver: read two numbers and an operation name from the user,
# then print the calculator result.
num1=int(input("Enter the 1st number :- "))
num2=int(input("Enter the 2nd number :- "))
num3=input("which action you want to perform (add/subtract/multiply/divide)")
print(calculator(num1,num2,num3))
a = [3, 4, 5, 6]
b = [2, 4, 5, 6]

def list_change(a, b):
    """Return a list of the element-wise products of the two input lists."""
    products = []
    for idx in range(len(a)):
        products.append(a[idx] * b[idx])
    return products

print(list_change(a, b))
| 3.6875 | 4 |
migrations/versions/360581181dc_.py | LandRegistry/system-of-record | 0 | 12763756 | <gh_stars>0
"""empty message
Revision ID: 360581181dc
Revises: <KEY>
Create Date: 2015-03-21 11:22:30.887740
"""
# revision identifiers, used by Alembic.
revision = '360581181dc'
down_revision = '18aa913e6bb'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Rename table sor -> records (and column sor -> record), replacing the
    old signature index with a unique index over the JSON payload's
    (title_number, application_reference) pair."""
    op.execute("DROP INDEX sor_idx")
    op.execute("ALTER TABLE sor RENAME TO records")
    op.execute("ALTER TABLE records RENAME sor TO record")
    op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
def downgrade():
    """Exact reverse of upgrade(): restore the sor table/column names and the
    unique index on the JSON 'sig' field."""
    op.execute("DROP INDEX title_abr_idx")
    op.execute("ALTER TABLE records RENAME record TO sor")
    op.execute("ALTER TABLE records RENAME TO sor")
    op.execute("CREATE UNIQUE INDEX sor_idx ON sor((sor->>'sig'))")
| 1.492188 | 1 |
jacket/db/storage/sqlalchemy/migrate_repo/versions/066_add_allocated_id_column_to_reservations.py | bopopescu/jacket | 0 | 12763757 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData, Integer, Table, ForeignKey
def upgrade(migrate_engine):
    """Add allocated_id to the reservations table."""
    meta = MetaData()
    meta.bind = migrate_engine
    reservations = Table('storage_reservations', meta, autoload=True)
    # Reflect quotas so the ForeignKey target below can be resolved.
    Table('storage_quotas', meta, autoload=True)
    allocated_id = Column('allocated_id', Integer, ForeignKey('storage_quotas.id'),
                          nullable=True)
    reservations.create_column(allocated_id)
    # usage_id becomes optional — presumably a reservation may now reference
    # an allocated quota instead of a usage row; TODO confirm against model.
    usage_id = reservations.c.usage_id
    usage_id.alter(nullable=True)
| 1.9375 | 2 |
source/funwithflags/entities/auth.py | zywangzy/fun_with_flags | 0 | 12763758 | """Module for authentication utilities."""
import bcrypt
def hash_password_with_salt(password: str, salt: bytes) -> bytes:
    """Hash a password string with a salt bytes."""
    encoded = password.encode()
    return bcrypt.hashpw(encoded, salt)
def generate_salt_hash_password(password: str) -> (bytes, bytes):
    """Hash a password with a freshly generated salt.

    Returns the (hashed_password, salt) pair, both as bytes.
    """
    salt = bcrypt.gensalt()
    hashed = hash_password_with_salt(password, salt)
    return hashed, salt
| 3.671875 | 4 |
src/python/spaCy2JSON.py | aarushiibisht/JSON-NLP | 24 | 12763759 | <reponame>aarushiibisht/JSON-NLP
#!/usr/bin/env python3
from collections import Counter
import spacy
import json
import datetime
# nlp = spacy.load('en_core_web_lg')
nlp = spacy.load('en')
def process(text):
    """Run the spaCy pipeline on `text` and convert the result to a
    JSON-NLP-style dict (tokens, sentences, noun-phrase expressions and
    basic dependencies)."""
    doc = nlp(text)
    j = {
        "conformsTo": 0.1,
        'source': 'SpaCy {}'.format(spacy.__version__),
        "created": datetime.datetime.now().replace(microsecond=0).isoformat(),
        "date": datetime.datetime.now().replace(microsecond=0).isoformat(),
        "dependenciesBasic": [],
        "expressions": [],
        "text": text,
        "sentences": []
    }
    token_list = []
    lang = Counter()          # language code -> token count
    sent_lookup = {}          # sentence text -> sentence id
    token_lookup = {}         # (sentence id, spaCy token index) -> global token id
    # tokens and sentences
    token_offset = 0
    for sent_num, sent in enumerate(doc.sents):
        current_sent = {
            'id': sent_num,
            'text': sent.text,
            'tokenFrom': token_offset,
            'tokenTo': token_offset + len(sent),
            'tokens': []
        }
        sent_lookup[sent.text] = sent_num
        j['sentences'].append(current_sent)
        for token in sent:
            t = {
                'id': token_offset,
                'text': token.text,
                'lemma': token.lemma_,
                'pos': token.pos_,
                'entity': token.ent_type_,
                'entity_iob': token.ent_iob_,
                'overt': True,
                'characterOffsetBegin': token.idx,
                'characterOffsetEnd': token.idx + len(token)
            }
            lang[token.lang_] += 1
            token_lookup[(sent_num, token.i)] = token_offset
            current_sent['tokens'].append(token_offset)
            token_offset += 1
            token_list.append(t)
        # Re-derive the span from the actual token ids just assigned.
        current_sent['tokenFrom'] = current_sent['tokens'][0]
        current_sent['tokenTo'] = current_sent['tokens'][-1]
    j['tokenList'] = token_list
    # noun phrases
    entity_id = 0
    for chunk in doc.noun_chunks:
        sent_id = sent_lookup[chunk.sent.text]
        e = {
            'id': entity_id,
            'type': 'NP',
            'head': token_lookup[(sent_id, chunk.root.i)],
            'dep': chunk.root.dep_,
            'tokens': []
        }
        for token in chunk:
            e['tokens'].append(token_lookup[(sent_id, token.i)])
        # NOTE(review): chunk.rights tokens may duplicate tokens already
        # appended from the chunk itself — confirm intended.
        for token in chunk.rights:
            e['tokens'].append(token_lookup[(sent_id, token.i)])
        j['expressions'].append(e)
        entity_id += 1
    # dependencies
    for sent_num, sent in enumerate(doc.sents):
        for token in sent:
            j['dependenciesBasic'].append({
                'type': token.dep_,
                'governor': token_lookup[(sent_num, token.head.i)],
                'dependent': token_lookup[(sent_num, token.i)],
            })
    # NOTE(review): max() over a Counter returns the lexicographically largest
    # key, not the most frequent language — lang.most_common(1) is probably
    # what was intended; confirm before changing.
    j['lang'] = max(lang)
    return j
if __name__ == "__main__":
    # Smoke test: run the pipeline on a fixed two-sentence text and dump the
    # resulting JSON-NLP document to spaCy.json in the working directory.
    test = "Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash."
    with open('spaCy.json', 'w') as fp:
        json.dump(process(test), fp, sort_keys=True, indent=4)
| 2.515625 | 3 |
utils/logger.py | YuLvS/L2PIPS | 3 | 12763760 | import os
import sys
from .util import get_timestamp
# print to file and std_out simultaneously
# print to file and std_out simultaneously
class PrintLogger(object):
    """Tee-style replacement for sys.stdout: every write is mirrored to both
    the real terminal and ``<log_path>/output.txt``.

    Intended usage: ``sys.stdout = PrintLogger(log_dir)``.
    """

    def __init__(self, log_path):
        # Keep a handle on the real stdout so output still reaches the console.
        self.terminal = sys.stdout
        self.log = open(os.path.join(log_path, 'output.txt'), 'a')

    def write(self, message):
        """Write *message* to both the terminal and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """Flush both underlying streams.

        Fix: the original implementation was a no-op, so buffered output could
        be lost (e.g. on crash) and ``print(..., flush=True)`` had no effect.
        A stream wrapper must delegate flush to its targets.
        """
        self.terminal.flush()
        self.log.flush()
class Logger(object):
    """Experiment logger: appends training losses and validation results to
    text files under opt['path']['log'], optionally mirroring scalar values
    to TensorBoard."""

    def __init__(self, opt):
        self.exp_name = opt['name']
        self.use_tensorboard_logger = opt['use_tensorboard_logger']
        self.opt = opt['logger']
        self.log_dir = opt['path']['log']
        # loss log file
        self.loss_log_path = os.path.join(self.log_dir, 'loss_log.txt')
        with open(self.loss_log_path, 'a') as log_file:
            log_file.write('=============== Time: ' + get_timestamp() + ' =============\n')
            log_file.write('================ Training Losses ================\n')
        # val results log file
        self.val_log_path = os.path.join(self.log_dir, 'val_log.txt')
        with open(self.val_log_path, 'a') as log_file:
            log_file.write('================ Time: ' + get_timestamp() + ' ===============\n')
            log_file.write('================ Validation Results ================\n')
        # TensorBoard is disabled for experiments whose name contains 'debug'.
        if self.use_tensorboard_logger and 'debug' not in self.exp_name:
            from tensorboard_logger import Logger as TensorboardLogger
            self.tb_logger_path = os.path.join(self.log_dir, "tb_logger")
            self.tb_logger = TensorboardLogger(self.tb_logger_path)

    def print_format_results(self, mode, rlt):
        """Format and emit one result dict.

        mode: 'train' or 'val'; rlt: dict holding 'epoch', 'iters', 'time',
        'model', optionally 'train_acc' and 'lr', plus arbitrary named
        metrics. NOTE: rlt is consumed destructively via pop().
        """
        epoch = rlt.pop('epoch')
        iters = rlt.pop('iters')
        time = rlt.pop('time')
        model = rlt.pop('model')
        # Build the header; the four variants only differ in whether the
        # learning rate and/or training accuracy are available.
        if 'train_acc' in rlt:
            acc = rlt.pop('train_acc')
            if 'lr' in rlt:
                lr = rlt.pop('lr')
                message = '<epoch:{:3d}, iter:{:7,d}, time:{:5,d}s, lr:{:.1e}> Train_ACC:{:.2f} | '.format(
                    epoch, iters, int(time), lr, acc)
            else:
                message = '<epoch:{:3d}, iter:{:7,d}, time:{:5,d}s> Train_ACC:{:.2f} | '.format(
                    epoch, iters, int(time), acc)
        else:
            if 'lr' in rlt:
                lr = rlt.pop('lr')
                message = '<epoch:{:3d}, iter:{:7,d}, time:{:5,d}s, lr:{:.1e}> '.format(
                    epoch, iters, int(time), lr)
            else:
                message = '<epoch:{:3d}, iter:{:7,d}, time:{:5,d}s> '.format(
                    epoch, iters, int(time))
        # Remaining entries are metric name -> value pairs.
        for label, value in rlt.items():
            if mode == 'train':
                message += '{:s}: {:.2e} | '.format(label, value)
            elif mode == 'val':
                message += '{:s}: {:.4f} | '.format(label, value)
            # tensorboard logger
            if self.use_tensorboard_logger and 'debug' not in self.exp_name:
                self.tb_logger.log_value(label, value, iters)
        # print in console
        print(message)
        # write in log file
        if mode == 'train':
            with open(self.loss_log_path, 'a') as log_file:
                log_file.write(message + '\n')
        elif mode == 'val':
            with open(self.val_log_path, 'a') as log_file:
                log_file.write(message + '\n')
| 2.5625 | 3 |
profileinstaller/integration-tests/testapp/cli/repackage.py | chao2zhang/androidx | 2 | 12763761 | <filename>profileinstaller/integration-tests/testapp/cli/repackage.py<gh_stars>1-10
#!/usr/bin/env python3
import argparse
import os
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from zipfile import ZipFile
# CHANGEME:
# PATH_TO_APKSIGNER = '/Users/lelandr/Library/Android/sdk/build-tools/30.0.3/apksigner'
# Name/path of the apksigner binary; overridable via --apk-signer.
PATH_TO_APKSIGNER = 'apksigner'
SCRIPT_PATH = Path(__file__).parent.absolute()
# Repository layout anchors, resolved relative to this script's location.
SUPPORT_PATH = (SCRIPT_PATH / Path("../../../..")).resolve()
ROOT_DIR = (SUPPORT_PATH / Path("../..")).resolve()
# Gradle build outputs for the test app (APKs and the R8 mapping file).
BUILD_OUT_DIR = (Path(SUPPORT_PATH) / Path(
    "../../out/androidx/profileinstaller/integration-tests/"
    "testapp/build/outputs/apk/")).resolve()
MAPPING_OUT_PATH = (Path(SUPPORT_PATH) / Path(
    "../../out/androidx/profileinstaller/integration-tests/"
    "testapp/build/outputs/mapping/release/mapping.txt")).resolve()
APK_PREFIX = "testapp"
# Where the compiled baseline profile lands inside the APK (under assets/dexopt/).
APK_PROFILE_FILE = "baseline.prof"
def parse_args():
    """Build and parse the command line for the repackaging tool."""
    parser = argparse.ArgumentParser()
    default_profile = str(Path(SCRIPT_PATH) / Path("all_compose_profile.txt"))
    default_jar = str(Path(SCRIPT_PATH) / Path("profgen-cli.jar"))
    parser.add_argument('--profile', '-p', required=False,
                        default=default_profile)
    parser.add_argument('--apk-path', '-f', required=False,
                        help='apk path to for processing a '
                             'single apk')
    parser.add_argument('--jar', '-j', required=False, default=default_jar)
    parser.add_argument('--output', '-o', required=False, default="out.apk")
    parser.add_argument('--debug', type=bool, required=False, default=False)
    parser.add_argument('--apk-signer', required=False, default=PATH_TO_APKSIGNER)
    return parser.parse_args()
def dir_for_buildtype(debug, path):
    """Resolve the APK to operate on.

    If an explicit *path* was given, use it; otherwise derive the Gradle
    output location for the debug or release build.
    """
    if path is not None:
        return Path(path).absolute()
    build_type = 'debug' if debug else 'release'
    apk_name = APK_PREFIX + "-" + build_type + ".apk"
    return (BUILD_OUT_DIR / Path(build_type) / Path(apk_name)).resolve()
def profile_from(pathStr):
    """Wrap the --profile argument in a Path."""
    return Path(pathStr)

def jar_from(jarPathStr):
    """Wrap the --jar argument in a Path."""
    return Path(jarPathStr)

def output_apk_from(outPathStr):
    """Wrap the --output argument in a Path."""
    return Path(outPathStr)
def check_env(apk_src, profile, jar, out_apk, apk_signer):
    """Validate all inputs and tools needed for repackaging.

    Exits the process on any fatal problem (missing APK, profile, profgen jar
    or apksigner binary); only warns when the output APK already exists.
    """
    if not apk_src.exists():
        print("ERROR: APK source does not exist, build it using gradle.")
        print(apk_src)
        sys.exit(-1)
    if not profile.exists():
        print("ERROR: Profile path does not exist")
        print(profile)
        sys.exit(-1)
    if not jar.exists():
        print("ERROR: Jar file does not exist")
        print(jar)
        sys.exit(-1)
    if shutil.which(apk_signer) is None:
        print("ERROR: missing command line tool `apksigner`")
        print("please install it on your system or modify the constant PATH_TO_APKSIGNER")
        sys.exit(-1)
    if out_apk.exists():
        print("WARNING: Output apk already exists, overwriting")
    print(f"Apk source: //{apk_src.relative_to(ROOT_DIR)}")
    print(f"Profile: //{profile.relative_to(ROOT_DIR)}")
    print(f"Profgen: {jar.absolute()}")
    # Bug fix: this previously printed the module-global `output_apk` (only
    # defined when run under __main__) instead of the `out_apk` parameter,
    # raising NameError when check_env was called from any other context.
    print(f"Output: {out_apk.absolute()}")
def run_profgen(tmpDirName, apk_src, profile, jar, output_file, debug):
    """Invoke the profgen CLI jar to compile the text profile against the APK
    into a binary profile at `output_file`; exits on failure.

    NOTE: `tmpDirName` is currently unused (kept for call compatibility).
    For non-debug builds the R8 mapping file is passed so obfuscated names
    can be resolved.
    """
    print(f"Running profgen:")
    print(f"Profile: {profile.absolute()}")
    print(f"Apk: {apk_src.absolute()}")
    print(f"Output: {output_file.absolute()}")
    jar_command = [
        'java',
        '-jar',
        str(jar.absolute()),
        'generate',
        str(profile.absolute()),
        '--apk',
        str(apk_src.absolute()),
        '--output',
        str(output_file.absolute()),
        '--verbose'
    ] + ([] if debug else [
        '--map',
        str(MAPPING_OUT_PATH.absolute())
    ])
    subprocess.run(jar_command, stdout=sys.stdout)
    # profgen's exit code is not checked; success is inferred from the
    # presence of the output file.
    if not output_file.exists():
        print(f"Failed to generate output file from profgen")
        print(" ".join(jar_command))
        sys.exit(-1)
    output_size = os.stat(output_file.absolute()).st_size
    print(f"Successfully created profile. Size: {output_size}")
def repackage_jar(apk_src, profile, apk_dest, tmp_dir, apksigner):
    """Copy the APK, embed the binary profile at assets/dexopt/baseline.prof,
    re-sign with the debug keystore, and copy the result to `apk_dest`.

    Aborts if the APK already contains a baseline profile. Works on a copy
    inside `tmp_dir` so the source APK is never modified.
    """
    working_dir = tmp_dir / Path("working/")
    working_dir.mkdir()
    working_apk = working_dir / Path("working.apk")
    shutil.copyfile(apk_src, working_apk)
    # Append the profile entry to the existing APK (zip) in place.
    with ZipFile(working_apk, 'a') as zip:
        profile_destination = Path('assets/dexopt/') / Path(APK_PROFILE_FILE)
        if str(profile_destination) in [it.filename for it in zip.infolist()]:
            print("ERROR: profile already in apk, aborting")
            print(profile_destination)
            sys.exit(-1)
        zip.write(profile, profile_destination)
    # Re-sign with the standard Android debug keystore (password redacted in
    # this dump); modifying the zip invalidated the original signature.
    keystore = Path.home() / Path(".android/debug.keystore")
    apksigner_command = [
        apksigner,
        'sign',
        '-ks',
        str(keystore.absolute()),
        '--ks-pass',
        'pass:<PASSWORD>',
        str(working_apk.absolute())
    ]
    subprocess.check_output(apksigner_command)
    shutil.copyfile(working_apk, apk_dest)
def generate_apk(apk_src, profile, jar, out_apk, debug, apk_signer):
    """End-to-end pipeline: validate the environment, compile the profile
    with profgen into a temp dir, then embed it and re-sign the APK."""
    check_env(apk_src, profile, jar, out_apk, apk_signer)
    with tempfile.TemporaryDirectory() as tmpDirName:
        output_profile = Path(tmpDirName) / Path("out.prof")
        print(f"Output profile: {output_profile.absolute()}")
        run_profgen(tmpDirName, apk_src, profile, jar, output_profile, debug)
        repackage_jar(apk_src, output_profile, out_apk, Path(tmpDirName), apk_signer)
if __name__ == "__main__":
    # CLI entry point: resolve all arguments to paths and run the pipeline.
    args = parse_args()
    apk_src = dir_for_buildtype(args.debug, args.apk_path)
    profile = profile_from(args.profile)
    jar = jar_from(args.jar)
    output_apk = output_apk_from(args.output)
    generate_apk(apk_src, profile, jar, output_apk, args.debug, args.apk_signer)
| 2.28125 | 2 |
groupdocsclassificationcloud/__init__.py | groupdocs-classification-cloud/groupdocs-classification-cloud-python | 3 | 12763762 | <reponame>groupdocs-classification-cloud/groupdocs-classification-cloud-python
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import apis into sdk package
from groupdocsclassificationcloud.apis.classification_api import ClassificationApi
# import ApiClient
from groupdocsclassificationcloud.api_client import ApiClient
from groupdocsclassificationcloud.configuration import Configuration
# import models into sdk package
from groupdocsclassificationcloud.models.base_request import BaseRequest
from groupdocsclassificationcloud.models.base_response import BaseResponse
from groupdocsclassificationcloud.models.classification_result import ClassificationResult
from groupdocsclassificationcloud.models.file_info import FileInfo
from groupdocsclassificationcloud.models.format import Format
from groupdocsclassificationcloud.models.format_collection import FormatCollection
from groupdocsclassificationcloud.models.classification_response import ClassificationResponse
from groupdocsclassificationcloud.models.classify_request import ClassifyRequest
from groupdocsclassificationcloud.models.get_supported_file_formats_request import GetSupportedFileFormatsRequest | 1.25 | 1 |
containers/AVLTree.py | Tonnpo/csci-046 | 0 | 12763763 | <reponame>Tonnpo/csci-046
from containers.BinaryTree import BinaryTree, Node
from containers.BST import BST
from copy import copy
class AVLTree(BST):
    '''
    Self-balancing binary search tree (Adelson-Velsky / Landis).

    Extends BST and restores the AVL invariant -- every node's balance
    factor must be in {-1, 0, 1} -- after insertions by applying the
    standard single/double rotations.
    '''

    def __init__(self, xs=None):
        '''
        Create an AVL tree; if the iterable *xs* is given, insert each of
        its values one at a time so the tree is rebalanced as it is built.
        (Implements the FIXME: *xs* was previously accepted but ignored.)
        '''
        super().__init__()
        if xs is not None:
            for x in xs:
                self.insert(x)

    def balance_factor(self):
        '''
        Returns the balance factor of a tree.
        '''
        return AVLTree._balance_factor(self.root)

    @staticmethod
    def _balance_factor(node):
        '''
        Returns the balance factor of a node: height of the left subtree
        minus height of the right subtree (0 for an empty node).
        '''
        if node is None:
            return 0
        return BinaryTree._height(node.left) - BinaryTree._height(node.right)

    def is_avl_satisfied(self):
        '''
        True when every node in the tree has balance factor in {-1, 0, 1}.
        '''
        return AVLTree._is_avl_satisfied(self.root)

    @staticmethod
    def _is_avl_satisfied(node):
        '''
        Recursively verify the AVL invariant on the subtree rooted at *node*.
        '''
        ret = True
        if node:
            if abs(AVLTree._balance_factor(node)) <= 1:
                ret &= AVLTree._is_avl_satisfied(node.left)
                ret &= AVLTree._is_avl_satisfied(node.right)
            else:
                ret = False
        return ret

    @staticmethod
    def _left_rotate(node):
        '''
        Return a (shallow-copied) left rotation of the subtree rooted at
        *node*; a node with no right child is returned unchanged.
        '''
        if node.right:
            new_node = copy(node.right)
            left_child = new_node.left
            new_node.left = copy(node)
            new_node.left.right = left_child
            return new_node
        else:
            return node

    @staticmethod
    def _right_rotate(node):
        '''
        Mirror image of _left_rotate.
        '''
        if node.left:
            new_node = copy(node.left)
            right_child = new_node.right
            new_node.right = copy(node)
            new_node.right.left = right_child
            return new_node
        else:
            return node

    def insert(self, value):
        '''
        Insert *value* in BST order, then rebalance wherever the AVL
        invariant was broken.
        '''
        if self.root:
            BST._insert(value, self.root)
            if not self.is_avl_satisfied():
                AVLTree._fix_balance(value, self.root)
        else:
            self.root = Node(value)

    @staticmethod
    def _fix_balance(value, node):
        '''
        Rebalance *node* in place (copying the rebalanced fields back),
        then recurse into both children.
        '''
        if node:
            balanced = AVLTree._rebalance(node)
            node.value = balanced.value
            node.left = balanced.left
            node.right = balanced.right
            AVLTree._fix_balance(value, node.right)
            AVLTree._fix_balance(value, node.left)

    @staticmethod
    def _insert(value, node):
        '''
        Alternate insertion helper that rebalances on the way down.
        '''
        if value > node.value:
            balanced_node = AVLTree._rebalance(node)
            node.value = balanced_node.value
            node.left = balanced_node.left
            node.right = balanced_node.right
            if node.right:
                AVLTree._insert(value, node.right)
            else:
                node.right = Node(value)
        elif value < node.value:
            balanced_node = AVLTree._rebalance(node)
            node.value = balanced_node.value
            node.left = balanced_node.left
            node.right = balanced_node.right
            if node.left:
                AVLTree._insert(value, node.left)
            else:
                node.left = Node(value)

    @staticmethod
    def _rebalance(node):
        '''
        Return a balanced copy of the subtree rooted at *node*:
        right-heavy (< -1) gets a left rotation (preceded by a right
        rotation of the right child for the RL case); left-heavy (> 1)
        is the mirror image.
        '''
        new_balance = copy(node)
        if AVLTree._balance_factor(node) < -1:
            if AVLTree._balance_factor(node.right) > 0:
                new_balance.right = AVLTree._right_rotate(node.right)
                new_balance = AVLTree._left_rotate(new_balance)
            else:
                new_balance = AVLTree._left_rotate(node)
        elif AVLTree._balance_factor(node) > 1:
            if AVLTree._balance_factor(node.left) < 0:
                new_balance.left = AVLTree._left_rotate(node.left)
                new_balance = AVLTree._right_rotate(new_balance)
            else:
                new_balance = AVLTree._right_rotate(node)
        return new_balance
| 2.984375 | 3 |
catalog_update_product.py | cognitivefashion/cf-sdk-python | 9 | 12763764 | #------------------------------------------------------------------------------
# Update product in a catalog.
# PUT /v1/catalog/{catalog_name}/products/{id}
#------------------------------------------------------------------------------
import os
import json
import requests
from urlparse import urljoin
from pprint import pprint
from props import *
# NOTE(review): this is a Python 2 script (print statement, `urlparse`);
# porting to Python 3 would require urllib.parse and print() calls.
# NOTE(review): `json` is imported but never used here.
# Replace this with the custom url generated for you.
api_gateway_url = props['api_gateway_url']
# Pass the api key into the header
# Replace 'your_api_key' with your API key.
headers = {'X-Api-Key': props['X-Api-Key']}
# Catalog name.
catalog_name = props['catalog_name']
# Product id.
# NOTE(review): `id` shadows the builtin id(); harmless in this short script.
id = 'LPJNA16AMDMTE91662'
# API end point,
api_endpoint = '/v1/catalog/%s/products/%s'%(catalog_name,id)
url = urljoin(api_gateway_url,api_endpoint)
# Request body: mark the product as out of stock.
data = {}
data['out_of_stock'] = 'yes'
params = {}
# Optional parameters.
params['download_images'] = 'true'
# PUT /v1/catalog/{catalog_name}/products/{id}
response = requests.put(url,
                        headers=headers,
                        json=data,
                        params=params)
print response.status_code
pprint(response.json())
| 2.453125 | 2 |
# Advent of Code 2020, day 6 part 1: groups are separated by blank lines;
# sum over all groups the number of distinct questions anyone answered.
count = 0
answers = set()
# Fix: the file handle was opened and never closed; `with` guarantees cleanup.
with open("input.txt", "r") as input_file:
    for line in input_file:
        if line == "\n":
            # Blank line ends a group: tally its distinct answers.
            count += len(answers)
            print(answers)
            answers.clear()
        else:
            for character in line:
                if character != "\n":
                    answers.add(character)
# The last group is not followed by a blank line; tally it too.
count += len(answers)
print(answers)
print(count)
unet/unet.py | mvWellman/OCTseg | 3 | 12763766 | # Copyright (C) 2019 Harvard University. All Rights Reserved. Unauthorized
# copying of this file, via any medium is strictly prohibited Proprietary and
# confidential
# Developed by <NAME> <<EMAIL>>,
# <<EMAIL>>.
# ==============================================================================
"""Build U-Net model"""
import keras.layers as KL
from keras.models import Model
from unet.ops import conv_layer, up_conv, MaxPoolingND
def unet_model(im_shape, nFeature=32, outCh=2, nLayer=3, pool_scale=2):
    """ Build U-Net model.
    Args:
        im_shape: input image shape; a leading singleton dimension is dropped
        nFeature: number of feature maps at the first encoder level
        outCh: number of output channels
        nLayer: number of encoder/decoder levels
        pool_scale: pooling / upsampling factor between levels
    Returns:
        keras model
    """
    if im_shape[0] == 1:
        im_shape = im_shape[1:]
    x = KL.Input(shape=im_shape)
    # out[0] is the input; each encoder level appends (conv, pool), so the
    # skip connection for level iLayer lives at out[1 + 2 * iLayer].
    out = [x]
    for iLayer in range(nLayer):
        out.append(conv_layer(out[-1], (2 ** (iLayer // 2)) * nFeature))
        out.append(MaxPoolingND(out[-1], s=pool_scale))
    # Bottleneck: two conv layers at the deepest resolution.
    out.append(conv_layer(out[-1], (2 ** (nLayer // 2)) * nFeature))
    out.append(conv_layer(out[-1], (2 ** (nLayer // 2)) * nFeature))
    for iLayer in range(nLayer - 1, -1, -1):
        u = up_conv(out[-1], s=pool_scale)
        if iLayer == 0:
            # Crop the upsampled tensor back to the original spatial size
            # when the input is not an exact multiple of pool_scale.
            u = KL.Cropping2D(cropping=((0, (im_shape[-3] + pool_scale - 1) // pool_scale * pool_scale - im_shape[-3]),
                          (0, (im_shape[-2] + pool_scale - 1) // pool_scale * pool_scale - im_shape[-2])),
                          data_format=None)(u)
        c = KL.Concatenate()([out[1 + 2 * iLayer], u])
        out.append(conv_layer(c, (2 ** (iLayer // 2)) * nFeature))
        if iLayer != 0 and pool_scale > 2:
            out.append(KL.Dropout(0.5)(out[-1]))
    # Final projection to the requested number of output channels.
    out.append(conv_layer(out[-1], outCh))
    return Model(inputs=x, outputs=out[-1])
| 2.375 | 2 |
gbe/email/forms/adhoc_email_form.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 1 | 12763767 | from django.forms import (
CharField,
EmailField,
Form,
MultipleChoiceField,
Textarea,
TextInput,
)
from django.forms.widgets import CheckboxSelectMultiple
from django.utils.html import strip_tags
class AdHocEmailForm(Form):
    # Form for composing a one-off email: recipients, sender, subject, body.
    use_required_attribute = False
    required_css_class = 'required'
    error_css_class = 'error'
    # NOTE(review): no choices are declared here -- presumably the view
    # injects recipient choices at runtime; confirm against callers.
    to = MultipleChoiceField(
        required=True,
        widget=CheckboxSelectMultiple(attrs={'class': 'form-check-input'}))
    sender = EmailField(required=True,
                        label="From")
    subject = CharField(widget=TextInput(attrs={'size': '79'}))
    # Rich-text body, edited with TinyMCE on the client (id hook below).
    html_message = CharField(
        widget=Textarea(attrs={'id': 'admin-tiny-mce'}),
        label="Message")
| 2.171875 | 2 |
src/vnsw/agent/gdb/agent_ksync.py | jnpr-pranav/contrail-controller | 37 | 12763768 | <reponame>jnpr-pranav/contrail-controller
from __future__ import print_function
#
# Copyright (c) 2016 Juniper Networks. All rights reserved.
#
# agent_ksync.py
#
# gdb macros to dump the vrouter agent KSync object/entries
from builtins import next
from builtins import str
import gdb
from libstdcxx.v6.printers import *
from boost.printers import *
class my_value(gdb.Value):
    """gdb.Value wrapper that also remembers the value's type name."""
    def __init__(self, b):
        # Bug fix: the original called gdb.Value(b), which constructs a brand
        # new Value and discards it instead of initialising this instance.
        # Delegate to the base class (py2-compatible super form, since the
        # file imports print_function for py2 compatibility).
        super(my_value, self).__init__(b)
        self.type_name = str(b.type)
def print_ksync_entry(entry_ptr, entry):
    """Default per-entry printer: entry address plus its KSync state bits."""
    state = entry['state_']
    print(str(entry_ptr) + " state=0x%08X" % (state))
def print_ksync_entries(ksync_obj, print_fn = print_ksync_entry):
    """Walk a KSyncDBObject's intrusive tree and print every entry.

    ksync_obj: gdb value for a KSyncDBObject pointer
    print_fn:  callback invoked as print_fn(entry_pointer, entry)
    """
    # Cast to KSyncDBObject and wrap its boost::intrusive set (tree_).
    tree_ref = my_value(gdb.parse_and_eval('(((KSyncDBObject *)' + str(ksync_obj) + ')->tree_)'))
    tree = BoostIntrusiveSet(tree_ref)
    it = tree.children()
    try:
        while (it.node):
            # Grab the element pointer before next() advances the iterator.
            entry_ptr = it.get_element_pointer_from_node_pointer()
            entry = next(it)[1]
            print_fn(entry_ptr, entry)
    except StopIteration:
        pass
def print_nh_ksync_entry(entry_ptr, entry):
    """Print one nexthop KSync entry: index, type and state."""
    nh = entry.cast(gdb.lookup_type('NHKSyncEntry'))
    fields = (nh['index_'], nh['type_'], nh['state_'])
    print(str(entry_ptr) + " idx=%-5d type=%-4d state=0x%08X" % fields)
def dump_nh_ksync_entries():
    """Dump every entry of the agent's nexthop KSync object."""
    obj = gdb.parse_and_eval('Agent::singleton_->ksync_->nh_ksync_obj_.px')
    print_ksync_entries(obj, print_nh_ksync_entry)
def print_mpls_ksync_entry(entry_ptr, entry):
    """Print one MPLS KSync entry: label and nexthop."""
    mpls = entry.cast(gdb.lookup_type(str(entry.dynamic_type)))
    print(str(entry_ptr) + " label=%-5s nh=%-5s " % (mpls['label_'], mpls['nh_']))
def dump_ksync_mpls_entries():
    """Dump every entry of the agent's MPLS KSync object."""
    obj = gdb.parse_and_eval('Agent::singleton_->ksync_->mpls_ksync_obj_.px')
    print_ksync_entries(obj, print_mpls_ksync_entry)
def print_kintf_entry(entry_ptr, entry):
    """Print one interface KSync entry: index and interface name."""
    intf = entry.cast(gdb.lookup_type('InterfaceKSyncEntry'))
    # Reach into libstdc++ std::string internals for the raw char pointer.
    name = intf['interface_name_']['_M_dataplus']['_M_p']
    print(str(entry_ptr) + " idx=%-5d name=%-20s " % (intf['index_'], name))
def dump_ksync_intf_entries():
    """Dump every entry of the agent's interface KSync object."""
    obj = gdb.parse_and_eval('Agent::singleton_->ksync_->interface_ksync_obj_.px')
    print_ksync_entries(obj, print_kintf_entry)
def print_kvrf_assign_entries(entry_ptr, entry):
    """Print one VRF-assign KSync entry: vrf id, vlan tag and nexthop."""
    va = entry.cast(gdb.lookup_type('VrfAssignKSyncEntry'))
    fields = (va['vrf_id_'], va['vlan_tag_'], va['nh_'])
    print(str(entry_ptr) + " id=%-5s vlan_tag=%-5s nh=%-5s " % fields)
def dump_kvrf_assign_entries():
    """Dump every entry of the agent's VRF-assign KSync object."""
    obj = gdb.parse_and_eval('Agent::singleton_->ksync_->vrf_assign_ksync_obj_.px')
    print_ksync_entries(obj, print_kvrf_assign_entries)
def print_ksync_route_entry(entry_ptr, entry):
    """Print one route KSync entry: IPv4 prefix, vrf, label and nexthop.

    Bug fix: the second parameter was named ``ptr`` while the body read
    ``entry``, so every invocation raised NameError.  The callers
    (dump_ksync_route_entries / dump_ksync_mc_route_entries via
    print_ksync_entries) pass it positionally, so the rename is safe.
    """
    krt = entry.cast(gdb.lookup_type('RouteKSyncEntry'))
    # s_addr is in network byte order; print octets low byte first.
    ip = krt['addr_']['ipv4_address_']['addr_']['s_addr']
    print (str(entry_ptr) + " %d.%d.%d.%d/%d vrf=%d label=%d nh=%d " % ((ip & 0xff),\
           (ip >> 8 & 0xff), (ip >> 16 & 0xff), (ip >> 24 & 0xff),\
           krt['plen_'], krt['vrf_id_'], krt['label_'], krt['nh_']['px']))
def dump_ksync_route_entries(table):
    """Dump a unicast-route KSync table given its gdb expression string."""
    obj = gdb.parse_and_eval(str(table))
    print_ksync_entries(obj, print_ksync_route_entry)
def dump_ksync_mc_route_entries(table):
    """Dump a multicast-route KSync table given its gdb expression string."""
    obj = gdb.parse_and_eval(str(table))
    print_ksync_entries(obj, print_ksync_route_entry)
def print_ksync_flow_entry(entry_ptr, entry):
    """Print one flow KSync entry: hash id and flow-entry pointer."""
    flow = entry.cast(gdb.lookup_type('FlowTableKSyncEntry'))
    line = " hash=0x%-8x fp=%s \n" % (flow['hash_id_'], flow['flow_entry_']['px'])
    print(str(entry_ptr) + line)
def dump_ksync_flow_entries(table):
    """Dump a flow KSync table given its gdb expression string."""
    obj = gdb.parse_and_eval(str(table))
    print_ksync_entries(obj, print_ksync_flow_entry)
def print_ksync_mirror_entry(entry_ptr, entry):
    """Print one mirror KSync entry: endpoints, nexthop and ports."""
    mirror = entry.cast(gdb.lookup_type('MirrorKSyncEntry'))
    parts = [
        str(entry_ptr),
        " sip = " + str(mirror['sip_']),
        " dip = " + str(mirror['dip_']),
        " nh = " + str(mirror['nh_']['px']),
        " sport = " + str(mirror['sport_']),
        " dport = " + str(mirror['dport_']),
    ]
    print("".join(parts))
def dump_ksync_mirror_entries():
    """Dump every entry of the agent's mirror KSync object."""
    obj = gdb.parse_and_eval('Agent::singleton_->ksync_->mirror_ksync_obj_.px')
    print_ksync_entries(obj, print_ksync_mirror_entry)
def dump_ksync_vxlan_entries():
    """Dump every entry of the agent's VxLAN KSync object."""
    obj = gdb.parse_and_eval('Agent::singleton_->ksync_->vxlan_ksync_obj_.px')
    print_ksync_entries(obj, print_ksync_vxlan_entry)
def print_ksync_vxlan_entry(entry_ptr, entry):
    """Print one VxLAN KSync entry: nexthop and label."""
    vxlan = entry.cast(gdb.lookup_type('VxLanIdKSyncEntry'))
    print(str(entry_ptr) + " nh=%s label=%s\n" % (vxlan['nh_']['px'], vxlan['label_']))
| 2.171875 | 2 |
day15/tests.py | andrewyang96/AdventOfCode2017 | 0 | 12763769 | from solution import final_count
from solution import final_count2
assert final_count(65, 8921, 5) == 1
print(final_count(679, 771, 40000000))
assert final_count2(65, 8921, 5000000) == 309
print(final_count2(679, 771, 5000000))
| 2.0625 | 2 |
swashbot/cmds/current.py | almonds0166/swashbot | 0 | 12763770 |
import discord
INF = None
async def command(client, param, ctx):
    """
    Syntax:
       ~current
    Description:
       Get it? Current? Haaa
       Explicitly tells users how Swashbot is working to swash in the channel
    """
    # NOTE(review): `param` is unused by this command.
    cid = ctx.channel.id
    # Fall back to the bot-wide defaults when this channel has no
    # channel-specific settings stored in the memo.
    e = discord.Embed(
        title="Current settings for this channel",
        description=status_message(client.memo.get(cid, client.DEFAULT_SETTINGS)),
        color=client.color
    )
    await ctx.channel.send(embed=e)
def status_message(memo):
    """
    Creates a status string for the current settings.

    Reads memo.at_least (messages always kept), memo.at_most (hard cap,
    None meaning unlimited) and memo.time (minutes before wash-away,
    None meaning never).
    """
    keep_min = memo.at_least
    keep_max = memo.at_most
    minutes = memo.time
    # Nothing capped and nothing timed: the bot does nothing here.
    if keep_max is None and minutes is None:
        return "No action taken."
    plural_t = "s" if minutes != 1 else ""
    parts = []
    if keep_min == 0 and keep_max == 0:
        parts.append("Turbid! Delete all new messages immediately. ")
        parts.append("Wait, doesn't that mean this message will get deleted in a split second?\n")
    elif keep_min == 0:
        if keep_max is not None:
            parts.append(f"Immediately delete all messages past count {keep_max}.\n")
            parts.append(f"Everything else takes {minutes} minute")
        else:
            parts.append(f"Messages take {minutes} minute")
        parts.append(plural_t)
        parts.append(" to wash away.\n")
    elif keep_min == keep_max and keep_max is not None:
        parts.append(f"Immediately delete all messages past count {keep_max}.\n")
    else:
        plural_k = "s" if keep_min != 1 else ""
        parts.append(f"Always keep the {keep_min} most recent message{plural_k}.\n")
        if keep_max is not None:
            parts.append(f"Delete all messages past count {keep_max}.\n")
        if minutes is not None:
            parts.append(f"Any messages in-between take {minutes} minute{plural_t} to wash away.\n")
        else:
            parts.append(f"Any other messages take {minutes} minute{plural_t} to wash away.\n")
    return "".join(parts).strip()
| 2.6875 | 3 |
servicemon/tests/data/user_plugin_dir/cannot_make_reader_instance.py | tomdonaldson/servicemon | 1 | 12763771 | <reponame>tomdonaldson/servicemon
from servicemon.plugin_support import AbstractResultWriter
class CannotInstantiate(AbstractResultWriter, plugin_name='cannot_make_reader_instance',
                        description="A writer in the user's plug-in dir that cannot be instatiated"
                        " because it doesn't implement the end() method from the abstract base class."):
    # Test fixture: end() is deliberately left unimplemented so that
    # instantiating this plug-in fails (abstract method still missing).
    def begin(self, **kwargs):
        pass

    def one_result(self, stats):
        pass
| 2.53125 | 3 |
molar/cli/commands/cmd_install.py | aspuru-guzik-group/molar | 4 | 12763772 | <reponame>aspuru-guzik-group/molar
# std
from datetime import datetime
from distutils.spawn import find_executable
import os
from pathlib import Path
import secrets
import shutil
import stat
import subprocess
from time import sleep
from typing import List, Optional
# external
from alembic import command
import click
from dotenv import dotenv_values
from passlib.context import CryptContext
import pkg_resources
from python_on_whales import docker
from python_on_whales.utils import DockerException
from rich.console import Console
from rich.prompt import Confirm, Prompt
# molar
from molar import sql_utils
from .. import alembic_utils
from ..cli_utils import CustomClickCommand
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
@click.group(help="Molar Installation")
@click.pass_context
def install(ctx):
    """Parent command group for the install subcommands (local, remote)."""
    pass
def config_env_vars(
    console: Console,
    data_dir: Path,
    *,
    postgres_server: Optional[str] = None,
    postgres_user: Optional[str] = None,
    postgres_password: Optional[str] = None,
    server_host: Optional[str] = None,
    emails_enabled: Optional[bool] = None,
    smtp_tls: Optional[bool] = None,
    smtp_host: Optional[str] = None,
    smtp_port: Optional[int] = None,
    smtp_user: Optional[str] = None,
    smtp_password: Optional[str] = None,
    emails_from_email: Optional[str] = None,
    emails_from_name: Optional[str] = None,
    backend_port: Optional[int] = None,
    backend_num_workers: Optional[int] = None,
):
    """Collect backend settings (prompting for any left as None) and write
    them to ``data_dir/.env``.

    Returns a dict of settings keyed by lower-case name, e.g.
    ``config["postgres_password"]`` as read by the callers.
    """
    console.log("[blue bold]Setting up backend environment..")
    if (data_dir / ".env").exists():
        if Confirm.ask(
            (
                f"[red bold]A .env file already exists in {str(data_dir.resolve())}.\n"
                "Do you want to use it?"
            )
        ):
            # Bug fix: dotenv_values() returns UPPER_CASE keys while the
            # normal path below returns locals() with lower-case names;
            # callers index with lower-case keys, so normalise here.
            return {k.lower(): v for k, v in dotenv_values(data_dir / ".env").items()}
    if postgres_server is None:
        postgres_server = Prompt.ask("PostgreSQL server hostname", default="localhost")
    if postgres_user is None:
        postgres_user = Prompt.ask("PostgreSQL user", default="postgres")
    if postgres_password is None:
        postgres_password = Prompt.ask(f"Password for Postgres admin", password=True)
    if server_host is None:
        server_host = Prompt.ask(f"Server url", default="http://localhost")
    if emails_enabled is None:
        emails_enabled = Confirm.ask("Allow the backend to send email?")
    if emails_enabled:
        # SMTP details only matter when email sending is on.
        if smtp_host is None:
            smtp_host = Prompt.ask("SMTP server")
        if smtp_port is None:
            smtp_port = Prompt.ask("SMTP Port", default=25)
        if smtp_user is None:
            smtp_user = Prompt.ask("SMTP user")
        if smtp_password is None:
            smtp_password = Prompt.ask("SMTP password", password=True)
        if smtp_tls is None:
            smtp_tls = Confirm.ask("Use TLS to connect to the SMTP server?")
        if emails_from_email is None:
            emails_from_email = Prompt.ask("Email address of the backend")  # typo fix: "bakcend"
        if emails_from_name is None:
            emails_from_name = Prompt.ask("Email name of the backend")
    if backend_port is None:
        backend_port = Prompt.ask("Backend port", default="8000")
    if backend_num_workers is None:
        backend_num_workers = Prompt.ask(
            "Number of workers for the backend", default="2"
        )
    dotenv_file = (data_dir / ".env").resolve()
    with open(dotenv_file, "w") as f:
        print(f"DATA_DIR={data_dir.resolve()}", file=f)
        print(f"POSTGRES_SERVER={postgres_server}", file=f)
        print(f"POSTGRES_USER={postgres_user}", file=f)
        print(f"POSTGRES_PASSWORD={postgres_password}", file=f)
        print(f"SERVER_HOST={server_host}", file=f)
        print(f"EMAILS_ENABLED={'true' if emails_enabled else 'false'}", file=f)
        print(f"SMTP_TLS={'true' if smtp_tls else 'false'}", file=f)
        print(f"SMTP_HOST={smtp_host or ''}", file=f)
        print(f"SMTP_PORT={smtp_port or 25}", file=f)
        print(f"SMTP_USER={smtp_user or ''}", file=f)
        print(f"SMTP_PASSWORD={smtp_password or ''}", file=f)
        print(f"EMAILS_FROM_EMAIL={emails_from_email or '<EMAIL>'}", file=f)
        print(f"EMAILS_FROM_NAME={emails_from_name or ''}", file=f)
        print(f"BACKEND_PORT={backend_port}", file=f)
        print(f"BACKEND_NUM_WORKERS={backend_num_workers}", file=f)
        print(f"ALEMBIC_USER_DIR=/alembic", file=f)
        print(f"SECRET_KEY={secrets.token_urlsafe(32)}", file=f)
    # Make the secrets file read-only once written.
    os.chmod(dotenv_file, stat.S_IREAD)
    # Intentionally returns every collected setting at once.
    return locals()
@install.command(
    cls=CustomClickCommand, help="Install Molar locally with docker compose"
)
@click.pass_context
def local(ctx):
    """Install Molar locally: start postgres via docker-compose, create the
    schema and the first superuser, then optionally bring the stack up."""

    def _compose_status_healthy():
        # python_on_whales may not support this docker-compose version;
        # fall back to parsing the `docker-compose ps` output.
        try:
            status = docker.compose.ps()[0].state.health.status == "healthy"
        except Exception:  # was a bare except:, which also swallowed Ctrl-C
            out = subprocess.check_output(
                "docker-compose ps | grep postgres", shell=True
            )
            status = "healthy" in str(out)
        return status

    console = ctx.obj["console"]
    if not find_executable("docker"):
        console.log(
            (
                "[red bold]Docker is not installed[/], "
                "please install it from https://docker.com"
            )
        )
        return
    _verify_data_dir(ctx)
    data_dir = ctx.obj["data_dir"]
    config = config_env_vars(
        console,
        data_dir,
        postgres_server="postgres",
        postgres_user="postgres",
    )
    # Copy the packaged docker-compose file next to the data directory.
    molar_docker_compose = pkg_resources.resource_filename(
        "molar", "docker/docker-compose.yml"
    )
    local_docker_compose = data_dir / "docker-compose.yml"
    shutil.copyfile(molar_docker_compose, local_docker_compose)
    os.chdir(data_dir)
    with console.status("Setting up PostgreSQL (this can take a few minutes)..."):
        try:
            docker.compose.up(services=["postgres"], detach=True)
        except DockerException:
            # docker compose up --detach doesn't work with some
            # version of docker
            subprocess.call(["docker-compose", "up", "-d", "postgres"])
        while not _compose_status_healthy():
            sleep(1)
        sleep(2)
    console.log("Installing Molar...")
    _install_molar(ctx, "localhost", "postgres", config["postgres_password"])
    console.log("[bold blue]Creating the first superuser[/bold blue]")
    superuser_name = Prompt.ask("Full name")
    superuser_email = Prompt.ask("Email")
    superuser_password = Prompt.ask("Password", password=True)
    _add_user(
        user_name=superuser_name,
        email=superuser_email,
        password=superuser_password,  # restored (source had an anonymised placeholder)
        hostname="localhost",
        postgres_username="postgres",
        postgres_password=config["postgres_password"],
        postgres_database="molar_main",
    )
    console.log("Molar :tooth: is installed!")  # typo fix: "insalled"
    if Confirm.ask("Do you want to start it now?"):
        try:
            docker.compose.up(detach=True)
        except DockerException:
            subprocess.call("docker-compose up -d", shell=True)
    else:
        try:
            docker.compose.down()
        except DockerException:
            subprocess.call("docker-compose down", shell=True)
@install.command(cls=CustomClickCommand, help="Set up remote postgres database")
@click.option("--hostname", type=str)
@click.option("--postgres-username")
@click.option("--postgres-password")
@click.option("--superuser-email", type=str, default=None)
@click.option("--superuser-password", type=str, default=None)
@click.pass_context
def remote(
    ctx,
    hostname,
    postgres_username,
    postgres_password,
    superuser_email,
    superuser_password,
):
    """Install the Molar schema on a remote PostgreSQL server and create the
    first user, prompting for anything not supplied as an option."""
    console = ctx.obj["console"]
    _install_molar(ctx, hostname, postgres_username, postgres_password)
    console.log("Creating the first user!")
    if superuser_email is None:
        superuser_email = Prompt.ask("Email")
    if superuser_password is None:
        superuser_password = Prompt.ask("Password", password=True)
    _add_user(
        email=superuser_email,
        password=superuser_password,  # restored (source had an anonymised placeholder)
        hostname=hostname,
        # Bug fix: "postgres" was hard-coded here, silently ignoring the
        # --postgres-username option; keep "postgres" only as the fallback.
        postgres_username=postgres_username or "postgres",
        postgres_password=postgres_password,
        postgres_database="molar_main",
    )
    console.log(f"Molar :tooth: is installed on {hostname}!")  # typo fix: "insalled"
def _install_molar(
    ctx,
    hostname=None,
    postgres_username="postgres",
    postgres_password=None,
):
    """Create the molar_main database on *hostname*, prompting for any
    missing connection details."""
    console = ctx.obj["console"]
    if hostname is None:
        hostname = Prompt.ask(
            "What is the hostname of the postgres server to install Molar :tooth: on?"
        )
    if postgres_password is None:
        postgres_password = Prompt.ask("Postgres password?", password=True)
    with console.status("Installing Molar :tooth:..."):
        console.log("Creating database")
        # advanced=False: plain install (see _create_database).
        _create_database(
            ctx,
            hostname,
            postgres_username,
            postgres_password,
            "molar_main",
            False,
        )
def _add_user(
    user_name=None,
    email=None,
    password=None,
    hostname=None,
    postgres_username="postgres",
    postgres_password=None,
    postgres_database="molar_main",
):
    """Insert a new active superuser row into the "user".user table,
    hashing *password* with the module-level bcrypt context."""
    connection = sql_utils.create_connection(
        postgres_username, postgres_password, hostname, postgres_database
    )
    # NOTE(review): user-supplied values are interpolated directly into the
    # SQL string -- a name/email containing a quote breaks the statement
    # (SQL injection).  Should use parameterized execution instead.
    # NOTE(review): the connection is never closed here, unlike
    # _create_database below.
    connection.execute(
        (
            'insert into "user".user '
            ' ("full_name", '
            ' "email", '
            ' "hashed_password", '
            ' "is_superuser", '
            ' "is_active" , '
            ' "created_on") '
            "values "
            f"('{user_name}',"
            f"'{email}', "
            f" '{pwd_context.hash(password)}',"
            " true,"
            " true,"
            f" '{datetime.utcnow()}');"
        )
    )
def _verify_data_dir(ctx):
    """Ensure ctx.obj["data_dir"] is set and that the directory (plus its
    migrations/ and postgres/ subdirectories) exists, prompting when the
    user did not specify one."""
    console = ctx.obj["console"]
    data_dir = ctx.obj["data_dir"]
    if data_dir is None:
        # typo fix: message previously read "No data-dir where specified!"
        console.log("[blue bold]No data-dir was specified![/blue bold]")
        data_dir = Path(
            Prompt.ask(
                "Where do you want to install Molar :tooth:", default="./molar_data_dir"
            )
        )
    if not data_dir.exists():
        data_dir.mkdir()
    alembic_dir = data_dir / "migrations"
    if not alembic_dir.exists():
        alembic_dir.mkdir()
    postgres_dir = data_dir / "postgres"
    if not postgres_dir.exists():
        postgres_dir.mkdir()
    # Store the resolved (absolute) path back for the rest of the command.
    ctx.obj["data_dir"] = data_dir.resolve()
def _create_database(
    ctx, hostname, postgres_username, postgres_password, new_database_name, advanced
):
    """Create *new_database_name* on the server and run the molar-main
    alembic migrations up to head.

    NOTE(review): the *advanced* argument is currently unused.
    NOTE(review): new_database_name is interpolated into raw SQL -- safe
    only while callers pass fixed names like "molar_main".
    """
    # Connect to the maintenance database to issue CREATE DATABASE
    # (requires autocommit, hence the execution option).
    connection = sql_utils.create_connection(
        postgres_username, postgres_password, hostname, "postgres"
    )
    connection.execution_options(isolation_level="AUTOCOMMIT").execute(
        f"create database {new_database_name}"
    )
    connection.close()
    connection = sql_utils.create_connection(
        postgres_username, postgres_password, hostname, new_database_name
    )
    alembic_config = alembic_utils.get_alembic_config(ctx, database="molar_main")
    alembic_config.set_main_option(
        "sqlalchemy.url",
        (
            f"postgresql://{postgres_username}:{postgres_password}"
            f"@{hostname}/{new_database_name}"
        ),
    )
    # Apply all molar-main migrations to the freshly created database.
    command.upgrade(alembic_config, "molar-main@head")
    connection.close()
| 1.890625 | 2 |
helicalc/__init__.py | FMS-Mu2e/helicalc | 0 | 12763773 | # from __future__ import absolute_import
# import os
import git
def get_git_root(path):
    """Return the top-level directory of the git repository containing *path*."""
    repo = git.Repo(path, search_parent_directories=True)
    return repo.git.rev_parse("--show-toplevel")
# def get_conda_dir(cfgfile):
# with open(cfgfile) as f:
# directory = f.readlines()[0].strip()
# return directory
# helicalc_dir = get_git_root('.')
# helicalc_dir = get_git_root(os.getcwd())
helicalc_dir = get_git_root(__file__)+'/'
# data directory (symbolic link)
helicalc_data = helicalc_dir + 'data/'
# location of Anaconda env directory
# conda_dir = get_conda_dir(helicalc_dir+'cfg/conda.cfg')
| 2.09375 | 2 |
elmo.py | Sparkluis5/EmotionalEmojis-PT | 0 | 12763774 | <filename>elmo.py
import string
import tensorflow_hub as hub
import tensorflow as tf
import numpy as np
import scipy
import model_definitions
from tqdm import tqdm
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from allennlp.commands.elmo import ElmoEmbedder
"""
Script consisting of additional experiments using Elmo and Bert word embeddings and retraining new models for emotion
classification. Still in implementation phase.
"""
#TFhub elmo definition
elmo = hub.Module("https://tfhub.dev/google/elmo/2", trainable=False)
#Function to remove punction from input string.
def remove_punc(text):
    """Return *text* with ASCII punctuation and all digit characters removed.

    Digits are detected with str.isdigit(), so Unicode digits (e.g. a
    superscript two) are stripped as well, matching the original behaviour.
    """
    exclude = set(string.punctuation)
    # Single pass over the string instead of the previous two full scans.
    return ''.join(ch for ch in text if ch not in exclude and not ch.isdigit())
#Transforms a given string composing of a text to ELMO word embeddings. Returns vector of embeddings
def elmo_vectors(x):
x = x['Text'].tolist()
embeddings = elmo(x, signature="default", as_dict=True)["elmo"]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
# return average of ELMo features
return sess.run(tf.reduce_mean(embeddings,1))
#Auxiliary function to convert dataframe to list of list format. Pandas method was not working properly
def convert_rows(dataframe):
return_list = []
# Iterate over each row
for index, rows in dataframe.iterrows():
# Create list for the current row
my_list = str(rows.Text).split()
# append the list to the final list
return_list.append(my_list)
return return_list
#Function for reading dataset and trasform all texts to its Elmo embedding vectors. We train a Logistic Regression model for simple performance analysis
def process_data(filename, save):
all_data = model_definitions.read_data(filename)
all_data['Text'] = all_data['Text'].values.astype('U')
all_data['Text'] = all_data['Text'].apply(remove_punc)
#Train and test sets split
train, test = train_test_split(all_data, test_size=0.20, shuffle=True)
#Creating batches of 100 due to performance issues
list_train = [train[i:i + 100] for i in range(0, train.shape[0], 100)]
list_test = [test[i:i + 100] for i in range(0, test.shape[0], 100)]
print("Converting text to Elmo")
elmo_train = [elmo_vectors(x) for x in tqdm(list_train)]
elmo_test = [elmo_vectors(x) for x in tqdm(list_test)]
#Saving embeddings
if save:
print("Saving Elmo embeddigns")
elmo_train_new = np.concatenate(elmo_train, axis=0)
elmo_test_new = np.concatenate(elmo_test, axis=0)
# save elmo_train_new
pickle_out = open("elmo_train.pickle", "wb")
pickle.dump(elmo_train_new, pickle_out, protocol=pickle.HIGHEST_PROTOCOL)
pickle_out.close()
# save elmo_test_new
pickle_out = open("elmo_test.pickle", "wb")
pickle.dump(elmo_test_new, pickle_out, protocol=pickle.HIGHEST_PROTOCOL)
pickle_out.close()
print("Training Model")
#Logistic Regression model creation and evaluation
lreg = LogisticRegression()
lreg.fit(elmo_train_new, train['Emocao'])
preds_valid = lreg.predict(elmo_test_new)
print(f1_score(test['Emocao'], preds_valid))
#Test function converting Texts to Elmo embeddings using AllenNLP
def use_allen():
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5"
eltest = ElmoEmbedder(options_file, weight_file)
test_list = ['First test']
sec_test = ['Second test']
vec = eltest.embed_sentence(test_list)
vec2 = eltest.embed_sentence(sec_test)
print(vec)
print(vec2)
print(scipy.spatial.distance.cosine(vec,vec2))
def read_embeddings():
    # NOTE(review): unfinished stub -- open('') raises an error at call
    # time; a file path argument and a pickle.load call appear intended.
    infile = open('')
if __name__ == '__main__':
pass
#read_data()
#use_allen()
#statistics() | 2.859375 | 3 |
test/test_segmentation.py | noah80/tsam | 0 | 12763775 | import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_segmentation():
    """Regression test: hierarchical clustering with 20 typical periods of
    24 h, segmented into 12 segments, must reproduce the stored reference
    results (compared order-independently)."""
    raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
    orig_raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','results','testperiods_segmentation.csv'), index_col = [0,1,2])
    starttime = time.time()
    aggregation = tsam.TimeSeriesAggregation(raw, noTypicalPeriods = 20, hoursPerPeriod = 24,
                                            clusterMethod = 'hierarchical', segmentation=True, noSegments=12)
    typPeriods = aggregation.createTypicalPeriods()
    print('Clustering took ' + str(time.time() - starttime))
    # sort the typical days in order to avoid error assertion due to different order
    # (sorted by each period's total GHI, which acts as a stable signature)
    sortedDaysOrig = orig_raw.sum(axis=0,level=0).sort_values('GHI').index
    sortedDaysTest = typPeriods.sum(axis=0,level=0).sort_values('GHI').index
    # rearange their order
    orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig,:].stack()
    test = typPeriods.unstack().loc[sortedDaysTest,:].stack()
    np.testing.assert_array_almost_equal(orig.values, test.values,decimal=4)
if __name__ == "__main__":
test_segmentation() | 2.359375 | 2 |
build_migrator/generators/_cmake/cmake_cmd.py | alexsharoff/BuildMigrator | 17 | 12763776 | <reponame>alexsharoff/BuildMigrator
from build_migrator.common.algorithm import flatten_list
from build_migrator.modules import Generator
command_tmpl = """
COMMAND
${{CMAKE_COMMAND}} -E env{params}
{command}"""
cmd_tmpl = """
add_custom_command(OUTPUT {output}{command}{depends}{working_dir}
VERBATIM
)
add_custom_target({name} ALL DEPENDS {output})
"""
post_build_cmd_tmpl = """
add_custom_command(TARGET {target_name} POST_BUILD{command}{working_dir}
VERBATIM
)
"""
find_program_tmpl = """
find_program({var} {name})
if(NOT {var})
message(FATAL_ERROR "{name} not found")
endif()
"""
class CMakeCmd(Generator):
    """Generator that emits add_custom_command/add_custom_target CMake code
    for build-migrator targets of type "cmd"."""
    priority = 1

    @staticmethod
    def add_arguments(arg_parser):
        # No generator-specific CLI arguments.
        pass

    def __init__(self, context, project=None):
        self.context = context
        # Project name (upper-cased) prefixes find_program cache variables
        # so they do not collide across projects.
        if project is None:
            self.var_prefix = ""
        else:
            self.var_prefix = project.upper() + "_"
        # Programs CMake already knows about need no find_program() call.
        self.found_programs = {"cmake": "CMAKE_COMMAND", "objcopy": "CMAKE_OBJCOPY"}

    def generate(self, target):
        """Append the CMake code for *target* to CMakeLists.txt.

        Returns False for non-"cmd" targets so other generators can claim
        them; True once the code has been written.
        """
        if not target["type"] == "cmd":
            return False
        with self.context.open("CMakeLists.txt", "a") as f:
            # Emit find_program() once per distinct external program.
            program = target["program"]
            program_var = self.found_programs.get(program)
            if program_var is None:
                program_var = self.var_prefix + program.upper()
                s = self.context.format(
                    find_program_tmpl, name=program, var=program_var
                )
                f.write(s)
                self.found_programs[program] = program_var
            working_dir = target.get("working_dir")
            if working_dir is None:
                working_dir = ""
            else:
                working_dir = '\n    WORKING_DIRECTORY "{}"'.format(working_dir)
            object_lib_deps = []
            cmd_dependencies_str = ""
            dependencies = target.get("dependencies")
            if dependencies:
                # Translate dependency ids into target names / file paths.
                cmd_dependencies_str = "\n    DEPENDS"
                for dep in dependencies:
                    _tgt = self.context.target_index.get(dep)
                    if _tgt:
                        if _tgt.get("type") == "module":
                            if _tgt["module_type"] == "object_lib":
                                # Remember object libs: they need an explicit
                                # copy step (see below).
                                object_lib_deps.append(_tgt)
                            dep = _tgt["name"]
                        elif _tgt.get("type") == "directory":
                            # Don't add directory dependencies, it's unnecessary
                            continue
                    cmd_dependencies_str += "\n        {}".format(dep)
            command_args = []
            for arg in flatten_list(target["args"]):
                # Arguments that name a module target become generator
                # expressions so CMake substitutes the built file path.
                if arg in self.context.target_index:
                    arg_target = self.context.target_index[arg]
                    if (
                        arg_target["type"] == "module"
                        and arg_target["module_type"] != "object_lib"
                    ):
                        arg = "$<TARGET_FILE:{}>".format(arg_target["name"])
                command_args.append(arg)
            command = ["${" + program_var + "}"] + command_args
            params = ""
            if target.get("parameters"):
                # Environment variables passed via `cmake -E env`.
                params = " " + " ".join(
                    "{}={}".format(k, v) for k, v in target["parameters"].items()
                )
            command_str = self.context.format(
                command_tmpl, params=params, command=" ".join(command),
            )
            # CMake doesn't provide built-in way to set object file path.
            # Explicitly copy object files to expected locations
            for _tgt in object_lib_deps:
                cmd = '${{CMAKE_COMMAND}} -E copy_if_different "$<TARGET_OBJECTS:{name}>" "{output}"'.format(
                    name=_tgt["name"], output=_tgt["output"],
                )
                # Prepend so the copy runs before the main command.
                command_str = (
                    self.context.format(command_tmpl, params=params, command=cmd,)
                    + command_str
                )
            post_build_target_name = target.get("post_build")
            if post_build_target_name:
                # Attach as a POST_BUILD step of an existing target.
                s = self.context.format(
                    post_build_cmd_tmpl,
                    target_name=post_build_target_name,
                    command=command_str,
                    working_dir=working_dir,
                )
            else:
                # Stand-alone custom command + ALL target driving it.
                output = target["output"]
                for o in target.get("msvc_import_lib") or []:
                    output += "\n        " + o
                name = target.get("name")
                if name is None:
                    # Derive a target name from the output path.
                    name = output.split("@")[-1][1:].replace(".", "_").replace("/", "_")
                s = self.context.format(
                    cmd_tmpl,
                    name=name,
                    output=output,
                    command=command_str,
                    depends=cmd_dependencies_str,
                    working_dir=working_dir,
                )
            f.write(s)
        return True
__all__ = ["CMakeCmd"]
| 2.25 | 2 |
def fn():
    """Print the sum of two local numbers (1 + 20)."""
    num1 = 1
    # Bug fix: the second operand was assigned to `num3` while the sum read
    # the undefined `num2`, raising NameError on every call.
    num2 = 20
    total = num1 + num2  # renamed from `sum`, which shadowed the builtin
    print(total)
num=10
def school():
    """Return the average number of students per class (1000 / 12)."""
    total_students = 1000
    num_classes = 12
    return total_students / num_classes
# NOTE(review): school() returns 1000/12 ~= 83.3, so this branch can never
# fire; the threshold (or the intent of the comparison) looks wrong.
num1=school()
if num1>500:
    print('人数很好')  # message means "the headcount is great"
| 3.5625 | 4 |
iris_sdk/models/data/lidb_tn_group.py | NumberAI/python-bandwidth-iris | 2 | 12763778 | #!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.lidb_tn_group import LidbTnGroupMap
from iris_sdk.models.data.telephone_number_list import TelephoneNumberList
class LidbTnGroupData(LidbTnGroupMap, BaseData):
    """Data container for a LIDB TN group; holds its telephone numbers."""

    def __init__(self):
        # Start with an empty number collection; entries are presumably
        # populated later by the mapping/base machinery — confirm.
        self.telephone_numbers = TelephoneNumberList()
src/util/nowcasts_table.py | dfarrow0/nowcast | 3 | 12763779 | """
===============
=== Purpose ===
===============
A simple wrapper for the `nowcasts` table in the Delphi database.
=======================
=== Data Dictionary ===
=======================
Nowcasts (value and standard deviation) are stored in the `nowcasts` table.
+----------+-------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+----------+-------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| epiweek | int(11) | NO | MUL | NULL | |
| location | varchar(12) | YES | MUL | NULL | |
| value | float | NO | | NULL | |
| std | float | NO | | NULL | |
+----------+-------------+------+-----+---------+----------------+
id: unique identifier for each record
epiweek: the epiweek for which (w)ILI is being predicted
location: where the data was collected (nat, hhs, cen, and states)
value: nowcast point prediction
std: nowcast standard deviation
"""
# standard library
import time
# first party
from delphi.nowcast.util.delphi_database import DelphiDatabase
from delphi.operations import secrets
class NowcastsTable(DelphiDatabase.Table):
  """A database wrapper for the `nowcasts` table."""

  # Upsert: insert a (epiweek, location) row or overwrite value/std on key clash.
  SQL_INSERT = '''
    INSERT INTO `nowcasts`
    (`epiweek`, `location`, `value`, `std`)
    VALUES
    (%s, %s, %s, %s)
    ON DUPLICATE KEY UPDATE
    value = %s, std = %s
  '''

  def insert(self, epiweek, location, value, stdev):
    """
    Add a new nowcast record to the database, or update an existing record with
    the same key.
    """
    # value/stdev appear twice: once for INSERT, once for the UPDATE clause.
    args = (epiweek, location, value, stdev, value, stdev)
    self._database.execute(NowcastsTable.SQL_INSERT, args)

  def set_last_update_time(self):
    """
    Store the timestamp of the most recent nowcast update.

    This hack was copied from the old nowcast.py, which has this to say:

    > Store the unix timestamp in a meta row representing the last update time.
    > The key to this row is `epiweek`=0, `location`='updated'. The timestamp
    > is stored across the `value` and `std` fields. These are 32-bit floats,
    > so precision is limited (hence, using both fields).
    """
    t = round(time.time())
    # Split the timestamp into high/low parts to fit two 32-bit float columns.
    a, b = t // 100000, t % 100000
    self.insert(0, 'updated', a, b)

  def _get_connection_info(self):
    """Return username, password, and database name."""
    return secrets.db.epi + ('epidata',)
| 1.890625 | 2 |
codes/test_incomplete_sim.py | IllusoryTime/Image-Based-CFD-Simulation-Using-Deep-Learning | 16 | 12763780 | <gh_stars>10-100
"""
Despite of deleting all incomplete simulation by delete_all_unnecessary.py there are still some incomplete simulations contains in VTK folder.
This scripts find all those directory and then delete them manually.
"""
import os
from tqdm import tqdm
# Some runs under `simulation_data` still have an incomplete VTK output set
# (a complete run produces 41 files); print every such simulation's name so
# it can be deleted by hand.
directory_list = next(os.walk('simulation_data'))[1]
for sim_no in tqdm(directory_list):
    vtk_dir = './simulation_data/' + sim_no + '/VTK'
    n_files = sum(
        1
        for entry in os.listdir(vtk_dir)
        if os.path.isfile(os.path.join(vtk_dir, entry))
    )
    if n_files != 41:
        print(sim_no)
| 1.921875 | 2 |
perfdump/html.py | etscrivner/nose-perfdump | 1 | 12763781 | # This file is part of nose-perftest.
#
# Copyright (c) 2012, <NAME> and AUTHORS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the nose-perfdump nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from perfdump.models import TestTime, SetupTime
class HtmlReport(object):
    """Writes the performance report to an HTML file."""

    @classmethod
    def write(cls, html_file):
        """Write the HTML performance report to the path `html_file`.

        The report lists the 10 slowest tests and the 10 slowest files for
        both the test phase and the setup phase.
        """
        # Use a context manager so the file handle is closed even if one of
        # the database queries below raises (the old version leaked it).
        with open(html_file, 'w') as f:
            f.write('<html>')
            f.write('<head>')
            f.write('</head>')
            f.write('<body>')
            f.write('<h1>Test times</h1>')
            cls._write_test_table(f, TestTime.get_slowest_tests(10))
            cls._write_file_table(f, TestTime.get_slowest_files(10))
            f.write('<h1>Setup times</h1>')
            cls._write_test_table(f, SetupTime.get_slowest_tests(10))
            cls._write_file_table(f, SetupTime.get_slowest_files(10))
            f.write('</body>')
            f.write('</html>')

    @classmethod
    def _write_test_table(cls, f, rows):
        """Write one table of per-test timings: elapsed, file, dotted test name."""
        fmt_test = '<tr><td>{:.05f}</td><td>{}</td></tr><tr><td> </td><td>{}</td></tr>'
        f.write('<table>')
        f.write('<tr><th>Time</th><th>Test info</th></tr>')
        for row in rows:
            f.write(fmt_test.format(
                row['elapsed'], row['file'],
                '{}.{}.{}'.format(row['module'], row['class'], row['func'])))
        f.write('</table>')

    @classmethod
    def _write_file_table(cls, f, rows):
        """Write one table of per-file aggregate timings: summed elapsed, file."""
        fmt_file = '<tr><td>{:.05f}</td><td>{}</td></tr>'
        f.write('<table>')
        f.write('<tr><th>Time</th><th>Test info</th></tr>')
        for row in rows:
            f.write(fmt_file.format(row['sum_elapsed'], row['file']))
        f.write('</table>')
| 1.382813 | 1 |
src/bxcommon/network/socket_connection_protocol.py | dolphinridercrypto/bxcommon | 12 | 12763782 | import time
from asyncio import BufferedProtocol
from typing import Optional, TYPE_CHECKING
from bxcommon.network.abstract_socket_connection_protocol import AbstractSocketConnectionProtocol
from bxcommon.network.ip_endpoint import IpEndpoint
from bxutils import logging
from bxutils.logging import LogRecordType
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
# pylint: disable=ungrouped-imports,cyclic-import
from bxcommon.connections.abstract_node import AbstractNode
# Module-level loggers: one general, one dedicated to network troubleshooting.
logger = logging.get_logger(__name__)
network_troubleshooting_logger = logging.get_logger(LogRecordType.NetworkTroubleshooting, __name__)
# pyre-fixme[11]: Annotation `BufferedProtocol` is not defined as a type.
class SocketConnectionProtocol(AbstractSocketConnectionProtocol, BufferedProtocol):
    """Buffered-protocol socket connection that also tracks read timing."""

    def __init__(
        self,
        node: "AbstractNode",
        endpoint: Optional[IpEndpoint] = None,
        is_ssl: bool = True,
    ):
        AbstractSocketConnectionProtocol.__init__(self, node, endpoint, is_ssl)
        # time.time() of the last buffer request (read start) and the last
        # buffer update (read end); None until the first read cycle happens.
        self._buffer_request_time: Optional[float] = None
        self._buffer_update_time: Optional[float] = None

    # pylint: disable=arguments-differ
    def get_buffer(self, _sizehint: int):
        """Hand asyncio the receive buffer and record when it was requested."""
        self._buffer_request_time = time.time()
        logger.trace("[{}] - get_buffer {}.", self, _sizehint)
        return self._receive_buf

    def buffer_updated(self, nbytes: int) -> None:
        """Forward the newly received bytes to the node and record the read end."""
        if self.is_receivable():
            self._buffer_update_time = time.time()
            logger.trace("[{}] - buffer_updated {}.", self, nbytes)
            self._node.on_bytes_received(self.file_no, self._receive_buf[:nbytes])

    def get_last_read_duration_ms(self) -> float:
        """Return the duration of the last read cycle in ms, or 0 if none yet."""
        start_time = self._buffer_request_time
        end_time = self._buffer_update_time
        if start_time is not None and end_time is not None:
            return (end_time - start_time) * 1000
        return 0

    def get_time_since_read_end_ms(self, end_time: float) -> float:
        """Return ms elapsed between the last read *end* and `end_time` (0 if none).

        Bug fix: the previous version guarded on `_buffer_update_time` but then
        subtracted `_buffer_request_time` (the read *start*), contradicting both
        the guard and the method name.
        """
        start_time = self._buffer_update_time
        if start_time is not None:
            return (end_time - start_time) * 1000
        return 0
| 2.296875 | 2 |
src/gui.py | xi-xi/pyMotionViewer | 0 | 12763783 | import pyglet
from pyglet.gl import *
from OpenGL.GLUT import *
class MainWindow(pyglet.window.Window):
    """Main application window that renders a single test triangle each frame."""

    def __init__(self, *args, **kargs):
        # Pass all window options straight through to pyglet.
        super(MainWindow, self).__init__(*args, **kargs)

    def on_draw(self):
        # super(MainWindow, self).on_draw()
        # self.clear()
        glClear(GL_COLOR_BUFFER_BIT)  # clear the color buffer for this frame
        glLoadIdentity()  # reset the current matrix before issuing vertices
        # One triangle spanning the lower-right half of the window.
        glBegin(GL_TRIANGLES)
        glVertex2f(0, 0)
        glVertex2f(self.width, 0)
        glVertex2f(self.width, self.height)
        glEnd()
| 2.578125 | 3 |
cedm/policy/SummaryActionMaster.py | Ratnesh-Rai/Pydial-Personalisation | 3 | 12763784 | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2018
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
SummaryAction.py - Mapping between summary and master actions
=============================================================
Copyright CUED Dialogue Systems Group 2015 - 2017, 2017
.. seealso:: CUED Imports/Dependencies:
import :mod:`policy.SummaryUtils` |.|
import :mod:`ontology.Ontology` |.|
import :mod:`utils.ContextLogger` |.|
import :mod:`utils.Settings`
************************
'''
__author__ = "cued_dialogue_systems_group"
import policy.SummaryAction
from utils import ContextLogger
logger = ContextLogger.getLogger('')
# NOTE(review): unused in this excerpt; presumably caps the number of accepted
# slot values considered by summary actions — confirm against the parent class.
MAX_NUM_ACCEPTED = 10
class SummaryActionMaster(policy.SummaryAction.SummaryAction):
    '''
    The summary action class encapsulates the functionality of a summary action along with the conversion from summary to master actions.

    .. Note::
        The list of all possible summary actions are defined in this class.
    '''
    def __init__(self, domainString, empty=False, confreq=False):
        '''
        Records what domain the class is instantiated for, and what actions are available

        :param domainString: domain tag
        :type domainString: string
        :param empty: None
        :type empty: bool
        :param confreq: representing if the action confreq is used
        :type confreq: bool
        '''
        super(SummaryActionMaster, self).__init__(domainString, empty, confreq)
        # Override the inherited action set with object- and relation-level
        # action names — presumably consumed by the CEDM policy; confirm.
        self.action_names = ['obj','rel']

    def getNonExecutable(self, belief, lastSystemAction):
        # In this master-action variant no summary action is ever masked out.
        return []
| 1.648438 | 2 |
__init__.py | brendasalenave/NRCLex | 29 | 12763785 | <filename>__init__.py
import os

# Package metadata.
__version__ = '1.5.0'
__license__ = 'MIT'
__author__ = 'metalcorebear'
name = "NRCLex"
# Absolute directory containing this package; presumably used elsewhere to
# locate bundled data files — confirm.
PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))
| 1.296875 | 1 |
src/shopping_cart/migrations/0002_auto_20180531_0944.py | ciphertz/final | 0 | 12763786 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-05-31 09:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the `VAT` and `voucher_applied`
    # fields from the Order model. Do not edit by hand beyond comments.

    dependencies = [
        ('shopping_cart', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='VAT',
        ),
        migrations.RemoveField(
            model_name='order',
            name='voucher_applied',
        ),
    ]
| 1.40625 | 1 |
setup.py | memcachier/django-ascii | 1 | 12763787 | <reponame>memcachier/django-ascii<filename>setup.py
from setuptools import setup, find_packages

from memcachier_django import __version__

# Package manifest for the MemCachier ASCII-protocol Django cache backend.
setup(
    name='memcachier-django-ascii',
    version=__version__,
    description='Django cache backend supporting MemCachier service',
    long_description=open('README.md').read(),
    author='MemCachier',
    author_email='<EMAIL>',
    url='https://github.com/memcachier/django-ascii',
    packages=find_packages(),
    install_requires=['pymemcache', 'Django>=1.3'],
    license='BSD',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Framework :: Django',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Database'
    ],
)
| 1.320313 | 1 |
2h Seira Askisewn/ex2/python/doomsday.py | danchris/pl1-ntua | 0 | 12763788 | <reponame>danchris/pl1-ntua<filename>2h Seira Askisewn/ex2/python/doomsday.py
#!/usr/bin/env python3
import sys
import os
## Global defines
# Grid dimensions (rows, columns); updated while parsing the input file.
N, M = 1, 1
# 1002x1002 grid pre-filled with walls ('X'); row/column 0 act as a border.
myMap = [['X'] * 1002 for _ in range(1002)]

def printMap():
    """Print rows 1..N-1, columns 1..M-1 of the global map, row per line."""
    global myMap, N, M
    for row in range(1, N):
        for col in range(1, M):
            print(myMap[row][col], end='')
        print('\n', end='')
## Main function
def main(argv):
    """Parse the map file, flood-fill '+'/'-' charges, and report the outcome.

    The map is read character by character into the global grid. Every '+'
    and '-' cell expands one step in the four directions per time unit (BFS
    over a FIFO of (row, col, charge, time) tuples). When an expansion meets
    a cell already claimed by the opposite charge, that cell becomes '*' and
    the earliest such time is printed; otherwise "the world is saved".
    """
    file = open(sys.argv[1],"r")
    global N, M, myMap
    myList = list()
    last = tuple()
    j = 1
    with file as f:
        while True:
            c = f.read(1)
            if (not c): break
            elif (c.isspace()):
                # End of a row: record the width and wall off both row ends.
                M = j
                myMap[N][j] = 'X'
                N += 1
                myMap[N][0] = 'X'
                j = 1;
            else:
                myMap[N][j] = c
                if (c == '+' or c == '-'):
                    # Seed the BFS queue with every charge at time 0.
                    last = (N,j,c,0)
                    myList.append(last)
                j += 1
    file.close()
    end = 0
    t = 0
    prevT = 0
    while (len(myList)):
        item = myList.pop(0)
        it_i = item[0]
        it_j = item[1]
        it_c = item[2]
        it_t = item[3]
        # After a collision, only finish the current time step so the
        # earliest collision time is the one reported.
        if (end and it_t > prevT):
            break
        ## Right
        if(myMap[it_i][it_j+1] == '.'):
            myMap[it_i][it_j+1] = it_c
            last = (it_i,it_j+1,it_c,it_t+1)
            myList.append(last)
        elif( myMap[it_i][it_j+1] != 'X' and myMap[it_i][it_j+1] != it_c):
            myMap[it_i][it_j+1] = '*'
            t = it_t+1
            end = 1;
        ## Left
        if(myMap[it_i][it_j-1] == '.'):
            myMap[it_i][it_j-1] = it_c
            last = (it_i,it_j-1,it_c,it_t+1)
            myList.append(last)
        elif( myMap[it_i][it_j-1] != 'X' and myMap[it_i][it_j-1] != it_c):
            myMap[it_i][it_j-1] = '*'
            t = it_t+1
            end = 1;
        ## Up
        if(myMap[it_i-1][it_j] == '.'):
            myMap[it_i-1][it_j] = it_c
            last = (it_i-1,it_j,it_c,it_t+1)
            myList.append(last)
        elif( myMap[it_i-1][it_j] != 'X' and myMap[it_i-1][it_j] != it_c):
            myMap[it_i-1][it_j] = '*'
            t = it_t+1
            end = 1;
        ## Down
        if(myMap[it_i+1][it_j] == '.'):
            myMap[it_i+1][it_j] = it_c
            last = (it_i+1,it_j,it_c,it_t+1)
            myList.append(last)
        elif( myMap[it_i+1][it_j] != 'X' and myMap[it_i+1][it_j] != it_c):
            myMap[it_i+1][it_j] = '*'
            t = it_t+1
            end = 1;
        prevT = it_t
    if (not end): print ("the world is saved")
    else: print(t)
    printMap()
    return
# Script entry point: expects the map file path as the first CLI argument.
if __name__ =="__main__":
    main(sys.argv)
| 3.46875 | 3 |
analytics/analytics/analytic_types/segment.py | jonyrock-back/hastic-server | 0 | 12763789 | <filename>analytics/analytics/analytic_types/segment.py
from typing import Optional
import utils.meta
@utils.meta.JSONClass
class Segment:
    '''
    Used for segment manipulation instead of { 'from': ..., 'to': ... } dict
    '''
    def __init__(
        self,
        from_timestamp: int,
        to_timestamp: int,
        _id: Optional[str] = None,
        analytic_unit_id: Optional[str] = None,
        labeled: Optional[bool] = None,
        deleted: Optional[bool] = None,
        message: Optional[str] = None
    ):
        # Reject inverted ranges up front so downstream code can rely on
        # from_timestamp <= to_timestamp.
        if to_timestamp < from_timestamp:
            raise ValueError(f'Can`t create segment with to < from: {to_timestamp} < {from_timestamp}')
        self.from_timestamp = from_timestamp
        self.to_timestamp = to_timestamp
        self._id = _id
        self.analytic_unit_id = analytic_unit_id
        self.labeled = labeled
        self.deleted = deleted
        self.message = message
@utils.meta.JSONClass
class AnomalyDetectorSegment(Segment):
    '''
    Used for segment manipulation instead of { 'from': ..., 'to': ..., 'data': ... } dict
    '''
    def __init__(
        self,
        from_timestamp: int,
        to_timestamp: int,
        data = None,
        _id: Optional[str] = None,
        analytic_unit_id: Optional[str] = None,
        labeled: Optional[bool] = None,
        deleted: Optional[bool] = None,
        message: Optional[str] = None
    ):
        super().__init__(
            from_timestamp,
            to_timestamp,
            _id,
            analytic_unit_id,
            labeled,
            deleted,
            message
        )
        # Bug fix: the previous `data = []` default was one shared list object
        # for every instance constructed without `data`; mutating one
        # instance's data would leak into all the others. Keep the observable
        # default ([]) but allocate a fresh list per instance.
        self.data = data if data is not None else []
| 2.375 | 2 |
Python/Account.py | hiddenworlds225/School-code | 0 | 12763790 | class BankAccount:
def __init__(self):
self.accountNum = 0
self.accountOwner = ""
self.accountBalance = 0.00
def ModifyAccount(self, id, name, balance):
self.accountNum = id
self.accountOwner = name
self.accountBalance = balance
# Demo usage: populate one account and print a one-line summary.
account = BankAccount()
account.ModifyAccount(12345, "<NAME>", 123456.78)
print("Account ID: {}, Name: {}, Balance: ${}".format(account.accountNum, account.accountOwner, account.accountBalance))
alphatwirl/selection/factories/expand.py | shane-breeze/AlphaTwirl | 0 | 12763791 | <reponame>shane-breeze/AlphaTwirl
# <NAME> <<EMAIL>>
##__________________________________________________________________||
def expand_path_cfg(path_cfg, alias_dict=None, overriding_kargs=None):
    """expand a path config

    Args:
        path_cfg (str, tuple, dict): a config for path
        alias_dict (dict): a dict for aliases
        overriding_kargs (dict): to be used for recursive call
    """
    # Bug fix: use None sentinels instead of mutable `{ }` defaults so a
    # single dict object is not silently shared across calls.
    if alias_dict is None:
        alias_dict = { }
    if overriding_kargs is None:
        overriding_kargs = { }
    if isinstance(path_cfg, str):
        return _expand_str(path_cfg, alias_dict, overriding_kargs)

    if isinstance(path_cfg, dict):
        return _expand_dict(path_cfg, alias_dict)

    # assume tuple or list
    return _expand_tuple(path_cfg, alias_dict, overriding_kargs)
##__________________________________________________________________||
def _expand_str(path_cfg, alias_dict, overriding_kargs):
    """expand a path config given as a string

    A string is either an alias registered in `alias_dict` or a lambda
    string such as 'ev : {low} <= ev.var[0] < {high}'.
    """

    if path_cfg in alias_dict:
        # e.g., path_cfg = 'var_cut'
        return _expand_str_alias(path_cfg, alias_dict, overriding_kargs)

    # e.g., path_cfg = 'ev : {low} <= ev.var[0] < {high}'
    return _expand_for_lambda_str(path_cfg, alias_dict, overriding_kargs)
def _expand_for_lambda_str(path_cfg, alias_dict, overriding_kargs):
# e.g.,
# path_cfg = 'ev : {low} <= ev.var[0] < {high}'
ret = dict(factory='LambdaStrFactory', lambda_str=path_cfg, components=())
# e.g.,
# {
# 'factory': 'LambdaStrFactory',
# 'lambda_str': 'ev : {low} <= ev.var[0] < {high}'
# }
overriding_kargs_copy = overriding_kargs.copy()
# e.g., {'low': 25, 'high': 200, 'alias': 'var_cut', 'name': 'var_cut25'}
if 'alias' in overriding_kargs:
ret['name'] = overriding_kargs_copy.pop('alias')
if 'name' in overriding_kargs:
ret['name'] = overriding_kargs_copy.pop('name')
ret.update(overriding_kargs_copy)
# e.g.,
# {
# 'factory': 'LambdaStrFactory',
# 'lambda_str': 'ev : {low} <= ev.var[0] < {high}',
# 'name': 'var_cut25',
# 'low': 25, 'high': 200
# }
return ret
def _expand_str_alias(path_cfg, alias_dict, overriding_kargs):
    """expand a path config given as a string

    Args:
        path_cfg (str): an alias
        alias_dict (dict):
        overriding_kargs (dict):
    """

    # e.g.,
    # path_cfg = 'var_cut'

    new_path_cfg = alias_dict[path_cfg]
    # e.g., ('ev : {low} <= ev.var[0] < {high}', {'low': 10, 'high': 200})

    # Remember which alias produced this config; explicit overriding kargs
    # still take precedence over the implied 'alias' entry.
    new_overriding_kargs = dict(alias=path_cfg)
    # e.g., {'alias': 'var_cut'}

    new_overriding_kargs.update(overriding_kargs)
    # e.g., {'alias': 'var_cut', 'name': 'var_cut25', 'low': 25}

    return expand_path_cfg(new_path_cfg, alias_dict,new_overriding_kargs)
##__________________________________________________________________||
def _expand_tuple(path_cfg, alias_dict, overriding_kargs):
    """Expand a (path_cfg, kargs) pair; explicit `overriding_kargs` win over
    the kargs carried in the tuple itself."""
    # e.g., path_cfg = ('ev : {low} <= ev.var[0] < {high}', {'low': 10, 'high': 200})
    inner_cfg = path_cfg[0]
    merged_kargs = path_cfg[1].copy()
    merged_kargs.update(overriding_kargs)
    return expand_path_cfg(
        inner_cfg,
        overriding_kargs=merged_kargs,
        alias_dict=alias_dict
    )
##__________________________________________________________________||
def _expand_dict(path_cfg, alias_dict):
if 'factory' in path_cfg:
return path_cfg
if not sum([k in path_cfg for k in ('All', 'Any', 'Not')]) <= 1:
raise ValueError("Any pair of 'All', 'Any', 'Not' cannot be simultaneously given unless factory is given!")
if 'All' in path_cfg:
new_path_cfg = path_cfg.copy()
new_path_cfg['factory'] = 'AllFactory'
new_path_cfg['components'] = tuple([expand_path_cfg(p, alias_dict=alias_dict) for p in new_path_cfg.pop('All')])
return new_path_cfg
if 'Any' in path_cfg:
new_path_cfg = path_cfg.copy()
new_path_cfg['factory'] = 'AnyFactory'
new_path_cfg['components'] = tuple([expand_path_cfg(p, alias_dict=alias_dict) for p in new_path_cfg.pop('Any')])
return new_path_cfg
if 'Not' in path_cfg:
new_path_cfg = path_cfg.copy()
new_path_cfg['factory'] = 'NotFactory'
new_path_cfg['components'] = (expand_path_cfg(new_path_cfg.pop('Not'), alias_dict=alias_dict), )
return new_path_cfg
raise ValueError("cannot recognize the path_cfg")
##__________________________________________________________________||
| 2.375 | 2 |
src/website/search/urls.py | jhk523/news-trend | 1 | 12763792 | <gh_stars>1-10
from django.urls import path
from django.conf.urls import url

from . import views
from search.views import Search, Result

# NOTE(review): the URL namespace is 'status' although the app directory is
# 'search'; confirm reverse()/{% url %} callers expect 'status:index' etc.
app_name = 'status'

# Routes: the root shows the search form, /result/ shows search results.
urlpatterns = [
    url(r'^$', Search.as_view(), name='index'),
    url(r'^result/$', Result.as_view(), name='result')
    # path('', Search.as_view(), name='index'),
    # path('result/', Result.as_view(), name='result')
]
| 1.84375 | 2 |
test.py | l-bdx/iso639 | 6 | 12763793 | import unittest
from iso639 import Lang, iter_langs
from iso639.exceptions import InvalidLanguageValue, DeprecatedLanguageValue
class TestLang(unittest.TestCase):
    """Test the Lang class."""

    # Every known language value, grouped by tag, gathered from Lang's
    # internal tables (reading private attributes is acceptable in tests).
    lang_vals = {tg: set(d.keys()) for tg, d in Lang._data.items()}
    lang_vals["changed_to"] = {
        d["change_to"] for d in Lang._deprecated.values() if d["change_to"]
    }
    lang_vals["deprecated"] = set(Lang._deprecated.keys())
    lang_vals["macro"] = set(Lang._macro["macro"].keys())
    lang_vals["individual"] = set(Lang._macro["individual"].keys())

    # --- construction from the various ISO 639 code parts ---

    def test_pt1(self):
        lg = Lang("fr")
        self.assertEqual(lg.pt1, "fr")
        self.assertEqual(lg.pt2b, "fre")
        self.assertEqual(lg.pt2t, "fra")
        self.assertEqual(lg.pt3, "fra")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "French")

    def test_upper_pt1(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("FR")

    def test_capitalized_pt1(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("Fr")

    def test_pt2b(self):
        lg = Lang("fre")
        self.assertEqual(lg.pt1, "fr")
        self.assertEqual(lg.pt2b, "fre")
        self.assertEqual(lg.pt2t, "fra")
        self.assertEqual(lg.pt3, "fra")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "French")

    def test_pt2t(self):
        lg = Lang("deu")
        self.assertEqual(lg.pt1, "de")
        self.assertEqual(lg.pt2b, "ger")
        self.assertEqual(lg.pt2t, "deu")
        self.assertEqual(lg.pt3, "deu")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "German")

    def test_pt5(self):
        lg = Lang("ber")
        self.assertEqual(lg.pt1, "")
        self.assertEqual(lg.pt2b, "ber")
        self.assertEqual(lg.pt2t, "")
        self.assertEqual(lg.pt3, "")
        self.assertEqual(lg.pt5, "ber")
        self.assertEqual(lg.name, "Berber languages")

    def test_pt3_with_other_pts(self):
        lg = Lang("eng")
        self.assertEqual(lg.pt1, "en")
        self.assertEqual(lg.pt2b, "eng")
        self.assertEqual(lg.pt2t, "eng")
        self.assertEqual(lg.pt3, "eng")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "English")

    def test_pt3_without_other_pts(self):
        lg = Lang("cmn")
        self.assertEqual(lg.pt1, "")
        self.assertEqual(lg.pt2b, "")
        self.assertEqual(lg.pt2t, "")
        self.assertEqual(lg.pt3, "cmn")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "Mandarin Chinese")

    def test_upper_pt3(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("ENG")

    def test_capitalized_pt3(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("Eng")

    # --- construction from the English name (case-sensitive) ---

    def test_name(self):
        lg = Lang("German")
        self.assertEqual(lg.pt1, "de")
        self.assertEqual(lg.pt2b, "ger")
        self.assertEqual(lg.pt2t, "deu")
        self.assertEqual(lg.pt3, "deu")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "German")

    def test_lower_name(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("german")

    def test_upper_name(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("GERMAN")

    # --- equality, hashing, and copy construction ---

    def test_equal_languages(self):
        lg1 = Lang("eng")
        lg2 = Lang("en")
        self.assertEqual(lg1, lg2)

    def test_not_equal_languages(self):
        lg1 = Lang("fra")
        lg2 = Lang("eng")
        self.assertNotEqual(lg1, lg2)

    def test_not_equal_languages_string(self):
        lg1 = Lang("fra")
        lg2 = "fra"
        self.assertNotEqual(lg1, lg2)

    def test_not_equal_languages_None(self):
        lg1 = Lang("fra")
        lg2 = None
        self.assertNotEqual(lg1, lg2)

    def test_lang_of_lang(self):
        lg1 = Lang("fra")
        lg2 = Lang(lg1)
        self.assertEqual(lg1, lg2)

    # --- positional and keyword argument validation ---

    def test_multiple_args(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("fra", "fr")

    def test_kwarg(self):
        lg = Lang(pt1="fr")
        self.assertEqual(lg.pt1, "fr")
        self.assertEqual(lg.pt2b, "fre")
        self.assertEqual(lg.pt2t, "fra")
        self.assertEqual(lg.pt3, "fra")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "French")

    def test_multiple_kwargs(self):
        lg = Lang(pt1="fr", name="French")
        self.assertEqual(lg.pt1, "fr")
        self.assertEqual(lg.pt2b, "fre")
        self.assertEqual(lg.pt2t, "fra")
        self.assertEqual(lg.pt3, "fra")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "French")

    def test_kwarg_wrong_value(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang(pt1="fra")

    def test_kwargs_wrong_second_value(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang(pt1="fr", pt3="deu")

    def test_kwargs_right_empty_second_value(self):
        Lang(pt1="fr", pt5="")

    def test_kwarg_wrong_key(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang(foobar="fr")

    def test_kwarg_wrong_second_key(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang(pt1="fr", foobar="fra")

    def test_no_arg_no_kwarg(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang()

    def test_none_arg(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang(None)

    def test_none_kwarg(self):
        for tag in Lang._tags:
            kwargs = {tag: ""}
            with self.assertRaises(InvalidLanguageValue):
                Lang(**kwargs)

    def test_empty_string_arg(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("")

    def test_empty_string_kwarg(self):
        for tag in Lang._tags:
            kwargs = {tag: ""}
            with self.assertRaises(InvalidLanguageValue):
                Lang(**kwargs)

    def test_arg_and_kwarg(self):
        lg = Lang("fra", pt1="fr")
        self.assertEqual(lg.pt1, "fr")
        self.assertEqual(lg.pt2b, "fre")
        self.assertEqual(lg.pt2t, "fra")
        self.assertEqual(lg.pt3, "fra")
        self.assertEqual(lg.pt5, "")
        self.assertEqual(lg.name, "French")

    def test_arg_and_kwarg_nok(self):
        with self.assertRaises(InvalidLanguageValue):
            Lang("fra", pt1="deu")

    # --- repr, immutability, hashability ---

    def test_repr(self):
        lg = Lang("alu")
        s = (
            """Lang(name="'Are'are", pt1='', pt2b='', """
            """pt2t='', pt3='alu', pt5='')"""
        )
        self.assertEqual(s, repr(lg))

    def test_immutable(self):
        lg = Lang("fra")
        with self.assertRaises(AttributeError):
            lg.pt1 = "en"

    def test_hashable_set_element(self):
        lg = Lang("fra")
        s = set()
        s.add(lg)
        self.assertIn(lg, s)

    def test_hashable_dict_key(self):
        lg = Lang("fra")
        d = {}
        d.update({lg: "foobar"})
        self.assertEqual(d[lg], "foobar")

    # --- scope/type classification and macro/individual relations ---

    def test_scope(self):
        self.assertEqual(Lang("fra").scope(), "Individual")
        self.assertEqual(Lang("zh").scope(), "Macrolanguage")
        self.assertEqual(Lang("und").scope(), "Special")
        self.assertIsNone(Lang("ber").scope())

    def test_type(self):
        self.assertEqual(Lang("lat").type(), "Ancient")
        self.assertEqual(Lang("epo").type(), "Constructed")
        self.assertEqual(Lang("djf").type(), "Extinct")
        self.assertEqual(Lang("fro").type(), "Historical")
        self.assertEqual(Lang("fra").type(), "Living")
        self.assertEqual(Lang("und").type(), "Special")
        self.assertIsNone(Lang("ber").type())

    def test_macro(self):
        lg = Lang("cmn")
        self.assertEqual(lg.macro().pt3, "zho")

    def test_individuals(self):
        lg = Lang("fas")
        ind_lgs = {x.pt3 for x in lg.individuals()}
        self.assertIn("pes", ind_lgs)

    # --- deprecated code handling ---

    def test_deprecated_arg(self):
        for pt3 in self.lang_vals["deprecated"]:
            with self.assertRaises(DeprecatedLanguageValue):
                Lang(pt3)

    def test_deprecated_kwarg(self):
        for pt3 in self.lang_vals["deprecated"]:
            with self.assertRaises(DeprecatedLanguageValue):
                Lang(pt3=pt3)

    def test_deprecated_with_change_to(self):
        for pt in ("name", "pt1", "pt2b", "pt2t", "pt3", "pt5"):
            for lv in self.lang_vals[pt]:
                try:
                    Lang(lv)
                except DeprecatedLanguageValue as e:
                    if e.change_to:
                        Lang(e.change_to)

    # --- structural invariants over the whole data set ---

    def test_no_macro_of_macro(self):
        for lvs in self.lang_vals.values():
            for lv in lvs:
                try:
                    macro = Lang(lv).macro()
                except DeprecatedLanguageValue:
                    continue
                else:
                    if macro is not None:
                        self.assertIsNone(macro.macro())

    def test_no_individual_of_individual(self):
        for lvs in self.lang_vals.values():
            for lv in lvs:
                try:
                    individuals = Lang(lv).individuals()
                except DeprecatedLanguageValue:
                    continue
                else:
                    for ind in individuals:
                        self.assertEqual(ind.individuals(), [])

    def test_iter_langs(self):
        lg1 = next(iter_langs()).name
        lgs = [lg.name for lg in iter_langs()]
        self.assertEqual(lg1, lgs[0])
        self.assertEqual(len(set(lgs)), len(lgs))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 3 | 3 |
ongoing.py | krbrs/weechat-ongoing | 0 | 12763794 | <reponame>krbrs/weechat-ongoing
# -*- coding: utf-8 -*-
#
# ongoing.py
# Weechat script for automatized downloading of the new releases on XDCC bots.
#
# To enable it copy the file "ongoing.py" to ~/.weechat/python/ directiry and
# execute the command "/python load python/ongoing.py"
#
# The comprehensive information about script usage can be found using
# the command "/help ongoing".
#
# Licensed under MIT, see LICENSE file content for the details.
#
#
# HERE BE DRAGONS
#
import weechat
import re, os, pickle
SCRIPT_NAME = "ongoing"
SCRIPT_AUTHOR = "<NAME> <<EMAIL>>"
SCRIPT_VERSION = "0.1"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Automatically downloads new files on mask from XDCC bots"
SCRIPT_COMMAND = "ongoing"
SCRIPT_CMDS = "channel [name] | add_bot name regex | del_bot name | " + \
"list_bots | add_filter regex | del_filter id | list_filters"
SCRIPT_HELP = """Available commands:
{0}/{2} channel{1} - get the name of the channel for monitoring
{0}/{2} channel #nibl{1} - monitor the channel #nibl for the updates
{0}/{2} add_bot KareRaisu .*SEND\s([0-9]+).*{1} - look for the messages from
the bot KareRaisu matching the listed regular expression (the only
mentioned group in regex should be pack ID! Well, this regex should work
with the most of Eggdrop installations and masquerading ones)
{0}/{2} list_bots{1} - list of the bots are watched
{0}/{2} del_bot KareRaisu{1} - stop the monitoring of messages from this bot
{0}/{2} add_filter HorribleSubs.*Kantai.*720p{1} - add filter for the files
with the names matching with this regular expression
{0}/{2} list_filters{1} - list of the enabled file filters with their IDs
{0}/{2} del_filter 1{1} - delete the filter with ID 1
""".format(weechat.color("yellow"), weechat.color("chat"), SCRIPT_COMMAND)
# register new weechat script
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
                 SCRIPT_DESC, "", "")

# the only default configuration option for the script
DEFAULT_OPTIONS = {
    "channel": "#news"
}

# On-disk state lives under <weechat_dir>/ongoing/ as pickled databases.
CONFIG_DIR = weechat.info_get("weechat_dir", "") + "/ongoing/"
FILE_BOTS = CONFIG_DIR + "bots.db"
FILE_FILTERS = CONFIG_DIR + "filters.db"

# load script configuration
# Seed every plugin option the user has not set yet with its default value.
for option, default_value in DEFAULT_OPTIONS.items():
    if not weechat.config_is_set_plugin(option):
        weechat.config_set_plugin(option, default_value)
# --------------------------------------
# File manipulation helpers
def file_check(f):
    """Ensure that file `f` exists, creating parent directories and an empty
    file if necessary.

    Bug fix: the previous open()-probe fell through to `open(f, 'w')` on ANY
    IOError, which would truncate an existing file whose open failed for a
    reason other than absence (e.g. a permissions error). Explicit existence
    checks never clobber an existing file.
    """
    d = os.path.dirname(f)
    if d and not os.path.exists(d):
        os.makedirs(d)
    if not os.path.exists(f):
        open(f, 'w').close()
def file_read(f):
    """Unpickle and return the contents of file `f`, creating the file first
    if it does not exist yet.

    Returns {} when the file is empty (fresh installation).
    """
    file_check(f)
    with open(f, "rb") as ff:
        try:
            return pickle.load(ff)
        except EOFError:
            return {}
def file_write(f, data):
    """Pickle `data` into file `f`, overwriting any previous contents."""
    with open(f, "wb") as ff:
        pickle.dump(data, ff)
    # (removed a dead trailing `pass` statement)
# --------------------------------------
# script command handlers
def stats():
    """Placeholder command handler: statistics output is not implemented yet."""
    weechat.prnt("", "stats goes here")
    return weechat.WEECHAT_RC_OK
def get_channel():
    """Print the name of the channel currently being monitored."""
    name = weechat.config_get_plugin("channel")
    weechat.prnt("", "The current channel is %s%s" %
                 (weechat.color("green"), name))
    return weechat.WEECHAT_RC_OK
def set_channel(channel_name):
    """Persist *channel_name* (lower-cased) as the channel to monitor."""
    weechat.config_set_plugin("channel", channel_name.lower())
    weechat.prnt("", "The channel set to %s%s" %
                 (weechat.color("green"), channel_name))
    return weechat.WEECHAT_RC_OK
def add_bot(bot_name, regex):
    """Register *bot_name* with its pack-ID *regex* in the bots database."""
    registry = file_read(FILE_BOTS)
    registry[bot_name] = regex
    file_write(FILE_BOTS, registry)
    green, chat = weechat.color("green"), weechat.color("chat")
    weechat.prnt("", "Added %s%s%s to XDCC providers list." %
                 (green, bot_name, chat))
    return weechat.WEECHAT_RC_OK
def list_bots():
    """Print every watched bot together with its pack-ID regex."""
    watched = file_read(FILE_BOTS)
    if not watched:
        weechat.prnt("", "%sThere are no added bots to watch for updates on." %
                     (weechat.color("red")))
        return weechat.WEECHAT_RC_OK
    weechat.prnt("", "-- %sList of the watched bots %s--------" %
                 (weechat.color("yellow"), weechat.color("chat")))
    for bot_name, regex in watched.items():
        weechat.prnt("", "  %s%-24s %s%s" % (weechat.color("green"),
                     bot_name, weechat.color("chat"), regex))
    weechat.prnt("", "------------------------------------")
    return weechat.WEECHAT_RC_OK
def del_bot(bot_name):
    """Remove *bot_name* from the watched-bots database and report the result.

    Unlike the previous version, the database file is rewritten only when
    an entry was actually removed, avoiding a useless disk write on the
    not-found path.
    """
    bots = file_read(FILE_BOTS)
    try:
        bots.pop(bot_name)
    except KeyError:
        weechat.prnt("", "There is no bot named %s%s%s in list to delete him." %
                     (weechat.color("red"), bot_name, weechat.color("chat")))
        return weechat.WEECHAT_RC_OK
    file_write(FILE_BOTS, bots)
    weechat.prnt("", "%s%s%s has been removed from the list." %
                 (weechat.color("green"), bot_name, weechat.color("chat")))
    return weechat.WEECHAT_RC_OK
def add_filter(fltr):
    """Append the regex *fltr* to the persisted list of file-name filters."""
    db = file_read(FILE_FILTERS)
    current = db.get('filters', [])
    current.append(fltr)
    file_write(FILE_FILTERS, {"filters": current})
    weechat.prnt("", "Added %s%s%s to file filters list." %
                 (weechat.color("green"), fltr, weechat.color("chat")))
    return weechat.WEECHAT_RC_OK
def list_filters():
    """Print the enabled file filters together with their 1-based IDs.

    Uses ``enumerate`` instead of the previous hand-rolled counter.
    """
    filters_h = file_read(FILE_FILTERS)
    if filters_h == {} or filters_h == {"filters": []}:
        weechat.prnt("", "%sThere are no added file filters." %
                     (weechat.color("red")))
    else:
        weechat.prnt("", "-- %sList of the file filters %s--------" %
                     (weechat.color("yellow"), weechat.color("chat")))
        # IDs are 1-based so they match what del_filter expects.
        for i, fltr in enumerate(filters_h["filters"], start=1):
            weechat.prnt("", "%4s %s%s%s" % (str(i), weechat.color("green"),
                                             fltr, (weechat.color("chat"))))
        weechat.prnt("", "------------------------------------")
    return weechat.WEECHAT_RC_OK
def del_filter(fltr_id):
    """Delete the filter with the given 1-based ID and report the result.

    Fixes two defects of the previous version: a non-numeric ID raised an
    uncaught ``ValueError`` from ``int()``, and an ID <= 0 silently deleted
    an entry from the *end* of the list via Python's negative indexing
    (e.g. ``del_filter 0`` removed the last filter).
    """
    try:
        fid = int(fltr_id)
    except ValueError:
        weechat.prnt("", "There is no filter ID %s%s%s in list to delete it." %
                     (weechat.color("red"), str(fltr_id), weechat.color("chat")))
        return weechat.WEECHAT_RC_OK
    filters_h = file_read(FILE_FILTERS)
    if filters_h == {} or filters_h == {"filters": []}:
        weechat.prnt("", "%sThere are no added file filters." %
                     (weechat.color("red")))
        return weechat.WEECHAT_RC_OK
    filters = filters_h['filters']
    # Accept only valid 1-based positions; rejecting fid < 1 avoids the
    # negative-index pitfall described above.
    if fid < 1 or fid > len(filters):
        weechat.prnt("", "There is no filter ID %s%s%s in list to delete it." %
                     (weechat.color("red"), str(fid), weechat.color("chat")))
        return weechat.WEECHAT_RC_OK
    fltr_data = filters.pop(fid - 1)
    weechat.prnt("", "%s%s%s has been removed from the list." %
                 (weechat.color("green"), fltr_data, weechat.color("chat")))
    file_write(FILE_FILTERS, {"filters": filters})
    return weechat.WEECHAT_RC_OK
# --------------------------------------
# handler for the hook for script commands
def ongoing_hook(data, buffer, args):
    """Weechat command callback: dispatch ``/ongoing <cmd> [args]``.

    Returns WEECHAT_RC_OK on success and WEECHAT_RC_ERROR when a
    sub-command is missing or has malformed arguments.

    Fix: ``add_bot <name>`` without a regex made the two-element unpack
    below raise ``ValueError``, which the previous ``except IndexError``
    did not catch and so crashed the callback; both are handled now.
    """
    a = args.split(" ", 1)
    command = a[0]
    try:
        retval = weechat.WEECHAT_RC_OK
        if command == "stats":
            retval = stats()
        elif command == "channel":
            if len(a) == 1:
                retval = get_channel()
            else:
                retval = set_channel(a[1])
        elif command == "add_bot":
            [bot_name, regex] = a[1].split(" ", 1)
            retval = add_bot(bot_name, regex)
        elif command == "list_bots":
            retval = list_bots()
        elif command == "del_bot":
            retval = del_bot(a[1])
        elif command == "add_filter":
            retval = add_filter(a[1])
        elif command == "list_filters":
            retval = list_filters()
        elif command == "del_filter":
            retval = del_filter(a[1])
    except (IndexError, ValueError):
        # Missing argument (a[1]) or un-unpackable add_bot arguments.
        retval = weechat.WEECHAT_RC_ERROR
    return retval
# --------------------------------------
# handle for the hook for parsing channel messages
def parse_messages(data, signal, signal_data):
    """Signal callback for incoming PRIVMSGs.

    When a message on the monitored channel comes from a watched bot and
    matches one of the file filters, extract the pack ID with the bot's
    regex and issue an ``xdcc send`` request.

    Fix: an empty filters database made ``file_read(FILE_FILTERS)`` return
    ``{}`` and the previous ``['filters']`` lookup raise ``KeyError``;
    ``.get('filters', [])`` is used instead.
    """
    # signal is "<server>,irc_in2_privmsg"; keep the server part.
    srv = signal.split(',', 2)[0]
    msghash = weechat.info_get_hashtable("irc_message_parse",
                                         {"message": signal_data})
    if msghash['channel'].lower() == weechat.config_get_plugin("channel"):
        bots = file_read(FILE_BOTS)
        if msghash['nick'] in bots:
            regex = bots[msghash['nick']]
            filters = file_read(FILE_FILTERS).get('filters', [])
            for fltr in filters:
                if re.search(fltr, msghash['arguments']):
                    g = re.search(regex, msghash['arguments'])
                    if g:
                        file_id = g.group(1)
                        weechat.command("", "/msg -server %s %s xdcc send %s" %
                                        (srv, msghash['nick'], file_id))
                    # Only the first matching filter triggers a request.
                    break
    # weechat.prnt("", "%s" % signal)
    return weechat.WEECHAT_RC_OK
# --------------------------------------
# register the hook for script commands
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC, SCRIPT_CMDS, SCRIPT_HELP,
                     # completion template for the sub-commands
                     "channel %(channel_name)"
                     " || add_bot %(bot_name) %(regex)"
                     " || list_bots"
                     " || del_bot %(bot_name)"
                     " || add_filter %(filter)"
                     " || list_filters"
                     " || del_filter %(filter_id)",
                     "ongoing_hook", "")
# register the hooks for message parser (any server, incoming PRIVMSG)
weechat.hook_signal("*,irc_in2_privmsg", "parse_messages", "")
| 2.21875 | 2 |
nlptools/news/test/utils.py | hayj/NLPTools | 4 | 12763795 | from systemtools.basics import *
from systemtools.file import *
from systemtools.location import *
from datatools.jsonutils import NDJson
import random
from datastructuretools.processing import *
import copy
def newsCleanerDataTestGen():
    """Yield ``(filename, text)`` pairs for each non-trivial news-cleaner
    test fixture under ``testdata/newscleaner``.

    Files whose stripped content is 2 characters or shorter are skipped.
    """
    for filePath in sortedGlob(execDir(__file__) + "/testdata/newscleaner/*.txt"):
        # Only the bare file name is needed; indexing the decomposition
        # avoids shadowing the builtin `dir` and binding unused names.
        filename = decomposePath(filePath)[1]
        text = fileToStr(filePath).strip()
        if len(text) > 2:
            yield (filename, text)
neurolang/datalog/translate_to_named_ra.py | antonia-ms/NeuroLang-1 | 0 | 12763796 | import collections
from operator import contains, eq, not_
from typing import AbstractSet, Callable, Tuple
from ..exceptions import ForbiddenExpressionError, NeuroLangException
from ..expression_walker import (
ExpressionBasicEvaluator,
ReplaceExpressionsByValues,
add_match,
)
from ..expressions import Constant, FunctionApplication, Symbol
from ..logic import Disjunction
from ..relational_algebra import (
ColumnInt,
ColumnStr,
Destroy,
Difference,
ExtendedProjection,
ExtendedProjectionListMember,
NameColumns,
NaturalJoin,
Projection,
RelationalAlgebraOperation,
Selection,
Union,
get_expression_columns,
)
from ..type_system import is_leq_informative
from ..utils import NamedRelationalAlgebraFrozenSet
from .expressions import Conjunction, Negation
EQ = Constant(eq)  # canonical equality functor used throughout the translation
CONTAINS = Constant(contains)  # membership functor; drives Destroy operations
EQ_pattern = Constant[Callable](eq)  # typed pattern matched by @add_match rules
Builtin_pattern = Constant[Callable]  # matches any callable-constant functor
REBV = ReplaceExpressionsByValues({})  # walker turning Constant expressions into raw values
class TranslateToNamedRAException(NeuroLangException):
    """Base class for errors raised while translating to named RA."""
    pass
class CouldNotTranslateConjunctionException(TranslateToNamedRAException):
    """Raised when a conjunction's formulas cannot all be resolved.

    The untranslatable expression is kept on ``self.output`` for callers.
    """
    def __init__(self, output):
        super().__init__(f"Could not translate conjunction: {output}")
        self.output = output
class NegativeFormulaNotSafeRangeException(TranslateToNamedRAException):
    """Raised when a negated predicate's columns are not covered by the
    positive part of the conjunction (not safe range)."""
    def __init__(self, formula):
        super().__init__(f"Negative predicate {formula} is not safe range")
        self.formula = formula
class NegativeFormulaNotNamedRelationException(TranslateToNamedRAException):
    """Raised when a negated formula does not translate to a named relation."""
    def __init__(self, formula):
        super().__init__(f"Negative formula {formula} is not a named relation")
        self.formula = formula
class TranslateToNamedRA(ExpressionBasicEvaluator):
    """Partial implementation of algorithm 5.4.8 from [1]_.

    Walks a (safe-range) logic expression and produces an equivalent
    named relational-algebra expression built from the operations in
    ``..relational_algebra``.

    .. [1] <NAME>, <NAME>, <NAME>, Foundations of databases
       (Addison Wesley, 1995), Addison-Wesley.
    """

    @add_match(FunctionApplication(EQ_pattern, (Constant, Symbol)))
    def translate_eq_c_s(self, expression):
        # Normalise ``c == s`` to ``s == c`` and re-dispatch.
        return self.walk(EQ(*expression.args[::-1]))

    @add_match(FunctionApplication(EQ_pattern, (Symbol, Constant)))
    def translate_eq_s_c(self, expression):
        # Replace the symbol by a column reference of the same name.
        symbol, constant = expression.args
        return self.walk(
            FunctionApplication(
                EQ,
                (
                    Constant[ColumnStr](symbol.name, verify_type=False),
                    constant,
                ),
            )
        )

    @add_match(FunctionApplication(EQ_pattern, (FunctionApplication, Symbol)))
    def translate_eq_fa_s(self, expression):
        # Normalise ``f(...) == s`` to ``s == f(...)`` and re-dispatch.
        return self.walk(EQ(*expression.args[::-1]))

    @add_match(FunctionApplication(EQ_pattern, (Symbol, FunctionApplication)))
    def translate_eq_c_fa(self, expression):
        processed_fa = self.walk(expression.args[1])
        # If the walk produced an RA operation, keep the original
        # function application on the right-hand side.
        if isinstance(processed_fa, RelationalAlgebraOperation):
            processed_fa = expression.args[1]
        if processed_fa is not expression.args[1]:
            res = self.walk(
                FunctionApplication(EQ, (expression.args[0], processed_fa))
            )
        else:
            dst = Constant[ColumnStr](
                ColumnStr(expression.args[0].name), verify_type=False
            )
            res = FunctionApplication(EQ, (dst, processed_fa))
        return res

    @add_match(FunctionApplication(EQ_pattern, (Symbol, Symbol)))
    def translate_eq_c_c(self, expression):
        # Both sides become column references of the symbols' names.
        left = Constant[ColumnStr](
            ColumnStr(expression.args[0].name), verify_type=False
        )
        right = Constant[ColumnStr](
            ColumnStr(expression.args[1].name), verify_type=False
        )
        res = FunctionApplication(EQ, (left, right))
        return res

    @add_match(FunctionApplication(EQ_pattern, ...))
    def translate_eq(self, expression):
        # Generic equality: walk the arguments and rebuild only if changed.
        new_args = tuple()
        changed = False
        for arg in expression.args:
            new_arg = self.walk(arg)
            changed |= new_arg is not arg
            new_args += (new_arg,)
        if changed:
            return EQ(*new_args)
        else:
            return expression

    @add_match(
        FunctionApplication(Builtin_pattern, ...),
        lambda exp: len(exp._symbols) > 0,
    )
    def translate_builtin_fa(self, expression):
        # Builtin application still containing symbols: turn each symbol
        # (and each all-symbol tuple) into column references.
        args = expression.args
        new_args = tuple()
        changed = False
        for arg in args:
            new_arg = self.walk(arg)
            if isinstance(new_arg, Symbol):
                new_arg = Constant[ColumnStr](
                    ColumnStr(new_arg.name), verify_type=False
                )
                changed |= True
            elif isinstance(new_arg, Constant[Tuple]) and all(
                isinstance(v, Symbol) for v in new_arg.value
            ):
                n = len(new_arg.value)
                new_arg = Constant[Tuple[(ColumnStr,) * n]](
                    tuple(ColumnStr(v.name) for v in new_arg.value)
                )
                changed |= True
            else:
                changed |= new_arg is not arg
            new_args += (new_arg,)
        if changed:
            res = FunctionApplication(expression.functor, new_args)
        else:
            res = expression
        return res

    @add_match(
        FunctionApplication(Builtin_pattern, ...),
        lambda exp: all(
            isinstance(arg, Constant)
            and not issubclass(arg.type, (ColumnInt, ColumnStr))
            for arg in exp.args
        ),
    )
    def translate_builtin_fa_constants(self, expression):
        # All-constant builtin application: just evaluate it.
        return ExpressionBasicEvaluator.evaluate_function(self, expression)

    @add_match(FunctionApplication)
    def translate_fa(self, expression):
        """Translate a predicate application into Selection/Projection/
        NameColumns over the relation denoted by its functor.

        Constants select by positional column; repeated symbols select
        column-equality; fresh symbols become named projected columns.
        """
        functor = self.walk(expression.functor)
        named_args = list()
        projections = list()
        selections = dict()
        selection_columns = dict()
        stack = list(reversed(expression.args))
        counter = 0
        while stack:
            arg = stack.pop()
            if isinstance(arg, Constant):
                selections[counter] = arg
            elif arg in named_args:
                selection_columns[counter] = named_args.index(arg)
            elif isinstance(arg, FunctionApplication):
                # Flatten nested applications into the positional stream.
                stack += list(reversed(arg.args))
            else:
                projections.append(
                    Constant[ColumnInt](counter, verify_type=False)
                )
                named_args.append(arg)
            counter += 1
        in_set = self.generate_ra_expression(
            functor,
            selections,
            selection_columns,
            tuple(projections),
            tuple(named_args),
        )
        return in_set

    def generate_ra_expression(
        self, functor, selections, selection_columns, projections, named_args
    ):
        """Assemble Selection -> Projection -> NameColumns over *functor*."""
        in_set = functor
        for k, v in selections.items():
            criterium = EQ(Constant[ColumnInt](k, verify_type=False), v)
            in_set = Selection(in_set, criterium)
        for k, v in selection_columns.items():
            criterium = EQ(
                Constant[ColumnInt](k, verify_type=False),
                Constant[ColumnInt](v, verify_type=False),
            )
            in_set = Selection(in_set, criterium)
        in_set = Projection(in_set, projections)
        column_names = tuple(
            Constant[ColumnStr](ColumnStr(arg.name), verify_type=False)
            for arg in named_args
        )
        in_set = NameColumns(in_set, column_names)
        return in_set

    @add_match(Negation)
    def translate_negation(self, expression):
        # Double negation elimination.
        if isinstance(expression.formula, Negation):
            return self.walk(expression.formula.formula)

        formula = expression.formula
        if isinstance(formula, FunctionApplication) and isinstance(
            formula.functor, Constant
        ):
            # Negated builtin: rewrite as not_(formula) and re-dispatch.
            res = FunctionApplication(Constant(not_), (formula,))
            res = self.walk(res)
        else:
            res = Negation(self.walk(expression.formula))
        return res

    @add_match(Disjunction)
    def translate_disjunction(self, expression):
        # Fold the translated formulas into a right-nested Union.
        ra_formulas = self.walk(expression.formulas)
        ra_formulas = list(ra_formulas)
        formula = ra_formulas.pop()
        while len(ra_formulas) > 0:
            formula_ = ra_formulas.pop()
            formula = Union(formula_, formula)
        return formula

    @add_match(Conjunction)
    def translate_conjunction(self, expression):
        """Translate a conjunction by classifying its formulas and then
        repeatedly applying destroy / equality / extended-projection /
        selection passes until all are consumed.

        Raises CouldNotTranslateConjunctionException when a pass makes no
        progress and the extended-projection fallback does not help.
        """
        classified_formulas = self.classify_formulas_obtain_names(expression)
        output = self.process_positive_formulas(classified_formulas)
        output = self.process_negative_formulas(classified_formulas, output)
        while (
            len(classified_formulas["destroy_formulas"])
            + len(classified_formulas["selection_formulas"])
            + len(classified_formulas["eq_formulas"])
            + len(classified_formulas["ext_proj_formulas"])
        ) > 0:
            new_output = self.process_destroy_formulas(
                classified_formulas, output
            )
            new_output = self.process_equality_formulas(
                classified_formulas, new_output
            )
            new_output = self.process_extended_projection_formulas(
                classified_formulas, new_output
            )
            new_output = self.process_selection_formulas(
                classified_formulas, new_output
            )
            if new_output is output:
                # No pass progressed; last resort is to realise the
                # remaining equalities as extended projections.
                new_output = self.process_equality_formulas_as_extended_projections(
                    classified_formulas, new_output
                )
                if new_output is output:
                    raise CouldNotTranslateConjunctionException(expression)
            output = new_output
        return output

    def classify_formulas_obtain_names(self, expression):
        """Bucket each conjunct by kind and collect the named columns
        contributed by positive relations."""
        classified_formulas = {
            "pos_formulas": [],
            "neg_formulas": [],
            "eq_formulas": [],
            "ext_proj_formulas": [],
            "selection_formulas": [],
            "destroy_formulas": [],
            "named_columns": set(),
        }
        for formula in expression.formulas:
            formula = self.walk(formula)
            if isinstance(formula, Negation):
                classified_formulas["neg_formulas"].append(formula.formula)
            elif isinstance(formula, FunctionApplication):
                self.classify_formulas_obtain_named_function_applications(
                    formula, classified_formulas
                )
            else:
                classified_formulas["pos_formulas"].append(formula)
                if isinstance(formula, Constant):
                    classified_formulas["named_columns"].update(
                        formula.value.columns
                    )
                elif isinstance(formula, NameColumns):
                    classified_formulas["named_columns"].update(
                        formula.column_names
                    )
        return classified_formulas

    def classify_formulas_obtain_named_function_applications(
        self, formula, classified_formulas
    ):
        # Equalities split into plain equalities vs extended projections;
        # contains() over columns becomes a Destroy; everything else is a
        # selection criterion.
        if formula.functor == EQ:
            if formula.args[0] == formula.args[1]:
                # Trivially true equality: drop it.
                pass
            elif isinstance(formula.args[1], (Constant, Symbol)):
                classified_formulas["eq_formulas"].append(formula)
            elif isinstance(formula.args[1], FunctionApplication):
                classified_formulas["ext_proj_formulas"].append(formula)
        elif formula.functor == CONTAINS and (
            isinstance(formula.args[1], Constant[ColumnStr])
            or isinstance(formula.args[1], Constant[Tuple])
        ):
            classified_formulas["destroy_formulas"].append(formula)
        else:
            classified_formulas["selection_formulas"].append(formula)

    @staticmethod
    def process_positive_formulas(classified_formulas):
        # Natural-join all positive relations; no positives -> empty set.
        if len(classified_formulas["pos_formulas"]) == 0:
            output = Constant[AbstractSet](NamedRelationalAlgebraFrozenSet([]))
        else:
            output = classified_formulas["pos_formulas"][0]
            for pos_formula in classified_formulas["pos_formulas"][1:]:
                output = NaturalJoin(output, pos_formula)
        return output

    @staticmethod
    def process_negative_formulas(classified_formulas, output):
        # Each negation becomes a Difference; when the negated relation's
        # columns are a strict subset of the named ones it is first
        # extended by joining with the positive output (safe range).
        named_columns = classified_formulas["named_columns"]
        for neg_formula in classified_formulas["neg_formulas"]:
            neg_cols = TranslateToNamedRA.obtain_negative_columns(neg_formula)
            if named_columns > neg_cols:
                neg_formula = NaturalJoin(output, neg_formula)
            elif named_columns != neg_cols:
                raise NegativeFormulaNotSafeRangeException(neg_formula)
            output = Difference(output, neg_formula)
        return output

    @staticmethod
    def obtain_negative_columns(neg_formula):
        if isinstance(neg_formula, NameColumns):
            neg_cols = set(neg_formula.column_names)
        elif isinstance(neg_formula, Constant):
            neg_cols = set(neg_formula.value.columns)
        else:
            raise NegativeFormulaNotNamedRelationException(neg_formula)
        return neg_cols

    @staticmethod
    def process_destroy_formulas(classified_formulas, output):
        # Apply contains() formulas whose source column is available;
        # the destroyed (unnested) columns join the named set.
        destroy_to_keep = []
        named_columns = classified_formulas["named_columns"]
        for destroy in classified_formulas["destroy_formulas"]:
            if destroy.args[0] in named_columns:
                output = Destroy(output, destroy.args[0], destroy.args[1])
                if is_leq_informative(destroy.args[1].type, Tuple):
                    for arg in destroy.args[1].value:
                        named_columns.add(Constant(arg))
                else:
                    named_columns.add(destroy.args[1])
            else:
                destroy_to_keep.append(destroy)
        classified_formulas["destroy_formulas"] = destroy_to_keep
        return output

    @staticmethod
    def process_equality_formulas(classified_formulas, output):
        # Try each pending equality; keep the ones that made no progress.
        named_columns = classified_formulas["named_columns"]
        to_keep = []
        for formula in classified_formulas["eq_formulas"]:
            new_output = TranslateToNamedRA.process_equality_formula(
                formula, named_columns, output
            )
            if new_output is output:
                to_keep.append(formula)
            output = new_output
        classified_formulas["eq_formulas"] = to_keep
        return output

    @staticmethod
    def process_equality_formula(formula, named_columns, output):
        left, right = formula.args
        if (
            isinstance(left, Constant[ColumnStr])
            and isinstance(right, Constant)
            and not isinstance(right, Constant[ColumnStr])
        ):
            return TranslateToNamedRA.process_equality_formulas_constant(
                output, left, right, named_columns
            )
        criteria = EQ(left, right)
        # Column-to-column equality applies only once both columns exist.
        if left in named_columns and right in named_columns:
            output = Selection(output, criteria)
        return output

    @staticmethod
    def process_equality_formulas_constant(output, left, right, named_columns):
        if isinstance(output, Constant[AbstractSet]) and output.value.is_dum():
            # Empty output: the equality defines a one-row relation.
            return Constant[AbstractSet[Tuple[right.type]]](
                NamedRelationalAlgebraFrozenSet(
                    (left.value,), [(REBV.walk(right),)]
                )
            )
        elif left in named_columns:
            return Selection(output, EQ(left, right))
        else:
            return output

    @staticmethod
    def process_equality_formulas_as_extended_projections(
        classified_formulas, output
    ):
        """Fallback: realise remaining equalities as extended projections
        that create new columns from known ones (or from constants)."""
        named_columns = classified_formulas["named_columns"]
        extended_projections = tuple(
            ExtendedProjectionListMember(c, c) for c in named_columns
        )
        stack = list(classified_formulas["eq_formulas"])
        if len(stack) == 0:
            return output
        seen_counts = collections.defaultdict(int)
        while stack:
            formula = stack.pop()
            seen_counts[formula] += 1
            if seen_counts[formula] > 2:
                raise ForbiddenExpressionError(
                    f"Could not resolve equality {formula}"
                )
            # case y = x where y already in set (create new column x)
            if formula.args[0] in named_columns:
                src, dst = formula.args
            elif (
                # case x = y where y already in set (create new column x)
                formula.args[1] in named_columns
                # case x = C where C is a constant (create new constant col x)
                or TranslateToNamedRA.is_col_to_const_equality(formula)
            ):
                dst, src = formula.args
            else:
                # Neither side resolvable yet: requeue for a later pass.
                stack.insert(0, formula)
                continue
            extended_projections += (ExtendedProjectionListMember(src, dst),)
            named_columns.add(dst)
            seen_counts = collections.defaultdict(int)
        new_output = ExtendedProjection(output, extended_projections)
        classified_formulas["eq_formulas"] = []
        return new_output

    @staticmethod
    def is_col_to_const_equality(formula):
        return (
            isinstance(formula.args[0], Constant)
            and formula.args[0].type is ColumnStr
            and isinstance(formula.args[1], Constant)
            and formula.args[1].type is not ColumnStr
        )

    @staticmethod
    def process_extended_projection_formulas(classified_formulas, output):
        # Apply ``col = f(...)`` formulas whose operand columns are all
        # available; existing columns are re-projected unchanged.
        extended_projections = []
        to_keep = []
        named_columns = classified_formulas["named_columns"]
        dst_columns = set()
        for ext_proj in classified_formulas["ext_proj_formulas"]:
            dst_column, fun_exp = ext_proj.args
            cols_for_fun_exp = get_expression_columns(fun_exp)
            if cols_for_fun_exp.issubset(named_columns):
                extended_projections.append(
                    ExtendedProjectionListMember(fun_exp, dst_column)
                )
                dst_columns.add(dst_column)
            else:
                to_keep.append(ext_proj)
        if len(extended_projections) > 0:
            for column in classified_formulas["named_columns"]:
                extended_projections.append(
                    ExtendedProjectionListMember(column, column)
                )
            output = ExtendedProjection(output, extended_projections)
            named_columns |= dst_columns
        classified_formulas["ext_proj_formulas"] = to_keep
        return output

    @staticmethod
    def process_selection_formulas(classified_formulas, output):
        # Apply selection criteria whose columns are all available.
        to_keep = []
        for selection in classified_formulas["selection_formulas"]:
            selection_columns = get_expression_columns(selection)
            if selection_columns.issubset(
                classified_formulas["named_columns"]
            ):
                output = Selection(output, selection)
            else:
                to_keep.append(selection)
        classified_formulas["selection_formulas"] = to_keep
        return output
| 2.421875 | 2 |
maximum-depth-of-binary-tree/Solution.6583982.py | rahul-ramadas/leetcode | 0 | 12763797 | <reponame>rahul-ramadas/leetcode
class Solution:
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path
        of the binary tree rooted at *root* (0 for an empty tree)."""
        if root is None:
            return 0
        deepest = 0
        # Iterative DFS carrying (node, depth-so-far) pairs.
        pending = [(root, 1)]
        while pending:
            node, depth = pending.pop()
            if depth > deepest:
                deepest = depth
            for child in (node.left, node.right):
                if child is not None:
                    pending.append((child, depth + 1))
        return deepest
| 3.5 | 4 |
journal/views.py | metadeng/LVTUBEN_CD_TOOLKIL | 0 | 12763798 | <filename>journal/views.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import get_content_type_for_model
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.utils.encoding import force_text
from django.views.decorators.csrf import csrf_exempt
from utils.log_action_util import flag2action
@login_required
def index(request):
    """Render the journal overview template."""
    return render(request, 'journal/index.html')
@login_required
def journal_list(request):
    """Return a JSON page of admin ``LogEntry`` records.

    GET parameters: ``page`` (default 1), ``limit`` (default 10) and an
    optional ``key`` substring filter on ``change_message``.

    Fixes: non-numeric ``page``/``limit`` previously raised an uncaught
    ``ValueError`` from ``int()``; the total count now comes from
    ``paginator.count`` instead of ``len(queryset)``, which evaluated the
    whole table; the local variable no longer shadows the view name.
    """
    try:
        page = int(request.GET.get('page', 1))
    except (TypeError, ValueError):
        page = 1
    try:
        limit = int(request.GET.get('limit', 10))
    except (TypeError, ValueError):
        limit = 10
    key = request.GET.get('key')
    entries = LogEntry.objects.order_by('-action_time')
    if key:
        entries = entries.filter(change_message__contains=key)
    paginator = Paginator(entries, limit)
    try:
        journals = paginator.page(page)
    except PageNotAnInteger:
        journals = paginator.page(1)
    except EmptyPage:
        journals = paginator.page(paginator.num_pages)
    rows = []
    for j in journals:
        rows.append({
            'id': j.id,
            'action_time': str(j.action_time),
            'user': j.user.username,
            'model': j.content_type.model,
            'object_id': j.object_id,
            'object_repr': str(j.object_repr),
            'action_flag': flag2action(j.action_flag),
            'msg': str(j.change_message),
        })
    data = {'code': 0, 'msg': '', 'data': rows, 'count': paginator.count}
    return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
@csrf_exempt
def delete(request):
    """Bulk-delete the ``LogEntry`` rows whose ids arrive as POST ``sids[]``.

    Fix: the previous version assigned the exception *object* to ``msg``,
    which made ``json.dumps`` below fail; it is stringified now.
    """
    sids = request.POST.getlist('sids[]')
    status = False
    try:
        LogEntry.objects.filter(pk__in=sids).delete()
        msg = '删除成功'
        status = True
    except Exception as e:
        msg = str(e)
    data = {'status': status, 'msg': msg}
    return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def detail(request, sid):
    """Render the detail page for LogEntry *sid* (404 when absent)."""
    log = get_object_or_404(LogEntry, pk=sid)
    return render(request, 'journal/detail.html', {'log': log})
def log_addition(request, object, message):
    """Record an ADDITION LogEntry for *object* on behalf of the request's
    user.

    Now returns the created LogEntry, consistent with ``log_deletion``.
    """
    from django.contrib.admin.models import LogEntry, ADDITION
    return LogEntry.objects.log_action(
        user_id=request.user.pk,
        content_type_id=get_content_type_for_model(object).pk,
        object_id=object.pk,
        object_repr=force_text(object),
        action_flag=ADDITION,
        change_message=message,
    )
def log_change(request, object, message):
    """Record a CHANGE LogEntry for *object* on behalf of the request's
    user.

    Now returns the created LogEntry, consistent with ``log_deletion``.
    """
    from django.contrib.admin.models import LogEntry, CHANGE
    return LogEntry.objects.log_action(
        user_id=request.user.pk,
        content_type_id=get_content_type_for_model(object).pk,
        object_id=object.pk,
        object_repr=force_text(object),
        action_flag=CHANGE,
        change_message=message,
    )
def log_grant(request, object, message):
    """Record a custom "grant" LogEntry for *object* on behalf of the
    request's user.

    Now returns the created LogEntry, consistent with ``log_deletion``.
    """
    from django.contrib.admin.models import LogEntry
    # action_flag=4 is a project-specific flag (not one of Django's named
    # ADDITION/CHANGE/DELETION constants); see flag2action for its label.
    return LogEntry.objects.log_action(
        user_id=request.user.pk,
        content_type_id=get_content_type_for_model(object).pk,
        object_id=object.pk,
        object_repr=force_text(object),
        action_flag=4,
        change_message=message,
    )
def log_deletion(request, object, message):
    """Record a DELETION LogEntry for *object* on behalf of the request's
    user; returns the created LogEntry."""
    from django.contrib.admin.models import LogEntry, DELETION
    return LogEntry.objects.log_action(
        user_id=request.user.pk,
        content_type_id=get_content_type_for_model(object).pk,
        object_id=object.pk,
        object_repr=force_text(object),
        action_flag=DELETION,
        change_message=message,
    )
| 2.0625 | 2 |
zjson/ZJson.py | zoloypzuo/regex_engine | 0 | 12763799 | """
{func} beautified_json and {func} plain_json is simple to use, and it is as same as use {std} json with {func} encodable
"""
from collections import namedtuple, defaultdict, OrderedDict
from json import dumps, load as _load, loads as _loads
from re import sub
from Utils import *
JsonObject = object  # marker alias: an object the stdlib json module can (de)serialize; informational only, not enforced
Text = str  # indicate the parsed text
Regex = str  # indicate the regex str
# region serialize
def _make_encodable(obj, decodable=True) -> JsonObject:
    '''
    Recursively convert *obj* into something the stdlib json module can
    serialize.

    Class instances, namedtuples, tuples, sets and frozensets are encoded
    with special markers so they can be decoded back later; set
    ``decodable`` to False to output plain dicts instead, which is easier
    to read while debugging.

    :param obj: object to encode
    :param decodable: when True, record enough metadata to rebuild the
        original objects on decode
    :return: json_obj, commonly a dict
    '''
    # TODO error handling: there is no final else branch that raises for
    # unsupported types.
    # TODO the type restrictions may be relaxed later, so some spots need
    # rethinking, e.g. the class-object branch.
    # TODO class inheritance is not supported; supporting it is very hard
    # and requires understanding the MRO mechanism.
    # Simple take: class inheritance is hard to handle, so it is ignored
    # for now.
    # set/tuple support is hard-coded with sentinel markers; builtins
    # cannot be handled generically like classes (not verified).
    if isinstance(obj, (int, float, str, bool)):
        return obj
    elif isinstance(obj, list):
        return [_make_encodable(i, decodable=decodable) for i in obj]
    elif isinstance(obj, tuple):
        if is_namedtuple_instance(obj):
            # repr(obj) is "ClsName(field=..., ...)"; strip the arg list
            # to recover the class name.
            _ret_cls_name = sub("\(.*\)", "", repr(obj))
            _ret_item = {_make_encodable(key, decodable=decodable): _make_encodable(val, decodable=decodable) for
                         key, val in
                         dict(obj._asdict()).items()}
            return {'__namedtuple_cls_name': _ret_cls_name, '__namedtuple_dict': _ret_item}
        else:
            # Plain tuples become a list tagged with a sentinel head.
            _ret = [_make_encodable(i, decodable=decodable) for i in obj]
            _ret.insert(0, '__tuple__')
            return _ret
    elif isinstance(obj, set):
        _ret = [_make_encodable(i, decodable=decodable) for i in obj]
        _ret.insert(0, '__set__')
        return _ret
    elif isinstance(obj, frozenset):
        _ret = [_make_encodable(i, decodable=decodable) for i in obj]
        _ret.insert(0, '__frozenset__')
        return _ret
    elif isinstance(obj, dict):
        # JSON object keys must be strings.
        for key, val in obj.items():
            assert isinstance(key, str)
        return {_make_encodable(key, decodable=decodable): _make_encodable(val, decodable=decodable) for key, val in
                obj.items()}
    elif obj is None:
        return None
    else:  # class type; note that isinstanceof(object) is useless since even dict returns true
        if not decodable:  # if decoable is set to false, class info wont be recorded
            return {_make_encodable(key, decodable=decodable): _make_encodable(val, decodable=decodable) for key, val in
                    obj.__dict__.items()}
        else:
            # Record module + class name so _decode can re-import the class.
            _ret = {'__class_module__': obj.__class__.__module__, '__class_name__': obj.__class__.__name__,
                    '__class_dict__': {}}
            for key, value in obj.__dict__.items():
                _ret['__class_dict__'][_make_encodable(key, decodable=decodable)] = _make_encodable(value,
                                                                                                    decodable=decodable)
            return _ret
def beautified_json(obj, decodable=True) -> str:
    """Return a pretty-printed (indented, key-sorted) JSON string for *obj*.

    Handy for implementing ``__str__``, e.g.
    ``def __str__(self): return beautified_json(self)``.
    """
    encodable = _make_encodable(obj, decodable=decodable)
    return dumps(encodable, indent=4, sort_keys=True)
@compact
def plain_json(obj, decodable=True):
    """Return a plain/compact JSON string for *obj*.

    ``@compact`` is applied for testing purposes. The previous version
    called ``_make_encodable(obj)`` a first time with the default flags
    and discarded the result; that dead (and wasteful) call is removed.
    """
    return dumps(_make_encodable(obj, decodable=decodable))
# endregion
# region deserialize
def _decode(json_obj: JsonObject) -> object:
    """Inverse of ``_make_encodable``: rebuild Python objects from the
    tagged JSON structures produced by this module.

    Fix: the namedtuple-class cache lookup now uses ``is None`` instead of
    ``== None`` (identity check is the correct idiom for the
    ``defaultdict(lambda: None)`` sentinel).
    """
    if is_class_dict(json_obj):  # class must be checked before dict, or it will be...
        # if it is a encoded class, we load class (class is object in python) and new it, then recursively build its dict
        cls = load_class(json_obj['__class_module__'], json_obj['__class_name__'])
        instance = object.__new__(cls)
        instance.__dict__ = {_decode(key): _decode(val) for key, val in json_obj['__class_dict__'].items()}
        return instance
    elif is_named_tuple_dict(json_obj):
        global namedtuple_classes
        # Lazily (re)create the namedtuple class the first time its name
        # is seen, then cache it.
        if namedtuple_classes[json_obj['__namedtuple_cls_name']] is None:
            namedtuple_classes[json_obj['__namedtuple_cls_name']] = namedtuple(json_obj['__namedtuple_cls_name'],
                                                                               json_obj['__namedtuple_dict'].keys())
        cls = namedtuple_classes[json_obj['__namedtuple_cls_name']]
        return cls(**OrderedDict(json_obj['__namedtuple_dict']))
    elif isinstance(json_obj, (int, float, str, bool)):
        return json_obj
    elif isinstance(json_obj, list):
        _ret = [_decode(i) for i in json_obj]
        # Sentinel heads mark encoded tuple / set / frozenset values.
        if _ret:
            if _ret[0] == '__tuple__':
                return tuple(_ret[1:])
            elif _ret[0] == '__set__':
                return set(_ret[1:])
            elif _ret[0] == '__frozenset__':
                return frozenset(_ret[1:])
        return _ret
    elif json_obj is None:
        return None
    elif isinstance(json_obj, dict):
        return {_decode(key): _decode(val) for key, val in json_obj.items()}
def load(path) -> object:
    """Deserialize the JSON file at *path* back into Python objects."""
    with open(path, 'r') as fp:
        return _decode(_load(fp))
def loads(json_text: str) -> object:
    """Deserialize *json_text* (as produced by this module) back into
    Python objects.

    Fix: the previous version called ``_loads(json_text)`` twice, parsing
    the text once and discarding the result; a single pass suffices.
    """
    return _decode(_loads(json_text))
namedtuple_classes = defaultdict(lambda: None)  # cache of rebuilt namedtuple classes: cls_name -> class (None until first decode)
# endregion
# region json parser
import re
class ZJsonParseError(RuntimeError):
    """Base error raised by the hand-rolled JSON parser in this module."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return str(self.msg)
class ZJsonValueError(ZJsonParseError):
    """Raised when a fragment cannot be parsed as any JSON value."""
    def __init__(self, msg):
        super().__init__(msg)
class ZJsonTypeError(ZJsonParseError):
    """Raised for JSON type errors during parsing (currently unused here)."""
    def __init__(self, msg):
        super().__init__(msg)
def parse(json_text: Text) -> JsonObject:
    ''' Parse *json_text* with a regex-based grammar and return the json
    obj, or throw an internal exception (ZJsonValueError).

    NOTE(review): this parser is deliberately loose (see the per-rule
    comments); nested arrays/objects are split on bare commas/colons and
    will mis-parse for non-trivial inputs.
    '''
    Failure = (None, None)  # singleton that indicate parse failure, note failure is not error
    ws: Regex = '\s*'
    tokenizer: Regex = ws + '(%s)' + ws  # regex to match against (and consume) the start of remainder
    literal_map = {'true': True, 'false': False, 'null': None}
    literal: Regex = 'null|true|false'
    number: Regex = '-?[0-9]+(\.[0-9]+)?'  # not fully rigorous: only matches forms like -1.1 (no exponent notation)
    string: Regex = '\"(\\.|.)*\"'  # not rigorous: the escape handling here may even be wrong; whether regex alternation is ordered is unclear, and the greediness over string contents is unresolved
    value: Regex = '.*?'  # deliberately loose; real validation happens at the next recursion level
    array: Regex = r'\[((%s\,)*%s)\]' % (value, value)  # a bare [.*?] might suffice, but the extra structure validates the format
    pair: Regex = '%s\:%s' % (string, value)
    obj: Regex = r'\{((%s,)*%s)\}' % (pair, pair)

    def parse_literal(match_obj):
        return literal_map[match_obj.group(1)]

    def parse_number(match_obj):
        return float(match_obj.group(1))

    def parse_string(match_obj):
        return match_obj.group(1).strip('\"')

    def parse_array(match_obj):
        # NOTE(review): splitting on ',' breaks for nested containers.
        return [parse_value(i)[0] for i in match_obj.group(2).split(',')]

    def parse_obj(match_obj):
        return {pair[0].strip('\"'): parse_value(pair[1])[0] for pair in
                map(lambda x: x.split(':'), match_obj.group(2).split(','))}

    # a tuple of pairs would be better here, since only iteration is needed
    grammar = {
        literal: parse_literal,
        number: parse_number,
        string: parse_string,
        array: parse_array,
        obj: parse_obj
    }

    def parse_value(remainder: Text):
        '''return (json_obj, remainder) or Fail '''
        # Try each grammar rule in turn; first match wins.
        for k, v in grammar.items():
            match_obj = re.match(tokenizer % k, remainder)
            if match_obj:
                return v(match_obj), None  # remainder[match_obj.end():]
        raise ZJsonValueError('invalid json value')

    return parse_value(json_text)[0]
# endregion
| 3.265625 | 3 |
game/combatant.py | Moguri/prototype-mercury | 2 | 12763800 | import random
from direct.interval import IntervalGlobal as intervals
from .monster import MonsterActor
from . import effects
from . import gamedb
class Combatant:
    """Battle-time wrapper around a Monster: tracks HP/CT state, owns the
    MonsterActor, and delegates unknown attributes to the monster first
    and then to the actor (see ``__getattr__``)."""

    def __init__(self, monster, parent_node):
        gdb = gamedb.get_instance()
        self._monster = monster
        form = monster.form
        self._current_hp = self.max_hp
        # Randomize the initial charge-time so turn order varies.
        self.current_ct = random.randrange(0, 10)
        # Everyone gets basic_attack plus the monster's abilities that
        # actually define effects.
        self.abilities = [
            gdb['abilities']['basic_attack']
        ] + [
            ability
            for ability in monster.abilities
            if ability.effects
        ]
        self.range_index = 0
        self.target = None
        self.tile_position = (0, 0)
        self.lock_controls = False
        self._actor = MonsterActor(form, parent_node, monster.weapon)

    def __getattr__(self, name):
        # Delegation order matters: monster attributes shadow actor ones.
        if hasattr(self._monster, name):
            return getattr(self._monster, name)
        return getattr(self._actor, name)

    @property
    def weapon(self):
        return self._monster.weapon

    @weapon.setter
    def weapon(self, value):
        self._monster.weapon = value

    @property
    def current_hp(self):
        return self._current_hp

    @current_hp.setter
    def current_hp(self, value):
        # Trigger the death animation exactly once, on the transition
        # from alive to dead.
        wasdead = self.is_dead
        self._current_hp = value
        if not wasdead and self.is_dead:
            self.play_anim('death')

    @property
    def max_hp(self):
        return self._monster.hit_points

    @property
    def is_dead(self):
        return self.current_hp <= 0

    def get_state(self):
        """Return a UI-friendly snapshot of name/HP/CT (CT capped at 100)."""
        return {
            'name': self.name,
            'hp_current': self.current_hp,
            'hp_max': self.max_hp,
            'ct_current': min(100, self.current_ct),
            'ct_max': 100,
        }

    def use_ability(self, ability, target, controller, effect_node):
        """Build the interval sequence that announces and plays *ability*
        against *target*; both combatants record each other as targets."""
        self.target = target
        target.target = self
        return intervals.Sequence(
            intervals.Func(
                controller.display_message,
                f'{self.name} is using {ability.name} '
                f'on {target.name}'
            ),
            effects.sequence_from_ability(
                effect_node,
                self,
                ability,
                controller
            )
        )

    def can_use_ability(self, _ability):
        # No restrictions yet; every ability is always usable.
        return True
| 2.484375 | 2 |
pyyaks/context.py | sot/pyyaks | 1 | 12763801 | <reponame>sot/pyyaks<gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import re
import os
import time
import stat
import pdb
import logging
from six.moves import cPickle as pickle
from copy import deepcopy
import jinja2
import pyyaks.fileutil
class NullHandler(logging.Handler):
    """A no-op logging handler that silently discards every record."""

    def emit(self, record):
        # Drop the record so the library produces no output unless the
        # application installs its own handlers.
        return
# Module-level logger for pyyaks.  A NullHandler is attached so that the
# library emits nothing unless the application configures logging itself.
logger = logging.getLogger('pyyaks')
logger.addHandler(NullHandler())
# Do not pass records up to the root logger (avoids duplicate output).
logger.propagate = False

# Global registry of named ContextDict objects, keyed by name.  Populated by
# ContextDict.__new__ and consumed by render(), update_context() and
# store_context().
CONTEXT = {}
def render(val):
    """Render ``val`` through the template engine using the current context.

    :param val: input value (plain value or ContextValue)
    :returns: rendered string value
    """
    value = val if isinstance(val, ContextValue) else ContextValue(val)
    return str(value)
def render_args(*argids):
    """
    Decorate a function so that the specified positional arguments are
    rendered via context.render() before being passed to the function.
    Keyword arguments are unaffected.

    :param argids: 1-based positions of the arguments to render.  If no
        positions are given, every positional argument is rendered.

    Examples::

      # Apply render() to all 3 args
      @context.render_args()
      def func(arg1, arg2, arg3):
          return arg1, arg2, arg3

      # Render just arg1
      @context.render_args(1)
      def func(arg1, arg2, arg3):
          return arg1, arg2, arg3

      # Render arg1 and arg3
      @context.render_args(1, 3)
      def func(arg1, arg2, arg3):
          return arg1, arg2, arg3
    """
    import functools

    def decorate(func):
        def newfunc(*args, **kwargs):
            # An empty ``argids`` means "render everything", so the 0-based
            # index list must be rebuilt per call from the actual arg count.
            ids = [x - 1 for x in argids] if argids else list(range(len(args)))
            newargs = [(render(x) if i in ids else x)
                       for (i, x) in enumerate(args)]
            return func(*newargs, **kwargs)
        # update_wrapper copies __name__, __doc__, etc. and silently skips
        # any attribute the wrapped callable (e.g. a built-in) lacks.
        functools.update_wrapper(newfunc, func)
        return newfunc
    return decorate
def update_context(filename, keys=None):
    """Update the current context from ``filename``.  This file should be
    created with ``store_context()``.

    :param filename: name of file containing context
    :param keys: list of keys in CONTEXT to update (default=None => all)
    :rtype: None

    :raises KeyError: if the stored context contains a ContextDict name that
        is not registered in the current CONTEXT.
    """
    logger.verbose('Restoring context from %s' % filename)
    # Use a context manager so the file handle is closed promptly instead of
    # relying on garbage collection.
    with open(filename, 'rb') as pickle_file:
        context = pickle.load(pickle_file)
    for name in context:
        if keys and name not in keys:
            continue
        if name not in CONTEXT:
            raise KeyError('ContextDict %s found in %s but not in existing CONTEXT' %
                           (name, filename))
        CONTEXT[name].update(context[name])
def store_context(filename, keys=None):
    """Store the current context to ``filename``.

    A false-ish ``filename`` is a no-op.

    :param filename: name of file for storing context
    :param keys: list of keys in CONTEXT to store (default=None => all)
    :rtype: None
    """
    if filename:
        logger.verbose('Storing context to %s' % filename)
        # Pickle either the selected subset or the full registry.
        if keys:
            dump_context = dict((x, CONTEXT[x]) for x in keys)
        else:
            dump_context = CONTEXT
        # Context manager guarantees the file is flushed and closed even if
        # pickling raises.
        with open(filename, 'wb') as pickle_file:
            pickle.dump(dump_context, pickle_file)
class ContextValue(object):
    """Value with context that has a name and modification time.

    If ``val`` is an existing ContextValue then all of its attributes except
    ``ext`` are inherited by the new object.

    :param val: initial value (optional)
    :param name: context value name
    :param format: optional format specifier when rendering value
    :param ext: extension to be added when rendering a file context value
    :param parent: parent ContextDict; supplies the full-name prefix and the
        ``basedir`` used when rendering file values
    """
    def __init__(self, val=None, name=None, format=None, ext=None, parent=None):
        # Possibly inherit attrs (except for 'ext') from an existing ContextValue object
        if isinstance(val, ContextValue):
            for attr in ('_val', '_mtime', '_name', 'parent', 'format'):
                setattr(self, attr, getattr(val, attr))
        else:
            self._val = val
            # Track when the value was last set; None means "never set".
            self._mtime = None if val is None else time.time()
            self._name = name
            self.parent = parent
            self.format = format
        self.ext = ext

    def clear(self):
        """Clear the value, modification time, and format (set to None)"""
        self._val = None
        self._mtime = None

    def getval(self):
        """Return the raw (unrendered) value."""
        return self._val

    def setval(self, val):
        """Set the raw value and update the modification time.

        Assigning an existing ContextValue re-initializes this object from it
        (inheriting everything except ``ext``).
        """
        if isinstance(val, ContextValue):
            self.__init__(val, ext=val.ext)
        else:
            self._val = val
            self._mtime = time.time()

    val = property(getval, setval)
    """Set or get with the ``val`` attribute"""

    @property
    def fullname(self):
        """Dotted name including the parent ContextDict name (if any)."""
        return (self.parent._name + '.' + self.name) if self.parent else self.name

    @property
    def name(self):
        """Name with the file extension (if any) appended."""
        return self._name + ('.' + self.ext if self.ext else '')

    @property
    def basedir(self):
        """Base directory inherited from the parent ContextDict (or None)."""
        return None if (self.parent is None) else self.parent.basedir

    @property
    def mtime(self):
        """Modification time"""
        # For file values use the filesystem mtime of the rendered path
        # (None if the file does not exist); otherwise use the time the
        # value was last assigned.
        if self.basedir:
            filename = str(self)
            return (os.stat(filename)[stat.ST_MTIME] if os.path.exists(filename) else None)
        else:
            return self._mtime

    def __unicode__(self):
        # Python 2 compatibility: unicode() uses the same rendering as str().
        return str(self)

    def __str__(self):
        """Render the value through jinja2 using the global CONTEXT.

        Template expansion repeats until the output stops changing, so a
        value may refer to other context values that themselves contain
        template tags.  Non-string values are rendered with ``self.format``
        (default '%s').  File values are then joined with ``basedir`` and
        converted to a relative path.

        :raises ValueError: if the value is undefined (None)
        """
        strval = val = self._val
        if val is None:
            raise ValueError("Context value '%s' is undefined" % self.fullname)
        # Matches the start of a jinja2 statement '{%' or expression '{{'.
        template_tag = re.compile(r'{[%{]')
        try:
            # Following line will give TypeError unless val is string-like
            while (template_tag.search(val)):
                template = jinja2.Template(val)
                strval = template.render(CONTEXT)
                if strval == val:
                    # Fixed point reached: nothing further to expand.
                    break
                else:
                    val = strval
        except TypeError:
            strval = (self.format or '%s') % val
        if self.basedir:
            # Note that os.path.join(a,b) returns b is b is already absolute
            ext = ('.' + self.ext if self.ext else '')
            strval0 = strval
            # basedir is PATH-like (os.pathsep separated); use the first
            # component where the file exists, else the last one tried.
            for basedir in self.basedir.split(os.pathsep):
                strval = pyyaks.fileutil.relpath(os.path.join(basedir, strval0) + ext)
                if os.path.exists(strval):
                    break
        return strval

    def __fspath__(self):
        """ABC os.PathLike interface ContextValue is directly useable in Path or
        os.path or open, etc.

        https://docs.python.org/3/library/os.html#os.PathLike
        """
        return str(self)

    @property
    def type(self):
        """Either 'file' (parent supplies a basedir) or 'value'."""
        return 'value' if (self.basedir is None) else 'file'

    @property
    def rel(self):
        """File context value as a relative path or self._val if not a file.

        Basedir can have multiple base paths separated by ':' (os.pathsep) like the
        linux PATH. The first base path for which the content file path exists is
        returned, or if none exist then the last relative path will be returned.
        """
        return str(self)

    @property
    def abs(self):
        """File context value as an absolute path or self._val if not a file.

        Basedir can have multiple base paths separated by ':' (os.pathsep) like the
        linux PATH. The first base path for which the content file path exists is
        returned, or if none exist then the last absolute path will be returned.
        """
        return str(self._val) if (self.basedir is None) else os.path.abspath(str(self))

    def __getattr__(self, ext):
        """Interpret an unfound attribute lookup as a file extension.

        A new ContextValue object with that extension is returned.
        """
        # pickle looks for some specific attributes beginning with __ and expects
        # AttributeError if they are not provided by class.
        if ext.startswith('__'):
            raise AttributeError
        else:
            return ContextValue(val=self, ext=ext)
class ContextDict(dict):
    """Dictionary class that automatically registers the dict in the module
    CONTEXT and overrides __setitem__ to create an appropriate ContextValue
    when assigning to a dict key.  If no ``name`` is supplied then the
    ContextDict is not registered in the global CONTEXT.

    :param name: name by which dictionary is registered in context.
    :param basedir: base directory for file context
    """
    def __new__(cls, name=None, basedir=None):
        # ContextDict is a per-name singleton: constructing with an already
        # registered name returns the existing instance (basedirs must agree).
        if name in CONTEXT:
            if basedir != CONTEXT[name].basedir:
                raise ValueError("Re-using context name '{}' but basedirs don't match "
                                 "({} vs. {})".format(name, basedir, CONTEXT[name].basedir))
            return CONTEXT[name]

        self = super(ContextDict, cls).__new__(cls)
        if name is not None:
            CONTEXT[name] = self
        self._name = name
        self.basedir = basedir
        # Stack of deep copies used by the __enter__/__exit__ context manager.
        self._context_manager_cache = []
        # Attribute-style accessors, e.g. ``cd.val.key`` or ``cd.rel.key``.
        for attr in ('val', 'rel', 'abs', 'format'):
            setattr(self, attr, _ContextDictAccessor(self, attr))
        return self

    def __init__(self, *args, **kwargs):
        # Initialization is done in __new__, so don't do anything here
        pass

    def __getitem__(self, key):
        """Get key value from the ContextDict. For a ContextDict with base
        then allow for extensions on key.
        """
        # Split 'name.ext' into the base key and an optional file extension.
        match = re.match(r'([^.]+)\.(.+)', key)
        base, ext = match.groups() if match else (key, None)
        # Autogenerate an entry for key
        if base not in self:
            value = ContextValue(val=None, name=base, parent=self)
            logger.debug('Autogen %s with name=%s basedir=%s' %
                         (repr(value), base, self.basedir))
            dict.__setitem__(self, base, value)
        baseContextValue = dict.__getitem__(self, base)
        # When an extension was requested, wrap the stored value in a new
        # ContextValue carrying that extension.
        return (ContextValue(baseContextValue, ext=ext) if ext else baseContextValue)

    def __setitem__(self, key, val):
        """Set ``key`` to ``val``, wrapping it in a ContextValue as needed."""
        # If ContextValue was already init'd then just update val
        if key in self:
            value = dict.__getitem__(self, key)
            logger.debug('Setting value %s with name=%s val=%s basedir=%s' %
                         (repr(value), repr(key), repr(val), self.basedir))
            value.val = val
        else:
            if '.' in key:
                raise ValueError('Dot not allowed in ContextDict key ' + key)
            value = ContextValue(val=val, name=key, parent=self)
            logger.debug('Creating value %s with name=%s val=%s basedir=%s' %
                         (repr(value), repr(key), repr(val), self.basedir))
            dict.__setitem__(self, key, value)

    def __enter__(self):
        """
        Context manager to cache this ContextDict object::

          context_val = Context('context_val')
          with context_val:
              pass
        """
        # Push a copy of self onto a stack
        self._context_manager_cache.append(deepcopy(self))

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Pop the most recent cached version and update self
        self_cache = self._context_manager_cache.pop()
        self.update(self_cache)
        # Delete any keys now in self that weren't in the cached version
        delkeys = [key for key in self if key not in self_cache]
        for key in delkeys:
            del self[key]

    def cache(self, func):
        """
        Decorator to cache this ContextDict object
        """
        import functools

        @functools.wraps(func)
        def wrap_func(*args, **kwargs):
            self_cache = deepcopy(self)
            try:
                result = func(*args, **kwargs)
            finally:
                # Restore to self_cache and delete any keys now in self that weren't in the
                # cached version
                self.update(self_cache)
                delkeys = [key for key in self if key not in self_cache]
                for key in delkeys:
                    del self[key]
            return result
        return wrap_func

    def update(self, vals):
        """Update from a dict-like object or an iterable of (key, val) pairs."""
        if hasattr(vals, 'items'):
            vals = vals.items()
        for key, val in vals:
            self[key] = val

    def __repr__(self):
        # Show raw (unrendered) values for readability.
        return str(dict((key, self[key].val) for key in self))

    def clear(self):
        """Clear all values in dictionary. The keys are not deleted so that
        ContextValue references in task decorators maintain validity."""
        for key in self:
            dict.__getitem__(self, key).clear()

    def get_basedir(self):
        """Return the base directory (possibly os.pathsep-joined paths)."""
        return self._basedir

    def set_basedir(self, val):
        """Set the base directory; each ':'-separated part is made absolute."""
        if val is None:
            self._basedir = None
        else:
            # Split on : which is not followed by \ (which would almost certainly
            # be a Windows file path like C:\\Users).
            non_windows_colon = re.compile(r':(?=[^\\])')
            vals = [os.path.abspath(x) for x in non_windows_colon.split(val)]
            self._basedir = os.pathsep.join(vals)

    basedir = property(get_basedir, set_basedir)
class _ContextDictAccessor(object):
    """Get or set one ContextValue attribute via attribute or item syntax.

    Proxies ``accessor.key`` (or ``accessor['key']``) to
    ``getattr(contextdict['key'], attr)`` and the corresponding assignment
    to ``setattr(contextdict['key'], attr, value)``.

    Examples::

      src = ContextDict('src')
      src.val.joe = 2            # same as src['joe'] = 2
      x = src.val.joe            # src['joe'].val
      src.format.joe = '%03d'
      print src['joe']

      files = ContextDict('files', basedir='.')
      files['jane'] = '{{src.joe}}/jane'
      print files.rel.jane
      print files.abs.jane
    """
    def __init__(self, contextdict, attr):
        # Bypass this class's own __setattr__ hook, which would otherwise
        # try to route these writes through the (not yet stored) dict.
        object.__setattr__(self, '_contextdict', contextdict)
        object.__setattr__(self, '_attr', attr)

    def __getattr__(self, name):
        # pickle probes dunder attributes and expects AttributeError when a
        # class does not provide them.
        if name.startswith('__'):
            raise AttributeError
        target = self._contextdict[name]
        return getattr(target, self._attr)

    def __setattr__(self, name, value):
        target = self._contextdict[name]
        setattr(target, self._attr, value)

    def __getitem__(self, name):
        return self.__getattr__(name)

    def __setitem__(self, name, value):
        self.__setattr__(name, value)
| 2.171875 | 2 |
tests/test_camel_to_upper_snake.py | HTPhenotyping/htcondor_file_transfer | 0 | 12763802 | <filename>tests/test_camel_to_upper_snake.py
import pytest
from xfer import camel_to_upper_snake
@pytest.mark.parametrize(
    "value, expected",
    [
        ("CamelCase", "CAMEL_CASE"),
        ("AlTeRnAtInG", "AL_TE_RN_AT_IN_G"),
        ("foobar", "FOOBAR"),
        ("FOOBAR", "FOOBAR"),
        ("fooBar", "FOO_BAR"),
        ("DAGMan", "DAG_MAN"),
        ("twoGroups", "TWO_GROUPS"),
    ],
)
def test_camel_to_upper_snake(value, expected):
    # 'value'/'expected' avoid shadowing the builtin ``input``.
    assert camel_to_upper_snake(value) == expected
| 2.703125 | 3 |
nengo/networks/product.py | HugoChateauLaurent/nengo | 0 | 12763803 | <filename>nengo/networks/product.py
import warnings
import numpy as np
import nengo
from nengo.networks.ensemblearray import EnsembleArray
def Product(n_neurons, dimensions, input_magnitude=1., net=None, **kwargs):
    """Computes the element-wise product of two equally sized vectors.

    The network used to calculate the product is described in
    `Gosmann, 2015`_. A simpler version of this network can be found in the
    :doc:`Multiplication example <examples/basic/multiplication>`.

    Note that this network is optimized under the assumption that both input
    values (or both values for each input dimensions of the input vectors) are
    uniformly and independently distributed. Visualized in a joint 2D space,
    this would give a square of equal probabilities for pairs of input values.

    This assumption is violated with non-uniform input value distributions
    (for example, if the input values follow a Gaussian or cosine similarity
    distribution). In that case, no square of equal probabilities is obtained,
    but a probability landscape with circular equi-probability lines. To obtain
    the optimal network accuracy, scale the *input_magnitude* by a factor of
    ``1 / sqrt(2)``.

    .. _Gosmann, 2015:
       https://nbviewer.jupyter.org/github/ctn-archive/technical-reports/blob/
       master/Precise-multiplications-with-the-NEF.ipynb

    Parameters
    ----------
    n_neurons : int
        Number of neurons per dimension in the vector.

        .. note:: These neurons will be distributed evenly across two
                  ensembles. If an odd number of neurons is specified, the
                  extra neuron will not be used.
    dimensions : int
        Number of dimensions in each of the vectors to be multiplied.
    input_magnitude : float, optional (Default: 1.)
        The expected magnitude of the vectors to be multiplied.
        This value is used to determine the radius of the ensembles
        computing the element-wise product.
    net : Network, optional (Default: None)
        Deprecated: an existing network to build the product network into,
        instead of creating a new one.
    kwargs
        Keyword arguments passed through to ``nengo.Network``.

    Returns
    -------
    net : Network
        The newly built product network, or the provided ``net``.

    Attributes
    ----------
    net.input_a : Node
        The first vector to be multiplied.
    net.input_b : Node
        The second vector to be multiplied.
    net.output : Node
        The resulting product.
    net.sq1 : EnsembleArray
        Represents the first squared term. See `Gosmann, 2015`_ for details.
    net.sq2 : EnsembleArray
        Represents the second squared term. See `Gosmann, 2015`_ for details.
    """
    if net is None:
        kwargs.setdefault('label', "Product")
        net = nengo.Network(**kwargs)
    else:
        warnings.warn("The 'net' argument is deprecated.", DeprecationWarning)

    with net:
        net.input_a = net.A = nengo.Node(size_in=dimensions, label="input_a")
        net.input_b = net.B = nengo.Node(size_in=dimensions, label="input_b")
        net.output = nengo.Node(size_in=dimensions, label="output")

        # The product is computed per-dimension via the polarization identity
        #   a * b = ((a + b)**2 - (a - b)**2) / 4,
        # so only 1-D squaring nonlinearities need to be represented:
        # sq1 represents (a + b) / sqrt(2) and sq2 represents (a - b) / sqrt(2).
        net.sq1 = EnsembleArray(
            max(1, n_neurons // 2), n_ensembles=dimensions, ens_dimensions=1,
            radius=input_magnitude * np.sqrt(2))
        net.sq2 = EnsembleArray(
            max(1, n_neurons // 2), n_ensembles=dimensions, ens_dimensions=1,
            radius=input_magnitude * np.sqrt(2))

        # Scale each input by 1/sqrt(2) so the summed/differenced values stay
        # within the ensembles' radius.
        tr = 1. / np.sqrt(2.)
        nengo.Connection(
            net.input_a, net.sq1.input, transform=tr, synapse=None)
        nengo.Connection(
            net.input_b, net.sq1.input, transform=tr, synapse=None)
        nengo.Connection(
            net.input_a, net.sq2.input, transform=tr, synapse=None)
        nengo.Connection(
            net.input_b, net.sq2.input, transform=-tr, synapse=None)

        # With the 1/sqrt(2) input scaling the squares are (a +/- b)**2 / 2,
        # so +/- 0.5 output weights complete the identity above.
        sq1_out = net.sq1.add_output('square', np.square)
        nengo.Connection(sq1_out, net.output, transform=.5, synapse=None)
        sq2_out = net.sq2.add_output('square', np.square)
        nengo.Connection(sq2_out, net.output, transform=-.5, synapse=None)

    return net
def dot_product_transform(dimensions, scale=1.0):
    """Return a transform for output that computes the scaled dot product.

    The result is a ``(1, dimensions)`` array filled with ``scale``; used as
    a connection transform it sums all input dimensions, scaled by ``scale``.
    """
    ones_row = np.ones((1, dimensions))
    return ones_row * scale
| 3.484375 | 3 |
view/progress.py | rbzargon/py_flashcards | 0 | 12763804 | <reponame>rbzargon/py_flashcards
"""
<NAME>
CS521
Due June 30, 2019
Term project
Flashcard application - view for a progress bar
"""
import tkinter as tk
from tkinter import ttk
from utils import pad_children
class ProgressView(ttk.Frame):
'''Class for displaying a progress bar'''
def __init__(self, parent, maximum, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self._value: tk.IntVar = tk.IntVar(0)
self.progressbar = ttk.Progressbar(
self, value=0, maximum=maximum)
self.progressbar.grid(column=0, row=0, sticky='nsew')
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
pad_children(self)
@property
def value(self) -> int:
return self.progressbar['value']
@value.setter
def value(self, value: int) -> None:
self.progressbar['value'] = value
| 3 | 3 |
src/classifier.py | AlfME/proyecto_sin | 0 | 12763805 | """
The pipeline for realizing classification. Consists of a data pre-processing pipeline, a training
module for generating and storing classifiers and finally a procedure to load a defined classifer.
Generic pipeline structure:
1. Tokenization
2. Instance feature cleaning
3. Noise removal
4. Feature Extraction
5. Classification
"""
import nltk
import pickle
import nltk.classify.util
class ClassificationPipeline:
"""
Sets the data of the pipeline. It should be given as a list of pairs of (instance, class_labels), where the instance is a text string and the class labels are a tupel of class labels.
"""
def __init__(self):
self.labelsel = None
def setData(self, data):
self.data = data
def getProcessedData(self):
return self.dataProcessed
def setTokenizer(self, tokenizer):
self.tokenizer = tokenizer
def setCleaner(self, cleaner):
self.cleaner = cleaner
def setFeatureEx(self, featex):
self.featex = featex
def setLabelSelection(self, labelsel):
self.labelsel = labelsel
def setClassifier(self, classifier):
self.classifier = classifier
def loadClassifierFromFile(self, name):
f = open("../classifiers/" + name + '.pickle', 'rb')
self.classifier = pickle.load(f)
f.close()
def storeClassifierInFile(self, name):
f = open("../classifiers/" + name + '.pickle', 'wb')
pickle.dump(self.classifier, f)
f.close()
def getClassifier(self):
return self.classifier
def trainClassifier(self):
self.preprocess()
self.train
def preprocess(self):
dataTokenized = self.tokenizer(self.data)
self.dataProcessed = self.cleaner(dataTokenized)
self.dataProcessed = self.featex(self.dataProcessed)
if self.labelsel != None:
self.dataProcessed = self.labelsel(self.dataProcessed)
def train(self):
self.classifier = self.classifier.train(self.dataProcessed, max_iter=15)
| 3.578125 | 4 |
sdk/python/pulumi_proxmox/lxc_container.py | beyondcloud-co/pulumi-proxmox | 0 | 12763806 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['LXCContainerArgs', 'LXCContainer']
@pulumi.input_type
class LXCContainerArgs:
def __init__(__self__, *,
target_node: pulumi.Input[str],
arch: Optional[pulumi.Input[str]] = None,
bwlimit: Optional[pulumi.Input[int]] = None,
clone: Optional[pulumi.Input[str]] = None,
clone_storage: Optional[pulumi.Input[str]] = None,
cmode: Optional[pulumi.Input[str]] = None,
console: Optional[pulumi.Input[bool]] = None,
cores: Optional[pulumi.Input[int]] = None,
cpulimit: Optional[pulumi.Input[int]] = None,
cpuunits: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
features: Optional[pulumi.Input['LXCContainerFeaturesArgs']] = None,
force: Optional[pulumi.Input[bool]] = None,
full: Optional[pulumi.Input[bool]] = None,
hagroup: Optional[pulumi.Input[str]] = None,
hastate: Optional[pulumi.Input[str]] = None,
hookscript: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
ignore_unpack_errors: Optional[pulumi.Input[bool]] = None,
lock: Optional[pulumi.Input[str]] = None,
memory: Optional[pulumi.Input[int]] = None,
mountpoints: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerMountpointArgs']]]] = None,
nameserver: Optional[pulumi.Input[str]] = None,
networks: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerNetworkArgs']]]] = None,
onboot: Optional[pulumi.Input[bool]] = None,
ostemplate: Optional[pulumi.Input[str]] = None,
ostype: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
pool: Optional[pulumi.Input[str]] = None,
protection: Optional[pulumi.Input[bool]] = None,
restore: Optional[pulumi.Input[bool]] = None,
rootfs: Optional[pulumi.Input['LXCContainerRootfsArgs']] = None,
searchdomain: Optional[pulumi.Input[str]] = None,
ssh_public_keys: Optional[pulumi.Input[str]] = None,
start: Optional[pulumi.Input[bool]] = None,
startup: Optional[pulumi.Input[str]] = None,
swap: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[str]] = None,
template: Optional[pulumi.Input[bool]] = None,
tty: Optional[pulumi.Input[int]] = None,
unique: Optional[pulumi.Input[bool]] = None,
unprivileged: Optional[pulumi.Input[bool]] = None,
vmid: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a LXCContainer resource.
"""
pulumi.set(__self__, "target_node", target_node)
if arch is not None:
pulumi.set(__self__, "arch", arch)
if bwlimit is not None:
pulumi.set(__self__, "bwlimit", bwlimit)
if clone is not None:
pulumi.set(__self__, "clone", clone)
if clone_storage is not None:
pulumi.set(__self__, "clone_storage", clone_storage)
if cmode is not None:
pulumi.set(__self__, "cmode", cmode)
if console is not None:
pulumi.set(__self__, "console", console)
if cores is not None:
pulumi.set(__self__, "cores", cores)
if cpulimit is not None:
pulumi.set(__self__, "cpulimit", cpulimit)
if cpuunits is not None:
pulumi.set(__self__, "cpuunits", cpuunits)
if description is not None:
pulumi.set(__self__, "description", description)
if features is not None:
pulumi.set(__self__, "features", features)
if force is not None:
pulumi.set(__self__, "force", force)
if full is not None:
pulumi.set(__self__, "full", full)
if hagroup is not None:
pulumi.set(__self__, "hagroup", hagroup)
if hastate is not None:
pulumi.set(__self__, "hastate", hastate)
if hookscript is not None:
pulumi.set(__self__, "hookscript", hookscript)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if ignore_unpack_errors is not None:
pulumi.set(__self__, "ignore_unpack_errors", ignore_unpack_errors)
if lock is not None:
pulumi.set(__self__, "lock", lock)
if memory is not None:
pulumi.set(__self__, "memory", memory)
if mountpoints is not None:
pulumi.set(__self__, "mountpoints", mountpoints)
if nameserver is not None:
pulumi.set(__self__, "nameserver", nameserver)
if networks is not None:
pulumi.set(__self__, "networks", networks)
if onboot is not None:
pulumi.set(__self__, "onboot", onboot)
if ostemplate is not None:
pulumi.set(__self__, "ostemplate", ostemplate)
if ostype is not None:
pulumi.set(__self__, "ostype", ostype)
if password is not None:
pulumi.set(__self__, "password", password)
if pool is not None:
pulumi.set(__self__, "pool", pool)
if protection is not None:
pulumi.set(__self__, "protection", protection)
if restore is not None:
pulumi.set(__self__, "restore", restore)
if rootfs is not None:
pulumi.set(__self__, "rootfs", rootfs)
if searchdomain is not None:
pulumi.set(__self__, "searchdomain", searchdomain)
if ssh_public_keys is not None:
pulumi.set(__self__, "ssh_public_keys", ssh_public_keys)
if start is not None:
pulumi.set(__self__, "start", start)
if startup is not None:
pulumi.set(__self__, "startup", startup)
if swap is not None:
pulumi.set(__self__, "swap", swap)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if template is not None:
pulumi.set(__self__, "template", template)
if tty is not None:
pulumi.set(__self__, "tty", tty)
if unique is not None:
pulumi.set(__self__, "unique", unique)
if unprivileged is not None:
pulumi.set(__self__, "unprivileged", unprivileged)
if vmid is not None:
pulumi.set(__self__, "vmid", vmid)
@property
@pulumi.getter(name="targetNode")
def target_node(self) -> pulumi.Input[str]:
return pulumi.get(self, "target_node")
@target_node.setter
def target_node(self, value: pulumi.Input[str]):
pulumi.set(self, "target_node", value)
@property
@pulumi.getter
def arch(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "arch")
@arch.setter
def arch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arch", value)
@property
@pulumi.getter
def bwlimit(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "bwlimit")
@bwlimit.setter
def bwlimit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "bwlimit", value)
@property
@pulumi.getter
def clone(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "clone")
@clone.setter
def clone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "clone", value)
@property
@pulumi.getter(name="cloneStorage")
def clone_storage(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "clone_storage")
@clone_storage.setter
def clone_storage(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "clone_storage", value)
@property
@pulumi.getter
def cmode(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cmode")
@cmode.setter
def cmode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cmode", value)
@property
@pulumi.getter
def console(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "console")
@console.setter
def console(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "console", value)
@property
@pulumi.getter
def cores(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "cores")
@cores.setter
def cores(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cores", value)
@property
@pulumi.getter
def cpulimit(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "cpulimit")
@cpulimit.setter
def cpulimit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpulimit", value)
@property
@pulumi.getter
def cpuunits(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "cpuunits")
@cpuunits.setter
def cpuunits(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpuunits", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def features(self) -> Optional[pulumi.Input['LXCContainerFeaturesArgs']]:
return pulumi.get(self, "features")
@features.setter
def features(self, value: Optional[pulumi.Input['LXCContainerFeaturesArgs']]):
pulumi.set(self, "features", value)
@property
@pulumi.getter
def force(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "force")
@force.setter
def force(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force", value)
@property
@pulumi.getter
def full(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "full")
@full.setter
def full(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "full", value)
@property
@pulumi.getter
def hagroup(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hagroup")
@hagroup.setter
def hagroup(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hagroup", value)
@property
@pulumi.getter
def hastate(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hastate")
@hastate.setter
def hastate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hastate", value)
@property
@pulumi.getter
def hookscript(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hookscript")
@hookscript.setter
def hookscript(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hookscript", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter(name="ignoreUnpackErrors")
def ignore_unpack_errors(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "ignore_unpack_errors")
@ignore_unpack_errors.setter
def ignore_unpack_errors(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ignore_unpack_errors", value)
@property
@pulumi.getter
def lock(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "lock")
@lock.setter
def lock(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lock", value)
@property
@pulumi.getter
def memory(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "memory")
@memory.setter
def memory(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory", value)
@property
@pulumi.getter
def mountpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerMountpointArgs']]]]:
return pulumi.get(self, "mountpoints")
@mountpoints.setter
def mountpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerMountpointArgs']]]]):
pulumi.set(self, "mountpoints", value)
@property
@pulumi.getter
def nameserver(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "nameserver")
@nameserver.setter
def nameserver(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nameserver", value)
@property
@pulumi.getter
def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerNetworkArgs']]]]:
return pulumi.get(self, "networks")
@networks.setter
def networks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerNetworkArgs']]]]):
pulumi.set(self, "networks", value)
@property
@pulumi.getter
def onboot(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "onboot")
@onboot.setter
def onboot(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "onboot", value)
@property
@pulumi.getter
def ostemplate(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ostemplate")
@ostemplate.setter
def ostemplate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ostemplate", value)
@property
@pulumi.getter
def ostype(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ostype")
@ostype.setter
def ostype(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ostype", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def pool(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pool")
@pool.setter
def pool(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pool", value)
@property
@pulumi.getter
def protection(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "protection")
@protection.setter
def protection(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "protection", value)
    @property
    @pulumi.getter
    def restore(self) -> Optional[pulumi.Input[bool]]:
        """The ``restore`` boolean input property."""
        return pulumi.get(self, "restore")
    @restore.setter
    def restore(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "restore", value)
    @property
    @pulumi.getter
    def rootfs(self) -> Optional[pulumi.Input['LXCContainerRootfsArgs']]:
        """The ``rootfs`` input property (an ``LXCContainerRootfsArgs``)."""
        return pulumi.get(self, "rootfs")
    @rootfs.setter
    def rootfs(self, value: Optional[pulumi.Input['LXCContainerRootfsArgs']]):
        pulumi.set(self, "rootfs", value)
    @property
    @pulumi.getter
    def searchdomain(self) -> Optional[pulumi.Input[str]]:
        """The ``searchdomain`` input property."""
        return pulumi.get(self, "searchdomain")
    @searchdomain.setter
    def searchdomain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "searchdomain", value)
    @property
    @pulumi.getter(name="sshPublicKeys")
    def ssh_public_keys(self) -> Optional[pulumi.Input[str]]:
        """The ``ssh_public_keys`` input property (wire name ``sshPublicKeys``)."""
        return pulumi.get(self, "ssh_public_keys")
    @ssh_public_keys.setter
    def ssh_public_keys(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssh_public_keys", value)
    @property
    @pulumi.getter
    def start(self) -> Optional[pulumi.Input[bool]]:
        """The ``start`` boolean input property."""
        return pulumi.get(self, "start")
    @start.setter
    def start(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "start", value)
    @property
    @pulumi.getter
    def startup(self) -> Optional[pulumi.Input[str]]:
        """The ``startup`` input property."""
        return pulumi.get(self, "startup")
    @startup.setter
    def startup(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "startup", value)
    @property
    @pulumi.getter
    def swap(self) -> Optional[pulumi.Input[int]]:
        """The ``swap`` integer input property."""
        return pulumi.get(self, "swap")
    @swap.setter
    def swap(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "swap", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[str]]:
        """The ``tags`` input property (a single string, not a list)."""
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def template(self) -> Optional[pulumi.Input[bool]]:
        """The ``template`` boolean input property."""
        return pulumi.get(self, "template")
    @template.setter
    def template(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "template", value)
    @property
    @pulumi.getter
    def tty(self) -> Optional[pulumi.Input[int]]:
        """The ``tty`` integer input property."""
        return pulumi.get(self, "tty")
    @tty.setter
    def tty(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "tty", value)
    @property
    @pulumi.getter
    def unique(self) -> Optional[pulumi.Input[bool]]:
        """The ``unique`` boolean input property."""
        return pulumi.get(self, "unique")
    @unique.setter
    def unique(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unique", value)
    @property
    @pulumi.getter
    def unprivileged(self) -> Optional[pulumi.Input[bool]]:
        """The ``unprivileged`` boolean input property."""
        return pulumi.get(self, "unprivileged")
    @unprivileged.setter
    def unprivileged(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unprivileged", value)
    @property
    @pulumi.getter
    def vmid(self) -> Optional[pulumi.Input[int]]:
        """The ``vmid`` integer input property."""
        return pulumi.get(self, "vmid")
    @vmid.setter
    def vmid(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "vmid", value)
@pulumi.input_type
class _LXCContainerState:
    def __init__(__self__, *,
                 arch: Optional[pulumi.Input[str]] = None,
                 bwlimit: Optional[pulumi.Input[int]] = None,
                 clone: Optional[pulumi.Input[str]] = None,
                 clone_storage: Optional[pulumi.Input[str]] = None,
                 cmode: Optional[pulumi.Input[str]] = None,
                 console: Optional[pulumi.Input[bool]] = None,
                 cores: Optional[pulumi.Input[int]] = None,
                 cpulimit: Optional[pulumi.Input[int]] = None,
                 cpuunits: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 features: Optional[pulumi.Input['LXCContainerFeaturesArgs']] = None,
                 force: Optional[pulumi.Input[bool]] = None,
                 full: Optional[pulumi.Input[bool]] = None,
                 hagroup: Optional[pulumi.Input[str]] = None,
                 hastate: Optional[pulumi.Input[str]] = None,
                 hookscript: Optional[pulumi.Input[str]] = None,
                 hostname: Optional[pulumi.Input[str]] = None,
                 ignore_unpack_errors: Optional[pulumi.Input[bool]] = None,
                 lock: Optional[pulumi.Input[str]] = None,
                 memory: Optional[pulumi.Input[int]] = None,
                 mountpoints: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerMountpointArgs']]]] = None,
                 nameserver: Optional[pulumi.Input[str]] = None,
                 networks: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerNetworkArgs']]]] = None,
                 onboot: Optional[pulumi.Input[bool]] = None,
                 ostemplate: Optional[pulumi.Input[str]] = None,
                 ostype: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 pool: Optional[pulumi.Input[str]] = None,
                 protection: Optional[pulumi.Input[bool]] = None,
                 restore: Optional[pulumi.Input[bool]] = None,
                 rootfs: Optional[pulumi.Input['LXCContainerRootfsArgs']] = None,
                 searchdomain: Optional[pulumi.Input[str]] = None,
                 ssh_public_keys: Optional[pulumi.Input[str]] = None,
                 start: Optional[pulumi.Input[bool]] = None,
                 startup: Optional[pulumi.Input[str]] = None,
                 swap: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[str]] = None,
                 target_node: Optional[pulumi.Input[str]] = None,
                 template: Optional[pulumi.Input[bool]] = None,
                 tty: Optional[pulumi.Input[int]] = None,
                 unique: Optional[pulumi.Input[bool]] = None,
                 unprivileged: Optional[pulumi.Input[bool]] = None,
                 unuseds: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 vmid: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering LXCContainer resources.

        Every argument is optional (keyword-only); only values that are not
        ``None`` are recorded on the instance via ``pulumi.set``, so absent
        fields are simply omitted from the state.
        """
        if arch is not None:
            pulumi.set(__self__, "arch", arch)
        if bwlimit is not None:
            pulumi.set(__self__, "bwlimit", bwlimit)
        if clone is not None:
            pulumi.set(__self__, "clone", clone)
        if clone_storage is not None:
            pulumi.set(__self__, "clone_storage", clone_storage)
        if cmode is not None:
            pulumi.set(__self__, "cmode", cmode)
        if console is not None:
            pulumi.set(__self__, "console", console)
        if cores is not None:
            pulumi.set(__self__, "cores", cores)
        if cpulimit is not None:
            pulumi.set(__self__, "cpulimit", cpulimit)
        if cpuunits is not None:
            pulumi.set(__self__, "cpuunits", cpuunits)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if features is not None:
            pulumi.set(__self__, "features", features)
        if force is not None:
            pulumi.set(__self__, "force", force)
        if full is not None:
            pulumi.set(__self__, "full", full)
        if hagroup is not None:
            pulumi.set(__self__, "hagroup", hagroup)
        if hastate is not None:
            pulumi.set(__self__, "hastate", hastate)
        if hookscript is not None:
            pulumi.set(__self__, "hookscript", hookscript)
        if hostname is not None:
            pulumi.set(__self__, "hostname", hostname)
        if ignore_unpack_errors is not None:
            pulumi.set(__self__, "ignore_unpack_errors", ignore_unpack_errors)
        if lock is not None:
            pulumi.set(__self__, "lock", lock)
        if memory is not None:
            pulumi.set(__self__, "memory", memory)
        if mountpoints is not None:
            pulumi.set(__self__, "mountpoints", mountpoints)
        if nameserver is not None:
            pulumi.set(__self__, "nameserver", nameserver)
        if networks is not None:
            pulumi.set(__self__, "networks", networks)
        if onboot is not None:
            pulumi.set(__self__, "onboot", onboot)
        if ostemplate is not None:
            pulumi.set(__self__, "ostemplate", ostemplate)
        if ostype is not None:
            pulumi.set(__self__, "ostype", ostype)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if pool is not None:
            pulumi.set(__self__, "pool", pool)
        if protection is not None:
            pulumi.set(__self__, "protection", protection)
        if restore is not None:
            pulumi.set(__self__, "restore", restore)
        if rootfs is not None:
            pulumi.set(__self__, "rootfs", rootfs)
        if searchdomain is not None:
            pulumi.set(__self__, "searchdomain", searchdomain)
        if ssh_public_keys is not None:
            pulumi.set(__self__, "ssh_public_keys", ssh_public_keys)
        if start is not None:
            pulumi.set(__self__, "start", start)
        if startup is not None:
            pulumi.set(__self__, "startup", startup)
        if swap is not None:
            pulumi.set(__self__, "swap", swap)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if target_node is not None:
            pulumi.set(__self__, "target_node", target_node)
        if template is not None:
            pulumi.set(__self__, "template", template)
        if tty is not None:
            pulumi.set(__self__, "tty", tty)
        if unique is not None:
            pulumi.set(__self__, "unique", unique)
        if unprivileged is not None:
            pulumi.set(__self__, "unprivileged", unprivileged)
        if unuseds is not None:
            pulumi.set(__self__, "unuseds", unuseds)
        if vmid is not None:
            pulumi.set(__self__, "vmid", vmid)
    # Generated getter/setter pairs: each proxies one state field through
    # pulumi.get/pulumi.set under its exact snake_case key; @pulumi.getter's
    # name= argument supplies the camelCase wire name where it differs.
    @property
    @pulumi.getter
    def arch(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arch")
    @arch.setter
    def arch(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arch", value)
    @property
    @pulumi.getter
    def bwlimit(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "bwlimit")
    @bwlimit.setter
    def bwlimit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "bwlimit", value)
    @property
    @pulumi.getter
    def clone(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "clone")
    @clone.setter
    def clone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "clone", value)
    @property
    @pulumi.getter(name="cloneStorage")
    def clone_storage(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "clone_storage")
    @clone_storage.setter
    def clone_storage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "clone_storage", value)
    @property
    @pulumi.getter
    def cmode(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cmode")
    @cmode.setter
    def cmode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cmode", value)
    @property
    @pulumi.getter
    def console(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "console")
    @console.setter
    def console(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "console", value)
    @property
    @pulumi.getter
    def cores(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "cores")
    @cores.setter
    def cores(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cores", value)
    @property
    @pulumi.getter
    def cpulimit(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "cpulimit")
    @cpulimit.setter
    def cpulimit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpulimit", value)
    @property
    @pulumi.getter
    def cpuunits(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "cpuunits")
    @cpuunits.setter
    def cpuunits(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpuunits", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def features(self) -> Optional[pulumi.Input['LXCContainerFeaturesArgs']]:
        return pulumi.get(self, "features")
    @features.setter
    def features(self, value: Optional[pulumi.Input['LXCContainerFeaturesArgs']]):
        pulumi.set(self, "features", value)
    @property
    @pulumi.getter
    def force(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "force")
    @force.setter
    def force(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force", value)
    @property
    @pulumi.getter
    def full(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "full")
    @full.setter
    def full(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "full", value)
    @property
    @pulumi.getter
    def hagroup(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "hagroup")
    @hagroup.setter
    def hagroup(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hagroup", value)
    @property
    @pulumi.getter
    def hastate(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "hastate")
    @hastate.setter
    def hastate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hastate", value)
    @property
    @pulumi.getter
    def hookscript(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "hookscript")
    @hookscript.setter
    def hookscript(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hookscript", value)
    @property
    @pulumi.getter
    def hostname(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "hostname")
    @hostname.setter
    def hostname(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hostname", value)
    @property
    @pulumi.getter(name="ignoreUnpackErrors")
    def ignore_unpack_errors(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ignore_unpack_errors")
    @ignore_unpack_errors.setter
    def ignore_unpack_errors(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ignore_unpack_errors", value)
    @property
    @pulumi.getter
    def lock(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "lock")
    @lock.setter
    def lock(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "lock", value)
    @property
    @pulumi.getter
    def memory(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "memory")
    @memory.setter
    def memory(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory", value)
    @property
    @pulumi.getter
    def mountpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerMountpointArgs']]]]:
        return pulumi.get(self, "mountpoints")
    @mountpoints.setter
    def mountpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerMountpointArgs']]]]):
        pulumi.set(self, "mountpoints", value)
    @property
    @pulumi.getter
    def nameserver(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "nameserver")
    @nameserver.setter
    def nameserver(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "nameserver", value)
    @property
    @pulumi.getter
    def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerNetworkArgs']]]]:
        return pulumi.get(self, "networks")
    @networks.setter
    def networks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LXCContainerNetworkArgs']]]]):
        pulumi.set(self, "networks", value)
    @property
    @pulumi.getter
    def onboot(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "onboot")
    @onboot.setter
    def onboot(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "onboot", value)
    @property
    @pulumi.getter
    def ostemplate(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ostemplate")
    @ostemplate.setter
    def ostemplate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ostemplate", value)
    @property
    @pulumi.getter
    def ostype(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ostype")
    @ostype.setter
    def ostype(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ostype", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def pool(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "pool")
    @pool.setter
    def pool(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pool", value)
    @property
    @pulumi.getter
    def protection(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "protection")
    @protection.setter
    def protection(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "protection", value)
    @property
    @pulumi.getter
    def restore(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "restore")
    @restore.setter
    def restore(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "restore", value)
    @property
    @pulumi.getter
    def rootfs(self) -> Optional[pulumi.Input['LXCContainerRootfsArgs']]:
        return pulumi.get(self, "rootfs")
    @rootfs.setter
    def rootfs(self, value: Optional[pulumi.Input['LXCContainerRootfsArgs']]):
        pulumi.set(self, "rootfs", value)
    @property
    @pulumi.getter
    def searchdomain(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "searchdomain")
    @searchdomain.setter
    def searchdomain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "searchdomain", value)
    @property
    @pulumi.getter(name="sshPublicKeys")
    def ssh_public_keys(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ssh_public_keys")
    @ssh_public_keys.setter
    def ssh_public_keys(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssh_public_keys", value)
    @property
    @pulumi.getter
    def start(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "start")
    @start.setter
    def start(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "start", value)
    @property
    @pulumi.getter
    def startup(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "startup")
    @startup.setter
    def startup(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "startup", value)
    @property
    @pulumi.getter
    def swap(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "swap")
    @swap.setter
    def swap(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "swap", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="targetNode")
    def target_node(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "target_node")
    @target_node.setter
    def target_node(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_node", value)
    @property
    @pulumi.getter
    def template(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "template")
    @template.setter
    def template(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "template", value)
    @property
    @pulumi.getter
    def tty(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "tty")
    @tty.setter
    def tty(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "tty", value)
    @property
    @pulumi.getter
    def unique(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "unique")
    @unique.setter
    def unique(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unique", value)
    @property
    @pulumi.getter
    def unprivileged(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "unprivileged")
    @unprivileged.setter
    def unprivileged(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unprivileged", value)
    @property
    @pulumi.getter
    def unuseds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "unuseds")
    @unuseds.setter
    def unuseds(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "unuseds", value)
    @property
    @pulumi.getter
    def vmid(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "vmid")
    @vmid.setter
    def vmid(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "vmid", value)
class LXCContainer(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 arch: Optional[pulumi.Input[str]] = None,
                 bwlimit: Optional[pulumi.Input[int]] = None,
                 clone: Optional[pulumi.Input[str]] = None,
                 clone_storage: Optional[pulumi.Input[str]] = None,
                 cmode: Optional[pulumi.Input[str]] = None,
                 console: Optional[pulumi.Input[bool]] = None,
                 cores: Optional[pulumi.Input[int]] = None,
                 cpulimit: Optional[pulumi.Input[int]] = None,
                 cpuunits: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 features: Optional[pulumi.Input[pulumi.InputType['LXCContainerFeaturesArgs']]] = None,
                 force: Optional[pulumi.Input[bool]] = None,
                 full: Optional[pulumi.Input[bool]] = None,
                 hagroup: Optional[pulumi.Input[str]] = None,
                 hastate: Optional[pulumi.Input[str]] = None,
                 hookscript: Optional[pulumi.Input[str]] = None,
                 hostname: Optional[pulumi.Input[str]] = None,
                 ignore_unpack_errors: Optional[pulumi.Input[bool]] = None,
                 lock: Optional[pulumi.Input[str]] = None,
                 memory: Optional[pulumi.Input[int]] = None,
                 mountpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LXCContainerMountpointArgs']]]]] = None,
                 nameserver: Optional[pulumi.Input[str]] = None,
                 networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LXCContainerNetworkArgs']]]]] = None,
                 onboot: Optional[pulumi.Input[bool]] = None,
                 ostemplate: Optional[pulumi.Input[str]] = None,
                 ostype: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 pool: Optional[pulumi.Input[str]] = None,
                 protection: Optional[pulumi.Input[bool]] = None,
                 restore: Optional[pulumi.Input[bool]] = None,
                 rootfs: Optional[pulumi.Input[pulumi.InputType['LXCContainerRootfsArgs']]] = None,
                 searchdomain: Optional[pulumi.Input[str]] = None,
                 ssh_public_keys: Optional[pulumi.Input[str]] = None,
                 start: Optional[pulumi.Input[bool]] = None,
                 startup: Optional[pulumi.Input[str]] = None,
                 swap: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[str]] = None,
                 target_node: Optional[pulumi.Input[str]] = None,
                 template: Optional[pulumi.Input[bool]] = None,
                 tty: Optional[pulumi.Input[int]] = None,
                 unique: Optional[pulumi.Input[bool]] = None,
                 unprivileged: Optional[pulumi.Input[bool]] = None,
                 vmid: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Create a LXCContainer resource with the given unique name, props, and options.

        Overload for passing each container property as a keyword argument;
        the actual work is done in ``_internal_init``.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: LXCContainerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a LXCContainer resource with the given unique name, props, and options.

        Overload for passing all container properties bundled in a single
        ``LXCContainerArgs`` object.

        :param str resource_name: The name of the resource.
        :param LXCContainerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two @overload signatures above:
        # _utilities.get_resource_args_opts returns a LXCContainerArgs instance
        # when the caller used the (name, args, opts) form, else None.
        resource_args, opts = _utilities.get_resource_args_opts(LXCContainerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: unpack its fields into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword form: forward the call unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       arch: Optional[pulumi.Input[str]] = None,
                       bwlimit: Optional[pulumi.Input[int]] = None,
                       clone: Optional[pulumi.Input[str]] = None,
                       clone_storage: Optional[pulumi.Input[str]] = None,
                       cmode: Optional[pulumi.Input[str]] = None,
                       console: Optional[pulumi.Input[bool]] = None,
                       cores: Optional[pulumi.Input[int]] = None,
                       cpulimit: Optional[pulumi.Input[int]] = None,
                       cpuunits: Optional[pulumi.Input[int]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       features: Optional[pulumi.Input[pulumi.InputType['LXCContainerFeaturesArgs']]] = None,
                       force: Optional[pulumi.Input[bool]] = None,
                       full: Optional[pulumi.Input[bool]] = None,
                       hagroup: Optional[pulumi.Input[str]] = None,
                       hastate: Optional[pulumi.Input[str]] = None,
                       hookscript: Optional[pulumi.Input[str]] = None,
                       hostname: Optional[pulumi.Input[str]] = None,
                       ignore_unpack_errors: Optional[pulumi.Input[bool]] = None,
                       lock: Optional[pulumi.Input[str]] = None,
                       memory: Optional[pulumi.Input[int]] = None,
                       mountpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LXCContainerMountpointArgs']]]]] = None,
                       nameserver: Optional[pulumi.Input[str]] = None,
                       networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LXCContainerNetworkArgs']]]]] = None,
                       onboot: Optional[pulumi.Input[bool]] = None,
                       ostemplate: Optional[pulumi.Input[str]] = None,
                       ostype: Optional[pulumi.Input[str]] = None,
                       password: Optional[pulumi.Input[str]] = None,
                       pool: Optional[pulumi.Input[str]] = None,
                       protection: Optional[pulumi.Input[bool]] = None,
                       restore: Optional[pulumi.Input[bool]] = None,
                       rootfs: Optional[pulumi.Input[pulumi.InputType['LXCContainerRootfsArgs']]] = None,
                       searchdomain: Optional[pulumi.Input[str]] = None,
                       ssh_public_keys: Optional[pulumi.Input[str]] = None,
                       start: Optional[pulumi.Input[bool]] = None,
                       startup: Optional[pulumi.Input[str]] = None,
                       swap: Optional[pulumi.Input[int]] = None,
                       tags: Optional[pulumi.Input[str]] = None,
                       target_node: Optional[pulumi.Input[str]] = None,
                       template: Optional[pulumi.Input[bool]] = None,
                       tty: Optional[pulumi.Input[int]] = None,
                       unique: Optional[pulumi.Input[bool]] = None,
                       unprivileged: Optional[pulumi.Input[bool]] = None,
                       vmid: Optional[pulumi.Input[int]] = None,
                       __props__=None):
        """Shared implementation behind both ``__init__`` overloads.

        Validates options, builds the ``LXCContainerArgs`` property bag and
        registers the resource with the Pulumi engine via
        ``pulumi.CustomResource.__init__``.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is None means we are creating a new resource (as opposed to
        # adopting an existing one), so a caller-supplied __props__ is invalid.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses LXCContainerArgs.__init__ validation; fields are
            # assigned directly into __dict__ below.
            __props__ = LXCContainerArgs.__new__(LXCContainerArgs)
            __props__.__dict__["arch"] = arch
            __props__.__dict__["bwlimit"] = bwlimit
            __props__.__dict__["clone"] = clone
            __props__.__dict__["clone_storage"] = clone_storage
            __props__.__dict__["cmode"] = cmode
            __props__.__dict__["console"] = console
            __props__.__dict__["cores"] = cores
            __props__.__dict__["cpulimit"] = cpulimit
            __props__.__dict__["cpuunits"] = cpuunits
            __props__.__dict__["description"] = description
            __props__.__dict__["features"] = features
            __props__.__dict__["force"] = force
            __props__.__dict__["full"] = full
            __props__.__dict__["hagroup"] = hagroup
            __props__.__dict__["hastate"] = hastate
            __props__.__dict__["hookscript"] = hookscript
            __props__.__dict__["hostname"] = hostname
            __props__.__dict__["ignore_unpack_errors"] = ignore_unpack_errors
            __props__.__dict__["lock"] = lock
            __props__.__dict__["memory"] = memory
            __props__.__dict__["mountpoints"] = mountpoints
            __props__.__dict__["nameserver"] = nameserver
            __props__.__dict__["networks"] = networks
            __props__.__dict__["onboot"] = onboot
            __props__.__dict__["ostemplate"] = ostemplate
            __props__.__dict__["ostype"] = ostype
            __props__.__dict__["password"] = password
            __props__.__dict__["pool"] = pool
            __props__.__dict__["protection"] = protection
            __props__.__dict__["restore"] = restore
            __props__.__dict__["rootfs"] = rootfs
            __props__.__dict__["searchdomain"] = searchdomain
            __props__.__dict__["ssh_public_keys"] = ssh_public_keys
            __props__.__dict__["start"] = start
            __props__.__dict__["startup"] = startup
            __props__.__dict__["swap"] = swap
            __props__.__dict__["tags"] = tags
            # target_node is the only required property (unless adopting via URN).
            if target_node is None and not opts.urn:
                raise TypeError("Missing required property 'target_node'")
            __props__.__dict__["target_node"] = target_node
            __props__.__dict__["template"] = template
            __props__.__dict__["tty"] = tty
            __props__.__dict__["unique"] = unique
            __props__.__dict__["unprivileged"] = unprivileged
            __props__.__dict__["vmid"] = vmid
            # unuseds is an output-only property: never caller-supplied on create.
            __props__.__dict__["unuseds"] = None
        super(LXCContainer, __self__).__init__(
            'proxmox:index/lXCContainer:LXCContainer',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arch: Optional[pulumi.Input[str]] = None,
            bwlimit: Optional[pulumi.Input[int]] = None,
            clone: Optional[pulumi.Input[str]] = None,
            clone_storage: Optional[pulumi.Input[str]] = None,
            cmode: Optional[pulumi.Input[str]] = None,
            console: Optional[pulumi.Input[bool]] = None,
            cores: Optional[pulumi.Input[int]] = None,
            cpulimit: Optional[pulumi.Input[int]] = None,
            cpuunits: Optional[pulumi.Input[int]] = None,
            description: Optional[pulumi.Input[str]] = None,
            features: Optional[pulumi.Input[pulumi.InputType['LXCContainerFeaturesArgs']]] = None,
            force: Optional[pulumi.Input[bool]] = None,
            full: Optional[pulumi.Input[bool]] = None,
            hagroup: Optional[pulumi.Input[str]] = None,
            hastate: Optional[pulumi.Input[str]] = None,
            hookscript: Optional[pulumi.Input[str]] = None,
            hostname: Optional[pulumi.Input[str]] = None,
            ignore_unpack_errors: Optional[pulumi.Input[bool]] = None,
            lock: Optional[pulumi.Input[str]] = None,
            memory: Optional[pulumi.Input[int]] = None,
            mountpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LXCContainerMountpointArgs']]]]] = None,
            nameserver: Optional[pulumi.Input[str]] = None,
            networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LXCContainerNetworkArgs']]]]] = None,
            onboot: Optional[pulumi.Input[bool]] = None,
            ostemplate: Optional[pulumi.Input[str]] = None,
            ostype: Optional[pulumi.Input[str]] = None,
            password: Optional[pulumi.Input[str]] = None,
            pool: Optional[pulumi.Input[str]] = None,
            protection: Optional[pulumi.Input[bool]] = None,
            restore: Optional[pulumi.Input[bool]] = None,
            rootfs: Optional[pulumi.Input[pulumi.InputType['LXCContainerRootfsArgs']]] = None,
            searchdomain: Optional[pulumi.Input[str]] = None,
            ssh_public_keys: Optional[pulumi.Input[str]] = None,
            start: Optional[pulumi.Input[bool]] = None,
            startup: Optional[pulumi.Input[str]] = None,
            swap: Optional[pulumi.Input[int]] = None,
            tags: Optional[pulumi.Input[str]] = None,
            target_node: Optional[pulumi.Input[str]] = None,
            template: Optional[pulumi.Input[bool]] = None,
            tty: Optional[pulumi.Input[int]] = None,
            unique: Optional[pulumi.Input[bool]] = None,
            unprivileged: Optional[pulumi.Input[bool]] = None,
            unuseds: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            vmid: Optional[pulumi.Input[int]] = None) -> 'LXCContainer':
        """
        Get an existing LXCContainer resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        The optional keyword arguments populate a ``_LXCContainerState``
        property bag; the provider ID is merged into ``opts`` so the engine
        reads (rather than creates) the resource.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # __new__ bypasses _LXCContainerState.__init__; fields assigned directly.
        __props__ = _LXCContainerState.__new__(_LXCContainerState)
        __props__.__dict__["arch"] = arch
        __props__.__dict__["bwlimit"] = bwlimit
        __props__.__dict__["clone"] = clone
        __props__.__dict__["clone_storage"] = clone_storage
        __props__.__dict__["cmode"] = cmode
        __props__.__dict__["console"] = console
        __props__.__dict__["cores"] = cores
        __props__.__dict__["cpulimit"] = cpulimit
        __props__.__dict__["cpuunits"] = cpuunits
        __props__.__dict__["description"] = description
        __props__.__dict__["features"] = features
        __props__.__dict__["force"] = force
        __props__.__dict__["full"] = full
        __props__.__dict__["hagroup"] = hagroup
        __props__.__dict__["hastate"] = hastate
        __props__.__dict__["hookscript"] = hookscript
        __props__.__dict__["hostname"] = hostname
        __props__.__dict__["ignore_unpack_errors"] = ignore_unpack_errors
        __props__.__dict__["lock"] = lock
        __props__.__dict__["memory"] = memory
        __props__.__dict__["mountpoints"] = mountpoints
        __props__.__dict__["nameserver"] = nameserver
        __props__.__dict__["networks"] = networks
        __props__.__dict__["onboot"] = onboot
        __props__.__dict__["ostemplate"] = ostemplate
        __props__.__dict__["ostype"] = ostype
        __props__.__dict__["password"] = password
        __props__.__dict__["pool"] = pool
        __props__.__dict__["protection"] = protection
        __props__.__dict__["restore"] = restore
        __props__.__dict__["rootfs"] = rootfs
        __props__.__dict__["searchdomain"] = searchdomain
        __props__.__dict__["ssh_public_keys"] = ssh_public_keys
        __props__.__dict__["start"] = start
        __props__.__dict__["startup"] = startup
        __props__.__dict__["swap"] = swap
        __props__.__dict__["tags"] = tags
        __props__.__dict__["target_node"] = target_node
        __props__.__dict__["template"] = template
        __props__.__dict__["tty"] = tty
        __props__.__dict__["unique"] = unique
        __props__.__dict__["unprivileged"] = unprivileged
        __props__.__dict__["unuseds"] = unuseds
        __props__.__dict__["vmid"] = vmid
        return LXCContainer(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def arch(self) -> pulumi.Output[Optional[str]]:
        """The ``arch`` output property."""
        return pulumi.get(self, "arch")
    @property
    @pulumi.getter
    def bwlimit(self) -> pulumi.Output[Optional[int]]:
        """The ``bwlimit`` output property."""
        return pulumi.get(self, "bwlimit")
    @property
    @pulumi.getter
    def clone(self) -> pulumi.Output[Optional[str]]:
        """The ``clone`` output property."""
        return pulumi.get(self, "clone")
    @property
    @pulumi.getter(name="cloneStorage")
    def clone_storage(self) -> pulumi.Output[Optional[str]]:
        """The ``clone_storage`` output property (wire name ``cloneStorage``)."""
        return pulumi.get(self, "clone_storage")
    @property
    @pulumi.getter
    def cmode(self) -> pulumi.Output[Optional[str]]:
        """The ``cmode`` output property."""
        return pulumi.get(self, "cmode")
    @property
    @pulumi.getter
    def console(self) -> pulumi.Output[Optional[bool]]:
        """The ``console`` output property."""
        return pulumi.get(self, "console")
    @property
    @pulumi.getter
    def cores(self) -> pulumi.Output[Optional[int]]:
        """The ``cores`` output property."""
        return pulumi.get(self, "cores")
    @property
    @pulumi.getter
    def cpulimit(self) -> pulumi.Output[Optional[int]]:
        """The ``cpulimit`` output property."""
        return pulumi.get(self, "cpulimit")
    @property
    @pulumi.getter
    def cpuunits(self) -> pulumi.Output[Optional[int]]:
        """The ``cpuunits`` output property."""
        return pulumi.get(self, "cpuunits")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "description")
@property
@pulumi.getter
def features(self) -> pulumi.Output[Optional['outputs.LXCContainerFeatures']]:
return pulumi.get(self, "features")
@property
@pulumi.getter
def force(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "force")
@property
@pulumi.getter
def full(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "full")
@property
@pulumi.getter
def hagroup(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "hagroup")
@property
@pulumi.getter
def hastate(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "hastate")
@property
@pulumi.getter
def hookscript(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "hookscript")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "hostname")
@property
@pulumi.getter(name="ignoreUnpackErrors")
def ignore_unpack_errors(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "ignore_unpack_errors")
@property
@pulumi.getter
def lock(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "lock")
@property
@pulumi.getter
def memory(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "memory")
@property
@pulumi.getter
def mountpoints(self) -> pulumi.Output[Optional[Sequence['outputs.LXCContainerMountpoint']]]:
return pulumi.get(self, "mountpoints")
@property
@pulumi.getter
def nameserver(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "nameserver")
@property
@pulumi.getter
def networks(self) -> pulumi.Output[Optional[Sequence['outputs.LXCContainerNetwork']]]:
return pulumi.get(self, "networks")
@property
@pulumi.getter
def onboot(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "onboot")
@property
@pulumi.getter
def ostemplate(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "ostemplate")
@property
@pulumi.getter
def ostype(self) -> pulumi.Output[str]:
return pulumi.get(self, "ostype")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "password")
@property
@pulumi.getter
def pool(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "pool")
@property
@pulumi.getter
def protection(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "protection")
@property
@pulumi.getter
def restore(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "restore")
@property
@pulumi.getter
def rootfs(self) -> pulumi.Output[Optional['outputs.LXCContainerRootfs']]:
return pulumi.get(self, "rootfs")
@property
@pulumi.getter
def searchdomain(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "searchdomain")
@property
@pulumi.getter(name="sshPublicKeys")
def ssh_public_keys(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "ssh_public_keys")
@property
@pulumi.getter
def start(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "start")
@property
@pulumi.getter
def startup(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "startup")
@property
@pulumi.getter
def swap(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "swap")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetNode")
def target_node(self) -> pulumi.Output[str]:
return pulumi.get(self, "target_node")
@property
@pulumi.getter
def template(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "template")
@property
@pulumi.getter
def tty(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "tty")
@property
@pulumi.getter
def unique(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "unique")
@property
@pulumi.getter
def unprivileged(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "unprivileged")
@property
@pulumi.getter
def unuseds(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "unuseds")
@property
@pulumi.getter
def vmid(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "vmid")
| 1.398438 | 1 |
ml/logistic/sklogistic.py | ruiyangio/ml-sentiment | 0 | 12763807 | from sklearn.linear_model import LogisticRegression
from modelbase import ModelBase
class SkLogistic(ModelBase):
    """Thin ModelBase wrapper around scikit-learn's LogisticRegression.

    Uses the sklearn default hyper-parameters; training/prediction is
    presumably driven by ModelBase (not visible here) — confirm.
    """
    def __init__(self):
        ModelBase.__init__(self)
        # Underlying estimator exposed as `self.model` for the base class.
        self.model = LogisticRegression()
| 2.484375 | 2 |
test/test_giftcard.py | AdityaMisra/GiftCardService | 0 | 12763808 | import pytest
from models.brand import Brand
from models.customer import Customer
from models.giftcard import GiftCard
from service.gift_card_service import GiftCardService
class TestGiftService:
    """Tests for Brand / GiftCard / Customer discount arithmetic.

    Fixtures are rebuilt per test via ``_make_fixtures`` so tests stay
    independent (the previous revision duplicated the setup inline and
    carried a large block of commented-out dead code, now removed).
    """

    @staticmethod
    def _make_fixtures():
        """Return (brands, gift cards, customers) shared by several tests."""
        apple = Brand("Apple", 5)
        walmart = Brand("Walmart", 15)
        amazon = Brand("Amazon", 10)

        apple_gc_50 = GiftCard(apple, 50)
        apple_gc_25 = GiftCard(apple, 25)
        walmart_gc = GiftCard(walmart, 100)
        amazon_gc = GiftCard(amazon, 20)

        customer_a = Customer("A")
        customer_b = Customer("B")
        return (apple, walmart, amazon,
                apple_gc_50, apple_gc_25, walmart_gc, amazon_gc,
                customer_a, customer_b)

    def test_all(self):
        """End-to-end: basket totals track discounted card values and
        re-compute when a brand's discount changes afterwards."""
        (apple, walmart, amazon,
         apple_gc_50, apple_gc_25, walmart_gc, amazon_gc,
         customer_a, customer_b) = self._make_fixtures()

        assert isinstance(customer_b, Customer)

        assert customer_a.add_gift_card(apple_gc_50) == 47.5
        assert customer_a.add_gift_card(amazon_gc) == 65.5
        assert customer_a.get_total_basket_price() == 65.5

        assert customer_b.add_gift_card(walmart_gc) == 85.0
        assert customer_b.add_gift_card(apple_gc_25) == 108.75
        assert customer_b.add_gift_card(amazon_gc) == 126.75
        assert customer_b.get_total_basket_price() == 126.75

        assert customer_a.add_gift_card(apple_gc_25) == 89.25
        assert customer_a.get_total_basket_price() == 89.25

        # Raising Amazon's discount retroactively lowers both baskets.
        amazon.update_discount(20)
        assert customer_a.get_total_basket_price() == 87.25
        assert customer_b.get_total_basket_price() == 124.75

    def test_customer_instance(self):
        """Customer construction yields a Customer instance."""
        customer_b = Customer("B")
        assert isinstance(customer_b, Customer)

    @pytest.mark.parametrize("customer, gc, amount", [(Customer("A"), GiftCard(Brand("Apple", 5), 50), 47.5),
                                                      (Customer("A"), GiftCard(Brand("Amazon", 10), 20), 18),
                                                      (Customer("A"), GiftCard(Brand("Apple", 5), 25), 23.75),
                                                      (Customer("B"), GiftCard(Brand("Walmart", 15), 100), 85.0),
                                                      (Customer("B"), GiftCard(Brand("Apple", 5), 25), 23.75),
                                                      (Customer("B"), GiftCard(Brand("Amazon", 10), 20), 18),
                                                      ])
    def test_add_gift_card_parametrized(self, customer, gc, amount):
        """add_gift_card returns the running basket total after discount."""
        assert customer.add_gift_card(gc) == amount

    def test_total_basket_amount(self):
        """Totals are consistent for two customers sharing one card, and
        follow a later discount change on the shared brand."""
        (apple, walmart, amazon,
         apple_gc_50, apple_gc_25, walmart_gc, amazon_gc,
         customer_a, customer_b) = self._make_fixtures()

        customer_a.add_gift_card(apple_gc_50)
        customer_a.add_gift_card(amazon_gc)
        assert customer_a.get_total_basket_price() == 65.5

        customer_b.add_gift_card(walmart_gc)
        customer_b.add_gift_card(apple_gc_25)
        customer_b.add_gift_card(amazon_gc)
        assert customer_b.get_total_basket_price() == 126.75

        customer_a.add_gift_card(apple_gc_25)
        assert customer_a.get_total_basket_price() == 89.25

        amazon.update_discount(20)
        assert customer_a.get_total_basket_price() == 87.25
        assert customer_b.get_total_basket_price() == 124.75

    def test_value_error(self):
        """Adding a non-GiftCard object raises ValueError."""
        customer_a = Customer("A")
        with pytest.raises(ValueError):
            customer_a.add_gift_card(123)

    def test_gift_card_service(self):
        """The service entry point runs to completion and returns None."""
        assert GiftCardService.run() is None
| 2.75 | 3 |
setup.py | cocodrips/doc-cov | 14 | 12763809 | import io
import re
from setuptools import setup
# Long description: fall back to an empty string when README.md is absent
# (e.g. installing from a distribution that does not ship it).
try:
    with open('README.md') as f:
        readme = f.read()
except IOError:
    readme = ''

# Single-source the version string from the package's __init__.py.
with io.open('doccov/__init__.py', 'rt', encoding='utf8') as f:
    version = re.search(r'__version__ = \'(.*?)\'', f.read()).group(1)


def _requires_from_file(filename):
    """Return the requirement lines contained in *filename*.

    Uses a context manager so the file handle is closed deterministically
    (the previous revision left it open until garbage collection).
    """
    with open(filename) as f:
        return f.read().splitlines()


setup(
    name='doc-cov',
    version=version,
    url='https://github.com/cocodrips/doc-cov',
    license='MIT',
    author='ku-mu',
    author_email='<EMAIL>',
    description='doc-cov is a tool for measuring docstring coverage of Python project',
    long_description=readme,
    long_description_content_type='text/markdown',
    packages=['doccov'],
    include_package_data=True,
    python_requires='>=3.6',
    install_requires=[],
    extras_require={
        'dev': [
            'pytest>=3',
        ],
    },
    entry_points={
        'console_scripts': [
            'doccov = doccov.main:entry_point',
            'doccov-report = doccov.report:entry_point',
        ],
    },
)
guillotina_cms/vocabularies/content_layouts.py | alteroo/guillotina_cms | 5 | 12763810 | from guillotina import configure
from guillotina import app_settings
@configure.vocabulary(name="content_layouts")
class ContentLayoutVocabulary:
    """Vocabulary listing the layouts configured for the context's type.

    ``app_settings["layouts"]`` maps a content type name to its available
    layouts; an unknown type yields an empty vocabulary.
    """

    def __init__(self, context):
        self.context = context
        # When bound through a wrapper, the content object is one level
        # deeper (context.context); otherwise the context itself carries
        # the type_name.
        content = getattr(self.context, "context", self.context)
        self.values = app_settings["layouts"].get(content.type_name, [])

    def keys(self):
        return self.values

    def __iter__(self):
        # Iterate over a shallow copy, matching the original behaviour.
        return iter(list(self.values))

    def __contains__(self, value):
        return value in self.values

    def __len__(self):
        return len(self.values)

    def getTerm(self, value):
        if value not in self.values:
            raise KeyError("No valid state")
        return value
| 2.4375 | 2 |
mcg/sampling.py | nyu-dl/dl4mt-multi | 143 | 12763811 | <reponame>nyu-dl/dl4mt-multi
import logging
import copy
import numpy
import operator
import os
import re
import signal
import time
import theano
from blocks.extensions import SimpleExtension
from collections import OrderedDict
from subprocess import Popen, PIPE
from toolz import merge
from .utils import _p, get_enc_dec_ids
logger = logging.getLogger(__name__)
def gen_sample(f_init, f_next, x, src_selector, trg_selector, k=1,
               maxlen=30, stochastic=True, argmax=False, eos_idx=0,
               cond_init_trg=False, ignore_unk=False, minlen=1, unk_idx=1,
               f_next_state=None, return_alphas=False):
    """Generate target hypotheses for the source sequence ``x``.

    Two modes:
      * ``stochastic=True``  -- sample (or argmax) one token per step;
        returns a single hypothesis and its cumulative negative log-prob.
      * ``stochastic=False`` -- beam search of width ``k``; returns up to
        ``k`` hypotheses and their scores.

    ``f_init(x)`` must return ``[initial_state, context]`` and
    ``f_next(word, context, state)`` must return ``[probs, word, state]``
    (plus attention weights when ``return_alphas`` is used).
    ``src_selector``/``trg_selector``/``cond_init_trg``/``f_next_state``
    are accepted for interface compatibility but unused on this path.

    Fixes over the previous revision:
      * the ``ignore_unk``/``minlen`` masking is now actually applied to
        the beam-search candidate scores (the masked ``log_probs`` array
        was computed and then ignored);
      * hypothesis indices are recovered with floor division (``//``),
        which is also correct under Python 3.
    """
    if k > 1:
        assert not stochastic, \
            'Beam search does not support stochastic sampling'

    sample = []
    sample_score = []
    sample_decalphas = []
    if stochastic:
        sample_score = 0

    # live_k: hypotheses still being extended; dead_k: finished ones.
    live_k = 1
    dead_k = 0

    hyp_samples = [[]] * live_k
    hyp_decalphas = []
    hyp_scores = numpy.zeros(live_k).astype('float32')
    hyp_states = []

    # multi-source
    inp_xs = [x]
    init_inps = inp_xs
    ret = f_init(*init_inps)
    next_state, ctx0 = ret[0], ret[1]

    # -1 acts as the beginning-of-sequence marker consumed by f_next.
    next_w = -1 * numpy.ones((1,)).astype('int64')

    for ii in range(maxlen):
        # Replicate the source context for every live hypothesis.
        ctx = numpy.tile(ctx0, [live_k, 1])
        inps = [next_w, ctx, next_state]
        ret = f_next(*inps)
        next_p, next_w, next_state = ret[0], ret[1], ret[2]
        if return_alphas:
            next_decalpha = ret.pop(0)

        if stochastic:
            if argmax:
                nw = next_p[0].argmax()
            else:
                nw = next_w[0]
            sample.append(nw)
            sample_score -= numpy.log(next_p[0, nw])
            if nw == eos_idx:
                break
        else:
            log_probs = numpy.log(next_p)

            # Adjust log probs according to search restrictions.
            if ignore_unk:
                log_probs[:, unk_idx] = -numpy.inf
            if ii < minlen:
                log_probs[:, eos_idx] = -numpy.inf

            # Use the *masked* log-probabilities so the restrictions above
            # take effect (masked entries get +inf cost, never selected).
            cand_scores = hyp_scores[:, None] - log_probs
            cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k - dead_k)]

            voc_size = next_p.shape[1]
            # Recover (hypothesis, word) from the flat candidate index.
            trans_indices = ranks_flat // voc_size
            word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat]

            new_hyp_samples = []
            new_hyp_scores = numpy.zeros(k - dead_k).astype('float32')
            new_hyp_states = []
            new_hyp_decalphas = []

            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                new_hyp_samples.append(hyp_samples[ti] + [wi])
                new_hyp_scores[idx] = copy.copy(costs[idx])
                new_hyp_states.append(copy.copy(next_state[ti]))
                if return_alphas:
                    tmp_decalphas = []
                    if ii > 0:
                        tmp_decalphas = copy.copy(hyp_decalphas[ti])
                    tmp_decalphas.append(next_decalpha[ti])
                    new_hyp_decalphas.append(tmp_decalphas)

            # Separate finished hypotheses (ending in EOS) from live ones.
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []
            hyp_decalphas = []

            for idx in range(len(new_hyp_samples)):
                if new_hyp_samples[idx][-1] == eos_idx:
                    sample.append(new_hyp_samples[idx])
                    sample_score.append(new_hyp_scores[idx])
                    if return_alphas:
                        sample_decalphas.append(new_hyp_decalphas[idx])
                    dead_k += 1
                else:
                    new_live_k += 1
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
                    if return_alphas:
                        hyp_decalphas.append(new_hyp_decalphas[idx])
            hyp_scores = numpy.array(hyp_scores)
            live_k = new_live_k

            if new_live_k < 1:
                break
            if dead_k >= k:
                break

            next_w = numpy.array([w[-1] for w in hyp_samples])
            next_state = numpy.array(hyp_states)

    if not stochastic:
        # Flush every remaining (unfinished) hypothesis.
        if live_k > 0:
            for idx in range(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])
                if return_alphas:
                    sample_decalphas.append(hyp_decalphas[idx])

    if not return_alphas:
        return numpy.array(sample), numpy.array(sample_score)
    return numpy.array(sample), numpy.array(sample_score), \
        numpy.array(sample_decalphas)
class SamplingBase(object):
    """Shared helpers for sampling extensions: vocabulary lookup,
    EOS-based sequence trimming and simple shape normalisation."""

    def _get_attr_rec(self, obj, attr):
        """Follow ``attr`` repeatedly until an object without it is found."""
        while hasattr(obj, attr):
            obj = getattr(obj, attr)
        return obj

    def _get_true_length(self, seq, eos_idx):
        """Length of ``seq`` up to and including the first EOS token
        (the full length when no EOS is present)."""
        tokens = seq.tolist()
        if eos_idx in tokens:
            return tokens.index(eos_idx) + 1
        return len(tokens)

    def _oov_to_unk(self, seq):
        """Map out-of-vocabulary token indices to the UNK index."""
        unk, limit = self.unk_idx, self.src_vocab_size
        return [tok if tok < limit else unk for tok in seq]

    def _parse_input(self, line, eos_idx):
        """Convert a whitespace-tokenized line into an int64 index array
        terminated by ``eos_idx``; unknown/OOV words become UNK."""
        tokens = line.split()
        seq = numpy.zeros(len(tokens) + 1, dtype='int64')
        for pos, tok in enumerate(tokens):
            idx = self.vocab.get(tok, self.unk_idx)
            seq[pos] = idx if idx < self.src_vocab_size else self.unk_idx
        seq[-1] = eos_idx
        return seq

    def _idx_to_word(self, seq, ivocab):
        """Join token indices into a space-separated sentence."""
        words = [ivocab.get(idx, "<UNK>") for idx in seq]
        return " ".join(words)

    def _get_true_seq(self, seq, eos_idx):
        """Trim ``seq`` after the first EOS token (inclusive)."""
        return seq[:self._get_true_length(seq, eos_idx)]

    def _make_matrix(self, arr):
        """Ensure ``arr`` is at least 2-D by prepending an axis."""
        if arr.ndim < 2:
            return arr[None, :]
        return arr
class Sampler(SimpleExtension, SamplingBase):
    """Samples from computation graph

    Does not use peeked batches.

    Blocks extension (Python 2 codebase — note the print statements)
    that, on each trigger, draws ``num_samples`` stochastic samples from
    the (enc_id, dec_id) translation stream and prints input/target/sample
    triples for eyeballing training progress.
    """
    def __init__(self, f_init, f_next, data_stream, num_samples=1,
                 src_vocab=None, trg_vocab=None, src_ivocab=None,
                 trg_ivocab=None, enc_id=0, dec_id=0, src_eos_idx=-1,
                 trg_eos_idx=-1, cond_init_trg=False, f_next_state=None,
                 **kwargs):
        super(Sampler, self).__init__(**kwargs)
        self.f_init = f_init
        self.f_next = f_next
        self.f_next_state = f_next_state
        self.data_stream = data_stream
        self.num_samples = num_samples
        self.src_vocab = src_vocab
        self.trg_vocab = trg_vocab
        self.src_ivocab = src_ivocab
        self.trg_ivocab = trg_ivocab
        self.src_eos_idx = src_eos_idx
        self.trg_eos_idx = trg_eos_idx
        self.cond_init_trg = cond_init_trg
        self.enc_id = enc_id
        self.dec_id = dec_id
        # Lazy one-time binding to the main loop's stream (see do()).
        self._synced = False
        self.sampling_fn = gen_sample

    def do(self, which_callback, *args):
        """Draw and print ``num_samples`` random samples for the current
        batch of the (enc_id, dec_id) stream."""
        batch = args[0]

        # Get current model parameters
        if not self._synced:
            # Resolve the innermost data stream once; the main loop is not
            # available at construction time.
            sources = self._get_attr_rec(
                self.main_loop.data_stream.streams[_p(self.enc_id,
                                                      self.dec_id)],
                'data_stream')
            self.sources = sources
            self._synced = True

        batch = self.main_loop.data_stream\
            .get_batch_with_stream_id(_p(self.enc_id, self.dec_id))
        batch_size = batch['source'].shape[1]

        # Load vocabularies and invert if necessary
        # WARNING: Source and target indices from data stream
        # can be different
        if not self.src_vocab:
            self.src_vocab = self.sources.data_streams[0].dataset.dictionary
        if not self.trg_vocab:
            self.trg_vocab = self.sources.data_streams[1].dataset.dictionary
        if not self.src_ivocab:
            self.src_ivocab = {v: k for k, v in self.src_vocab.items()}
            self.src_ivocab[self.src_eos_idx] = '</S>'
        if not self.trg_ivocab:
            self.trg_ivocab = {v: k for k, v in self.trg_vocab.items()}
            self.trg_ivocab[self.trg_eos_idx] = '</S>'

        # Pick distinct random columns (sentences) of the batch.
        sample_idx = numpy.random.choice(
            batch_size, self.num_samples, replace=False)
        src_batch = batch['source']
        trg_batch = batch['target']

        input_ = src_batch[:, sample_idx]
        target_ = trg_batch[:, sample_idx]

        # Sample
        outputs = [list() for _ in sample_idx]
        costs = [list() for _ in sample_idx]
        for i, idx in enumerate(sample_idx):
            # Stochastic single-hypothesis sampling (k=1, stochastic=True).
            outputs[i], costs[i] = self.sampling_fn(
                self.f_init, self.f_next, eos_idx=self.trg_eos_idx,
                x=self._get_true_seq(input_[:, i], self.src_eos_idx)[:, None],
                src_selector=self._make_matrix(batch['src_selector'][idx, :]),
                trg_selector=self._make_matrix(batch['trg_selector'][idx, :]),
                k=1, maxlen=30, stochastic=True, argmax=False,
                cond_init_trg=self.cond_init_trg,
                f_next_state=self.f_next_state)

        print ""
        logger.info("Sampling from computation graph[{}-{}]"
                    .format(self.enc_id, self.dec_id))
        for i in range(len(outputs)):
            # Trim each sequence at its first EOS before printing.
            input_length = self._get_true_length(input_[:, i],
                                                 self.src_eos_idx)
            target_length = self._get_true_length(target_[:, i],
                                                  self.trg_eos_idx)
            sample_length = self._get_true_length(outputs[i],
                                                  self.trg_eos_idx)

            print "Input : ", self._idx_to_word(input_[:, i][:input_length],
                                                self.src_ivocab)
            print "Target: ", self._idx_to_word(target_[:, i][:target_length],
                                                self.trg_ivocab)
            print "Sample: ", self._idx_to_word(outputs[i][:sample_length],
                                                self.trg_ivocab)
            print "Sample cost: ", costs[i].sum()
            print ""
class BleuValidator(SimpleExtension, SamplingBase):
    """Highly not recommended for use.

    Blocks extension that beam-search translates a validation set, pipes
    the output to the multi-bleu perl script, tracks the BLEU curve and
    keeps the ``track_n_models`` best checkpoints on disk.
    (Python 2 codebase — note the ``print >>`` statements.)
    """
    def __init__(self, f_init, f_next, data_stream,
                 bleu_script, val_set_out, val_set_grndtruth, src_vocab_size,
                 src_selector=None, trg_selector=None, n_best=1,
                 track_n_models=1, trg_ivocab=None, beam_size=5,
                 val_burn_in=10000, _reload=True, enc_id=None, dec_id=None,
                 saveto=None, src_eos_idx=-1, trg_eos_idx=-1, normalize=True,
                 cond_init_trg=False, **kwargs):
        super(BleuValidator, self).__init__(**kwargs)
        self.f_init = f_init
        self.f_next = f_next
        self.data_stream = data_stream
        self.bleu_script = bleu_script
        self.val_set_out = val_set_out
        self.val_set_grndtruth = val_set_grndtruth
        self.src_vocab_size = src_vocab_size
        self.src_selector = src_selector
        self.trg_selector = trg_selector
        self.n_best = n_best
        self.track_n_models = track_n_models
        self.trg_ivocab = trg_ivocab
        self.beam_size = beam_size
        # No validation before this many iterations.
        self.val_burn_in = val_burn_in
        self._reload = _reload
        self.enc_id = enc_id
        self.dec_id = dec_id
        self.saveto = saveto if saveto else "."
        # Truthy val_set_out doubles as "write translations to file".
        self.verbose = val_set_out
        self._synced = False
        self.src_eos_idx = src_eos_idx
        self.trg_eos_idx = trg_eos_idx
        # Normalize hypothesis scores by length before ranking.
        self.normalize = normalize
        self.cond_init_trg = cond_init_trg

        # Helpers
        self.vocab = data_stream.dataset.dictionary
        self.unk_sym = data_stream.dataset.unk_token
        self.eos_sym = data_stream.dataset.eos_token
        self.unk_idx = self.vocab[self.unk_sym]
        self.best_models = []
        self.val_bleu_curve = []
        self.sampling_fn = gen_sample
        # multi-bleu reads the reference file and hypotheses from stdin.
        self.multibleu_cmd = ['perl', bleu_script, val_set_grndtruth, '<']

        # Create saving directory if it does not exist
        if not os.path.exists(saveto):
            os.makedirs(saveto)

        if self._reload:
            try:
                bleu_score = numpy.load(
                    os.path.join(
                        saveto, 'val_bleu_scores{}_{}.npz'.format(
                            self.enc_id, self.dec_id)))
                self.val_bleu_curve = bleu_score['bleu_scores'].tolist()

                # Track n best previous bleu scores
                for i, bleu in enumerate(
                        sorted(self.val_bleu_curve, reverse=True)):
                    if i < self.track_n_models:
                        self.best_models.append(ModelInfo(bleu))
                logger.info("BleuScores Reloaded")
            except:
                logger.info("BleuScores not Found")

    def do(self, which_callback, *args):
        """Run one validation pass (after burn-in) and save the model if
        it beats the tracked best scores."""
        # Track validation burn in
        if self.main_loop.status['iterations_done'] <= self.val_burn_in:
            return

        # Get current model parameters
        if not self._synced:
            # Map (enc_id, dec_id) names to positional selector indices.
            enc_ids, dec_ids = get_enc_dec_ids(self.main_loop.models.keys())
            self.enc_idx = enc_ids.index(self.enc_id)
            self.dec_idx = dec_ids.index(self.dec_id)
            self.sources = self._get_attr_rec(
                self.main_loop.data_stream.streams[_p(self.enc_id,
                                                      self.dec_id)],
                'data_stream')
            self._synced = True

        # Evaluate and save if necessary
        self._save_model(self._evaluate_model())

    def _evaluate_model(self):
        """Translate the validation set, score it with multi-bleu and
        return the BLEU score (also appended to the curve)."""
        logger.info("Started Validation: ")
        val_start_time = time.time()
        mb_subprocess = Popen(self.multibleu_cmd, stdin=PIPE, stdout=PIPE)
        total_cost = 0.0

        # Get target vocabulary
        if not self.trg_ivocab:
            trg_vocab = self.sources.data_streams[1].dataset.dictionary
            self.trg_ivocab = {v: k for k, v in trg_vocab.items()}

        if self.verbose:
            ftrans = open(self.val_set_out, 'w')

        for i, line in enumerate(self.data_stream.get_epoch_iterator()):
            """
            Load the sentence, retrieve the sample, write to file
            """
            seq = numpy.array(self._oov_to_unk(line[0])).astype('int64')

            # Branch for multiple computation graphs
            # One-hot selectors for this encoder/decoder pair.
            src_selector_input = numpy.zeros(
                (1, self.main_loop.num_encs)).astype(theano.config.floatX)
            src_selector_input[0, self.enc_idx] = 1.
            trg_selector_input = numpy.zeros(
                (1, self.main_loop.num_decs)).astype(theano.config.floatX)
            trg_selector_input[0, self.dec_idx] = 1.

            # draw sample, checking to ensure we don't get an empty string back
            trans, costs = self.sampling_fn(
                self.f_init, self.f_next,
                x=seq.reshape([len(seq), 1]), eos_idx=self.trg_eos_idx,
                src_selector=src_selector_input,
                trg_selector=trg_selector_input,
                k=self.beam_size, maxlen=3*len(seq), stochastic=False,
                argmax=False, cond_init_trg=self.cond_init_trg)

            if self.normalize:
                # Length-normalize so short hypotheses are not favoured.
                lengths = numpy.array([len(s) for s in trans])
                costs = costs / lengths

            nbest_idx = numpy.argsort(costs)[:self.n_best]
            for j, best in enumerate(nbest_idx):
                try:
                    total_cost += costs[best]
                    trans_out = trans[best]

                    # convert idx to words
                    # The EOS token (last element) is dropped.
                    trans_out = self._idx_to_word(trans_out[:-1],
                                                  self.trg_ivocab)
                except ValueError:
                    logger.info(
                        "Can NOT find a translation for line: {}".format(i+1))
                    trans_out = '<UNK>'

                if j == 0:
                    # Write to subprocess and file if it exists
                    print >> mb_subprocess.stdin, trans_out
                    if self.verbose:
                        print >> ftrans, trans_out

            if i != 0 and i % 100 == 0:
                logger.info(
                    "Translated {} lines of validation set...".format(i))

        mb_subprocess.stdin.flush()
        logger.info("Total cost of the validation: {}".format(total_cost))
        self.data_stream.reset()
        if self.verbose:
            ftrans.close()

        # send end of file, read output.
        mb_subprocess.stdin.close()
        stdout = mb_subprocess.stdout.readline()
        logger.info(stdout)
        out_parse = re.match(r'BLEU = [-.0-9]+', stdout)
        logger.info("Validation Took: {} minutes".format(
            float(time.time() - val_start_time) / 60.))
        assert out_parse is not None

        # extract the score
        bleu_score = float(out_parse.group()[6:])
        self.val_bleu_curve.append(bleu_score)
        logger.info('BLEU score: {}'.format(bleu_score))
        mb_subprocess.terminate()

        # Save bleu scores to file
        self._save_bleu_scores()
        return bleu_score

    def _is_valid_to_save(self, bleu_score):
        """True when the score beats the worst of the tracked best models
        (or nothing is tracked yet)."""
        if not self.best_models or min(self.best_models,
           key=operator.attrgetter('bleu_score')).bleu_score < bleu_score:
            return True
        return False

    def _save_model(self, bleu_score):
        """Persist the current parameters when the score makes the
        top-``track_n_models`` list, evicting the worst tracked model."""
        if self._is_valid_to_save(bleu_score):
            model = ModelInfo(
                bleu_score, self.saveto, self.enc_id, self.dec_id)

            # Manage n-best model list first
            if len(self.best_models) >= self.track_n_models:
                old_model = self.best_models[0]
                if old_model.path and os.path.isfile(old_model.path):
                    logger.info("Deleting old model %s" % old_model.path)
                    os.remove(old_model.path)
                self.best_models.remove(old_model)

            self.best_models.append(model)
            self.best_models.sort(key=operator.attrgetter('bleu_score'))

            # Save the model here
            # Ignore Ctrl-C while writing to avoid a corrupt checkpoint.
            s = signal.signal(signal.SIGINT, signal.SIG_IGN)
            logger.info("Saving new model {}".format(model.path))
            params_to_save = []
            for cg_name in self.main_loop.models.keys():
                params_to_save.append(
                    self.main_loop.models[cg_name].get_param_values())
            params_to_save = merge(params_to_save)
            self._save_params(model, params_to_save)
            self._save_bleu_scores()
            signal.signal(signal.SIGINT, s)

    def _save_params(self, model, params):
        """Write parameters to ``model.path`` as an .npz archive."""
        # Rename accordingly for blocks compatibility
        params_to_save = dict(
            (k.replace('/', '-'), v) for k, v in params.items())
        numpy.savez(model.path, **params_to_save)

    def _save_bleu_scores(self):
        """Persist the BLEU curve next to the checkpoints."""
        numpy.savez(
            os.path.join(
                self.saveto,
                'val_bleu_scores{}_{}.npz'.format(self.enc_id, self.dec_id)),
            bleu_scores=self.val_bleu_curve)
class ModelInfo:
    """Record of a saved checkpoint and the BLEU score it achieved."""

    def __init__(self, bleu_score, path=None, enc_id=None, dec_id=None):
        self.bleu_score = bleu_score
        # Empty strings keep the generated filename well-formed when the
        # encoder/decoder ids are unknown.
        self.enc_id = '' if enc_id is None else enc_id
        self.dec_id = '' if dec_id is None else dec_id
        # No directory given (falsy) -> no file path yet.
        self.path = None if not path else self._generate_path(path)

    def _generate_path(self, directory):
        """Build a (timestamped, hence unique) checkpoint filename."""
        fname = 'best_bleu_model{}_{}_{}_BLEU{:.2f}.npz'.format(
            self.enc_id, self.dec_id, int(time.time()), self.bleu_score)
        return os.path.join(directory, fname)
| 1.539063 | 2 |
addons/website_helpdesk_support_ticket/models/type_of_subject.py | marionumza/vocal_v12 | 0 | 12763812 | <reponame>marionumza/vocal_v12
# -*- coding: utf-8 -*-
from odoo import models, fields
class TypeOfSubject(models.Model):
    """Lookup model holding subject types (presumably selectable on
    helpdesk support tickets, per the addon path — confirm)."""
    _name = 'type.of.subject'

    # Display name of the subject type; mandatory.
    name = fields.Char(
        'Name',
        required=True,
    )
| 1.46875 | 1 |
pdbparse/construct/formats/filesystem/ext3.py | ax330d/ida_pdb_loader | 50 | 12763813 | <filename>pdbparse/construct/formats/filesystem/ext3.py
"""
Extension 3 (ext3)
Used primarily for concurrent Linux systems (ext2 + journalling)
"""
| 1.210938 | 1 |
ansys/dpf/core/field_definition.py | pscedu/DPF-Core | 0 | 12763814 | """
FieldDefinition
================
"""
from ansys import dpf
from ansys.grpc.dpf import (base_pb2,
field_definition_pb2, field_definition_pb2_grpc)
from ansys.dpf.core.common import natures, shell_layers
from ansys.dpf.core.dimensionality import Dimensionality
class FieldDefinition:
    """Contains the physical and mathematical description of the field.

    Parameters
    ----------
    field_definition : field_definition_pb2.FieldDefinition, optional
        Existing gRPC message to wrap. The default is ``None``, in which
        case a new, empty field definition is created server side.
    server : ansys.dpf.core.server, optional
        Server with the channel connected to the remote or local instance.
        The default is ``None``, in which case an attempt is made to use
        the global server.
    """

    def __init__(self, field_definition=None, server=None):
        if server is None:
            server = dpf.core._global_server()

        self._server = server
        self._stub = self._connect(self._server.channel)

        if isinstance(field_definition, field_definition_pb2.FieldDefinition):
            # Wrap the provided message as-is.
            self._messageDefinition = field_definition
        else:
            # Ask the server to create a fresh, empty field definition.
            request = base_pb2.Empty()
            self._messageDefinition = self._stub.Create(request)

    @property
    def location(self):
        """Field location.

        Returns
        -------
        str
            Location string, such as ``"Nodal"``, ``"Elemental"``,
            or ``"TimeFreq_sets"``.
        """
        out = self._stub.List(self._messageDefinition)
        return out.location.location

    @property
    def unit(self):
        """Units of the field.

        Returns
        -------
        str
            Units of the field.
        """
        return self._stub.List(self._messageDefinition).unit.symbol

    @property
    def shell_layers(self):
        """Order of the shell layers.

        Returns
        -------
        shell_layers : shell_layers
            ``LayerIndependent`` is returned for fields unrelated to layers.
        """
        enum_val = self._stub.List(self._messageDefinition).shell_layers
        # +1 was added to the proto enum so that 0 means "not set";
        # subtract it back to map onto the Python enum.
        return shell_layers(enum_val.real - 1)

    @property
    def dimensionality(self):
        """Dimensionality.

        Returns
        -------
        dimensionality : Dimensionality
            Nature and size of the elementary data.
        """
        # "dimensionnality" typo exists on the server side; keep it.
        val = self._stub.List(self._messageDefinition).dimensionnality
        return Dimensionality(val.size, natures(val.nature.real))

    @unit.setter
    def unit(self, value):
        self._modify_field_def(unit=value)

    @location.setter
    def location(self, value):
        self._modify_field_def(location=value)

    @shell_layers.setter
    def shell_layers(self, value):
        self._modify_field_def(shell_layer=value)

    @dimensionality.setter
    def dimensionality(self, value):
        self._modify_field_def(dimensionality=value)

    def _modify_field_def(self, unit=None, location=None,
                          dimensionality=None, shell_layer=None):
        """Send an update request writing only the non-None arguments."""
        request = field_definition_pb2.FieldDefinitionUpdateRequest()
        request.field_definition.CopyFrom(self._messageDefinition)
        if unit is not None:
            request.unit_symbol.symbol = unit
        if location is not None:
            request.location.location = location
        if dimensionality is not None:
            if not isinstance(dimensionality, Dimensionality):
                raise TypeError("the dimensionality needs to be of type Dimensionality")
            # "dimensionnality" typo is on the server side; keep it.
            request.dimensionnality.CopyFrom(dimensionality._parse_dim_to_message())
        if shell_layer is not None:
            # The proto enum is shifted by +1 relative to the Python enum.
            if isinstance(shell_layer, shell_layers):
                request.shell_layers = shell_layer.value + 1
            else:
                request.shell_layers = shell_layer + 1
        self._stub.Update(request)

    def __del__(self):
        try:
            self._stub.Delete(self._messageDefinition)
        except Exception:
            # Never raise from __del__; the channel/server may already be
            # gone at interpreter shutdown.
            pass

    def _connect(self, channel):
        """Connect to the gRPC service."""
        return field_definition_pb2_grpc.FieldDefinitionServiceStub(channel)
| 2.34375 | 2 |
apps/manager/purpleserver/manager/migrations/0018_auto_20210705_1049.py | rcknr/purplship-server | 12 | 12763815 | # Generated by Django 3.2.5 on 2021-07-05 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add Shipment.reference and alter
    Shipment.label_type (both optional CharFields)."""

    dependencies = [
        ('manager', '0017_auto_20210629_1650'),
    ]

    operations = [
        # New optional free-form reference on shipments.
        migrations.AddField(
            model_name='shipment',
            name='reference',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        # Redefine label_type as an optional CharField(25); the previous
        # definition is not visible here.
        migrations.AlterField(
            model_name='shipment',
            name='label_type',
            field=models.CharField(blank=True, max_length=25, null=True),
        ),
    ]
| 1.507813 | 2 |
exercicios/ex051.py | CinatitBR/exercicios-phyton | 0 | 12763816 | termo1 = int(input('Digite o 1º termo da P.A: '))
razao = int(input('Digite a razão desta P.A: '))
# Print the first 10 terms of the arithmetic progression (P.A.).
# Each term is generated directly as termo1 + n * razao instead of
# range(termo1, decimo + 1, razao): the range form raises ValueError for
# razao == 0 and prints nothing for a negative ratio, while this form
# handles every integer ratio.
for n in range(10):
    print(termo1 + n * razao, end=' ')
| 3.890625 | 4 |
prkng/api/explorer.py | prkng/api | 7 | 12763817 | from prkng.models import ParkingLots, Slots
from flask import jsonify, Blueprint, request, send_from_directory
import os
explorer = Blueprint('explorer', __name__, url_prefix='/explorer')
slot_props = (
'id',
'geojson',
'rules',
'button_locations',
'way_name'
)
def init_explorer(app):
    """
    Initialize Explorer extension into Flask application

    Registers the /explorer blueprint (routes defined in this module)
    on the given app.
    """
    app.register_blueprint(explorer)
@explorer.route('/', defaults={'path': None})
@explorer.route('/<path:path>')
def test_view(path):
    """
    Serve explorer interface.

    Should only be used for testing; otherwise serve with NGINX instead.

    Paths outside the known asset directories fall back to index.html
    (single-page-app routing). Image requests are served from the source
    ``public`` folder, everything else from the built ``dist`` folder.
    """
    # Unknown paths -> serve the SPA entry point instead of a 404.
    if path and not path.startswith(("assets", "public", "fonts", "images")):
        path = None
    sdir = os.path.dirname(os.path.realpath(__file__))
    if path and path.startswith("images"):
        # Images are served from public/ rather than the dist build.
        sdir = os.path.abspath(os.path.join(sdir, '../../../explorer/public'))
    else:
        sdir = os.path.abspath(os.path.join(sdir, '../../../explorer/dist'))
    return send_from_directory(sdir, path or 'index.html')
@explorer.route('/api/slots')
def get_slots():
    """
    Returns slots inside a boundbox

    Query-string parameters: neLat/neLng/swLat/swLng (bounding box,
    required), checkin (optional), duration (hours, default 0.25),
    type (int, default 0) and invert (flag, true when "true").
    """
    res = Slots.get_boundbox(
        request.args['neLat'],
        request.args['neLng'],
        request.args['swLat'],
        request.args['swLng'],
        slot_props,
        request.args.get('checkin'),
        request.args.get('duration', 0.25),
        int(request.args.get('type', 0)),
        request.args.get('invert') in [True, "true"]
    )
    if res == False:
        return jsonify(status="no feature found"), 404

    # Only a subset of slot_props is exposed to the client.
    props = ["id", "geojson", "button_locations", "restrict_types"]
    slots = [
        {field: row[field] for field in props}
        for row in res
    ]

    return jsonify(slots=slots), 200
@explorer.route('/api/slots/<int:id>')
def get_slot(id):
    """
    Returns data on a specific slot
    """
    res = Slots.get_byid(id, slot_props)
    if not res:
        return jsonify(status="feature not found"), 404

    # Rows appear to come back positionally here (unlike get_slots, which
    # indexes rows by field name) — pair slot_props with column positions.
    slot = {field: res[0][num] for num, field in enumerate(slot_props)}

    return jsonify(slot=slot), 200
@explorer.route('/api/lots')
def get_lots():
    """
    Returns garages inside a boundbox

    Query-string parameters: neLat/neLng/swLat/swLng (required).
    """
    bbox = request.args
    res = ParkingLots.get_boundbox(
        bbox['neLat'], bbox['neLng'], bbox['swLat'], bbox['swLng'])
    if res == False:
        return jsonify(status="no feature found"), 404

    lots = [dict(row.items()) for row in res]
    return jsonify(lots=lots), 200
| 2.46875 | 2 |
dingtalk/contact/__init__.py | niulinlnc/dingtalk-python | 2 | 12763818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2018/2/28 下午2:05
# @Author: BlackMatrix
# @Site: https://github.com/blackmatrix7
# @File: __init__.py
# @Software: PyCharm
import logging
from .user import *
from .dept import *
from .role import *
from functools import partial
from ..foundation import dingtalk_method
__author__ = 'blackmatrix'
METHODS = {}
method = partial(dingtalk_method, methods=METHODS)
class Contact:
    """High-level wrapper around the DingTalk contact (address book) API.

    Bundles the user, department and role endpoints behind one object;
    every call reads the current access token from ``self.auth``.
    """
    def __init__(self, auth, logger=logging):
        # auth must expose a valid `access_token` attribute.
        self.auth = auth
        self.methods = METHODS
        self.logger = logger
    # ------------------- Employee management -------------------
    def get_user(self, user_id):
        """Get a user's info by user id."""
        user_info = get_user(self.auth.access_token, user_id)
        return user_info
    def get_dept_user_list(self, department_id):
        """
        Get the user list of a department by department id.
        Each request returns at most 100 records; callers must page
        through the results themselves via the offset.
        :param department_id:
        :return:
        """
        data = get_dept_user_list(self.auth.access_token, department_id)
        user_list = data['userlist']
        return user_list
    def get_all_dept_user_list(self, department_id):
        """
        Get all employees of a department by department id,
        automatically paging through every offset.
        :param department_id:
        :return:
        """
        user_list = []
        def _get_dept_user_list(offset, size):
            # Recurse while the API reports more pages, accumulating into
            # the enclosing user_list.
            data = get_dept_user_list(access_token=self.auth.access_token, department_id=department_id, offset=offset, size=size)
            user_list.extend(data['userlist'])
            if data['hasMore'] is True:
                offset = len(user_list)
                _get_dept_user_list(offset=offset, size=size)
        _get_dept_user_list(0, 5)
        return user_list
    def get_all_org_users(self):
        """
        Get every department of the organisation, each with all of the
        employees belonging to it.
        :return:
        """
        dept_list = self.get_department_list()
        for _dept in dept_list:
            del _dept['autoAddUser']
            del _dept['createDeptGroup']
            _dept['employees'] = self.get_all_dept_user_list(_dept['id'])
        return dept_list
    def get_all_users(self):
        """
        Traverse every department id and collect all (deduplicated)
        employees of the whole organisation.
        :return:
        """
        dept_id_list = self.get_all_department_id_list()
        employee_list = []
        for dept_id in dept_id_list:
            dept_employee_list = self.get_all_dept_user_list(dept_id)
            for employee in dept_employee_list:
                if employee not in employee_list:
                    employee_list.append(employee)
        return employee_list
    def create_user(self, **user_info):
        """
        Create a user.
        :param user_info:
        :return:
        """
        result = create_user(self.auth.access_token, **user_info)
        return result
    def update_user(self, **user_info):
        """
        Update a user.
        :param user_info:
        :return:
        """
        result = update_user(self.auth.access_token, **user_info)
        return result
    def delete_user(self, userid):
        """
        Delete a user.
        :param userid:
        :return:
        """
        result = delete_user(self.auth.access_token, userid)
        return result
    def get_org_user_count(self, only_active):
        """
        Get the number of employees in the company.
        :param only_active: 0: count of non-activated users, 1: count of activated users
        :return:
        """
        data = get_org_user_count(self.auth.access_token, only_active)
        return data['count']
    def get_user_departments(self, userid):
        """
        Query all parent-department paths of the given user.
        Querying the main administrator returns "no such user" for an
        unknown reason — possibly deliberate on DingTalk's side.
        :param userid:
        :return:
        """
        data = get_user_departments(self.auth.access_token, userid)
        return data
    def get_user_by_code(self, code: str):
        """
        Exchange the code passed in by jsapi for the user's info from
        the DingTalk server.
        :param code:
        :return:
        """
        data = get_user_by_code(self.auth.access_token, code)
        return data
    def get_userid_by_unionid(self, unionid: str):
        """
        Get a member's userid from a unionid.
        :param unionid:
        :return:
        """
        data = get_userid_by_unionid(self.auth.access_token, unionid)
        return data
    # ------------------- Department management -------------------
    def get_department_id_list(self, dept_id=1):
        """
        Get the list of sub-department ids of a department.
        :return:
        """
        data = get_department_id_list(access_token=self.auth.access_token, dept_id=dept_id)
        dept_id_list = data['sub_dept_id_list']
        return dept_id_list
    def get_all_department_id_list(self):
        """
        Recursively get every department id of the company.
        :return:
        """
        all_dept_id_list = []
        def get_sub_dept_id_list(dept_id=1):
            # Depth-first walk starting at the root department (id 1).
            sub_dept_id_list = self.get_department_id_list(dept_id)
            if sub_dept_id_list:
                all_dept_id_list.extend(sub_dept_id_list)
                for sub_dept_id in sub_dept_id_list:
                    get_sub_dept_id_list(sub_dept_id)
        get_sub_dept_id_list()
        return tuple(set(all_dept_id_list))
    def get_department_list(self, id_=None):
        """
        Get the department list.
        :param id_:
        :return:
        """
        data = get_department_list(self.auth.access_token, id_)
        depart_list = data['department']
        return depart_list
    def get_department(self, id_):
        """
        Get a department by its id.
        :param id_:
        :return:
        """
        data = get_department(self.auth.access_token, id_)
        return data
    def create_department(self, **dept_info):
        """
        Create a department.
        :param dept_info:
        :return:
        """
        data = create_department(self.auth.access_token, **dept_info)
        return data['id']
    def update_department(self, **dept_info):
        """
        Update a department's info.
        :param dept_info:
        :return:
        """
        data = update_department(self.auth.access_token, **dept_info)
        return data['id']
    def delete_department(self, id_):
        """
        Delete a department by its id.
        :param id_:
        :return:
        """
        data = delete_department(self.auth.access_token, id_)
        return data
    # ------------------- Role management -------------------
    @method('dingtalk.corp.role.list')
    def get_corp_role_list(self, size=20, offset=0):
        """
        Get the company's role list (paged).
        https://open-doc.dingtalk.com/docs/doc.htm?spm=a219a.7629140.0.0.85WR2K&treeId=385&articleId=29205&docType=2
        :param size:
        :param offset:
        :return:
        """
        resp = get_corp_role_list(self.auth.access_token, size=size, offset=offset)
        data = resp['dingtalk_corp_role_list_response']['result']['list']
        if data.get('role_groups') is None:
            return None
        else:
            role_groups = data.get('role_groups')
            for role_group in role_groups:
                # DingTalk nests two 'roles' levels in the response; flatten one level here
                role_group['roles'] = role_group.pop('roles').pop('roles')
            return role_groups
    @method('dingtalk.corp.role.all')
    def get_all_corp_role_list(self):
        """
        Get the company's complete role list (pages through everything).
        https://open-doc.dingtalk.com/docs/doc.htm?spm=a219a.7629140.0.0.85WR2K&treeId=385&articleId=29205&docType=2
        :return:
        """
        size = 100
        offset = 0
        dd_role_list = []
        while True:
            dd_roles = self.get_corp_role_list(size=size, offset=offset)
            if dd_roles is None or len(dd_roles) <= 0:
                break
            else:
                dd_role_list.extend(dd_roles)
                offset += size
        return dd_role_list
    @method('dingtalk.corp.role.simplelist')
    def get_role_simple_list(self, role_id, size=20, offset=0):
        """
        Get the employee list of a role.
        Returns None when the response carries no 'emp_simple_list'.
        https://open-doc.dingtalk.com/docs/doc.htm?spm=a219a.7629140.0.0.qatKNZ&treeId=385&articleId=29204&docType=2
        :param role_id:
        :param size:
        :param offset:
        :return:
        """
        data = get_role_simple_list(self.auth.access_token, role_id=role_id, size=size, offset=offset)
        # The response format is deeply (needlessly) nested.
        user_list = data['dingtalk_corp_role_simplelist_response']['result']['list']
        if user_list and 'emp_simple_list' in user_list:
            return user_list['emp_simple_list']
    @method('dingtalk.corp.role.getrolegroup')
    def get_role_group(self, group_id):
        """
        Get a role group's details plus all of its associated roles via group_id.
        There is currently no known place to obtain a role-group id:
        dingtalk.corp.role.list only exposes the group name, not its id,
        so the practical use of this endpoint is unclear.
        https://open-doc.dingtalk.com/docs/doc.htm?spm=a219a.7629140.0.0.VqsINY&treeId=385&articleId=29978&docType=2
        :param group_id:
        :return:
        """
        data = get_role_group(self.auth.access_token, group_id=group_id)
        return data
| 2.359375 | 2 |
mail.py | FilippoRanza/mail.py | 0 | 12763819 | <gh_stars>0
#! /usr/bin/python3
# Copyright (c) 2019 <NAME> <<EMAIL>>
from argparse import ArgumentParser
from mail_sender import message_builder, load_destination
DEFAULT_SUBJECT = "A message from mail.py"
DEFAULT_CONFIG_FILE = "/etc/mail_config.json"


def setup_argparser():
    """Build the command-line parser for the mail sender.

    Returns:
        ArgumentParser configured with destination, subject, config,
        attachment and message-file options.
    """
    out = ArgumentParser()
    # Fixed typos in the help texts ("addresse[es]", "adress[es]", "spece")
    # and the attachment help, which contradicted nargs='+'.
    out.add_argument('-d', '--destination', required=True, nargs='+',
                     help='''Set destination address[es]; this argument
                     can be a file, in which case address[es] are read from it,
                     addresses are line or space separated''')
    out.add_argument('-s', '--subject', default=DEFAULT_SUBJECT,
                     help='specify subject')
    out.add_argument('-c', '--config', default=DEFAULT_CONFIG_FILE,
                     help='specify configuration file')
    out.add_argument('-a', '--attachment', default=None, nargs='+',
                     help='add one or more attachment files')
    out.add_argument('-f', '--file', default=None,
                     help="specify message file")
    return out
def main():
    """Parse the command line, build the message and send it."""
    parser = setup_argparser()
    args = parser.parse_args()
    # parse_args always returns a Namespace (it exits the process on bad
    # input), so the original always-true `if args:` guard was dead code.
    mail = message_builder(args.config, args.attachment, args.subject, args.file)
    dst = load_destination(args.destination)
    mail.send_mail(dst)


if __name__ == "__main__":
    main()
| 3 | 3 |
srtplayer.py | glymehrvrd/srtplayer | 0 | 12763820 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
import sys
import pysrt
import chardet
class ControlMainWindow(QtGui.QLabel):
    """Frameless, always-on-top, transparent label that plays an .srt
    subtitle file in real time (PyQt4 / Python 2 code).

    Left-click opens a file dialog; dragging moves the window; the
    right-click context menu seeks, changes the font, or exits.
    """
    def __init__(self, parent=None):
        super(ControlMainWindow, self).__init__(parent)
        # make window frameless, topmost and transparent
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint |
                            QtCore.Qt.Window | QtCore.Qt.WindowStaysOnTopHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.setMinimumSize(1000, 180)
        self.setWordWrap(True)
        self.setAlignment(QtCore.Qt.AlignCenter)
        # context menu
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self.showContextMenu)
        # font config
        font = QtGui.QFont()
        font.setFamily('mono')
        font.setBold(True)
        font.setPointSize(24)
        self.setFont(font)
        self.setText('open srt file by clicking here!')
        # init local vars
        self.offset = 0  # drag offset between cursor and window top-left corner
        self.moving = False  # True while the label is being dragged
        self.pos = QtCore.QPoint(0, 0)  # cursor position at press, used to detect plain clicks
        self.subPos = 0  # index of the current subtitle item in self.subs
        self.showing = False  # True while a subtitle is currently displayed
        # Single-shot timer driving the show/hide alternation of subtitles.
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.onTimeout)
        self.timer.setSingleShot(True)
    @QtCore.pyqtSlot(QtCore.QPoint)
    def showContextMenu(self, pos):
        '''
        Show the right-click context menu: seek to a time, change the
        font, or exit the application.
        '''
        globalPos = self.mapToGlobal(pos)
        menu = QtGui.QMenu()
        menu.addAction("Set time")
        menu.addAction("Font")
        menu.addSeparator()
        menu.addAction("Exit")
        selItem = menu.exec_(globalPos)
        if selItem:
            if selItem.text() == "Set time":
                newStartTime, succ = QtGui.QInputDialog.getText(
                    self, "Input start time", "", QtGui.QLineEdit.Normal, "00:00:00,000")
                if succ:
                    # Restart playback from the user-entered timestamp.
                    self.playSrt(
                        pysrt.SubRipTime.from_string(unicode(newStartTime)))
            elif selItem.text() == "Font":
                font, succ = QtGui.QFontDialog.getFont(
                    self.font(), self, 'Font')
                if succ:
                    self.setFont(font)
            elif selItem.text() == "Exit":
                self.close()
    @QtCore.pyqtSlot()
    def onTimeout(self):
        '''
        Change to next subtitle item.

        Alternates between hiding the current subtitle (and scheduling the
        gap until the next one) and showing the next subtitle (scheduling
        its display duration).
        '''
        if self.showing:
            self.showing = False
            self.setText("")
            # calc duration
            # NOTE(review): self.subs[self.subPos + 1] is read BEFORE the
            # end-of-list check below, so this raises IndexError when the
            # last subtitle finishes — confirm and hoist the bounds check.
            d = self.subs[self.subPos + 1].start - self.subs[self.subPos].end
            mil = (((d.hours * 60) + d.minutes) * 60 + d.seconds) * \
                1000 + d.milliseconds
            self.subPos += 1
            # if srt has finished
            if self.subPos >= len(self.subs):
                self.setText('Finished')
                return
        else:
            self.showing = True
            self.setText(self.subs[self.subPos].text)
            # calc duration
            d = self.subs[self.subPos].end - self.subs[self.subPos].start
            mil = (((d.hours * 60) + d.minutes) * 60 + d.seconds) * \
                1000 + d.milliseconds
        self.timer.start(mil)
    def openFile(self):
        '''
        Choose srt file and play it.
        '''
        self.timer.stop()
        filename = QtGui.QFileDialog.getOpenFileName(
            self, 'Open File', '~/', '*.srt;;*')
        if not filename:
            return
        # Detect the file encoding first; chardet reports GB2312 for files
        # that are really the GBK superset, so widen it.
        with open(filename, 'r') as f:
            encoding = chardet.detect(f.read())['encoding']
            if encoding == 'GB2312':
                encoding = 'gbk'
        self.subs = pysrt.open(unicode(filename), encoding=encoding)
        self.playSrt()
    def findPos(self, startTime):
        '''
        Find out which subtitle item should be displayed at startTime.
        Using binary search method.
        '''
        def fp(a, b):
            c = a + (b - a) / 2
            if a > b:
                # return nearest subtitle item if there is no subtitle at
                # startTime
                return c
            elif startTime > self.subs[c].end:
                return fp(c + 1, b)
            elif startTime < self.subs[c].start:
                return fp(a, c - 1)
            else:
                return c
        return fp(0, len(self.subs) - 1)
    def playSrt(self, startTime=0):
        '''
        (Re)start subtitle playback from startTime (0 or a SubRipTime).
        Schedules the timer for either the remaining display time of the
        current item or the gap before the next one.
        '''
        if not hasattr(self, 'subs'):
            return
        self.setText('Empty')
        self.timer.stop()
        self.showing = False
        self.subPos = self.findPos(startTime)
        d = self.subs[self.subPos].start - startTime
        # if already begins, then show it
        if d < 0:
            d = self.subs[self.subPos].end - startTime
            # if between intermit
            if d < 0:
                self.showing = False
                d = self.subs[self.subPos + 1].start - startTime
                self.setText('Empty')
            else:
                self.showing = True
                self.setText(self.subs[self.subPos].text)
            self.subPos += 1
        # Python 2 debug print, left in place.
        print d
        mil = (((d.hours * 60) + d.minutes) * 60 + d.seconds) * \
            1000 + d.milliseconds
        self.timer.start(mil)
    def mousePressEvent(self, event):
        # Remember where the drag started so release can distinguish a
        # click (open file) from a move.
        if event.button() == QtCore.Qt.LeftButton:
            self.pos = event.globalPos()
            self.offset = event.globalPos() - self.frameGeometry().topLeft()
            self.setCursor(QtCore.Qt.PointingHandCursor)
            self.moving = True
    def mouseReleaseEvent(self, event):
        # A release with zero displacement counts as a click and opens the
        # file dialog.
        if event.button() == QtCore.Qt.LeftButton:
            self.setCursor(QtCore.Qt.ArrowCursor)
            self.moving = False
            if (event.globalPos() - self.pos) == QtCore.QPoint(0, 0):
                self.openFile()
    def mouseMoveEvent(self, event):
        # Drag the frameless window by keeping the press offset constant.
        if self.moving:
            self.move(event.globalPos() - self.offset)
    def keyPressEvent(self, event):
        # Escape closes the player.
        if event.key() == QtCore.Qt.Key_Escape:
            self.close()
# Script entry point: create the Qt application and the subtitle window,
# then hand control to the Qt event loop until the window is closed.
app = QtGui.QApplication(sys.argv)
mySW = ControlMainWindow()
mySW.show()
sys.exit(app.exec_())
| 2.453125 | 2 |
working_stats.py | Koofus/Statistics | 0 | 12763821 | <reponame>Koofus/Statistics<gh_stars>0
import stats
import random
def get_data():
    """Prompt the user for numbers until 'done' is entered.

    Returns:
        list[int]: the numbers entered, in order.

    Raises:
        ValueError: if a non-integer, non-'done' value is entered
        (same behaviour as the original).
    """
    container = []
    print("Enter any amount of numbers to be analyzed")
    while True:
        data = input("Input Number -->")
        if data == 'done':
            break
        # Removed the unused running counter `i` from the original loop.
        container.append(int(data))
    return container
# Script driver: collect numbers interactively and print their statistics.
data_object = stats.statistics(get_data())
data_object.display_stats()
| 3.5 | 4 |
2016/lab4/hastad/code.py | JustHitTheCore/ctf_workshops | 7 | 12763822 | # https://id0-rsa.pub/problem/11/
import gmpy2
def crt(a, n):
    """Chinese remainder theorem
    from: http://rosettacode.org/wiki/Chinese_remainder_theorem#Python

    Solves x = a[i] (mod n[i]) for all i, assuming the moduli are
    pairwise coprime.

    x = a[0] % n[0]
    x = a[1] % n[1]
    x = a[2] % n[2]

    Args:
        a(list): remainders
        n(list): modules

    Returns:
        the unique solution modulo the product of all moduli

    Raises:
        ValueError: if the two lists have different lengths.
    """
    if len(a) != len(n):
        # The original called an undefined `log` object here (NameError);
        # fail loudly with a standard exception instead.
        raise ValueError(
            "Different number of remainders({}) and modules({})".format(len(a), len(n)))
    prod = 1
    for n_i in n:
        prod *= n_i
    total = 0
    for n_i, a_i in zip(n, a):
        p = prod // n_i  # floor division keeps this exact on Python 2 and 3
        total += a_i * _mod_inverse(p, n_i) * p
    return total % prod


def _mod_inverse(x, m):
    """Modular inverse of x modulo m via the extended Euclidean algorithm.

    Replaces gmpy2.invert so crt() has no third-party dependency.
    Raises ValueError when x is not invertible mod m.
    """
    r0, r1 = m, x % m
    s0, s1 = 0, 1
    while r1:
        q = r0 // r1
        r0, r1 = r1, r0 - q * r1
        s0, s1 = s1, s0 - q * s1
    if r0 != 1:
        raise ValueError("{} is not invertible modulo {}".format(x, m))
    return s0 % m
# Hastad broadcast attack: the same plaintext encrypted with public
# exponent e = 3 under three different moduli can be recovered by
# combining the ciphertexts with the CRT and taking an integer cube root.
e = 3
C1 = 0x94f145679ee247b023b09f917beea7e38707452c5f4dc443bba4d089a18ec42de6e32806cc967e09a28ea6fd2e683d5bb7258bce9e6f972d6a30d7e5acbfba0a85610261fb3e0aac33a9e833234a11895402bc828da3c74ea2979eb833cd644b8ab9e3b1e46515f47a49ee602c608812241e56b94bcf76cfbb13532d9f4ff8ba
N1 = 0xa5d1c341e4837bf7f2317024f4436fb25a450ddabd7293a0897ebecc24e443efc47672a6ece7f9cac05661182f3abbb0272444ce650a819b477fd72bf01210d7e1fbb7eb526ce77372f1aa6c9ce570066deee1ea95ddd22533cbc68b3ba20ec737b002dfc6f33dcb19e6f9b312caa59c81bb80cda1facf16536cb3c184abd1d5
C2 = 0x5ad248df283350558ba4dc22e5ec8325364b3e0b530b143f59e40c9c2e505217c3b60a0fae366845383adb3efe37da1b9ae37851811c4006599d3c1c852edd4d66e4984d114f4ea89d8b2aef45cc531cfa1ab16c7a2e04d8884a071fed79a8d30af66edf1bbbf695ff8670b9fccf83860a06e017d67b1788b19b72d597d7d8d8
# NOTE(review): "<KEY>" is a redacted placeholder, not valid Python —
# the real modulus must be restored for this script to run at all.
N2 = <KEY>
C3 = 0x8a9315ee3438a879f8af97f45df528de7a43cd9cf4b9516f5a9104e5f1c7c2cdbf754b1fa0702b3af7cecfd69a425f0676c8c1f750f32b736c6498cac207aa9d844c50e654ceaced2e0175e9cfcc2b9f975e3183437db73111a4a139d48cc6ce4c6fac4bf93b98787ed8a476a9eb4db4fd190c3d8bf4d5c4f66102c6dd36b73
N3 = 0x5ca9a30effc85f47f5889d74fd35e16705c5d1a767004fec7fdf429a205f01fd7ad876c0128ddc52caebaa0842a89996379ac286bc96ebbb71a0f8c3db212a18839f7877ebd76c3c7d8e86bf6ddb17c9c93a28defb8c58983e11304d483fd7caa19b4b261fc40a19380abae30f8d274481a432c8de488d0ea7b680ad6cf7776b
n_all = [N1, N2, N3]
ciphertext_all = [C1, C2, C3]
# c_e equals m**e without modular reduction (m**3 < N1*N2*N3), so a plain
# integer e-th root recovers the message.
c_e = crt(ciphertext_all, n_all)
c = gmpy2.iroot(c_e, e)
# Python 2 print statements below; the final line decodes the recovered
# integer back into its ASCII plaintext.
print c
c = long(c[0])
print hex(c)[2:].strip('L').decode('hex')
| 3.015625 | 3 |
MSBRadix.py | divine-coder/CODECHEF-PYTHON | 0 | 12763823 | import sys
def getQ(size):
    """Return `size` independent empty bucket lists."""
    return [[] for _ in range(size)]
def noDigit(b):
    """Number of decimal digits in the largest element of b."""
    biggest = max(b)
    return len(str(biggest))
def getHash(val, i):
    """Return the i-th decimal digit of val counted from the right
    (i = 1 is the least significant digit).

    Uses floor division so the result stays an int under Python 3 as
    well; the original `/` produced a float there, which then broke
    bucket indexing in radix(). Behaviour on Python 2 ints is unchanged.
    """
    return (val % 10 ** i) // (10 ** (i - 1))
p1 = 0  # NOTE(review): module-level leftover; nothing in this file reads it


def radix(b, i, no):
    """MSD (most-significant-digit first) recursive radix sort.

    Args:
        b: list of non-negative ints to sort.
        i: current recursion depth (0 at the top-level call).
        no: total number of digit positions, i.e. noDigit() of the
            original input.

    Returns:
        A new list with the elements of b in ascending order.
    """
    # Stop once every digit position has been consumed or the bucket is
    # trivially sorted.
    if i >= no:
        return b
    if len(b) <= 1:
        return b
    bucket = getQ(10)
    for elem in b:
        # Distribute on the (no - i)-th digit from the right, i.e. most
        # significant digit first.
        bucket[getHash(elem, no - i)].append(elem)
    for j in range(len(bucket)):
        bucket[j] = radix(bucket[j], i + 1, no)
    # Concatenate the recursively sorted buckets in digit order.
    # (The per-level debug `print dic` from the original — Python-2-only
    # syntax — has been removed.)
    ordered = []
    for sub in bucket:
        ordered.extend(sub)
    return ordered
if __name__ == '__main__':
    # Small self-test driver for the MSD radix sort above.
    a = [13, 13, 132, 9817391, 1237, 1231, 131]
    # a = [170, 45, 75, 90, 2, 24, 802, 66]
    # print(x) is valid on both Python 2 and 3; the original bare
    # `print x` statements were Python-2-only syntax.
    print(noDigit(a))
    a = radix(a, 0, noDigit(a))
    print(a)
| 3.125 | 3 |
High School/9th Grade APCSP (Python)/Unit 8/08.01.01.py | SomewhereOutInSpace/Computer-Science-Class | 0 | 12763824 | lis = []
for i in range (10):
num = int(input())
lis.append(num)
print(lis)
for i in range (len(lis)):
print(lis[i])
| 3.65625 | 4 |
backend/music/models.py | ahanwaar/my-music-app | 0 | 12763825 | from django.db import models
class Artist(models.Model):
    """Database model for the Artists"""
    # Display name; unique so the same artist cannot be created twice.
    name = models.CharField('Name', max_length=50, unique=True)
    # Optional URL of a poster/portrait image for the artist.
    artist_poster = models.URLField(blank=True, null=True)
    # URL-friendly identifier; optional, but unique when present.
    slug = models.SlugField(max_length=50, null=True, blank=True, unique=True)
    class Meta:
        verbose_name = 'Artist'
        verbose_name_plural = 'Artists'
    def __str__(self):
        # Human-readable representation used by the Django admin.
        return self.name
class Album(models.Model):
    """Database model for the Albums"""
    # Album title; not unique — different artists may reuse titles.
    title = models.CharField('Album Title', max_length=50)
    # Release year (positive integer, no range validation here).
    year = models.PositiveIntegerField('Year')
    # many albums can be related to the same artist
    artist = models.ForeignKey(Artist, on_delete=models.CASCADE, related_name="albums")
    # Optional URL of the album cover image.
    album_poster = models.URLField(blank=True, null=True)
    class Meta:
        verbose_name = 'Album'
        verbose_name_plural = 'Albums'
    def __str__(self):
        # Human-readable representation used by the Django admin.
        return self.title
class Song(models.Model):
    """Database model for the Songs"""
    # Song title.
    title = models.CharField('Song', max_length=50);
    # many songs can be related to the same album
    album = models.ForeignKey(Album, on_delete=models.CASCADE, related_name="songs")
    class Meta:
        verbose_name = 'Song'
        verbose_name_plural = 'Songs'
    def __str__(self):
        # Human-readable representation used by the Django admin.
        return self.title
| 2.9375 | 3 |
python/clx/parsers/splunk_notable_parser.py | gbatmaz/clx | 0 | 12763826 | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import cudf
from clx.parsers.event_parser import EventParser
log = logging.getLogger(__name__)
class SplunkNotableParser(EventParser):
    """Parses Splunk "notable" event logs into structured columns."""

    REGEX_FILE = "resources/splunk_notable_regex.yaml"
    EVENT_NAME = "notable"

    def __init__(self):
        """Load the notable-event regex definitions and initialise the
        base EventParser with their column names.
        """
        regex_filepath = (
            os.path.dirname(os.path.abspath(__file__)) + "/" + self.REGEX_FILE
        )
        self.event_regex = self._load_regex_yaml(regex_filepath)
        # Bug fix: the original passed the keys of a throwaway empty local
        # dict (`event_regex = {}`) here, so the base class was initialised
        # with no event columns at all. Pass the loaded regexes instead.
        EventParser.__init__(self, self.event_regex.keys(), self.EVENT_NAME)

    def parse(self, dataframe, raw_column):
        """Parses the Splunk notable raw events.

        :param dataframe: Raw events to be parsed.
        :type dataframe: cudf.DataFrame
        :param raw_column: Raw data contained column name.
        :type raw_column: string
        :return: parsed information.
        :rtype: cudf.DataFrame
        """
        # Strip escape backslashes so the raw data is consistent before
        # regex extraction.
        dataframe[raw_column] = dataframe[raw_column].str.replace("\\\\", "")
        parsed_dataframe = self.parse_raw_event(dataframe, raw_column, self.event_regex)
        # Replace null values of all columns with empty strings.
        parsed_dataframe = parsed_dataframe.fillna("")
        # Post-processing for the src_ip and dest_ip fallback columns.
        parsed_dataframe = self._process_ip_fields(parsed_dataframe)
        return parsed_dataframe

    def _process_ip_fields(self, parsed_dataframe):
        """Replace empty src_ip/dest_ip values with their src_ip2/dest_ip2
        fallbacks, then drop the helper columns.
        """
        for ip in ["src_ip", "dest_ip"]:
            log.debug("******* Processing %s *******" % (ip))
            ip2 = ip + "2"
            ip_len = ip + "_len"
            # Length of each value: zero length marks an empty ip field.
            parsed_dataframe[ip_len] = parsed_dataframe[ip].str.len()
            # Split records into empty-ip and non-empty-ip partitions.
            tmp_dataframe = parsed_dataframe[parsed_dataframe[ip_len] == 0]
            parsed_dataframe = parsed_dataframe[parsed_dataframe[ip_len] != 0]
            if not tmp_dataframe.empty:
                log.debug("tmp_dataframe size %s" % (str(tmp_dataframe.shape)))
                # Backfill the empty ip values from the fallback column.
                tmp_dataframe[ip] = tmp_dataframe[ip2]
            if not parsed_dataframe.empty:
                log.debug(
                    "parsed_dataframe is not empty %s"
                    % (str(parsed_dataframe.shape))
                )
                # Recombine the two partitions when both are non-empty.
                parsed_dataframe = cudf.concat([parsed_dataframe, tmp_dataframe])
            else:
                # All records had an empty ip: keep only the backfilled set.
                parsed_dataframe = tmp_dataframe
            # Drop the helper columns; the data now lives in the ip column.
            parsed_dataframe = parsed_dataframe.drop([ip_len, ip2])
        return parsed_dataframe
| 2.390625 | 2 |
arachne/tests/test_extensions.py | sliderSun/arachne | 137 | 12763827 | <filename>arachne/tests/test_extensions.py<gh_stars>100-1000
"""
To see if we have the right pipelines in place
"""
import inspect
from unittest import TestCase
from scrapy import signals, Field, Item
from mock import patch, mock_open, Mock, call
from arachne.extensions import ExportCSV, ExportData, ExportJSON
from scrapy.contrib.exporter import CsvItemExporter, JsonItemExporter
class ScrapyItem(Item):
    """Minimal Scrapy item with three fields, used as a test fixture."""
    field1 = Field()
    field2 = Field()
    field3 = Field()
class TestPipelines(TestCase):
    """Checks that the export extensions wire up crawler signals and the
    correct item exporters."""

    def test_cls_export_data(self):
        """ExportData exposes from_crawler, leaves spider_opened abstract,
        connects crawler signals, and starts with empty state."""
        cls = ExportData()
        self.assertTrue(inspect.ismethod(cls.from_crawler))
        with self.assertRaises(NotImplementedError):
            cls.spider_opened('test')
        # TODO: test extension signals connect using `mock.assert_has_calls`
        crawler_mock = Mock()
        cls.from_crawler(crawler_mock)
        assert crawler_mock.signals.connect.called
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(cls.files, {})
        self.assertIsNone(cls.exporter)

    def test_export_cls(self):
        """Each concrete export extension opens the expected file path,
        builds the right exporter type, clears state on close, and passes
        scraped items through unchanged."""
        test_classes = [
            {'cls': ExportJSON,
             'file_type': 'json',
             'exporter': JsonItemExporter},
            {'cls': ExportCSV,
             'file_type': 'csv',
             'exporter': CsvItemExporter}
        ]
        for test_cls in test_classes:
            cls = test_cls['cls']()
            mock_open_func = mock_open(read_data='Hello')
            spider = Mock()
            spider.name = 'abc'
            with patch('arachne.extensions.open', mock_open_func):
                cls.spider_opened(spider)
            path = 'exports/%s/abc.%s' % (test_cls['file_type'],
                                          test_cls['file_type'])
            mock_open_func.assert_called_with(path, 'w+b')
            self.assertIsInstance(cls.exporter, test_cls['exporter'])
            # spider_closed must clear the per-spider file registry.
            cls.spider_closed(spider)
            self.assertEqual(cls.files, {})
            # item_scraped must return the item it was given.
            item = ScrapyItem()
            result = cls.item_scraped(item, spider)
            self.assertEqual(item, result)
| 2.34375 | 2 |
test/examples/simple/tlm2/blocking_simple/initiator.py | rodrigomelo9/uvm-python | 140 | 12763828 | #//----------------------------------------------------------------------
#// Copyright 2010-2011 Mentor Graphics Corporation
#// Copyright 2010-2011 Synopsys, Inc
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
import cocotb
from uvm import (UVMComponent, uvm_component_utils, UVMTLMTime,
UVMTLMBInitiatorSocket)
from uvm.macros import *
from apb_rw import apb_rw
class initiator(UVMComponent):
    """TLM initiator that drives blocking APB read-modify-write
    transactions through a UVMTLMBInitiatorSocket."""

    def __init__(self, name="initiator", parent=None):
        super().__init__(name, parent)
        # Blocking initiator socket carrying apb_rw transactions.
        self.sock = UVMTLMBInitiatorSocket("sock", self)

    async def run_phase(self, phase):
        """Execute a simple read-modify-write sequence ten times."""
        tlm_delay = UVMTLMTime()
        phase.raise_objection(self)
        for idx in range(10):
            txn = apb_rw.type_id.create("rw", None, self.get_full_name())
            txn.kind = apb_rw.READ
            txn.addr = 0x0000FF00
            txn.data = idx + 1
            await self.sock.b_transport(txn, tlm_delay)
            # Ok to reuse the same transaction instance for the write-back.
            txn.kind = apb_rw.WRITE
            txn.data = ~txn.data
            await self.sock.b_transport(txn, tlm_delay)
        phase.drop_objection(self)
uvm_component_utils(initiator)
| 1.5625 | 2 |
Fun Excercise/factorial.py | NirmalSilwal/Python- | 32 | 12763829 | n = int(input('enter number to find the factorial: '))
fact=1;
for i in range(1,n+1,1):
fact=fact*i
print(fact)
| 4.1875 | 4 |
python/sandbox.py | LightningDash1755/HE | 61 | 12763830 | # import gettext
# gettext.bindtextdomain('messages', '/var/www/locale')
# gettext.textdomain('messages')
# t = gettext.translation('pt', '/var/www/locale', fallback=True)
# _ = t.ugettext
# # ...
# print _('Missing fields.')
print _('Task Managerr') | 2.15625 | 2 |
motivate.py | timitoc/motivate | 0 | 12763831 | <reponame>timitoc/motivate<filename>motivate.py
#!/usr/bin/env python3
import json
import os
import random
# Pick a random quote file from the data directory and print one random
# quote from it with ANSI colouring.
scriptpath = os.path.dirname(__file__)
data_dir = os.path.join(scriptpath, 'data')
# Select from the actual *.json listing instead of counting files and
# rebuilding a zero-padded name: this keeps working when the NNN.json
# files are not contiguous or other files live alongside them.
json_files = sorted(
    f for f in os.listdir(data_dir)
    if os.path.isfile(os.path.join(data_dir, f)) and f.endswith('.json')
)
filename = os.path.join(data_dir, random.choice(json_files))
with open(filename) as json_data:
    quotes = json.load(json_data)
# random.choice replaces the manual randint/index arithmetic.
entry = random.choice(quotes["data"])
quote = "\033[1;36m" + "\"" + entry["quote"] + "\"" + "\033[1;m"
author = "\033[1;35m" + "--" + entry["author"] + "\033[1;m"
output = quote + "\n\t\t" + author
print(output)
| 2.6875 | 3 |
python-fastapi-ca/src/infrastructure/students/repo/impl/MongoDBStudentRepositoryImpl1.py | solmars/python-fastapi-clean-architecture | 0 | 12763832 | <gh_stars>0
from typing import Union
from inject import autoparams
from loaders.database import DatabaseProvider
from models.student.StudentModel import StudentModel, UpdateStudentModel
from infrastructure.students.repo.StudentRepository import StudentRepository
class MongoDBStudentRepository(StudentRepository):
    """MongoDB-backed implementation of StudentRepository operating on the
    `students` collection."""
    @autoparams()
    def __init__(self, provider: DatabaseProvider):
        # Handle to the `students` collection; every query goes through it.
        self.dbTable = provider.getMongo_DB()["students"]
    # all of the following should be dtos on return
    def get_all(self) -> list[StudentModel]:
        """Return every student document as a list."""
        students = self.dbTable.find()
        return list(students)
    def create(self, student: StudentModel) -> StudentModel:
        """Insert a student and return the stored document (re-fetched by
        its generated _id)."""
        new_student = self.dbTable.insert_one(student)
        created_student = self.dbTable.find_one(
            {"_id": new_student.inserted_id})
        return created_student
    def get_one(self, id: Union[str, int]):
        """Return the document with the given _id, or None when absent."""
        return self.dbTable.find_one({"_id": id})
    def update(self, id, student: UpdateStudentModel):
        """Apply the non-None fields of `student` to document `id`.

        Returns the updated document, or None (implicitly) when there is
        nothing to update, the id does not exist, or the update changed
        nothing.
        """
        # Drop unset fields so a partial update does not null-out data.
        student = {k: v for k, v in student.dict().items() if v is not None}
        if len(student) >= 1:
            update_result = self.dbTable.update_one(
                {"_id": id}, {"$set": student})
            # modified_count is 0 both for a missing id and for a no-op
            # update; either way no document is returned.
            if update_result.modified_count == 1:
                if (
                    updated_student := self.dbTable.find_one({"_id": id})
                ) is not None:
                    return updated_student
    def delete(self, id):
        """Delete the document with the given _id.

        Returns True when a document was removed, False otherwise.
        """
        result = self.dbTable.delete_one({"_id": id})
        if result.deleted_count == 0:
            return False
        return True
| 2.4375 | 2 |
pycodeanalyzer/core/configuration/__init__.py | miong/pycodeanalyzer | 3 | 12763833 | <gh_stars>1-10
"""Configuration package.
This package handle all information that could be given to pycodeanalyzer in the configuration.
"""
| 1.171875 | 1 |
manast_database/migrations/0001_initial.py | oierajenjo/ManagementAssistant | 0 | 12763834 | # Generated by Django 2.2.13 on 2020-06-30 12:15
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('manast_site', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='name_category')),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manast_site.Profile')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='name_item')),
('category', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='manast_database.Category', verbose_name='category_item')),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manast_site.Profile')),
],
options={
'verbose_name': 'Item',
'verbose_name_plural': 'Items',
},
),
migrations.CreateModel(
name='Sale',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.DecimalField(decimal_places=4, default=1.0, max_digits=10, verbose_name='quantity_sale')),
('price', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, verbose_name='price_sale')),
('cost', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, verbose_name='cost_sale')),
('date', models.DateField(default=datetime.date.today, verbose_name='date_sale')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manast_database.Item', verbose_name='item_sale')),
('shop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manast_site.Shop')),
],
options={
'verbose_name': 'Sale',
'verbose_name_plural': 'Sales',
},
),
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.DecimalField(decimal_places=2, default=1.0, max_digits=10, verbose_name='quantity_expense')),
('cost', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, verbose_name='cost_expense')),
('date', models.DateField(default=datetime.date.today, verbose_name='date_exp')),
('periodicity', models.IntegerField(default=1, verbose_name=[(1, 'Daily'), (2, 'Weekly'), (3, 'Monthly'), (4, 'Annual')])),
('repeat', models.BooleanField(default=False, verbose_name='repeat_expense')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manast_database.Item', verbose_name='item_exp')),
('shop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manast_site.Shop')),
],
options={
'verbose_name': 'Expense',
'verbose_name_plural': 'Expenses',
},
),
]
| 1.867188 | 2 |
boardom/config/common.py | dmarnerides/boardom | 2 | 12763835 | <filename>boardom/config/common.py
import torch
from boardom import str2bool, process_path
import torchvision as tv
# MODEL CONFIG
_UPSAMPLE_TYPES = ['transpose', 'nearest', 'bilinear']
_DOWNSAMPLE_TYPES = [
'strided',
'nearest',
'bilinear',
'strided_preact_bbn',
'nearest_preact_bbn',
'bilinear_preact_bbn',
]
_BLOCK_TYPES = [
'a',
'n',
'c',
'cna',
'nac',
'r_orig',
'r_preact',
'r_preact_c',
'r_preact_bbn',
]
_UNET_FUSION_TYPES = ['lms_guided_filter', 'guided_filter', 'cat', 'add']
_UNET_TYPES = ['original', 'old_gunet', 'custom', 'pretrained']
# The lists here (temporarily) hold the elements defined in the
# configuration files
# They hold the elements per line as they are parsed.
def _create_datum(value, groups=None, tags=None, meta=None):
    """Build a single configuration datum keyed by its Group.

    Returns {Group: {'value': value, 'tags': [...], 'meta': {...}}},
    validating that meta is a dict and tags is a list.
    """
    meta = {} if meta is None else meta
    if not isinstance(meta, dict):
        raise RuntimeError('Meta property must be a dictionary.')
    tags = [] if tags is None else tags
    if not isinstance(tags, list):
        raise RuntimeError('Tags property must be a list.')
    group = Group() if groups is None else Group(groups)
    return {group: {'value': value, 'tags': tags, 'meta': meta}}
class Group(set):
    """A set of group names with a canonical dotted string form.

    The empty set is the "default" group; the sentinel name
    'default_grp' and None are never stored as members.
    """
    _SEPARATOR = '.'
    _DEFAULT_STR = 'default_grp'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Scrub sentinel members that may arrive via the initial iterable.
        self.discard(Group._DEFAULT_STR)
        self.discard(None)

    def add(self, key):
        # Silently ignore the default sentinel and None.
        if key not in (None, Group._DEFAULT_STR):
            super().add(key)

    def __str__(self):
        # Canonical form: members sorted and joined with the separator.
        return Group._SEPARATOR.join(sorted(self))

    @property
    def is_default(self):
        return not self

    @staticmethod
    def from_full_argname(arg_name):
        # Split "name.g1.g2" into ("name", Group({'g1', 'g2'})).
        parts = arg_name.split(Group._SEPARATOR)
        return parts[0], Group(parts[1:])

    def build_full_argname(self, arg_name):
        if self.is_default:
            return arg_name
        return '{}{}{}'.format(arg_name, Group._SEPARATOR, self)

    def __hash__(self):
        # Hash the canonical string so equal groups hash equally.
        return hash(str(self))
def _is_valid_argname(x):
return isinstance(x, str) and x.isidentifier() and (not x.startswith('_'))
# Optimizer names accepted by the 'optimizer' setting.
OPTIMIZERS = [
    'adam',
    'adamw',
    'sgd',
    'adadelta',
    'adagrad',
    'sparseadam',
    'adamax',
    'rmsprop',
]
# Groups of setting names, keyed by the subsystem that consumes them.
DEVICE_KEYS = ['device', 'cudnn_benchmark']
CRITERIA_KEYS = ['criteria', 'criterion_weight']
OPTIMIZER_KEYS = [
    'optimizer',
    'lr',
    'momentum',
    'dampening',
    'beta1',
    'beta2',
    'rho',
    'alpha',
    'centered',
    'lr_decay',
    'weight_decay',
    'find_good_lr',
]
DATALOADER_KEYS = [
    'num_workers',
    'batch_size',
    'shuffle',
    'pin_memory',
    'drop_last',
    'timeout',
    'prefetch_factor',
    'persistent_workers',
]
CHECKPOINT_KEYS = ['overwrite', 'strict', 'use_timestamps']
IMAGE_SAMPLER_KEYS = ['overwrite', 'use_timestamps', 'extension']
# Dataset names that can be loaded through torchvision.
TORCHVISION_DATASETS = [
    'mnist',
    'fashionmnist',
    'cifar10',
    'cifar100',
]
# Arguments filled in automatically at runtime rather than by the user
# (see UNTOUCHABLES below).
AUTOMATIC_ARGS = [
    'process_id',
    'time_configured',
    'session_path',
]
CORE_SETTINGS = [
dict(
flag='--create_session',
type=str2bool,
default=False,
help='Save session data in the .session.bd.json file.',
),
dict(
flag='--project_path',
type=process_path,
default='.',
help='Root directory for placing session sub-directories.',
),
dict(
flag='--session_name',
default='bd_session',
help='Name of session',
),
dict(
flag='--log_stdout',
type=str2bool,
default=False,
help=('Output all stdout to a log file.'),
),
dict(
flag='--copy_config_files',
type=str2bool,
default=False,
help=(
'Copy configuration files (.bd files)' ' used when launching main script.'
),
),
dict(
flag='--print_cfg',
type=str2bool,
default=False,
help='Print configuration when setup() is done.',
),
dict(
flag='--save_full_config',
type=str2bool,
default=False,
help='Save full configuration when setup() is done.',
),
dict(
flag='--log_csv',
type=str2bool,
default=False,
help='Log stuff in csv files.',
),
dict(
flag='--log_tensorboard',
type=str2bool,
default=False,
help='Use tensorboard.',
),
dict(flag='--log_boardom', type=str2bool, default=False, help='Use boardom.'),
dict(
flag='--autocommit',
type=str2bool,
default=False,
help=('Autocommit on a separate branch'),
),
dict(
flag='--only_run_same_hash',
type=str2bool,
default=False,
help=(
'Only run code that matches the previous automatically generated git hash.'
),
),
]
EXTRA_SETTINGS = [
dict(flag='--train', type=str2bool, default=True, help='Do training.'),
dict(flag='--validate', type=str2bool, default=False),
dict(flag='--test', type=str2bool, default=False, help='Do testing.'),
dict(
flag='--max_epochs',
type=int,
default=1000,
help='Maximum number of epochs',
),
# Frequencies default to -1 such that they are not unintentionally used
dict(
flag='--per_step',
type=int,
default=-1,
),
dict(
flag='--per_epoch',
type=int,
default=-1,
),
dict(
flag='--per_minute',
type=float,
default=-1,
),
dict(
flag='--timestamp',
type=str2bool,
default=True,
),
dict(flag='--device', type=torch.device, default='cpu', help='Device to use'),
dict(
flag='--cudnn_benchmark',
type=str2bool,
default=False,
help='Use cudnn benchmark mode',
),
dict(
flag='--criteria',
nargs='+',
default=[],
help='Criteria to use',
),
dict(
flag='--criterion_weight',
type=float,
default=1.0,
help='Weights for criteria.',
),
dict(
flag='--metrics',
nargs='+',
default=[],
help='Criteria to use',
),
dict(
flag='--optimizer',
type=str.lower,
choices=OPTIMIZERS,
default='adam',
help='Optimizer',
),
dict(flag='--lr', type=float, default=1e-3, help='Learning rate'),
dict(flag='--momentum', type=float, default=0.9, help='SGD Momentum'),
dict(flag='--dampening', type=float, default=0.0, help='SGD Dampening'),
dict(flag='--beta1', type=float, default=0.9, help='Adam beta1 parameter'),
dict(flag='--beta2', type=float, default=0.999, help='Adam beta2 parameter'),
dict(flag='--rho', type=float, default=0.9, help='Adadelta rho parameter'),
dict(
flag='--alpha',
type=float,
default=0.99,
help='RMSprop alpha parameter',
),
dict(
flag='--centered',
type=str2bool,
default=False,
help='RMSprop centered flag',
),
dict(flag='--lr_decay', type=float, default=0.0, help='Adagrad lr_decay'),
dict(
flag='--optim_eps',
type=float,
default=1e-8,
help='Term added to denominator for numerical stability.',
),
dict(
flag='--weight_decay',
type=float,
default=0.0,
help='Weight decay / L2 regularization.',
),
dict(
flag='--find_good_lr',
type=str2bool,
default=False,
help='Find best lr',
),
dict(
flag='--num_workers',
type=int,
default=0,
help='Number of data loading threads',
),
dict(flag='--batch_size', type=int, default=1, help='Batch size for loader'),
dict(
flag='--shuffle',
type=str2bool,
default=True,
help='Loader shuffles data each epoch',
),
dict(
flag='--pin_memory',
type=str2bool,
default=False,
help='Pin tensor memory for efficient GPU loading',
),
dict(
flag='--drop_last',
type=str2bool,
default=False,
help='Drop last batch if its size is less than batch size',
),
dict(
flag='--timeout',
type=float,
default=0,
help='Timeout for data loader.',
),
dict(flag='--prefetch_factor', type=int, default=2),
dict(flag='--persistent_workers', type=str2bool, default=False),
dict(flag='--overwrite', type=str2bool, default=True),
dict(flag='--strict', type=str2bool, default=True),
dict(flag='--use_timestamps', type=str2bool, default=True),
dict(flag='--save_state_dicts', type=str2bool, default=True),
dict(flag='--display', type=str2bool, default=False),
dict(flag='--save', type=str2bool, default=False),
dict(
flag='--seed',
type=int,
default=None,
help='Seed for random number generation.',
),
dict(
flag='--data_root_path', type=process_path, default='.', help='Data directory'
),
dict(flag='--dataset', default=None, help='Dataset in use.'),
dict(
flag='--torchvision_dataset',
type=str.lower,
choices=TORCHVISION_DATASETS,
default=None,
help='Specific dataset to use',
),
dict(
flag='--download',
type=str2bool,
default=True,
help='Download the dataset.',
),
dict(
flag='--grow_dataset',
type=int,
default=1,
help='Growth factor for bd.GrowDataset.',
),
dict(
flag='--load_in_memory',
type=str2bool,
default=False,
help='Load data in memory',
),
dict(
flag='--load_encoded',
type=str2bool,
default=False,
help='Whether to load encoded',
),
dict(
flag='--encoded_positions',
type=int,
nargs='+',
default=[0],
help='Positions of images that the dataset returns.',
),
dict(
flag='--compress_loaded',
type=int,
default=0,
help=(
'Compress Loaded datasets (if using LoadedDataset). '
'[0-9] with 0 being no compression.'
),
),
dict(
flag='--data_extensions',
nargs='+',
default=['jpg', 'png', 'hdr', 'exr', 'pfm'],
help='Extensions of data to load.',
),
dict(
flag='--lr_schedule',
choices=['plateau', 'step', 'none'],
default='step',
help='Learning rate schedule',
),
dict(
flag='--lr_step_size',
type=int,
default=100,
help='Epochs per learning rate decrease (step).',
),
dict(
flag='--lr_patience',
type=int,
default=10,
help='Epochs of patience for metric (plateau).',
),
dict(
flag='--lr_cooldown',
type=int,
default=0,
help='Epochs of cooldown period after lr change (plateau).',
),
dict(
flag='--lr_min',
type=float,
default=1e-7,
help='Minimum learning rate (plateau)',
),
dict(
flag='--lr_ratio',
type=float,
default=0.5,
help='Ratio to decrease learning rate by (all)',
),
dict(
flag='--dataparallel',
type=int,
nargs='+',
default=None,
help='Use dataparallel module.',
),
dict(
flag='--normalization',
default='batchnorm2d',
help='Normalization module.',
),
dict(
flag='--activation',
default='relu',
help='Activation module.',
),
dict(
flag='--network_name',
default='resnet18',
choices=tv.models.resnet.__all__,
help='Pretrained resnet network name.',
),
dict(
flag='--num_pretrained_layers',
type=int,
default=5,
help='Number of pretrained layers for PretrainedResnet.',
),
dict(
flag='--freeze_batchnorm',
type=str2bool,
default=True,
help='Freeze batch normalization for pretrained networks.',
),
dict(
flag='--split_before_relus',
type=str2bool,
default=False,
help='Skip encoder features before relus are evaluated for pretrained resnets.',
),
dict(
flag='--leaky_relu_slope',
type=float,
default=0.01,
help='Slope for leaky relu activation.',
),
dict(
flag='--kernel_size',
type=int,
default=3,
help='Kernel size.',
),
dict(
flag='--kernel_sizes',
nargs='*',
type=int,
default=[3],
help='Kernel sizes.',
),
dict(
flag='--downsample',
default='strided',
choices=_DOWNSAMPLE_TYPES,
help='Downsampling type.',
),
dict(
flag='--upsample',
default='transpose',
choices=_UPSAMPLE_TYPES,
help='Upsampling type.',
),
dict(
flag='--fusion', default='cat', choices=_UNET_FUSION_TYPES, help='Fusion type.'
),
dict(flag='--epsilon', type=float, default=1e-3, help='Generic epsilon value.'),
dict(
flag='--learn_epsilon',
type=str2bool,
default=True,
help='LearnEpsilonValue.',
),
dict(
flag='--bottleneck_gf_adapter',
type=str2bool,
default=True,
help='Bottleck gf adapter for large channel sizes by a factor of 4.',
),
dict(
flag='--grouped_gf',
type=str2bool,
default=False,
help='Grouped multiplication for guided filter',
),
dict(
flag='--norm_eps',
type=float,
default=1e-5,
help='Normalization eps value. (BN default)',
),
dict(
flag='--norm_momentum',
type=float,
default=0.1,
help='Normalization momentum value. (BN default)',
),
dict(
flag='--norm_affine',
type=str2bool,
default=True,
help='Normalization affine value. (BN default)',
),
dict(
flag='--norm_track_running_stats',
type=str2bool,
default=True,
help='Normalization track running stats value. (BN default)',
),
dict(
flag='--hidden_features',
type=int,
nargs='+',
default=[64, 128, 256, 512],
help='Hidden features.',
),
dict(flag='--in_features', type=int, default=3, help='Input features.'),
dict(flag='--out_features', type=int, default=3, help='Output features.'),
dict(
flag='--block_types',
nargs='+',
default=['r_preact'],
choices=_BLOCK_TYPES,
help='Type of blocks.',
),
dict(flag='--unet_type', default='custom', choices=_UNET_TYPES, help='UNet type'),
dict(
flag='--first_conv_then_resize',
type=str2bool,
default=False,
help='First do convolution and then resize.',
),
dict(flag='--initializer', default='kaimingnormal', help='Initializer'),
dict(flag='--uniform_a', type=float, default=0, help='a value for uniform init.'),
dict(flag='--uniform_b', type=float, default=1, help='b value for uniform init.'),
dict(
flag='--normal_mean',
type=float,
default=0,
help='mean value for normal init.',
),
dict(
flag='--normal_std',
type=float,
default=1,
help='std value for normal init.',
),
dict(
flag='--constant_val',
type=float,
default=0,
help='val value for constant init.',
),
dict(
flag='--dirac_groups',
type=int,
default=1,
help='groups value for dirac init',
),
dict(
flag='--xavier_gain',
type=float,
default=1,
help='gain value for xavier init',
),
dict(
flag='--kaiming_a',
type=float,
default=0,
help='a value for kaiming init',
),
dict(
flag='--kaiming_mode',
default='fan_in',
choices=['fan_in', 'fan_out'],
help='mode for kaiming init',
),
dict(
flag='--kaiming_nonlinearity',
default='leaky_relu',
choices=['relu', 'leaky_relu'],
help='nonlinearity for kaiming init',
),
dict(
flag='--orthogonal_gain',
type=float,
default=1.0,
help='gain for orthogonal init',
),
dict(
flag='--sparse_sparsity',
type=float,
default=1.0,
help='sparsity for sparse init',
),
dict(
flag='--sparse_std',
type=float,
default=0.01,
help='std for sparse init',
),
dict(flag='--extension', default='.jpg'),
dict(
flag='--log_all_iterations',
type=str2bool,
default=True,
help='Log all iterations.',
),
dict(
flag='--log_averages',
type=str2bool,
default=True,
help='Log averages.',
),
]
# Argument names (flag minus the leading '--') of the core settings.
CORE_ARGNAMES = [x['flag'][2:] for x in CORE_SETTINGS]
# Default configuration definition, split into core and extra settings.
DEFAULT_CFG_DICT = {'core': CORE_SETTINGS, 'extra': EXTRA_SETTINGS}
# UNTOUCHABLEs cannot be added to categories or groups
# and can't be changed after setup
UNTOUCHABLES = AUTOMATIC_ARGS + CORE_ARGNAMES
| 2.125 | 2 |
bot/Events.py | RamaDev09/CrateBot | 0 | 12763836 | <reponame>RamaDev09/CrateBot
from __future__ import print_function, unicode_literals
import os
from bot.TextInput import TextInput
from bot.prompt import color_msg
# Python Event
def CustomPythonEvent(name, extensions):
    """Create a new event cog file ``cogs/events/<name>.<extensions>``.

    The file content is generated by TextInput.EventPy. Reports the
    outcome via color_msg instead of raising.
    """
    here = os.getcwd()
    # Build the path portably instead of concatenating with '/'.
    path = os.path.join(here, "cogs", "events", name + "." + extensions)
    try:
        # Mode "x" fails if the file already exists, preventing overwrites.
        with open(path, "x") as f:
            f.write(TextInput().EventPy(name=name))
        color_msg("#00FF00", "Event Created")
    except FileExistsError:
        # Fixed message typo ("Exits" -> "Exists").
        color_msg("#ff0000", "Event Already Exists")
    except FileNotFoundError:
        # cogs/events/ does not exist: we're not inside a bot project.
        color_msg("#ff0000", "Make sure you are in Bot Project")
bot/reviewbot/tools/pycodestyle.py | reviewboard/ReviewBot | 91 | 12763837 | <reponame>reviewboard/ReviewBot
"""Unit tests for reviewbot.tools.pycodestyle."""
from __future__ import unicode_literals
from reviewbot.config import config
from reviewbot.tools import BaseTool
from reviewbot.utils.process import execute
class PycodestyleTool(BaseTool):
    """Review Bot tool that runs pycodestyle over changed Python files."""

    name = 'pycodestyle'
    version = '1.0'
    description = 'Checks Python code for style errors.'
    timeout = 30

    exe_dependencies = ['pycodestyle']
    file_patterns = ['*.py']

    options = [
        {
            'name': 'max_line_length',
            'field_type': 'django.forms.IntegerField',
            'default': 79,
            'field_options': {
                'label': 'Maximum Line Length',
                'help_text': 'The maximum line length to allow.',
                'required': True,
            },
        },
        {
            'name': 'ignore',
            'field_type': 'django.forms.CharField',
            'default': '',
            'field_options': {
                'label': 'Ignore',
                'help_text': ('A comma-separated list of errors and warnings '
                              'to ignore. This will be passed to the --ignore '
                              'command line argument (e.g. E4,W).'),
                'required': False,
            },
        },
    ]

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        settings = self.settings

        # The custom --format makes each result trivially machine-parseable
        # in handle_file (code:row:col:text).
        command = [
            config['exe_paths']['pycodestyle'],
            '--max-line-length=%s' % settings['max_line_length'],
            '--format=%(code)s:%(row)d:%(col)d:%(text)s',
        ]

        ignore = settings.get('ignore', '').strip()

        if ignore:
            command.append('--ignore=%s' % ignore)

        return command

    def handle_file(self, f, path, base_command, **kwargs):
        """Perform a review of a single file.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            base_command (list of unicode):
                The base command used to run pycodestyle.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        lines = execute(base_command + [path],
                        split_lines=True,
                        ignore_errors=True)

        for line in lines:
            # Each line follows the --format above; split at most 3 times so
            # colons inside the message text are preserved.
            try:
                error_code, row, col, text = line.split(':', 3)
                row = int(row)
                col = int(col)
            except Exception as e:
                self.logger.error('Cannot parse pycodestyle line "%s": %s',
                                  line, e)
                continue

            f.comment(text=text.strip(),
                      first_line=row,
                      start_column=col,
                      error_code=error_code)
| 2.5 | 2 |
multi_ear_services/util/__init__.py | Multi-EAR/Multi-EAR-software | 0 | 12763838 | """
multi_ear_services.util init
"""
# import all modules
from ..util.dataselect import DataSelect
from ..util.is_raspberry_pi import is_raspberry_pi
from ..util.parse_config import parse_config
__all__ = ['DataSelect', 'is_raspberry_pi', 'parse_config']
| 1.25 | 1 |
Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/PRACTICA 1/CAPITULO 3/YouOwnList.py | Arbupa/DAS_Sistemas | 41 | 12763839 | Automoviles=['Dodge Challeger', 'VW Gti', 'Jeep Rubicon', 'Alfa Romeo Quadro', 'Ford ST', 'Dodge RAM', 'Ford FX4']
M1="Me gustaria compar un " + Automoviles[0].title()+"."
M2="Mi vecino choco su nuevo " + Automoviles[1].title() + "."
M3="El nuevo " + Automoviles[2].title()+ " es mucho mas economico."
M4="Hay una gran diferencia entre el " + Automoviles[3].title() + " y el " + Automoviles[4].title()+"."
M5="La camioneta " + Automoviles[5].title() + " es de gasolina, mientras que la " + Automoviles[6].title() +" es de Diesel."
print(M1)
print(M2)
print(M3)
print(M4)
print(M5)
| 3.03125 | 3 |
tensorflow2/IPU-replicas.py | Alwaysproblem/explore-ipu | 0 | 12763840 | from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import utils
from tensorflow.python.ipu import ipu_strategy
import tensorflow as tf
# The dataset for feeding the graphs
ds = tf.data.Dataset.from_tensors(tf.constant(1.0, shape=[800]))
ds = ds.map(lambda x: [x, x])
ds = ds.repeat()
for xt, yt in ds.take(1):
print(xt)
print(yt)
# The host side queues
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(ds, feed_name="infeed")
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(feed_name="outfeed")
# The device side main
def body(counter, x1, x2):
    # One on-device loop iteration; x1/x2 are supplied per step (they are
    # driven by the infeed queue when run through loops.repeat below).
    d1 = x1 + x2
    d2 = x1 - x2
    counter += 1
    # Stream both results back to the host through the outfeed queue.
    outfeed_queue.enqueue({'d1': d1, 'd2': d2})
    return counter
@tf.function(experimental_compile=True)
def my_net():
    # Run `body` 10 times on the IPU, feeding it from the infeed queue;
    # `count` is the loop-carried state (iterations executed).
    count = 0
    count = loops.repeat(10, body, [count], infeed_queue)
    return count
# Configure the hardware.
config = utils.create_ipu_config()
config = utils.auto_select_ipus(config, 1)
utils.configure_ipu_system(config)
# Initialize the IPU default strategy.
strategy = ipu_strategy.IPUStrategy()
with strategy.scope():
infeed_queue.initializer
count_out = strategy.experimental_run_v2(my_net)
print("counter", count_out)
# The outfeed dequeue has to happen after the outfeed enqueue op has been executed.
result = outfeed_queue.dequeue()
print("outfeed result", result) | 2.890625 | 3 |
src/lib/EUnNode.py | Amtoniusz/latte | 0 | 12763841 | <reponame>Amtoniusz/latte
from lib.compileException import compileException
class EUnNode():
    """AST node for a unary expression: ``-`` (int) or ``!`` (boolean)."""

    def __init__(self, exp_type, expr, op, line):
        self.exp_type = exp_type
        self.expr = expr
        self.op = op
        self.line = line
        self.const = None
        self.type = None
        # Eagerly attempt constant folding of the operand.
        self.get_const()

    def checkType(self, s):
        """Type-check the operand and verify it matches the operator."""
        operand_type = self.expr.checkType(s)
        self.type = operand_type
        if self.op == '-' and operand_type != 'int':
            raise compileException(
                f"operator (-) cant be used with type {operand_type} should be int :C",
                self.line)
        if self.op == '!' and operand_type != 'boolean':
            raise compileException(
                f"operator (!) cant be used with type {operand_type} should be boolean :C",
                self.line)
        return operand_type

    def get_const(self):
        """Fold the expression to a constant if the operand is constant."""
        folded = self.expr.get_const()
        if folded is None:
            return None
        self.type = self.expr.type
        if self.op == '-':
            if self.type != 'int':
                raise compileException(
                    f"operator (-) cant be used with type {self.type} should be int :C",
                    self.line)
            self.const = -folded
        if self.op == '!':
            if self.type != 'boolean':
                raise compileException(
                    f"operator (!) cant be used with type {self.type} should be boolean :C",
                    self.line)
            self.const = not folded
        return self.const

    def text(self):
        """Print a short debug representation of this node."""
        print(f"EUnary exp: {self.op}")
        self.expr.text()
        print(f" ")
| 3.078125 | 3 |
chmvh_website/team/migrations/0001_initial.py | chmvh/chmvh-website | 0 | 12763842 | <reponame>chmvh/chmvh-website<filename>chmvh_website/team/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-24 23:14
from __future__ import unicode_literals
from django.db import migrations, models
import team.models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``TeamMember`` model.

    Do not edit once applied; schema changes belong in new migrations.
    """

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="TeamMember",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("bio", models.TextField(verbose_name="biography")),
                (
                    "name",
                    models.CharField(
                        max_length=50, unique=True, verbose_name="name"
                    ),
                ),
                (
                    "picture",
                    # Upload path is computed by team.models.team_member_image_name.
                    models.ImageField(
                        blank=True,
                        null=True,
                        upload_to=team.models.team_member_image_name,
                    ),
                ),
            ],
        ),
    ]
| 1.703125 | 2 |
setup.py | alserranor/distributions | 0 | 12763843 | from setuptools import setup
setup(name = 'statDistributions',
version = '1.0',
description = 'Gaussian and Binominal distributions',
packages = ['statDistributions'],
author = '<NAME>',
author_email = '<EMAIL>',
zip_safe = False)
| 1.03125 | 1 |
city_housing_index/local_admin/domains/register_domain.py | Sinope-Nanto/city_house | 0 | 12763844 | from local_auth.models import UserProfile
from local_auth.serializers import UserProfileSerializer
from django.contrib.auth.models import User
def get_waiting_register_list():
    """Return serialized profiles of all users awaiting registration review."""
    pending = UserProfile.get_waiting_list()
    serializer = UserProfileSerializer(pending, many=True)
    return serializer.data
def get_waiting_register_detail(user_id):
    """Return one pending user's serialized profile plus identity details."""
    profile = UserProfile.get_by_user_id(user_id)
    detail = UserProfileSerializer(profile).data
    # Attach the identity fields that are not part of the serializer output.
    detail["identity"] = profile.identity
    detail["identity_image"] = profile.identity_image.url
    return detail
def accept_register_user(user_id) -> (bool, str):
    """Approve a pending registration and activate the auth user.

    Returns a (success, error_message) tuple; the message is empty on
    success.
    """
    # Fetch both records before mutating anything, so a missing auth.User
    # no longer raises an unhandled DoesNotExist after the profile was
    # already accepted.
    try:
        user_profile = UserProfile.get_by_user_id(user_id)
        user = User.objects.get(id=user_id)
    except (UserProfile.DoesNotExist, User.DoesNotExist):
        return False, "该用户不存在"
    user_profile.set_to_accept()
    # Activate the account so the user can log in.
    user.is_active = True
    user.save()
    return True, ""
def refuse_register_user(user_id) -> (bool, str):
    """Reject a pending registration; returns (success, error_message)."""
    try:
        UserProfile.get_by_user_id(user_id).set_to_refuse()
    except UserProfile.DoesNotExist:
        return False, "该用户不存在"
    return True, ""
| 2.328125 | 2 |
invocare/openssl/passphrase.py | jbronn/invocare-openssl | 0 | 12763845 | <reponame>jbronn/invocare-openssl
import os
def passphrase(arg):
    """Translate *arg* into an OpenSSL ``-pass``-style argument.

    'stdin' is passed through; an existing file path becomes ``file:``,
    a defined environment variable name becomes ``env:``, and anything
    else is treated as the literal passphrase (``pass:``).
    """
    if arg == 'stdin':
        return 'stdin'
    elif os.path.isfile(arg):
        return 'file:"%s"' % arg
    elif arg in os.environ:
        return 'env:%s' % arg
    else:
        # BUG FIX: the interpolation was missing, so the literal string
        # 'pass:"%s"' was returned instead of the passphrase.
        return 'pass:"%s"' % arg
| 2.328125 | 2 |
crawlplates/crawlplates/module/stock.py | ZombieIce/A-Stock-Plate-Crawling | 0 | 12763846 | class Stock:
def __init__(self, symbol, name):
self.__name = name
self.__symbol = symbol
self.__stockPlate = []
self.__carePlate = []
@property
def name(self):
return self.__name
@property
def symbol(self):
return self.__symbol
@property
def stockPlate(self):
return self.stockPlate
@property
def carePlate(self):
return self.__carePlate
def addCarePlate(self, cp):
if cp in self.__carePlate:
print("Already exist!")
else:
self.__carePlate.append(cp)
def addStockPlate(self, sp):
if sp in self.__stockPlate:
print("Already exist!")
else:
self.__stockPlate.append(sp)
# print("Success")
def formatPlateInfo(self):
# print(self.__carePlate)
return {"name": self.__name, "carePlate":self.__carePlate, "stockPlate": self.__stockPlate} | 3.59375 | 4 |
in-develop/src/autobridge/Codegen/InjectPipelineLogic.py | Blaok/AutoBridge | 76 | 12763847 | <gh_stars>10-100
from collections import defaultdict
from typing import List, Dict
from autobridge.Opt.Slot import Slot
def get_ap_start_pipeline_def(slot_list: List[Slot], pipeline_level=4) -> List[str]:
  """Generate Verilog that fans ap_start out to every slot through a
  `pipeline_level`-deep register chain (all registers kept for P&R).
  """
  out = []
  out.append('// ----- pipelining the ap_start signal -----')
  out.append('wire ap_start_orig;')
  for slot in slot_list:
    rtl = slot.getRTLModuleName()
    out.append(f'(* keep = "true" *) reg ap_start_{rtl};')
    for stage in range(pipeline_level):
      out.append(f'(* keep = "true" *) reg ap_start_{rtl}_q{stage};')
    out.append('always @ (posedge ap_clk) begin')
    out.append(f'  ap_start_{rtl}_q0 <= ap_start_orig;')
    for stage in range(1, pipeline_level):
      out.append(f'  ap_start_{rtl}_q{stage} <= ap_start_{rtl}_q{stage-1};')
    out.append(f'  ap_start_{rtl} <= ap_start_{rtl}_q{pipeline_level-1};')
    out.append('end')
    out.append('\n')
  out.append('// ----- end of pipelining the ap_start signal -----\n')

  # add two-space indentation to every emitted line
  return ['  ' + stmt for stmt in out]
def get_ap_rst_pipeline_def(slot_list: List[Slot], pipeline_level=4) -> List[str]:
  """Generate Verilog that pipelines the (inverted, active-high) reset
  derived from ap_rst_n into every slot.
  """
  out = []
  out.append('// ----- pipelining the ap_rst signal -----')
  for slot in slot_list:
    rtl = slot.getRTLModuleName()
    out.append(f'(* keep = "true" *) reg ap_rst_{rtl};')
    for stage in range(pipeline_level):
      out.append(f'(* keep = "true" *) reg ap_rst_{rtl}_q{stage};')
    out.append('always @ (posedge ap_clk) begin')
    # Stage 0 captures the active-high inversion of ap_rst_n.
    out.append(f'  ap_rst_{rtl}_q0 <= !ap_rst_n;')
    for stage in range(1, pipeline_level):
      out.append(f'  ap_rst_{rtl}_q{stage} <= ap_rst_{rtl}_q{stage-1};')
    out.append(f'  ap_rst_{rtl} <= ap_rst_{rtl}_q{pipeline_level-1};')
    out.append('end')
    out.append('\n')
  out.append('// ----- end of pipelining the ap_rst_ signal -----\n')

  # add two-space indentation to every emitted line
  return ['  ' + stmt for stmt in out]
def get_ap_done_pipeline_def(v_name_to_s: Dict[str, Slot], ap_done_module_list: List[str]) -> List[str]:
  """Generate Verilog that combines per-module ap_done signals into
  slot-level ap_done signals and then a design-level ap_done, with
  pipeline registers on every hop and reset hand-shaking at each level.
  """
  out = []
  out.append('// ----- pipelining the ap_done signal -----')

  # Group the ap_done-producing module instances by their slot.
  # Note that some modules do not have an ap_done at all.
  s_to_v_name_list = defaultdict(list)
  for v_name in ap_done_module_list:
    s_to_v_name_list[v_name_to_s[v_name]].append(v_name)

  # Register the ap_done of every module instance; held until reset.
  for v_name in ap_done_module_list:
    out.append(f'wire ap_done_{v_name};')
    out.append(f'(* keep = "true" *) reg ap_done_{v_name}_q0;')
  out.append('\n')

  # Slot-level ap_done pipeline registers.
  for slot in s_to_v_name_list:
    rtl = slot.getRTLModuleName()
    out.append(f'(* keep = "true" *) reg ap_done_{rtl}_q0;')
    out.append(f'(* keep = "true" *) reg ap_done_{rtl}_q1;')
    out.append(f'(* keep = "true" *) reg ap_done_{rtl}_q2;')
  out.append('\n')

  # Design-level ap_done plus its per-slot feedback pipeline registers.
  out.append('(* keep = "true" *) reg ap_done_final;')
  for slot in s_to_v_name_list:
    rtl = slot.getRTLModuleName()
    out.append(f'(* keep = "true" *) reg ap_done_final_{rtl}_q0;')
    out.append(f'(* keep = "true" *) reg ap_done_final_{rtl}_q1;')
    out.append(f'(* keep = "true" *) reg ap_done_final_{rtl}_q2;')
  out.append('\n')

  # Latch each module's ap_done; cleared by the slot reset or once the
  # slot-level ap_done has been captured.
  for v_name in ap_done_module_list:
    rtl = v_name_to_s[v_name].getRTLModuleName()
    out.append(f'always @ (posedge ap_clk) ap_done_{v_name}_q0 <= (ap_done_{v_name} | ap_done_{v_name}_q0) & (!ap_rst_{rtl}) & (!ap_done_{rtl}_q1);')
  out.append('\n')

  # Slot-level ap_done: AND of the slot's module ap_dones, then latched;
  # cleared once the design-level ap_done has been captured.
  for slot, v_name_list in s_to_v_name_list.items():
    rtl = slot.getRTLModuleName()
    out.append(f'always @ (posedge ap_clk) ap_done_{rtl}_q0 <= ' + ' & '.join([f'ap_done_{v_name}_q0' for v_name in v_name_list]) + ';')
    out.append(f'always @ (posedge ap_clk) ap_done_{rtl}_q1 <= ap_done_{rtl}_q0;')
    out.append(f'always @ (posedge ap_clk) ap_done_{rtl}_q2 <= (ap_done_{rtl}_q2 | ap_done_{rtl}_q1) & (!ap_rst_{rtl}) & (!ap_done_final_{rtl}_q2);')
  out.append('\n')

  # Design-level ap_done: AND of all slot-level ap_dones.
  out.append('assign ap_done = ap_done_final;')  # for compatibility with HLS simulation
  out.append('always @ (posedge ap_clk) ap_done_final <= ' + ' & '.join([f'ap_done_{slot.getRTLModuleName()}_q2' for slot in s_to_v_name_list.keys()]) + ';')
  out.append('\n')

  # Pipeline the final ap_done back to each slot to reset the slot-level ap_done.
  for slot in s_to_v_name_list:
    rtl = slot.getRTLModuleName()
    out.append(f'always @ (posedge ap_clk) ap_done_final_{rtl}_q0 <= ap_done_final;')
    out.append(f'always @ (posedge ap_clk) ap_done_final_{rtl}_q1 <= ap_done_final_{rtl}_q0;')
    out.append(f'always @ (posedge ap_clk) ap_done_final_{rtl}_q2 <= ap_done_final_{rtl}_q1;')

  out.append('// ----- end of pipelining the ap_done signal -----\n')

  # add two-space indentation to every emitted line
  return ['  ' + stmt for stmt in out]
def remove_orig_ctrl_signal(temp_rtl_top: List[str]) -> None:
  """
  Comment out the original top-level control logic in place:
  the first clocked always block (up to the first assign statement)
  and every assign statement of the original control signals.
  """
  ctrl_signals = [
    'ap_start',
    'ap_ready',
    'ap_done',
    'ap_continue',
    'ap_idle',
    'ap_sync_',
  ]

  # The always blocks are supposed to come after all module instances;
  # comment out the first one, stopping at the first assign statement.
  for i, line in enumerate(temp_rtl_top):
    if 'always' in line and 'ap_clk' in line:
      for j in range(i, len(temp_rtl_top)):
        if 'assign' in temp_rtl_top[j]:
          break
        temp_rtl_top[j] = f'// {temp_rtl_top[j]}'
      break

  # All assign statements come after the always blocks; each must concern
  # one of the original control signals.
  for i, line in enumerate(temp_rtl_top):
    if 'assign' in line:
      assert any(ctrl_signal in line for ctrl_signal in ctrl_signals), line
      temp_rtl_top[i] = f'// {line}'
| 2.53125 | 3 |
src/textbook/rosalind_ba9d.py | cowboysmall/rosalind | 0 | 12763848 | <gh_stars>0
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import arrays
import tree
def main(argv):
    # argv[0]: path of the input file; read_line presumably returns its
    # first line as the text to analyse -- confirm in tools/files.
    text = files.read_line(argv[0])
    # Approach 1: longest repeated substring via a suffix tree.
    st = tree.SuffixTree(text)
    print st.longest_repeat()
    # Approach 2: suffix array + LCP array; the entry with the maximum LCP
    # value locates the longest repeat, which is sliced back out of text.
    sa = arrays.suffix_array(text + '$')
    ha = arrays.lcp_array(sa)
    index = max(ha, key = lambda x: x[1])
    print text[sa[index[0]][0]:sa[index[0]][0] + index[1]]
if __name__ == "__main__":
main(sys.argv[1:])
| 2.390625 | 2 |
network-tests/test_expire.py | victor-tucci/beldex-storage-server | 2 | 12763849 | import pyoxenmq
import ss
import time
import base64
import json
from nacl.encoding import HexEncoder, Base64Encoder
from nacl.hash import blake2b
from nacl.signing import VerifyKey
import nacl.exceptions
def test_expire_all(omq, random_mn, sk, exclude):
    """Store 5 messages, then cap every expiry via storage.expire_all.

    The new expiry equals message 2's expiry, so messages 0 and 1 (later
    expiries) must be shortened while 2-4 stay untouched; every swarm
    member must return a valid signature over the updated hashes.
    """
    swarm = ss.get_swarm(omq, random_mn, sk)
    mns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [
        omq.connect_remote(
            "curve://{}:{}/{}".format(mn['ip'], mn['port_omq'], mn['pubkey_x25519']))
        for mn in mns
    ]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 5)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    # Use message 2's expiry as the new cap for all stored messages.
    ts = msgs[2]['req']['expiry']
    sig = sk.sign("expire_all{}".format(ts).encode(),
                  encoder=Base64Encoder).signature.decode()
    params = json.dumps({
        "pubkey": my_ss_id,
        "expiry": ts,
        "signature": sig
    }).encode()

    # Send through the *second* connection to exercise swarm forwarding.
    resp = omq.request(conns[1], 'storage.expire_all', [params])

    assert len(resp) == 1
    r = json.loads(resp[0])
    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['mnodes']}

    # 0 and 1 have later expiries than 2, so they should get updated; 2's
    # expiry is already the given value, and 3/4 are <= so stay unchanged.
    msg_hashes = sorted(msgs[i]['hash'] for i in (0, 1))

    # signature of ( PUBKEY_HEX || EXPIRY || UPDATED[0] || ... || UPDATED[N] )
    expected_signed = "".join((my_ss_id, str(ts), *msg_hashes)).encode()
    for pubkey, info in r['swarm'].items():
        assert info['updated'] == msg_hashes
        VerifyKey(pubkey, encoder=HexEncoder).verify(
            expected_signed, base64.b64decode(info['signature']))

    r = json.loads(omq.request(conns[0], 'storage.retrieve',
                               [json.dumps({"pubkey": my_ss_id}).encode()]
                               )[0])
    assert len(r['messages']) == 5
    for i, msg in enumerate(r['messages']):
        expected = ts if i <= 2 else msgs[i]['req']['expiry']
        assert msg['expiration'] == expected
def test_stale_expire_all(omq, random_mn, sk, exclude):
    """A storage.expire_all with a past timestamp must be rejected (406)."""
    swarm = ss.get_swarm(omq, random_mn, sk)
    mn = ss.random_swarm_members(swarm, 2, exclude)[0]
    conn = omq.connect_remote(
        "curve://{}:{}/{}".format(mn['ip'], mn['port_omq'], mn['pubkey_x25519']))

    ss.store_n(omq, conn, sk, b"omg123", 5)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    # Two minutes in the past (storage-server timestamps are milliseconds).
    ts = int((time.time() - 120) * 1000)
    sig = sk.sign("expire_all{}".format(ts).encode(),
                  encoder=Base64Encoder).signature.decode()
    params = {
        "pubkey": my_ss_id,
        "expiry": ts,
        "signature": sig
    }

    resp = omq.request(conn, 'storage.expire_all', [json.dumps(params).encode()])
    assert resp == [b'406', b'expire_all timestamp should be >= current time']
def test_expire(omq, random_mn, sk, exclude):
    """Expire a specific set of message hashes and verify the swarm-wide response.

    Stores 10 messages, then asks a *different* swarm member to shorten the expiry
    of hashes 0, 1, 5, 6, 7, 9 (plus one hash that does not exist on the swarm) to
    msgs[6]'s expiry.  Only 0, 1, 5 have later expiries and should be updated
    (6 already equals the target; 7 and 9 are earlier, so they stay put).  Each
    swarm member's reply signature is verified, then a retrieve confirms the
    stored expirations.
    """
    swarm = ss.get_swarm(omq, random_mn, sk)
    mns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote("curve://{}:{}/{}".format(mn['ip'], mn['port_omq'], mn['pubkey_x25519']))
             for mn in mns]
    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 10)
    my_ss_id = '05' + sk.verify_key.encode().hex()
    ts = msgs[6]['req']['expiry']
    # Mix of updatable (0,1,5), already-equal (6), earlier-expiry (7,9) and one
    # hash that is not stored on the swarm at all.
    hashes = [msgs[i]['hash'] for i in (0, 1, 5, 6, 7, 9)] + ['bepQtTaYrzcuCXO3fZkmk/h3xkMQ3vCh94i5HzLmj3I']
    actual_update_msgs = sorted(msgs[i]['hash'] for i in (0, 1, 5))
    # Shuffle the request order: the signature covers the hashes in request order,
    # so this also checks the server signs/verifies exactly what was sent.
    hashes = sorted(hashes, reverse=True)
    to_sign = ("expire" + str(ts) + "".join(hashes)).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = json.dumps({
        "pubkey": my_ss_id,
        "messages": hashes,
        "expiry": ts,
        "signature": sig
    }).encode()
    resp = omq.request(conns[1], 'storage.expire', [params])
    assert len(resp) == 1
    r = json.loads(resp[0])
    # Every member of the swarm must have answered.
    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['mnodes']}
    # Each member signs ( PUBKEY_HEX || EXPIRY || RMSG[0] || ... || RMSG[N] || UMSG[0] || ... || UMSG[M] )
    expected_signed = "".join((my_ss_id, str(ts), *hashes, *actual_update_msgs)).encode()
    for k, v in r['swarm'].items():
        assert v['updated'] == actual_update_msgs
        edpk = VerifyKey(k, encoder=HexEncoder)
        try:
            edpk.verify(expected_signed, base64.b64decode(v['signature']))
        except nacl.exceptions.BadSignatureError as e:
            print("Bad signature from swarm member {}".format(k))
            raise e
    r = json.loads(omq.request(conns[0], 'storage.retrieve',
        [json.dumps({ "pubkey": my_ss_id }).encode()]
        )[0])
    assert len(r['messages']) == 10
    for i in range(10):
        # BUG FIX: the former one-liner
        #     assert r[...]['expiration'] == ts if i in (0, 1, 5, 6) else msgs[i]['req']['expiry']
        # parsed as `assert (... == ts) if cond else <truthy timestamp>`, so the
        # else branch asserted nothing.  Parenthesize the expected value instead.
        expected = ts if i in (0, 1, 5, 6) else msgs[i]['req']['expiry']
        assert r['messages'][i]['expiration'] == expected
| 2.0625 | 2 |
src/templates/django/boilerplate/settings/development.py | GBrachetta/frames | 0 | 12763850 | <reponame>GBrachetta/frames<gh_stars>0
"""Development-only Django settings overlay.

Imports the shared base configuration and extends it with local conveniences
(debug toolbar, mail panel, SQLite).  Never use these settings in production:
DEBUG is forced on.
"""
import os

# Import below from the common config as needed, and append to the list (+=)
from boilerplate.settings.common import INSTALLED_APPS, MIDDLEWARE

DEBUG = True

ALLOWED_HOSTS = [
    os.environ.get("DOMAIN_NAME"),  # Add in .env
    "localhost",
    "127.0.0.1",
]

# Extends (mutates in place) the shared INSTALLED_APPS imported from common.
INSTALLED_APPS += [
    "debug_toolbar",
    "mail_panel",
    # Add here your new apps for dev only
]

# Debug-toolbar middleware is inserted first in the chain (index 0).
MIDDLEWARE.insert(
    0,
    "debug_toolbar.middleware.DebugToolbarMiddleware",
)

# Local file-based SQLite database — no external DB service needed in dev.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": "db.sqlite3",
    }
}

# Outgoing mail is captured and shown in the toolbar's mail panel rather than sent.
EMAIL_BACKEND = "mail_panel.backend.MailToolbarBackend"
DEFAULT_FROM_EMAIL = "<EMAIL>"

# Panels rendered by django-debug-toolbar, in display order.
DEBUG_TOOLBAR_PANELS = [
    "ddt_request_history.panels.request_history.RequestHistoryPanel",
    "debug_toolbar.panels.history.HistoryPanel",
    "debug_toolbar.panels.versions.VersionsPanel",
    "debug_toolbar.panels.timer.TimerPanel",
    "debug_toolbar.panels.settings.SettingsPanel",
    "debug_toolbar.panels.headers.HeadersPanel",
    "debug_toolbar.panels.request.RequestPanel",
    "debug_toolbar.panels.sql.SQLPanel",
    "debug_toolbar.panels.staticfiles.StaticFilesPanel",
    "debug_toolbar.panels.templates.TemplatesPanel",
    "debug_toolbar.panels.cache.CachePanel",
    "debug_toolbar.panels.signals.SignalsPanel",
    "debug_toolbar.panels.logging.LoggingPanel",
    "mail_panel.panels.MailToolbarPanel",
    "debug_toolbar.panels.redirects.RedirectsPanel",
    "debug_toolbar.panels.profiling.ProfilingPanel",
]

INTERNAL_IPS = ["127.0.0.1", "localhost"]  # For debug-toolbar