max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
mid_exam/runge_kutta_fourth_order_calculator.py | GiantSweetroll/Computational-Mathematics | 0 | 12773951 | import sympy as sp
def get_4th_order_rungekutta(dydx, x0, y0, n:int, h, x = sp.Symbol('x'), y = sp.Symbol('y')):
    """
    Solve dy/dx = dydx numerically with the classic fourth-order Runge-Kutta
    scheme and return a 2D list of rows [x_i, y_i, dy/dx at (x_i, y_i)].

    Parameters:
        dydx: sympy expression for the derivative dy/dx
        x0: initial value of x
        y0: initial value of y
        n: number of iterations (steps)
        h: step size for x
        x: sympy Symbol used for x inside dydx
        y: sympy Symbol used for y inside dydx
    """
    # Seed the table with the initial condition and its slope.
    rows = [[x0, y0, dydx.evalf(subs={x: x0, y: y0})]]
    for _ in range(n):
        xi, yi = rows[-1][0], rows[-1][1]
        # Four RK4 slope evaluations: start, two midpoints, end of the step.
        k1 = dydx.evalf(subs={x: xi, y: yi})
        k2 = dydx.evalf(subs={x: xi + h/2, y: yi + h*k1/2})
        k3 = dydx.evalf(subs={x: xi + h/2, y: yi + h*k2/2})
        k4 = dydx.evalf(subs={x: xi + h, y: yi + k3 * h})
        # Weighted average of the slopes advances y by one step.
        yi_next = yi + (1.0/6.0) * (k1 + 2*k2 + 2*k3 + k4) * h
        xi_next = xi + h
        rows.append([xi_next, yi_next, dydx.evalf(subs={x: xi_next, y: yi_next})])
    return rows
def example():
    """Usage example: solve dy/dx = y with y(0) = 1 over 3 steps of size 1."""
    # NOTE(review): a stray dataset-score artifact fused onto the final line
    # (which made the file unparsable) has been removed.
    sets = get_4th_order_rungekutta(sp.Symbol('y'), 0, 1, 3, 1)
    for s in sets:
        print(s)
tests/singleton_test.py | markusressel/container-app-conf | 2 | 12773952 | <reponame>markusressel/container-app-conf
# Copyright (c) 2019 <NAME>
# .
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# .
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# .
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from container_app_conf import ConfigBase
from container_app_conf.entry.bool import BoolConfigEntry
from tests import TestBase, TestConfigBase
class TestConfigBase2(ConfigBase):
    """Second ConfigBase subclass, used to verify singletons are per-subclass."""
    # Single boolean entry with no default; `example` documents a sample value.
    BOOL = BoolConfigEntry(
        key_path=["test", "bool"],
        # default=False,
        example=True
    )
class TestSingleton(TestBase):
    """Tests for the per-subclass singleton behaviour of ConfigBase."""

    def test_singleton(self):
        # Consistency fix: use the unittest assertion helpers (as the other
        # tests in this class already do) instead of bare asserts, so
        # failures produce informative messages. assertNotEqual/assertEqual
        # evaluate exactly the same `==` comparison the bare asserts did.
        self.assertNotEqual(TestConfigBase(), TestConfigBase2())
        self.assertEqual(TestConfigBase(), TestConfigBase())
        self.assertEqual(TestConfigBase2(), TestConfigBase2())

    def test_singleton_config_entry(self):
        # Both handles refer to the same singleton, so the second write wins
        # and both see the same value.
        conf1 = TestConfigBase()
        conf2 = TestConfigBase()
        conf1.INT.value = 1
        conf2.INT.value = 2
        self.assertEqual(conf1.INT.value, conf2.INT.value)

    def test_instance_config_entry(self):
        # singleton=False yields independent instances with independent entries.
        conf1 = TestConfigBase()
        conf2 = TestConfigBase(singleton=False)
        conf3 = TestConfigBase(singleton=False)
        conf1.INT.value = 1
        conf2.INT.value = 2
        conf3.INT.value = 3
        self.assertNotEqual(conf1.INT.value, conf2.INT.value)
        self.assertNotEqual(conf1.INT.value, conf3.INT.value)
        self.assertNotEqual(conf2.INT.value, conf3.INT.value)
| 1.976563 | 2 |
myp/package/__init__.py | YunisDEV/py-scripts | 2 | 12773953 | from .reader import MYPReader | 1.007813 | 1 |
pyutl/localenv.py | valldriz/pyutl | 0 | 12773954 | import os
import sys
import json
class NoEnvironmentFile(Exception):
    """Raised when an expected environment file cannot be found."""
    pass
class KeyNotFound(Exception):
    """Raised by LocalEnv.get() when a key is absent and no default is given."""
    pass
# Sentinel distinguishing "no default supplied" from an explicit None default.
DEFAULT = object()
class LocalEnv:
    """Loads KEY=VALUE pairs from ``.env`` files and exposes typed access."""
    # Recognised textual spellings for boolean values (matched case-insensitively).
    _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
                 '0': False, 'no': False, 'false': False, 'off': False, '': False}
    def __init__(self):
        self.files = []          # bookkeeping: one dict per registered file
        self.data = {}           # merged key/value pairs from all loaded files
        self.first_load = False  # lazily triggers load() on first get()
    def load(self, file=None):
        """
        If no file is defined, the .env file will be searched
        in invoker module's directory
        """
        if file is None:
            file = self._invoker()
        self.files.append({'file': file, 'exists': '', 'loaded': False})
        # search all files given and load them
        for file_dict in self.files:
            file_dict['exists'] = os.path.isfile(file_dict['file'])
            if file_dict['exists'] and not file_dict['loaded']:
                with open(file_dict['file']) as f:
                    for line in f:
                        line = line.strip()
                        # Skip blank lines, comments and lines without '='.
                        if not line or line.startswith('#') or '=' not in line:
                            continue
                        key, value = line.split('=', 1)
                        key = key.replace('export', '')
                        key = key.strip()
                        value = value.strip().strip('\'"')
                        self.data[key] = value
                file_dict['loaded'] = True
    def _cast(self, cast, data):
        # Booleans need special parsing: bool('false') would be truthy.
        if cast is bool and str(data).lower() not in self._BOOLEANS:
            raise ValueError('value can not be parsed as boolean')
        elif cast is bool:
            return self._BOOLEANS[str(data).lower()]
        else:
            return cast(data)
    def get(self, key, default=DEFAULT, cast=None):
        """Return the value for *key* (cast if requested), falling back to
        *default*; raises KeyNotFound when the key is missing and no default
        was supplied."""
        if not self.first_load:
            self.load()
            self.first_load = True
        try:
            ret_val = self.data[key] if cast is None else self._cast(cast, self.data[key])
        except KeyError:
            # BUG FIX: compare against the DEFAULT sentinel by identity, not
            # equality -- a default value with a custom __eq__ could otherwise
            # be mistaken for "no default supplied".
            if default is not DEFAULT:
                ret_val = default if cast is None else self._cast(cast, default)
            else:
                raise KeyNotFound(f'value not found in files: \n{json.dumps(self.files, indent=4)}')
        return ret_val
    def _invoker(self):
        # tip from:
        # https://github.com/henriquebastos/python-decouple/blob/master/decouple.py
        # MAGIC! Get the caller's module path.
        frame = sys._getframe()
        path = os.path.dirname(frame.f_back.f_back.f_back.f_code.co_filename)
        file = os.path.join(path, '.env')
        return file
localenv = LocalEnv()
| 2.8125 | 3 |
tests/providers/cloudera/utils.py | zomborinorbert/airflow | 0 | 12773955 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utils module for common utility methods used in the tests"""
from itertools import tee
from json import dumps
from requests import Response
def iter_len_plus_one(iterator):
    """Return the length + 1 of the given iterator.

    The +1 is because in the tests the first side effect is already consumed.

    BUG FIX: the original summed over the 2-tuple returned by ``tee()``
    itself -- a tuple of two iterator objects -- so it always returned
    2 + 1 = 3 regardless of the input. Count the elements of one tee'd
    copy instead.
    """
    counted, _ = tee(iterator)
    return sum(1 for _ in counted) + 1
def _get_call_arguments(self):
if len(self) == 2:
# returned tuple is args, kwargs = self
_, kwargs = self
else:
# returned tuple is name, args, kwargs = self
_, _, kwargs = self
return kwargs
def _make_response(status, body, reason):
    """Build a ``requests.Response`` carrying *body* serialised as JSON."""
    response = Response()
    response.status_code = status
    response.reason = reason
    # Response has no public content setter; tests set the private field.
    response._content = dumps(body).encode('utf-8')
    return response
| 2.34375 | 2 |
NER-Chinese-BiLSTM+CRF/data/boson/data_util.py | aka-zyq/NER-TF | 47 | 12773956 | <filename>NER-Chinese-BiLSTM+CRF/data/boson/data_util.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import codecs
import pandas as pd
import numpy as np
import re
def data2pkl():
    """Convert ./wordtagsplit.txt into padded, id-indexed train/valid/test
    splits and pickle them (with the word/tag vocabularies) to
    ../Bosondata.pkl. Sentences whose tags are all 'O' are dropped."""
    datas = list()
    labels = list()
    tags = set()
    input_data = codecs.open('./wordtagsplit.txt', 'r', 'utf-8')
    for line in input_data.readlines():
        line = line.split()
        linedata = []
        linelabel = []
        numNotO = 0
        for word in line:
            # Each token is "character/TAG".
            word = word.split('/')
            linedata.append(word[0])
            linelabel.append(word[1])
            tags.add(word[1])
            if word[1] != 'O':
                numNotO += 1
        # Keep only sentences containing at least one entity tag.
        if numNotO != 0:
            datas.append(linedata)
            labels.append(linelabel)
    input_data.close()
    print(len(datas), tags)
    print(len(labels))

    import collections.abc

    def flatten(x):
        # BUG FIX: test the *element* for iterability (the container always
        # is iterable, so the original check was vacuous), and use
        # collections.abc.Iterable -- collections.Iterable was removed in
        # Python 3.10.
        result = []
        for el in x:
            if isinstance(el, collections.abc.Iterable) and not isinstance(el, str):
                result.extend(flatten(el))
            else:
                result.append(el)
        return result

    all_words = flatten(datas)
    sr_allwords = pd.Series(all_words)
    sr_allwords = sr_allwords.value_counts()
    set_words = sr_allwords.index
    set_ids = range(1, len(set_words) + 1)
    tags = [i for i in tags]
    tag_ids = range(len(tags))
    # Bidirectional word/tag <-> id lookups; word id 0 is reserved for padding.
    word2id = pd.Series(set_ids, index=set_words)
    id2word = pd.Series(set_words, index=set_ids)
    tag2id = pd.Series(tag_ids, index=tags)
    id2tag = pd.Series(tags, index=tag_ids)
    word2id["unknow"] = len(word2id) + 1
    print(word2id)
    max_len = 60

    def X_padding(words):
        # Truncate or zero-pad the word-id sequence to exactly max_len.
        ids = list(word2id[words])
        if len(ids) >= max_len:
            return ids[:max_len]
        ids.extend([0] * (max_len - len(ids)))
        return ids

    def y_padding(tags):
        # Truncate or zero-pad the tag-id sequence to exactly max_len.
        ids = list(tag2id[tags])
        if len(ids) >= max_len:
            return ids[:max_len]
        ids.extend([0] * (max_len - len(ids)))
        return ids

    df_data = pd.DataFrame({'words': datas, 'tags': labels}, index=range(len(datas)))
    df_data['x'] = df_data['words'].apply(X_padding)
    df_data['y'] = df_data['tags'].apply(y_padding)
    x = np.asarray(list(df_data['x'].values))
    y = np.asarray(list(df_data['y'].values))

    # 64/16/20 train/valid/test split with a fixed seed for reproducibility.
    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=43)
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.2, random_state=43)

    import pickle
    with open('../Bosondata.pkl', 'wb') as outp:
        pickle.dump(word2id, outp)
        pickle.dump(id2word, outp)
        pickle.dump(tag2id, outp)
        pickle.dump(id2tag, outp)
        pickle.dump(x_train, outp)
        pickle.dump(y_train, outp)
        pickle.dump(x_test, outp)
        pickle.dump(y_test, outp)
        pickle.dump(x_valid, outp)
        pickle.dump(y_valid, outp)
    print('** Finished saving the data.')
def origin2tag():
    """Convert ./origindata.txt into ./wordtag.txt, one 'char/TAG' token per
    character, using a B_/M_/E_/O tagging scheme.

    Annotations appear to be of the form {{type:text}} (the code skips two
    characters after '{' and after the closing '}') -- TODO confirm against
    the raw Boson data format.
    """
    input_data = codecs.open('./origindata.txt', 'r', 'utf-8')
    output_data = codecs.open('./wordtag.txt', 'w', 'utf-8')
    for line in input_data.readlines():
        line=line.strip()
        i = 0
        while i <len(line):
            if line[i] == '{':
                # Inside an annotation: collect "type:text" up to '}'.
                i += 2
                temp = ""
                while line[i] != '}':
                    temp += line[i]
                    i += 1
                i += 2
                word = temp.split(':')
                sen = word[1]
                # First char gets B_, middle chars M_, final char E_.
                # NOTE(review): a single-character entity is written twice
                # (once as B_ and once as E_) -- confirm this is intended.
                output_data.write(sen[0] + "/B_" + word[0] + " ")
                for j in sen[1:len(sen) - 1]:
                    output_data.write(j + "/M_" + word[0] + " ")
                output_data.write(sen[-1] + "/E_" + word[0] + " ")
            else:
                # Plain character outside any annotation.
                output_data.write(line[i] + "/O ")
                i += 1
        output_data.write('\n')
    input_data.close()
    output_data.close()
def tagsplit():
    """Split ./wordtag.txt into one sentence per line (on punctuation
    characters tagged 'O') and write the result to ./wordtagsplit.txt."""
    with open('./wordtag.txt', 'rb') as inp:
        tagged_text = inp.read().decode('utf-8')
    # Sentence boundaries are punctuation tokens carrying the 'O' tag.
    sentences = re.split('[,。!?、‘’“”()]/[O]', tagged_text)
    with codecs.open('./wordtagsplit.txt', 'w', 'utf-8') as out:
        for sentence in sentences:
            if sentence != " ":
                out.write(sentence.strip() + '\n')
if __name__ == '__main__':
    # Run the full preprocessing pipeline only when executed as a script
    # (main-guard idiom): raw annotations -> char/tag pairs -> sentence-split
    # file -> pickled, padded train/valid/test splits.
    origin2tag()
    tagsplit()
    data2pkl()
| 2.703125 | 3 |
examples/apps/reverse_image_search/server.py | keunhong/scanner | 1 | 12773957 | import subprocess
try:
    from flask import Flask, request, send_from_directory
except ImportError:
    # BUG FIX: the original only printed the hint and fell through, crashing
    # with a confusing NameError on `Flask` below; abort with the hint instead.
    raise SystemExit('This example needs Flask to run. Try running:\n'
                     'pip install flask')
app = Flask(__name__)
# Directory used to exchange the query image and result images with search.py.
STATIC_DIR = 'examples/reverse_image_search/static'
# TODO(wcrichto): figure out how to prevent image caching
@app.route('/mystatic/<path:path>')
def mystatic(path):
    # Serve query/result images from the local static directory.
    return send_from_directory('static', path)
@app.route('/', methods=['GET','POST'])
def index():
    """GET: render the upload form. POST: save the uploaded query image,
    run the search script, and render the top-5 result images."""
    if request.method == 'POST':
        f = request.files['file']
        f.save('{}/query.jpg'.format(STATIC_DIR))
        # search.py reads query.jpg and writes result0..4.jpg into STATIC_DIR.
        subprocess.check_call(['python', 'examples/reverse_image_search/search.py'])
        return """
    <img src="/mystatic/result0.jpg" />
    <img src="/mystatic/result1.jpg" />
    <img src="/mystatic/result2.jpg" />
    <img src="/mystatic/result3.jpg" />
    <img src="/mystatic/result4.jpg" />
    """
    else:
        return """
    <form method="post" enctype="multipart/form-data">
        <input type="file" name="file">
        <input type="submit" value="Upload">
    </form>
    """
if __name__ == "__main__":
    # Listen on all interfaces; debug=True enables the reloader -- dev only.
    app.run(host='0.0.0.0', debug=True)
| 2.546875 | 3 |
input.py | albertliangcode/DiceRoll | 0 | 12773958 | <filename>input.py
"""
Input
3/26/15
<NAME>
Just a quick check to make sure input can be taken in from the user through the console.
On the side, it also tests conditionals.
"""
# Echo user input until "exit" (case-insensitive) is entered.  Python 2
# syntax (raw_input, print statement) -- do not run under Python 3.
while(True):
    s = raw_input("Enter string to print: ")
    print s,"\n"
    if(s.lower() == "exit"):
        break
print "Exiting..."
| 3.28125 | 3 |
examples/_gen_playback.py | JacobKosowski/mpl-point-clicker | 3 | 12773959 | <reponame>JacobKosowski/mpl-point-clicker
from mpl_playback.record import record_file

# record_file("heatmap_slicer.py", "fig")
# Record the interactions of example.py's "fig" figure for later playback.
record_file("example.py", "fig")
| 1.65625 | 2 |
Extra/Waste Seggregation using trashnet/Final Files/utils.py | KushajveerSingh/pytorch_projects | 19 | 12773960 | <reponame>KushajveerSingh/pytorch_projects<gh_stars>10-100
import onnx
from onnx_tf.backend import prepare
import numpy as np
from PIL import Image
__all__ = ['prepare_model', 'open_img', 'classes', 'get_pred']
def prepare_model(path='mobilenetv2.onnx'):
    """Load the ONNX model at *path* and convert it to a TensorFlow
    representation executed on CPU (strict operator conversion)."""
    model = onnx.load(path)
    tf_rep = prepare(model, device='CPU', strict=True)
    return tf_rep
def open_img(path='glass51.jpg', size=(256, 256)):
    """Load an image, resize it, normalise with ImageNet statistics and
    return a float32 array in NCHW layout, shape (1, 3, H, W)."""
    # Open image using PIL
    img = Image.open(path).resize(size).convert('RGB')
    a = np.asarray(img)
    a = a/255
    # Normalize image with ImageNet channel means / standard deviations.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    a = (a - mean)/std
    # Convert HWC -> CHW (PyTorch layout) in one step: the original chained
    # two transposes, (1,0,2) then (2,1,0), whose net permutation is (2,0,1).
    a = np.transpose(a, (2, 0, 1))
    a = np.expand_dims(a, axis=0)
    return a.astype(np.float32, copy=False)
def get_pred(tf_rep, img):
    """Run *img* through *tf_rep* and return the class index, remapping the
    model's 5-way argmax onto the 4 reported classes: indices 0 and 3 both
    map to 0 ('paper'), index 4 maps to 3 ('plastic')."""
    raw = np.argmax(tf_rep.run(img))
    remap = {0: 0, 3: 0, 4: 3}
    return remap.get(int(raw), raw)
# Human-readable labels indexed by the class index returned from get_pred().
classes = ['paper',
           'glass',
           'metal',
           'plastic']
def do_inference(model, path):
    """Classify the image at *path* with *model*; returns the class index.

    NOTE(review): a stray dataset-score artifact fused onto the return line
    (which made the file unparsable) has been removed.
    """
    img = open_img(path)
    return get_pred(model, img)
lib/losses/centernet_loss.py | DuZzzs/monodleX | 10 | 12773961 | import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.helpers.decode_helper import _transpose_and_gather_feat
from lib.losses.focal_loss import focal_loss_cornernet
from lib.losses.uncertainty_loss import laplacian_aleatoric_uncertainty_loss
from lib.losses.dim_aware_loss import dim_aware_l1_loss
eps = 1e-6
def compute_centernet3d_loss(input, target):
    """Aggregate all CenterNet3D loss terms.

    Returns (total_loss, stats_dict) where stats_dict maps each term name
    to its detached scalar value for logging.
    """
    # Edge fusion is active whenever the targets carry edge information.
    edge_fusion = 'edge_len' in target.keys()

    losses = {
        'seg': compute_segmentation_loss(input, target),
        'offset2d': compute_offset2d_loss(input, target, edge_fusion=edge_fusion),
        'size2d': compute_size2d_loss(input, target),
        'offset3d': compute_offset3d_loss(input, target, edge_fusion=edge_fusion),
        'depth': compute_depth_loss(input, target),
        'size3d': compute_size3d_loss(input, target),
        'heading': compute_heading_loss(input, target),
    }

    # statistics (plain Python floats, detached from the graph)
    stats_dict = {name: loss.item() for name, loss in losses.items()}
    total_loss = sum(losses.values())
    return total_loss, stats_dict
def compute_segmentation_loss(input, target):
    """CornerNet-style focal loss on the center heatmap.

    NOTE: sigmoid_() mutates input['heatmap'] in place and the clamped
    probabilities are written back -- callers must not rely on the raw
    logits afterwards.
    """
    input['heatmap'] = torch.clamp(input['heatmap'].sigmoid_(), min=1e-4, max=1 - 1e-4)
    loss = focal_loss_cornernet(input['heatmap'], target['heatmap'])
    return loss
def compute_size2d_loss(input, target):
    """Mean L1 loss on 2D box sizes over positions selected by mask_2d."""
    pred = extract_input_from_tensor(input['size_2d'], target['indices'], target['mask_2d'])
    gt = extract_target_from_tensor(target['size_2d'], target['mask_2d'])
    loss = F.l1_loss(pred, gt, reduction='mean')
    # An empty mask makes the mean reduction produce NaN; report zero instead.
    if torch.any(torch.isnan(loss)):
        loss = torch.tensor([0.0]).to(pred.device)
    return loss
def compute_offset2d_loss(input, target, edge_fusion=False):
    """L1 loss on 2D center offsets.

    With edge fusion enabled, truncated objects get a log-scaled per-object
    loss and untruncated objects a plain mean; otherwise a single mean L1
    loss over all masked positions is used.
    """
    # compute offset2d loss
    offset2d_input = extract_input_from_tensor(input['offset_2d'], target['indices'], target['mask_2d'])
    offset2d_target = extract_target_from_tensor(target['offset_2d'], target['mask_2d'])
    if edge_fusion:
        trunc_mask = extract_target_from_tensor(target['trunc_mask'], target['mask_2d']).bool()
        offset2d_loss = F.l1_loss(offset2d_input, offset2d_target, reduction='none').sum(dim=1)
        # use different loss functions for inside and outside objects
        trunc_offset_loss = torch.log(1 + offset2d_loss[trunc_mask]).sum() / torch.clamp(trunc_mask.sum() + eps, min=1)
        offset2d_loss = offset2d_loss[~trunc_mask].mean()
        return trunc_offset_loss + offset2d_loss
    elif(target['mask_2d'].sum() > 0):
        offset2d_loss = F.l1_loss(offset2d_input, offset2d_target, reduction='mean')
        return offset2d_loss
    else:
        # No positive samples: return a zero loss on the right device.
        offset2d_loss = torch.tensor([0.0]).to(offset2d_input.device)
        return offset2d_loss
def compute_depth_loss(input, target):
    """Uncertainty-weighted depth loss over positions selected by mask_3d.

    Channel 0 holds the raw depth logit (decoded via 1/sigmoid - 1) and
    channel 1 the predicted log-variance used by the Laplacian aleatoric
    uncertainty loss.
    """
    depth_input = extract_input_from_tensor(input['depth'], target['indices'], target['mask_3d'])
    depth_input, depth_log_variance = depth_input[:, 0:1], depth_input[:, 1:2]
    # Inverse-sigmoid decoding maps the unbounded logit into (0, +inf) depth.
    depth_input = 1. / (depth_input.sigmoid() + 1e-6) - 1.
    depth_target = extract_target_from_tensor(target['depth'], target['mask_3d'])
    if target['mask_3d'].sum() > 0:
        depth_loss = laplacian_aleatoric_uncertainty_loss(depth_input, depth_target, depth_log_variance)
    else:
        # No positive samples: return a zero loss on the right device.
        depth_loss = torch.tensor([0.0]).to(depth_input.device)
    return depth_loss
def compute_offset3d_loss(input, target, edge_fusion=False):
    """L1 loss on projected 3D center offsets over mask_3d positions, plus a
    log-scaled term over truncated objects when edge fusion is enabled."""
    offset3d_input = extract_input_from_tensor(input['offset_3d'], target['indices'], target['mask_3d'])
    offset3d_target = extract_target_from_tensor(target['offset_3d'], target['mask_3d'])
    if target['mask_3d'].sum() > 0:
        offset3d_loss = F.l1_loss(offset3d_input, offset3d_target, reduction='mean')
    else:
        # No positive samples: zero loss on the right device.
        offset3d_loss = torch.tensor([0.0]).to(offset3d_input.device)
    if edge_fusion:
        sum_target_trunc_mask = target['trunc_mask'].sum()
        if sum_target_trunc_mask > 0:
            trunc_offset3d_input = extract_input_from_tensor(input['offset_3d'], target['indices'], target['trunc_mask'])
            trunc_offset3d_target = extract_target_from_tensor(target['offset_3d'], target['trunc_mask'])
            # Log scaling damps the contribution of large truncation errors.
            trunc_offset3d_loss = torch.log(1 + F.l1_loss(trunc_offset3d_input,
                                  trunc_offset3d_target, reduction='none').sum() / torch.clamp(sum_target_trunc_mask, min=1))
        else:
            trunc_offset3d_loss = torch.tensor([0.0]).to(offset3d_input.device)
        return offset3d_loss + trunc_offset3d_loss
    return offset3d_loss
def compute_size3d_loss(input, target):
    """Dimension-aware L1 loss on 3D object sizes over mask_3d positions."""
    size3d_input = extract_input_from_tensor(input['size_3d'], target['indices'], target['mask_3d'])
    size3d_target = extract_target_from_tensor(target['size_3d'], target['mask_3d'])
    # target['dimension'] is size3d_target
    dimension_target = extract_target_from_tensor(target['dimension'], target['mask_3d'])
    if target['mask_3d'].sum() > 0:
        size3d_loss = dim_aware_l1_loss(size3d_input, size3d_target, dimension_target)
    else:
        # No positive samples: zero loss on the right device.
        size3d_loss = torch.tensor([0.0]).to(size3d_input.device)
    return size3d_loss
def compute_heading_loss(input, target):
    """Multi-bin heading loss: cross-entropy over 12 angle bins plus an L1
    loss on the angle residual of the target bin (channels 12..23)."""
    heading_input = _transpose_and_gather_feat(input['heading'], target['indices'])  # B * C * H * W ---> B * K * C
    heading_input = heading_input.view(-1, 24)
    heading_target_cls = target['heading_bin'].view(-1)
    heading_target_res = target['heading_res'].view(-1)
    mask = target['mask_2d'].view(-1)

    # classification loss over the first 12 channels (bin logits)
    heading_input_cls = heading_input[:, 0:12]
    heading_input_cls, heading_target_cls = heading_input_cls[mask > 0], heading_target_cls[mask > 0]
    if mask.sum() > 0:
        cls_loss = F.cross_entropy(heading_input_cls, heading_target_cls, reduction='mean')
    else:
        cls_loss = torch.tensor([0.0]).to(heading_input_cls.device)

    # regression loss over the last 12 channels (per-bin residuals)
    heading_input_res = heading_input[:, 12:24]
    heading_input_res, heading_target_res = heading_input_res[mask > 0], heading_target_res[mask > 0]
    # BUG FIX: allocate the one-hot selector on the same device as the
    # targets instead of hard-coding .cuda(), so the loss also runs on CPU.
    cls_onehot = torch.zeros(heading_target_cls.shape[0], 12,
                             device=heading_target_cls.device).scatter_(dim=1, index=heading_target_cls.view(-1, 1), value=1)
    # Select each row's residual prediction for its ground-truth bin.
    heading_input_res = torch.sum(heading_input_res * cls_onehot, 1)
    reg_loss = F.l1_loss(heading_input_res, heading_target_res, reduction='mean')
    if torch.any(torch.isnan(reg_loss)):
        reg_loss = torch.tensor([0.0]).to(heading_input_res.device)
    return cls_loss + reg_loss
###################### auxiliary functions #########################
def extract_input_from_tensor(input, ind, mask):
    """Gather per-object predictions at indices *ind*, then keep only the
    rows whose mask entry is positive."""
    input = _transpose_and_gather_feat(input, ind)  # B*C*H*W --> B*K*C
    return input[mask > 0]  # B*K*C --> M * C
def extract_target_from_tensor(target, mask):
    """Select the rows of *target* whose mask entry is positive."""
    positive = mask > 0
    return target[positive]
if __name__ == '__main__':
    # Smoke test with all-zero dummy tensors.
    input_cls = torch.zeros(2, 50, 12)  # B * 50 * 12 bin logits
    input_reg = torch.zeros(2, 50, 12)  # B * 50 * 12 residuals
    target_cls = torch.zeros(2, 50, 1, dtype=torch.int64)
    target_reg = torch.zeros(2, 50, 1)

    input_cls, target_cls = input_cls.view(-1, 12), target_cls.view(-1)
    cls_loss = F.cross_entropy(input_cls, target_cls, reduction='mean')

    a = torch.zeros(2, 24, 10, 10)    # heading head output
    b = torch.zeros(2, 10).long()     # gather indices
    c = torch.ones(2, 10).long()      # mask_2d
    d = torch.zeros(2, 10, 1).long()  # heading_bin
    e = torch.zeros(2, 10, 1)         # heading_res
    # BUG FIX: compute_heading_loss(input, target) takes two dicts, not five
    # positional tensors as this smoke test previously (wrongly) assumed.
    print(compute_heading_loss({'heading': a},
                               {'indices': b, 'mask_2d': c,
                                'heading_bin': d, 'heading_res': e}))
| 1.976563 | 2 |
25_DFSBFS/Step01/wowo0709.py | StudyForCoding/BEAKJOON | 0 | 12773962 | '''인접 행렬로 풀이'''
import sys
input = sys.stdin.readline
def dfs(v):  # recursive depth-first traversal
    """Print v, mark it visited, then recurse into each unvisited neighbour
    (ascending vertex order) via the global adjacency matrix."""
    print(v, end=' ')
    visited[v] = 1
    for nxt in range(1, N + 1):
        if not visited[nxt] and graph[v][nxt]:
            dfs(nxt)
def bfs(v):  # queue-based breadth-first traversal
    """Print vertices in BFS order from v using the global adjacency matrix.

    Uses collections.deque for O(1) popleft; the original list.pop(0)
    is O(n) per dequeue.
    """
    from collections import deque
    q = deque([v])
    visited[v] = 1
    while q:
        v = q.popleft()
        print(v, end=' ')
        for i in range(1, N + 1):
            if (not visited[i]) and (graph[v][i]):
                q.append(i)
                visited[i] = 1
# Read N vertices, M edges and start vertex V, then run DFS and BFS.
# NOTE(review): a stray dataset-score artifact fused onto the final line
# (which made the file unparsable) has been removed.
N, M, V = map(int, input().split())
graph = [[0 for _ in range(N + 1)] for _ in range(N + 1)]
visited = [0 for _ in range(N + 1)]
for _ in range(M):
    x, y = map(int, input().split())
    graph[x][y] = graph[y][x] = 1  # undirected edge
dfs(V)
print()
# Reset visit marks between the two traversals.
for i in range(N + 1):
    visited[i] = 0
bfs(V)
ABC/187/D.py | yu9824/AtCoder | 0 | 12773963 | <reponame>yu9824/AtCoder
def LI(): return list(map(int, input().split()))  # read one line of space-separated ints
def I(): return int(input())  # read one line as a single int
import sys
sys.setrecursionlimit(10 ** 9)
'''
Sort towns by Aoki's votes and flip them from the top?
Or sort by vote swing (margin gained per flip) and flip from the top?
'''
def main(*args):
    """Greedy solution: find the minimum number of towns Takahashi must
    address so that his vote total exceeds Aoki's remaining votes."""
    N, AB = args
    # aoki = []
    # takahashi = []
    # for a, b in AB:
    #     takahashi.append(a+b)
    #     aoki.append(a)
    # Compute the vote swing of each town
    # tokushittenn = [t + a for t, a in zip(takahashi, aoki)]
    # Compute the vote swing of each town: flipping a town removes a votes
    # from Aoki and gives Takahashi a+b, so the swing is 2*a + b.
    tokushittenn = []
    aoki = 0
    for a, b in AB:
        tokushittenn.append(a + b + a)
        aoki += a
    # print(aoki, tokushittenn)
    # Visit towns in descending swing order (most effective first) and stop
    # as soon as Aoki's lead goes negative.
    for i, city in enumerate(argsort(tokushittenn, reverse = True)):
        aoki -= tokushittenn[city]
        if aoki < 0:
            print(i+1)
            break
def argsort(seq, reverse = False):
    """Return the indices that would sort *seq* (pure-Python numpy.argsort).

    See: https://stackoverflow.com/questions/3382352
    """
    indices = range(len(seq))
    return sorted(indices, key=lambda i: seq[i], reverse=reverse)
if __name__ == '__main__':
    # Read N, then N lines of (a, b) vote pairs, and solve.
    # NOTE(review): a stray dataset-score artifact fused onto the final line
    # (which made the file unparsable) has been removed.
    N = int(input())
    args = [N]
    args.append([LI() for n in range(N)])
    main(*args)
hkm/migrations/0022_museum_only_products.py | andersinno/kuvaselaamo | 1 | 12773964 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-19 10:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the PrintProduct.is_museum_only boolean flag (default False)."""
    dependencies = [
        ('hkm', '0021_page_ref'),
    ]
    operations = [
        migrations.AddField(
            model_name='printproduct',
            name='is_museum_only',
            field=models.BooleanField(default=False, verbose_name='Museum purchase only'),
        ),
    ]
| 1.609375 | 2 |
zfactor_py/calculate_zfactor.py | mkamyab/zfactor | 2 | 12773965 | # This code calculates compressibility factor (z-factor) for natural hydrocarbon gases
# with 3 different methods. It is the outcomes of the following paper:
# <br>
# <NAME>.; <NAME>., <NAME>.; <NAME>. & <NAME>, <NAME>.
# Using artificial neural networks to estimate the Z-Factor for natural hydrocarbon gases
# Journal of Petroleum Science and Engineering, 2010, 73, 248-257
# <br>
# The original paper can be found at:
# <a href="http://www.sciencedirect.com/science/article/pii/S0920410510001427">here</a>.
# <p>
# Artificial Neural Network (ANN)has been applied and two accurate non-iterative methods are presented.
# The Dranchuk and Abou-Kassem equation of state model, which is an iterative method, is
# also presented here for comparison. All the methods are:
# <ul>
# <li> ANN10: this method is the most accurate ANN method that presented in the paper.
# <li> ANN5: this method is the next accurate ANN method that presented in the paper.
# <li> DAK: this is the Dranchuk and Abou-Kassem equation of state.
# </ul>
#
# @author <a href="mailto:<EMAIL>"><NAME></a>
# @author <a href="mailto:<EMAIL>"><NAME>.</a>
import numpy as np
class CalculateZFactor:
    """Compressibility-factor (z-factor) calculators for natural hydrocarbon
    gases: two non-iterative ANN approximations (2-10-10-1 and 2-5-5-1,
    weights trained on Standing-Katz / Katz chart data) and the iterative
    Dranchuk and Abou-Kassem (DAK) equation of state for comparison.

    NOTE(review): the scratch buffers n1_5/n2_5/n1_10/n2_10 are *class*
    attributes mutated by the ANN methods, so they are shared by all
    instances -- confirm single-threaded, one-instance usage.
    """
    # Minimum and Maximum values used in the neural network to normalize the input and output values.
    def __init__(self):
        pass
    Ppr_min = 0
    Ppr_max = 30
    Tpr_min = 1
    Tpr_max = 3
    Z_min = 0.25194
    Z_max = 2.66
    # -------------START OF NETWORK 2-5-5-1 STRUCTURE-------------
    # Weights and Biases for the 1st layer of neurons
    wb1_5 = [
        [-1.5949, 7.9284, 7.2925],
        [-1.7917, 1.2117, 2.221],
        [5.3547, -4.5424, -0.9846],
        [4.6209, 2.2228, 8.9966],
        [-2.3577, -0.1499, -1.5063]
    ]
    # Weights and Biases for the 2nd layer of neurons
    wb2_5 = [
        [2.3617, -4.0858, 1.2062, -1.1518, -1.2915, 2.0626],
        [10.0141, 9.8649, -11.4445, -123.0698, 7.5898, 95.1393],
        [10.4103, 14.1358, -10.9061, -125.5468, 6.3448, 93.8916],
        [-1.7794, 14.0742, -1.4195, 12.0894, -15.4537, -9.9439],
        [-0.5988, -0.4354, -0.336, 9.9429, -0.4029, -8.3371]
    ]
    # Weights and Biases for the 3rd layer of neurons
    wb3_5 = [1.4979, -37.466, 37.7958, -7.7463, 6.9079, 2.8462]
    # -------------END OF NETWORK 2-5-5-1 STRUCTURE-------------
    # -------------START OF NETWORK 2-10-10-1 STRUCTURE-------------
    # Weights and Biases for the 1st layer of neurons
    wb1_10 = [
        [2.2458, -2.2493, -3.7801],
        [3.4663, 8.1167, -14.9512],
        [5.0509, -1.8244, 3.5017],
        [6.1185, -0.2045, 0.3179],
        [1.3366, 4.9303, 2.2153],
        [-2.8652, 1.1679, 1.0218],
        [-6.5716, -0.8414, -8.1646],
        [-6.1061, 12.7945, 7.2201],
        [13.0884, 7.5387, 19.2231],
        [70.7187, 7.6138, 74.6949]
    ]
    # Weights and Biases for the 2nd layer of neurons
    wb2_10 = [
        [4.674, 1.4481, -1.5131, 0.0461, -0.1427, 2.5454, -6.7991, -0.5948, -1.6361, 0.5801, -3.0336],
        [-6.7171, -0.7737, -5.6596, 2.975, 14.6248, 2.7266, 5.5043, -13.2659, -0.7158, 3.076, 15.9058],
        [7.0753, -3.0128, -1.1779, -6.445, -1.1517, 7.3248, 24.7022, -0.373, 4.2665, -7.8302, -3.1938],
        [2.5847, -12.1313, 21.3347, 1.2881, -0.2724, -1.0393, -19.1914, -0.263, -3.2677, -12.4085, -10.2058],
        [-19.8404, 4.8606, 0.3891, -4.5608, -0.9258, -7.3852, 18.6507, 0.0403, -6.3956, -0.9853, 13.5862],
        [16.7482, -3.8389, -1.2688, 1.9843, -0.1401, -8.9383, -30.8856, -1.5505, -4.7172, 10.5566, 8.2966],
        [2.4256, 2.1989, 18.8572, -14.5366, 11.64, -19.3502, 26.6786, -8.9867, -13.9055, 5.195, 9.7723],
        [-16.388, 12.1992, -2.2401, -4.0366, -0.368, -6.9203, -17.8283, -0.0244, 9.3962, -1.7107, -1.0572],
        [14.6257, 7.5518, 12.6715, -12.7354, 10.6586, -43.1601, 1.3387, -16.3876, 8.5277, 45.9331, -6.6981],
        [-6.9243, 0.6229, 1.6542, -0.6833, 1.3122, -5.588, -23.4508, 0.5679, 1.7561, -3.1352, 5.8675]
    ]
    # Weights and Biases for the 3rd layer of neurons
    wb3_10 = [-30.1311, 2.0902, -3.5296, 18.1108, -2.528, -0.7228, 0.0186, 5.3507, -0.1476, -5.0827, 3.9767]
    # -------------END OF NETWORK 2-10-10-1 STRUCTURE-------------
    # input and output of the 1st layer in 2-5-5-1 network. [,0] ==> inputs, [,1] ==> outputs
    n1_5 = np.zeros((5, 2))
    # input and output of the 2nd layer in 2-5-5-1 network. [,0] ==> inputs, [,1] ==> outputs
    n2_5 = np.zeros((5, 2))
    # input and output of the 1st layer in 2-10-10-1 network. [,0] ==> inputs, [,1] ==> outputs
    n1_10 = np.zeros((10, 2))
    # input and output of the 2nd layer in 2-10-10-1 network. [,0] ==> inputs, [,1] ==> outputs
    n2_10 = np.zeros((10, 2))
    TOLERANCE = 0.0001  # tolerance of DAK
    MAX_NO_Iterations = 20  # Max number of iterations for DAK
    def ANN10(self, Ppr: float, Tpr: float) -> float:
        """
        This method calculates the z-factor using a 2x10x10x1 Artificial Neural Network
        based on training data obtained from Standing-Katz and Katz charts.
        It always produces a result, but accuracy is controlled for 0<Ppr<30 and 1<Tpr<3
        :param Ppr: pseudo-reduced pressure
        :param Tpr: pseudo-reduced temperature
        :return: z factor
        """
        # Normalize both inputs to [-1, 1].
        Ppr_n = 2.0 / (self.Ppr_max - self.Ppr_min) * (Ppr - self.Ppr_min) - 1.0
        Tpr_n = 2.0 / (self.Tpr_max - self.Tpr_min) * (Tpr - self.Tpr_min) - 1.0
        # 1st hidden layer: affine combination followed by logistic sigmoid.
        for i in range(10):
            self.n1_10[i][0] = Ppr_n * self.wb1_10[i][0] + Tpr_n * self.wb1_10[i][1] + self.wb1_10[i][2]
            self.n1_10[i][1] = log_sig(self.n1_10[i][0])
        # 2nd hidden layer.
        for i in range(10):
            self.n2_10[i][0] = 0
            for j in range(len(self.n2_10)):
                self.n2_10[i][0] += self.n1_10[j][1] * self.wb2_10[i][j]
            self.n2_10[i][0] += self.wb2_10[i][10]  # adding the bias value
            self.n2_10[i][1] = log_sig(self.n2_10[i][0])
        # Linear output neuron.
        z_n = 0
        for j in range(len(self.n2_10)):
            z_n += self.n2_10[j][1] * self.wb3_10[j]
        z_n += self.wb3_10[10]  # adding the bias value
        zAnn10 = (z_n + 1) * (self.Z_max - self.Z_min) / 2 + self.Z_min  # reverse normalization of normalized z factor.
        return zAnn10
    def ANN5(self, Ppr: float, Tpr: float) -> float:
        """
        This method calculates the z-factor using a 2x5x5x1 Artificial Neural Network
        based on training data obtained from Standing-Katz and Katz charts.
        It always produces a result, but accuracy is controlled for 0<Ppr<30 and 1<Tpr<3
        :param Ppr: pseudo-reduced pressure
        :param Tpr: pseudo-reduced temperature
        :return: z factor
        """
        # Normalize both inputs to [-1, 1].
        Ppr_n = 2.0 / (self.Ppr_max - self.Ppr_min) * (Ppr - self.Ppr_min) - 1.0
        Tpr_n = 2.0 / (self.Tpr_max - self.Tpr_min) * (Tpr - self.Tpr_min) - 1.0
        # 1st hidden layer: affine combination followed by logistic sigmoid.
        for i in range(5):
            self.n1_5[i][0] = Ppr_n * self.wb1_5[i][0] + Tpr_n * self.wb1_5[i][1] + self.wb1_5[i][2]
            self.n1_5[i][1] = log_sig(self.n1_5[i][0])
        # 2nd hidden layer.
        for i in range(5):
            self.n2_5[i][0] = 0
            for j in range(len(self.n2_5)):
                self.n2_5[i][0] += self.n1_5[j][1] * self.wb2_5[i][j]
            self.n2_5[i][0] += self.wb2_5[i][5]  # adding the bias value
            self.n2_5[i][1] = log_sig(self.n2_5[i][0])
        # Linear output neuron.
        z_n = 0
        for j in range(len(self.n2_5)):
            z_n += self.n2_5[j][1] * self.wb3_5[j]
        z_n += self.wb3_5[5]  # adding the bias value
        zAnn5 = (z_n + 1) * (
                self.Z_max - self.Z_min) / 2 + self.Z_min  # reverse normalization of normalized z factor.
        return zAnn5
    def DAK(self, Ppr: float, Tpr: float) -> float:
        """
        This method calculates the z-factor using Dranchuk and Abou-Kassem (DAK) method.
        :param Ppr: pseudo-reduced pressure
        :param Tpr: pseudo-reduced temperature
        :return: z factor
        """
        # Dranchuk & Abou-Kassem correlation constants A1..A11.
        A1 = 0.3265
        A2 = -1.07
        A3 = -0.5339
        A4 = 0.01569
        A5 = -0.05165
        A6 = 0.5475
        A7 = -0.7361
        A8 = 0.1844
        A9 = 0.1056
        A10 = 0.6134
        A11 = 0.721
        z_new = 1.0
        z_old = 1.0
        den = calculate_density(Ppr, Tpr, z_old)
        # Fixed-point iteration: update z from the EOS, then the reduced
        # density from z, until successive z estimates converge.
        for i in range(1, self.MAX_NO_Iterations + 1):
            z_old = z_new
            z_new = 1 + \
                    (A1 + A2 / Tpr + A3 / Tpr ** 3 + A4 / Tpr ** 4 + A5 / Tpr ** 5) * den + \
                    (A6 + A7 / Tpr + A8 / Tpr ** 2) * den ** 2 - \
                    A9 * (A7 / Tpr + A8 / Tpr ** 2) * den ** 5 + \
                    A10 * (1 + A11 * den ** 2) * den ** 2 / Tpr ** 3 * np.exp(-1 * A11 * den ** 2)
            den = calculate_density(Ppr, Tpr, z_new)
            if np.abs(z_new - z_old) < self.TOLERANCE:
                break
        zDAK = z_new
        return zDAK
def log_sig(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x))."""
    return 1 / (1 + np.exp(-x))
def calculate_density(pr: float, tr: float, z: float):
    """Reduced gas density for the DAK iteration: 0.27 * Ppr / Tpr / z."""
    scaled_pressure = 0.27 * pr
    return scaled_pressure / tr / z
| 3 | 3 |
ml_collections/config_dict/tests/field_reference_test.py | wyddmw/ViT-pytorch-1 | 311 | 12773966 | # Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ml_collections.FieldReference."""
import operator
from absl.testing import absltest
from absl.testing import parameterized
import ml_collections
from ml_collections.config_dict import config_dict
class FieldReferenceTest(parameterized.TestCase):
  def _test_binary_operator(self,
                            initial_value,
                            other_value,
                            op,
                            true_value,
                            new_initial_value,
                            new_true_value,
                            assert_fn=None):
    """Helper for testing binary operators.

    Generally speaking this checks that:

    1. `op(initial_value, other_value) COMP true_value`
    2. `op(new_initial_value, other_value) COMP new_true_value`

    where `COMP` is the comparison function defined by `assert_fn`.

    Args:
      initial_value: Initial value for the `FieldReference`, this is the first
        argument for the binary operator.
      other_value: The second argument for the binary operator.
      op: The binary operator.
      true_value: The expected output of the binary operator.
      new_initial_value: The value that the `FieldReference` is changed to.
      new_true_value: The expected output of the binary operator after the
        `FieldReference` has changed.
      assert_fn: Function used to check the output values.
    """
    if assert_fn is None:
      assert_fn = self.assertEqual
    # Standalone FieldReference: applying `op` yields a new lazy reference
    # whose get() evaluates to the expected value.
    ref = ml_collections.FieldReference(initial_value)
    new_ref = op(ref, other_value)
    assert_fn(new_ref.get(), true_value)
    # Inside a ConfigDict: `result` is built from a live reference to `a`,
    # so mutating `config.a` afterwards must change `config.result` too.
    config = ml_collections.ConfigDict()
    config.a = initial_value
    config.b = other_value
    config.result = op(config.get_ref('a'), config.b)
    assert_fn(config.result, true_value)
    config.a = new_initial_value
    assert_fn(config.result, new_true_value)
  def _test_unary_operator(self,
                           initial_value,
                           op,
                           true_value,
                           new_initial_value,
                           new_true_value,
                           assert_fn=None):
    """Helper for testing unary operators.

    Generally speaking this checks that:

    1. `op(initial_value) COMP true_value`
    2. `op(new_initial_value) COMP new_true_value`

    where `COMP` is the comparison function defined by `assert_fn`.

    Args:
      initial_value: Initial value for the `FieldReference`, this is the first
        argument for the unary operator.
      op: The unary operator.
      true_value: The expected output of the unary operator.
      new_initial_value: The value that the `FieldReference` is changed to.
      new_true_value: The expected output of the unary operator after the
        `FieldReference` has changed.
      assert_fn: Function used to check the output values.
    """
    if assert_fn is None:
      assert_fn = self.assertEqual
    # Standalone FieldReference case: op() returns a lazy reference.
    ref = ml_collections.FieldReference(initial_value)
    new_ref = op(ref)
    assert_fn(new_ref.get(), true_value)
    # ConfigDict case: `result` is lazily recomputed from the reference,
    # so it must track a later change to `config.a`.
    config = ml_collections.ConfigDict()
    config.a = initial_value
    config.result = op(config.get_ref('a'))
    assert_fn(config.result, true_value)
    config.a = new_initial_value
    assert_fn(config.result, new_true_value)
def testBasic(self):
ref = ml_collections.FieldReference(1)
self.assertEqual(ref.get(), 1)
def testGetRef(self):
config = ml_collections.ConfigDict()
config.a = 1.
config.b = config.get_ref('a') + 10
config.c = config.get_ref('b') + 10
self.assertEqual(config.c, 21.0)
def testFunction(self):
def fn(x):
return x + 5
config = ml_collections.ConfigDict()
config.a = 1
config.b = fn(config.get_ref('a'))
config.c = fn(config.get_ref('b'))
self.assertEqual(config.b, 6)
self.assertEqual(config.c, 11)
config.a = 2
self.assertEqual(config.b, 7)
self.assertEqual(config.c, 12)
def testCycles(self):
config = ml_collections.ConfigDict()
config.a = 1.
config.b = config.get_ref('a') + 10
config.c = config.get_ref('b') + 10
self.assertEqual(config.b, 11.0)
self.assertEqual(config.c, 21.0)
# Introduce a cycle
with self.assertRaisesRegex(config_dict.MutabilityError, 'cycle'):
config.a = config.get_ref('c') - 1.0
# Introduce a cycle on second operand
with self.assertRaisesRegex(config_dict.MutabilityError, 'cycle'):
config.a = ml_collections.FieldReference(5.0) + config.get_ref('c')
# We can create multiple FieldReferences that all point to the same object
l = [0]
config = ml_collections.ConfigDict()
config.a = l
config.b = l
config.c = config.get_ref('a') + ['c']
config.d = config.get_ref('b') + ['d']
self.assertEqual(config.c, [0, 'c'])
self.assertEqual(config.d, [0, 'd'])
# Make sure nothing was mutated
self.assertEqual(l, [0])
self.assertEqual(config.c, [0, 'c'])
config.a = [1]
config.b = [2]
self.assertEqual(l, [0])
self.assertEqual(config.c, [1, 'c'])
self.assertEqual(config.d, [2, 'd'])
@parameterized.parameters(
{
'initial_value': 1,
'other_value': 2,
'true_value': 3,
'new_initial_value': 10,
'new_true_value': 12
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 4.5,
'new_initial_value': 3.7,
'new_true_value': 6.2
}, {
'initial_value': 'hello, ',
'other_value': 'world!',
'true_value': 'hello, world!',
'new_initial_value': 'foo, ',
'new_true_value': 'foo, world!'
}, {
'initial_value': ['hello'],
'other_value': ['world'],
'true_value': ['hello', 'world'],
'new_initial_value': ['foo'],
'new_true_value': ['foo', 'world']
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 15.0,
'new_initial_value': 12,
'new_true_value': 17.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 19.0
}, {
'initial_value': 5.0,
'other_value': config_dict.placeholder(float),
'true_value': None,
'new_initial_value': 8.0,
'new_true_value': None
}, {
'initial_value': config_dict.placeholder(str),
'other_value': 'tail',
'true_value': None,
'new_initial_value': 'head',
'new_true_value': 'headtail'
})
def testAdd(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.add,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 5,
'other_value': 3,
'true_value': 2,
'new_initial_value': -1,
'new_true_value': -4
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': -0.5,
'new_initial_value': 12.3,
'new_true_value': 9.8
}, {
'initial_value': set(['hello', 123, 4.5]),
'other_value': set([123]),
'true_value': set(['hello', 4.5]),
'new_initial_value': set([123]),
'new_true_value': set([])
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 5.0,
'new_initial_value': 12,
'new_true_value': 7.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 5.0
})
def testSub(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.sub,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 1,
'other_value': 2,
'true_value': 2,
'new_initial_value': 3,
'new_true_value': 6
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 5.0,
'new_initial_value': 3.5,
'new_true_value': 8.75
}, {
'initial_value': ['hello'],
'other_value': 3,
'true_value': ['hello', 'hello', 'hello'],
'new_initial_value': ['foo'],
'new_true_value': ['foo', 'foo', 'foo']
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 50.0,
'new_initial_value': 1,
'new_true_value': 5.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 84.0
})
def testMul(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.mul,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1.5,
'new_initial_value': 10,
'new_true_value': 5.0
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 0.8,
'new_initial_value': 6.3,
'new_true_value': 2.52
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 2.0,
'new_initial_value': 13,
'new_true_value': 2.6
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 17.5,
'new_true_value': 2.5
})
def testTrueDiv(self, initial_value, other_value, true_value,
new_initial_value, new_true_value):
self._test_binary_operator(initial_value, other_value, operator.truediv,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1,
'new_initial_value': 7,
'new_true_value': 3
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 2,
'new_initial_value': 28,
'new_true_value': 5
}, {
'initial_value': config_dict.placeholder(int),
'other_value': 7,
'true_value': None,
'new_initial_value': 25,
'new_true_value': 3
})
def testFloorDiv(self, initial_value, other_value, true_value,
new_initial_value, new_true_value):
self._test_binary_operator(initial_value, other_value, operator.floordiv,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 9,
'new_initial_value': 10,
'new_true_value': 100
}, {
'initial_value': 2.7,
'other_value': 3.2,
'true_value': 24.0084457245,
'new_initial_value': 6.5,
'new_true_value': 399.321543621
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 1e5,
'new_initial_value': 2,
'new_true_value': 32
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 3.0,
'true_value': None,
'new_initial_value': 7.0,
'new_true_value': 343.0
})
def testPow(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(
initial_value,
other_value,
operator.pow,
true_value,
new_initial_value,
new_true_value,
assert_fn=self.assertAlmostEqual)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1,
'new_initial_value': 10,
'new_true_value': 0
}, {
'initial_value': 5.3,
'other_value': 3.2,
'true_value': 2.0999999999999996,
'new_initial_value': 77,
'new_true_value': 0.2
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 0,
'new_initial_value': 32,
'new_true_value': 2
}, {
'initial_value': config_dict.placeholder(int),
'other_value': 7,
'true_value': None,
'new_initial_value': 25,
'new_true_value': 4
})
def testMod(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(
initial_value,
other_value,
operator.mod,
true_value,
new_initial_value,
new_true_value,
assert_fn=self.assertAlmostEqual)
@parameterized.parameters(
{
'initial_value': True,
'other_value': True,
'true_value': True,
'new_initial_value': False,
'new_true_value': False
}, {
'initial_value': ml_collections.FieldReference(False),
'other_value': ml_collections.FieldReference(False),
'true_value': False,
'new_initial_value': True,
'new_true_value': False
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': True,
'true_value': None,
'new_initial_value': False,
'new_true_value': False
})
def testAnd(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.and_,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': False,
'other_value': False,
'true_value': False,
'new_initial_value': True,
'new_true_value': True
}, {
'initial_value': ml_collections.FieldReference(True),
'other_value': ml_collections.FieldReference(True),
'true_value': True,
'new_initial_value': False,
'new_true_value': True
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': False,
'true_value': None,
'new_initial_value': True,
'new_true_value': True
})
def testOr(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.or_,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': False,
'other_value': True,
'true_value': True,
'new_initial_value': True,
'new_true_value': False
}, {
'initial_value': ml_collections.FieldReference(True),
'other_value': ml_collections.FieldReference(True),
'true_value': False,
'new_initial_value': False,
'new_true_value': True
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': True,
'true_value': None,
'new_initial_value': True,
'new_true_value': False
})
def testXor(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.xor,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'true_value': -3,
'new_initial_value': -22,
'new_true_value': 22
}, {
'initial_value': 15.3,
'true_value': -15.3,
'new_initial_value': -0.2,
'new_true_value': 0.2
}, {
'initial_value': ml_collections.FieldReference(7),
'true_value': ml_collections.FieldReference(-7),
'new_initial_value': 123,
'new_true_value': -123
}, {
'initial_value': config_dict.placeholder(int),
'true_value': None,
'new_initial_value': -6,
'new_true_value': 6
})
def testNeg(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, operator.neg, true_value,
new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': config_dict.create(attribute=2),
'true_value': 2,
'new_initial_value': config_dict.create(attribute=3),
'new_true_value': 3,
},
{
'initial_value': config_dict.create(attribute={'a': 1}),
'true_value': config_dict.create(a=1),
'new_initial_value': config_dict.create(attribute={'b': 1}),
'new_true_value': config_dict.create(b=1),
},
{
'initial_value':
ml_collections.FieldReference(config_dict.create(attribute=2)),
'true_value':
ml_collections.FieldReference(2),
'new_initial_value':
config_dict.create(attribute=3),
'new_true_value':
3,
},
{
'initial_value': config_dict.placeholder(config_dict.ConfigDict),
'true_value': None,
'new_initial_value': config_dict.create(attribute=3),
'new_true_value': 3,
},
)
def testAttr(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, lambda x: x.attr('attribute'),
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'true_value': 3,
'new_initial_value': -101,
'new_true_value': 101
}, {
'initial_value': -15.3,
'true_value': 15.3,
'new_initial_value': 7.3,
'new_true_value': 7.3
}, {
'initial_value': ml_collections.FieldReference(-7),
'true_value': ml_collections.FieldReference(7),
'new_initial_value': 3,
'new_true_value': 3
}, {
'initial_value': config_dict.placeholder(float),
'true_value': None,
'new_initial_value': -6.25,
'new_true_value': 6.25
})
def testAbs(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, operator.abs, true_value,
new_initial_value, new_true_value)
def testToInt(self):
self._test_unary_operator(25.3, lambda ref: ref.to_int(), 25, 27.9, 27)
ref = ml_collections.FieldReference(64.7)
ref = ref.to_int()
self.assertEqual(ref.get(), 64)
self.assertEqual(ref._field_type, int)
def testToFloat(self):
self._test_unary_operator(12, lambda ref: ref.to_float(), 12.0, 0, 0.0)
ref = ml_collections.FieldReference(647)
ref = ref.to_float()
self.assertEqual(ref.get(), 647.0)
self.assertEqual(ref._field_type, float)
def testToString(self):
self._test_unary_operator(12, lambda ref: ref.to_str(), '12', 0, '0')
ref = ml_collections.FieldReference(647)
ref = ref.to_str()
self.assertEqual(ref.get(), '647')
self.assertEqual(ref._field_type, str)
def testSetValue(self):
ref = ml_collections.FieldReference(1.0)
other = ml_collections.FieldReference(3)
ref_plus_other = ref + other
self.assertEqual(ref_plus_other.get(), 4.0)
ref.set(2.5)
self.assertEqual(ref_plus_other.get(), 5.5)
other.set(110)
self.assertEqual(ref_plus_other.get(), 112.5)
# Type checking
with self.assertRaises(TypeError):
other.set('this is a string')
with self.assertRaises(TypeError):
other.set(ml_collections.FieldReference('this is a string'))
with self.assertRaises(TypeError):
other.set(ml_collections.FieldReference(None, field_type=str))
def testSetResult(self):
ref = ml_collections.FieldReference(1.0)
result = ref + 1.0
second_result = result + 1.0
self.assertEqual(ref.get(), 1.0)
self.assertEqual(result.get(), 2.0)
self.assertEqual(second_result.get(), 3.0)
ref.set(2.0)
self.assertEqual(ref.get(), 2.0)
self.assertEqual(result.get(), 3.0)
self.assertEqual(second_result.get(), 4.0)
result.set(4.0)
self.assertEqual(ref.get(), 2.0)
self.assertEqual(result.get(), 4.0)
self.assertEqual(second_result.get(), 5.0)
# All references are broken at this point.
ref.set(1.0)
self.assertEqual(ref.get(), 1.0)
self.assertEqual(result.get(), 4.0)
self.assertEqual(second_result.get(), 5.0)
def testTypeChecking(self):
ref = ml_collections.FieldReference(1)
string_ref = ml_collections.FieldReference('a')
x = ref + string_ref
with self.assertRaises(TypeError):
x.get()
def testNoType(self):
self.assertRaisesRegex(TypeError, 'field_type should be a type.*',
ml_collections.FieldReference, None, 0)
def testEqual(self):
# Simple case
ref1 = ml_collections.FieldReference(1)
ref2 = ml_collections.FieldReference(1)
ref3 = ml_collections.FieldReference(2)
self.assertEqual(ref1, 1)
self.assertEqual(ref1, ref1)
self.assertEqual(ref1, ref2)
self.assertNotEqual(ref1, 2)
self.assertNotEqual(ref1, ref3)
# ConfigDict inside FieldReference
ref1 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 1}))
ref2 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 1}))
ref3 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 2}))
self.assertEqual(ref1, ml_collections.ConfigDict({'a': 1}))
self.assertEqual(ref1, ref1)
self.assertEqual(ref1, ref2)
self.assertNotEqual(ref1, ml_collections.ConfigDict({'a': 2}))
self.assertNotEqual(ref1, ref3)
def testLessEqual(self):
# Simple case
ref1 = ml_collections.FieldReference(1)
ref2 = ml_collections.FieldReference(1)
ref3 = ml_collections.FieldReference(2)
self.assertLessEqual(ref1, 1)
self.assertLessEqual(ref1, 2)
self.assertLessEqual(0, ref1)
self.assertLessEqual(1, ref1)
self.assertGreater(ref1, 0)
self.assertLessEqual(ref1, ref1)
self.assertLessEqual(ref1, ref2)
self.assertLessEqual(ref1, ref3)
self.assertGreater(ref3, ref1)
def testControlFlowError(self):
ref1 = ml_collections.FieldReference(True)
ref2 = ml_collections.FieldReference(False)
with self.assertRaises(NotImplementedError):
if ref1:
pass
with self.assertRaises(NotImplementedError):
_ = ref1 and ref2
with self.assertRaises(NotImplementedError):
_ = ref1 or ref2
with self.assertRaises(NotImplementedError):
_ = not ref1
if __name__ == '__main__':
absltest.main()
| 2.640625 | 3 |
evaluation.py | maylilyo/wd60 | 0 | 12773967 | # Standard
import gc
from pathlib import Path
import time
# PIP
from ignite.metrics import PSNR, SSIM
from lpips import LPIPS
from ptflops import get_model_complexity_info
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
# Custom
from custom.softsplat.model import SoftSplat
from custom.vimeo.dataset import Vimeo
# Timing utilities
start_time = None
def start_timer():
    """Open a timed, memory-tracked measurement window on the CUDA device."""
    global start_time
    gc.collect()  # drop unreferenced Python objects before measuring
    torch.cuda.empty_cache()  # release cached CUDA blocks so stats start clean
    torch.cuda.reset_peak_memory_stats()
    # Wait for any in-flight kernels so they are not charged to this window.
    torch.cuda.synchronize()
    start_time = time.time()
def end_timer_and_print():
    """Close the window opened by start_timer(); print elapsed wall-clock
    time and peak CUDA tensor memory (in MiB) since the last reset."""
    global start_time
    # Ensure all queued GPU work has finished before reading the clock.
    torch.cuda.synchronize()
    end_time = time.time()
    print("Total execution time = {:.3f} sec".format(end_time - start_time))
    memory = torch.cuda.max_memory_allocated() // 1024 // 1024
    print(f"Max memory used by tensors = {memory}MB")
def test(cfg):
    """Evaluate a SoftSplat frame-interpolation model on the Vimeo test split.

    Depending on flags in *cfg*, either reports model complexity only
    (``cfg.flops``) or runs inference over the whole test set and prints
    the requested metrics (PSNR / SSIM / LPIPS) plus timing and peak
    CUDA memory.
    """
    print(f"[ {cfg.model.flow_extractor} ]")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    work_dir = Path(cfg.work_dir).absolute()
    data_dir = work_dir / cfg.data_dir
    weight_dir = work_dir / cfg.weight_dir
    # Load data
    test_dataset = Vimeo(
        data_dir=data_dir,
        state="test",
        is_pt=False,
        is_aug=False,
        is_crop=True,
    )
    test_dataloader = DataLoader(
        test_dataset,
        batch_size=cfg.batch_size,
        num_workers=cfg.num_workers,
        pin_memory=True,
    )
    # Init model
    model = SoftSplat(cfg.model).to(device)
    model.eval()
    if cfg.flops:
        # Complexity-report mode: print MACs / parameter count and exit.
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=cfg.amp):
                macs, params = get_model_complexity_info(model, (3, cfg.model.height, cfg.model.width))
                print("{:<30} {:<8}".format("Computational complexity: ", macs))
                print("{:<30} {:<8}".format("Number of parameters: ", params))
        return
    # Load model
    if cfg.name != "none":
        weight_path = weight_dir / f"{cfg.name}.pt"
        print(f"Load {cfg.name} model from {weight_path}")
        state_dict = torch.load(weight_path)
        model.load_state_dict(state_dict)
    # Set metrics
    if cfg.psnr:
        metric_psnr = PSNR(data_range=1.0, device=device)
    if cfg.ssim:
        metric_ssim = SSIM(data_range=1.0, device=device)
    if cfg.lpips:
        calculate_lpips = LPIPS(net="alex", verbose=False).to(device)
    # Inference
    total_psnr = 0
    total_ssim = 0
    total_lpips = 0
    with torch.no_grad():
        start_timer()
        for batch in tqdm(test_dataloader):
            img1, img2, y = batch
            img1 = img1.to(device)
            img2 = img2.to(device)
            if cfg.psnr or cfg.ssim or cfg.lpips:
                y = y.to(device)
            with torch.cuda.amp.autocast(enabled=cfg.amp):
                y_hat = model(img1, img2)
            if cfg.amp:
                y_hat = y_hat.float()  # metrics expect full precision
            if cfg.psnr:
                # Per-batch compute + reset, so totals are averaged below.
                metric_psnr.update((y_hat, y))
                total_psnr += metric_psnr.compute()
                metric_psnr.reset()
            if cfg.ssim:
                metric_ssim.update((y_hat, y))
                total_ssim += metric_ssim.compute()
                metric_ssim.reset()
            if cfg.lpips:
                total_lpips += calculate_lpips(y_hat, y).mean()
        end_timer_and_print()
    if cfg.psnr:
        average_psnr = total_psnr / len(test_dataloader)
        print(f"PSNR: {average_psnr:.4f}")
    if cfg.ssim:
        average_ssim = total_ssim / len(test_dataloader)
        print(f"SSIM: {average_ssim:.4f}")
    if cfg.lpips:
        average_lpips = total_lpips / len(test_dataloader)
        print(f"LPIPS: {average_lpips:.5f}")
| 2.015625 | 2 |
Partylist.py | mas250/Python2 | 0 | 12773968 | <filename>Partylist.py
#PartyList.py
print "\t\tThis program allows you to maintain"
print "t\t\a list of names opf people to invite"
print "\t\tto a party\n"
names = [] # Creat an empty list
choice = "z" #Initalize choice with value to set while loop to true
while choice != "q":
print """
Choose what you would like to do from the following menu (or q to exit)"
a: Add a name
b: Remove a name
c: Display the names invited
d: Sort the names alphabetically
e: Display the number of people invited
q: quit the program
"""
choice = raw_input("\nType in your choice (a,b,c,d,e (or q to exit): ")
#add a new name
if choice == "a":
newName = raw_input("\nEnter a name to add to the party list: ")
names.append(newName)
#remove a name
elif choice == "b":
oldName = raw_input("\nEnter the name you wish to remove:")
names.append(newName)
if oldname in names:
names.remove(oldName)
else:
print
print oldname,"is not on the guest list"
#display those invited
elif choice == "c":
print "\nYou have invited"
for i in names:
print i
#sort the names alphabetically
elif choice == "d":
names.sort()
#use the function len to calculate the number of people invited
elif choice == "e":
numberOfPeople = len(names)
if numberOfPeople == 0:
print "n\You have invited no one..."
elif numberOfPeople == 1:
print "\nYou have invited", numberOfPeople, "person"
else:
print "\n You have invited", numberOfPeople, "people"
elif choice == "q":
print "\nGoodbye!"
else: # if user chose some crazy input
print "\n Sorry, there was an error in your input"
print " Valid choices are a,b,c,d or e"
print "to quit: type q"
| 4.28125 | 4 |
fitness-backend/src/app/dao/class_descs_dao.py | cuappdev/archives | 0 | 12773969 | from . import *
def get_all_class_descs():
    """Return every ClassDesc row in the database."""
    return ClassDesc.query.all()
def get_class_desc_by_id(gym_class_id):
    """Return the ClassDesc with the given primary key, or None if absent."""
    return ClassDesc.query.filter(ClassDesc.id == gym_class_id).first()
def get_class_descs_by_ids(class_desc_id_list):
    """Return the ClassDesc rows for every id in *class_desc_id_list*.

    Raises:
        Exception: if any id has no matching row; the message names the
            first missing id so the caller can tell which lookup failed.
    """
    result = []
    for class_desc_id in class_desc_id_list:
        class_desc = get_class_desc_by_id(class_desc_id)
        if class_desc is None:
            raise Exception(
                'Class desc with id {} does not exist.'.format(class_desc_id))
        result.append(class_desc)
    return result
def get_class_desc_by_name(name):
    """Return the ClassDesc with the given name, or None if absent."""
    return ClassDesc.query.filter(ClassDesc.name == name).first()
def create_class_desc(name, description=''):
    """Create a ClassDesc named *name* unless one already exists.

    Returns a ``(created, class_desc)`` tuple: ``(False, existing)`` when
    a class desc with that name is already present, ``(True, new_class)``
    after committing a freshly created one.
    """
    optional_class_desc = get_class_desc_by_name(name)
    if optional_class_desc is not None:
        return False, optional_class_desc
    new_class = ClassDesc(name=name, description=description)
    db_utils.commit_model(new_class)
    return True, new_class
| 2.78125 | 3 |
setup.py | teknologist/algolia-doc-manager | 0 | 12773970 | <reponame>teknologist/algolia-doc-manager
try:
    from setuptools import setup
except ImportError:
    # Bootstrap setuptools on very old environments that lack it.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup
import sys
# Default test configuration; adjusted below for Python 2.6.
test_suite = "tests"
tests_require = ["mongo-orchestration>= 0.2, < 0.4", "requests>=2.5.1"]
if sys.version_info[:2] == (2, 6):
    # Need unittest2 to run unittests in Python 2.6
    tests_require.append("unittest2")
    test_suite = "unittest2.collector"
try:
    with open("README.rst", "r") as fd:
        long_description = fd.read()
except IOError:
    long_description = None  # Install without README.rst
setup(name='algolia-doc-manager',
      version='0.2.0',
      maintainer='teknologist',
      description='Algolia plugin for mongo-connector',
      long_description=long_description,
      platforms=['any'],
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/teknologist/algolia-doc-manager',
      install_requires=['mongo-connector >= 2.3.0','algoliasearch >= 1.5.4','urllib3 >= 1.15.1',"requests>=2.5.1"],
      packages=["mongo_connector", "mongo_connector.doc_managers"],
      license="Apache License, Version 2.0",
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: Apache Software License",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
          "Topic :: Database",
          "Topic :: Software Development :: Libraries :: Python Modules",
          "Operating System :: Unix",
          "Operating System :: MacOS :: MacOS X",
          "Operating System :: Microsoft :: Windows",
          "Operating System :: POSIX"
      ],
      keywords=['mongo-connector', "mongodb", "algolia"],
      test_suite=test_suite,
      tests_require=tests_require
      )
| 1.484375 | 1 |
gem/tests/base.py | praekelt/molo-gem | 3 | 12773971 | <gh_stars>1-10
import json
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from wagtail.core.models import Page
from molo.core.models import (
Main, SectionPage, ArticlePage, PageTranslation, Tag,
BannerPage, Languages, SiteLanguageRelation)
from molo.core.utils import generate_slug
from molo.forms.models import (
MoloFormPage, MoloFormField, ArticlePageForms)
class GemTestCaseMixin(object):
    """Factory helpers for building a Molo/Wagtail page tree in tests.

    The ``mk_*`` methods create *published* pages (main site, sections,
    articles, tags, banners, reaction-question forms); the
    ``mk_*_translation`` helpers create a sibling page in another
    language and wire up the translation relations.
    """

    def login(self):
        """Create a superuser and log the test client in as that user."""
        # Create a user
        user = get_user_model().objects.create_superuser(
            username='superuser', email='<EMAIL>', password='<PASSWORD>')
        # Login
        self.client.login(username='superuser', password='<PASSWORD>')
        return user

    def mk_root(self):
        """Get or create the Wagtail root page; keep it on ``self.root``."""
        page_content_type, created = ContentType.objects.get_or_create(
            model='page',
            app_label='wagtailcore'
        )
        self.root, _ = Page.objects.get_or_create(
            title="Root",
            slug='root',
            content_type=page_content_type,
            path='0001',
            depth=1,
            numchild=1,
            url_path='/',
        )

    def mk_main(self, title, slug, path, url_path):
        """Create a published Main page with an active English locale."""
        self.mk_root()
        main_content_type, created = ContentType.objects.get_or_create(
            model='main', app_label='core')
        # Create a new homepage
        main = Main.objects.create(
            title=title,
            slug=slug,
            content_type=main_content_type,
            path=path,
            depth=2,
            numchild=0,
            url_path=url_path,
        )
        main.save_revision().publish()
        main.save()
        language_setting = Languages.objects.create(
            site_id=main.get_site().pk)
        SiteLanguageRelation.objects.create(
            language_setting=language_setting,
            locale='en',
            is_active=True)
        return main

    def mk_tag(self, parent, slug=None, **kwargs):
        """Create one published Tag under *parent*; slug defaults from title."""
        data = {}
        data.update({
            'title': 'Test Tag',
        })
        data.update(kwargs)
        if slug:
            data.update({'slug': slug})
        else:
            data.update({'slug': generate_slug(data['title'])})
        tag = Tag(**data)
        parent.add_child(instance=tag)
        tag.save_revision().publish()
        return tag

    def mk_tags(self, parent, count=2, **kwargs):
        """Create *count* published tags under *parent* and return them."""
        tags = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test Tag {}'.format(i),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title'])
            })
            tag = Tag(**data)
            parent.add_child(instance=tag)
            tag.save_revision().publish()
            tags.append(tag)
        return tags

    def mk_reaction_question(self, parent, article, **kwargs):
        """Create a published reaction-question form linked to *article*."""
        data = {}
        data.update({
            'introduction': 'Test Question',
        })
        data.update(kwargs)
        # Bug fix: the slug was built from data['title'], which raised
        # KeyError unless every caller passed title=... explicitly.
        data.setdefault('title', 'Test Question')
        data.update({
            'slug': generate_slug(data['title'])
        })
        form = MoloFormPage(**data)
        parent.add_child(instance=form)
        form.save_revision().publish()
        field = MoloFormField(
            choices='yes,maybe,no', success_message='well done')
        form.add_child(instance=field)
        field.save_revision().publish()
        ArticlePageForms.objects.create(
            reaction_question=form, page=article)
        return form

    def mk_sections(self, parent, count=2, **kwargs):
        """Create *count* published SectionPages under *parent*."""
        sections = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test Section %s' % (i, ),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title']),
            })
            section = SectionPage(**data)
            parent.add_child(instance=section)
            section.save_revision().publish()
            sections.append(section)
        return sections

    def mk_articles(self, parent, count=2, **kwargs):
        """Create *count* published ArticlePages with sample body content."""
        articles = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test page %s' % (i, ),
                'subtitle': 'Sample page description for %s' % (i, ),
                'body': json.dumps([{
                    'type': 'paragraph',
                    'value': 'Sample page content for %s' % (i, )}]),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title'])
            })
            article = ArticlePage(**data)
            parent.add_child(instance=article)
            article.save_revision().publish()
            articles.append(article)
        return articles

    def mk_banners(self, parent, count=2, **kwargs):
        """Create *count* published BannerPages under *parent*."""
        banners = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test Banner {}'.format(i),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title'])
            })
            banner = BannerPage(**data)
            parent.add_child(instance=banner)
            banner.save_revision().publish()
            banners.append(banner)
        return banners

    def mk_section(self, parent, **kwargs):
        """Create and return a single published SectionPage."""
        return self.mk_sections(parent, count=1, **kwargs)[0]

    def mk_article(self, parent, **kwargs):
        """Create and return a single published ArticlePage."""
        return self.mk_articles(parent, count=1, **kwargs)[0]

    def mk_banner(self, parent, **kwargs):
        """Create and return a single published BannerPage."""
        return self.mk_banners(parent, count=1, **kwargs)[0]

    def mk_translation(self, source, language, translation):
        """Mark *translation* as the *language* version of *source*.

        Publishes the translated page, links it to *source*, and
        cross-links all of the source's translations to each other.
        """
        language_relation = translation.languages.first()
        language_relation.language = language
        language_relation.save()
        translation.language = language
        translation.save_revision().publish()
        source.specific.translated_pages.add(translation)
        source.save()
        PageTranslation.objects.get_or_create(
            page=source, translated_page=translation)
        # Cross-link every translation of `source` with every other one.
        for translated_page in \
                source.specific.translated_pages.all():
            translations = source.specific.translated_pages.all().\
                exclude(language__pk=translated_page.language.pk)
            for t in translations:
                translated_page.translated_pages.add(t)
            translated_page.save()
        return translation

    def mk_section_translation(self, source, language, **kwargs):
        """Create a SectionPage translation of *source* in *language*."""
        instance = self.mk_section(source.get_parent(), **kwargs)
        return self.mk_translation(source, language, instance)

    def mk_article_translation(self, source, language, **kwargs):
        """Create an ArticlePage translation of *source* in *language*."""
        instance = self.mk_article(source.get_parent(), **kwargs)
        return self.mk_translation(source, language, instance)

    def mk_tag_translation(self, source, language, **kwargs):
        """Create a Tag translation of *source* in *language*."""
        instance = self.mk_tag(source.get_parent(), **kwargs)
        return self.mk_translation(source, language, instance)

    def mk_reaction_translation(self, source, article, language, **kwargs):
        """Create a reaction-question translation of *source* in *language*."""
        instance = self.mk_reaction_question(
            source.get_parent(), article, **kwargs)
        return self.mk_translation(source, language, instance)
| 1.90625 | 2 |
ggongsul/member/migrations/0008_auto_20211126_1014.py | blc-cruise/ggongsul-api | 2 | 12773972 | <filename>ggongsul/member/migrations/0008_auto_20211126_1014.py<gh_stars>1-10
# Generated by Django 3.1.5 on 2021-11-26 10:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('member', '0007_memberdetail_recommended_place'),
]
operations = [
migrations.AlterField(
model_name='socialaccount',
name='provider',
field=models.CharField(choices=[('kakao', '카카오'), ('apple', '애플')], max_length=20, verbose_name='소셜 인증 제공 업체'),
),
]
| 1.671875 | 2 |
nemo/collections/tts/helpers/partialconv1d.py | MikyasDesta/NeMo | 0 | 12773973 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: <NAME> (<EMAIL>)
###############################################################################
from typing import Optional, Tuple

import torch
import torch.nn.functional as F
from torch import nn
class PartialConv1d(nn.Conv1d):
    """1D partial convolution.

    Behaves like ``nn.Conv1d`` but, when a binary validity mask is supplied,
    the convolution result is re-normalised by the fraction of valid
    (mask == 1) samples under each kernel window, and positions with no valid
    samples are zeroed. Adapted from NVIDIA's partial-convolution code.
    """

    def __init__(self, *args, **kwargs):
        # Kept for API compatibility with the original NVIDIA implementation;
        # only the single-channel-mask, no-returned-mask path is used here.
        self.multi_channel = False
        self.return_mask = False
        super(PartialConv1d, self).__init__(*args, **kwargs)

        # All-ones kernel used to count valid samples under each window.
        self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0])
        # Number of elements in a full window.
        self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2]
        self.last_size = (None, None, None)
        self.update_mask = None
        self.mask_ratio = None

    @torch.jit.ignore
    def forward(self, input: torch.Tensor, mask_in: Optional[torch.Tensor] = None):
        """Apply the partial convolution.

        Args:
            input: tensor of shape ``(batch, channels, length)``.
            mask_in: optional binary mask of shape ``(1, 1, length)`` where 1
                marks valid samples. If omitted, the cached mask statistics
                from the previous call are reused while the input shape is
                unchanged (an all-ones mask is assumed on the first call).
                NOTE: the annotation was previously ``Tuple[int, int, int]``,
                which did not match the tensor actually passed here.

        Returns:
            The re-normalised convolution output; additionally the updated
            mask when ``self.return_mask`` is set.
        """
        assert len(input.shape) == 3
        # if a mask is input, or tensor shape changed, update mask ratio
        if mask_in is not None or self.last_size != tuple(input.shape):
            self.last_size = tuple(input.shape)
            with torch.no_grad():
                if self.weight_maskUpdater.type() != input.type():
                    self.weight_maskUpdater = self.weight_maskUpdater.to(input)
                if mask_in is None:
                    mask = torch.ones(1, 1, input.data.shape[2]).to(input)
                else:
                    mask = mask_in
                # Count of valid input samples under every output position.
                self.update_mask = F.conv1d(
                    mask,
                    self.weight_maskUpdater,
                    bias=None,
                    stride=self.stride,
                    padding=self.padding,
                    dilation=self.dilation,
                    groups=1,
                )
                # full-window-size / valid-count; the 1e-6 epsilon (rather
                # than a smaller one) keeps the division stable under mixed
                # precision training.
                self.mask_ratio = self.slide_winsize / (self.update_mask + 1e-6)
                self.update_mask = torch.clamp(self.update_mask, 0, 1)
                # Zero the ratio wherever no valid sample exists at all.
                self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)

        raw_out = super(PartialConv1d, self).forward(torch.mul(input, mask) if mask_in is not None else input)

        if self.bias is not None:
            # Re-normalise only the linear part, then restore the bias and
            # zero fully-invalid positions.
            bias_view = self.bias.view(1, self.out_channels, 1)
            output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
            output = torch.mul(output, self.update_mask)
        else:
            output = torch.mul(raw_out, self.mask_ratio)

        if self.return_mask:
            return output, self.update_mask
        else:
            return output
| 1.8125 | 2 |
birthday_pages/apps.py | JoshZero87/site | 4 | 12773974 | from __future__ import unicode_literals
from django.apps import AppConfig
class BirthdayPagesConfig(AppConfig):
    """AppConfig registering the ``birthday_pages`` Django application."""
    name = 'birthday_pages'
| 1.304688 | 1 |
DiseaseIdentifier/DiseaseClassify/migrations/0002_auto_20190515_0951.py | Rosan93/Disease-Identifier | 0 | 12773975 | # Generated by Django 2.2.1 on 2019-05-15 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: changes UploadImage.predict_image to a
    FileField uploading under ``predict_image/``."""

    dependencies = [
        ('DiseaseClassify', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='uploadimage',
            name='predict_image',
            field=models.FileField(upload_to='predict_image/'),
        ),
    ]
| 1.398438 | 1 |
python3/286.walls-and-gates.250862961.ac.py | Diego-Zulu/leetcode_answers | 0 | 12773976 | <reponame>Diego-Zulu/leetcode_answers
#
# @lc app=leetcode id=286 lang=python3
#
# [286] Walls and Gates
#
# https://leetcode.com/problems/walls-and-gates/description/
#
# algorithms
# Medium (53.35%)
# Likes: 1050
# Dislikes: 15
# Total Accepted: 113.7K
# Total Submissions: 212.9K
# Testcase Example: '[[2147483647,-1,0,2147483647],[2147483647,2147483647,2147483647,-1],[2147483647,-1,2147483647,-1],[0,-1,2147483647,2147483647]]'
#
# You are given a m x n 2D grid initialized with these three possible
# values.
#
#
# -1 - A wall or an obstacle.
# 0 - A gate.
# INF - Infinity means an empty room. We use the value 2^31 - 1 = 2147483647 to
# represent INF as you may assume that the distance to a gate is less than
# 2147483647.
#
#
# Fill each empty room with the distance to its nearest gate. If it is
# impossible to reach a gate, it should be filled with INF.
#
# Example:
#
# Given the 2D grid:
#
#
# INF -1 0 INF
# INF INF INF -1
# INF -1 INF -1
# 0 -1 INF INF
#
#
# After running your function, the 2D grid should be:
#
#
# 3 -1 0 1
# 2 2 1 -1
# 1 -1 2 -1
# 0 -1 3 4
#
#
#
# @lc code=start
def is_inside(r, c, rooms):
    """Return True when (r, c) indexes a cell inside the *rooms* grid."""
    return 0 <= r < len(rooms) and 0 <= c < len(rooms[0])
def dfs(queue, rooms):
    """Flood-fill distances from the seeded cells in *queue*.

    Each queue entry is (row, col, dist). A cell is updated only when it is
    inside the grid, is not a wall (-1) and its stored value is larger than
    the candidate distance. (Historically named ``dfs``; the deque is
    consumed FIFO, so this is actually a breadth-first search.)
    """
    while queue:
        row, col, dist = queue.popleft()
        blocked = (not is_inside(row, col, rooms)
                   or rooms[row][col] == -1
                   or rooms[row][col] <= dist)
        if blocked:
            continue
        rooms[row][col] = dist
        for d_row, d_col in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            queue.append((row + d_row, col + d_col, dist + 1))
class Solution:
    def wallsAndGates(self, rooms: List[List[int]]) -> None:
        """
        Do not return anything, modify rooms in-place instead.
        """
        # One BFS per gate. The gate value is bumped to 1 for the duration of
        # the search so dfs() does not skip the starting cell (it ignores
        # cells whose stored value is <= the candidate distance), then
        # restored to 0 afterwards.
        for row_idx, row in enumerate(rooms):
            for col_idx, cell in enumerate(row):
                if cell == 0:
                    rooms[row_idx][col_idx] = 1
                    dfs(collections.deque([(row_idx, col_idx, 0)]), rooms)
                    rooms[row_idx][col_idx] = 0
# @lc code=end
| 3.671875 | 4 |
OpenGL/lazywrapper.py | t20100/pyopengl | 210 | 12773977 | """Simplistic wrapper decorator for Python-coded wrappers"""
from OpenGL.latebind import Curry
from OpenGL import MODULE_ANNOTATIONS
class _LazyWrapper( Curry ):
    """Marker to tell us that an object is a lazy wrapper

    Instances are produced by :func:`lazy`, which builds a dedicated
    subclass per wrapped operation; isinstance checks against this type
    identify such wrappers.
    """
def lazy( baseFunction ):
    """Produce a lazy-binding decorator that uses baseFunction

    Allows simple implementation of wrappers where the
    whole of the wrapper can be summed up as do 1 thing
    then call base function with the cleaned up result.

    Passes baseFunction in as the first argument of the
    wrapped function, all other parameters are passed
    unchanged.  The wrapper class created has __nonzero__
    and similar common wrapper entry points defined.
    """
    def wrap( wrapper ):
        """Wrap wrapper with baseFunction"""
        # Truthiness of the wrapper mirrors that of the underlying entry
        # point (GL extension functions may be unavailable/null).
        def __bool__( self ):
            return bool( baseFunction )
        def __repr__( self ):
            return '%s( %r )'%(
                'OpenGL.lazywrapper.lazy',
                baseFunction.__name__,
            )
        # Build a dedicated _LazyWrapper subclass named after the wrapper so
        # instances report a useful type name; __nonzero__ is the Python 2
        # spelling of __bool__ and is kept for compatibility.
        _with_wrapper = type( wrapper.__name__, (_LazyWrapper,), {
            '__repr__': __repr__,
            '__doc__': wrapper.__doc__,
            '__nonzero__': __bool__,
            '__bool__': __bool__,
            'wrappedOperation': baseFunction,
            'restype': getattr(wrapper, 'restype',getattr(baseFunction,'restype',None)),
        } )
        # Curry instance: calling it invokes wrapper(baseFunction, *args).
        with_wrapper = _with_wrapper(wrapper,baseFunction)
        with_wrapper.__name__ = wrapper.__name__
        if hasattr( baseFunction, '__module__' ):
            with_wrapper.__module__ = baseFunction.__module__
        return with_wrapper
    return wrap
if __name__ == "__main__":
    # Minimal self-test: wrap a real GLU entry point and verify that the
    # decorator preserves metadata, records base/wrapper functions, and
    # actually invokes the wrapper when called.
    from OpenGL.raw import GLU
    func = GLU.gluNurbsCallbackData
    output = []
    def testwrap( base ):
        "Testing"
        output.append( base )
    testlazy = lazy( func )( testwrap )
    testlazy( )
    assert testlazy.__doc__ == "Testing"
    assert testlazy.__class__.__name__ == 'testwrap'
    assert testlazy.__name__ == 'testwrap'
    assert testlazy.baseFunction is func
    assert testlazy.wrapperFunction is testwrap
    assert output
| 3.265625 | 3 |
train_interfaces.py | pgruening/fp_nets_as_novel_deep_networks_inspired_by_vision | 0 | 12773978 | <filename>train_interfaces.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from DLBio.pt_train_printer import IPrinterFcn
from DLBio.pt_training import ITrainInterface
from DLBio.pytorch_helpers import ActivationGetter, get_device
from DLBio.train_interfaces import (Accuracy, Classification, ErrorRate,
image_counter)
import helpers
from models.conv_blocks import AbsReLUBlock, DWSBlock, FPBlockJOV, INetBlock
# Block types whose inner convolutions are monitored / re-initialised below.
USED_BLOCKS = (AbsReLUBlock, FPBlockJOV, DWSBlock, INetBlock)
def get_interface(ti_type, model, device, printer, **kwargs):
    """Factory returning the train interface whose ``name`` equals *ti_type*.

    Extra constructor arguments (``logger_dict``, ``num_epochs``) are taken
    from **kwargs as needed by the chosen interface.

    Raises:
        ValueError: if *ti_type* matches no known interface name.
    """
    if ti_type == Classification.name:
        return Classification(model, device, printer)

    if ti_type == Logging.name:
        return Logging(model, device, printer, kwargs['logger_dict'])

    if ti_type == NoDRClassification.name:
        return NoDRClassification(model, device, printer, kwargs['num_epochs'])

    if ti_type == ClassAndLogging.name:
        return ClassAndLogging(model, device, printer, kwargs['logger_dict'])

    raise ValueError(f"Unknown ti_type: {ti_type}")
class Logging(ITrainInterface):
    """Train interface that only runs logger functions.

    ``train_step`` returns a constant zero loss, so training with this
    interface merely feeds batches through the model and updates the
    supplied loggers/counters — nothing is optimised.
    """
    name = 'Logging'

    def __init__(self, model, device, printer, logger_dict):
        self.model = model
        self.printer = printer
        self.d = device
        self.functions = logger_dict
        # logger outputs are tracked but excluded from per-epoch printing
        self.printer.dont_print = list(logger_dict.keys())
        self.counters = {'num_samples': image_counter}

    def train_step(self, sample):
        images = sample[0].to(self.d)
        targets = sample[1].to(self.d)
        pred = self.model(images)

        counters = {key: fcn(pred, targets) for key, fcn in self.counters.items()}
        functions = {key: fcn.update(pred, targets) for key, fcn in self.functions.items()}

        # constant zero loss: pure logging mode (metrics slot is None)
        return torch.Tensor([0.]).float(), None, counters, functions
class NoDRClassification(Classification):
    """Classification interface that additionally re-initialises dead ReLUs.

    'NoDR' presumably stands for "no dead ReLUs" — each monitored block gets
    a :class:`ReInitDeadReLUs` function merged into ``self.functions``
    (TODO confirm naming).
    """
    name = 'NoDRClassification'

    # TODO: assert no resume mode in run training!
    def __init__(self, model, device, printer, num_epochs):
        super(NoDRClassification, self).__init__(model, device, printer)
        re_init_functions = get_reinit_functions(model, num_epochs)
        self.functions.update(
            re_init_functions
        )
        # re-init statistics are tracked but not printed every epoch
        self.printer.dont_print = list(re_init_functions.keys())
def get_reinit_functions(model, num_epochs):
    """Build one :class:`ReInitDeadReLUs` monitor per relevant block.

    Walks the model's ordered modules (restricted to USED_BLOCKS) and
    returns a dict mapping ``'<BlockName>_d<depth>'`` to a monitor watching
    that block's first ("upper") convolution.
    """
    ordered_modules = helpers.get_ordered_module_list(
        model, batch_input_size=(1, 3, 32, 32),
        device=get_device(),
        use_only=USED_BLOCKS
    )

    monitors = {}
    for depth, block_module in ordered_modules:
        upper = block_module.block_with_shortcut.block.upper
        act_getter = ActivationGetter(upper)
        monitors[f'{block_module._get_name()}_d{depth}'] = ReInitDeadReLUs(
            upper[0], act_getter, num_epochs=num_epochs
        )
    return monitors
class ReInitDeadReLUs(IPrinterFcn):
    """Printer function that re-initialises 'dead' output channels of a conv.

    A channel counts as dead when its mean activation, averaged over all
    batches recorded via :meth:`update`, stays below ``thres``. When called,
    dead channels receive a fresh Kaiming-uniform initialisation scaled by a
    weight that decays linearly from ``init_weight`` to zero over
    ``num_epochs``.
    """

    def __init__(self, module, act_getter, *, num_epochs, init_weight=1., num_restart_calls=2):
        self.act_getter = act_getter
        self.module = module
        # per-batch mean activations per channel, cleared on restart()
        self.values = []
        # used to compute the weight for reinit
        self.num_restart_calls = num_restart_calls
        self.thres = 1e-6
        self.ctr = 0.
        self.N = num_epochs
        self.b = init_weight
        # N-1: the weight should be zero in the last epoch
        self.m = -1. * self.b / (self.N - 1)

    def update(self, *args):
        # Record the mean activation per output channel for the last batch.
        # Assumes act_getter.out is a 4D (N, C, H, W) tensor — TODO confirm.
        with torch.no_grad():
            self.values.append(
                self.act_getter.out.mean([0, 2, 3]).cpu().numpy()
            )
        self.act_getter.out = None
        return self

    def restart(self):
        # Called between epochs; ctr advances by 1/num_restart_calls because
        # restart is apparently invoked num_restart_calls times per epoch —
        # confirm against the training loop.
        self.values = []
        self.ctr += 1. / float(self.num_restart_calls)
        assert self.ctr <= self.N

    def _get_weight(self):
        # Linear decay: w = m * ctr + b (reaches 0 in the final epoch).
        w = float(self.ctr) * self.m + self.b
        return w

    def __call__(self):
        # Average the recorded per-channel activations over the epoch.
        X = np.stack(self.values, -1).copy()
        X = np.mean(X, -1)
        is_dead = (X < self.thres)
        w = self._get_weight()

        with torch.no_grad():
            new_weights = init.kaiming_uniform_(
                torch.zeros(self.module.weight.shape), a=0.
            ).to(self.module.weight.device)

            self.module.weight[is_dead, :, ...] += (
                w * new_weights[is_dead, :, ...]
            )

        # return the percentage of dead ReLUs
        return is_dead.astype('float32').mean() * 100.
class ClassAndLogging(ITrainInterface):
    """Cross-entropy classification training combined with extra loggers."""
    name = 'ClassAndLogging'

    def __init__(self, model, device, printer, logger_dict):
        self.model = model
        self.printer = printer
        self.d = device
        self.xent_loss = nn.CrossEntropyLoss()
        # standard metrics first, then the user-supplied loggers (which, as
        # with dict.update, overwrite 'acc'/'er' on a key collision)
        self.functions = {'acc': Accuracy(), 'er': ErrorRate(), **logger_dict}
        self.counters = {'num_samples': image_counter}
        # logger outputs are recorded but excluded from per-epoch printing
        self.printer.dont_print = list(logger_dict.keys())

    def train_step(self, sample):
        images = sample[0].to(self.d)
        targets = sample[1].to(self.d)
        pred = self.model(images)

        # targets may arrive as (batch, 1); cross entropy expects (batch,)
        if targets.ndim == 2:
            targets = targets[:, 0]
        loss = self.xent_loss(pred, targets)

        counters = {key: fcn(pred, targets) for key, fcn in self.counters.items()}
        functions = {key: fcn.update(pred, targets) for key, fcn in self.functions.items()}

        # metrics slot is unused (None)
        return loss, None, counters, functions
class Logger(IPrinterFcn):
    """
    A Logger is a printer function that monitors activation values.

    Activations are transformed using ``_get_transformed_activation`` and
    stored in ``self.values``. When calling the Logger, the list of values
    is typecast to a numpy array and the reduce function is applied to it.
    """

    def __init__(self, name, act_getter, reduce_fcn):
        self.name = name
        # kept so restart() can undo any external renaming of self.name
        self.init_name = name
        self.act_getter = act_getter
        self.reduce = reduce_fcn
        self.values = []

    def update(self, *args):
        # Record only non-empty activations; always clear the getter so the
        # same activation is never counted twice.
        if self.act_getter.out is not None:
            if self.act_getter.out.numel() != 0:
                self.values.append(self._get_transformed_activation())
        self.act_getter.out = None
        return self

    def restart(self):
        # Reset name and collected values (typically between epochs).
        assert hasattr(self, "init_name"), (
            "In your init function you need to "
            "set the init_name attribute to name. This attribute is needed "
            "for the restart function.")
        self.name = self.init_name
        self.values = []

    def _get_transformed_activation(self):
        # Default transformation: scalar mean of the captured activation.
        return self.act_getter.out.mean().item()

    def __call__(self):
        x = np.array(self.values)
        return self.reduce(x)
| 2.265625 | 2 |
main.py | ahlumcho/RyeongBot | 0 | 12773979 | print("Hello world! i'm <NAME>" )
print("캔디크러시팡팡파라바라팡팡팡" )
print('웅앵옹앙응')
| 2.15625 | 2 |
rlagent.py | Ankur-Deka/Emergent-Multiagent-Strategies | 23 | 12773980 | from rlcore.algo import PPO
from rlcore.storage import RolloutStorage
class Neo(object):
    """Agent wrapper bundling a policy, its rollout storage and a PPO trainer.

    Provides thin helpers to drive one on-policy collect/update cycle:
    initialise observations, act per step, compute returns and run the PPO
    update over the stored rollouts.
    """

    def __init__(self, args, policy, obs_shape, action_space):
        super().__init__()
        self.obs_shape = obs_shape
        self.action_space = action_space
        self.actor_critic = policy # it is MPNN instance
        self.rollouts = RolloutStorage(args.num_steps, args.num_processes, self.obs_shape, self.action_space,
            recurrent_hidden_state_size=1)
        self.args = args
        self.trainer = PPO(self.actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef,
            args.entropy_coef, lr=args.lr,max_grad_norm=args.max_grad_norm)
        self.alive = True  # liveness flag; appears to be toggled externally — confirm

    def load_model(self, policy_state):
        """Load pre-trained policy weights into the actor-critic."""
        self.actor_critic.load_state_dict(policy_state)

    def initialize_obs(self, obs):
        # this function is called at the start of episode
        self.rollouts.reset()
        self.rollouts.obs[0].copy_(obs)

    def initialize_new_episode(self, step, obs, masks):
        ## one rollout can have multiple episodes
        self.rollouts.obs[step].copy_(obs)
        self.rollouts.masks[step].copy_(masks)

    def update_rollout(self, obs, reward, mask):
        # Stores the transition using the action/value/log-prob cached by the
        # most recent act() call.
        self.rollouts.insert(obs, self.states, self.action, self.action_log_prob, self.value, reward, mask)

    def act(self, step, deterministic=False):
        """Query the policy at *step*; caches value/action/log-prob/state for
        the subsequent update_rollout() call and returns the action."""
        self.value, self.action, self.action_log_prob, self.states = self.actor_critic.act(self.rollouts.obs[step],
            self.rollouts.recurrent_hidden_states[step],self.rollouts.masks[step],deterministic=deterministic)
        return self.action

    def wrap_horizon(self, next_value, start_pt, end_pt):
        # Compute discounted returns (GAE; gamma/tau from args) over the span.
        self.rollouts.compute_returns(next_value, True, self.args.gamma, self.args.tau, start_pt, end_pt)

    def before_update(self):
        self.rollouts.before_update()

    def after_update(self):
        self.rollouts.after_update()

    def update(self):
        """Run one PPO update over the stored rollouts and return its stats."""
        return self.trainer.update(self.rollouts)
serene_load/serene_load/helpers/load_helpers.py | NICTA/serene-etl | 0 | 12773981 | <filename>serene_load/serene_load/helpers/load_helpers.py<gh_stars>0
import collections
import json
import logging
import subprocess
import os
from serene_load.helpers.containers.container_base import BaseContainer, FileContainer
from serene_metadata import REQUIRED_LOAD_FIELDS, PRIMARY_ID
class load_logger(object):
    """Mixin exposing the root logger as a ``log`` property."""
    @property
    def log(self):
        return logging.getLogger()
# Module-level root logger used by the helpers below.
log = logging.getLogger()
#What characters are allowed in column names?
ALLOWED_COLUMN_NAME_CHARACTERS = 'abcdefghijklmnopqrstuvwxyz1234567890_'
# Assumed compression ratio used when sizing output buckets (see
# LoadedMeta.layout_files) — appears to be an empirical bzip2 estimate.
COMPRESSION_RATIO = 15
def non_empty(f):
    """True for a non-blank string, or for any non-string value that is not
    None. (Python 2 module: ``basestring`` covers str and unicode.)"""
    if not isinstance(f, basestring):
        return f is not None
    return len(f.strip()) > 0
class FileLoader(object):
    """Task step that streams every file of a bucket into one JSON output.

    ``func`` is a generator: it periodically yields progress dicts
    (``proc_bytes``), yields each finished accessor for cleanup, and finally
    yields the completed task itself.
    """
    name = 'load_files'

    @staticmethod
    def func(args, task):
        bytes_processed = 0
        _output = task['output']
        with _output.open() as output:
            for infile in task['bucket']:
                # Restore a pickled container accessor if present, otherwise
                # build a plain FileContainer from the file metadata.
                try:
                    _accessor = BaseContainer.unpickle(infile.pop('accessor'), job_args=args)
                except KeyError:
                    _accessor = FileContainer(path=infile['path'], file=infile['file'], job_args=args)
                try:
                    # if encoding is passed from the command line, overwrite the detected encoding
                    enc = args.enc
                except AttributeError:
                    enc = infile['enc']
                log.debug('Using encoding {}'.format(enc))
                with _accessor as accessor:
                    iterator = task['processor'].process_file(args=args, meta=infile, fd=accessor.open())
                    bytes_processed = 0
                    # Per-record primary id: '<relative path>:<row number>'.
                    primary_id = accessor.relative_path() + ':{}'
                    row = 0
                    for d in iterator:
                        d[PRIMARY_ID.name()] = primary_id.format(row)
                        row += 1
                        for r in REQUIRED_LOAD_FIELDS:
                            assert r in d, 'Processor has not added required field: {}\n{}'.format(r, json.dumps(d, indent=1))
                        # Check that column names are in ALLOWED_COLUMN_NAME_CHARACTERS
                        # (NOTE(review): the assert message 'Not' looks truncated)
                        assert all(_ in ALLOWED_COLUMN_NAME_CHARACTERS for _ in ''.join(d.keys())), 'Not'
                        bytes_processed += output.write(
                            json.dumps(
                                # remove any empty fields
                                {k: v for k, v in filter(lambda _: non_empty(_[1]), d.iteritems())},
                                ensure_ascii=False).encode('utf-8') + '\n')
                        # Report progress roughly every 100 MB of output.
                        if bytes_processed > 100000000:
                            yield {
                                'proc_bytes': bytes_processed
                            }
                            bytes_processed = 0
                # cleanup
                yield {
                    'accessor': _accessor
                }
            # done processing infile
        # done processing bucket
        if bytes_processed:
            task['proc_bytes'] = bytes_processed
        del task['next_func']
        yield task
class OutputWriter(object):
    """Base writer that pipes data into a shell pipeline (see subclasses).

    Subclasses define ``process_cmd`` (pipeline receiving the data on stdin)
    and ``cleanup_cmd`` (command removing a partially-written output).
    Usable as a context manager: ``__exit__`` flushes and closes the pipe.
    """

    def __init__(self, cid, base_name):
        self.cid = cid
        self.base_name = base_name
        # total bytes written so far (pre-compression)
        self.input_bytes = 0
        self.fd = None

    def __enter__(self):
        return self

    def write(self, data):
        """Write *data* to the pipeline's stdin; returns the byte count."""
        self.fd.write(data)
        ld = len(data)
        self.input_bytes += ld
        return ld

    def __exit__(self, exc_type, exc_val, exc_tb):
        # flush/close only; exceptions are propagated (return False)
        self.fd.flush()
        self.fd.close()
        return False

    def process_cmd(self):
        raise NotImplementedError()

    def cleanup_cmd(self):
        raise NotImplementedError()

    def cleanup(self):
        """Remove any partial output via the subclass's cleanup command."""
        subprocess.check_call(' '.join(self.cleanup_cmd()), shell=True)

    def open(self):
        """Start the pipeline and attach its stdin as ``self.fd``."""
        cmd = self.process_cmd()
        try:
            out = subprocess.Popen(' '.join(cmd), stdin=subprocess.PIPE, shell=True, universal_newlines=False)
        except OSError:
            raise
            # raise OSError('HDFS not on PATH')
        self.fd = out.stdin
        return self
class OutputToHDFS(OutputWriter):
    """
    Final file writer to HDFS

    Pipes stdin through bzip2 and puts the result at
    ``/data/<zero-padded cid>/<base_name>.json.bz2``.
    """
    @property
    def output_file(self):
        return '/data/{:>08}/{}.json.bz2'.format(self.cid, self.base_name)

    def process_cmd(self):
        return ['bzip2', '-z', '-c', '-', '|', 'hdfs', 'dfs', '-put', '-', self.output_file]

    def cleanup_cmd(self):
        # the trailing '*' also removes HDFS temporary/partial files
        return ['hdfs', 'dfs', '-rm', self.output_file + '*']
class OutputToLocalFS(OutputWriter):
    """
    Output to local directory (for running locally)

    Pipes stdin through bzip2 into a file in the current working directory.
    """
    @property
    def output_file(self):
        return 'output-data-{:>08}-{}.json.bz2'.format(self.cid, self.base_name)

    def process_cmd(self):
        return ['bzip2', '-z', '-c', '-', '>', self.output_file]

    def cleanup_cmd(self):
        return ['rm', self.output_file]
class JobProcessor(load_logger):
    """Abstract base type for job processors.

    Subclasses declare FILTERS (metadata filters selecting which files they
    handle) and implement :meth:`process`.
    """

    # Filters applied to file metadata; any file matching one of these
    # filters is routed to this processor.
    FILTERS = []

    def __init__(self, arguments, worker):
        self.worker = worker
        self.args = arguments or {}

    def process(self, output, data):
        """Overwrite this function to process data.

        Access input files via self.data
        """
        raise NotImplementedError()
class LoadedMeta(object):
    """Index of which file hashes have already been loaded to HDFS.

    Scans *loaded_path* for ``*.json`` manifests (one per HDFS output file,
    each containing a list of sha256 hashes) and offers helpers to look up
    hashes, record new buckets, and propose a bucket layout for new files.
    """

    def __init__(self, loaded_path):
        self.loaded = loaded = collections.defaultdict(list)
        # NOTE(review): self.sizes is initialised but never populated here.
        self.sizes = sizes = {}
        self.loaded_path = loaded_path
        for dirpath, dirnames, filenames in os.walk(loaded_path):
            for fn in filenames:
                if fn.endswith('.json'):
                    with open(os.path.join(dirpath, fn), 'r') as indata:
                        # strip the '.json' suffix to recover the base name
                        _fn = fn[:-5]
                        hashes = json.load(indata)
                        loaded[_fn].extend(hashes)

    def which_file(self, hash):
        """
        check if a file (hash) is already listed in a HDFS file
        returns the file name if it is, None if it is not listed
        """
        for filename, hashes in self.loaded.iteritems():
            if hash in hashes:
                return filename
        return None

    def output_bucket(self, filename, bucket):
        """Write the sha256 manifest for *bucket* as ``<filename>.json``
        under ``loaded_path``."""
        head, tail = os.path.split(filename)
        if tail:
            filename = tail
        output_file = os.path.join(self.loaded_path, filename + '.json')
        assert output_file.startswith(self.loaded_path)
        dst_head, dst_tail = os.path.split(output_file)
        try:
            os.makedirs(dst_head)
        except:
            # best-effort: directory may already exist
            # (NOTE(review): bare except also hides permission errors)
            pass
        with open(output_file, 'w') as outf:
            json.dump(
                [_['sha256'] for _ in bucket],
                outf,
                indent=1
            )
        log.debug('Wrote bucket to {}'.format(output_file))

    def layout_files(self, files, MIN_SIZE_MB=512):
        """
        create a proposed layout of files on HDFS

        Files whose sha256 is already recorded are skipped; the remaining
        files are greedily packed (smallest-first) into buckets whose raw
        size targets COMPRESSION_RATIO * MIN_SIZE_MB megabytes.
        """
        already_placed = []
        not_yet_placed = []
        for _ in files:
            already = self.which_file(_['sha256'])
            if already:
                already_placed.append(_)
            else:
                not_yet_placed.append(_)
        log.info('BUCKET: {} files already loaded'.format(len(already_placed)))
        log.info('BUCKET: {} files not yet loaded'.format(len(not_yet_placed)))
        size_accessor = lambda _: _['bytes']
        bucket_size = lambda b: sum(size_accessor(_) for _ in b)
        buckets = [
            []
        ]
        for f in sorted(not_yet_placed, key=size_accessor):
            # always add to the currently-smallest bucket; open a new one
            # when that bucket has already reached the target raw size
            smallest_bucket = sorted(buckets, key=bucket_size)[0]
            if bucket_size(smallest_bucket) > (COMPRESSION_RATIO * MIN_SIZE_MB * 1024 * 1024):
                bucket = []
                buckets.append(bucket)
            else:
                bucket = smallest_bucket
            bucket.append(f)
        return buckets
| 2 | 2 |
Alihossein/contest/8901/8901.py | alihossein/quera-answers | 0 | 12773982 | <gh_stars>0
# question : https://quera.ir/problemset/contest/8901
# Shell game: track which cup ('L', 'M' or 'R') hides the ball after n swaps.
first_line = input().split(' ')
ball_start = first_line[1]
swap_count = int(first_line[0])

cups = {'L': 0, 'M': 0, 'R': 0}
swaps = [input().split(' ') for _ in range(swap_count)]

cups[ball_start] = 1
for move in swaps:
    left, right = move[0], move[1]
    cups[left], cups[right] = cups[right], cups[left]

for cup_name, has_ball in cups.items():
    if has_ball == 1:
        print(cup_name)
        break
| 3.265625 | 3 |
api/users/serializers.py | individuo7/wololo-tournaments-api | 2 | 12773983 | <reponame>individuo7/wololo-tournaments-api<filename>api/users/serializers.py<gh_stars>1-10
from rest_framework.serializers import ModelSerializer
from django.contrib.auth import get_user_model
User = get_user_model()
class UserSerializer(ModelSerializer):
    """DRF serializer exposing profile fields of the active user model.

    Account/lifecycle fields are read-only; profile fields (email, icon,
    gold, background_color) remain writable.
    """
    class Meta:
        model = User
        fields = [
            "username",
            "email",
            "icon",
            "gold",
            "background_color",
            "date_joined",
            "is_active",
            "last_login",
        ]
        read_only_fields = ["username", "date_joined", "is_active", "last_login"]
| 2.078125 | 2 |
server_client/manager.py | shrijaltamrakar/Descent_py | 2 | 12773984 | <reponame>shrijaltamrakar/Descent_py
import os, threading
from time import sleep
# Shell commands for the single- and multi-connection server/client demos.
server = "python server.py"
client = "python client.py"
multi_server = "python multi_server.py"
multi_client = "python multi_client.py"
class run_cmd(threading.Thread):
    """Thread that executes one shell command via ``os.system``.

    The command is expected to look like ``"python <script>"``; the script
    name (second token) is echoed when the thread starts and exits.
    """

    def __init__(self, command):
        super().__init__()
        self.command = command

    def run(self):
        script_name = self.command.split(' ')[1]
        print(f" running {script_name} ......")
        os.system(self.command)
        print(f"exiting thread {script_name} ......")
if __name__ == '__main__':
    # Ask which demo to launch, start the matching server thread, give the
    # server a second to come up, then start the client thread.
    # (Refactor: the S and M branches were duplicated except for the command
    # pair; they now share one start sequence.)
    while 1:
        toggle = input("type S for single server & M for multi server ..... ")
        if toggle.upper() == 'S':
            server_cmd, client_cmd = server, client
        elif toggle.upper() == 'M':
            server_cmd, client_cmd = multi_server, multi_client
        else:
            # unrecognised answer: ask again
            continue
        server_thread = run_cmd(server_cmd)
        client_thread = run_cmd(client_cmd)
        server_thread.start()
        sleep(1)  # let the server bind before the client connects
        client_thread.start()
        break
code/Phase_1_CNN/CompileTensor.py | MMarochov/SEE_ICE | 4 | 12773985 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 14:52:32 2021
@author: Patrice
Simple utility script to read tiles from drive and compile a large tensor saved as an npy file.
Use only if you have enough ram to contain all your samples at once
"""
import numpy as np
import glob
import skimage.io as io
def tic():
    """Start a timer (MATLAB-style tic); read the elapsed time with toc()."""
    #Homemade version of matlab tic and toc functions
    import time
    global startTime_for_tictoc
    startTime_for_tictoc = time.time()
def toc():
    """Print the seconds elapsed since the last tic() call, if any."""
    import time
    if 'startTime_for_tictoc' in globals():
        print ("Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds.")
    else:
        print ("Toc: start time not set")
tic()
# --- Configuration -------------------------------------------------------
folder='/media/patrice/DataDrive/SEE_ICE/JointTrain/'
OutputName='JointTensor5k'
tilesize=50
bands=4
classes=7
subsample=1#percentage subsample in each class
NormFactor=8192 #will save a normalised tensor ready for the CNN, better for memory to normalise now
UINT8=False #if true this will overide NormFactor and reduce the radiometry to 8-bit via normalisation by 16384
FP16=True #cast final tensor in float 16 for mixed precision training

# Count all tile samples across the per-class folders C1..C<classes>.
Itot=0
for c in range(1,classes+1):
    class_folder=folder+'C'+str(c)+'/'
    clist=glob.glob(class_folder+'*.tif')
    Itot=Itot+len(clist)
print ('found '+str(Itot)+' tile samples')
# Pre-allocate the full tensor and label vector (float16 to save memory).
MasterTensor=np.zeros((int(subsample*Itot),tilesize,tilesize,bands), dtype='float16')
MasterLabel=np.zeros((int(subsample*Itot)), dtype='float16')
tile=0
for c in range(1,classes+1):
    class_folder=folder+'C'+str(c)+'/'
    clist=glob.glob(class_folder+'*.tif')
    # random subsample (without replacement) within each class
    idx = np.random.choice(np.arange(len(clist)), int(len(clist)*subsample), replace=False)
    for i in range(len(idx)):
        I=io.imread(clist[idx[i]]).reshape((1,tilesize,tilesize,bands))
        Label=c
        MasterLabel[tile] = Label
        # Radiometry handling: raw imagery is assumed to be 14-bit
        # (max 16384) — TODO confirm for the sensor used.
        if UINT8 and not(FP16):
            MasterTensor=np.uint8(MasterTensor)
            MasterTensor[tile,:,:,:] = np.uint8(255*I/16384)
        elif FP16 and UINT8:
            MasterTensor=np.float16(MasterTensor)
            I= np.uint8(255*I/16384)
            MasterTensor[tile,:,:,:]=np.float16(I/255)
        elif not(UINT8) and FP16:
            MasterTensor=np.float16(MasterTensor)
            MasterTensor[tile,:,:,:]=np.float16(I/NormFactor)
        else:
            MasterTensor=np.int16(MasterTensor)
            MasterTensor[tile,:,:,:]=np.int16(I)
        tile+=1
    print('Class '+str(c)+' compiled')
if UINT8 and not(FP16):#downsample radiometry and save as uint8
    np.save(folder+OutputName+'_T_uint8',MasterTensor)
    np.save(folder+OutputName+'_L_uint8',MasterLabel)
elif FP16 and UINT8:#data will be float 16, but first they have been downsampled to 8bit before normalisation
    np.save(folder+OutputName+'_T_uint8float16',MasterTensor)
    np.save(folder+OutputName+'_L_uint8float16',MasterLabel)
elif not(UINT8) and FP16:
    np.save(folder+OutputName+'_T_float16',MasterTensor)
    np.save(folder+OutputName+'_L_float16',MasterLabel)
else:
    np.save(folder+OutputName+'_T_int16',MasterTensor)
    np.save(folder+OutputName+'_L_int16',MasterLabel)
#Output as npy arrays for both the tensor and the label
toc()
| 2.546875 | 3 |
CursoemVideo/ex104.py | arthxvr/coding--python | 0 | 12773986 | <gh_stars>0
from colorama import init
init()
def leiaInt(msg):
    """Keep prompting with *msg* until the user types a valid integer.

    Returns:
        int: the number typed by the user.
    """
    while True:
        num = str(input(msg)).strip()
        if num.isnumeric():
            # Bug fix: the original evaluated int(num) but discarded the
            # result and returned the raw string; return the integer, as the
            # function name promises.
            return int(num)
        print('\033[0;31mERRO! Digite um número inteiro válido.\033[m')
# Demo: read one integer and echo it back (prompts/messages in Portuguese).
num = leiaInt('Digite um número: ')
print(f'Você acabou de digitar o número {num}')
| 3.5 | 4 |
py/codeforces/842C.py | shhuan/algorithms | 0 | 12773987 | # -*- coding: utf-8 -*-
import math
import collections
import bisect
import heapq
import time
import random
import itertools
import sys
"""
created by shhuan at 2017/10/20 15:45
"""
def gcd(a, b):
    """Greatest common divisor by Euclid's algorithm (iterative form)."""
    while b:
        a, b = b, a % b
    return a
# Global state for the tree traversal below.
N = 2*10**5+5
vis = [0] * N                       # visited flags per node
G = collections.defaultdict(list)   # adjacency list
ans = [0] * N                       # per-node result (gcd along path — see dfs1)
mas = [0] * N                       # per-node input values (read by dfs1)
def dfs1(v):
    """Depth-first pass: for each unvisited child u of v, set ans[u] to
    gcd(ans[v], mas[u]) — i.e. the gcd of node values along the root path.

    NOTE(review): recursion depth can reach N; the CPython recursion limit
    may need raising for deep trees.
    """
    global vis
    global ans
    vis[v] = 1
    for u in G[v]:
        if not vis[u]:
            ans[u] = gcd(ans[v], mas[u])
            dfs1(u)
def dfs2(v, dist):
vis[v] = 1
for | 3.171875 | 3 |
Python/empire/aws/dynamodb/table_struct.py | Tombmyst/Empire | 0 | 12773988 | <gh_stars>0
from empire import *
from datetime import datetime
from empire.structs import *
from empire.enums.base_enum import BaseEnum
class TableStatuses(BaseEnum):
    """DynamoDB table lifecycle statuses (values mirror the DescribeTable
    ``TableStatus`` field)."""
    CREATING: Final[str] = 'CREATING'
    UPDATING: Final[str] = 'UPDATING'
    DELETING: Final[str] = 'DELETING'
    ACTIVE: Final[str] = 'ACTIVE'
    INACCESSIBLE_ENCRYPTION_CREDENTIALS: Final[str] = 'INACCESSIBLE_ENCRYPTION_CREDENTIALS'
    ARCHIVING: Final[str] = 'ARCHIVING'
    ARCHIVED: Final[str] = 'ARCHIVED'
class TableStruct(Struct):
    """Read-only struct mirroring a DynamoDB table description.

    Field names correspond to the top-level keys of a DescribeTable
    response; values are assigned from *raw* via ``assign_values``
    (provided by the ``Struct`` base class). Nested JSON values are kept
    as plain dicts/lists.
    """
    # Determine if it is necessary to go beyond top level values...
    def __init__(self, raw: Union[JSON, None] = None):
        super().__init__()
        # Defaults signal "absent": '' for strings, -1 for counts/sizes,
        # empty containers for JSON sub-documents, None for the timestamp.
        self._attribute_definitions: List[JSON] = []
        self._table_name: str = ''
        self._key_schema: List[JSON] = []
        self._table_status: str = ''
        self._creation_time: Union[datetime, None] = None
        self._provisioned_throughput: JSON = {}
        self._table_size_bytes: int = -1
        self._item_count: int = -1
        self._table_arn: str = ''
        self._table_id: str = ''
        self._billing_mode_summary: JSON = {}
        self._local_secondary_indexes: List[JSON] = []
        self._global_secondary_indexes: List[JSON] = []
        self._stream_specification: JSON = {}
        self._latest_stream_label: str = ''
        self._latest_stream_arn: str = ''
        self._global_table_version: str = ''
        self._replicas: List[JSON] = []
        self._restore_summary: JSON = {}
        self._sse_description: JSON = {}
        self._archival_summary: JSON = {}
        super().__fields_decl_end__()
        self.assign_values(raw)

    # Read-only accessors for the fields above.
    @property
    def attribute_definitions(self) -> List[JSON]:
        return self._attribute_definitions

    @property
    def table_name(self) -> str:
        return self._table_name

    @property
    def key_schema(self) -> List[JSON]:
        return self._key_schema

    @property
    def table_status(self) -> str:
        return self._table_status

    @property
    def creation_time(self) -> datetime:
        return self._creation_time

    @property
    def provisioned_throughput(self) -> JSON:
        return self._provisioned_throughput

    @property
    def table_size_bytes(self) -> int:
        return self._table_size_bytes

    @property
    def item_count(self) -> int:
        return self._item_count

    @property
    def table_arn(self) -> str:
        return self._table_arn

    @property
    def table_id(self) -> str:
        return self._table_id

    @property
    def billing_mode_summary(self) -> JSON:
        return self._billing_mode_summary

    @property
    def local_secondary_indexes(self) -> List[JSON]:
        return self._local_secondary_indexes

    @property
    def global_secondary_indexes(self) -> List[JSON]:
        return self._global_secondary_indexes

    @property
    def stream_specification(self) -> JSON:
        return self._stream_specification

    @property
    def latest_stream_label(self) -> str:
        return self._latest_stream_label

    @property
    def latest_stream_arn(self) -> str:
        return self._latest_stream_arn

    @property
    def global_table_version(self) -> str:
        return self._global_table_version

    @property
    def replicas(self) -> List[JSON]:
        return self._replicas

    @property
    def restore_summary(self) -> JSON:
        return self._restore_summary

    @property
    def sse_description(self) -> JSON:
        return self._sse_description

    @property
    def archival_summary(self) -> JSON:
        return self._archival_summary
| 2.015625 | 2 |
gui_tools/create_gui_zip.py | kusterlab/SIMSI-Transfer | 0 | 12773989 | <filename>gui_tools/create_gui_zip.py
from pathlib import Path
import shutil

# Post-build packaging: tidy the PyInstaller dist folder, move loose files
# into lib/, then zip the whole distribution for Windows.
simsi_dist_dir = Path.cwd() / 'dist' / 'SIMSI-Transfer'

lib_dir = simsi_dist_dir / 'lib'
lib_dir.mkdir(parents=True, exist_ok=True)

print("Moving libraries to lib directory")
for f in simsi_dist_dir.glob('*'):
    # Bug fix: the suffix check used to be ".egg-info'" (stray trailing
    # quote), so *.egg-info directories were never matched and never removed.
    if f.name.endswith(".egg-info") or f.name in ["pytz", "matplotlib", "sqlalchemy", "tcl8", "PIL", "greenlet", "certifi"]:
        shutil.rmtree(f)
    elif f.is_file() and f.name not in ['base_library.zip', 'python38.dll', 'python39.dll', 'SIMSI-Transfer.exe', 'pyproject.toml']:
        # every other loose file is relocated into lib/
        f.rename(lib_dir / f.name)

print("Creating zip archive")
shutil.make_archive(Path.cwd() / 'dist' / 'SIMSI-Transfer_GUI_windows', 'zip', simsi_dist_dir)
variation/schemas/hgvs_to_copy_number_schema.py | cancervariants/varlex | 0 | 12773990 | <reponame>cancervariants/varlex
"""Module containing schemas used in HGVS To Copy Number endpoints"""
from typing import Type, Any, Dict, Union
from ga4gh.vrsatile.pydantic.vrs_models import RelativeCopyClass, AbsoluteCopyNumber, \
RelativeCopyNumber, Text
from pydantic import StrictStr
from variation.schemas.classification_response_schema import ClassificationType
from variation.schemas.normalize_response_schema import ServiceResponse
# Classification types for which copy-number translation is supported.
VALID_CLASSIFICATION_TYPES = [
    ClassificationType.GENOMIC_DUPLICATION,
    ClassificationType.GENOMIC_DELETION,
    ClassificationType.GENOMIC_DELETION_RANGE,
    ClassificationType.GENOMIC_UNCERTAIN_DELETION
]

# All accepted relative-copy-class string values (e.g. "complete loss").
VALID_RELATIVE_COPY_CLASS = [rcc.value for
                             rcc in RelativeCopyClass.__members__.values()]
class HgvsToAbsoluteCopyNumberService(ServiceResponse):
    """A response for translating HGVS to absolute copy number."""

    # The HGVS expression that was translated.
    hgvs_expr: StrictStr
    # Translation result; Text is used as the fallback representation.
    absolute_copy_number: Union[AbsoluteCopyNumber, Text]

    class Config:
        """Configure model."""

        @staticmethod
        def schema_extra(schema: Dict[str, Any],
                         model: Type["HgvsToAbsoluteCopyNumberService"]) -> None:
            """Configure OpenAPI schema."""
            # Strip auto-generated titles so the rendered schema stays compact.
            if "title" in schema.keys():
                schema.pop("title", None)
            for prop in schema.get("properties", {}).values():
                prop.pop("title", None)
            # Worked example embedded into the generated OpenAPI document.
            schema["example"] = {
                "hgvs_expr": "NC_000003.12:g.49531262dup",
                "absolute_copy_number": {
                    "_id": "ga4gh:VAC.2zTRgNWai56-CSvxw_UerY2ggUz3kJwe",
                    "type": "AbsoluteCopyNumber",
                    "subject": {
                        "type": "DerivedSequenceExpression",
                        "location": {
                            "_id": "ga4gh:VSL.G_J9WrfooiONRgjbmGPuCBYbBYFQnYOg",
                            "type": "SequenceLocation",
                            "sequence_id": "ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX",
                            "interval": {
                                "type": "SequenceInterval",
                                "start": {"type": "Number", "value": 49531260},
                                "end": {"type": "Number", "value": 49531262}
                            }
                        },
                        "reverse_complement": False
                    },
                    "copies": {"type": "Number", "value": 3}
                },
                "service_meta_": {
                    "name": "variation-normalizer",
                    "version": "0.2.17",
                    "response_datetime": "2022-01-26T22:23:41.821673",
                    "url": "https://github.com/cancervariants/variation-normalization"
                }
            }
class HgvsToRelativeCopyNumberService(ServiceResponse):
    """A response for translating HGVS to relative copy number."""

    # The HGVS expression that was translated.
    hgvs_expr: StrictStr
    # Translation result; Text is used as the fallback representation.
    relative_copy_number: Union[RelativeCopyNumber, Text]

    class Config:
        """Configure model."""

        @staticmethod
        def schema_extra(schema: Dict[str, Any],
                         model: Type["HgvsToRelativeCopyNumberService"]) -> None:
            """Configure OpenAPI schema."""
            # Strip auto-generated titles so the rendered schema stays compact.
            if "title" in schema.keys():
                schema.pop("title", None)
            for prop in schema.get("properties", {}).values():
                prop.pop("title", None)
            # Worked example embedded into the generated OpenAPI document.
            schema["example"] = {
                "hgvs_expr": "NC_000003.12:g.49531262dup",
                "relative_copy_number": {
                    "_id": "ga4gh:VRC.XiXamTGYJ43rc8xheleMKcjxEBOFp82l",
                    "type": "RelativeCopyNumber",
                    "subject": {
                        "type": "DerivedSequenceExpression",
                        "location": {
                            "_id": "ga4gh:VSL.G_J9WrfooiONRgjbmGPuCBYbBYFQnYOg",
                            "type": "SequenceLocation",
                            "sequence_id": "ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX",
                            "interval": {
                                "type": "SequenceInterval",
                                "start": {"type": "Number", "value": 49531260},
                                "end": {"type": "Number", "value": 49531262}
                            }
                        },
                        "reverse_complement": False
                    },
                    "relative_copy_class": "complete loss"
                },
                "service_meta_": {
                    "name": "variation-normalizer",
                    "version": "0.2.17",
                    "response_datetime": "2022-01-26T22:23:41.821673",
                    "url": "https://github.com/cancervariants/variation-normalization"
                }
            }
| 2.15625 | 2 |
scraper/scraper.py | keanu-xoren/budget-generator | 0 | 12773991 | <gh_stars>0
from bs4 import BeautifulSoup
import requests
# Sample location used by the demo scrape at the bottom of this module.
TEST_CITY = {
    "city" : "Oakland",
    "state" : "IL",
    "country" : "United States"
}

# Base URL of Numbeo's cost-of-living pages; a location slug is appended.
NUMBEO_URL = "https://www.numbeo.com/cost-of-living/in/"
def formatLocationURL(city, country='', state=''):
    """Build the Numbeo URL slug: city, state, country words joined by '-'.

    Components left as '' contribute nothing (the empty tokens their
    split produces are filtered out).
    """
    words = city.split(' ') + state.split(' ') + country.split(' ')
    return '-'.join(word for word in words if word)
def loadPage(city_dict):
    """Fetch the Numbeo cost-of-living page for a city as BeautifulSoup.

    First tries the plain city URL; when Numbeo reports an unknown city,
    retries with the more specific city-state-country slug.
    """
    numbeo_req = requests.get(NUMBEO_URL + formatLocationURL(city_dict["city"]))
    # TODO: add error handling for bad request (non-200 status)
    bs_obj = BeautifulSoup(numbeo_req.content, 'html.parser')
    # BUG FIX: the "unknown city" marker must be searched in the page text;
    # `"..." in bs_obj` on a BeautifulSoup object tests tag membership,
    # not substring containment, so the fallback branch never triggered.
    if "Cannot find city id" in bs_obj.get_text():
        numbeo_req = requests.get(NUMBEO_URL + formatLocationURL(city_dict["city"], city_dict["country"], city_dict["state"]))
        bs_obj = BeautifulSoup(numbeo_req.content, 'html.parser')
        if "Cannot find city id" in bs_obj.get_text():
            # TODO: error handling for nonexistent city
            pass
    return bs_obj
# Demo: fetch the page for TEST_CITY and print its <title> text.
numbeo_bs = loadPage(TEST_CITY)
print(numbeo_bs.title.get_text())
| 3.3125 | 3 |
PathDSP/myModel.py | TangYiChing/PathDSP | 2 | 12773992 | """
Feedforward model construct
number of hidden layers:5
neural units of hidden layers: [2000, 1000, 800, 500, 100]
activation function: elu
"""
import torch as tch
class FNN(tch.nn.Module):
    """Feed-forward regressor with four ELU hidden layers and dropout.

    Architecture: n_inputs -> 1000 -> 800 -> 500 -> 100 -> 1, where each
    hidden Linear is followed by ELU and Dropout(p=0.1); the output
    layer is purely linear.
    """

    def __init__(self, n_inputs):
        # call constructors from superclass
        super(FNN, self).__init__()
        # Hidden layers kept as named attributes (checkpoint-compatible).
        self.hidden1 = tch.nn.Linear(n_inputs, 1000)
        self.hidden2 = tch.nn.Linear(1000, 800)
        self.hidden3 = tch.nn.Linear(800, 500)
        self.hidden4 = tch.nn.Linear(500, 100)
        self.output = tch.nn.Linear(100, 1)
        # One shared dropout module reused after every hidden activation.
        self.dropout = tch.nn.Dropout(p=0.1)
        # Assemble the forward pipeline from the layers above.
        stages = []
        for hidden in (self.hidden1, self.hidden2, self.hidden3, self.hidden4):
            stages.extend([hidden, tch.nn.ELU(), self.dropout])
        stages.append(self.output)
        self.fnn = tch.nn.Sequential(*stages)

    def forward(self, x):
        return self.fnn(x)
if __name__ == "__main__":
net = FNN(756) #Feedforward_bn(100)
print('initiating an feed forward network....')
print(' construct=\n {:}'.format(net))
| 3.75 | 4 |
train/data/iphi_dates.py | sommerschield/iphi | 7 | 12773993 | <filename>train/data/iphi_dates.py
# Copyright 2021 <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>,
# University of Oxford, DeepMind Technologies Limited, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def date_parser_phi(d):
  '''Convert a PHI date string to a "<min_year> <max_year>" range.

  Returns a tuple ``(range_str, circa)`` where ``range_str`` is a string
  of two space-separated years (negative = BC) and ``circa`` is True
  when the source string is marked approximate ("ca", "?", "probably",
  ...).  Returns ``(None, None)`` when the string cannot be parsed.

  BUG FIX: the "precise dates" branch (e.g. "AD 43.") previously
  compared against a ``max_date`` variable that is never assigned on
  that code path, raising NameError; a precise date now collapses the
  range to the single parsed year.
  '''
  exemptions = {
      # GREEK WORLD
      'Archaic period': '-600 -479',
      'Classical period': '-479 -323',
      'early Classical period': '-480 -400',
      'high Classical period': '-450 -400',
      'late Classical period': '-400 -323',
      'Hellenistic period': '-323 -31',
      'early Hellenistic period': '-323 -250',
      'late Hellenistic period': '-250 -31',
      # HELLENISTIC EMPIRE
      'Seleucid period': '-311 -64',
      'Attalid period': '-330 -30',
      'Antigonid period': '-306 -168',
      'Ptolemaic period': '-332 -31',
      # ROME AND ITALY
      'aet. Rom.': '-200 600',
      'aet. imp.': '-27 284',
      'Roman period': '-200 600',
      'early Roman period': '-200 1',  # ?
      'late Roman period': '250 450',
      'Etruscan period': '-616 -509',
      'Roman Republic period': '-509 -27',
      'late Roman Republic period': '-146 -27',
      'Roman Imperial period': '-27 284',
      'Rom. Imp. period': '-27 284',
      'early Rom. Imp. period': '-27 150',
      'adv. Rom. Imp. period': '-27 150',
      'later Rom. Imp. period': '150 375',
      'late Rom. Imp. period': '150 375',
      'high Rom. Imp. period': '68 235',
      'Christian period': '27 325',
      'late Christian period': '313 476',
      'Byzantine period': '330 1453',
      'Late Antique period': '284 476',
      'late Antiquity': '284 476',
      # ROMAN IMPERIAL DYNASTIES
      'Julio-Claudian period': '-27 68',
      'Augustan period': '-27 14',
      'Tiberian period': '14 37',
      'Claudian period': '41 54',
      'Neronian period': '54 68',
      'Flavian period': '69 96',
      'Vespasianic period': '69 79',
      'Domitianic period': '81 96',
      'Antonine period': '96 192',  # (dynasty not emperor)
      'Trajanic period': '98 117',
      'Hadrianic period': '117 138',
      'Ant. Pius period': '138 161',  # (emperor not dynasty)
      'Severan period': '193 235',
      'Constantinian period': '307 364',
      'Valentinian period': '364 392',
      'Theodosian period': '392 456',
      'Thracian period': '457 476',
      # REIGN
      'reign of Augustus': '-27 14',
      'reign of Tiberius': '14 37',
      'reign of Gaius': '37 41',
      'reign of Claudius': '41 54',
      'reign of Nero': '54 68',
      'reign of Flavius': '69 96',
      'reign of Vespasian': '69 79',
      'reign of Domitian': '81 96',
      'reign of Nerva': '96 98',
      'reign of Trajan': '98 117',
      'reign of Hadrian': '117 138',
      'reign of <NAME>': '138 161',
      'reign of Severus': '193 235',
      'reign of Constantine': '307 364',
      'reign of Valentinian': '364 392',
      'reign of Theodosius': '392 456',
      # Other exemptions
      '1st c. BC/1st c. AD': '-100 199',
      '44 BC-267 AD': '-44 267'
  }

  # Remove parenthesis ()
  d = re.sub(r'\([^\)]*\)', r'', d)

  # Remove months
  d = re.sub(
      r'Jan\.|January|Feb\.|February|Mar\.|March|Apr\.|April|May|Jun\.|June|Jul\.|'
      r'July|Aug\.|August|Sept\.|September|Oct\.|October|Nov\.|November|'
      r'Dec\.|December', r'', d)

  # Parse 'circa' from 'ca' or '?'
  circa = False
  circa_words = ['?', 'probably', 'perhaps', 'perh.', 'prob.', 'or']
  if re.search(r'(?:^|\s+)ca(\W|$)', d):
    circa = True
  else:
    for w in circa_words:
      if w in d:
        circa = True
        break

  # Period matching, otherwise parse the date as a date
  if re.search(r'(' + '|'.join(exemptions.keys()) + ')', d, re.IGNORECASE):
    for period, period_date in exemptions.items():
      if re.search(period, d, re.IGNORECASE):
        return period_date, circa
    return None, None

  # Collapse spaces
  d = re.sub(r'\s+', ' ', d).strip()

  # Date matching BC AD
  m = re.search(r'(?:^|\s+)(BC|AD|a\.|p\.)(?:$|\s+|\?)', d, re.IGNORECASE)
  if m:
    # Treat a. as BC
    bc_ad = m.group(1).upper().replace('A.', 'BC')

    # Parse dates xxx(/xx)-xxx(/xx) BC
    m = re.search(
        r'\s?(\d{1,4})(\/\d{1,2})?\s?-\s?(\d{1,4})(\/(\d{1,2}))?\??\s+(BC|AD|a\.|p\.)',
        d, re.IGNORECASE)
    if m:
      min_date = int(m.group(1))
      max_date = int(m.group(3))

      # Pick the upper date margin in case xxx/x e.g. 409/408 BCE
      if m.group(5):
        max_date = int(m.group(3)[:-len(m.group(5))] + m.group(5))

      if bc_ad == 'BC' and min_date >= max_date:
        min_date, max_date = -min_date, -max_date
        return f'{min_date} {max_date}', circa
      elif bc_ad == 'AD' and min_date <= max_date:
        return f'{min_date} {max_date}', circa

    # Parse dates xxx(/xx) BC
    m = re.search(
        r'(\d{1,4})(\/(\d{1,4}))?\??\s+(BC|AD|a\.|p\.)', d, re.IGNORECASE)
    if m:
      min_date = int(m.group(1))
      if m.group(3):
        # Pick the upper date margin xxx/x
        max_date = int(m.group(1)[:-len(m.group(3))] + m.group(3))
      else:
        max_date = min_date

      if bc_ad == 'BC':
        min_date = -min_date
        max_date = -max_date

      # Swap dates if opposite
      if min_date > max_date:
        min_date, max_date = max_date, min_date

      return f'{min_date} {max_date}', circa

    # Parse precise dates e.g. AD 43
    m = re.search(r'^(BC|AD|a\.|p\.) (\d{1,4})\D', d, re.IGNORECASE)
    if m:
      min_date = int(m.group(2))
      # A precise date is a single year: the range collapses to it.
      # (Previously this branch read an undefined max_date -> NameError.)
      max_date = min_date
      if bc_ad == 'BC':
        min_date = -min_date
        max_date = -max_date
      return f'{min_date} {max_date}', circa

    # Parse century spans e.g. 7th/6th c. BC
    early_words = ['early', 'first half', '1st half', 'beginning', 'beg.']
    late_words = ['late', 'second half', '2nd half', 'end']
    mid_words = ['mid', 'mid.', 'mid-']
    regex_words = r'\s?|'.join(early_words + late_words + mid_words) + r'\s?'
    m = re.search(
        r'(((' + regex_words + r')?(\d{1,2})(st|nd|rd|th))'
        r'\s?(/|-|or)\s?)?(' + regex_words + r')?(\d{1,2})(st|nd|rd|th) '
        r'(:?c. )?(BC|AD|a\.)',
        d, re.IGNORECASE)
    if not m:
      return None, None
    else:
      max_date = int(m.group(8))
      if m.group(4):
        min_date = int(m.group(4))
      else:
        min_date = int(m.group(8))

      if bc_ad == 'BC':
        min_date = -(min_date * 100)
        max_date = -(max_date * 100 - 99)
      else:
        min_date = min_date * 100 - 99
        max_date = max_date * 100

      # if early add/remove/match 50 years from that century
      if m.group(3):
        if m.group(3).strip() in early_words:
          max_date -= 50
        elif m.group(3).strip() in late_words:
          min_date += 50

      # If late add/remove/match 50 years from that century
      if m.group(7):
        if m.group(7).strip() in early_words:
          max_date -= 50
        elif m.group(7).strip() in late_words:
          min_date += 50

      # Mid date
      if m.group(3):
        if m.group(3).strip() in mid_words:
          max_date -= 25
          min_date += 25
      if m.group(7):
        if m.group(7).strip() in mid_words:
          max_date -= 25
          min_date += 25

      if min_date <= max_date:
        return f'{min_date} {max_date}', circa
      else:
        return None, None
| 2.3125 | 2 |
tests/test_startup.py | tearf001/fastapi-sqla | 33 | 12773994 | import httpx
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from pytest import mark
from sqlalchemy import text
def test_startup():
    """After startup(), a fresh session can run a trivial statement."""
    from fastapi_sqla import _Session, startup

    startup()
    session = _Session()
    result = session.execute(text("SELECT 1")).scalar()
    assert result == 1
@mark.asyncio
async def test_fastapi_integration():
    """setup() wires fastapi_sqla into a FastAPI app end to end."""
    from fastapi_sqla import _Session, setup

    app = FastAPI()
    setup(app)

    @app.get("/one")
    def select_one():
        session = _Session()
        try:
            return session.execute(text("SELECT 1")).scalar()
        finally:
            session.close()

    async with LifespanManager(app):
        async with httpx.AsyncClient(
            app=app, base_url="http://example.local"
        ) as client:
            res = await client.get("/one")
            assert res.json() == 1
| 2.328125 | 2 |
gblda/__init__.py | ChengjieWU/LatentDirichletAllocation | 2 | 12773995 | """Copyright (c) 2020 <NAME>"""
from .gblda import GibbsLDA
| 0.925781 | 1 |
setup.py | Kushagrabainsla/create-flask-app | 11 | 12773996 | from setuptools import setup, find_packages
import pathlib
# Project root directory; README.md is expected next to this setup.py.
HERE = pathlib.Path(__file__).parent
# Long description rendered on the package index page.
README = (HERE / "README.md").read_text()

# NOTE(review): package name 'build-flask-app' differs from the repo name
# ('create-flask-app') -- confirm this is intentional.
setup(
    name='build-flask-app',
    description='Set up a modern flask web server by running one command.',
    long_description=README,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    version='0.1.0',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/Kushagrabainsla/build-flask-app',
    install_requires=[
        'Flask',
        'Flask-SQLAlchemy',
        'Flask-SocketIO',
        'gunicorn',
        'eventlet',
        'gevent',
        'dnspython',
        'pymongo',
        'Flask-PyMongo',
        'PyInquirer',
        'termcolor',
        'flask-cors',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Expose the CLI entry point `build-flask-app`.
    entry_points = {
        'console_scripts': ['build-flask-app=build_flask_app.main:main'],
    },
)
Tests/EndToEndTests/CNTKv2Python/Examples/ConvNet_CIFAR10_DataAug_test.py | shyamalschandra/CNTK | 17,702 | 12773997 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import os
import sys
from cntk.ops.tests.ops_test_utils import cntk_device
from cntk.cntk_py import DeviceKind_GPU
from cntk.device import try_set_default_device
import pytest
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(abs_path)
sys.path.append(os.path.join(abs_path, "..", "..", "..", "..", "Examples", "Image", "Classification", "ConvNet", "Python"))
from prepare_test_data import prepare_CIFAR10_data
from ConvNet_CIFAR10_DataAug import *
TOLERANCE_ABSOLUTE = 1e-1
def test_cifar_convnet_error(device_id):
    """Train the CIFAR-10 ConvNet briefly and check loss/metric values."""
    # GPU-only: the training path is not validated on CPU devices.
    if cntk_device(device_id).type() != DeviceKind_GPU:
        pytest.skip('test only runs on GPU')
    try_set_default_device(cntk_device(device_id))

    base_path = prepare_CIFAR10_data()
    # change dir to locate data.zip correctly
    os.chdir(base_path)

    # Fix RNG and force deterministic kernels so loss/metric are reproducible.
    from _cntk_py import set_fixed_random_seed, force_deterministic_algorithms
    set_fixed_random_seed(1)
    force_deterministic_algorithms()

    reader_train = create_reader(os.path.join(base_path, 'train_map.txt'), os.path.join(base_path, 'CIFAR-10_mean.xml'), False)
    model = create_convnet_cifar10_model(num_classes=10)
    model.update_signature((num_channels, image_height, image_width))
    criterion = create_criterion_function(model, normalize=lambda x: x / 256)
    train_loss, metric = train_model(reader_train, model, criterion, epoch_size=128, max_epochs=5)

    # Reference values recorded from a known-good deterministic run.
    expected_loss_metric = (2.2963, 0.9062)
    assert np.allclose((train_loss, metric), expected_loss_metric, atol=TOLERANCE_ABSOLUTE)
if __name__=='__main__':
    # Run the test directly against device id 0.
    test_cifar_convnet_error(0)
| 2.0625 | 2 |
landscape_setup/concourse.py | jia-jerry/cc-utils | 0 | 12773998 | <gh_stars>0
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import tempfile
import time
import bcrypt
from ensure import ensure_annotations
from textwrap import dedent
from urllib.parse import urlparse
from subprocess import CalledProcessError
import yaml
import concourse.client as client
import semver
from landscape_setup import kube_ctx
from landscape_setup.utils import (
ensure_helm_setup,
create_tls_secret,
execute_helm_deployment,
)
from model import (
ConfigFactory,
ConfigurationSet,
)
from model.concourse import (
ConcourseConfig,
ConcourseApiVersion,
)
from model.container_registry import (
GcrCredentials,
)
from model.proxy import(
ProxyConfig
)
from util import (
ctx as global_ctx,
not_empty,
not_none,
info,
warning,
fail,
which,
)
@ensure_annotations
def create_image_pull_secret(
    credentials: GcrCredentials,
    image_pull_secret_name: str,
    namespace: str,
):
    """Create an image pull secret in the K8s cluster to allow pods to download images from gcr"""
    not_none(credentials)
    not_empty(image_pull_secret_name)
    not_empty(namespace)

    ctx = kube_ctx
    namespace_helper = ctx.namespace_helper()
    namespace_helper.create_if_absent(namespace)

    secret_helper = ctx.secret_helper()
    # Only create the secret when it does not exist yet (idempotent).
    if not secret_helper.get_secret(image_pull_secret_name, namespace):
        secret_helper.create_gcr_secret(
            namespace=namespace,
            name=image_pull_secret_name,
            # Restored: this argument had been replaced by a redaction
            # placeholder ('<PASSWORD>()'), which is not valid Python.
            # Assumes GcrCredentials exposes passwd() -- TODO confirm.
            password=credentials.passwd(),
            user_name=credentials.username(),
            email=credentials.email(),
            server_url=credentials.host(),
        )

        # Make the default service account use the new pull secret.
        service_account_helper = ctx.service_account_helper()
        service_account_helper.patch_image_pull_secret_into_service_account(
            name="default",
            namespace=namespace,
            image_pull_secret_name=image_pull_secret_name
        )
# Constants related to the MitM-Proxy installation.
# The name under which the config map will be stored in K8s
# (referenced by create_proxy_configmaps and add_proxy_values below).
MITM_CONFIG_CONFIGMAP_NAME = 'mitm-config'
@ensure_annotations
def create_proxy_configmaps(
    proxy_cfg: ProxyConfig,
    namespace: str,
):
    """Store the mitm-proxy configuration as a K8s config map in `namespace`."""
    not_empty(namespace)

    # Make sure the target namespace exists before writing into it.
    kube_ctx.namespace_helper().create_if_absent(namespace)

    mitm_config_yaml = yaml.dump(proxy_cfg.mitm_proxy().config())
    kube_ctx.config_map_helper().create_or_update_config_map(
        namespace=namespace,
        name=MITM_CONFIG_CONFIGMAP_NAME,
        data={'config.yaml': mitm_config_yaml},
    )
def create_instance_specific_helm_values(
    concourse_cfg: ConcourseConfig,
    config_factory: ConfigFactory,
):
    '''
    Creates a dict containing instance specific helm values not explicitly stated in
    the `ConcourseConfig`'s helm_chart_values.

    Raises NotImplementedError for unsupported Concourse API versions.
    '''
    not_none(concourse_cfg)

    # 'main'-team credentials need to be included in the values.yaml, unlike the other teams
    concourse_uam_cfg_name = concourse_cfg.concourse_uam_config()
    concourse_uam_cfg = config_factory.concourse_uam(concourse_uam_cfg_name)
    main_team = concourse_uam_cfg.main_team()
    external_url = concourse_cfg.external_url()
    external_host = urlparse(external_url).netloc
    ingress_host = concourse_cfg.ingress_host()
    concourse_version = concourse_cfg.concourse_version()

    if concourse_version is ConcourseApiVersion.V4 or concourse_version is ConcourseApiVersion.V5:
        github_config_name = concourse_cfg.github_enterprise_host()
        # 'github_enterprise_host' only configured in case of internal concourse
        # using github enterprise
        if github_config_name:
            github_config = config_factory.github(github_config_name)
            github_http_url = github_config.http_url()
            github_host = urlparse(github_http_url).netloc
        else:
            github_host = None

        # Restored: this call had been mangled by a credentials redaction
        # ('<PASSWORD>pw('), which is not valid Python; Concourse expects a
        # bcrypt hash for local users.
        bcrypted_pwd = bcrypt.hashpw(
            main_team.password().encode('utf-8'),
            bcrypt.gensalt()
        ).decode('utf-8')
        instance_specific_values = {
            'concourse': {
                'web': {
                    'externalUrl': external_url,
                    'auth': {
                        'mainTeam': {
                            'localUser': main_team.username(),
                            'github': {
                                'team': main_team.github_auth_team()
                            }
                        },
                        'github': {
                            'host': github_host
                        }
                    }
                }
            },
            'secrets': {
                'localUsers': main_team.username() + ':' + bcrypted_pwd,
                'githubClientId': main_team.github_auth_client_id(),
                'githubClientSecret': main_team.github_auth_client_secret()
            },
            'web': {
                'ingress': {
                    'hosts': [external_host, ingress_host],
                    'tls': [{
                        'secretName': concourse_cfg.tls_secret_name(),
                        'hosts': [external_host, ingress_host],
                    }],
                }
            }
        }
    else:
        raise NotImplementedError(
            "Concourse version {v} not supported".format(v=concourse_version)
        )

    return instance_specific_values
@ensure_annotations
def add_proxy_values(
    config_set,
    instance_specific_values: dict,
):
    """Extend helm values with mitm-proxy sidecar containers and volumes.

    Mutates and returns `instance_specific_values`, adding a
    'worker.sidecarContainers' / 'worker.additionalVolumes' section.
    """
    # The dir into which the config map is mounted in the volume.
    # NOTE: This _must_ align with what the mitm is configured to use by our docker image.
    MITM_CONFIG_DIR = '/.mitmproxy'

    # add the sidecar-configuration for the mitm-proxy
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()
    secrets_server_cfg = config_set.secrets_server()
    proxy_cfg = config_factory.proxy(concourse_cfg.proxy())
    mitm_cfg = proxy_cfg.mitm_proxy()
    logging_cfg = mitm_cfg.logging()
    sidecar_image_cfg = proxy_cfg.sidecar_image()

    # First sidecar redirects pod traffic to the proxy via iptables
    # (needs privileged mode); second runs the mitm-proxy itself.
    sidecar_containers = [{
        'name': 'setup-iptables-sidecar',
        'image': sidecar_image_cfg.image_reference(),
        'env': [{
            'name': 'PROXY_PORT',
            'value': f'{mitm_cfg.config()["listen_port"]}',
        },{
            'name': 'POD_IP',
            'valueFrom': {
                'fieldRef': {
                    'fieldPath':'status.podIP',
                },
            },
        }],
        'securityContext': {
            'privileged': True,
        },
    },{
        'name': 'mitm-proxy',
        'image': mitm_cfg.image_reference(),
        'env': [{
            'name': 'CONFIG_DIR',
            'value': MITM_CONFIG_DIR,
        },{
            'name': 'SECRETS_SERVER_ENDPOINT',
            'value': secrets_server_cfg.endpoint_url(),
        },{
            'name': 'SECRETS_SERVER_CONCOURSE_CFG_NAME',
            'value': secrets_server_cfg.secrets().concourse_cfg_name(),
        },{
            'name': 'ELASTIC_CONFIG_NAME',
            'value': logging_cfg.els_config_name(),
        },{
            'name': 'ELASTIC_INDEX_NAME',
            'value': logging_cfg.els_index_name(),
        }],
        'ports': [{
            'containerPort': mitm_cfg.config()["listen_port"],
            'hostPort': mitm_cfg.config()["listen_port"],
            'protocol': 'TCP',
        }],
        'volumeMounts': [{
            'name': 'mitm-config',
            'mountPath': MITM_CONFIG_DIR,
        }],
    }]
    # Mount the config map created by create_proxy_configmaps().
    additional_volumes = [{
        'name':'mitm-config',
        'configMap': {'name': MITM_CONFIG_CONFIGMAP_NAME},
    }]

    # add new values to dict without replacing existing ones
    vals = instance_specific_values.get('worker', {})
    vals.update(
        {
            'sidecarContainers': sidecar_containers,
            'additionalVolumes': additional_volumes,
        }
    )
    instance_specific_values['worker']= vals

    return instance_specific_values
@ensure_annotations
def deploy_concourse_landscape(
        config_set: ConfigurationSet,
        deployment_name: str='concourse',
        timeout_seconds: int=180,
):
    """Deploy Concourse via helm into `deployment_name` namespace and set up teams.

    Creates the required secrets/config maps first, runs the helm
    deployment, waits for the web deployment to become available
    (failing after `timeout_seconds`), then configures teams.
    """
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()

    # Kubernetes cluster config
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # TLS config
    tls_config_name = concourse_cfg.tls_config()
    tls_config = config_factory.tls_config(tls_config_name)
    tls_secret_name = concourse_cfg.tls_secret_name()

    # Helm config
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = config_factory.concourse_helmchart(helm_chart_default_values_name).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(helm_chart_values_name).raw

    # Proxy config
    if concourse_cfg.proxy():
        proxy_cfg_name = concourse_cfg.proxy()
        proxy_cfg = config_factory.proxy(proxy_cfg_name)

        info('Creating config-maps for the mitm proxy ...')
        create_proxy_configmaps(
            proxy_cfg=proxy_cfg,
            namespace=deployment_name,
        )

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.'
    )

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg, config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    # Add proxy sidecars to instance specific values.
    # NOTE: Only works for helm chart version 3.8.0 or greater
    if concourse_cfg.proxy():
        chart_version_semver = semver.parse_version_info(concourse_cfg.helm_chart_version())
        min_version = semver.parse_version_info('3.8.0')
        if chart_version_semver >= min_version:
            instance_specific_helm_values = add_proxy_values(
                config_set=config_set,
                instance_specific_values=instance_specific_helm_values,
            )
        else:
            fail('Proxy deployment requires the configured helm chart version to be at least 3.8.0')

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'stable/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent(
                """No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """
            ).format(
                t = timeout_seconds,
                ns = deployment_name,
            )
        )
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
def destroy_concourse_landscape(config_name: str, release_name: str):
    """Delete the Concourse helm release and its namespace.

    `config_name` names the configuration set; `release_name` is both
    the helm release and the namespace to remove.
    """
    # Fetch concourse and kubernetes config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)

    concourse_cfg = config_set.concourse()
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)
    context = kube_ctx
    context.set_kubecfg(kubernetes_config.kubeconfig())

    # Delete helm release
    helm_cmd_path = which("helm")
    KUBECONFIG_FILE_NAME = 'kubecfg'
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME

    # Run helm with a temporary kubeconfig written to disk so the
    # subprocess can authenticate against the target cluster.
    with tempfile.TemporaryDirectory() as temp_dir:
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)

        try:
            subprocess.run(
                [helm_cmd_path, "delete", release_name, "--purge"],
                env=helm_env,
                check=True,
                cwd=temp_dir
            )
        except CalledProcessError:
            # ignore sporadic connection timeouts from infrastructure
            warning("Connection to K8s cluster lost. Continue with deleting namespace {ns}".format(
                ns=release_name
            ))

    # delete namespace
    namespace_helper = context.namespace_helper()
    namespace_helper.delete_namespace(namespace=release_name)
def set_teams(config: ConcourseConfig):
    """Configure all non-main teams on the Concourse instance.

    Authenticates as the main team (the only team allowed to manage
    other teams) and applies each configured team's settings.
    """
    not_none(config)

    cfg_factory = global_ctx().cfg_factory()
    concourse_uam_cfg_name = config.concourse_uam_config()
    concourse_uam_cfg = cfg_factory.concourse_uam(concourse_uam_cfg_name)
    # Use main-team, i.e. the team that can change the other teams' credentials
    main_team = concourse_uam_cfg.main_team()

    concourse_api = client.from_cfg(
        concourse_cfg=config,
        team_name=main_team.teamname(),
    )
    for team in concourse_uam_cfg.teams():
        # We skip the main team here since we cannot update all its credentials at this time.
        if team.teamname() == main_team.teamname():
            continue
        concourse_api.set_team(team)
| 1.640625 | 2 |
dataStructures/queue.py | evanxg852000/rockstartdev | 1 | 12773999 | from linkedlist import LinkedList
class Queue(object):
    """FIFO queue backed by a LinkedList.

    dequeue/peek return None when the queue is empty instead of raising.
    """

    def __init__(self):
        self._store = LinkedList()

    def enqueue(self, data):
        """Append `data` at the back of the queue."""
        self._store.add_back(data)

    def dequeue(self):
        """Remove and return the front value, or None when empty."""
        # Idiom fix: compare to None with `is not`; also call front()
        # only once instead of twice.
        front = self._store.front()
        if front is not None:
            data = front.data
            self._store.delete(front)
            return data
        return None

    def peek(self):
        """Return the front value without removing it, or None when empty."""
        front = self._store.front()
        if front is not None:
            return front.data
        return None

    def items(self):
        """Yield the queued values from front to back."""
        for node in self._store.items():
            yield node.data

    def count(self):
        """Return the number of items currently queued."""
        return self._store.count()
| 3.921875 | 4 |
adventofcode/2019/python/day03.py | shanavas786/coding-fu | 1 | 12774000 | <reponame>shanavas786/coding-fu
#!/usr/bin/env python3
def get_vertices(wire):
    """Trace a wire description (e.g. ["R8", "U5"]) into vertices.

    Each vertex is a tuple (x, y, total_steps_from_origin); the origin
    vertex (0, 0, 0) is always first.
    """
    # Unit vector for each direction letter; unknown letters move nothing
    # (matching the original fall-through behaviour).
    moves = {"R": (1, 0), "L": (-1, 0), "U": (0, 1), "D": (0, -1)}
    x = y = steps = 0
    vertices = [(0, 0, 0)]
    for segment in wire:
        dx, dy = moves.get(segment[0], (0, 0))
        length = int(segment[1:])
        x += dx * length
        y += dy * length
        steps += length
        vertices.append((x, y, steps))
    return vertices
def get_intersections(edges1, edges2):
    """All crossing points between two wires' vertex lists.

    Each element is (x, y, combined_steps); the origin crossing
    (0, 0, 0) is excluded from the result.
    """
    found = []
    # Walk consecutive vertex pairs (segments) of both wires.
    for a_from, a_to in zip(edges1, edges1[1:]):
        for b_from, b_to in zip(edges2, edges2[1:]):
            hit = get_intersection(a_from, a_to, b_from, b_to)
            if hit:
                found.append(hit)

    # Drop the trivial crossing at the shared origin, if present.
    if (0, 0, 0) in found:
        found.remove((0, 0, 0))
    return found
def get_distance(vert):
    """Manhattan distance of a vertex (x, y, ...) from the origin."""
    x, y = vert[0], vert[1]
    return abs(x) + abs(y)
def get_intersection(e1from, e1to, e2from, e2to):
    """Crossing point of two axis-aligned segments, or None.

    Each endpoint is (x, y, steps_at_endpoint); the result tuple is
    (x, y, combined_steps_to_reach_the_crossing).  Only perpendicular
    (one horizontal, one vertical) pairs can intersect.
    """
    ax1, ay1, a_steps = e1from
    ax2, ay2, _ = e1to
    bx1, by1, b_steps = e2from
    bx2, by2, _ = e2to

    def spans(lo, mid, hi):
        # True when `mid` lies within [lo, hi] in either orientation.
        return lo <= mid <= hi or lo >= mid >= hi

    if ay1 == ay2 and bx1 == bx2:
        # Segment 1 horizontal, segment 2 vertical.
        if spans(by1, ay1, by2) and spans(ax1, bx1, ax2):
            steps = a_steps + b_steps + abs(bx1 - ax1) + abs(ay1 - by1)
            return (bx1, ay1, steps)
    elif by1 == by2 and ax1 == ax2:
        # Segment 1 vertical, segment 2 horizontal.
        if spans(ay1, by1, ay2) and spans(bx1, ax1, bx2):
            steps = a_steps + b_steps + abs(ax1 - bx1) + abs(by1 - ay1)
            return (ax1, by1, steps)
    return None
def get_closest_intersection(edges1, edges2):
    """Smallest Manhattan distance among all crossings of the two wires."""
    return min(get_distance(vert) for vert in get_intersections(edges1, edges2))
def get_min_path_intersection(edges1, edges2):
    """Fewest combined wire steps needed to reach any crossing."""
    return min(vert[2] for vert in get_intersections(edges1, edges2))
def test1():
    """Sanity-check both metrics on a small worked example."""
    wire1 = ["R8", "U5", "L5", "D3"]
    wire2 = ["U7", "R6", "D4", "L4"]
    edges1 = get_vertices(wire1)
    edges2 = get_vertices(wire2)
    assert get_closest_intersection(edges1, edges2) == 6
    assert get_min_path_intersection(edges1, edges2) == 30
def test2():
    """Sanity-check both metrics on a larger worked example."""
    wire1 = ["R75", "D30", "R83", "U83", "L12", "D49", "R71", "U7", "L72"]
    wire2 = ["U62", "R66", "U55", "R34", "D71", "R55", "D58", "R83"]
    edges1 = get_vertices(wire1)
    edges2 = get_vertices(wire2)
    assert get_closest_intersection(edges1, edges2) == 159
    assert get_min_path_intersection(edges1, edges2) == 610
if __name__ == "__main__":
with open("../inputs/day03.txt") as ip:
lines = ip.readlines()
wire1 = lines[0].strip().split(",")
edges1 = get_vertices(wire1)
wire2 = lines[1].strip().split(",")
edges2 = get_vertices(wire2)
print(
"Distanct to closest intersection", get_closest_intersection(edges1, edges2)
)
print("Min Steps to intersection", get_min_path_intersection(edges1, edges2))
| 3.609375 | 4 |
weibospider/daemon_tweet.py | uglyghost/WeiboSpider | 0 | 12774001 | <gh_stars>0
import os
# Respawn the tweet spider forever; os.system blocks until each run exits,
# so the loop restarts the process whenever it terminates.
while True:
    os.system('python run_spider.py tweet')
mandalka/node.py | squirrelinhell/mandalka | 0 | 12774002 | <gh_stars>0
# Copyright (c) 2017 SquirrelInHell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import inspect
import hashlib
import threading
def str_hash(s):
    """Map *s* to a short stable hex id: first 8 bytes of SHA-256 over "mandalka:"+s."""
    digest = hashlib.sha256(("mandalka:" + s).encode("UTF-8")).digest()
    return digest[:8].hex()
class ByInstanceStorage:
    """Associate a value with a specific object instance, keyed by id().

    ``get(obj)`` returns the stored value, or None when nothing is stored.
    ``add(obj, value)`` stores a value for obj; ``add(obj, None)`` removes
    the entry.  The mapping lives in a closure so instances carry only the
    two bound callables.
    """

    def __init__(self):
        store = {}

        def get(obj):
            # Missing keys map to None, matching the "not registered" case.
            return store.get(id(obj))

        def add(obj, value):
            key = id(obj)
            if value is None:
                assert key in store
                del store[key]
            else:
                assert key not in store
                store[key] = value

        self.get = get
        self.add = add
# Module-wide state shared by every @node-wrapped class.
global_lock = threading.Lock()  # guards class registration and instance caches
params = ByInstanceStorage()  # per-node bookkeeping: args, id, init state, lock
registered_classes = set()  # wrapped class names, used to keep names unique
global_config = {
    "lazy": True  # when True, cls.__init__ runs on first real attribute access
}
def config(*, lazy=None):
    """Update global options; ``lazy`` toggles deferred __init__ execution."""
    with global_lock:
        if lazy is None:
            return
        global_config["lazy"] = bool(lazy)
def safe_copy(obj):
    """Deep-copy *obj*, allowing only plain data types and mandalka nodes.

    Scalars and registered nodes are returned as-is; containers are rebuilt
    recursively (frozensets become plain sets).  Anything else raises
    ValueError.
    """
    if obj is None:
        return None
    if isinstance(obj, (int, bool, float, complex, str, bytes)):
        return obj
    if isinstance(obj, tuple):
        return tuple(map(safe_copy, obj))
    if isinstance(obj, list):
        return list(map(safe_copy, obj))
    if isinstance(obj, (set, frozenset)):
        return {safe_copy(item) for item in obj}
    if isinstance(obj, dict):
        return {safe_copy(key): safe_copy(value) for key, value in obj.items()}
    if params.get(obj) is not None:
        # Registered nodes are immutable handles; share them, don't copy.
        return obj
    if isinstance(obj, type):
        raise ValueError("Invalid argument: " + str(obj))
    raise ValueError("Invalid argument type: " + str(type(obj)))
def describe(obj, depth=1):
    """Render *obj* as a deterministic, human-readable expression string.

    Containers are rendered recursively with sorted set/dict entries so the
    output is stable.  A mandalka node at ``depth`` 0 is shown as
    ``<ClassName nodeid>``; at greater depth it is expanded into the
    constructor call that produced it, with nested nodes one level shallower.
    """
    depth = int(depth)
    if obj is None:
        return "None"
    if isinstance(obj, (int, bool, float, complex, str, bytes)):
        return repr(obj)
    if isinstance(obj, tuple):
        if len(obj) == 1:
            # Keep the trailing comma of one-element tuples.
            return "(" + describe(obj[0], depth) + ",)"
        return "(" + ", ".join(describe(item, depth) for item in obj) + ")"
    if isinstance(obj, list):
        return "[" + ", ".join(describe(item, depth) for item in obj) + "]"
    if isinstance(obj, (set, frozenset)):
        rendered = sorted(describe(item, depth) for item in obj)
        return "set(" + ", ".join(rendered) + ")"
    if isinstance(obj, dict):
        rendered = sorted(
            describe(key, depth) + ": " + describe(value, depth)
            for key, value in obj.items()
        )
        return "{" + ", ".join(rendered) + "}"
    info = params.get(obj)
    if info is not None:
        if depth == 0:
            return "<" + info["clsname"] + " " + info["nodeid"] + ">"
        pieces = [describe(arg, depth - 1) for arg in info["args"]]
        for name in sorted(info["kwargs"]):
            pieces.append(name + "=" + describe(info["kwargs"][name], depth - 1))
        return info["clsname"] + "(" + ", ".join(pieces) + ")"
    if isinstance(obj, type):
        raise ValueError("Invalid argument: " + str(obj))
    raise ValueError("Invalid argument type: " + str(type(obj)))
def touch(node):
    """Force *node*'s deferred cls.__init__ to run exactly once.

    The real constructor arguments were stashed in ``params`` at creation
    time; here they are deep-copied and replayed.  Raises RuntimeError if a
    previous initialization attempt failed, and is a no-op once the node has
    been initialized successfully.  Returns the node for chaining.
    """
    p = params.get(node)
    with p["lock"]:
        # A prior failed attempt leaves p["error"] True permanently.
        if ("error" in p) and p["error"]:
            raise RuntimeError(
                describe(node) + ": failed to run __init__"
            )
        if "initialized" not in p:
            p["initialized"] = True
            # Copies protect the cached arguments from mutation by __init__.
            args = safe_copy(p["args"])
            kwargs = safe_copy(p["kwargs"])
            # Flag dance: set True on normal return, then invert in finally,
            # so an exception in init leaves p["error"] == True.
            p["error"] = False
            try:
                p["init"](node, *args, **kwargs)
                p["error"] = True
            finally:
                p["error"] = not p["error"]
    return node
def evaluate(node):
    """Run the node's deferred __init__ (via touch) and drop its cached arguments.

    The argument cache is removed even when initialization fails, so the
    node can no longer be expanded by describe()/arguments() afterwards.
    """
    try:
        touch(node)
    finally:
        info = params.get(node)
        with info["lock"]:
            del info["args"]
            del info["kwargs"]
def lazy(f):
    """Mark *f* so that accessing it through a node does not trigger __init__."""
    setattr(f, "is_lazy", True)
    return f
def wrap(f):
    """Return a proxy for method *f* that initializes the node before calling it."""
    def initialized_call(self, *args, **kwargs):
        # Ensure the deferred __init__ has run before the real method body.
        touch(self)
        return f(self, *args, **kwargs)
    return initialized_call
def argument_parser(method, method_name):
    """Build a normalizing parser for calls to *method* (typically __init__).

    The returned ``parse(*args, **kwargs)`` callable resolves defaults,
    moves positionals into keywords where possible, validates unknown or
    duplicate names, and returns deep-copied ``(args, kwargs)`` so that the
    caller cannot later mutate the cached values.  *method_name* is used
    only in error messages.
    """
    spec = inspect.getfullargspec(method)
    if len(spec.args) < 1:
        # No 'self' slot at all: only acceptable when *args is present.
        if spec.varargs is None:
            raise TypeError("%s must accept an argument" % method_name)
        arg_names = []
    else:
        # Drop 'self'; only user-facing parameters matter.
        arg_names = spec.args[1:]
    if spec.defaults is None:
        start_of_defaults = len(arg_names)
    else:
        defaults = safe_copy(spec.defaults)
        # Defaults align with the tail of arg_names.
        start_of_defaults = len(arg_names) - len(defaults)
    kw_defaults = {}
    if spec.kwonlydefaults is not None:
        kw_defaults = safe_copy(spec.kwonlydefaults)
    def parse(*args, **kwargs):
        args = list(args)
        # Fill all arguments before '*'
        for i, name in enumerate(arg_names):
            if i < len(args):
                pass
            elif name in kwargs:
                args.append(kwargs[name])
                del kwargs[name]
            elif i >= start_of_defaults:
                args.append(defaults[i - start_of_defaults])
            else:
                raise TypeError("%s: missing argument '%s'"
                    % (method_name, name))
        # Verify received keyword-only arguments
        for name in kwargs:
            if name in arg_names:
                raise TypeError("%s: duplicate argument '%s'"
                    % (method_name, name))
            if name not in spec.kwonlyargs and spec.varkw is None:
                raise TypeError("%s: unknown argument '%s'"
                    % (method_name, name))
        if spec.varargs is None:
            # Treat all arguments as named
            if len(args) > len(arg_names):
                raise TypeError("%s: too many unnamed arguments (+%d)"
                    % (method_name, len(args) - len(arg_names)))
            for name, value in zip(arg_names, args):
                kwargs[name] = value
            args = []
        # Fill default values for keyword-only arguments
        for name in spec.kwonlyargs:
            if name in kwargs:
                pass
            elif name in kw_defaults:
                kwargs[name] = kw_defaults[name]
            else:
                raise TypeError("%s: missing argument '%s'"
                    % (method_name, name))
        # Deep copies keep the parsed call immutable from the outside.
        return safe_copy(args), safe_copy(kwargs)
    return parse
def node(cls=None, *, gc=False):
    """Class decorator turning *cls* into a cached, lazily-initialized node.

    Instances are deduplicated by a hash of their constructor arguments, so
    constructing a node twice with equal arguments yields the same object.
    ``cls.__init__`` is deferred until first real attribute access (when
    ``global_config["lazy"]`` is on).  With ``gc=True`` the instance cache
    holds weak references so unused nodes can be collected.
    """
    if cls is None:
        # Called as @node(gc=...): return the real decorator.
        return lambda cls: node(cls, gc=gc)
    # Warn if class names are not unique
    with global_lock:
        cls_name = str(cls.__name__)
        if cls_name in registered_classes:
            sys.stderr.write("Warning: class name '"
                + cls_name + "' is already in use\n")
            # Disambiguate with a numeric suffix so node ids stay unique.
            i = 2
            while cls_name + "_" + str(i) in registered_classes:
                i += 1
            cls_name = cls_name + "_" + str(i)
        registered_classes.add(cls_name)
    # Use weak references if user requests garbage collection
    if gc:
        import weakref
        node_obj_by_nodeid = weakref.WeakValueDictionary()
    else:
        node_obj_by_nodeid = {}
    class Node(cls):
        pass
    # Build a common argument parser for all instances of this class
    init = cls.__init__
    arg_parse = argument_parser(init, cls_name + ".__init__()")
    def node_new(node_cls, *args, **kwargs):
        # Replaces Node.__new__: dedup by argument hash, defer __init__.
        if node_cls != Node:
            raise ValueError("Do not inherit from mandalka nodes")
        # Standarize argument names etc.
        args, kwargs = arg_parse(*args, **kwargs)
        # Build a full description of this constructor call
        nodeid = repr(cls_name)
        for a in args:
            nodeid += "|" + describe(a, 0)
        for k in sorted(kwargs):
            nodeid += "|" + k + "=" + describe(kwargs[k], 0)
        nodeid = str_hash(nodeid)
        with global_lock:
            # Make sure the object is unique
            try:
                return node_obj_by_nodeid[nodeid]
            except KeyError:
                pass
            # It's really the first time
            node = cls.__new__(node_cls)
            node_obj_by_nodeid[nodeid] = node
            # Store arguments to run cls.__init__() later
            p = {}
            p["init"] = init
            p["clsname"] = cls_name
            p["args"] = args
            p["kwargs"] = kwargs
            p["nodeid"] = nodeid
            p["lock"] = threading.RLock()
            params.add(node, p)
            return node
    def node_to_str(self):
        return "<" + cls_name + " " + params.get(self)["nodeid"] + ">"
    def node_getattr(self, name):
        if name == "__class__":
            return Node
        # Don't run __init__ for methods tagged with mandalka.lazy
        try:
            getattr(Node, name).is_lazy
        except AttributeError:
            touch(self)
        return object.__getattribute__(self, name)
    def node_init(self, *args, **kwargs):
        # Real __init__ is replayed later by touch(); run eagerly if not lazy.
        if not global_config["lazy"]:
            touch(self)
    def node_del(self):
        # Drop the params entry so id() reuse cannot alias a dead node.
        params.add(self, None)
    Node.__getattribute__ = node_getattr
    Node.__init__ = node_init
    Node.__name__ = cls_name
    Node.__new__ = node_new
    Node.__del__ = node_del
    Node.__qualname__ = cls_name
    Node.__repr__ = node_to_str
    Node.__setattr__ = wrap(object.__setattr__)
    Node.__str__ = node_to_str
    # Wrap every inherited dunder so it also triggers initialization first.
    for tpe in cls.__mro__:
        if tpe == object:
            continue
        for name, value in tpe.__dict__.items():
            if not name.startswith("__"):
                continue
            if name in ("__dict__", "__weakref__"):
                continue
            if name in Node.__dict__:
                continue
            setattr(Node, name, wrap(value))
    return Node
def is_node(node):
    """True when *node* is an instance produced by a @node-wrapped class."""
    registration = params.get(node)
    return registration is not None
def unique_id(node):
    """Return the stable hash id assigned to *node* at construction time."""
    info = params.get(node)
    return info["nodeid"]
def arguments(node):
    """Return a dict of the node's constructor arguments.

    Keyword arguments appear under their names; positional arguments are
    added under their integer positions.  Only available before evaluate()
    removes the cached arguments.
    """
    info = params.get(node)
    if "args" not in info:
        raise ValueError("Cannot access arguments after evaluate()")
    merged = safe_copy(info["kwargs"])
    for position, value in enumerate(safe_copy(info["args"])):
        merged[position] = value
    return merged
def inputs(node):
    """Return the set of mandalka nodes referenced by this node's arguments.

    The argument structure is walked recursively through tuples, lists,
    sets and dicts (keys and values).  Only available before evaluate()
    removes the cached arguments.
    """
    found = set()

    def visit(obj):
        if isinstance(obj, (tuple, list, set, frozenset)):
            for item in obj:
                visit(item)
        if isinstance(obj, dict):
            for key, value in obj.items():
                visit(key)
                visit(value)
        if params.get(obj) is not None:
            found.add(obj)

    info = params.get(node)
    if "args" not in info:
        raise ValueError("Cannot access inputs after evaluate()")
    for arg in info["args"]:
        visit(arg)
    for value in info["kwargs"].values():
        visit(value)
    return found
| 1.921875 | 2 |
test/raven/test_raven_utils.py | wudidaizi/RAVEN | 0 | 12774003 | <reponame>wudidaizi/RAVEN<gh_stars>0
# %%
import torch
from RAVEN.pe.raven.utils import poly
import matplotlib.pyplot as plt
# %%
"""
# test poly
"""
# %%
# Compare poly()'s fixed-point polynomial evaluation of exp(x) on [0, 1)
# against torch.exp, reporting relative and absolute error statistics.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
a = torch.arange(0, 1, 0.001).to(device)
precise = torch.exp(a).to(device)
var = a
# Maclaurin-series coefficients of exp(x): 1/k! for k = 0..9.
coeff = torch.Tensor([1/1, 1/1, 1/2, 1/6, 1/24, 1/120, 1/720, 1/5040, 1/40320, 1/362880]).to(device)
# Per-coefficient fixed-point format: 3 integer bits, 5 fractional bits.
intwidth = torch.ones(8).to(device) * 3
fracwidth = torch.ones(8).to(device) * 5
# Truncate to an 8-term polynomial to match the 8 width entries.
coeff = coeff[0:8]
approximate = poly(coeff,
                   intwidth,
                   fracwidth,
                   var,
                   rounding="round")
error = (approximate - precise) / precise
print("relative error:")
print("min error rate:", error.min())
print("max error rate:", error.max())
print("avg error rate:", error.mean())
print("rms error rate:", error.mul(error).mean().sqrt())
# NOTE(review): the labels below still say "error rate" although this
# section reports absolute (not relative) error.
error = (approximate - precise)
print("absolute error:")
print("min error rate:", error.min())
print("max error rate:", error.max())
print("avg error rate:", error.mean())
print("rms error rate:", error.mul(error).mean().sqrt())
| 2.53125 | 3 |
Curso_Em_Video_Python/ex051.py | ThallesTorres/Curso_Em_Video_Python | 0 | 12774004 | <gh_stars>0
# Ex: 051 - Desenvolva um programa que leia o primeiro termo e a razão de uma
# PA. No final. mostre os 10 primeiros termos dessa progressão.
# Ex 051 - Read the first term and common difference of an arithmetic
# progression and print its first 10 terms.
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Seja bem-vindo!
--Exercício 051
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
''')
print('--Progressão Aritmética\n'
      '--Preencha os Dados')
primeiro_termo = int(input('Número Inicial: '))
razao = int(input('Razão: '))
# Print the initial term, then terms 2..10 computed arithmetically.
print(f'\n{primeiro_termo}', end=', ')
for indice in range(1, 10):
    print(primeiro_termo + indice * razao, end=', ')
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Obrigado pelo uso!
--Desenvolvido por <NAME>
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')
| 3.78125 | 4 |
post-sermon/extract-meta.py | tstephen/cbc-gsuite | 0 | 12774005 | #!/usr/bin/python3
#
# Extract audio metadata from m4a file
#
# Author: <NAME>
# Date: 04 Jan 2021
#
import glob
from mutagen.mp4 import MP4
import numpy as np
# Locate the target recording (hard-coded pattern; glob returns a list).
# NOTE(review): filez[0] raises IndexError when no matching file exists.
filez = glob.glob("2020_12_27_AM.m4a")
mp4file = MP4(filez[0])
# Dump every MP4 metadata tag as "name: value".
for tag in mp4file.tags:
    print('{}: {}'.format(tag, mp4file.tags[tag]))
| 2.78125 | 3 |
src/test/test_urilib2.py | supheart/python-spider | 0 | 12774006 | # -*- coding: utf-8 -*
import urllib2
import urllib
import cookielib
import json
# Python 2 demo of three ways to fetch a URL with urllib2, plus a JSON
# fetch and an image download.  Requires network access.
url = "http://www.baidu.com"
url_json = "http://zhiboba.3b2o.com/article/showListJson/EKo5qjn6Mq4"
# print urllib2.urlopen("http://baike.baidu.com/view/20965.htm").read()
# Fetch and parse a JSON listing.
readText = urllib2.urlopen(url_json).read()
content = json.loads(readText);
print content
# Download an image straight to disk.
urllib.urlretrieve("http://iil.3b2o.com/img/show/sid/pB_mREre1QT/w/576/h/1000/t/0/show.jpg", 'test.jpg')
# Method 1: plain urlopen on a URL string.
print "第一种方法"
response1 = urllib2.urlopen(url)
print response1.getcode()
print len(response1.read())
# Method 2: explicit Request object with a custom User-Agent header.
print "第二种方法"
request = urllib2.Request(url)
request.add_header("user-agent", "Mozilla/5.0")
response2 = urllib2.urlopen(request)
print response2.getcode()
print len(response2.read())
# Method 3: opener with cookie handling installed globally.
print "第三种方法"
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
response3 = urllib2.urlopen(url)
print response3.getcode()
print cj
print response3.read()
print ('end')
| 2.890625 | 3 |
util.py | ZiyaoWei/pyMatrixProfile | 29 | 12774007 | import numpy as np
import numpy.fft as fft
def zNormalize(ts):
    """Return a z-normalized version of the time series ts.

    The series is shifted to zero mean and, unless it is constant (zero
    standard deviation), scaled to unit standard deviation.  Note that
    *ts* is modified in place and also returned.

    >>> zNormalize(np.array([1.0, 1.0, 1.0]))
    array([ 0.,  0.,  0.])
    >>> np.round(zNormalize(np.array([1.0, 2.0, 0.0])), 3)
    array([ 0.   ,  1.225, -1.225])
    >>> np.round(zNormalize(np.array([0.2, 2.2, -1.8])), 3)
    array([-0.   ,  1.225, -1.225])
    """
    ts -= np.mean(ts)
    stdev = np.std(ts)
    # Bug fix: the original used "<>", which was removed in Python 3;
    # "!=" is the equivalent operator in both Python 2 and 3.
    if stdev != 0:
        ts /= stdev
    return ts
def zNormalizedEuclideanDistance(tsA, tsB):
    """Return the z-normalized Euclidean Distance between tsA and tsB.

    Both inputs are z-normalized (on float64 copies, so the originals are
    untouched) before the Euclidean norm of their difference is taken.
    Raises ValueError when the series differ in length.

    >>> zNormalizedEuclideanDistance(np.array([1.0, 1.0, 1.0]), np.array([2.0, 2.0, 2.0]))
    0.0
    >>> zNormalizedEuclideanDistance(np.array([1.0, 1.0, 1.0]), np.array([2.0, 2.0]))
    Traceback (most recent call last):
        ...
    ValueError: tsA and tsB must be of the same length
    >>> np.round(zNormalizedEuclideanDistance(np.array([0.0, 2.0, -2.0, 0.0]), np.array([1.0, 5.0, 3.0, 3.0])), 3)
    2.0
    """
    # Bug fix: the original used "<>", which was removed in Python 3;
    # "!=" is the equivalent operator in both Python 2 and 3.
    if len(tsA) != len(tsB):
        raise ValueError("tsA and tsB must be of the same length")
    # astype("float64") copies, so zNormalize's in-place edits stay local.
    return np.linalg.norm(zNormalize(tsA.astype("float64")) - zNormalize(tsB.astype("float64")))
def movstd(ts, m):
    """Moving standard deviation over every length-m window of ts.

    Uses cumulative sums of values and squared values so all windows are
    computed in a single O(n) pass.

    >>> np.round(movstd(np.array([1, 2, 3, 10]), 3), 3)
    array([ 0.816,  3.559])
    """
    if m < 1:
        raise ValueError("Query length m must be >= 1")
    values = ts.astype("float")
    # Prefix sums with a leading zero make window sums a simple difference.
    cumsum = np.insert(np.cumsum(values), 0, 0)
    cumsum_sq = np.insert(np.cumsum(values ** 2), 0, 0)
    window_sum = cumsum[m:] - cumsum[:-m]
    window_sum_sq = cumsum_sq[m:] - cumsum_sq[:-m]
    mean = window_sum / m
    # Var = E[x^2] - E[x]^2 per window.
    return np.sqrt(window_sum_sq / m - mean ** 2)
def mass(query, ts):
    """Distance profile of *query* against every length-m window of *ts*.

    Implements the MASS approach: z-normalize the query, compute all
    sliding dot products at once via FFT convolution, then convert them to
    z-normalized Euclidean distances using the per-window standard
    deviations from movstd().

    >>> np.round(mass(np.array([0.0, 1.0, -1.0, 0.0]), np.array([-1, 1, 0, 0, -1, 1])), 3)
    array([ 2.   ,  2.828,  2.   ])
    """
    query = zNormalize(query)
    m = len(query)
    n = len(ts)
    stdv = movstd(ts, m)
    # Reversing and zero-padding the query turns the FFT product below into
    # a sliding dot product of the query with every window of ts.
    query = query[::-1]
    query = np.pad(query, (0, n - m), 'constant')
    dots = fft.irfft(fft.rfft(ts) * fft.rfft(query))
    # Valid dot products start at index m-1; distance follows from
    # dist^2 = 2 * (m - dot/stdv) for a z-normalized query.
    return np.sqrt(2 * (m - (dots[m - 1 :] / stdv)))
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 3.0625 | 3 |
alocadorDeMemoria/algoritmo.py | lucascust/alocador-de-memoria | 1 | 12774008 | <reponame>lucascust/alocador-de-memoria
# ncoding=utf-8
# DEFINIÇÃO DO PADRÃO DE ESCRITA
# COMENTÁRIOS
# 1º Modo de escrita dos comentários livre, ´~^-_+=, etc, permitidos
#ex: meu nome é, variável de saída
# FUNÇÕES
# 2º Nomes das funções SEM ´~^-_+=, etc. E pode usar "De"
# 3º Comentários no início das funções explicando o que fazem
# 4º Comentários no final das funções explicando o que retornam
#ex: meuNomeE, variavelDeSaida
# VARIÁVEIS
# 5º Variáveis iniciadas em mínuscula, SEM ´~^-_+= etc, SEM "De", se tiver mais de uma palavra, esta, deve ser iniciada em maíuscula
#e x: meuNomeE, variavelSaida
import timeit
import funcoes as f
def main(tamMemoria):
    """Simulate memory allocation with first/best/worst-fit strategies.

    Reads the process list from "entrada.txt" (via the ``funcoes`` module,
    imported as ``f``) and runs one clock-driven simulation per strategy.
    For each strategy the result entry is:
    [graphics data, mean wait time, failed insertions,
     mean fragmentation level, mean allocation time in microseconds].
    ``tamMemoria`` is the total memory size handed to the allocator.
    """
    listaSaida = []
    modos=["first","best","worst"]
    for a in modos:
        modo = a
        # INITIALIZATION: build the data structures for this run.
        processos = f.interpreta("entrada.txt")
        f.listaDeTamanhos(processos)
        dicionarioDeEntrada = f.dicionarioDeEntrada(processos)
        dicionarioDeSaida = f.dicionarioDeSaida(processos)
        dicionarioDeProcessos = {}
        clock = 0
        """print("\nDicionário de Entradas:\n")
        print(dicionarioDeEntrada, end='\n\n')
        print("Dicionário de Saídas:\n")
        print(dicionarioDeSaida,end='\n\n')"""
        tentativasFalhadas = 0
        tempoEspera = [0] * len(processos) # initialize the wait-time vector
        tempoAlocacao = []
        entradaParteGrafica =[]
        del processos
        ##################################################
        # Main algorithm: each loop iteration is one clock cycle.
        while (dicionarioDeSaida):
            dicanterior = str(dicionarioDeProcessos) # snapshot used to detect changes in dicionarioDeProcessos
            # Process insertion
            if (clock in dicionarioDeEntrada):
                while (dicionarioDeEntrada[clock]):
                    processo = dicionarioDeEntrada[clock].pop() # remove and keep the last list element
                    tinicial = timeit.default_timer() # used to measure process insertion time
                    dicionarioDeProcessos, inseriu = f.alocarMemoria(processo,tamMemoria,dicionarioDeProcessos,modo)
                    tfinal = timeit.default_timer()
                    tempoAlocacao = f.tempoMedioDeAlocacaoDeProcessos(tempoAlocacao, inseriu, tfinal, tinicial)
                    tentativasFalhadas = f.tentativasFalhas(inseriu, tentativasFalhadas) # update the failure count
                    tempoEspera = f.tempoMedioDeEsperaDeProcessos(inseriu, tempoEspera,processo)
                    if (inseriu == False):
                        # Allocation failed: retry the process on the next
                        # clock tick and shift its exit time accordingly.
                        if (dicionarioDeEntrada.get(clock+1, False)):
                            dicionarioDeEntrada[clock+1].append(processo)
                        else:
                            dicionarioDeEntrada[clock+1] = [processo]
                        for i in dicionarioDeSaida:
                            if processo in dicionarioDeSaida[i]:
                                tempoDeSaida = i
                        if (dicionarioDeSaida.get(tempoDeSaida+1, False)):
                            dicionarioDeSaida[tempoDeSaida+1].append(processo)
                        else:
                            dicionarioDeSaida[tempoDeSaida+1] = [processo]
                        dicionarioDeSaida[tempoDeSaida].remove(processo)
            # Process removal
            if (clock in dicionarioDeSaida):
                while (dicionarioDeSaida[clock]):
                    processo = dicionarioDeSaida[clock].pop() # remove and keep the last list element
                    dicionarioDeProcessos = f.desalocaProcesso(processo, dicionarioDeProcessos)
                del dicionarioDeSaida[clock]
            if (dicanterior != str(dicionarioDeProcessos)): # record only when the memory map changed
                #print(clock , dicionarioDeProcessos,end="\n\n")
                entradaParteGrafica = f.geraEntradaDaParteGrafica(entradaParteGrafica,clock,dicionarioDeProcessos) # build the data needed by the graphical part
            clock += 1
        # Final result computations for this strategy.
        clock_final = entradaParteGrafica[-1]
        del entradaParteGrafica[-1]
        nivelFragmentacaoMemoria = round(f.media(f.calculaFragmentacaoMemoria(entradaParteGrafica,clock_final,tamMemoria)),3) # mean number of holes per clock cycle
        mediaTempoEspera = round(f.media(tempoEspera),3)
        tempoAlocacao = round(f.media(tempoAlocacao) * 1000000, 3) # time in microseconds
        # print("Nº de tentativas falhas: " + str(tentativasFalhadas) + " inserções falhas")
        # print("Tempo médio de espera dos processos: " + str(mediaTempoEspera) + " períodos de clock")
        # print("Tempo médio para alocação de processos: " + str(tempoAlocacao) + " segundos")
        # print("média de buracos por ciclo de CLOCK: " +str(nivelFragmentacaoMemoria))
        entradaParteGrafica.append(clock_final) # appended to show when memory becomes empty
        listaSaida.append([entradaParteGrafica, mediaTempoEspera, tentativasFalhadas, nivelFragmentacaoMemoria, tempoAlocacao])
    return listaSaida
| 3.578125 | 4 |
imghide.py | heyDevlopr/imghide | 0 | 12774009 | #!/usr/bin/python3
import tkinter as tk
from tkinter import messagebox
from PIL import ImageTk
from PIL import Image
from os import path
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
import base64
from sys import exit
# NOTE(review): "global" at module level is a no-op; these lines only
# declare intent.  The names below are created/rebound by disable_checkbox().
global mainBgColr, secBgColr, theme
mainBgColr = "#121212"  # main window background color
secBgColr = "#1a1a1a"  # secondary (status bar) background color
global path_entry, msg_entry, password_entry, encode_button, label1, label2, label3, label4
def confirmClose():
    """Ask the user for confirmation and close the main window on "yes"."""
    wants_exit = messagebox.askyesno(
        title='confirmation',
        message='Are you sure that you want to quit?')
    if wants_exit:
        window.destroy()
# Main window: fixed-ish size, dark theme.
window = tk.Tk()
window.title('IMGHide v1.0')
window.geometry('630x370')
window.configure(bg='#121212')
window.minsize(550, 370)
window.maxsize(630, 370)
# Preload the UI images; references are kept at module level so Tk does
# not garbage-collect them.
im1 = Image.open("assets/header.png")
header = ImageTk.PhotoImage(im1)
im2 = Image.open("assets/enc_button.png")
enc_button = ImageTk.PhotoImage(im2)
im3 = Image.open("assets/dec_button.png")
dec_button = ImageTk.PhotoImage(im3)
def encrypt(key, source, encode=True):
    """AES-256-CBC encrypt *source* with a key derived from SHA-256(key).

    A fresh random IV is prepended to the ciphertext.  The plaintext is
    padded to a whole number of AES blocks (each pad byte holds the pad
    length).  Returns base64 text when *encode* is true, raw bytes
    otherwise.
    """
    aes_key = SHA256.new(key).digest()  # derive a properly sized 32-byte AES key
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(aes_key, AES.MODE_CBC, iv)
    pad_len = AES.block_size - len(source) % AES.block_size
    padded = source + bytes([pad_len]) * pad_len
    data = iv + cipher.encrypt(padded)
    if encode:
        return base64.b64encode(data).decode()
    return data
def decrypt(key, source, decode=True):
    """Invert encrypt(): derive the AES key, split off the IV, decrypt, unpad.

    *source* is base64 text when *decode* is true, raw bytes otherwise.
    Returns the plaintext bytes.
    """
    if decode:
        source = base64.b64decode(source.encode())
    key = SHA256.new(key).digest() # use SHA-256 over our key to get a proper-sized AES key
    IV = source[:AES.block_size] # extract the IV from the beginning
    decryptor = AES.new(key, AES.MODE_CBC, IV)
    data = decryptor.decrypt(source[AES.block_size:]) # decrypt
    padding = data[-1] # pick the padding value from the end; Python 2.x: ord(data[-1])
    if data[-padding:] != bytes([padding]) * padding: # Python 2.x: chr(padding) * padding
        messagebox.showerror("Error", "Invalid Padding Detected When Decrypting The Message !!!")
        # NOTE(review): execution continues after the error dialog, so the
        # caller still receives garbage bytes on a wrong password/padding.
    return data[:-padding] # remove the padding
def convertToRGB(img):
    """Flatten an RGBA image onto a white background and return it as RGB.

    Updates the status label on success; on failure shows an error dialog
    and terminates the program.
    """
    try:
        source = img
        source.load()
        white = Image.new("RGB", source.size, (255, 255, 255))
        # Use the alpha channel (band 3) as the paste mask.
        white.paste(source, mask = source.split()[3])
        info_label.config(text='$ Converted image to RGB')
        window.update()
        return white
    except Exception as e:
        info_label.config(text="$ Couldn't convert image to RGB")
        window.update()
        messagebox.showerror("Error", f"Couldn't convert image to RGB\n{e}")
        exit(1)
def getPixelCount(img):
    """Total number of pixels (width * height) in the image file *img*."""
    size = Image.open(img).size
    return size[0] * size[1]
def encodeImage(image,message,filename):
    """Hide *message* in *image* via least-significant-bit steganography.

    Each character uses a group of 3 RGB pixels (9 channel values): the
    first 8 channel parities encode the character's 8 bits, and the 9th
    channel's parity marks whether this is the last character (odd = stop).
    The result is saved as "<basename>-encrypted.png".  Progress is shown
    in the status label; errors terminate the program.
    """
    info_label.config(text="$ Encoding The Image")
    window.update()
    try:
        width, height = image.size
        pix = image.getdata()
        current_pixel = 0
        tmp=0
        # three_pixels = []
        x=0
        y=0
        info_label.config(text="$ Encoding The Image.")
        window.update()
        for ch in message:
            info_label.config(text="$ Encoding The Image..")
            window.update()
            binary_value = format(ord(ch), '08b')
            # For each character, get 3 pixels at a time
            p1 = pix[current_pixel]
            p2 = pix[current_pixel+1]
            p3 = pix[current_pixel+2]
            three_pixels = [val for val in p1+p2+p3]
            for i in range(0,8):
                # Force the channel parity to match the message bit,
                # stepping down instead of up at the 255 boundary.
                current_bit = binary_value[i]
                if current_bit == '0':
                    if three_pixels[i]%2!=0:
                        three_pixels[i]= three_pixels[i]-1 if three_pixels[i]==255 else three_pixels[i]+1
                elif current_bit == '1':
                    if three_pixels[i]%2==0:
                        three_pixels[i]= three_pixels[i]-1 if three_pixels[i]==255 else three_pixels[i]+1
            current_pixel+=3
            tmp+=1
            #Set 9th value
            if(tmp==len(message)):
                # Make as 1 (odd) - stop reading
                if three_pixels[-1]%2==0:
                    three_pixels[-1]= three_pixels[-1]-1 if three_pixels[-1]==255 else three_pixels[-1]+1
            else:
                # Make as 0 (even) - continue reading
                if three_pixels[-1]%2!=0:
                    three_pixels[-1]= three_pixels[-1]-1 if three_pixels[-1]==255 else three_pixels[-1]+1
            three_pixels = tuple(three_pixels)
            # Write the 3 modified pixels back, wrapping x at the row end.
            st=0
            end=3
            for i in range(0,3):
                image.putpixel((x,y), three_pixels[st:end])
                st+=3
                end+=3
                if (x == width - 1):
                    x = 0
                    y += 1
                else:
                    x += 1
        info_label.config(text="$ Encoding The Image...")
        window.update()
        encoded_filename = filename.split('.')[0] + "-encrypted.png"
        image.save(encoded_filename)
        info_label.config(text="$ Image Saved Successfully")
        window.update()
        messagebox.showinfo("Success", f"Image encoded and saved as {encoded_filename}\nOriginal filename (unknown)")
    except Exception as e:
        messagebox.showerror("Error", f"An error occured\n{e}")
        exit(1)
def decodeImage(image):
    """Recover a message hidden by encodeImage() and return it as a string.

    Reads 3 pixels (9 channel values) per character: the first 8 channel
    parities give the character bits; an odd 9th channel parity terminates
    the message.  Progress is shown in the status label; errors terminate
    the program.
    """
    info_label.config(text="$ Decoding The Image")
    window.update()
    try:
        pix = image.getdata()
        current_pixel = 0
        decoded=""
        info_label.config(text="$ Decoding The Image.")
        window.update()
        while True:
            info_label.config(text="$ Decoding The Image..")
            window.update()
            # Get 3 pixels each time
            binary_value=""
            p1 = pix[current_pixel]
            p2 = pix[current_pixel+1]
            p3 = pix[current_pixel+2]
            three_pixels = [val for val in p1+p2+p3]
            for i in range(0,8):
                # Channel parity encodes one message bit.
                if three_pixels[i]%2==0:
                    binary_value+="0"
                elif three_pixels[i]%2!=0:
                    binary_value+="1"
            info_label.config(text="$ Decoding The Image...")
            window.update()
            #Convert binary value to ascii and add to string
            # NOTE(review): strip() on a str returns a new value and is a
            # no-op here since the result is discarded.
            binary_value.strip()
            ascii_value = int(binary_value,2)
            decoded+=chr(ascii_value)
            current_pixel+=3
            info_label.config(text="$ Decoding The Image.")
            window.update()
            if three_pixels[-1]%2!=0:
                # stop reading
                break
        info_label.config(text="$ Image Decoded")
        window.update()
        return decoded
    except Exception as e:
        messagebox.showerror("Error", f"An error occured\n{e}")
        exit(1)
def insertHeaders(img):
    """Unimplemented placeholder; currently does nothing."""
    pass
def init_encode():
    """Handler for the Encode button: validate inputs, encrypt, and encode.

    Reads the image path, message and password from the Tk variables.
    Shows an error dialog and returns 1 on invalid path or when the image
    is too small for the message; otherwise encrypts (when a password was
    given), converts non-RGB images, runs encodeImage() on a copy, and
    resets the checkbox state afterwards.
    """
    c2.config(state='disabled')
    encode_button.config(state='disabled')
    img = path_var.get()
    if(not(path.exists(img))):
        messagebox.showerror("Error", "Image not found!\nGiven Image Name/Path is Invalid")
        encode_button.config(state='normal')
        return 1
    message = str(msg_var.get())
    # Capacity check: each character consumes 3 pixels.
    # NOTE(review): this checks the plaintext length, but the encrypted
    # base64 cipher written below is longer than the plaintext.
    if(len(message)*3 > getPixelCount(img)):
        messagebox.showerror("Error", "Given message is too long to be encoded in the image.\nPlease try another image with more pixels")
        encode_button.config(state='normal')
        return 1
    password = password_var.get()
    cipher=""
    if password!="":
        cipher = encrypt(key=password.encode(),source=message.encode())
    else:
        cipher = message
    image = Image.open(img)
    info_label.config(text=f"Image Mode: {image.mode}")
    window.update()
    if image.mode!='RGB':
        image = convertToRGB(image)
    # Encode into a copy so the opened image object stays untouched.
    newimg = image.copy()
    encodeImage(image=newimg,message=cipher,filename=image.filename)
    encode_button.config(state='normal')
    c2.config(state='normal')
    checkbox_var1.set(0)
    checkbox_var2.set(0)
    disable_checkbox()
def copytext(msg):
    """Copy *msg* to the system clipboard via a hidden Tk root window."""
    clip_root = tk.Tk()
    clip_root.withdraw()
    clip_root.clipboard_clear()
    clip_root.clipboard_append(msg)
    # NOTE(review): the hidden root is never destroyed; the clipboard
    # content stays owned by this process while the app runs.
    clip_root.update()
def init_decode():
    """Handler for the Decode button: extract, decrypt and show the message.

    Reads the image path and password from the Tk variables, decodes the
    hidden payload, decrypts it when a password was given, then offers to
    copy the recovered message to the clipboard.
    """
    c1.config(state='disabled')
    encode_button.config(state='disabled')
    img = path_var.get()
    if(not(path.exists(img))):
        messagebox.showerror("Error", "Image not found!\nFilename/Path Provided is Invalid!")
        encode_button.config(state='normal')
        return 1
    password = str(password_var.get())
    image = Image.open(img)
    cipher = decodeImage(image)
    decrypted=""
    if password!="":
        decrypted = decrypt(key=password.encode(), source=cipher)
    else:
        decrypted=cipher
    # NOTE(review): with no password, `decrypted` is a str (from
    # decodeImage) and str has no .decode() — the line below would raise
    # AttributeError in that branch; decrypt() returns bytes, which works.
    response = messagebox.askyesno("Decoded Message", f'"{decrypted.decode("UTF-8")}"\n\nDo You Want To Copy The Message ?')
    if response == True:
        copytext(decrypted)
    c1.config(state='normal')
    encode_button.config(state='normal')
    checkbox_var1.set(0)
    checkbox_var2.set(0)
    disable_checkbox()
def disable_checkbox():
    """Rebuild the form when the Encode/Decode checkboxes change.

    Exactly one mode may be active at a time: selecting Encode disables the
    Decode checkbox and lays out path/message/password fields plus the
    Encode button; selecting Decode does the mirror layout; clearing both
    re-enables the checkboxes and tears the widgets down.  The widgets are
    shared via module-level globals so they can be destroyed later.
    """
    global path_entry, msg_entry, password_entry, encode_button, label1, label2, label3, label4
    if (checkbox_var1.get() == 1) & (checkbox_var2.get() == 0):
        # Encode mode: path + message + password fields, Encode button.
        info_label.config(text='$ Encode Selected')
        c2.config(state='disabled')
        label1 = tk.Label(window,bg=mainBgColr,fg='white', text='File Name/Path', font=('calibre',9,'normal'))
        label1.place(relx = 0.3, rely = 0.43, anchor = 'center')
        path_entry = tk.Entry(window,textvariable = path_var, font=('calibre',10,'normal'))
        path_entry.place(relx = 0.4, rely = 0.43, anchor = 'w')
        label2 = tk.Label(window,bg=mainBgColr,fg='white', text='  Message', font=('calibre',9,'normal'))
        label2.place(relx = 0.33, rely = 0.53, anchor = 'center')
        msg_entry = tk.Entry(window,textvariable = msg_var, font=('calibre',10,'normal'))
        msg_entry.place(relx = 0.4, rely = 0.53, anchor = 'w')
        label3 = tk.Label(window,bg=mainBgColr,fg='white', text='Password', font=('calibre',9,'normal'))
        label3.place(relx = 0.33, rely = 0.63, anchor = 'center')
        password_entry = tk.Entry(window,textvariable = password_var, show='*', font=('calibre',10,'normal'))
        password_entry.place(relx = 0.4, rely = 0.63, anchor = 'w')
        label4 = tk.Label(window,bg=mainBgColr,fg='white',text='(Leave Empty For No Password)', font=('calibre',9,'normal'))
        label4.place(relx = 0.5, rely = 0.7, anchor = 'center')
        encode_button = tk.Button(window,image=enc_button,borderwidth = 2,relief="flat",command=init_encode)
        encode_button.place(relx = 0.4, rely = 0.825, anchor = 'w')
    elif (checkbox_var1.get() == 0) & (checkbox_var2.get() == 1):
        # Decode mode: path + password fields, Decode button.
        info_label.config(text='$ Decode Selected')
        c1.config(state='disabled')
        label1 = tk.Label(window,bg=mainBgColr,fg='white', text='File Name/Path', font=('calibre',9,'normal'))
        label1.place(relx = 0.3, rely = 0.45, anchor = 'center')
        path_entry = tk.Entry(window,textvariable = path_var, font=('calibre',10,'normal'))
        path_entry.place(relx = 0.4, rely = 0.45, anchor = 'w')
        label2 = tk.Label(window,bg=mainBgColr,fg='white', text='Password', font=('calibre',9,'normal'))
        label2.place(relx = 0.33, rely = 0.55, anchor = 'center')
        label3 = tk.Label(window, text='', font=('calibre',9,'normal'))
        password_entry = tk.Entry(window,textvariable=password_var, show='*', font=('calibre',10,'normal'))
        password_entry.place(relx = 0.4, rely = 0.55, anchor = 'w')
        label4 = tk.Label(window,bg=mainBgColr,fg='white', text='(Leave Empty For No Password)', font=('calibre',9,'normal'))
        label4.place(relx = 0.5, rely = 0.65, anchor = 'center')
        msg_entry = tk.Entry(window,textvariable = msg_var, font=('calibre',10,'normal'))
        encode_button = tk.Button(window,image=dec_button,command=init_decode,borderwidth = 2,relief="flat")
        encode_button.place(relx = 0.4, rely = 0.8, anchor = 'w')
    elif (checkbox_var1.get() == 0) & (checkbox_var2.get() == 0):
        # Neither mode: tear down widgets and re-enable both checkboxes.
        info_label.config(text='$ Nothing Selected')
        c1.config(state='normal')
        c2.config(state='normal')
        path_entry.delete('0',tk.END)
        path_entry.destroy()
        msg_entry.delete('0',tk.END)
        msg_entry.destroy()
        password_entry.delete('0',tk.END)
        password_entry.destroy()
        encode_button.destroy()
        label1.destroy()
        label2.destroy()
        label3.destroy()
        label4.destroy()
    else:
        info_label.config(text='$ Selected Nothing')
# Tk variables backing the form widgets built by disable_checkbox().
checkbox_var1 = tk.IntVar()
checkbox_var2 = tk.IntVar()
path_var = tk.StringVar()
password_var = tk.StringVar()
msg_var = tk.StringVar()
# Static chrome: header image, mode checkboxes, status bar, credits.
title = tk.Label(window, image=header, bg=mainBgColr)
title.place(relx = 0.5, rely = 0.12, anchor = 'center')
c1 = tk.Checkbutton(window, text='Encode', bg='#029dd3', variable=checkbox_var1, onvalue=1, offvalue=0, command=disable_checkbox, activebackground='#00bdff')
c1.place(relx = 0.5, rely = 0.28, anchor = 'center')
c2 = tk.Checkbutton(window,text='Decode', bg='#14c700', variable=checkbox_var2, onvalue=1, offvalue=0, command=disable_checkbox, activebackground='#1aff00')
c2.place(relx = 0.5, rely = 0.35, anchor = 'center')
info_label = tk.Label(window, bg=secBgColr, fg='#1aff00',width=30, text='$ Everything Initialised', font=('calibre',9,'normal'))
info_label.place(relx = 0, rely = 1, anchor ='sw')
author_label = tk.Label(window, bg=mainBgColr, fg='#00c6ff', width=30, text='GUI by heyDevlopr ( GitHub )\nIMGHide by TechRaj ( YouTube )', font=('TkHeadingFont',9,'normal'))
author_label.place(relx = 1, rely = 1, anchor ='se')
# Route the window-manager close button through the confirmation dialog.
window.protocol("WM_DELETE_WINDOW", confirmClose)
window.mainloop()
| 3 | 3 |
spar_python/query_generation/query_bounds.py | nathanawmk/SPARTA | 37 | 12774010 | <filename>spar_python/query_generation/query_bounds.py
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: ATLH
# Description: Functions to delinate the bounds for different
# query types
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 07 May 2014 ATLH Original version
# *****************************************************************
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.join(this_dir, '..', '..')
sys.path.append(base_dir)
'''
Represents the maximal and minimum lower bounds seen
in the csv input file for each sub_query type, also
the specific bounds for the compound queries
'''
# Module-level caches: query_type -> extreme bound observed so far.
_result_set_size_range_lower = {}  # smallest lower bound per query type
_result_set_size_range_upper = {}  # largest upper bound per query type
# "tm" variants track the same bounds for compound (template) queries.
_tm_result_set_size_range_lower = {}
_tm_result_set_size_range_upper = {}
def set_result_set_size_range_lower(query_type, lower):
    """Record *lower* for *query_type*, keeping only the smallest value seen."""
    previous = _result_set_size_range_lower.get(query_type, lower)
    _result_set_size_range_lower[query_type] = min(previous, lower)
def get_rss_lower(query_type):
    """Smallest lower bound recorded for *query_type* (KeyError if none)."""
    return _result_set_size_range_lower[query_type]
def set_result_set_size_range_upper(query_type, upper):
try:
if upper > _result_set_size_range_upper[query_type]:
_result_set_size_range_upper[query_type] = upper
except KeyError:
_result_set_size_range_upper[query_type] = upper
def get_rss_upper(query_type):
return _result_set_size_range_upper[query_type]
def set_tm_result_set_size_range_lower(query_type, lower):
try:
if lower < _tm_result_set_size_range_lower[query_type]:
_tm_result_set_size_range_lower[query_type] = lower
except KeyError:
_tm_result_set_size_range_lower[query_type] = lower
def get_tm_rss_lower(query_type):
return _tm_result_set_size_range_lower[query_type]
def set_tm_result_set_size_range_upper(query_type, upper):
try:
if upper > _tm_result_set_size_range_upper[query_type]:
_tm_result_set_size_range_upper[query_type] = upper
except KeyError:
_tm_result_set_size_range_upper[query_type] = upper
def get_tm_rss_upper(query_type):
return _tm_result_set_size_range_upper[query_type]
| 2.359375 | 2 |
tests/unit/exponential_distribution_test.py | konradarchicinski/stpp | 1 | 12774011 | <filename>tests/unit/exponential_distribution_test.py
import unittest
import fistpp as fs
class ExponetialDistributionTests(unittest.TestCase):
    """Spot-checks the unit ExponentialDistribution against tabulated values.

    The fixture lists are aligned index-by-index: expected_values[i] is the
    quantile whose density equals expected_mass[i] and whose cumulative
    probability equals expected_probability[i].
    """

    def setUp(self):
        self.expdist = fs.ExponentialDistribution()
        self.expected_values = [
            0.001000500333583534, 0.005012541823544286, 0.010050335853501450,
            0.025317807984289897, 0.051293294387550580, 0.105360515657826280,
            0.287682072451780900, 0.693147180559945300, 1.386294361119890600,
            2.302585092994046000, 2.995732273553990000, 3.688879454113935400,
            4.605170185988091000, 5.298317366548035000, 6.907755278982136000
        ]
        self.expected_mass = [
            0.999, 0.995, 0.990, 0.975, 0.950, 0.900, 0.750, 0.500, 0.250,
            0.100, 0.050, 0.025, 0.010, 0.005, 0.001
        ]
        self.expected_probability = [
            0.001, 0.005, 0.010, 0.025, 0.050, 0.100, 0.250, 0.500, 0.750,
            0.900, 0.950, 0.975, 0.990, 0.995, 0.999
        ]

    def test_pdf(self):
        # Density at each tabulated quantile should match the mass column.
        for x, mass in zip(self.expected_values, self.expected_mass):
            self.assertAlmostEqual(self.expdist.pdf(x), mass,
                                   msg=f"testing pdf for {x}")

    def test_cdf(self):
        # Cumulative probability at each quantile matches the probability column.
        for x, prob in zip(self.expected_values, self.expected_probability):
            self.assertAlmostEqual(self.expdist.cdf(x), prob,
                                   msg=f"testing cdf for {x}")

    def test_ppf(self):
        # The quantile function inverts the cdf at every tabulated probability.
        for p, value in zip(self.expected_probability, self.expected_values):
            self.assertAlmostEqual(self.expdist.ppf(p), value,
                                   msg=f"testing ppf for {p}")

    def test_mean(self):
        # The unit exponential distribution has mean 1.
        self.assertEqual(self.expdist.mean(), 1.0)

    def test_variance(self):
        # The unit exponential distribution has variance 1.
        self.assertEqual(self.expdist.variance(), 1.0)
if __name__ == "__main__":
unittest.main() | 2.78125 | 3 |
config/common/all_params.py | leozz37/makani | 1,178 | 12774012 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All parameters."""
from makani.config import mconfig
@mconfig.Config(deps={
    'control': 'common.control.control_params',
    'monitor': 'common.monitor.monitor_params',
    'sim': 'common.sim.sim_params',
    'system': mconfig.WING_MODEL + '.system_params'
})
def MakeParams(params):
  """Assemble the top-level parameter dict from the resolved dependencies."""
  # Forward each resolved dependency through under the same key.
  return {key: params[key] for key in ('control', 'monitor', 'sim', 'system')}
| 1.570313 | 2 |
cogbot/cogs/robo_mod/robo_mod_options.py | Arcensoth/cogbot | 8 | 12774013 | <filename>cogbot/cogs/robo_mod/robo_mod_options.py
from typing import Dict, List, Optional, Set
from discord import Color
from cogbot.cogs.robo_mod.robo_mod_rule import RoboModRule
from cogbot.cogs.robo_mod.robo_mod_trigger_type import RoboModTriggerType
from cogbot.types import ChannelId, RoleId
class RoboModOptions:
    """RoboMod configuration for one server: the rule set plus log/notify options.

    Construction is two-phase: __init__ only declares the attribute set, and
    the async init() populates every attribute from the raw config dict.
    """

    def __init__(self):
        # Declarations only — no values are assigned until init() runs.
        self.rules: List[RoboModRule]
        self.rules_by_name: Dict[str, RoboModRule]
        self.rules_by_trigger_type: Dict[RoboModTriggerType, List[RoboModRule]]
        self.log_channel_id: Optional[ChannelId]
        self.compact_logs: Optional[bool]
        self.log_emoji: Optional[str]
        self.log_icon: Optional[str]
        self.log_color: Optional[Color]
        self.notify_role_ids: Optional[Set[RoleId]]

    @property
    def rule_names(self) -> List[str]:
        """Names of all configured rules, in registration order."""
        return list(self.rules_by_name)

    async def init(self, state: "RoboModServerState", data: dict) -> "RoboModOptions":
        """Populate this object from the raw config *data* and return self."""
        # Build every rule up-front, then derive both lookup tables from them.
        self.rules = [await RoboModRule().init(state, entry) for entry in data["rules"]]
        self.rules_by_name = {rule.name: rule for rule in self.rules}
        self.rules_by_trigger_type = {}
        for rule in self.rules:
            self.rules_by_trigger_type.setdefault(rule.trigger_type, []).append(rule)
        state.log.info(f"Registered {len(self.rules)} rules")

        # Optional logging / notification settings; each defaults to None.
        self.log_channel_id = data.get("log_channel", None)
        self.compact_logs = data.get("compact_logs", None)
        self.log_emoji = data.get("log_emoji", None)
        self.log_icon = data.get("log_icon", None)

        raw_log_color = data.get("log_color", None)
        if raw_log_color is None:
            self.log_color = None
        else:
            self.log_color = state.bot.color_from_hex(raw_log_color)

        raw_notify_roles = data.get("notify_roles", None)
        if raw_notify_roles is None:
            self.notify_role_ids = None
        else:
            self.notify_role_ids = set(raw_notify_roles)
        return self
| 2.1875 | 2 |
RRT/old versions/29_june_2016_single_RRT/parameters.py | hasauino/Python | 0 | 12774014 | <reponame>hasauino/Python<filename>RRT/old versions/29_june_2016_single_RRT/parameters.py
# Tuning parameters for the single-RRT script.
# dim: presumably the side length of the square search space — TODO confirm
# against the script that imports this module.
dim=10.0
# eta: presumably the tree-extension step size — confirm against the RRT code.
eta=0.5
steps=100 #must be integer (no decimal point)
# rneighb: neighborhood radius, deliberately tied to eta.
rneighb=eta
| 1.203125 | 1 |
util/measure/modularity.py | yuhsiangfu/Multiple-Spreaders | 0 | 12774015 | """
Measure: modularity (set)
@auth: <NAME>
@date 2015/10/09
@update 2016/02/13
"""
# 模塊性: Newman's modularity
def modularity(G, community_list):
    """Return Newman's modularity Q of a partition of graph *G*.

    G must expose ``G.node[i]['node_degree']``, ``G.edges()`` and
    ``G.number_of_edges()`` (networkx-1.x style interface).
    community_list is a list of node collections, one per community; if a
    node appears in several communities, the last one wins.

    Q = sum over communities of (ls - ds) where ls is the fraction of edges
    internal to the community and ds is the squared fraction of degree in it.

    The estimated time complexity of this version is O(V) + O(E) + O(C).
    """
    NODE_DEGREE = 'node_degree'

    num_communities = len(community_list)
    intra_degree = {i: 0 for i in range(num_communities)}  # ds numerators
    intra_edges = {i: 0 for i in range(num_communities)}   # ls numerators

    # Map each node to its community index and total the degree inside each
    # community.  Time complexity: O(V).
    community_id = {}
    for index, community in enumerate(community_list):
        for node in community:
            intra_degree[index] += G.node[node][NODE_DEGREE]
            community_id[node] = index

    # Count edges whose endpoints lie in the same community, O(E).
    for (ei, ej) in G.edges():
        if community_id[ei] == community_id[ej]:
            intra_edges[community_id[ei]] += 1

    # Accumulate Q over communities, O(C).
    modularity = 0
    num_edges = G.number_of_edges()
    for i in range(num_communities):
        ls = intra_edges[i] / num_edges
        ds = pow(intra_degree[i] / (2 * num_edges), 2)
        modularity += (ls - ds)

    return modularity
| 3.53125 | 4 |
train/new_train.py | zeroAska/TFSegmentation | 633 | 12774016 | <filename>train/new_train.py
"""
New trainer faster than ever
"""
from metrics.metrics import Metrics
from utils.reporter import Reporter
from utils.misc import timeit
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import matplotlib
import time
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class NewTrain(object):
def __init__(self, args, sess, model):
print("\nTraining is initializing itself\n")
self.args = args
self.sess = sess
self.model = model
# shortcut for model params
self.params = self.model.params
# To initialize all variables
self.init = None
self.init_model()
# Create a saver object
self.saver = tf.train.Saver(max_to_keep=self.args.max_to_keep,
keep_checkpoint_every_n_hours=10,
save_relative_paths=True)
self.saver_best = tf.train.Saver(max_to_keep=1,
save_relative_paths=True)
# Load from latest checkpoint if found
self.load_model()
##################################################################################
# Init summaries
# Summary variables
self.scalar_summary_tags = ['mean_iou_on_val',
'train-loss-per-epoch', 'val-loss-per-epoch',
'train-acc-per-epoch', 'val-acc-per-epoch']
self.images_summary_tags = [
('train_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3]),
('val_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3])]
self.summary_tags = []
self.summary_placeholders = {}
self.summary_ops = {}
# init summaries and it's operators
self.init_summaries()
# Create summary writer
self.summary_writer = tf.summary.FileWriter(self.args.summary_dir, self.sess.graph)
##################################################################################
if self.args.mode == 'train':
self.num_iterations_training_per_epoch = self.args.tfrecord_train_len // self.args.batch_size
self.num_iterations_validation_per_epoch = self.args.tfrecord_val_len // self.args.batch_size
else:
self.test_data = None
self.test_data_len = None
self.num_iterations_testing_per_epoch = None
self.load_test_data()
##################################################################################
# Init metrics class
self.metrics = Metrics(self.args.num_classes)
# Init reporter class
if self.args.mode == 'train' or 'overfit':
self.reporter = Reporter(self.args.out_dir + 'report_train.json', self.args)
elif self.args.mode == 'test':
self.reporter = Reporter(self.args.out_dir + 'report_test.json', self.args)
##################################################################################
@timeit
def load_test_data(self):
print("Loading Testing data..")
self.test_data = {'X': np.load(self.args.data_dir + "X_val.npy"),
'Y': np.load(self.args.data_dir + "Y_val.npy")}
self.test_data_len = self.test_data['X'].shape[0] - self.test_data['X'].shape[0] % self.args.batch_size
print("Test-shape-x -- " + str(self.test_data['X'].shape))
print("Test-shape-y -- " + str(self.test_data['Y'].shape))
self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size
print("Test data is loaded")
@timeit
def init_model(self):
print("Initializing the variables of the model")
self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
self.sess.run(self.init)
print("Initialization finished")
def save_model(self):
"""
Save Model Checkpoint
:return:
"""
print("saving a checkpoint")
self.saver.save(self.sess, self.args.checkpoint_dir, self.model.global_step_tensor)
print("Saved a checkpoint")
def save_best_model(self):
"""
Save BEST Model Checkpoint
:return:
"""
print("saving a checkpoint for the best model")
self.saver_best.save(self.sess, self.args.checkpoint_best_dir, self.model.global_step_tensor)
print("Saved a checkpoint for the best model")
def load_best_model(self):
"""
Load the best model checkpoint
:return:
"""
print("loading a checkpoint for BEST ONE")
latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_best_dir)
if latest_checkpoint:
print("Loading model checkpoint {} ...\n".format(latest_checkpoint))
self.saver_best.restore(self.sess, latest_checkpoint)
else:
print("ERROR NO best checkpoint found")
exit(-1)
print("BEST MODEL LOADED..")
def init_summaries(self):
"""
Create the summary part of the graph
:return:
"""
with tf.variable_scope('train-summary-per-epoch'):
for tag in self.scalar_summary_tags:
self.summary_tags += tag
self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag)
self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag])
for tag, shape in self.images_summary_tags:
self.summary_tags += tag
self.summary_placeholders[tag] = tf.placeholder('float32', shape, name=tag)
self.summary_ops[tag] = tf.summary.image(tag, self.summary_placeholders[tag], max_outputs=10)
def add_summary(self, step, summaries_dict=None, summaries_merged=None):
"""
Add the summaries to tensorboard
:param step:
:param summaries_dict:
:param summaries_merged:
:return:
"""
if summaries_dict is not None:
summary_list = self.sess.run([self.summary_ops[tag] for tag in summaries_dict.keys()],
{self.summary_placeholders[tag]: value for tag, value in
summaries_dict.items()})
for summary in summary_list:
self.summary_writer.add_summary(summary, step)
if summaries_merged is not None:
self.summary_writer.add_summary(summaries_merged, step)
@timeit
def load_model(self):
"""
Load the latest checkpoint
:return:
"""
try:
# This is for loading the pretrained weights if they can't be loaded during initialization.
self.model.encoder.load_pretrained_weights(self.sess)
except AttributeError:
pass
print("Searching for a checkpoint")
latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {} ...\n".format(latest_checkpoint))
self.saver.restore(self.sess, latest_checkpoint)
print("Model loaded from the latest checkpoint\n")
else:
print("\n.. No ckpt, SO First time to train :D ..\n")
def train(self):
print("Training mode will begin NOW ..")
tf.train.start_queue_runners(sess=self.sess)
curr_lr = self.model.args.learning_rate
for cur_epoch in range(self.model.global_epoch_tensor.eval(self.sess) + 1, self.args.num_epochs + 1, 1):
# init tqdm and get the epoch value
tt = tqdm(range(self.num_iterations_training_per_epoch), total=self.num_iterations_training_per_epoch,
desc="epoch-" + str(cur_epoch) + "-")
# init acc and loss lists
loss_list = []
acc_list = []
# loop by the number of iterations
for cur_iteration in tt:
# get the cur_it for the summary
cur_it = self.model.global_step_tensor.eval(self.sess)
# Feed this variables to the network
feed_dict = {
self.model.handle: self.model.training_handle,
self.model.is_training: True,
self.model.curr_learning_rate: curr_lr
}
# Run the feed forward but the last iteration finalize what you want to do
if cur_iteration < self.num_iterations_training_per_epoch - 1:
# run the feed_forward
_, loss, acc, summaries_merged = self.sess.run(
[self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries],
feed_dict=feed_dict)
# log loss and acc
loss_list += [loss]
acc_list += [acc]
# summarize
self.add_summary(cur_it, summaries_merged=summaries_merged)
else:
# run the feed_forward
_, loss, acc, summaries_merged, segmented_imgs = self.sess.run(
[self.model.train_op, self.model.loss, self.model.accuracy,
self.model.merged_summaries, self.model.segmented_summary],
feed_dict=feed_dict)
# log loss and acc
loss_list += [loss]
acc_list += [acc]
total_loss = np.mean(loss_list)
total_acc = np.mean(acc_list)
# summarize
summaries_dict = dict()
summaries_dict['train-loss-per-epoch'] = total_loss
summaries_dict['train-acc-per-epoch'] = total_acc
summaries_dict['train_prediction_sample'] = segmented_imgs
self.add_summary(cur_it, summaries_dict=summaries_dict, summaries_merged=summaries_merged)
# report
self.reporter.report_experiment_statistics('train-acc', 'epoch-' + str(cur_epoch), str(total_acc))
self.reporter.report_experiment_statistics('train-loss', 'epoch-' + str(cur_epoch), str(total_loss))
self.reporter.finalize()
# Update the Global step
self.model.global_step_assign_op.eval(session=self.sess,
feed_dict={self.model.global_step_input: cur_it + 1})
# Update the Cur Epoch tensor
# it is the last thing because if it is interrupted it repeat this
self.model.global_epoch_assign_op.eval(session=self.sess,
feed_dict={self.model.global_epoch_input: cur_epoch + 1})
# print in console
tt.close()
print("epoch-" + str(cur_epoch) + "-" + "loss:" + str(total_loss) + "-" + " acc:" + str(total_acc)[
:6])
# Break the loop to finalize this epoch
break
# Update the Global step
self.model.global_step_assign_op.eval(session=self.sess,
feed_dict={self.model.global_step_input: cur_it + 1})
# Save the current checkpoint
if cur_epoch % self.args.save_every == 0:
self.save_model()
# Test the model on validation
if cur_epoch % self.args.test_every == 0:
self.test_per_epoch(step=self.model.global_step_tensor.eval(self.sess),
epoch=self.model.global_epoch_tensor.eval(self.sess))
if cur_epoch % self.args.learning_decay_every == 0:
curr_lr = curr_lr * self.args.learning_decay
print('Current learning rate is ', curr_lr)
print("Training Finished")
def test_per_epoch(self, step, epoch):
print("Validation at step:" + str(step) + " at epoch:" + str(epoch) + " ..")
# init tqdm and get the epoch value
tt = tqdm(range(self.num_iterations_validation_per_epoch), total=self.num_iterations_validation_per_epoch,
desc="Val-epoch-" + str(epoch) + "-")
# init acc and loss lists
loss_list = []
acc_list = []
inf_list = []
# reset metrics
self.metrics.reset()
# get the maximum iou to compare with and save the best model
max_iou = self.model.best_iou_tensor.eval(self.sess)
# init dataset to validation
self.sess.run(self.model.validation_iterator.initializer)
# loop by the number of iterations
for cur_iteration in tt:
# Feed this variables to the network
feed_dict = {
self.model.handle: self.model.validation_handle,
self.model.is_training: False
}
# Run the feed forward but the last iteration finalize what you want to do
if cur_iteration < self.num_iterations_validation_per_epoch - 1:
start = time.time()
# run the feed_forward
next_img, out_argmax, loss, acc = self.sess.run(
[self.model.next_img, self.model.out_argmax, self.model.loss, self.model.accuracy],
feed_dict=feed_dict)
end = time.time()
# log loss and acc
loss_list += [loss]
acc_list += [acc]
inf_list += [end - start]
# log metrics
self.metrics.update_metrics_batch(out_argmax, next_img[1])
else:
start = time.time()
# run the feed_forward
next_img, out_argmax, loss, acc, segmented_imgs = self.sess.run(
[self.model.next_img, self.model.out_argmax, self.model.loss, self.model.accuracy,
self.model.segmented_summary],
feed_dict=feed_dict)
end = time.time()
# log loss and acc
loss_list += [loss]
acc_list += [acc]
inf_list += [end - start]
# log metrics
self.metrics.update_metrics_batch(out_argmax, next_img[1])
# mean over batches
total_loss = np.mean(loss_list)
total_acc = np.mean(acc_list)
mean_iou = self.metrics.compute_final_metrics(self.num_iterations_validation_per_epoch)
mean_iou_arr = self.metrics.iou
mean_inference = str(np.mean(inf_list)) + '-seconds'
# summarize
summaries_dict = dict()
summaries_dict['val-loss-per-epoch'] = total_loss
summaries_dict['val-acc-per-epoch'] = total_acc
summaries_dict['mean_iou_on_val'] = mean_iou
summaries_dict['val_prediction_sample'] = segmented_imgs
self.add_summary(step, summaries_dict=summaries_dict)
self.summary_writer.flush()
# report
self.reporter.report_experiment_statistics('validation-acc', 'epoch-' + str(epoch), str(total_acc))
self.reporter.report_experiment_statistics('validation-loss', 'epoch-' + str(epoch), str(total_loss))
self.reporter.report_experiment_statistics('avg_inference_time_on_validation', 'epoch-' + str(epoch),
str(mean_inference))
self.reporter.report_experiment_validation_iou('epoch-' + str(epoch), str(mean_iou), mean_iou_arr)
self.reporter.finalize()
# print in console
tt.close()
print("Val-epoch-" + str(epoch) + "-" + "loss:" + str(total_loss) + "-" +
"acc:" + str(total_acc)[:6] + "-mean_iou:" + str(mean_iou))
print("Last_max_iou: " + str(max_iou))
if mean_iou > max_iou:
print("This validation got a new best iou. so we will save this one")
# save the best model
self.save_best_model()
# Set the new maximum
self.model.best_iou_assign_op.eval(session=self.sess,
feed_dict={self.model.best_iou_input: mean_iou})
else:
print("hmm not the best validation epoch :/..")
# Break the loop to finalize this epoch
break
def test(self):
print("Testing mode will begin NOW..")
# load the best model checkpoint to test on it
self.load_best_model()
# init tqdm and get the epoch value
tt = tqdm(range(self.test_data_len))
naming = np.load(self.args.data_dir + 'names_train.npy')
# init acc and loss lists
loss_list = []
acc_list = []
img_list = []
# idx of image
idx = 0
# reset metrics
self.metrics.reset()
# loop by the number of iterations
for cur_iteration in tt:
# load mini_batches
x_batch = self.test_data['X'][idx:idx + 1]
y_batch = self.test_data['Y'][idx:idx + 1]
# update idx of mini_batch
idx += 1
# Feed this variables to the network
feed_dict = {self.model.x_pl: x_batch,
self.model.y_pl: y_batch,
self.model.is_training: False
}
# run the feed_forward
out_argmax, loss, acc, summaries_merged, segmented_imgs = self.sess.run(
[self.model.out_argmax, self.model.loss, self.model.accuracy,
self.model.merged_summaries, self.model.segmented_summary],
feed_dict=feed_dict)
np.save(self.args.out_dir + 'npy/' + str(cur_iteration) + '.npy', out_argmax[0])
plt.imsave(self.args.out_dir + 'imgs/' + 'test_' + str(cur_iteration) + '.png', segmented_imgs[0])
# log loss and acc
loss_list += [loss]
acc_list += [acc]
# log metrics
self.metrics.update_metrics(out_argmax[0], y_batch[0], 0, 0)
# mean over batches
total_loss = np.mean(loss_list)
total_acc = np.mean(acc_list)
mean_iou = self.metrics.compute_final_metrics(self.test_data_len)
# print in console
tt.close()
print("Here the statistics")
print("Total_loss: " + str(total_loss))
print("Total_acc: " + str(total_acc)[:6])
print("mean_iou: " + str(mean_iou))
print("Plotting imgs")
def finalize(self):
self.reporter.finalize()
self.summary_writer.close()
self.save_model()
| 2.359375 | 2 |
rps_nk.py | naraekwon/udacity-rock-paper-scissor | 0 | 12774017 | <gh_stars>0
#!/usr/bin/env python3
import random
import pdb
"""This program plays a game of Rock, Paper, Scissors between two Players,
and reports both Player's scores each round."""
moves = ['rock', 'paper', 'scissors']
"""The Player class is the parent class for all of the Players
in this game"""
class Player:
def move(self):
return 'rock'
def learn(self, my_move, their_move):
pass
def beats(one, two):
return ((one == 'rock' and two == 'scissors') or
(one == 'scissors' and two == 'paper') or
(one == 'paper' and two == 'rock'))
class RandomPlayer(Player):
def move(self):
return (random.choice(moves))
class ReflectPlayer(Player):
def __init__(self):
self.reflection = None
def move(self):
if self.reflection is None:
return (random.choice(moves))
else:
return (self.reflection)
def learn(self, my_move, their_move):
super().learn(my_move, their_move)
self.reflection = their_move
class CyclePlayer(Player):
def __init__(self):
self.remember = None
def move(self):
if self.remember is None:
return (random.choice(moves))
else:
location = moves.index(self.remember)
next_location = (location + 1) % 3
return (moves[next_location])
def learn(self, my_move, their_move):
super().learn(my_move, their_move)
self.remember = my_move
class HumanPlayer(Player):
def move(self):
while True:
words = input("Rock, paper, scissors? > ")
if str.lower(words) in moves:
return (words)
break
else:
print("Oops! Couldn't understand. Try it again!")
class Game:
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.p1.score = 0
self.p2.score = 0
def play_round(self):
move1 = self.p1.move()
move2 = self.p2.move()
print(f"\nYou played: {move1}.\nOpponent played: {move2}.\n")
# pdb.set_trace()
self.p1.learn(move1, move2)
self.p2.learn(move2, move1)
# pdb.set_trace()
if beats(move1, move2):
self.p1.score += 1
print("You won!")
elif beats(move2, move1):
self.p2.score += 1
print("Opponent won!")
elif move1 == move2:
print("Tied!")
print(f"Score: You {self.p1.score}, Opponent {self.p2.score}")
print()
def play_game(self):
print("Game start!\n")
while True:
customround = input("How many rounds would you like to play? ")
if customround.isdigit():
break
else:
print("Tell me a number. For example: 1, 2, 3, or 10.\n")
for round in range(int(customround)):
print(f"\nRound {round+1} --")
self.play_round()
print("Game over!")
print(f"Final Score: You {self.p1.score} vs Opponent {self.p2.score}")
if self.p1.score > self.p2.score:
print("You won!")
elif self.p2.score > self.p1.score:
print("Opponent won!")
elif self.p1.score == self.p2.score:
print("Tied!")
if __name__ == '__main__':
c_players = [Player(), RandomPlayer(), ReflectPlayer(), CyclePlayer()]
game = Game(HumanPlayer(), random.choice(c_players))
game.play_game()
| 4.1875 | 4 |
BinarySearch/MorPracticesII/Median of Two Sorted Arrays.py | mamoudmatook/Leetcode | 0 | 12774018 | #
# Created on Wed Sep 01 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 Maatuq
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from typing import List
class Solution:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
N, M = len(nums1), len(nums2)
if N > M:
return self.findMedianSortedArrays(nums2, nums1)
half = (N + M + 1) // 2
left, right = 0, N
while left <= right:
mid = (left + right) // 2
a_size = mid
b_size = half - mid
a_left = nums1[a_size - 1] if a_size > 0 else float('-inf')
b_left = nums2[b_size - 1] if b_size > 0 else float('-inf')
a_right = nums1[a_size] if a_size < N else float('inf')
b_right = nums2[b_size] if b_size < M else float('inf')
if a_left <= b_right and b_left <= a_right:
if (N + M) % 2 == 0:
return (max(a_left, b_left) + min(a_right, b_right)) / 2
return max(a_left, b_left)
elif a_left > b_right:
right = mid - 1
else:
left = mid + 1
| 3.109375 | 3 |
midonet/neutron/tests/unit/test_midonet_plugin.py | midokura/python-neutron-plugin-midonet | 0 | 12774019 | <reponame>midokura/python-neutron-plugin-midonet<gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Midokura Europe SARL
# @author: <NAME>, Midokura Japan KK
# @author: <NAME>, Midokura Japan KK
import mock
import os
from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.tests.unit import _test_extension_portbindings as test_bindings
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_ext_gw_mode as test_gw_mode
import neutron.tests.unit.test_extension_security_group as sg
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
from oslo.config import cfg
MIDOKURA_PKG_PATH = "midonet.neutron.plugin"
MIDOKURA_EXT_PATH = "midonet.neutron.extensions"
MIDONET_PLUGIN_NAME = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH)
class MidonetPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self,
plugin=MIDONET_PLUGIN_NAME,
ext_mgr=None,
service_plugins=None):
self.midoclient_mock = mock.MagicMock()
self.midoclient_mock.midonetclient.neutron.client.return_value = True
modules = {
'midonetclient': self.midoclient_mock,
'midonetclient.neutron': self.midoclient_mock.neutron,
'midonetclient.neutron.client': self.midoclient_mock.client,
}
self.module_patcher = mock.patch.dict('sys.modules', modules)
self.module_patcher.start()
# import midonetclient here because it needs proper mock objects to be
# assigned to this module first. 'midoclient_mock' object is the
# mock object used for this module.
from midonetclient.neutron.client import MidonetClient
client_class = MidonetClient
self.mock_class = client_class()
extensions_path = importutils.import_module(
MIDOKURA_EXT_PATH).__file__
cfg.CONF.set_override('api_extensions_path',
os.path.dirname(extensions_path))
super(MidonetPluginV2TestCase, self).setUp(plugin=plugin)
def tearDown(self):
super(MidonetPluginV2TestCase, self).tearDown()
self.module_patcher.stop()
class TestMidonetNetworksV2(MidonetPluginV2TestCase,
test_plugin.TestNetworksV2):
pass
class TestMidonetL3NatTestCase(MidonetPluginV2TestCase,
test_l3_plugin.L3NatDBIntTestCase):
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(MIDONET_PLUGIN_NAME)
class TestMidonetSecurityGroup(MidonetPluginV2TestCase,
sg.TestSecurityGroups):
pass
class TestMidonetSubnetsV2(MidonetPluginV2TestCase,
test_plugin.TestSubnetsV2):
pass
class TestMidonetPortsV2(MidonetPluginV2TestCase,
test_plugin.TestPortsV2):
def test_vif_port_binding(self):
with self.port(name='myname') as port:
self.assertEqual('midonet', port['port']['binding:vif_type'])
self.assertTrue(port['port']['admin_state_up'])
class TestMidonetPluginPortBinding(MidonetPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_MIDONET
HAS_PORT_FILTER = True
class TestExtGwMode(MidonetPluginV2TestCase,
test_gw_mode.ExtGwModeIntTestCase):
pass
| 1.476563 | 1 |
camkes/visualCAmkES/View/Instance_Property_Widget.py | aisamanra/camkes-tool | 0 | 12774020 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
from PyQt5 import QtWidgets
import Instance_Widget
from Interface.Property import PropertyInterface
import Connection_Widget
class InstancePropertyWidget(QtWidgets.QGroupBox):
    """
    InstancePropertyWidget - shows the properties of a component instance
    (name, type, hardware/control flags and its interfaces/connections).
    """

    # Instance widget that this widget represents

    @property
    def instance_widget(self):
        # The widget must be set before it is ever read.
        assert self._instance_widget is not None
        return self._instance_widget

    @instance_widget.setter
    def instance_widget(self, value):
        assert isinstance(value, Instance_Widget.InstanceWidget)
        self._instance_widget = value

    # views

    @property
    def name_widget(self):
        # Lazily created label showing the instance's name.
        if self._name_widget is None:
            self._name_widget = QtWidgets.QLabel(self.instance_widget.name)
        return self._name_widget

    @property
    def type_widget(self):
        # When this becomes editable
        # This will be not a label, but a drop down menu
        if self._type_widget is None:
            self._type_widget = QtWidgets.QLabel(self.instance_widget.component_type)
        return self._type_widget

    @property
    def hardware_widget(self):
        # Only created when the instance is a hardware component;
        # otherwise stays None (callers rely on the truthiness check).
        if self._hardware_widget is None and self.instance_widget.hardware:
            self._hardware_widget = QtWidgets.QLabel("Hardware")
        return self._hardware_widget

    @property
    def control_widget(self):
        # Only created when the instance is a control component.
        if self._control_widget is None and self.instance_widget.control:
            self._control_widget = QtWidgets.QLabel("Control")
        return self._control_widget

    # --- INITIALISATION ---

    def __init__(self, instance_widget):
        """Build the property panel for the given InstanceWidget."""
        self._instance_widget = None
        self._name_widget = None
        self._type_widget = None
        self._hardware_widget = None
        self._control_widget = None
        self.instance_widget = instance_widget
        super(InstancePropertyWidget, self).__init__()

        grid_layout = QtWidgets.QGridLayout()

        row = 0

        # Following must be done after setting instance widget
        # Name
        grid_layout.addWidget(QtWidgets.QLabel("Name: "), row, 0)
        grid_layout.addWidget(self.name_widget, row, 1)
        row = row + 1

        # Type
        grid_layout.addWidget(QtWidgets.QLabel("Type: "), row, 0)
        grid_layout.addWidget(self.type_widget, row, 1)
        row = row + 1

        # Hardware
        if self.hardware_widget:
            grid_layout.addWidget(self.hardware_widget, row, 0, 1, -1)
            row = row + 1

        # Control
        if self.control_widget:
            grid_layout.addWidget(self.control_widget, row, 0, 1, -1)
            row = row + 1

        # Separator
        separator = QtWidgets.QFrame()
        separator.setFrameStyle(QtWidgets.QFrame.HLine | QtWidgets.QFrame.Plain)
        grid_layout.addWidget(separator, row, 0, 1, -1)
        row = row + 1

        # List all connection
        grid_layout.addWidget(QtWidgets.QLabel("Connections"), row, 0, 1, -1)
        row = row + 1

        # One row per declared interface, grouped by interface category.
        for provide_dict in self.instance_widget.provides:
            grid_layout.addWidget(QtWidgets.QLabel("Procedure"), row, 0)
            grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (provide_dict["Interface_type"], provide_dict["Name"]) ), row, 1)
            row = row + 1

        for use_dict in self.instance_widget.uses:
            grid_layout.addWidget(QtWidgets.QLabel("Procedure"), row, 0)
            grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (use_dict["Interface_type"], use_dict["Name"]) ), row, 1)
            row = row + 1

        for emit_dict in self.instance_widget.emits:
            grid_layout.addWidget(QtWidgets.QLabel("Event"), row, 0)
            grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (emit_dict["Interface_type"], emit_dict["Name"]) ), row, 1)
            row = row + 1

        for consume_dict in self.instance_widget.consumes:
            grid_layout.addWidget(QtWidgets.QLabel("Event"), row, 0)
            grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (consume_dict["Interface_type"], consume_dict["Name"]) ), row, 1)
            row = row + 1

        for dataport_dict in self.instance_widget.dataport:
            grid_layout.addWidget(QtWidgets.QLabel("Dataport"), row, 0)
            grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (dataport_dict["Interface_type"], dataport_dict["Name"]) ), row, 1)
            row = row + 1

        self.setLayout(grid_layout)
# end of file
# client/utils/config_jim.py (DoctorChe/Python_DataBase_PyQT)
"""Константы для jim протокола, настройки"""
ACTION = "action" # тип сообщения между клиентом и сервером
TIME = "time" # время запроса
DATA = "data" # данные пересылаемые в сообщении (вложенный словарь)
TOKEN = "token" # токен
RESPONSE = "response" # код ответа
# Значения (Типы данных, передаваемых в поле data)
MESSAGE = "text" # текст сообщения
# TYPE = "type" # необязательное поле
USER = "user" # данные о пользователе - клиенте (вложенный словарь)
ACCOUNT_NAME = "account_name" # имя пользователя - чата
LOGIN = "login" # логин пользователя (может отличаться от имени пользователя)
PASSWORD = "password" # пароль пользователя
STATUS = "status" # статус пользователя
# TO = "message_to" # получатель
# FROM = "account_name" # отправитель
INFO = "info" # информация о контакте
# DATA = "data" # сервисное сообщение (текст ошибки и т.д.)
# Значения (Методы протокола (actions))
PRESENCE = "presence" # присутствие. Сервисное сообщение для извещения сервера о присутствии клиента online
# PROBE = "probe" # проверка присутствия. Сервисное сообщение от сервера для проверки присутствии клиента online
MSG = "msg" # простое сообщение пользователю или в чат
# QUIT = "quit" # отключение от сервера
AUTHENTICATE = "authenticate" # авторизация на сервере
# JOIN = "join" # присоединиться к чату
# LEAVE = "leave" # покинуть чат
GET_CONTACTS = "get_contacts" # получить список контактов
GET_CONTACT = "get_contact" # получить информацию о контакте
ADD_CONTACT = "add_contact" # добавить контакт в список контактов
DEL_CONTACT = "del_contact" # удалить контакт из списка контактов
UPDATE_CONTACT = "update_contact" # обновить контакт в списке контактов
ACT_LOGIN = "login" # вход
ACT_LOGOUT = "logout" # выход
ACT_REGISTRATION = "registration" # регистрация
ECHO = "echo" # эхо-сообщение
ACT_GET_ALL_MESSAGES = "get_all_messages" # получить все сообщения пользователя
ACT_UPDATE_MESSAGE = "update_message" # обновить сообщение пользователя
ACT_DELETE_MESSAGE = "delete_message" # удалить сообщение пользователя
# Коды ответов (будут дополняться)
# 1xx — информационные сообщения
BASIC_NOTICE = 100 # базовое уведомление
IMPORTANT_NOTICE = 101 # важное уведомление
# 2xx — успешное завершение
OK = 200 # OK
CREATED = 201 # объект создан
ACCEPTED = 202 # подтверждение
# 4xx — ошибка на стороне клиента
WRONG_REQUEST = 400 # неправильный запрос/json объект
NOT_AUTORIZED = 401 # не авторизован
WRONG_LOGIN_PASSWORD = <PASSWORD> # неправильный логин/пароль
FORBIDDEN = 403 # пользователь заблокирован
NOT_FOUND = 404 # пользователь/чат отсутствует на сервере
CONFLICT = 409 # уже имеется подключение с указанным логином
GONE = 410 # адресат существует, но недоступен (offline)
CLOSED = 499 # Client Closed Request (клиент закрыл соединение)
# 5xx — ошибка на стороне сервера
SERVER_ERROR = 500 # ошибка сервера
# Словари - ответы:
# # 200
# RESPONSE_200 = {RESPONSE: 200}
# # 400
# RESPONSE_400 = {
# RESPONSE: 400,
# MESSAGE: None
# }
# 499
RESPONSE_499 = {
RESPONSE: 499,
MESSAGE: "Client Closed Request"
}
# Кортеж из кодов ответов
RESPONSE_CODES = (
BASIC_NOTICE,
IMPORTANT_NOTICE,
OK,
CREATED,
ACCEPTED,
WRONG_REQUEST,
NOT_AUTORIZED,
WRONG_LOGIN_PASSWORD,
FORBIDDEN,
NOT_FOUND,
CONFLICT,
GONE,
SERVER_ERROR
)
# ENCODING = "utf-8" # кодировка
# end of file
# ui.py (volkmaster/word-image-collage)
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QDialog, QLabel, QLineEdit, QPushButton
from PyQt5.QtCore import pyqtSlot
from api import api_caller
from filtering import elastic_transform, japanify, pixelsort, smoothing, ripple_effect, segmentation
import pattern_generator
import util
__author__ = '<NAME>'
# Display names of the post-processing filters, in UI order.
FILTERS = ['Elastic transform (liquify)', 'Japanify', 'Pixel sorting', 'Smoothing', 'Ripple effect', 'Segmentation']
# (parameter label, default value) pairs, aligned index-for-index with FILTERS.
FILTER_PARAMETERS = [('Alpha', '10000'), ('Density', '50'), ('Sorting path', 'diagonal'), ('Kernel size', '10'), ('K', '5'), ('Weight', '29')]
# Sorting paths accepted by the pixel-sorting filter (random walks disabled).
PIXEL_SORTING_ALLOWED_PARAMETERS = [
    'angled-line',
    'circles',
    'concentric',
    'diagonal',
    'diagonal-single',
    'fill-circles',
    'horizontal',
    # 'random-walk',
    # 'random-walk-horizontal',
    # 'random-walk-vertical',
    'vertical'
]
# (parameter syntax, description) pairs shown in the pixel-sorting help dialog.
PIXEL_SORTING_PARAMETERS_HELP = [
    ('angled-line,[angle=0]', 'Sort pixels in lines tilted at the given angle.'),
    ('circles', 'Pixels are sorted in concentric circles about the center of the image.'),
    ('concentric', 'Pixels are sorted in concentric rectangles.'),
    ('diagonal', 'Pixels are sorted in diagonal lines.'),
    ('diagonal-single', 'Pixels sorted in a single path that moves diagonally through the image.'),
    ('fill-circles,[radius=100]', 'Covers the image in circles of the given radius.'),
    ('horizontal', 'Pixels sorted horizontally.'),
    # ('random-walk', 'Pixels sorted in random walks over the image.'),
    # ('random-walk-horizontal', 'Pixels sorted in random walks moving horizontally over the image.'),
    # ('random-walk-vertical', 'Pixels sorted in random walks moving vertically over the image.'),
    ('vertical', 'Pixels sorted vertically.')
]
class App(QMainWindow):
    """Main window of the keyword pattern generator.

    Collects up to five keywords and an ordered set of filter settings,
    then drives API image collection, pattern generation and the chosen
    post-processing filters.
    """

    def __init__(self):
        """Initialise widget slots, load the persistent serial number and build the UI."""
        super().__init__()
        self.title = 'Keyword Pattern Generator'
        # NOTE: these shadow QWidget.width()/height(); kept for compatibility.
        self.width = 850
        self.height = 455
        self.label_title_keywords = None
        self.textboxes_keywords = [None, None, None, None, None]
        self.label_title_filters = None
        self.textboxes_numbers_filters = [None for _ in FILTERS]
        self.labels_filters = [None for _ in FILTERS]
        self.labels_parameters_filters = [None for _ in FILTERS]
        self.textboxes_parameters_filters = [None for _ in FILTERS]
        self.button_help_pixel_sorting = None
        self.button_generate = None
        self.label_width = None
        self.textbox_width = None
        self.label_height = None
        self.textbox_height = None
        self.label_status = None

        # The serial number persists across runs in a sidecar file.
        with open('serial_number.txt', 'r') as file:
            self.serial_no = int(file.read())

        self.init_ui()

    def init_ui(self):
        """Create and place all widgets; wire up signal handlers."""
        # Set window properties
        self.setWindowTitle(self.title)
        self.setFixedSize(self.width, self.height)
        self.move(QApplication.desktop().availableGeometry().center() - self.frameGeometry().center())

        # Create a title label for keywords
        self.label_title_keywords = QLabel('Enter up to 5 keywords:', self)
        self.label_title_keywords.move(25, 10)
        self.label_title_keywords.resize(250, 30)

        # Create textboxes for keywords
        for i in range(len(self.textboxes_keywords)):
            self.textboxes_keywords[i] = QLineEdit(self)
            self.textboxes_keywords[i].move(25, 10 + (i + 1) * 40)
            self.textboxes_keywords[i].resize(250, 30)
            self.textboxes_keywords[i].textChanged.connect(self.on_keyword_change)

        # Create a title label for filters
        # BUG FIX: this used to overwrite self.label_title_keywords; the
        # dedicated attribute declared in __init__ was never assigned.
        self.label_title_filters = QLabel(f'Order filters (1-{len(FILTERS)} / empty) and fill out parameters:', self)
        self.label_title_filters.move(325, 10)
        self.label_title_filters.resize(300, 30)

        # Create textboxes for filters (numbers)
        for i in range(len(self.textboxes_numbers_filters)):
            self.textboxes_numbers_filters[i] = QLineEdit(self)
            self.textboxes_numbers_filters[i].move(325, 10 + (i + 1) * 40)
            self.textboxes_numbers_filters[i].resize(30, 30)

        # Create labels for filters
        for i in range(len(self.labels_filters)):
            self.labels_filters[i] = QLabel(FILTERS[i], self)
            self.labels_filters[i].move(365, 10 + (i + 1) * 40)
            self.labels_filters[i].resize(165, 30)

        # Create labels for filters (parameters)
        for i in range(len(self.labels_filters)):
            self.labels_parameters_filters[i] = QLabel(FILTER_PARAMETERS[i][0], self)
            self.labels_parameters_filters[i].move(530, 10 + (i + 1) * 40)
            self.labels_parameters_filters[i].resize(80, 30)

        # Create textboxes for filters (parameters)
        for i in range(len(self.textboxes_numbers_filters)):
            self.textboxes_parameters_filters[i] = QLineEdit(FILTER_PARAMETERS[i][1], self)
            self.textboxes_parameters_filters[i].move(620, 10 + (i + 1) * 40)
            self.textboxes_parameters_filters[i].resize(160, 30)

            # Pixel sorting has a non-trivial parameter syntax; give it a help button.
            if FILTERS[i] == 'Pixel sorting':
                self.button_help_pixel_sorting = QPushButton('Help', self)
                self.button_help_pixel_sorting.move(790, 10 + (i + 1) * 40)
                self.button_help_pixel_sorting.resize(40, 30)
                self.button_help_pixel_sorting.clicked.connect(self.on_help_pixel_sorting)

        # Create a button for pattern generation (disabled until a keyword exists)
        self.button_generate = QPushButton('Generate pattern', self)
        self.button_generate.move(75, 265)
        self.button_generate.resize(150, 50)
        self.button_generate.setEnabled(False)
        self.button_generate.clicked.connect(self.on_generate_pattern)

        # Create a width label
        self.label_width = QLabel('Width of pattern (in px):', self)
        self.label_width.move(325, 315)
        self.label_width.resize(150, 30)

        # Create a width textbox
        self.textbox_width = QLineEdit('1000', self)
        self.textbox_width.move(485, 315)
        self.textbox_width.resize(70, 30)

        # Create a height label
        self.label_height = QLabel('Height of pattern (in px):', self)
        self.label_height.move(325, 355)
        self.label_height.resize(150, 30)

        # Create a height textbox
        self.textbox_height = QLineEdit('1000', self)
        self.textbox_height.move(485, 355)
        self.textbox_height.resize(70, 30)

        # Create a status label
        self.label_status = QLabel(self)
        self.label_status.move(25, 395)
        self.label_status.resize(550, 60)
        self.label_status.setWordWrap(True)

        self.show()

    @pyqtSlot()
    def on_keyword_change(self):
        """Enable the generate button only while at least one keyword is non-empty."""
        # BUG FIX: was `textbox.text() is not ''` — identity comparison on
        # strings is unreliable (and a SyntaxWarning on modern CPython).
        keywords = [textbox.text() for textbox in self.textboxes_keywords if textbox.text() != '']
        self.button_generate.setEnabled(len(keywords) > 0)
        self.label_status.setText('')

    @pyqtSlot()
    def on_help_pixel_sorting(self):
        """Show a modal dialog describing the pixel-sorting path parameters."""
        dialog = QDialog()
        dialog.setWindowTitle('Pixel sorting: sorting path parameter help')
        dialog.setFixedSize(700, 2 * 20 + len(PIXEL_SORTING_PARAMETERS_HELP) * 30)

        for i, (name, label_description) in enumerate(PIXEL_SORTING_PARAMETERS_HELP):
            label_name = QLabel(name, dialog)
            label_name.move(20, 20 + i * 30)
            label_name.resize(200, 20)
            label_description = QLabel(label_description, dialog)
            label_description.move(230, 20 + i * 30)
            label_description.resize(450, 20)

        dialog.exec_()

    @pyqtSlot()
    def on_generate_pattern(self):
        """Collect keywords, generate the pattern and apply filters in the requested order."""
        self.before_generate_pattern()

        file = 'images/pattern.png'

        # BUG FIX: `is not ''` -> `!= ''` (see on_keyword_change).
        keywords = [textbox.text().strip() for textbox in self.textboxes_keywords if textbox.text() != '']
        for keyword in keywords:
            try:
                api_caller.run(keyword)
            except util.Error as e:
                self.label_status.setText(e.message())
                self.after_generate_pattern()
                return

        width = int(self.textbox_width.text())
        height = int(self.textbox_height.text())
        pattern_generator.run(file, width, height, keywords, self.serial_no)

        # Collect (filter index, order number, parameter) triples and sort by order.
        filters = []
        for i in range(len(FILTERS)):
            text_number = self.textboxes_numbers_filters[i].text()
            text_parameter = self.textboxes_parameters_filters[i].text()
            if text_number and text_parameter:
                filters.append((i, int(text_number), text_parameter))
        filters.sort(key=lambda x: x[1])

        # Each filter consumes the previous output file and returns a new one.
        for (i, number, parameter) in filters:
            # Elastic transform (liquify)
            if i == 0:
                file = elastic_transform.run(file, alpha=int(parameter), sigma=8)
            # Japanify
            elif i == 1:
                file = japanify.run(file, density=int(parameter))
            # Pixel sorting
            elif i == 2:
                tokens = parameter.split(',')
                if tokens[0] not in PIXEL_SORTING_ALLOWED_PARAMETERS:
                    self.label_status.setText('Pixel sorting parameter is not allowed or not formatted properly. ' +
                                              'Check help.')
                    self.after_generate_pattern()
                    return
                value = '' if len(tokens) == 1 else tokens[1]
                file = pixelsort.run(file, sorting_path=tokens[0], value=value)
            # Smoothing
            elif i == 3:
                file = smoothing.run(file, kernel_size=int(parameter))
            # Ripple effect
            elif i == 4:
                file = ripple_effect.run(file, k=int(parameter))
            # Segmentation
            elif i == 5:
                file = segmentation.run(file, weight=int(parameter))

        status = ['Pattern with serial number #' + str(self.serial_no) + ' successfully generated.',
                  'Filters applied: ' + ', '.join([FILTERS[i] for (i, _, _) in filters])]
        self.label_status.setText(status[0] + '\n' + status[1])
        print(util.timestamp() + ' ' + ' '.join(status) + '\n')

        self.increment_serial_no()
        self.after_generate_pattern()

    def before_generate_pattern(self):
        """Disable all inputs while generation is running."""
        for textbox in self.textboxes_keywords:
            textbox.setEnabled(False)
        for textbox in self.textboxes_numbers_filters:
            textbox.setEnabled(False)
        for textbox in self.textboxes_parameters_filters:
            textbox.setEnabled(False)
        self.button_help_pixel_sorting.setEnabled(False)
        self.textbox_width.setEnabled(False)
        self.textbox_height.setEnabled(False)
        self.button_generate.setEnabled(False)
        self.label_status.setText('')

    def after_generate_pattern(self):
        """Re-enable all inputs once generation has finished or failed."""
        for textbox in self.textboxes_keywords:
            textbox.setEnabled(True)
        for textbox in self.textboxes_numbers_filters:
            textbox.setEnabled(True)
        for textbox in self.textboxes_parameters_filters:
            textbox.setEnabled(True)
        self.button_help_pixel_sorting.setEnabled(True)
        self.textbox_width.setEnabled(True)
        self.textbox_height.setEnabled(True)
        self.button_generate.setEnabled(True)

    def increment_serial_no(self):
        """Advance the serial number and persist it to disk."""
        self.serial_no += 1
        with open('serial_number.txt', 'w') as file:
            file.write(str(self.serial_no))
if __name__ == '__main__':
    # Start the Qt event loop with the main window on screen.
    application = QApplication(sys.argv)
    window = App()
    sys.exit(application.exec_())
# end of file
# 70/main.py (pauvrepetit/leetcode)
# 70. Climbing Stairs (爬楼梯)
#
# 20210716
# huao
from math import comb
class Solution:
    """LeetCode 70 — count the distinct ways to climb ``n`` stairs."""

    def climbStairs(self, n: int) -> int:
        # Choosing k two-step moves leaves n - 2k single steps, giving
        # C(n - k, k) arrangements; the total is the (n+1)-th Fibonacci number.
        return sum(comb(n - k, k) for k in range(n // 2 + 1))
# Quick manual sanity checks for small inputs.
for stair_count in (2, 3):
    print(Solution().climbStairs(stair_count))
# end of file
# tests/cors_test.py (aio-libs-abandoned/aiorest)
import unittest
import asyncio
import aiohttp
import contextlib
from aiorest import RESTServer
class REST:
    """Minimal handler set exposed by the test server."""

    def __init__(self, test):
        # Back-reference to the running test case.
        self.test = test

    def index(self, request):
        return dict(status='ok')

    def check_origin(self, request):
        return dict(status='ok')
class CorsTests(unittest.TestCase):
    """End-to-end CORS tests: spins up a RESTServer with CORS enabled and
    checks the Access-Control-* response headers over real HTTP requests.

    NOTE(review): uses the legacy ``@asyncio.coroutine`` / ``yield from``
    style and the old ``aiohttp.request`` API; kept as-is for the aiohttp
    version this project targets.
    """

    def setUp(self):
        # Private event loop per test so tests stay isolated.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
        self.server = RESTServer(debug=True, hostname='localhost',
                                 enable_cors=True,
                                 loop=self.loop)
        add_url = self.server.add_url
        rest = REST(self)
        add_url('GET', '/', rest.index)
        # This route restricts CORS to one specific origin.
        add_url('GET', '/check_origin', rest.check_origin,
                cors_options={'allow-origin': 'http://example.com/'})

    def tearDown(self):
        self.loop.close()

    @contextlib.contextmanager
    def run_server(self):
        """Start the server on an ephemeral port and yield its base URL."""
        self.assertTrue(self.server.cors_enabled)
        srv = self.loop.run_until_complete(self.loop.create_server(
            self.server.make_handler,
            '127.0.0.1', 0))

        sock = next(iter(srv.sockets))
        host, port = sock.getsockname()
        self.assertEqual('127.0.0.1', host)
        self.assertGreater(port, 0)
        url = 'http://{}:{}'.format(host, port)
        yield url
        # Shut the server down cleanly after the test body finishes.
        srv.close()
        self.loop.run_until_complete(srv.wait_closed())

    def test_simple_GET(self):
        """A plain GET with an Origin header gets a wildcard allow-origin."""
        with self.run_server() as url:
            @asyncio.coroutine
            def query():
                headers = {
                    'ORIGIN': 'localhost',
                    }
                resp = yield from aiohttp.request('GET', url,
                                                  headers=headers,
                                                  loop=self.loop)
                yield from resp.read()
                self.assertEqual(resp.status, 200)
                self.assertIn('ACCESS-CONTROL-ALLOW-ORIGIN', resp.headers)
                self.assertEqual(resp.headers['ACCESS-CONTROL-ALLOW-ORIGIN'],
                                 '*')

            self.loop.run_until_complete(query())

    def test_preflight(self):
        """An OPTIONS preflight on a known route is answered with CORS headers."""
        with self.run_server() as url:
            @asyncio.coroutine
            def query():
                headers = {
                    'ACCESS-CONTROL-REQUEST-METHOD': 'GET',
                    'ORIGIN': 'localhost',
                    }
                resp = yield from aiohttp.request('OPTIONS', url,
                                                  headers=headers,
                                                  loop=self.loop)
                yield from resp.read()
                self.assertEqual(resp.status, 200)
                self.assertIn('ACCESS-CONTROL-ALLOW-ORIGIN', resp.headers)
                self.assertEqual(resp.headers['ACCESS-CONTROL-ALLOW-ORIGIN'],
                                 '*')

            self.loop.run_until_complete(query())

    def test_preflight_404(self):
        """A preflight for an unknown route yields 404 and no CORS headers."""
        with self.run_server() as url:
            @asyncio.coroutine
            def query():
                resp = yield from aiohttp.request('OPTIONS', url,
                                                  loop=self.loop)
                yield from resp.read()
                self.assertEqual(resp.status, 404)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-ORIGIN', resp.headers)

            self.loop.run_until_complete(query())

    def test_check_origin(self):
        """Per-route allow-origin: headers appear only for the allowed origin."""
        with self.run_server() as url:
            @asyncio.coroutine
            def query():
                # No Origin header at all -> no CORS headers in the response.
                resp = yield from aiohttp.request('GET', url + '/check_origin',
                                                  headers={},
                                                  loop=self.loop)
                yield from resp.read()
                self.assertEqual(resp.status, 200)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-ORIGIN', resp.headers)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-METHOD', resp.headers)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-HEADERS', resp.headers)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-CREDENTIALS',
                                 resp.headers)

                # Wrong origin -> still no CORS headers.
                headers = {
                    'ORIGIN': 'localhost',
                    }
                resp = yield from aiohttp.request('GET', url + '/check_origin',
                                                  headers=headers,
                                                  loop=self.loop)
                yield from resp.read()
                self.assertEqual(resp.status, 200)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-ORIGIN', resp.headers)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-METHOD', resp.headers)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-HEADERS', resp.headers)
                self.assertNotIn('ACCESS-CONTROL-ALLOW-CREDENTIALS',
                                 resp.headers)

                # Matching origin -> allow-origin header is present.
                headers = {
                    'ORIGIN': 'http://example.com/',
                    }
                resp = yield from aiohttp.request('GET', url + '/check_origin',
                                                  headers=headers,
                                                  loop=self.loop)
                yield from resp.read()
                self.assertEqual(resp.status, 200)
                self.assertIn('ACCESS-CONTROL-ALLOW-ORIGIN', resp.headers)

            self.loop.run_until_complete(query())
# end of file
# algnuth/polynom.py (louisabraham/algnuth)
"""
Modular arithmetic
"""
from collections import defaultdict
import numpy as np
class ModInt:
    """
    Integers of Z/pZ
    """

    def __init__(self, a, n):
        # Keep the canonical representative in [0, n).
        self.v = a % n
        self.n = n

    def __eq__(self, other):
        if not isinstance(other, ModInt):
            return NotImplemented
        # Equal iff the difference is zero mod n.
        return not bool(self - other)

    def __hash__(self):
        return hash((self.v, self.n))

    def __bool__(self):
        return bool(self.v)

    def __add__(self, other):
        assert isinstance(other, ModInt)
        assert self.n == other.n
        return ModInt(self.v + other.v, self.n)

    def __radd__(self, other):
        assert isinstance(other, int)
        return ModInt(self.v + other, self.n)

    def __neg__(self):
        return ModInt(-self.v, self.n)

    def __sub__(self, other):
        return ModInt(self.v - other.v, self.n)

    def __mul__(self, other):
        if isinstance(other, int):
            return ModInt(other * self.v, self.n)
        if isinstance(other, ModInt):
            assert self.n == other.n
            return ModInt(self.v * other.v, self.n)
        return NotImplemented

    def __rmul__(self, other):
        return self * other

    def __pow__(self, exponent):
        # Right-to-left binary exponentiation; exponent 0 yields the int 1.
        assert isinstance(exponent, int)
        result = 1
        base = self
        while exponent:
            if exponent & 1:
                result *= base
            exponent >>= 1
            if not exponent:
                break
            base *= base
        return result

    def inv(self):
        # Multiplicative inverse (n assumed prime).
        if self.v == 0:
            raise ZeroDivisionError
        return ModInt(ModInt._inv(self.v, self.n), self.n)

    @staticmethod
    def _inv(k, n):
        # Recursive identity: inv(k) = -(n // k) * inv(n mod k)  (mod n).
        k %= n
        if k == 1:
            return k
        return (n - n // k) * ModInt._inv(n % k, n) % n

    def __truediv__(self, other):
        assert isinstance(other, ModInt)
        assert self.n == other.n
        return self * other.inv()

    def __rtruediv__(self, other):
        assert isinstance(other, int)
        return ModInt(other, self.n) / self

    @staticmethod
    def extended_euclid(a, b):
        """Extended Euclid algorithm

        Return
        ------
        x : int
        y : int
            a * x + b * y = gcd(a, b)
        """
        first, second = a, b
        sign_a = 1 if a >= 0 else -1
        sign_b = 1 if b >= 0 else -1
        xp, yp = 1, 0
        x, y = 0, 1
        while b:
            # Loop invariants tying the Bezout coefficients to the inputs.
            assert first * xp + second * yp == a
            assert first * x + second * y == b
            quotient = a // b
            a, b = b, a % b
            x, xp = xp - quotient * x, x
            y, yp = yp - quotient * y, y
        return sign_a * xp, sign_b * yp

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)

    def __str__(self):
        return '%s' % self.v
class Polynomial:
    """
    Generic class for polynomials
    Works with int, float and ModInt

    Coefficients are stored low degree first in ``self.C`` with trailing
    zeros trimmed, so the zero polynomial has ``C == []`` and ``deg == -1``.
    """
    def __len__(self):
        return len(self.C)

    def trim(C):
        # Drop trailing zero coefficients so the representation is canonical.
        i = len(C) - 1
        while i >= 0 and not C[i]:
            i -= 1
        return C[:i + 1]

    def __init__(self, C=None):
        if C is None:
            C = []
        self.C = Polynomial.trim(C)

    @property
    def deg(self):
        # Degree; -1 for the zero polynomial.
        return len(self.C) - 1

    # Formal derivative.
    def prime(self): return Polynomial([i * self[i]
                                        for i in range(1, len(self))])

    def eval(self, x):
        """Evaluate at x by Horner's scheme."""
        if not self:
            return 0
        v = self[-1]
        for c in self[-2::-1]:
            v = v * x + c
        return v

    # Multiply by X**d (prepend d zeros of the coefficient field).
    def shift(self, d): return Polynomial(
        [0 * self[0]] * d + self.C if self else [])

    def __eq__(P, Q):
        return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))

    def __hash__(self):
        return hash(tuple(self.C))

    def __call__(self, x): return Polynomial.eval(self, x)

    def __getitem__(self, x): return self.C[x]

    def __neg__(P): return Polynomial([-c for c in P.C])

    def __add__(P, Q):
        # Ensure P is the longer operand, then add coefficient-wise.
        if len(P.C) < len(Q.C):
            P, Q = Q, P
        return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])

    def __sub__(P, Q): return P + (-Q)

    def _mulpoly(P, Q):
        # Schoolbook convolution of the coefficient lists.
        assert isinstance(Q, Polynomial)
        return Polynomial([sum(P[k] * Q[d - k]
                               for k in range(max(0, d + 1 - len(Q)),
                                              min(d + 1, len(P)))
                               ) for d in range(len(P) + len(Q) - 1)])

    def _mulscal(P, k):
        # Multiplication by a scalar.
        return Polynomial([k * c for c in P])

    def __mul__(P, Q):
        if isinstance(Q, Polynomial):
            return P._mulpoly(Q)
        return P._mulscal(Q)

    def __rmul__(P, Q):
        return P * Q

    def __pow__(P, k):
        # Binary exponentiation; k == 0 yields the int 1.
        assert isinstance(k, int)
        V = 1
        A = P
        while k:
            if k & 1:
                V *= A
            k >>= 1
            if not k:
                break
            A *= A
        return V

    def __iter__(self):
        yield from self.C

    def euclidean_division(A, B):
        """Return (quotient, remainder) of A divided by B."""
        Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
        while len(A.C) >= len(B.C):
            # Cancel the leading term of A at each step.
            Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
            A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])

        return Polynomial(Q), A

    def __floordiv__(A, B):
        assert isinstance(B, Polynomial)
        return A.euclidean_division(B)[0]

    def __mod__(A, B):
        """
        Polynomial euclidian division
        or modular reduction

        ``A % Polynomial`` is the division remainder; ``A % int`` maps the
        (integer) coefficients into Z/pZ.
        """
        if isinstance(B, Polynomial):
            return A.euclidean_division(B)[1]
        else:
            assert isinstance(B, int)
            assert all(isinstance(c, int) for c in A)
            return A.reduceP(B)

    def __lt__(A, B): return A.deg < B.deg

    def __bool__(self): return bool(self.C)

    def gcd(A, B):
        # Euclid's algorithm; result is normalized to be monic.
        while B:
            A, B = B, A % B
        return A * (1 / A[-1])

    @staticmethod
    def gaussianElimKer(M, zero, one):
        """
        Outputs an element of the kernel of M
        zero and one are elements of the same field
        """
        # V satisfies the invariant
        # M = V M_0
        V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
        pivots = [None] * (len(M) + 1)
        for l in range(len(M)):
            while M[l].deg >= 0:
                idp = M[l].deg
                if pivots[idp] is None:
                    pivots[idp] = l
                    break
                else:
                    # Eliminate the leading coefficient using the pivot row.
                    c = M[l][idp] / M[pivots[idp]][idp]
                    M[l] -= c * M[pivots[idp]]
                    V[l] -= c * V[pivots[idp]]
            else:
                # If a line is null, we found an element of the kernel
                return V[l]
        return None

    def computeQ(P):
        # only for Z/pZ[X] square-free polynoms, for p prime
        p = P[0].n
        # We ignore the image of 1 because (F-Id)(1) = 0
        M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
             for i in range(1, P.deg)]
        # M -= Id
        for i in range(1, P.deg):
            M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
        # We find an element of the kernel by Gaussian elimination
        pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
        # We put back the 1 that was removed
        return pQ.shift(1) if pQ is not None else None

    def factor_unit(P):
        """
        Berlekamp's algorithm
        only in Z/pZ

        Returns a ``defaultdict`` mapping monic irreducible factors to
        their multiplicities; P must be monic with ModInt coefficients.
        """
        assert all(isinstance(c, ModInt) for c in P)
        assert len(set(c.n for c in P)) == 1
        if P.deg == 1:
            return defaultdict(int, {P: 1})
        p = P[0].n

        S = Polynomial.gcd(P, P.prime())
        if S.deg == P.deg:
            # P' = 0 so P = R^p
            R = Polynomial(P.C[::p])
            return defaultdict(int,
                               {D: p * v
                                for D, v in Polynomial.factor_unit(R).items()})
        else:
            factors = defaultdict(int)
            if S.deg:
                # Repeated part: factor it, then continue with the square-free part.
                for D, v in S.factor_unit().items():
                    factors[D] += v
                P //= S

            # P is now square-free
            # We look for Q in Ker(F-Id) \ {1}
            Q = Polynomial.computeQ(P)
            if Q is None:
                # P is irreducible
                factors[P] += 1
            else:
                # P is the product of the gcd(P, Q-i)
                # that are factored recursively
                for i in range(p):
                    D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)]))
                    if D.deg:
                        for DD, v in D.factor_unit().items():
                            factors[DD] += v
            return factors

    def factor(P):
        """
        Factorization of P
        only in Z/pZ

        Returns ``(leading coefficient, factors dict)``.
        """
        cd = P[-1]
        if P.deg == 0:
            return (cd, defaultdict(int))
        P = P * (1 / cd)
        return (cd, P.factor_unit())

    @staticmethod
    def ppfactors(fz):
        # Pretty-print a (constant, factors) pair produced by factor().
        c, Ds = fz
        a = str(c) if not Ds or c * c != c else ''
        l = [a] + [(str(D) if D.deg == 1 and not D[0] else ('(%s)' % D))
                   + (v > 1) * ('^%s' % v)
                   for D, v in sorted(Ds.items(),
                                      key=lambda e: (e[0].deg, e[1]))]
        return '⋅'.join(i for i in l if i)

    def reduceP(P, p):
        # Map integer coefficients into Z/pZ.
        return Polynomial([ModInt(c, p) for c in P])

    @staticmethod
    def sign_changes(l):
        # Number of sign alternations in a sequence (zeros break a pair).
        return sum(a * b < 0 for a, b in zip(l, l[1:]))

    def isreal(P):
        return not any(isinstance(c, ModInt) for c in P)

    def isinteger(P):
        return all(isinstance(c, int) for c in P)

    def sturm(P):
        """
        Number of distinct real roots
        by Sturm's theorem.
        Only works on int or float coefficients
        """
        inf = float('inf')
        assert P.isreal()
        A = P
        B = A.prime()
        # Evaluate the Sturm sequence at -inf and +inf.
        l1 = [A(-inf)]
        l2 = [A(inf)]
        while B:
            l1.append(B(-inf))
            l2.append(B(inf))
            B, A = -A % B, B
        return Polynomial.sign_changes(l1) - Polynomial.sign_changes(l2)

    @property
    def r1(P):
        """
        Number of real roots with multiplicity
        """
        assert P.isreal()
        ans = 0
        s = P.sturm()
        while s:
            # Strip one multiplicity layer per iteration via gcd(P, P').
            ans += s
            P = P.gcd(P.prime())
            s = P.sturm()
        return ans

    @property
    def r2(P):
        # Number of pairs of complex-conjugate roots (with multiplicity).
        ans = P.deg - P.r1
        assert ans % 2 == 0
        return ans // 2

    def sylvester(P, Q):
        """
        Sylvester's matrix
        """
        assert P.isreal()
        assert Q.isreal()
        p = P.deg
        q = Q.deg
        P = np.array(P)
        Q = np.array(Q)
        m = np.zeros((p + q, p + q))
        for i in range(q):
            m[i][i:i + p + 1] = P
        for i in range(p):
            m[q + i][i:i + q + 1] = Q
        return m

    def resultant(P, Q):
        """
        Resultant of two real polynomials
        """
        return np.linalg.det(P.sylvester(Q))

    @property
    def disc(P):
        """
        Discriminant of a real polynomial
        """
        ans = P.resultant(P.prime()) / P[-1]
        if P.isinteger():
            ans = int(ans.round())
        # Sign correction factor (-1)^(n(n-1)/2) by degree mod 4.
        if P.deg % 4 in [0, 1]:
            return ans
        else:
            return -ans

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.C)

    @staticmethod
    def _formatmonomial(c, d):
        # Render one non-zero monomial c*X^d for __str__.
        assert c
        a = b = ''
        if c * c != c or not d:
            a = str(c) + (d != 0) * '⋅'
        if d > 1:
            b = 'X^' + str(d)
        elif d == 1:
            b = 'X'
        return a + b

    def __str__(self):
        if not self.C:
            return "0"
        ans = '+'.join(self._formatmonomial(c, d)
                       for (d, c) in reversed(list(enumerate(self))) if c)
        # Clean up "+-" joins and unit coefficients.
        return ans.replace("+-", "-").replace('-1⋅', '-')
# end of file
# tests/test_prelude_tagblock.py (iwschris/ezodf2)
#!/usr/bin/env python
#coding:utf-8
# Purpose: test node organizer
# Created: 31.01.2011
# Copyright (C) 2011, <NAME>
# License: MIT
from __future__ import unicode_literals, print_function, division
__author__ = "mozman <<EMAIL>>"
import unittest
# test helpers
from mytesttools import create_node
# objects to test
from ezodf2.nodeorganizer import PreludeTagBlock
def getPTB(nodes, preludetags='abc'):
    """Build a PreludeTagBlock over a freshly created node tree."""
    root = create_node(nodes)
    return PreludeTagBlock(root, preludetags)
class CounterAdapter:
    """Adapter counting prelude tags of a node tree via PreludeTagBlock."""

    def __init__(self, tags='abc'):
        self.tags = list(tags)

    def count(self, nodes):
        block = PreludeTagBlock(create_node(nodes), self.tags)
        return len(block)
class TestPreludeTagBlockLen(unittest.TestCase):
    """len() of a prelude block counts only the leading run of prelude tags."""

    def setUp(self):
        self.ptb = CounterAdapter(list('abc'))

    def test_aabbcc(self):
        self.assertEqual(self.ptb.count('aabbccddeeff'), 6)

    def test_bbcc(self):
        self.assertEqual(self.ptb.count('bbccddeeff'), 4)

    def test_cc(self):
        self.assertEqual(self.ptb.count('ccddeeff'), 2)

    def test_only_aa(self):
        self.assertEqual(self.ptb.count('aa'), 2)

    def test_only_bb(self):
        self.assertEqual(self.ptb.count('bb'), 2)

    def test_only_cc(self):
        self.assertEqual(self.ptb.count('cc'), 2)

    def test_no_tags(self):
        self.assertEqual(self.ptb.count(''), 0)

    def test_no_matches(self):
        self.assertEqual(self.ptb.count('ddeeff'), 0)

    def test_invalid_prelude(self):
        # A non-prelude tag in front invalidates the whole prelude.
        self.assertEqual(self.ptb.count('dadeeff'), 0)
class TestPreludeTagBlockBasics(unittest.TestCase):
    """Constructor validation and len() behavior of PreludeTagBlock."""

    def test_xmlnode_is_none_error(self):
        with self.assertRaises(ValueError):
            PreludeTagBlock(None, '')

    def test_no_prelude_tags(self):
        with self.assertRaises(ValueError):
            PreludeTagBlock(create_node('abc'), '')

    def test_unique_order_tags(self):
        # Duplicate tags in the prelude order are rejected.
        with self.assertRaises(ValueError):
            PreludeTagBlock(create_node('abc'), 'abcc')

    def test_all_tags_exist(self):
        ptb = getPTB('aabbccghixyz')
        self.assertEqual(len(ptb), 6)

    def test_only_aa(self):
        ptb = getPTB('aaghixyz')
        self.assertEqual(len(ptb), 2)

    def test_only_bb(self):
        ptb = getPTB('bbghixyz')
        self.assertEqual(len(ptb), 2)

    def test_only_cc(self):
        ptb = getPTB('ccghixyz')
        self.assertEqual(len(ptb), 2)

    def test_prelude_only_tree(self):
        ptb = getPTB('aabbcc')
        self.assertEqual(len(ptb), 6)

    def test_without_prelude(self):
        ptb = getPTB('ghixyz')
        self.assertEqual(len(ptb), 0)

    def test_empty_tree(self):
        ptb = getPTB('')
        self.assertEqual(len(ptb), 0)

    def test_from_not_well_formed_tree(self):
        # A leading non-prelude tag means there is no prelude at all.
        ptb = getPTB('haabbccghixyz')
        self.assertEqual(len(ptb), 0)
class TestPreludeTagBlockInfo(unittest.TestCase):
    """tag_info() returns (start index, count) for a tag inside the prelude."""

    def test_tag_info_a(self):
        ptb = getPTB('aabbccghixyz')
        start_index, count = ptb.tag_info('a')
        self.assertEqual((0, 2), (start_index, count))

    def test_tag_info_b(self):
        ptb = getPTB('aabbbccghixyz')
        start_index, count = ptb.tag_info('b')
        self.assertEqual((2, 3), (start_index, count))

    def test_tag_info_for_not_existing_tag(self):
        # Missing tag reports (-1, 0) rather than raising.
        ptb = getPTB('aabbbghixyz')
        start_index, count = ptb.tag_info('c')
        self.assertEqual((-1, 0), (start_index, count))

    def test_tag_info_invalid_tag_error(self):
        # A tag outside the declared prelude set is an error.
        ptb = getPTB('aabbbghixyz')
        with self.assertRaises(ValueError):
            ptb.tag_info('d')

    def test_tag_info_tag_not_in_prelude_block(self):
        # 'c' exists in the tree but after a non-prelude tag, so it doesn't count.
        ptb = getPTB('aabbbgccixyz')
        start_index, count = ptb.tag_info('c')
        self.assertEqual((-1, 0), (start_index, count))
class TestPreludeTagBlockInsertPositionBefore(unittest.TestCase):
    """insert_position_before() picks the index just ahead of a tag group."""

    def test_tag_error(self):
        tb = getPTB('aabbccghixxyyzz')
        with self.assertRaises(ValueError):
            tb.insert_position_before('d')

    def test_before_existing_tag(self):
        tb = getPTB('aabbccghixxyyzz')
        self.assertEqual(tb.insert_position_before('a'), 0)
        self.assertEqual(tb.insert_position_before('b'), 2)
        self.assertEqual(tb.insert_position_before('c'), 4)

    def test_before_not_existing_tag(self):
        # Missing 'b' falls back to where the 'b' group would start.
        tb = getPTB('aaccghixxyyzz')
        self.assertEqual(tb.insert_position_before('a'), 0)
        self.assertEqual(tb.insert_position_before('b'), 2)
        self.assertEqual(tb.insert_position_before('c'), 2)

    def test_without_prelude(self):
        tb = getPTB('ghixxyyzz')
        self.assertEqual(tb.insert_position_before('a'), 0)
        self.assertEqual(tb.insert_position_before('b'), 0)
        self.assertEqual(tb.insert_position_before('c'), 0)

    def test_for_empty_node(self):
        tb = getPTB('')
        self.assertEqual(tb.insert_position_before('a'), 0)
        self.assertEqual(tb.insert_position_before('b'), 0)
        self.assertEqual(tb.insert_position_before('c'), 0)
class TestPreludeTagBlockInsertPositionAfter(unittest.TestCase):
    """insert_position_after() picks the index just past a tag group."""

    def test_tag_error(self):
        tb = getPTB('aabbccghixxyyzz')
        with self.assertRaises(ValueError):
            tb.insert_position_after('d')

    def test_after_existing_tag(self):
        tb = getPTB('aabbccghixxyyzz')
        self.assertEqual(tb.insert_position_after('a'), 2)
        self.assertEqual(tb.insert_position_after('b'), 4)
        self.assertEqual(tb.insert_position_after('c'), 6)

    def test_after_not_existing_tag(self):
        # Missing 'b' resolves to the end of the preceding group.
        tb = getPTB('aaccghixxyyzz')
        self.assertEqual(tb.insert_position_after('a'), 2)
        self.assertEqual(tb.insert_position_after('b'), 2)
        self.assertEqual(tb.insert_position_after('c'), 4)

    def test_without_prelude(self):
        tb = getPTB('ghixxyyzz')
        self.assertEqual(tb.insert_position_after('a'), 0)
        self.assertEqual(tb.insert_position_after('b'), 0)
        self.assertEqual(tb.insert_position_after('c'), 0)

    def test_for_empty_node(self):
        tb = getPTB('')
        self.assertEqual(tb.insert_position_after('a'), 0)
        self.assertEqual(tb.insert_position_after('b'), 0)
        self.assertEqual(tb.insert_position_after('c'), 0)

    def test_after_all_prelude_tags(self):
        # No argument means "after the whole prelude block".
        tb = getPTB('aabbccghixxyyzz')
        self.assertEqual(tb.insert_position_after(), 6)
if __name__=='__main__':
    # Discover and run all test cases in this module.
    unittest.main()
| 2.453125 | 2 |
clevr_dataloader.py | CatarauCorina/representation_learning | 0 | 12774027 | import os
import torch
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.utils.data import Dataset
import cv2
from PIL import Image
class CustomDataSet(Dataset):
    """CLEVR image dataset: loads per-split PNG frames as float32 tensors.

    Args:
        main_dir: Dataset root containing an ``images/<split>`` directory.
        type: Split name ('train', 'val', ...); used both as the sub-directory
            and in the ``CLEVR_<type>_<idx>`` file-name pattern. (Name kept
            for backward compatibility even though it shadows the builtin.)
        resolution: Target (height, width) for the resize transform.
    """

    def __init__(self, main_dir, type='train', resolution=(128, 128)):
        self.main_dir = main_dir
        self.root_dir = main_dir
        self.img_dir = os.path.join(self.root_dir, "images")
        self.work_img_dir = os.path.join(self.img_dir, type)
        self.all_imgs = os.listdir(self.work_img_dir)
        self.type = type
        # Resize -> grayscale -> uint8 tensor of shape (1, H, W).
        self.compose = transforms.Compose(
            [transforms.Resize(resolution),
             transforms.Grayscale(),
             transforms.PILToTensor()]
        )

    def __len__(self):
        return len(self.all_imgs)

    def __getitem__(self, idx):
        # CLEVR files are named e.g. CLEVR_train_000004.png.
        img_name = f'CLEVR_{self.type}_{str(idx).zfill(6)}.png'
        # os.path.join instead of a hard-coded '\\' so the path works on
        # every platform, not only Windows.
        img_path = os.path.join(self.work_img_dir, img_name)
        img = Image.open(img_path)
        img = img.convert('RGB')
        tensor_image = self.compose(img)
        # tensor_image = ((tensor_image / 255.0) - 0.5) * 2.0 # Rescale to [-1, 1].
        # PILToTensor already returns a tensor; .to() converts the dtype
        # without the redundant copy (and UserWarning) that
        # torch.tensor(existing_tensor) would incur.
        return tensor_image.to(torch.float32)
# def main():
# root_dir = os.path.join(os.path.dirname(os.getcwd()),"CLEVR_v1.0\\CLEVR_v1.0\\")
# df = CustomDataSet(root_dir)
# img = df[4]
# plt.imshow(img.permute(1, 2, 0))
# plt.show()
#
# if __name__ == '__main__':
# main() | 2.84375 | 3 |
examples/sentiment_analysis/sentiment_analysis.py | ruanchaves/word_segmentation | 1 | 12774028 | <reponame>ruanchaves/word_segmentation<gh_stars>1-10
import json
from dataclasses import dataclass, field
import logging
import os
import sys
from torch import nn
import torch
from contextlib import suppress
from pythonjsonlogger import jsonlogger
import datasets
from datasets import (
load_dataset,
load_metric
)
import transformers
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
HfArgumentParser
)
from word_segmentation import (
WordSegmenter,
DataArguments,
TextClassificationArguments,
WordSegmenterArguments
)
import copy
import functools
logger = logging.getLogger(__name__)
def rsetattr(obj, attr, val):
    """Set a possibly dotted attribute path (e.g. ``"a.b.c"``) on *obj*."""
    prefix, _, leaf = attr.rpartition('.')
    target = rgetattr(obj, prefix) if prefix else obj
    return setattr(target, leaf, val)
def rgetattr(obj, attr, *args):
    """Get a possibly dotted attribute path on *obj*.

    An optional extra positional argument acts as the default for every
    ``getattr`` step, mirroring the builtin's signature.
    """
    def _resolve(current, name):
        return getattr(current, name, *args)
    return functools.reduce(_resolve, attr.split('.'), obj)
def process_rows(
        batch,
        model=None,
        tokenizer=None,
        content_field="content",
        predictions_field="predictions",
        max_length=512,
        device="cuda",
        return_shape=False):
    """Classify one batch of texts with a sequence-classification model.

    Args:
        batch: Mapping holding a list of strings under ``content_field``;
            it is updated in place with the predicted labels.
        model: A loaded ``AutoModelForSequenceClassification`` residing on
            ``device``.
        tokenizer: The tokenizer matching ``model``.
        content_field: Key of the input texts inside ``batch``.
        predictions_field: Key under which string labels are written.
        max_length: Tokenizer truncation length.
        device: Device the tokenized inputs are moved to before inference.
        return_shape: If True, return the model's number of output labels
            instead of writing predictions (used to probe the label space).

    Returns:
        The mutated ``batch``, or an int label count when ``return_shape``.
    """
    sentences = batch[content_field]
    tokens = tokenizer(
        sentences,
        padding=True,
        truncation=True,
        return_tensors="pt",
        max_length=max_length
    )
    tokens = tokens.to(device)
    logits = model(**tokens).logits
    # Move to CPU before post-processing so the python-side loop below does
    # not touch GPU tensors.
    logits = logits.to("cpu")
    softmax_logits = torch.softmax(logits, dim=1)
    if return_shape:
        return logits.shape[1]
    _, preds = torch.max(softmax_logits, 1)
    preds = preds.tolist()
    if logits.shape[1] == 2:
        # Binary models emit {0, 1}; remap positive 1 -> 2, presumably to
        # line up with a 3-way {0: negative, 1: neutral, 2: positive}
        # encoding -- TODO confirm against the dataset's label scheme.
        for idx, item in enumerate(preds):
            if item == 1:
                preds[idx] = 2
    # Labels are stored as strings to match the reference column's dtype.
    preds = [ str(x) for x in preds ]
    batch.update({predictions_field: preds})
    return batch
def eval_dataset(
        data,
        split="test",
        reference_field="polarity",
        predictions_field="predictions",
        metric="./sentiment_metrics.py"):
    """Compute classification metrics for one split of *data*.

    ``metric`` is a path or name accepted by ``datasets.load_metric``.
    Returns the dict produced by the metric's ``compute``.
    """
    split_data = data[split]
    scorer = load_metric(metric)
    return scorer.compute(
        predictions=split_data[predictions_field],
        references=split_data[reference_field],
    )
def deleteEncodingLayers(model, num_layers_to_keep):  # must pass in the full bert model
    """Return a deep copy of *model* keeping only its first encoder layers.

    Supports architectures that expose the transformer stack either as
    ``<model_type>.encoder.layer`` or ``<model_type>.transformer.layer``.

    Args:
        model: A HuggingFace model whose config exposes ``model_type``.
        num_layers_to_keep: Number of leading encoder layers to retain.

    Raises:
        NotImplementedError: If neither known attribute path exists.
    """
    model_type = rgetattr(model, "config.model_type")
    oldModuleList = None
    keys = ['encoder', 'transformer']
    for item in keys:
        # Try each known attribute path; suppress() skips the ones that
        # this architecture does not have.
        with suppress(AttributeError):
            oldModuleList = rgetattr(model, f"{model_type}.{item}.layer")
    if not oldModuleList:
        raise NotImplementedError
    newModuleList = nn.ModuleList()
    # Keep only the first num_layers_to_keep layers.
    for i in range(0, num_layers_to_keep):
        newModuleList.append(oldModuleList[i])
    # Create a copy of the model, splice in the truncated list, and return.
    copyOfModel = copy.deepcopy(model)
    for item in keys:
        # Write the truncated stack back on whichever path exists.
        with suppress(AttributeError):
            rsetattr(copyOfModel, f"{model_type}.{item}.layer", newModuleList)
    return copyOfModel
def main():
    """End-to-end pipeline: parse args, optionally segment hashtags, run the
    (optionally layer-pruned) sentiment classifier, evaluate, and persist
    the dataset / hashtag dictionary."""
    parser = HfArgumentParser((TextClassificationArguments, WordSegmenterArguments, DataArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        class_args, ws_args, data_args = \
            parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        class_args, ws_args, data_args = \
            parser.parse_args_into_dataclasses()
    # Emit structured JSON log lines on stdout.
    logHandler = logging.StreamHandler(sys.stdout)
    formatter = jsonlogger.JsonFormatter()
    logHandler.setFormatter(formatter)
    logger.addHandler(logHandler)
    # Setup logging
    # logging.basicConfig(
    #     format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    #     datefmt="%m/%d/%Y %H:%M:%S",
    #     handlers=[logging.StreamHandler(sys.stdout)],
    # )
    logger.setLevel(data_args.log_level)
    datasets.utils.logging.set_verbosity(data_args.log_level)
    transformers.utils.logging.set_verbosity(data_args.log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Resume a previously computed hashtag -> segmentation dictionary if given.
    if ws_args.hashtag_dict_load_path:
        with open(ws_args.hashtag_dict_load_path, 'r') as f:
            main_hashtag_dict = json.load(f)
    else:
        main_hashtag_dict = {}
    if ws_args.run_segmenter:
        ws = WordSegmenter(
            segmenter_model_name_or_path=ws_args.segmenter_model_name_or_path,
            segmenter_model_type=ws_args.segmenter_model_type,
            segmenter_device=ws_args.segmenter_device,
            segmenter_gpu_batch_size=ws_args.segmenter_gpu_batch_size,
            reranker_model_name_or_path=ws_args.reranker_model_name_or_path,
            reranker_model_type=ws_args.reranker_model_type,
            spacy_model=ws_args.spacy_model
        )
    else:
        ws = None
    if data_args.dataset_load_path:
        logger.info("Loading dataset from disk.")
        data = datasets.load_from_disk(data_args.dataset_load_path)
    else:
        logger.info("Loading dataset with reader.")
        data = load_dataset(data_args.dataset_reader, url=data_args.dataset_url)
    # Optionally restrict the split to the first `sample` rows (for debugging).
    if data_args.sample:
        data[data_args.split] = data[data_args.split]\
            .select([i for i in range(0, data_args.sample)])
    split_length = data[data_args.split].shape[0]
    def segment_content(
            batch,
            segmenter=None,
            content_field="content",
            segmented_content_field="segmented_content",
            topk=20,
            steps=13,
            alpha=0.222,
            beta=0.111,
            use_reranker=True,
            dictionary=None):
        # Closure used with datasets.map; also accumulates every hashtag
        # segmentation it discovers into the outer main_hashtag_dict.
        sentences = batch[content_field]
        segmented_content, hashtag_dict = segmenter.process_hashtags(
            sentences,
            topk=topk,
            steps=steps,
            alpha=alpha,
            beta=beta,
            use_reranker=use_reranker,
            dictionary=dictionary)
        segmented_content = [ " ".join(x) for x in segmented_content]
        batch.update({segmented_content_field: segmented_content})
        main_hashtag_dict.update(hashtag_dict)
        return batch
    if ws_args.run_segmenter:
        # batch_size == split length: segment the whole split in one call.
        data = data.map(
            segment_content,
            fn_kwargs={
                "segmenter": ws,
                "content_field": data_args.content_field,
                "segmented_content_field": data_args.segmented_content_field,
                "topk": ws_args.topk,
                "steps": ws_args.steps,
                "alpha": ws_args.alpha,
                "beta": ws_args.beta
            },
            batched=True,
            batch_size=split_length
        )
    if class_args.run_classifier:
        original_model = AutoModelForSequenceClassification.from_pretrained(class_args.sentiment_model)
        if class_args.prune_layers:
            model_range = len(class_args.prune_layers)
        else:
            model_range = 1
        tokenizer = AutoTokenizer.from_pretrained(class_args.sentiment_model)
    # NOTE(review): model_range and tokenizer are only bound when
    # run_classifier is True; this loop raises NameError otherwise --
    # presumably it should be nested under the if above. TODO confirm.
    for idx in range(0, model_range):
        if class_args.prune_layers:
            logger.info(f"Processing predictions for layer {class_args.prune_layers[idx]}.")
            model = deleteEncodingLayers(original_model, class_args.prune_layers[idx])
        else:
            model = original_model
        model.to(class_args.sentiment_model_device)
        # Probe the model's label space with a dummy input.
        num_labels = process_rows(
            {"content": ["hello world"]},
            model=model,
            tokenizer=tokenizer,
            content_field="content",
            return_shape=True
        )
        if num_labels == 2:
            # Binary model: drop neutral rows (label "1") from the data.
            data = data.filter(lambda x: x[data_args.label_field]!="1")
        elif num_labels == 3:
            pass
        else:
            raise NotImplementedError
        if class_args.run_classifier and data_args.predictions_field:
            logger.info("Writing predictions on the dataset.")
            data = data.map(
                process_rows,
                fn_kwargs={
                    "model": model,
                    "tokenizer": tokenizer,
                    "content_field": data_args.content_field,
                    "predictions_field": data_args.predictions_field
                },
                batched=True,
                batch_size=class_args.batch_size,
                keep_in_memory=True)
        if class_args.run_classifier and data_args.segmented_predictions_field:
            logger.info("Writing processed predictions on the dataset.")
            data = data.map(
                process_rows,
                fn_kwargs={
                    "model": model,
                    "tokenizer": tokenizer,
                    "content_field": data_args.segmented_content_field,
                    "predictions_field": data_args.segmented_predictions_field
                },
                batched=True,
                batch_size=class_args.batch_size,
                keep_in_memory=True)
        if data_args.do_eval:
            # Evaluate both on the full split and on the rows with hashtags.
            data_subset = data.filter(lambda x: x['has_hashtag'])
            dataset_evaluation_params = {
                "split": data_args.split,
                "reference_field": data_args.label_field,
                "metric": class_args.metrics
            }
            if data_args.predictions_field:
                subset_evaluation = eval_dataset(
                    data_subset,
                    predictions_field=data_args.predictions_field,
                    **dataset_evaluation_params)
                subset_evaluation.update({
                    "eval": "subset_evaluation"
                })
                full_evaluation = eval_dataset(
                    data,
                    predictions_field=data_args.predictions_field,
                    **dataset_evaluation_params)
                full_evaluation.update({
                    "eval": "full_evaluation"
                })
            if data_args.segmented_predictions_field:
                subset_evaluation_after_segmentation = eval_dataset(
                    data_subset,
                    predictions_field=data_args.segmented_predictions_field,
                    **dataset_evaluation_params)
                subset_evaluation_after_segmentation.update({
                    "eval": "subset_evaluation_after_segmentation"
                })
                full_evaluation_after_segmentation = eval_dataset(
                    data,
                    predictions_field=data_args.segmented_predictions_field,
                    **dataset_evaluation_params)
                full_evaluation_after_segmentation.update({
                    "eval": "full_evaluation_after_segmentation"
                })
            # Attach the full argument set to every evaluation record.
            log_args = {}
            for item in [class_args, ws_args, data_args]:
                log_args.update(vars(item))
            # NOTE(review): the four evaluation dicts below are only bound
            # when both predictions_field and segmented_predictions_field are
            # set; otherwise this loop raises NameError. TODO confirm.
            for item in [
                full_evaluation,
                subset_evaluation,
                subset_evaluation_after_segmentation,
                full_evaluation_after_segmentation]:
                item.update(log_args)
                if class_args.prune_layers:
                    item.update({"current_layer": class_args.prune_layers[idx]})
                logger.info(item)
    if data_args.dataset_save_path:
        data.save_to_disk(data_args.dataset_save_path)
    if ws_args.hashtag_dict_save_path:
        with open(ws_args.hashtag_dict_save_path, 'w') as f:
            json.dump(main_hashtag_dict, f)
if __name__ == '__main__':
    # Script entry point.
    main()
official/projects/edgetpu/vision/configs/mobilenet_edgetpu_config.py | 62theories/tf-flask | 82,518 | 12774029 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
# type: ignore
"""Configuration definitions for MobilenetEdgeTPU losses, learning rates, optimizers, and training."""
import dataclasses
import os
from typing import Any, Mapping, Optional
# Import libraries
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.beta.configs import common
from official.vision.beta.configs import image_classification as base_config
@dataclasses.dataclass
class MobilenetEdgeTPUModelConfig(base_config.ImageClassificationModel):
  """Configuration for the MobilenetEdgeTPU model.

  Attributes:
    name: The name of the model. Defaults to 'MobilenetEdgeTPU'.
    model_params: A dictionary that represents the parameters of the
      EfficientNet model. These will be passed in to the "from_name" function.
  """
  # Defaults target the V2-XS variant with TPU batch norm and bfloat16;
  # 1001 classes = ImageNet's 1000 labels plus the background class.
  model_params: Mapping[str, Any] = dataclasses.field(
      default_factory=lambda: {  # pylint: disable=g-long-lambda
          'model_name': 'mobilenet_edgetpu_v2_xs',
          'model_weights_path': '',
          'checkpoint_format': 'tf_checkpoint',
          'overrides': {
              'batch_norm': 'tpu',
              'num_classes': 1001,
              'rescale_input': False,
              'dtype': 'bfloat16'
          }
      })
@dataclasses.dataclass
class MobilenetEdgeTPUTaskConfig(base_config.ImageClassificationTask):
  """Task definition for MobileNetEdgeTPU.

  Attributes:
    model: A `ModelConfig` instance.
    saved_model_path: Instead of initializing a model from the model config,
      the model can be loaded from a file path.
  """
  model: MobilenetEdgeTPUModelConfig = MobilenetEdgeTPUModelConfig()
  saved_model_path: Optional[str] = None
# ImageNet-2012 (ILSVRC) dataset statistics and TFRecord location.
IMAGENET_TRAIN_EXAMPLES = 1281167
IMAGENET_VAL_EXAMPLES = 50000
IMAGENET_INPUT_PATH_BASE = 'imagenet-2012-tfrecord'
def mobilenet_edgetpu_base_experiment_config(
    model_name: str) -> cfg.ExperimentConfig:
  """Image classification on imagenet with mobilenet_edgetpu.

  Experiment config common across all mobilenet_edgetpu variants.

  Args:
    model_name: Name of the mobilenet_edgetpu model variant

  Returns:
    ExperimentConfig
  """
  train_batch_size = 4096
  eval_batch_size = 4096
  steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
  mobilenet_edgetpu_config = MobilenetEdgeTPUModelConfig(
      num_classes=1001, input_size=[224, 224, 3])
  mobilenet_edgetpu_config.model_params.model_name = model_name
  config = cfg.ExperimentConfig(
      task=MobilenetEdgeTPUTaskConfig(
          model=mobilenet_edgetpu_config,
          losses=base_config.Losses(label_smoothing=0.1),
          train_data=base_config.DataConfig(
              input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              dtype='bfloat16',
              aug_type=common.Augmentation(type='autoaug')),
          validation_data=base_config.DataConfig(
              input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
              is_training=False,
              dtype='bfloat16',
              drop_remainder=False,
              global_batch_size=eval_batch_size)),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch * 5,
          max_to_keep=10,
          train_steps=550 * steps_per_epoch,
          validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          # RMSProp with EMA; exponential LR decay plus a linear warmup.
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'rmsprop',
                  'rmsprop': {
                      'rho': 0.9,
                      'momentum': 0.9,
                      'epsilon': 0.001,
                  }
              },
              'ema': {
                  'average_decay': 0.99,
                  'trainable_weights_only': False,
              },
              'learning_rate': {
                  'type': 'exponential',
                  'exponential': {
                      # Base LR scales linearly with the global batch size.
                      'initial_learning_rate':
                          0.008 * (train_batch_size // 128),
                      'decay_steps':
                          int(2.4 * steps_per_epoch),
                      'decay_rate':
                          0.97,
                      'staircase':
                          True
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              },
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
# Registration for MobileNet-EdgeTPU-Search models.
# When this config is used, users need to specify the saved model path via
# --params_override=task.saved_model_path='your/saved_model/path/'.
@exp_factory.register_config_factory('mobilenet_edgetpu_search')
def mobilenet_edgetpu_search() -> cfg.ExperimentConfig:
  """Experiment config for the architecture-search MobileNet-EdgeTPU model."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_search')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_tiny')
def mobilenet_edgetpu_v2_tiny() -> cfg.ExperimentConfig:
  """Experiment config for MobileNet-EdgeTPU-V2 Tiny."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_tiny')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_xs')
def mobilenet_edgetpu_v2_xs() -> cfg.ExperimentConfig:
  """Experiment config for MobileNet-EdgeTPU-V2 XS."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_xs')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_s')
def mobilenet_edgetpu_v2_s() -> cfg.ExperimentConfig:
  """Experiment config for MobileNet-EdgeTPU-V2 S."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_s')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_m')
def mobilenet_edgetpu_v2_m() -> cfg.ExperimentConfig:
  """Experiment config for MobileNet-EdgeTPU-V2 M."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_m')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_l')
def mobilenet_edgetpu_v2_l() -> cfg.ExperimentConfig:
  """Experiment config for MobileNet-EdgeTPU-V2 L."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_l')
# Registration for MobileNet-EdgeTPU-V1 models.
@exp_factory.register_config_factory('mobilenet_edgetpu')
def mobilenet_edgetpu() -> cfg.ExperimentConfig:
  """Experiment config for the baseline MobileNet-EdgeTPU-V1 model."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu')
# Registration for MobileNet-EdgeTPU-V1 models.
# We use 'depth_multiplier' to scale the models.
# E.g. dm1p25 implies depth multiplier of 1.25x
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p25')
def mobilenet_edgetpu_dm1p25() -> cfg.ExperimentConfig:
  """V1 model scaled with a 1.25x depth multiplier."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p25')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p5')
def mobilenet_edgetpu_dm1p5() -> cfg.ExperimentConfig:
  """V1 model scaled with a 1.5x depth multiplier."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p5')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p75')
def mobilenet_edgetpu_dm1p75() -> cfg.ExperimentConfig:
  """V1 model scaled with a 1.75x depth multiplier."""
  return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p75')
# Registration for AutoSeg-EdgeTPU backbones
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_xs')
def autoseg_edgetpu_backbone_xs() -> cfg.ExperimentConfig:
  """Experiment config for the AutoSeg-EdgeTPU XS backbone."""
  return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_xs')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_s')
def autoseg_edgetpu_backbone_s() -> cfg.ExperimentConfig:
  """Experiment config for the AutoSeg-EdgeTPU S backbone."""
  return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_s')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_m')
def autoseg_edgetpu_backbone_m() -> cfg.ExperimentConfig:
  """Experiment config for the AutoSeg-EdgeTPU M backbone."""
  return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_m')
| 1.828125 | 2 |
whales_model.py | avain/DeepLearningTutorial | 1 | 12774030 | <reponame>avain/DeepLearningTutorial
from keras.models import Sequential
from keras.layers import Flatten,Dense,Conv2D,MaxPooling2D
from keras.engine import Layer
import keras.backend as K
def my_ConvNet(input_shape):
    """Build an AlexNet-style convolutional feature extractor.

    Five ReLU conv layers with max-pooling after the 1st, 2nd and 5th,
    identical to spelling out each ``model.add`` by hand.
    """
    # (filters, kernel_size, strides, pool_after)
    layer_specs = [
        (96, (11, 11), 2, True),
        (256, 5, 2, True),
        (256, 3, 2, False),
        (384, 3, 1, False),
        (384, 3, 1, True),
    ]
    model = Sequential()
    for index, (filters, kernel, strides, pool_after) in enumerate(layer_specs):
        conv_kwargs = dict(
            filters=filters,
            kernel_size=kernel,
            strides=strides,
            padding='SAME',
            activation='relu',
        )
        if index == 0:
            # Only the first layer declares the input shape.
            conv_kwargs['input_shape'] = input_shape
        model.add(Conv2D(**conv_kwargs))
        if pool_after:
            model.add(MaxPooling2D(pool_size=2, strides=2))
    return model
class SoftmaxMap(Layer):
    """Softmax applied along an arbitrary axis of a spatial map.

    Unlike Keras' default softmax this accepts inputs shaped
    (batch_size, w, h, n_classes) and normalizes along ``axis``.
    """

    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super(SoftmaxMap, self).__init__(**kwargs)

    def build(self, input_shape):
        # No trainable weights to create.
        pass

    def call(self, x, mask=None):
        # Subtract the per-axis max for numerical stability.
        shifted = x - K.max(x, axis=self.axis, keepdims=True)
        numerator = K.exp(shifted)
        return numerator / K.sum(numerator, axis=self.axis, keepdims=True)

    def compute_output_shape(self, input_shape):
        # Softmax does not change the shape.
        return input_shape
| 3.390625 | 3 |
server.py | RobinKarlsson/protobuf-chat | 1 | 12774031 | <reponame>RobinKarlsson/protobuf-chat<filename>server.py<gh_stars>1-10
import protobuf
import select, socket, struct
class ChatServer:
    '''
    Simple TCP chat server (Python 2). Relays length-prefixed protobuf
    messages between connected clients grouped by channel.
    '''
    def __init__(self, host = "", port = 8942):
        self.port = port
        self.host = host
        #inet streaming server socket
        self.ssocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #bind to host and port
        self.ssocket.bind((self.host, self.port))
        #allow to queue up to 5 connection requests
        self.ssocket.listen(5)
        #list to store connected sockets
        self.connections = [self.ssocket]
        #dictionary to store basic data on connected peers
        self.peers = {}
        self.startServer()
    '''
    run server: multiplex all sockets with select() and dispatch events
    '''
    def startServer(self):
        print "server started on port %i" %self.port
        while True:
            # NOTE(review): a 0 timeout turns this into a busy-wait loop;
            # a blocking select (no timeout) would avoid spinning the CPU.
            #read socket, write socket, exception socket
            rsocket, wsocket, xsocket = select.select(self.connections, [], [], 0)
            for sock in rsocket:
                #new connection request
                if sock == self.ssocket:
                    self.newConnection(sock)
                else:
                    #data received
                    try:
                        self.dataReceived(sock)
                    #disconnected user
                    except struct.error:
                        self.closeConnection(sock)
    '''
    process a connection request: handshake, reject duplicate nicks,
    otherwise register the peer and announce it on its channel
    '''
    def newConnection(self, sock):
        #accept connection
        connsocket, address = sock.accept()
        #let the client identify itself
        user = self.receiveDataSet(connsocket)
        user = protobuf.getUsr(user)
        #if username is already taken
        if user.nick in [usr.nick for usr in self.peers.values()]:
            #connection denied ("n" = no)
            self.sendDataSet(connsocket, [protobuf.newMsg("n")])
            #close connection
            connsocket.shutdown(socket.SHUT_RDWR)
            connsocket.close()
            return
        #connection confirmed ("y" = yes)
        self.sendDataSet(connsocket, [protobuf.newMsg("y")])
        #store connected client in connections
        self.connections.append(connsocket)
        #dictionary with some info on the users
        self.peers[connsocket] = user
        #notify peers
        print address[0] + " connected"
        self.transmit(user.channel, protobuf.newMsg("server > " + user.nick + " (" + address[0] + ") joined the chat"))
    '''
    close and remove a disconnected socket, announcing the departure
    '''
    def closeConnection(self, sock):
        #remove from list of connected sockets
        self.connections.remove(sock)
        #notify peers
        print sock.getsockname()[0] + " disconnected"
        self.transmit(self.peers[sock].channel, protobuf.newMsg("server > " + self.peers[sock].nick + " (" + sock.getsockname()[0] + ") left the chat"))
        #remove from dict of users
        del self.peers[sock]
        #ensure socket gets closed
        sock.close()
    '''
    Read a incoming transmision and send it to all connected sockets on same channel
    '''
    def dataReceived(self, sock):
        #receive data
        msg = self.receiveDataSet(sock)
        msg = protobuf.getMsg(msg)
        #send data to peers in same channel
        # NOTE(review): msg is a protobuf message object here, so the
        # comparison against "" is always True; presumably the intent was
        # to test msg.text != "". TODO confirm.
        if msg != "":
            msg = self.peers[sock].nick + " > " + msg.text
            self.transmit(self.peers[sock].channel, protobuf.newMsg(msg))
    '''
    Receive a data set from server
    a data set consist of two transmissions.
        - struct, length of message to be sent
        - string, message
    Input:
        sock:   socket to receive from
    Return: protocol buffer message
    '''
    def receiveDataSet(self, sock):
        #receive a int struct specifying length of message
        # NOTE(review): recv(n) may return fewer than n bytes on a stream
        # socket; a robust framing layer would loop until the full length
        # prefix and payload have arrived.
        length = sock.recv(4)
        length = struct.unpack("I", length)[0]
        #receive message
        data = sock.recv(length)
        return data
    '''
    Send data sets to client
    a data set consist of two transmissions.
        - struct, length of message to be sent
        - string, message
    Input:
        sock:   socket to transmit on
        data:   List of protocol buffer message's to be transmitted
    '''
    def sendDataSet(self, sock, data):
        #send elements in data to socket
        for el in data:
            #serialize to string
            el = el.SerializeToString()
            #send int struct specifying length of message
            sock.send(struct.pack("I", len(el)))
            #send message
            sock.send(el)
    '''
    Transmit a message to all connected user on a channel
    input:
        channel:    channel to send on
        msg         protocol buffer message to send
    '''
    def transmit(self, channel, msg):
        for sock in self.connections:
            if sock != self.ssocket and self.peers[sock].channel == channel:
                self.sendDataSet(sock, [msg])
if __name__ == "__main__":
    # Instantiating the server binds the socket and enters the select()
    # loop immediately (see ChatServer.__init__).
    ChatServer()
| 2.90625 | 3 |
caldera/app/commands/command.py | m4l1c3/caldera | 3 | 12774032 | <reponame>m4l1c3/caldera
from typing import Union, List
class CommandLine(object):
def __init__(self, command_line: Union[str, List[str]] = None):
if command_line and isinstance(command_line, list):
command_line = ' '.join(command_line)
self.command_line = command_line
| 2.734375 | 3 |
tensorflow_impl/rsrcs/aggregator_tf/average.py | sahareslami/Garfield | 8 | 12774033 | import numpy as np
class Average:
    """Gradient aggregator that returns the coordinate-wise mean."""

    @staticmethod
    def aggregate(gradients):
        """Average a non-empty list of gradient arrays along axis 0."""
        assert len(gradients) > 0, "Empty list of gradient to aggregate"
        if len(gradients) == 1:
            # Single worker: nothing to average, return the gradient as-is.
            return gradients[0]
        return np.mean(gradients, axis=0)
| 3.265625 | 3 |
FbxPipeline/generated/apemodefb/EAnimCurvePropertyFb.py | johnfredcee/FbxPipeline | 72 | 12774034 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: apemodefb
class EAnimCurvePropertyFb(object):
    """Enum of FBX node properties an animation curve can target.

    Values mirror the FlatBuffers schema's ``EAnimCurvePropertyFb`` enum;
    this file is generated by the FlatBuffers compiler -- do not edit.
    """
    LclTranslation = 0
    RotationOffset = 1
    RotationPivot = 2
    PreRotation = 3
    PostRotation = 4
    LclRotation = 5
    ScalingOffset = 6
    ScalingPivot = 7
    LclScaling = 8
    GeometricTranslation = 9
    GeometricRotation = 10
    GeometricScaling = 11
| 1.179688 | 1 |
mscv/image/__init__.py | misads/mscv | 1 | 12774035 | <reponame>misads/mscv<filename>mscv/image/__init__.py
from .image_io import tensor2im

# Public API of the image subpackage.
__all__ = ['tensor2im']
| 1.1875 | 1 |
setup.py | rotdrop/rhasspy-wake-precise-hermes | 0 | 12774036 | <filename>setup.py
"""Setup file for rhasspywake_precise_hermes"""
from pathlib import Path
import setuptools
this_dir = Path(__file__).parent
with open(this_dir / "README.md") as readme_file:
long_description = readme_file.read()
with open(this_dir / "requirements.txt") as requirements_file:
requirements = requirements_file.read().splitlines()
with open(this_dir / "VERSION") as version_file:
version = version_file.read().strip()
module_dir = this_dir / "rhasspywake_precise_hermes"
model_dir = module_dir / "models"
model_files = [str(f.relative_to(module_dir)) for f in model_dir.rglob("*")]
setuptools.setup(
name="rhasspy-wake-precise-hermes",
version=version,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/rhasspy/rhasspy-wake-precise-hermes",
packages=setuptools.find_packages(),
package_data={"rhasspywake_precise_hermes": model_files + ["py.typed"]},
install_requires=requirements,
entry_points={
"console_scripts": [
"rhasspy-wake-precise-hermes = rhasspywake_precise_hermes.__main__:main"
]
},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
],
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=3.7",
)
| 1.835938 | 2 |
examples/example11.py | pyrate-build/pyrate-build | 41 | 12774037 | <gh_stars>10-100
#!/usr/bin/env pyrate
executable('example11.bin', 'test.cpp test.c foo.cpp', link_mode = 'direct') # automatic switching to 'single' mode
| 1.289063 | 1 |
src/main.py | chengkunli96/KinectFusion | 28 | 12774038 | <reponame>chengkunli96/KinectFusion<gh_stars>10-100
import matplotlib.pyplot as plt
from os.path import join as opj
import os
import json
import open3d as o3d
from data_loader import *
from kinect_fusion import *
from utils.pyrender_show import showMesh
from utils.open3d_show import showPointCloud
# for checking whether your defined volume is suitable
CHECK_VOLUME = True
# When True, save the experiment config plus per-frame point clouds / meshes.
LOGGING = True
if __name__ == '__main__':
    # Pipeline configuration: paths, pose-estimation method, TSDF settings.
    config_dict = {
        'camera_config_path': '../data/camera_config.txt',
        'dataset_path': '../data/rgbd_dataset_freiburg2_xyz/',
        'pose_estimation_method': 'icp method',  # alternative: "Park's method"
        'ICP': {
            'sample_rate': 0.2,
            'mode': 'frame2frame',
            'threshold': 0.1,
        },
        'truncation_distance': 1,
        'volume_resolution': 5,
        'color_method': 'by volume',
    }
    # for saving experiment's results
    out_dir = None
    exp_out_dir = None
    if LOGGING:
        out_dir = '../output/'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # Count existing experiment directories to pick the next index.
        dirnum = 0
        for lists in os.listdir(out_dir):
            sub_path = opj(out_dir, lists)
            if os.path.isdir(sub_path):
                dirnum = dirnum + 1
        # experiment
        exp_out_dir = opj(out_dir, 'experiment_{:02d}'.format(dirnum))
        if not os.path.exists(exp_out_dir):
            os.makedirs(exp_out_dir)
        # write down the experiment configuration
        json_str = json.dumps(config_dict, indent=4)
        with open(opj(exp_out_dir, 'configuration.json'), 'w') as json_file:
            json_file.write(json_str)
    camera = Camera(config_dict['camera_config_path'])
    dataset = Dataset(config_dict['dataset_path'])
    dataloader = DataLoader(dataset=dataset)
    kinect = KinectFusion()
    if config_dict['pose_estimation_method'] == 'icp method':
        kinect.configPosEstimationMethod('icp')
        kinect.configIcp(sample_rate=config_dict['ICP']['sample_rate'],
                         pose_estimation_mode=config_dict['ICP']['mode'],
                         threshold=config_dict['ICP']['threshold'])
    else:
        kinect.configPosEstimationMethod('Park')
    kinect.configTsdf(truncation_distance=config_dict['truncation_distance'])
    # Volume placement is hand-tuned for this dataset (see CHECK_VOLUME).
    kinect.configVolume(scale=(30, 30, 30),
                        shape=(config_dict['volume_resolution'],
                               config_dict['volume_resolution'],
                               config_dict['volume_resolution']),
                        origin=(-15, -17.5, 5))
    kinect.configMeshColorComputingMethod(config_dict['color_method'])
    if CHECK_VOLUME:
        # Visualize frame 0 against the volume to validate its placement.
        img = dataloader[0]
        frame = Frame(img['depth'], img['rgb'], camera.parameter)
        kinect.checkPositionOfVolumeAndFrame(frame)
        plt.show()
    for (i, img) in enumerate(dataloader):
        n = 0  # start from which frame
        if i > n:
            depth_img = img['depth']  # the raw depth image from Kinect device which is not scaled
            rgb_img = img['rgb']
            frame = Frame(depth_img, rgb_img, camera.parameter)
            kinect.run(frame)
            if LOGGING:
                # saving for each iteration
                pcd = kinect.getPointClould()
                o3d.io.write_point_cloud(opj(exp_out_dir, 'pointcloud_frame{:02d}.pcd'.format(i + 1)), pcd)
                mesh = kinect.getMesh()
                mesh.export(opj(exp_out_dir, 'mesh_frame{:02d}.obj'.format(i + 1)))
        # NOTE(review): once i - n reaches 20 this shows the result on every
        # remaining iteration; presumably a break was intended. TODO confirm.
        if i - n >= 20:
            # show the result
            pcd = kinect.getPointClould(sample_rate=0.2)
            showPointCloud(vertices=pcd.points, colors=pcd.colors)
            mesh = kinect.getMesh()
            showMesh(mesh)
| 2.15625 | 2 |
jug/tests/jugfiles/custom_hash_function.py | dombrno/jug | 309 | 12774039 | <gh_stars>100-1000
from jug import TaskGenerator
from jug.utils import CustomHash
# Global counter: lets test code verify how many times jug actually
# invoked the custom hash function.
hash_called = 0
def bad_hash(x):
    """Deliberately trivial hash: the stringified value, UTF-8 encoded."""
    global hash_called
    hash_called += 1
    return ('%s' % x).encode('utf-8')
@TaskGenerator
def double(x):
    """Jug task that returns twice its input."""
    return 2*x
# Hash the constant 1 with bad_hash instead of jug's default hasher, then
# feed the wrapped value into the task graph.
one = CustomHash(1, bad_hash)
two = double(one)
| 2.484375 | 2 |
recipes/wav2vec_collect.py | sciforce/phones-las | 35 | 12774040 | <filename>recipes/wav2vec_collect.py
from tqdm import tqdm
import h5py
import os
import argparse
import numpy as np
import tensorflow as tf
from preprocess_all import make_example
if __name__ == '__main__':
    # Convert wav2vec context features (one .h5context per utterance) plus a
    # CSV manifest into a single TFRecord of (features, phones) examples.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, help='directory with generated h5context files')
    parser.add_argument('--input_file', type=str, help='csv file generated by recipe')
    parser.add_argument('--replace_dir', type=str, help='directory int csv to replace to get h5context files paths')
    parser.add_argument('--output_file', type=str, help='TF record to write results to')
    args = parser.parse_args()
    with open(args.input_file, 'r') as f:
        with tf.io.TFRecordWriter(args.output_file) as writer:
            # Each CSV row: <wav path>,<unused>,<phone string>.
            for line in tqdm(f):
                p, _, phones = line.strip().split(',')
                h5_p = p.replace(args.replace_dir, args.data_dir).replace('.wav', '.h5context')
                # NOTE(review): h5_p already had replace_dir swapped for
                # data_dir above, yet it is joined with data_dir again here.
                # os.path.join discards data_dir when h5_p is absolute, so
                # this is benign for absolute paths — confirm for relative ones.
                with h5py.File(os.path.join(args.data_dir, h5_p), 'r') as h5_f:
                    # 'info' stores the feature tensor shape; reshape the flat
                    # 'features' array to (frames, feature_dim).
                    shape = np.array(h5_f['info'])
                    features = np.array(h5_f['features']).reshape([int(shape[1]), int(shape[2])])
                phones = list(phones)
                writer.write(make_example(features, phones).SerializeToString())
| 2.59375 | 3 |
pegasusio/nanostring_data.py | hoondy/pegasusio | 0 | 12774041 | <reponame>hoondy/pegasusio<filename>pegasusio/nanostring_data.py
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from typing import List, Dict, Union
import logging
logger = logging.getLogger(__name__)
import anndata
from pegasusio import UnimodalData
from .views import INDEX, _parse_index, UnimodalDataView
class NanostringData(UnimodalData):
    """UnimodalData subclass holding Nanostring matrices.

    All matrices are stored dense (np.ndarray); recognized matrix keys are
    listed in ``_matrix_keywords``. Conversion to/from AnnData is
    intentionally unsupported for this modality.
    """

    _matrix_keywords = ["RawData", "Q3Norm", "HKNorm", "LogMatrix"] # all in dense format, np.ndarray

    def __init__(
        self,
        barcode_metadata: Union[dict, pd.DataFrame],
        feature_metadata: Union[dict, pd.DataFrame],
        matrices: Dict[str, np.ndarray],
        metadata: dict,
        barcode_multiarrays: Dict[str, np.ndarray] = None,
        feature_multiarrays: Dict[str, np.ndarray] = None,
        barcode_multigraphs: Dict[str, csr_matrix] = None,
        feature_multigraphs: Dict[str, csr_matrix] = None,
        cur_matrix: str = "Q3Norm",
    ) -> None:
        # Nanostring data must be tagged with the 'nanostring' modality.
        assert metadata["modality"] == "nanostring"
        super().__init__(barcode_metadata, feature_metadata, matrices, metadata, barcode_multiarrays, feature_multiarrays, barcode_multigraphs, feature_multigraphs, cur_matrix)

    def from_anndata(self, data: anndata.AnnData, genome: str = None, modality: str = None) -> None:
        """Unsupported: AnnData cannot be converted into NanostringData."""
        raise ValueError("Cannot convert an AnnData object to a NanostringData object!")

    def to_anndata(self) -> anndata.AnnData:
        """Unsupported: NanostringData cannot be converted into AnnData."""
        # BUG FIX: corrected the typo 'ot' -> 'to' in the error message.
        raise ValueError("Cannot convert a NanostringData object to an AnnData object!")

    def __getitem__(self, index: INDEX) -> UnimodalDataView:
        """Return a view over the selected barcodes/features."""
        barcode_index, feature_index = _parse_index(self, index)
        return UnimodalDataView(self, barcode_index, feature_index, self._cur_matrix, obj_name = "NanostringData")

    def norm_hk(self, select: bool = True) -> None:
        """ Normalize raw protein data using house keeping (HK) genes.

        See https://genomebiology.biomedcentral.com/articles/10.1186/gb-2002-3-7-research0034 for geometric mean normalization.

        Parameters
        ----------
        select: ``bool``, optional, default: ``True``
            If True, select the normalized matrix as the major matrix (X).

        Returns
        -------
        ``None``

        Update ``self.matrices``:
            * ``self.matrices['HKNorm']``: np.ndarray.

        Examples
        --------
        >>> nanostring_data.norm_hk()
        """
        from scipy.stats.mstats import gmean

        # Per-sample geometric mean of the housekeeping controls, scaled so
        # the overall geometric mean of factors is 1.
        gms = gmean(self.obsm["controls"], axis = 1)
        norm_factors = gmean(gms) / gms
        self.matrices["HKNorm"] = self.matrices["RawData"] * norm_factors.reshape(-1, 1)
        self.obs["norm_factor"] = norm_factors

        if select:
            self._cur_matrix = "HKNorm"

    def log_transform(self, select: bool = True) -> None:
        """Conduct log transformation on the selected matrix: log(x + 1). Selected matrix can be either Q3Norm or HKNorm

        Add log-transformed matrix 'LogMatrix'.

        Parameters
        ----------
        select: ``bool``, optional, default: ``True``
            If True, select the transformed matrix as the major matrix (X).

        Returns
        -------
        ``None``

        Update ``self.matrices``:
            * ``self.matrices['LogMatrix']``: np.ndarray.

        Examples
        --------
        >>> nanostring_data.log_transform()
        """
        if self.current_matrix() not in ["Q3Norm", "HKNorm"]:
            # BUG FIX: the error message referred to a non-existent
            # 'log_norm' function; this method is 'log_transform'.
            raise ValueError("Either Q3Norm or HKNorm matrix must be selected in order to run the 'log_transform' function!")

        self.matrices["LogMatrix"] = np.log1p(self.X)

        if select:
            self._cur_matrix = "LogMatrix"
| 2.234375 | 2 |
setup_test.py | nahidupa/grr | 1 | 12774042 | #!/usr/bin/env python
"""A quick script to verify that setup.py actually installs all files."""
# pylint: disable=g-import-not-at-top
import os
import re
try:
import setuptools
setuptools.setup = lambda *args, **kw: None
except ImportError:
from distutils import core
core.setup = lambda *args, **kw: None
import setup
grr_packages = setup.GRRFindPackages()
setup_files = set()
walk_files = set()
for package, files in setup.GRRFindDataFiles(setup.grr_all_files).iteritems():
package_path = package.replace(".", "/").replace("grr", "")
if package_path.startswith("/"):
package_path = package_path[1:]
for f in files:
file_path = os.path.join(package_path, f)
setup_files.add(file_path)
for directory, _, files in os.walk("."):
directory = directory.replace("./", "")
if directory == ".":
package = "grr"
else:
package = "grr." + directory.replace("/", ".")
for f in files:
if package in grr_packages and f.endswith(".py"):
continue
file_path = os.path.join(directory, f)
walk_files.add(file_path)
whitelist = ["test_data/.*\\.py"]
for filename in sorted(setup_files - walk_files):
if any([re.match(regex, filename) for regex in whitelist]):
continue
print "File found by setup.py but not by os.walk:", filename
whitelist = [
# For building the server. Those files should probably be copied since we
# have them in the .deb too but it's not possible to build from an
# installed server anyways so we ignore them.
"config/.*",
# Just test keys, don't overwrite anything.
"keys/.*",
# Those go in /usr/share/grr, not along with the code.
"binaries/.*",
"executables/.*",
"scripts/.*",
# Metadata.
"./AUTHORS",
"./LICENSE",
"./README",
"./.*pyc",
]
for filename in sorted(walk_files - setup_files):
if any([re.match(regex, filename) for regex in whitelist]):
continue
print "File found by os.walk but not by setup.py:", filename
| 2.265625 | 2 |
vbb_backend/users/migrations/0009_auto_20210320_1800.py | patrickb42/backend-vbb-portal | 3 | 12774043 | <gh_stars>1-10
# Generated by Django 3.0.10 on 2021-03-20 18:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.10; avoid hand-editing the operations.

    dependencies = [
        ('users', '0008_auto_20210320_1759'),
    ]

    operations = [
        migrations.AlterField(
            model_name='newslettersubscriber',
            name='subscriber_type',
            # NOTE(review): the choice values are ints (100, 200, ...) on a
            # CharField, and default=20 is not among the declared choices.
            # This mirrors the model definition at migration time — confirm
            # the model itself is correct before touching the migration.
            field=models.CharField(choices=[(100, 'STUDENT'), (200, 'MENTOR'), (300, 'TEACHER'), (400, 'DIRECTOR'), (500, 'ADVISOR'), (600, 'HEADMASTER')], default=20, max_length=254),
        ),
    ]
]
| 1.765625 | 2 |
src/t/__init__.py | danpalmer/t | 1 | 12774044 | from .cli import autodiscover, cli
def main():
    """Console entry point: discover registered commands, then run the CLI."""
    autodiscover()
    cli()
# Public API of this package.
__all__ = (
    "autodiscover",
    "cli",
    "main",
)
| 1.195313 | 1 |
lib/python3.8/site-packages/ansible/module_utils/facts/sysctl.py | cjsteel/python3-venv-ansible-2.10.5 | 4 | 12774045 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
def get_sysctl(module, prefixes):
    """Run the sysctl binary for the given key prefixes and parse its output.

    :param module: AnsibleModule-like object providing ``get_bin_path()``
        and ``run_command()``.
    :param prefixes: list of sysctl key prefixes to query (e.g. ['kernel']).
    :returns: dict mapping each sysctl key to its stripped string value;
        an empty dict when the command fails.
    """
    sysctl_cmd = module.get_bin_path('sysctl')
    cmd = [sysctl_cmd]
    cmd.extend(prefixes)

    rc, out, err = module.run_command(cmd)

    result = dict()
    if rc == 0:
        for line in out.splitlines():
            # Skip blank lines in the command output.
            if not line:
                continue
            # Accept both "key = value" (Linux) and "key: value" (BSD) forms.
            (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
            result[key] = value.strip()
    return result
| 2.03125 | 2 |
stoa.py | petehague/stoa | 0 | 12774046 | #!/usr/bin/env python
import os
import sys
import re

# Defaults; overridable on the command line as key=value pairs.
params = {"port": 9000,
          "target": "./example"}

# Parse key=value arguments (argv[0] has no '=', so it is skipped).
# NOTE(review): values stay strings (e.g. port=8000 -> "8000"); that is fine
# for the .format() call below but confirm no numeric use elsewhere.
if len(sys.argv)>1:
    for arg in sys.argv:
        tokens = re.split("=",arg.strip())
        if len(tokens)>1:
            var = tokens[0]
            value = tokens[1]
            params[var] = value

# Launch the background services, then the web host in the foreground.
#TODO: make this a more pythonic way of controlling the threads
os.system("python userstate.py &")
os.system("python action.py &")
os.system("python webhost.py {} {}".format(params["target"], params["port"]))
| 2.328125 | 2 |
plato/internal/weak_id_dict.py | jgosmann/plato | 0 | 12774047 | """Provides a dictionary indexed by object identity with a weak reference."""
import weakref
from typing import Any, Dict, Generic, Iterator, TypeVar
T = TypeVar("T")


class WeakIdDict(Generic[T]):
    """Mapping keyed on object identity; keys are held only via weak refs.

    Entries vanish automatically once the key object is garbage collected,
    so the dictionary never keeps its keys alive.
    """

    data: Dict[int, T]  # id(key) -> stored value
    refs: Dict[int, weakref.ref]  # id(key) -> weak reference to the key

    def __init__(self) -> None:
        self.data = {}
        self.refs = {}

    def __getitem__(self, obj_key: Any) -> T:
        return self.data[id(obj_key)]

    def __setitem__(self, obj_key: Any, value: T) -> None:
        key_id = id(obj_key)

        def _purge(_: weakref.ref) -> None:
            # Fired when obj_key is collected: drop both bookkeeping entries
            # so a recycled id cannot alias the dead key.
            del self.data[key_id]
            del self.refs[key_id]

        self.refs[key_id] = weakref.ref(obj_key, _purge)
        self.data[key_id] = value

    def __delitem__(self, obj_key: Any) -> None:
        key_id = id(obj_key)
        del self.data[key_id]
        del self.refs[key_id]

    def __iter__(self) -> Iterator[Any]:
        # Yield only keys that are still alive.
        for weak in self.refs.values():
            key = weak()
            if key:
                yield key

    def __len__(self) -> int:
        return len(self.data)
| 2.84375 | 3 |
parser/team02/proyec/Valor/Valor.py | webdev188/tytus | 35 | 12774048 | from ast.Expresion import Expresion
class Valor(Expresion):
    """AST expression node wrapping a literal value.

    :param value: the literal's value.
    :param line: source line of the literal (kept for error reporting).
    :param column: source column of the literal.
    """

    def __init__(self, value, line, column):
        # BUG FIX: the original assigned the undefined name `valor` (Spanish
        # spelling), which raised NameError on every construction.
        self.value = value
        # Store the source position the constructor already receives.
        self.line = line
        self.column = column

    def getValor(self, entorno, tree):
        """Return the literal value; `entorno` and `tree` are unused here."""
        return self.value
| 2.5625 | 3 |
Swit/inner/branch.py | NogaOs/wit | 0 | 12774049 | <filename>Swit/inner/branch.py
from Swit.common.exceptions import BranchNameExistsError, CommitRequiredError
from loguru import logger
def does_branch_exist(branch_name: str) -> bool:
    """Return True if the references file already lists `branch_name`.

    Each line of the references file has the form ``<name>=<commit_id>``;
    only the name part (text before the first '=') is compared.
    """
    lines = path_to.references.read_text().split("\n")
    # The commit id is irrelevant here, so compare only the name part.
    # (The original loop also bound the unused id via partition.)
    return any(line.partition("=")[0] == branch_name for line in lines)
def add_branch_name_to_references(
    branch_name: str,
) -> None:
    """Adds a line to references file with the given branch name.

    The branch id will be identical to the current HEAD id.
    Branch will be added only if there isn't another branch
    with the same name.

    Raises:
        CommitRequiredError: if the references file does not exist yet
            (i.e. no commit has been made).
        BranchNameExistsError: if a branch with this name is already listed.
    """
    # No references file means no commit has ever been made.
    if not path_to.references.exists():
        raise CommitRequiredError(
            "Must commit at least once before adding a branch name."
        )
    if does_branch_exist(branch_name):
        raise BranchNameExistsError(
            f"There is already a branch named {branch_name}."
        )
    # Append "<name>=<HEAD id>" so the new branch points at the current HEAD.
    head_id = get_head_id()
    with open(path_to.references, "a") as f:
        f.write(f"{branch_name}={head_id}\n")
def branch(name: str) -> bool:
    """Create branch `name` pointing at the current HEAD.

    On failure (no commits yet, or the name is taken) a warning is logged
    and False is returned; on success an info message is logged and True
    is returned.
    """
    try:
        add_branch_name_to_references(name)
    except (CommitRequiredError, BranchNameExistsError) as error:
        logger.warning(error)
        return False
    else:
        logger.info(">>> Branch added.")
        return True
| 3.125 | 3 |
utils/bleu_metric/__init__.py | arfu2016/DuReader | 0 | 12774050 | __author__ = 'tylin'
# from .bleu import Bleu
#
# __all__ = ['Bleu']
| 1.101563 | 1 |