max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
miopia.py | NaroaLegarra/Myopy-model | 0 | 12762651 | <reponame>NaroaLegarra/Myopy-model
import pandas as pd
import numpy as np
import pickle
class Model:
    """Myopia-prediction pipeline.

    Loads a patient survey CSV, cleans and encodes it to match the feature
    layout the pre-trained linear-SVM classifier was fitted on, and produces
    per-patient predictions.
    """

    def __init__(self, x_path):
        """Read the CSV at *x_path*, load the trained classifier and clean the data.

        Requires "svm_linear_model.pickle" (and later "min_max.pickle") in the
        working directory.
        """
        self.data = pd.read_csv(x_path)
        # Use a context manager so the pickle file handle is closed promptly
        # (the original left the file object open).
        with open("svm_linear_model.pickle", 'rb') as f:
            self.model = pickle.load(f)
        self.clean_data()

    def del_columns1(self):
        # Delete columns that are not in the model
        to_del = ['origen antepasados (extranjeros)', 'hº act.cerca sem', 'fototipo',
                  'Grupo fot', 'fecha', 'origen antepasados (españa)']
        for col in to_del:
            del self.data[col]

    def fill_NAs(self):
        """Fill missing values with fixed per-column defaults (the training modes)."""
        to_fill = ['hº ocio exteriores sem', 'horas interior sem', 'Familiar miope num.', 'familiar miope', 'pat. Ret. Miop magna']
        modes = [0, 60, 1, 'No lo se', 'No lo se']
        for col, moda in zip(to_fill, modes):
            # Assign back instead of chained inplace fillna (deprecated pattern
            # in recent pandas; behavior is identical).
            self.data[col] = self.data[col].fillna(moda)

    def create_dummies(self):
        """One-hot encode categorical columns and collapse redundant dummies.

        Returns the encoded DataFrame. Two-valued object columns are mapped to
        0/1 directly; multi-valued ones go through pd.get_dummies, then
        combined answers like "Madre, Padre" are propagated onto the individual
        dummy columns and dropped.
        """
        # Columns with exactly two distinct values become single 0/1 columns.
        binary_columns = []
        for col in self.data.select_dtypes(include=[object]):
            if len(self.data[col].unique()) == 2:
                binary_columns.append(col)
        X_prov = self.data.loc[:, ~self.data.columns.isin(binary_columns)].copy()
        dummies = pd.get_dummies(X_prov)
        binary_data = self.data.loc[:, binary_columns]
        # NOTE(review): binary_columns[1:] skips the first binary column; the
        # code below assumes that first column is 'sexo', which is handled
        # separately — confirm against the survey schema.
        for col in binary_columns[1:]:
            binary_data[col] = (binary_data[col] == 'SI').astype(int).astype(object)
        binary_data['sexo'] = (binary_data['sexo'] == 'Mujer').astype(int).astype(object)  # Mujer = 1, Hombre = 0
        dummies = pd.concat([dummies, binary_data], axis=1)
        dummies.rename(columns={'sexo': 'Mujer'}, inplace=True)
        # Clean dummies with redundant columns.
        # familiar miope: multi-answer dummies (containing a comma) are spread
        # onto the single-answer columns, then dropped.
        import re  # for regex operations
        familiar_miope_names = list(dummies.columns)
        reg = re.compile(r'familiar miope_.*')
        familiar_miope_names = list(filter(reg.search, familiar_miope_names))
        reg = re.compile(r'.*,.*')
        to_change = list(filter(reg.search, familiar_miope_names))
        for change in to_change:
            other_columns = change.split("_")[1].split(", ")
            other_columns = [f'familiar miope_{col}' for col in other_columns]
            select_list = dummies[change].ne(0)
            dummies.loc[select_list, other_columns] = 1
        dummies.drop(to_change, axis=1, inplace=True)
        # familiar miope magno: same treatment for the magna-myopia family column.
        select_list = dummies['familiar miope magno_No lo se, No'].ne(0)
        dummies.loc[select_list, 'familiar miope magno_No lo se'] = 1
        dummies.drop('familiar miope magno_No lo se, No', axis=1, inplace=True)
        familiar_miope_names = list(dummies.columns)
        reg = re.compile(r'familiar miope magno_.*')
        familiar_miope_names = list(filter(reg.search, familiar_miope_names))
        reg = re.compile(r'.*,.*')
        to_change = list(filter(reg.search, familiar_miope_names))
        for change in to_change:
            other_columns = change.split("_")[1].split(", ")
            # BUG FIX: the original prefixed these with 'familiar miope_',
            # writing the flags into the wrong dummy family; this loop handles
            # the 'familiar miope magno_*' columns.
            other_columns = [f'familiar miope magno_{col}' for col in other_columns]
            select_list = dummies[change].ne(0)
            dummies.loc[select_list, other_columns] = 1
        dummies.drop(to_change, axis=1, inplace=True)
        return dummies

    def separate_dummies(self):
        """Split the encoded frame into numeric and categorical parts."""
        self.numeric = self.dummies_df.select_dtypes(include=[np.float64, np.int64])
        self.categorical = self.dummies_df.select_dtypes(exclude=[np.float64, np.int64])

    def scale_numeric(self):
        """Min-max scale numeric columns using the ranges saved at training time."""
        with open('min_max.pickle', 'rb') as f:
            min_max_data = pickle.load(f)
        scaled = {}
        for col in min_max_data:
            # Assumes min_max_data[col] is (max, min) — TODO confirm against
            # the script that produced min_max.pickle.
            scaled[col] = (self.numeric[col] - min_max_data[col][1]) / (min_max_data[col][0] - min_max_data[col][1])
        self.numeric = pd.DataFrame(scaled)

    def join_final(self):
        """Rejoin scaled numeric and categorical columns, keeping only model features."""
        self.final = pd.concat([self.numeric, self.categorical], axis=1)
        # Exact feature set (and order) the SVM was trained on.
        keep = ['hº deporte sem', 'horas exterior sem', 'Familiar miope num.', 'LA Media', 'Querato OD', 'OD-Media',
                'OS-Media', 'OS-DS', 'CUVAF-Media', 'CUVAF-DS', 'Promedio Nasal', 'Promedio Temporal',
                'familiar miope_Hermanos', 'familiar miope_Madre', 'familiar miope_No', 'familiar miope_Padre',
                'Familiar MM SI/NO_No', 'Familiar MM SI/NO_No lo se', 'Familiar MM SI/NO_Si',
                'familiar miope magno_Hermanos', 'familiar miope magno_Madre', 'familiar miope magno_No',
                'familiar miope magno_No lo se', 'pat. Ret. Miop magna_No', 'pat. Ret. Miop magna_No lo se',
                'pat. Ret. Miop magna_Sí, en ambos ojos', 'toma sol']
        self.final = self.final[keep]

    def clean_data(self):
        """Run the full cleaning pipeline (called from __init__)."""
        self.del_columns1()
        self.fill_NAs()
        self.dummies_df = self.create_dummies()
        self.separate_dummies()
        self.scale_numeric()
        self.join_final()

    def predict(self):
        """Predict classes and expand each tag into the four report columns."""
        prediction = self.model.predict(self.final)
        # tag -> [Myope?, Magna-myope?, combined label, detailed label]
        translate_dict = {
            'MM': ['SI', 'SI', 'MM', 'MM'],
            'M1': ['SI', 'NO', 'M', 'M1'],
            'M2': ['SI', 'NO', 'M', 'M2'],
            'C': ['NO', 'NO', 'C', 'C']
        }
        results_dict = {
            'M': [translate_dict[tag][0] for tag in prediction],
            'MM': [translate_dict[tag][1] for tag in prediction],
            'Combo': [translate_dict[tag][2] for tag in prediction],
            'DCombo': [translate_dict[tag][3] for tag in prediction]
        }
        self.results = pd.DataFrame(results_dict)
        print(self.results)

    def save_prediction(self):
        """Write the last prediction results to prediction.csv."""
        self.results.to_csv('prediction.csv', index=False)
| 2.65625 | 3 |
v2/backend/blog/models/category.py | jonfairbanks/rtsp-nvr | 558 | 12762652 | <filename>v2/backend/blog/models/category.py
from backend.database import (
Column,
Model,
String,
relationship,
slugify,
)
@slugify('name')
class Category(Model):
    """Blog category; the @slugify decorator auto-populates `slug` from `name`.

    NOTE(review): slug generation behavior is provided by backend.database's
    slugify decorator — assumed, confirm there.
    """
    name = Column(String(32))
    slug = Column(String(32))
    # One-to-many back-references; Article.category / Series.category own the FK.
    articles = relationship('Article', back_populates='category')
    series = relationship('Series', back_populates='category')

    # Attributes shown in repr output — presumably consumed by Model's
    # __repr__; verify in backend.database.
    __repr_props__ = ('id', 'name')

    def __init__(self, name, **kwargs):
        """Create a category with the given display *name*."""
        super().__init__(**kwargs)
        self.name = name
| 2.453125 | 2 |
textmatrix.py | chrismue/tegschtuhr | 0 | 12762653 | from common import SUNNY, CLOUDY, RAINY, SNOWY
"""
MATRIX = " MINUSACHTNOLL" + \
"EINZWOIVIERDRÜ" + \
"ZWÖLFNÜN FÖFÜF" + \
"ESEBENSÄCHSEIS" + \
"DRISGIVIERTELF" + \
"ZWÄNZGZÄHKOMMA" + \
"VORAB ESCHALBI" + \
"ELFI RACHTIDRÜ" + \
" KEISÄCHSINÜNI" + \
"SEBNIG NZÄHNI " + \
"FÜFISEBEZWÖLFI" + \
"ZWOI VIERIGRAD"
"""
class CharacterMatrix:
    """The 12x14 letter grid of the Swiss-German word clock, with word lookup."""

    MATRIX = "BMINUSACHTNOLL" + \
             "EINZWOIVIERDRÜ" + \
             "ZWÖLFNÜNRFÖFÜF" + \
             "ESEBENSÄCHSEIS" + \
             "DRISGIVIERTELF" + \
             "ZWÄNZGZÄHKOMMA" + \
             "VORABUESCHALBI" + \
             "ELFINRACHTIDRÜ" + \
             "OKEISÄCHSINÜNI" + \
             "SEBNIGMNZÄHNIU" + \
             "FÜFISEBEZWÖLFI" + \
             "ZWOIEVIERIGRAD"
    ROW_LEN = 14

    @classmethod
    def findTexts(cls, texts_array):
        """Locate each word of *texts_array*, in order, within MATRIX.

        Every word must lie entirely inside one 14-character row; matches that
        would wrap across a row boundary are skipped and the search continues.
        Each word is searched starting after the previous word's match.
        Returns a flat list of (row, col) tuples covering all words, or []
        as soon as one word cannot be found.
        """
        coords = []
        cursor = 0
        for word in texts_array:
            target = word.upper()
            while True:
                start = cls.MATRIX.find(target, cursor)
                if start < 0:
                    return []
                end = start + len(target)
                cursor = end  # next search resumes after this match
                # Accept only matches fully contained in a single row.
                if start % cls.ROW_LEN + len(target) <= cls.ROW_LEN:
                    coords.extend((p // cls.ROW_LEN, p % cls.ROW_LEN)
                                  for p in range(start, end))
                    break
        return coords
class TextFinder:
    """Maps times, temperatures, humidity, dates, luminance and weather onto
    (row, col) LED positions of the 12x14 word-clock matrix."""

    # 6-row x 4-col pixel glyphs for digits 0-9, each as [row, col] offsets.
    PIXEL_NUMBERS = [[[0,1], [0,2], [1,0], [1,3], [2,0], [2,3], [3,0], [3,3], [4,0], [4,3], [5,1], [5,2]],
                     [[3,0], [2,1], [1,2], [0,3], [1,3], [2,3], [3,3], [4,3], [5,3]],
                     [[1,0], [0,1], [0,2], [1,3], [2,3], [3,2], [4,1], [5,0], [5,1], [5,2], [5,3]],
                     [[0,0], [0,1], [0,2], [1,3], [2,1], [2,2], [3,3], [4,3], [5,0], [5,1], [5,2]],
                     [[2,0], [1,1], [0,2], [1,2], [2,2], [3,0], [3,1], [3,2], [3,3], [4,2], [5,2]],
                     [[0,0], [0,1], [0,2], [0,3], [1,0], [2,0], [2,1], [2,2], [3,3], [4,3], [5,0], [5,1], [5,2]],
                     [[0,1], [0,2], [0,3], [1,0], [2,0], [3,0], [4,0], [2,1], [2,2], [3,3], [4,3], [5,1], [5,2]],
                     [[0,0], [0,1], [0,2], [0,3], [1,3], [2,2], [3,2], [4,2], [5,2]],
                     [[0,1], [0,2], [1,0], [1,3], [2,1], [2,2], [3,0], [3,3], [4,0], [4,3], [5,1], [5,2]],
                     [[0,1], [0,2], [1,0], [1,3], [2,0], [2,3], [3,1], [3,2], [3,3], [4,3], [5,0], [5,1], [5,2]]]

    # Fixed LED positions for each weather icon (codes from the common module).
    WEATHER = {SUNNY: [[0,5], [1,5], [2,5], [3,5], [4,5], [5,5]],
               CLOUDY: [[5,1], [6,1], [7,1], [8,1], [9,1]],
               RAINY: [[7,5], [8,5], [9,5], [10,5]],
               SNOWY: [[6,7], [7,7], [8,7], [9,7], [10,7], [11,7]]}
    # Percent sign drawn next to the humidity digits.
    PERCENT = [[4,10], [4,13], [5,12], [6,11], [7,10], [7,13]]
    # Luminance-display decoration pixels (presumably a "lux" glyph — confirm
    # against the hardware layout).
    LUM = [[9, 3], [10, 3], [11, 3],
           [10, 5], [11, 5], [11, 6], [10, 7], [11, 7],
           [10, 9], [11, 9], [10, 10], [10, 11], [11, 11], [10, 12], [10, 13], [11, 13]]

    # Word lists for each minute 0-59 ("five past", "quarter to", ...).
    MINUTES_TEXTS = [["ES", "ESCH"], ["EIS", "AB"], ["ZWOI", "AB"], ["DRÜ", "AB"], ["VIER", "AB"], ["FÜF", "AB"], ["SÄCHS", "AB"], ["SEBE", "AB"], ["ACHT", "AB"], ["NÜN", "AB"],
                     ["ZÄH", "AB"], ["ELF", "AB"], ["ZWÖLF", "AB"], ["DRI", "ZÄH", "AB"], ["VIER", "ZÄH", "AB"], ["VIERTEL", "AB"], ["SÄCH", "ZÄH", "AB"], ["SEB", "ZÄH", "AB"], ["ACHT", "ZÄH", "AB"], ["NÜN", "ZÄH", "AB"],
                     ["ZWÄNZG", "AB"], ["EIN", "E", "ZWÄNZG", "AB"], ["ZWOI", "E", "ZWÄNZG", "AB"], ["DRÜ", "E", "ZWÄNZG", "AB"], ["VIER", "E", "ZWÄNZG", "AB"], ["FÜF", "VOR", "HALBI"], ["VIER", "VOR", "HALBI"], ["DRÜ", "VOR", "HALBI"], ["ZWOI", "VOR", "HALBI"], ["EIS", "VOR", "HALBI"],
                     ["HALBI"], ["EIS", "AB", "HALBI"], ["ZWOI", "AB", "HALBI"], ["DRÜ", "AB", "HALBI"], ["VIER", "AB", "HALBI"], ["FÜF", "AB", "HALBI"], ["SÄCHS", "AB", "HALBI"], ["SEBE", "AB", "HALBI"], ["ACHT", "AB", "HALBI"], ["NÜN", "AB", "HALBI"], ["ZWÄNZG", "VOR"],
                     ["NÜN", "ZÄH", "VOR"], ["ACHT", "ZÄH", "VOR"], ["SEB", "ZÄH", "VOR"], ["SÄCH", "ZÄH", "VOR"], ["VIERTEL", "VOR"], ["VIER", "ZÄH", "VOR"], ["DRI", "ZÄH", "VOR"], ["ZWÖLF", "VOR"], ["ELF", "VOR"], ["ZÄH", "VOR"],
                     ["NÜN", "VOR"], ["ACHT", "VOR"], ["SEBE", "VOR"], ["SÄCHS", "VOR"], ["FÜF", "VOR"], ["VIER", "VOR"], ["DRÜ", "VOR"], ["ZWOI", "VOR"], ["EIS", "VOR"]]
    # Hour words, indexed by hour % 12 (0 == twelve).
    HOURS_TEXTS = ["ZWÖLFI", "EIS", "ZWOI", "DRÜ", "VIERI", "FÜFI", "SÄCHSI", "SEBNI", "ACHTI", "NÜNI", "ZÄHNI", "ELFI"]
    # Word lists for the integer part of the temperature, 0-39.
    TEMP_BEFORE_DIGIT = [["NOLL"], ["EIS"], ["ZWOI"], ["DRÜ"], ["VIER"], ["FÜF"], ["SÄCHS"], ["SEBE"], ["ACHT"], ["NÜN"],
                         ["ZÄH"], ["ELF"], ["ZWÖLF"], ["DRI", "ZÄH"], ["VIER", "ZÄH"], ["FÖF", "ZÄH"], ["SÄCH", "ZÄH"], ["SEBE", "ZÄH"], ["ACHT", "ZÄH"], ["NÜN", "ZÄH"],
                         ["ZWÄNZG"], ["EIN", "E", "ZWÄNZG"], ["ZWOI", "E", "ZWÄNZG"], ["DRÜ", "E", "ZWÄNZG"], ["VIER", "E", "ZWÄNZG"], ["FÜF", "E", "ZWÄNZG"], ["SÄCHS", "E", "ZWÄNZG"], ["SEBEN", "E", "ZWÄNZG"], ["ACHT", "E", "ZWÄNZG"], ["NÜN", "E", "ZWÄNZG"],
                         ["DRISG"], ["EIN", "E", "DRISG"], ["ZWOI", "E", "DRISG"], ["DRÜ", "E", "DRISG"], ["VIER", "E", "DRISG"], ["FÜF", "E", "DRISG"], ["SÄCHS", "E", "DRISG"], ["SEBEN", "E", "DRISG"], ["ACHT", "E", "DRISG"], ["NÜN", "E", "DRISG"]]
    # Word lists for the single decimal digit after the comma.
    TEMP_AFTER_DIGIT = [[], ["EIS"], ["ZWOI"], ["DRÜ"], ["VIER"], ["FÜF"], ["SÄCHS"], ["SEBE"], ["ACHT"], ["NÜN"]]
    MINUS = "MINUS"
    DOT = "KOMMA"
    DEGREE = "GRAD"

    def __init__(self):
        # The matrix class (not an instance) is used for word lookups.
        self._matrix = CharacterMatrix

    #@classmethod
    def _get_minutes_text(self, minutes):
        """Return the word list for *minutes* past the hour (expects 0-59)."""
        #try:
        return self.MINUTES_TEXTS[minutes]
        #except IndexError:
        #    print(f"Illegal Minute Value: {minutes}")
        #    return []

    #@classmethod
    def _get_hours_text(self, hours):
        """Return the hour word; zero == twelve, 13..24 wrap to 1..12."""
        return [self.HOURS_TEXTS[hours % 12]]

    def get_time_positions(self, hours, minutes):
        """Return the LED positions spelling out the given time."""
        print("Searching", hours, ":", minutes)
        if minutes >= 25:  # We say "Halbi <Next Hour>" and "zäh vor <Next Hour>"
            hours = hours + 1
        return self._matrix.findTexts(self._get_minutes_text(minutes) + self._get_hours_text(hours))

    def get_temperature_positions(self, temperature):
        """Return the LED positions spelling out a temperature in words.

        Supports one decimal digit; a zero fraction omits the "KOMMA" part.
        """
        print("Searching Temp.", temperature)
        sign = [self.MINUS] if temperature < 0 else []
        before = int(abs(temperature))
        # First decimal digit; round() guards against float representation error.
        after = int(round(abs(temperature) * 10, 0)) % 10
        after_texts = [self.DOT] + self.TEMP_AFTER_DIGIT[after] if after != 0 else []
        return self._matrix.findTexts(sign + self.TEMP_BEFORE_DIGIT[before] + after_texts + [self.DEGREE])

    def get_humidity_positions(self, humidity):
        """Return pixel positions for a two-digit humidity value plus a % sign."""
        print("Searching Hum.", humidity)
        humidity_int = int(round(humidity, 0))
        ten_positions = self.PIXEL_NUMBERS[humidity_int // 10]
        one_positions = self.PIXEL_NUMBERS[humidity_int % 10]
        # Digit glyphs shifted down 3 rows; ones digit shifted 5 columns right.
        return [[p[0]+3, p[1]] for p in ten_positions] + [[p[0]+3, p[1]+5] for p in one_positions] + self.PERCENT

    def get_date_positions(self, day, month):
        """Return pixel positions for DD.MM (leading digit only when >= 10)."""
        print("Searching date", day, month)
        positions = [[p[0], p[1]+8] for p in self.PIXEL_NUMBERS[day % 10]]
        if day >= 10:
            positions += [[p[0], p[1]+3] for p in self.PIXEL_NUMBERS[day // 10]]
        positions += [[p[0]+6, p[1]+8] for p in self.PIXEL_NUMBERS[month % 10]]
        if month >= 10:
            positions += [[p[0]+6, p[1]+3] for p in self.PIXEL_NUMBERS[month // 10]]
        # The two extra pixels are presumably the separating dots — confirm.
        return positions + [[5, 13], [11, 13]]

    def get_luminance_position(self, luminance):
        """Return pixel positions for a luminance value, clamped to 999."""
        print("Searching Lum.", luminance)
        luminance_int = min(int(round(luminance, 0)), 999)
        if luminance_int >= 100:
            hun_positions = self.PIXEL_NUMBERS[luminance_int // 100]
        else:
            hun_positions = []
        if luminance_int >= 10:
            ten_positions = self.PIXEL_NUMBERS[(luminance_int // 10) % 10]
        else:
            ten_positions = []
        one_positions = self.PIXEL_NUMBERS[luminance_int % 10]
        # Digits shifted down 2 rows, columns 0/5/10, plus the LUM decoration.
        return [[p[0]+2, p[1]] for p in hun_positions] + \
               [[p[0]+2, p[1]+5] for p in ten_positions] + \
               [[p[0]+2, p[1]+10] for p in one_positions] + \
               self.LUM

    def get_weather_positions(self, weather_code):
        """Return the fixed icon pixels for a weather code (see common module)."""
        return self.WEATHER[weather_code]
if __name__ == "__main__":
    # Manual smoke test: time all 13*60 time lookups; uncomment the call to
    # debugPrintPositions to render each result as ASCII art.
    import time

    def debugPrintPositions(positions):
        """Render (row, col) positions as the matching MATRIX letters on a
        blank 12x14 grid."""
        # Build the blank grid from ROW_LEN instead of hard-coded 14-space
        # string literals (the original literals were fragile and had been
        # mangled to single spaces).
        out = [" " * CharacterMatrix.ROW_LEN for _ in range(12)]
        for r, c in positions:
            out[r] = out[r][:c] + CharacterMatrix.MATRIX[r * CharacterMatrix.ROW_LEN + c] + out[r][c+1:]
        print("-------------")
        print("\n".join(out))

    start = time.time()
    finder = TextFinder()
    for h in range(13):
        for m in range(60):
            positions = finder.get_time_positions(h, m)
            # debugPrintPositions(positions)
    print(time.time() - start)
src/Table_Extraction_Weight_Creation/Table_extracter_robust_concatenate.py | hong-yh/datasheet-scrubber | 13 | 12762654 | <reponame>hong-yh/datasheet-scrubber
from keras.layers import Dense, Conv2D, Permute, MaxPooling2D, AveragePooling2D, LSTM, Reshape, Flatten, Dropout
from keras.layers import multiply, add, average, maximum, Concatenate, Lambda
import keras
import tensorflow as tf
from sklearn.model_selection import train_test_split
import numpy as np
import os
import cv2
def crop(dimension, start, end):
    """Return a Keras Lambda layer that slices its input tensor on one axis.

    Example: crop(2, 5, 10) is equivalent to x[:, :, 5:10] — i.e. it crops
    on dimension 2 from start to end.

    Supports dimensions 0 through 4 (up to 5-D tensors).
    """
    def func(x):
        if dimension == 0:
            return x[start: end]
        if dimension == 1:
            return x[:, start: end]
        if dimension == 2:
            return x[:, :, start: end]
        if dimension == 3:
            return x[:, :, :, start: end]
        if dimension == 4:
            return x[:, :, :, :, start: end]
        # Fail loudly; the original silently returned None for dimension > 4.
        raise ValueError("crop() supports dimensions 0-4, got {}".format(dimension))
    return Lambda(func)
petal = 1  # 1,2 or 4 — NOTE(review): unused below; presumably a leftover config knob
root = r"C:\Users\Zach\Downloads\Table_extract_robust"
# Pre-built training arrays: 100x200 grayscale crops and their labels.
data_final = np.load(os.path.join(root, "DATA_concatenate_cols.npy"), allow_pickle=True)
LABELS = np.load(os.path.join(root, "LABELS_concatenate_cols.npy"), allow_pickle=True)
print(data_final.shape)
# 80/20 shuffled train/validation split.
x_train, x_valid, y_train, y_valid = train_test_split(data_final, LABELS, test_size = 0.2, shuffle = True)

keras_input = keras.layers.Input(shape=(100,200, 1))

# Branch A: dense-block stack over the central 20-pixel-wide column strip.
# NOTE(review): this branch is built but never fed into the final model
# (only ver_data and full_data are concatenated below) — confirm intent.
center_data_original = crop(2, 90, 110)(keras_input)
center_data_original = Conv2D(32, (5,5), activation="relu")(center_data_original)
center_data_original = MaxPooling2D((2,2))(center_data_original)
for i in range(3):
    # DenseNet-style block: three convs, then concatenate with the input.
    temp0 = Conv2D(32, (3,3), activation="relu", padding='same')(center_data_original)
    temp1 = Conv2D(32, (3,3), activation="relu", padding='same')(temp0)
    temp2 = Conv2D(32, (3,3), activation="relu", padding='same')(temp1)
    center_data_original = keras.layers.concatenate([center_data_original,temp2])
center_data_original = Conv2D(32, (3,3), activation="relu")(center_data_original)
center_data_original = MaxPooling2D((2,1))(center_data_original)
center_data_original = Dropout(.1)(center_data_original)
center_data_original = Flatten()(center_data_original)

# Branch B: vertical profile — average over all rows, then 1-D convolutions.
ver_data = AveragePooling2D((100,1))(keras_input)
ver_data = MaxPooling2D((1,4))(ver_data)
ver_data = Conv2D(6, (1, 3), activation='relu')(ver_data)
ver_data = Conv2D(6, (1, 3), activation='relu')(ver_data)
ver_data = Conv2D(6, (1, 3), activation='relu')(ver_data)
ver_data = Dropout(.3)(ver_data)
ver_data = Flatten()(ver_data)

# Branch C: plain conv/pool pyramid over the whole (downsampled) image.
full_data = MaxPooling2D((2,2))(keras_input)
full_data = Conv2D(32, (5, 5), activation='relu')(full_data)
full_data = MaxPooling2D((2,2))(full_data)
full_data = Conv2D(64, (3, 3), activation='relu')(full_data)
full_data = MaxPooling2D((2,2))(full_data)
full_data = Conv2D(64, (3, 3), activation='relu')(full_data)
full_data = MaxPooling2D((2,2))(full_data)
full_data = Conv2D(64, (3, 3), activation='relu')(full_data)
full_data = MaxPooling2D((2,2))(full_data)
full_data = Dropout(.3)(full_data)
full_data = Flatten()(full_data)

# Head: merge branches B and C, two dense layers, 2-way sigmoid output.
data = Concatenate()([ver_data, full_data])
data = Dense(512, activation='relu')(data)
data = Dropout(.2)(data)
data = Dense(512, activation='relu')(data)
out = Dense(2, activation='sigmoid')(data)

conc = keras.models.Model(inputs=keras_input, outputs= out)
conc.compile(loss="binary_crossentropy", optimizer="adam", metrics = ["accuracy"])
# Labels: column 0 is dropped here; train on label columns 1..2.
conc.fit(x_train, y_train[:,1:], validation_data = (x_valid, y_valid[:,1:]), epochs = 15, batch_size = 32)
#y_train[:, 0]
conc.save(r"C:\Users\Zach\Downloads\Table_extract_robust\valid_cells.h5")

# Visual inspection of misclassified samples (press a key to advance).
pred = conc.predict(data_final)
if(1):
    for i in range(len(pred)):
        # Show only samples where the first predicted label disagrees.
        if((pred[i][0] > .5) != LABELS[i][0]):
            print(pred[i][0], " ", LABELS[i][0])
            #print(LABELS[i][1], " ", LABELS[i][2])
            print("")
            temp_img = cv2.cvtColor(data_final[i],cv2.COLOR_GRAY2RGB)
            # Mark the center column (blue) and the cropped strip bounds (green).
            cv2.line(temp_img, (100, 0), (100, 100), (255,0,0), 1)
            cv2.line(temp_img, (90, 0), (90, 100), (0,255,0), 1)
            cv2.line(temp_img, (110, 0), (110, 100), (0,255,0), 1)
            cv2.imshow('image', temp_img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
else:
    # Alternative path: compare all three label columns.
    for i in range(len(pred)):
        if(((pred[i][0] > .5) != LABELS[i][0]) or ((pred[i][1] > .5) != LABELS[i][1]) or ((pred[i][2] > .5) != LABELS[i][2])):
            print(pred[i][0], " ", LABELS[i][0])
            print(pred[i][1], " ", LABELS[i][1])
            print(pred[i][2], " ", LABELS[i][2])
            print("")
            cv2.imshow('image',data_final[i])
            cv2.waitKey(0)
            cv2.destroyAllWindows()
| 2.953125 | 3 |
setup.py | melanieihuei/Web-Traffic-Forecasting | 2 | 12762655 | #!/usr/bin/env python3
from setuptools import setup
setup(
    # Only declares the two top-level packages; no name/version metadata,
    # so this setup.py supports local installs only.
    packages = ['ARIMA','LSTM']
)
| 0.9375 | 1 |
contrib/drf_introspection/tests.py | hluk/product-definition-center | 18 | 12762656 | <filename>contrib/drf_introspection/tests.py
#
# Copyright (c) 2018 Red Hat
# Licensed under The MIT License (MIT)
# https://opensource.org/licenses/MIT
#
import unittest
from .serializers import _normalized_fields_set
class TestNormalizedFieldsSet(unittest.TestCase):
    """Unit tests for the _normalized_fields_set helper."""

    def _check(self, cases):
        # Assert each (input, expected-set) pair in turn.
        for given, expected in cases:
            self.assertEqual(_normalized_fields_set(given), expected)

    def test_normal(self):
        self._check([
            ("a", {'a'}),
            (["a"], {'a'}),
            (["a", "b"], {'a', 'b'}),
        ])

    def test_empty(self):
        self._check([
            (None, set()),
            ([], set()),
            ([''], set()),
        ])

    def test_comma_separated(self):
        self._check([
            ("a,b", {'a', 'b'}),
            (["a,b"], {'a', 'b'}),
            (["a,b", "c"], {'a', 'b', 'c'}),
        ])

    def test_trailing_comma(self):
        self._check([
            (',', set()),
            ('a,', {'a'}),
        ])
| 2.484375 | 2 |
school_management/models/school.py | piccolo09/ninetails | 0 | 12762657 | <reponame>piccolo09/ninetails
from django.db import models
from ninetails_utils.abstract_models import BaseModel
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import get_user_model
User = get_user_model()
import random
class School(BaseModel):
    """Registered school profile, owned by a single user account."""

    # NOTE(review): "registed" typo in the user-facing help_text below —
    # runtime string, left untouched here; fix in a behavioral change.
    name = models.CharField(
        _('School Name'),
        help_text=_('Official registed name of school'),
        max_length=255)
    mobile = models.CharField(
        verbose_name=_("Contact Phone No."),
        max_length=15)
    email = models.EmailField(
        verbose_name=_("Contact Email"),
        help_text=_("Official Email address of school")
    )
    # Free-text comma-separated SEO keywords.
    keywords = models.TextField(
        verbose_name=_("SEO keys"),
        help_text=_("eg: mathematics,BusinessStudies,Management"),
        blank=True,null=True
    )
    promo = models.URLField(
        verbose_name=_("Promotion Link"),
    )
    # RESTRICT: a user cannot be deleted while they own a school.
    owner = models.ForeignKey(
        User,
        verbose_name=_("Owner user"),
        on_delete=models.RESTRICT,
        help_text=_("User responsible for school profile")
    )
    country = models.CharField(
        _('Country'),
        max_length=255)
    short_intro = models.TextField(blank=True,null=True)
    long_intro = models.TextField(blank=True,null=True)

    # def clean(self) -> None:
    #     if self.pk:
    #         pass
    #     return super().clean()

    def __str__(self) -> str:
        return f"{self.name}"

    @property
    def student_count(self):
        """Number of students linked to this school.

        Assumes a reverse relation named `students` defined on the Student
        model — TODO confirm.
        """
        return self.students.count()

    @property
    def teacher_count(self):
        """Number of teachers linked to this school.

        Assumes a reverse relation named `teachers` defined on the Teacher
        model — TODO confirm.
        """
        return self.teachers.count()

    class Meta:
        verbose_name = "School"
        verbose_name_plural = "Registered Schools"
| 2.21875 | 2 |
pythonexercicios/ex008-mtr-cent-mil.py | marroni1103/exercicios-pyton | 0 | 12762658 | <reponame>marroni1103/exercicios-pyton<gh_stars>0
# Read a length in metres and print its metric-scale equivalents
# (km, hm, dam, m, dm, cm, mm).
m = float(input('Informe os metros: '))
print(f'{m} metros equivale a: \n{m*0.001}km\n{m*0.01}hm\n{m*0.1:.1f}dam\n{m*10:.0f}dm\n{m*100:.0f}cm\n{m*1000:.0f}mm')
CloneDatasets.py | dblanchardDev/cloneDatasets | 2 | 12762659 | # coding: utf-8
'''CLONE DATASETS – <NAME> – Esri Canada 2017
Creates new datasets (feature classes, tables, or relationship
classes plus domains) using existing datasets as templates'''
# All literal strings will be Unicode instead of bytes
from __future__ import unicode_literals
# Import modules
import arcpy
## IN-CODE PARAMETERS #################
# Fallback values used when the script runs without toolbox parameters.
params = {
    "datasets": [],     # full paths of datasets to clone
    "outGDB": r"",      # destination geodatabase path
    "overwrite": False  # delete pre-existing datasets before cloning
}
## END ################################
##MAIN CODE########################################################################################
def execute(datasetList, outGDB, overwrite):
    '''Run through and clone datasets.

    datasetList -- full paths of the datasets to clone
    outGDB      -- path of the destination geodatabase
    overwrite   -- when True, existing datasets are deleted before cloning

    Returns a dict with "successes" and "failures" counts.
    '''
    arcpy.SetProgressor("step", None, 0, len(datasetList), 1)
    results = {"successes": 0, "failures": 0}
    # Loop through datasets
    relationshipClasses = []
    for dataset in datasetList:
        arcpy.SetProgressorLabel("Cloning {0}".format(dataset.split(".")[-1]))
        success = None
        try:
            desc = arcpy.Describe(dataset)
            # Feature classes
            if desc.dataType == "FeatureClass":
                success = cloneFeatureClass(desc, outGDB, overwrite)
            # Tables
            elif desc.dataType == "Table":
                success = cloneTables(desc, outGDB, overwrite)
            # Relationship Classes
            # (kept for last, ensuring related tables copied first)
            elif desc.dataType == "RelationshipClass":
                relationshipClasses.append(desc)
            # All other types are unsupported
            else:
                success = False
                arcpy.AddError("Dataset {0} is of an unsupported type ({1})".format(dataset, desc.dataType))
        except Exception:
            success = False
            arcpy.AddError("An error occurred while cloning {0}".format(dataset))
        # success stays None for relationship classes deferred to the 2nd pass
        if success is not None:
            arcpy.SetProgressorPosition()
            results["successes" if success else "failures"] += 1
    # Relationship Classes: second pass, now that all tables/FCs exist.
    for desc in relationshipClasses:
        arcpy.SetProgressorLabel("Cloning {0}".format(desc.name.split(".")[-1]))
        success = None
        try:
            success = cloneRelationshipClass(desc, outGDB)
        except Exception:
            success = False
            arcpy.AddError("An error occurred while cloning the {0} relationship class".format(desc.name))
        arcpy.SetProgressorPosition()
        results["successes" if success else "failures"] += 1
    return results
##CLONING FUNCTIONS################################################################################
def cloneFeatureClass(desc, outGDB, overwrite):
    '''Clone a feature class (name, shape type, schema, and domains).

    Returns True on success (or skip-with-warning), False when the feature
    class cannot be cloned.
    '''
    success = True
    # Cannot clone FCs without a shape type
    if desc.shapeType == "Any":
        arcpy.AddError("Unable to clone {0} as the shape type is not defined".format(desc.name))
        success = False
    # Cannot clone non-simple feature classes (annotation, dimension, ...)
    elif not desc.featureType == "Simple":
        arcpy.AddError("Unable to clone {0} as it is not a simple feature class".format(desc.name))
        # fix: the original left success = True here, so this error case was
        # counted as a success by execute()
        success = False
    else:
        cloneDomains(desc, outGDB)
        # Translate properties to parameters
        name = desc.name.split(".")[-1]
        shape = desc.shapeType.upper()
        template = "{0}\\{1}".format(desc.path, desc.name)
        SAT = "SAME_AS_TEMPLATE"  # inherit M/Z settings from the template
        if existsOrReplace(outGDB, name, overwrite):
            arcpy.CreateFeatureclass_management(outGDB, name, shape, template, SAT, SAT, template)
            arcpy.AddMessage("Cloned Feature Class {0}".format(name))
    return success
def cloneTables(desc, outGDB, overwrite):
    '''Clone a GDB table (name, schema and domains).'''
    # Make sure any domains the table references exist in the target first.
    cloneDomains(desc, outGDB)
    tableName = desc.name.split(".")[-1]
    templatePath = "{0}\\{1}".format(desc.path, desc.name)
    if existsOrReplace(outGDB, tableName, overwrite):
        arcpy.CreateTable_management(outGDB, tableName, templatePath)
        arcpy.AddMessage("Cloned Table {0}".format(tableName))
    return True
def cloneDomains(datasetDesc, outGDB):
    '''Clone all domains attached to a dataset and not yet present in output GDB.'''
    # Collect domains referenced by the dataset's fields that the output GDB
    # does not already have (deduplicated).
    missingDomains = []
    gdbDesc = arcpy.Describe(outGDB)
    for field in datasetDesc.fields:
        if field.domain and field.domain not in gdbDesc.domains and field.domain not in missingDomains:
            missingDomains.append(field.domain)
    # Add missing domains to output GDB
    if len(missingDomains) > 0:
        domainList = arcpy.da.ListDomains(datasetDesc.path)  # pylint: disable=E1101
        for domainName in missingDomains:
            domain = [e for e in domainList if e.name == domainName][0]
            # Translate describe-object properties to geoprocessing keywords
            name = domain.name
            description = domain.description
            fieldType = domain.type.upper()
            domainType = {"CodedValue": "CODED", "Range": "RANGE"}[domain.domainType]
            splitPolicy = {"DefaultValue": "DEFAULT", "Duplicate": "DUPLICATE", "GeometryRatio": "GEOMETRY_RATIO"}[domain.splitPolicy]
            mergePolicy = {"AreaWeighted": "AREA_WEIGHTED", "DefaultValue": "DEFAULT", "SumValues": "SUM_VALUES"}[domain.mergePolicy]
            # Create the domain
            arcpy.management.CreateDomain(outGDB, name, description, fieldType, domainType, splitPolicy, mergePolicy)
            # Add values: coded domains get every code/value pair, range
            # domains get their min/max.
            if domainType == "CODED":
                # fix: dict.iteritems() is Python 2-only and fails on
                # Python 3 (ArcGIS Pro); .items() works on both.
                for key, value in domain.codedValues.items():
                    arcpy.management.AddCodedValueToDomain(outGDB, name, key, value)
            else:
                arcpy.management.SetValueForRangeDomain(outGDB, name, domain.range[0], domain.range[1])
            arcpy.AddMessage("Cloned Domain {0}".format(domainName))
    return
def cloneRelationshipClass(desc, outGDB):
    '''Clone a relationship class (all properties).

    Assumes the origin and destination tables were already cloned into
    outGDB (execute() processes relationship classes last for this reason).
    Returns True on success, False otherwise.
    '''
    success = True
    name = desc.name.split(".")[-1]
    # Derive origin/destination tables paths for the output GDB
    originTableName = desc.originClassNames[0].split(".")[-1]
    originTable = "{0}\\{1}".format(outGDB, originTableName)
    destinTableName = desc.destinationClassNames[0].split(".")[-1]
    destinTable = "{0}\\{1}".format(outGDB, destinTableName)
    # Ensure origin/destination tables exist in output GDB
    if not arcpy.Exists(originTable):
        arcpy.AddError("Can't clone {0} as the {1} origin table is missing".format(name, originTableName))
        success = False
    elif not arcpy.Exists(destinTable):
        arcpy.AddError("Can't clone {0} as the {1} destination table is missing".format(name, destinTableName))
        success = False
    else:
        # Translate describe properties to geoprocessing keywords
        path_name = "{0}\\{1}".format(outGDB, name)
        relType = "COMPOSITE" if desc.isComposite else "SIMPLE"
        fLabel = desc.forwardPathLabel
        bLabel = desc.backwardPathLabel
        msg_dir = {"None": "NONE", "Forward": "FORWARD", "Backward": "BACK", "Both": "BOTH"}[desc.notification]
        cardinality = {"OneToOne": "ONE_TO_ONE", "OneToMany": "ONE_TO_MANY", "ManyToMany": "MANY_TO_MANY"}[desc.cardinality]
        attributed = "ATTRIBUTED" if desc.isAttributed else "NONE"
        originKeyPrim = desc.originClassKeys[0][0]
        originKeyFore = desc.originClassKeys[1][0]
        # Destination keys are only present for some relationship classes
        # (presumably many-to-many/attributed ones — confirm in arcpy docs).
        if len(desc.destinationClassKeys) > 0:
            destinKeyPrim = desc.destinationClassKeys[0][0]
            destinKeyFore = desc.destinationClassKeys[1][0]
        else:
            destinKeyPrim = None
            destinKeyFore = None
        # If attributed, copy the intermediate table while creating rel. class
        if desc.isAttributed:
            fields = [e.name for e in desc.fields]
            table = arcpy.CreateTable_management("in_memory", "relClass", "{0}\\{1}".format(desc.path, desc.name))
            arcpy.TableToRelationshipClass_management(originTable, destinTable, path_name, relType, fLabel, bLabel, msg_dir, cardinality, table, fields, originKeyPrim, originKeyFore, destinKeyPrim, destinKeyFore)
            arcpy.Delete_management(table)
        # If not attributed, create a simple relationship class
        else:
            arcpy.CreateRelationshipClass_management(originTable, destinTable, path_name, relType, fLabel, bLabel, msg_dir, cardinality, attributed, originKeyPrim, originKeyFore, destinKeyPrim, destinKeyFore)
        # Check for relationship rules (which are not copied by this tool)
        if len(desc.relationshipRules) > 0:
            arcpy.AddWarning("The {0} relationship class was cloned, but relationship rules could not be copied over".format(name))
        else:
            arcpy.AddMessage("Cloned Relationship Class {0}".format(name))
    return success
##UTILITIES########################################################################################
def existsOrReplace(outGDB, name, overwrite):
    '''Check whether dataset exists, and delete it when overwriting.

    Returns True when cloning may proceed (target absent, or deleted),
    False when the target exists and cannot/should not be removed.
    '''
    target = "{0}\\{1}".format(outGDB, name)
    # Nothing in the way: proceed immediately.
    if not arcpy.Exists(target):
        return True
    # Exists and overwriting is disabled: warn and skip.
    if not overwrite:
        arcpy.AddWarning("Could not clone {0} as it already exists in output geodatabase.".format(target))
        return False
    # Exists and overwriting is enabled: try to remove it first.
    try:
        arcpy.Delete_management(target)
    except Exception:
        arcpy.AddError("Could not delete {0}. Make sure it isn't locked. Dataset not cloned.".format(target))
        return False
    return True
##MAIN EXECUTION CODE##############################################################################
if __name__ == "__main__":
    # Execute when running outside Python Toolbox

    # Attempt to retrieve parameters from normal toolbox tool
    datasetsParam = arcpy.GetParameterAsText(0)
    outGDBParam = arcpy.GetParameterAsText(1)
    overwriteParam = arcpy.GetParameterAsText(2).lower() == "true"

    # Process the attributes. GetParameterAsText returns an empty string
    # (never None) when a parameter is absent, so the original
    # "is not None" test made the in-code fallback unreachable.
    if datasetsParam:
        # Strip the surrounding quotes from each semicolon-separated path.
        datasetListParam = [x[1:-1] for x in datasetsParam.split(";")]
    # If none provided through parameters, fall back to in-code parameters
    else:
        datasetListParam = params["datasets"]
        # fix: the original assigned this to a typo variable
        # 'outGDBParamParam' that was never read.
        outGDBParam = params["outGDB"]
        overwriteParam = params["overwrite"]

    # Run the processing
    execute(datasetListParam, outGDBParam, overwriteParam)
| 1.976563 | 2 |
LR/lr/lib/uri_validate.py | LearningRegistry/LearningRegistry | 26 | 12762660 | #!/usr/bin/env python
"""
Regex for URIs
These regex are directly derived from the collected ABNF in RFC3986
(except for DIGIT, ALPHA and HEXDIG, defined by RFC2234).
They should be processed with re.VERBOSE.
"""
__license__ = """
Copyright (c) 2009 <NAME> (code portions)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
### basics
# DIGIT, ALPHA and HEXDIG come from RFC 2234; everything else transcribes the
# collected ABNF of RFC 3986.  All fragments MUST be compiled with re.VERBOSE
# (insignificant whitespace inside the patterns).
DIGIT = r"[\x30-\x39]"
ALPHA = r"[\x41-\x5A\x61-\x7A]"
HEXDIG = r"[\x30-\x39A-Fa-f]"
# pct-encoded = "%" HEXDIG HEXDIG
pct_encoded = r" %% %(HEXDIG)s %(HEXDIG)s" % locals()
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
unreserved = r"(?: %(ALPHA)s | %(DIGIT)s | \- | \. | _ | ~ )" % locals()
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
gen_delims = r"(?: : | / | \? | \# | \[ | \] | @ )"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
#            / "*" / "+" / "," / ";" / "="
sub_delims = r"""(?: ! | \$ | & | ' | \( | \) |
                     \* | \+ | , | ; | = )"""
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
pchar = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | : | @ )" % locals()
# reserved = gen-delims / sub-delims
reserved = r"(?: %(gen_delims)s | %(sub_delims)s )" % locals()
### scheme
# scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
scheme = r"%(ALPHA)s (?: %(ALPHA)s | %(DIGIT)s | \+ | \- | \. )*" % locals()
### authority
# dec-octet = DIGIT              ; 0-9
#           / %x31-39 DIGIT      ; 10-99
#           / "1" 2DIGIT         ; 100-199
#           / "2" %x30-34 DIGIT  ; 200-249
#           / "25" %x30-35       ; 250-255
dec_octet = r"""(?: %(DIGIT)s |
                    [\x31-\x39] %(DIGIT)s |
                    1 %(DIGIT)s{2} |
                    2 [\x30-\x34] %(DIGIT)s |
                    25 [\x30-\x35]
                )
""" % locals()
# IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
IPv4address = r"%(dec_octet)s \. %(dec_octet)s \. %(dec_octet)s \. %(dec_octet)s" % locals()
# h16 = 1*4HEXDIG
h16 = r"(?: %(HEXDIG)s ){1,4}" % locals()
# ls32 = ( h16 ":" h16 ) / IPv4address
ls32 = r"(?: (?: %(h16)s : %(h16)s ) | %(IPv4address)s )" % locals()
# IPv6address -- see RFC 3986 section 3.2.2 for the full ABNF.
# NOTE(review): the optional "[ *N( h16 ":" ) h16 ]" prefixes of the ABNF are
# transcribed as mandatory here, so some abbreviated IPv6 forms will not
# match; preserved as-is from the original.
IPv6address = r"""(?: (?: %(h16)s : ){6} %(ls32)s |
                      :: (?: %(h16)s : ){5} %(ls32)s |
                      %(h16)s :: (?: %(h16)s : ){4} %(ls32)s |
                      (?: %(h16)s : ) %(h16)s :: (?: %(h16)s : ){3} %(ls32)s |
                      (?: %(h16)s : ){2} %(h16)s :: (?: %(h16)s : ){2} %(ls32)s |
                      (?: %(h16)s : ){3} %(h16)s :: %(h16)s : %(ls32)s |
                      (?: %(h16)s : ){4} %(h16)s :: %(ls32)s |
                      (?: %(h16)s : ){5} %(h16)s :: %(h16)s |
                      (?: %(h16)s : ){6} %(h16)s ::
                  )
""" % locals()
# IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
IPvFuture = r"v %(HEXDIG)s+ \. (?: %(unreserved)s | %(sub_delims)s | : )+" % locals()
# IP-literal = "[" ( IPv6address / IPvFuture ) "]"
IP_literal = r"\[ (?: %(IPv6address)s | %(IPvFuture)s ) \]" % locals()
# reg-name = *( unreserved / pct-encoded / sub-delims )
reg_name = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s )*" % locals()
# userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
# FIX: the original omitted the "*" repetition, so userinfo could only match
# a single character and e.g. "user@host" never matched `authority`.
userinfo = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | : )*" % locals()
# host = IP-literal / IPv4address / reg-name
host = r"(?: %(IP_literal)s | %(IPv4address)s | %(reg_name)s )" % locals()
# port = *DIGIT
port = r"(?: %(DIGIT)s )*" % locals()
# authority = [ userinfo "@" ] host [ ":" port ]
authority = r"(?: %(userinfo)s @)? %(host)s (?: : %(port)s)?" % locals()
### Path
# segment = *pchar
segment = r"%(pchar)s*" % locals()
# segment-nz = 1*pchar
segment_nz = r"%(pchar)s+" % locals()
# segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" )
# ; non-zero-length segment without any colon ":"
segment_nz_nc = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | @ )+" % locals()
# path-abempty = *( "/" segment )
path_abempty = r"(?: / %(segment)s )*" % locals()
# path-absolute = "/" [ segment-nz *( "/" segment ) ]
path_absolute = r"/ (?: %(segment_nz)s (?: / %(segment)s )* )?" % locals()
# path-noscheme = segment-nz-nc *( "/" segment )
path_noscheme = r"%(segment_nz_nc)s (?: / %(segment)s )*" % locals()
# path-rootless = segment-nz *( "/" segment )
path_rootless = r"%(segment_nz)s (?: / %(segment)s )*" % locals()
# path-empty = 0<pchar>
path_empty = r"" ### FIXME
# path = path-abempty ; begins with "/" or is empty
# / path-absolute ; begins with "/" but not "//"
# / path-noscheme ; begins with a non-colon segment
# / path-rootless ; begins with a segment
# / path-empty ; zero characters
path = r"""(?: %(path_abempty)s |
%(path_absolute)s |
%(path_noscheme)s |
%(path_rootless)s |
%(path_empty)s
)
""" % locals()
### Query and Fragment
# query = *( pchar / "/" / "?" )
query = r"(?: %(pchar)s | / | \? )*" % locals()
# fragment = *( pchar / "/" / "?" )
fragment = r"(?: %(pchar)s | / | \? )*" % locals()
### URIs
# hier-part = "//" authority path-abempty
# / path-absolute
# / path-rootless
# / path-empty
hier_part = r"""(?: (?: // %(authority)s %(path_abempty)s ) |
%(path_absolute)s |
%(path_rootless)s |
%(path_empty)s
)
""" % locals()
# relative-part = "//" authority path-abempty
# / path-absolute
# / path-noscheme
# / path-empty
relative_part = r"""(?: (?: // %(authority)s %(path_abempty)s ) |
%(path_absolute)s |
%(path_noscheme)s |
%(path_empty)s
)
""" % locals()
# relative-ref = relative-part [ "?" query ] [ "#" fragment ]
relative_ref = r"%(relative_part)s (?: \? %(query)s)? (?: \# %(fragment)s)?" % locals()
# URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
URI = r"(?: %(scheme)s : %(hier_part)s (?: \? %(query)s )? (?: \# %(fragment)s )? )" % locals()
# URI-reference = URI / relative-ref
URI_reference = r"(?: %(URI)s | %(relative_ref)s )" % locals()
# absolute-URI = scheme ":" hier-part [ "?" query ]
absolute_URI = r"(?: %(scheme)s : %(hier_part)s (?: \? %(query)s )? )" % locals()
if "__main__" == __name__:
    # Manual test driver.  NOTE: Python 2 syntax (print statements); the
    # trailing commas suppress the newline so yes/no lands on the same line.
    import re
    import sys
    try:
        instr = sys.argv[1]
    except IndexError:
        print "usage: %s test-string" % sys.argv[0]
        sys.exit(1)
    print 'testing: "%s"' % instr
    print "URI:",
    if re.match("^%s$" % URI, instr, re.VERBOSE):
        print "yes"
    else:
        print "no"
    print "URI reference:",
    if re.match("^%s$" % URI_reference, instr, re.VERBOSE):
        print "yes"
    else:
        print "no"
    print "Absolute URI:",
    if re.match("^%s$" % absolute_URI, instr, re.VERBOSE):
        print "yes"
    else:
        print "no"
| 2.375 | 2 |
feedler/__meta__.py | xheuz/feedler | 0 | 12762661 | __title__ = "feedler"
# Package metadata for `feedler`, consumed by packaging tooling and importable
# at runtime as feedler.__meta__.
__description__ = "A dead simple parser"
__version__ = "0.0.2"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "Copyright 2020 <NAME>"
| 0.972656 | 1 |
azdevman/commands/delete.py | kcraley/azdevman | 0 | 12762662 | import click
@click.group('delete')
@click.pass_obj
def delete(ctx):
    """Delete Azure DevOps resources

    Parent command group; subcommands (repo, build-definition) are attached
    below via ``@delete.command``.  ``ctx`` is the shared context object
    injected by ``@click.pass_obj``.
    """
# NOTE(review): the function is named `create_repo` but the command *deletes*
# repositories -- consider renaming to `delete_repo`.
@delete.command('repo')
@click.option('-p', '--project', 'project',
              help='Project name or id the repository in')
@click.argument('repository_names', nargs=-1, required=True)
@click.pass_obj
def create_repo(ctx, project, repository_names):
    """Delete an Azure DevOps repository"""
    try:
        # Interactive confirmation; abort=True raises and exits on "no".
        click.confirm('Are you sure you want to delete these repositories?',
                      default=False, abort=True)
        _git_client = ctx.connection.clients.get_git_client()
        # Fall back to the project configured in the CLI context.
        if not project:
            project = ctx._azure_devops_project
        for repo_name in repository_names:
            repository = _git_client.get_repository(repo_name, project)
            _git_client.delete_repository(repository.id, repository.project.name)
            click.echo('Deleted repository ' + repo_name + ' within project ' + project)
    except Exception as err:
        # Surface any driver error as a CLI usage error.
        raise click.UsageError(err)
@delete.command('build-definition')
@click.option('-p', '--project', 'project',
              help='Project name or id the build definition in')
@click.argument('build_definitions', nargs=-1, required=True)
@click.pass_obj
def delete_build_definition(ctx, project, build_definitions):
    """Delete an Azure DevOps build definition"""
    try:
        # Interactive confirmation; abort=True raises and exits on "no".
        click.confirm('Are you sure you want to delete these build definitions?',
                      default=False, abort=True)
        _build_client = ctx.connection.clients.get_build_client()
        # Fall back to the project configured in the CLI context.
        if not project:
            project = ctx._azure_devops_project
        for build_definition in build_definitions:
            # FIX: get_definitions returns a (possibly empty) list; the
            # original indexed [0] unconditionally and raised IndexError
            # when no definition matched the given name.
            definitions = _build_client.get_definitions(project, build_definition)
            if not definitions:
                raise click.UsageError(
                    'Build definition "{0}" not found in project {1}'.format(
                        build_definition, project))
            _build_client.delete_definition(project, definitions[0].id)
    except (click.ClickException, click.Abort):
        # Let click's own exceptions (confirm abort, UsageError) propagate.
        raise
    except Exception as err:
        # Consistent with the sibling `repo` command: surface driver errors
        # as a CLI usage error instead of a raw traceback.
        raise click.UsageError(str(err))
| 2.71875 | 3 |
coding-challenges/hackerrank/python/day-16-exceptions-string-to-integer.py | acfromspace/infinitygauntlet | 3 | 12762663 | """
@author: acfromspace
"""
import sys
def is_bad_string(S):
    """Print ``int(S)`` when S parses as an integer, else print "Bad String".

    FIX: the original used a bare ``except:`` which would also swallow
    SystemExit/KeyboardInterrupt; only the conversion failure (ValueError)
    should be treated as a bad string.
    """
    try:
        print(int(S))
    except ValueError:
        print("Bad String")
S = input("Input: ").strip()
is_bad_string(S)
"""
NOTE: Hackerrank has a weird time compiling, needs to be strict w/o comments and with exception handles
Accepted answer on Hackerrank:
S = input().strip()
try:
print(int(S))
except:
print("Bad String")
"""
| 3.78125 | 4 |
tests/integration/test_fixture_builders.py | dendisuhubdy/trinity | 3 | 12762664 | <reponame>dendisuhubdy/trinity
import pytest
from eth.db.atomic import AtomicDB
from .integration_fixture_builders import build_pow_fixture, build_pow_churning_fixture
@pytest.mark.parametrize('builder', (build_pow_fixture, build_pow_churning_fixture))
def test_fixture_builders(builder):
    """Smoke-test each fixture builder against a fresh in-memory AtomicDB."""
    # just make sure it doesn't crash, for now
    db = AtomicDB()
    builder(db, num_blocks=5)
# TODO add a long test that makes sure that we can rebuild the zipped ldb fixtures
# with the expected state roots. But probably skip during normal CI runs, for speed.
| 1.8125 | 2 |
ismo/train/model_skeleton_from_simple_config.py | kjetil-lye/iterative_surrogate_optimization | 6 | 12762665 | import tensorflow.keras
import tensorflow.keras.models
import tensorflow.keras.layers
import tensorflow.keras.regularizers
import json
def model_skeleton_from_simple_config_file(config_filename):
    """Read a JSON configuration file and build a model skeleton from it."""
    with open(config_filename) as config_file:
        configuration = json.load(config_file)
    return model_skeleton_from_simple_config(configuration)
def model_skeleton_from_simple_config(configuration):
    """Build an uncompiled Keras Sequential MLP from a configuration dict.

    Expected keys: 'activation' (str), 'network_topology' (list of layer
    widths, first entry is the input dimension, last is the output width),
    and optionally 'l1_regularization' or 'l2_regularization' (float).

    FIX: the original computed the l1 regularizer and then, in a separate
    ``if/else`` on 'l2_regularization', unconditionally overwrote it with
    None whenever l2 was absent -- so l1-only configs silently lost their
    regularization.  As before, l2 takes precedence when both are given.
    """
    activation = configuration['activation']
    if 'l2_regularization' in configuration:
        regularizer = tensorflow.keras.regularizers.l2(configuration['l2_regularization'])
    elif 'l1_regularization' in configuration:
        regularizer = tensorflow.keras.regularizers.l1(configuration['l1_regularization'])
    else:
        regularizer = None

    network_topology = configuration['network_topology']

    model = tensorflow.keras.models.Sequential()
    # First hidden layer also declares the input shape.
    model.add(tensorflow.keras.layers.Dense(network_topology[1],
                                            input_shape=(network_topology[0],),
                                            activation=activation,
                                            kernel_regularizer=regularizer))
    # Remaining hidden layers.
    for layer_width in network_topology[2:-1]:
        model.add(tensorflow.keras.layers.Dense(layer_width, activation=activation,
                                                kernel_regularizer=regularizer))
    # Linear output layer (no activation, no regularizer), as in the original.
    model.add(tensorflow.keras.layers.Dense(network_topology[-1]))
    return model
57_InsertInterval.py | kannan5/LeetCode | 0 | 12762666 | <reponame>kannan5/LeetCode
class Solution:
    """LeetCode 57 -- insert a new interval into a sorted, disjoint list."""

    def InsertInterval(self, Interval, newInterval):
        """Insert ``newInterval`` into ``Interval`` and merge overlaps.

        ``Interval`` is a list of [start, end] pairs sorted by start and
        pairwise non-overlapping; the merged result is returned as a new
        list (inner lists may be shared/mutated copies of the inputs).
        """
        nums, mid = list(), 0
        # Copy every interval that starts strictly before the new one.
        for s, e in Interval:
            if s < newInterval[0]:
                nums.append([s, e])
                mid += 1
            else:
                break
        # Insert the new interval, merging with the last copied one if they touch.
        if not nums or nums[-1][1] < newInterval[0]:
            nums.append(newInterval)
        else:
            nums[-1][1] = max(newInterval[1], nums[-1][1])
        # Fold in the remaining intervals, merging any overlap with the tail.
        for s, e in Interval[mid:]:
            if s > nums[-1][1]:
                nums.append([s, e])
            else:
                nums[-1][1] = max(nums[-1][1], e)
        return nums
if __name__ == "__main__":
    # Quick manual check of the three sample cases.
    solver = Solution()
    cases = (
        ([[2, 3], [6, 9]], [5, 7]),
        ([[1, 2], [3, 5], [6, 7], [8, 10], [12, 16]], [4, 8]),
        ([], [4, 8]),
    )
    for intervals, new_interval in cases:
        print(solver.InsertInterval(intervals, new_interval))
| 3.484375 | 3 |
cookbook/migrations/0018_auto_20200216_2303.py | mhoellmann/recipes | 0 | 12762667 | <reponame>mhoellmann/recipes
# Generated by Django 3.0.2 on 2020-02-16 22:03
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename model RecipeIngredients -> RecipeIngredient."""
    dependencies = [
        ('cookbook', '0017_auto_20200216_2257'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='RecipeIngredients',
            new_name='RecipeIngredient',
        ),
    ]
| 1.617188 | 2 |
main_1.py | Alexkorkod/deckmaker | 0 | 12762668 | <filename>main_1.py
#!/usr/bin/python
import math, json, operator
import sys
import random
import time
# NOTE(review): 10 is a dangerously low *global* recursion limit -- verify it
# is intentional (it appears to exist only to bound the small recursions in
# mullForFirstTurn/makeTurn below).
sys.setrecursionlimit(10)
# Load the card database once at import time.
with open('bd.json', 'r') as f:
    read_data = f.read()
ar = json.loads(read_data)
def findMostExpensiveCardInHand(hand):
    '''Return the index of the highest mana-cost card in `hand` (0 if empty).

    Ties keep the earliest index (strict `>` comparison).

    FIX: the rest of this module stores `mana_cost` as a string and compares
    via int(...); the original compared the raw value against an int, which
    raises TypeError on Python 3 for string costs.  Cast explicitly.
    '''
    meindex = 0
    max_cost = 0
    for (i, card) in enumerate(hand):
        cost = int(card['mana_cost'])
        if cost > max_cost:
            meindex = i
            max_cost = cost
    return meindex
def checkFieldForCards():
    '''Return True when the global field contains at least one tile.'''
    global field
    return bool(field)
def checkForTurn(hand,mana):
    '''Return True when at least one card in `hand` is playable with `mana`.

    Targeted spells (FRIENDLY/ENEMY/ANY) are only playable when the global
    field has tiles (checkFieldForCards); any other affordable card is
    playable outright.
    '''
    turnexists = False
    for card in hand:
        if int(card['mana_cost']) <= mana:
            if card['type'] == 'Spell':
                if card['target'] == 'FRIENDLY' or card['target'] == 'ENEMY' or card['target'] == 'ANY':
                    turnexists = checkFieldForCards()
                else:
                    turnexists = True
                if turnexists:
                    break
            else:
                # Non-spell (e.g. minion/artifact) within budget: playable.
                turnexists = True
                break
    return turnexists
def replace(deck,hand):
    '''Swap the most expensive card in `hand` for a random card from `deck`.

    Mutates both lists in place and returns `hand` for convenience.
    '''
    tmp = hand.pop(findMostExpensiveCardInHand(hand))
    index = random.randint(0,len(deck)-1)
    card = deck.pop(index)
    hand.append(card)
    deck.append(tmp)
    return hand
def mullForFirstTurn(deck, hand, mana, replace_count):
    '''Mulligan: while no playable first turn exists, swap out the most
    expensive card (at most 2 swaps total).  Returns `hand`, which is also
    mutated in place.

    FIX: the original recursed and discarded the recursive call's return
    value, so the recursive path implicitly returned None (and this module
    caps sys.setrecursionlimit at 10).  An equivalent loop always returns
    the hand.
    '''
    while replace_count < 2 and not checkForTurn(hand, mana):
        hand = replace(deck, hand)
        replace_count += 1
    return hand
def simulateFirstDraw(deck, hand):
    '''Draw five random cards from `deck` into `hand` (both mutated in place).

    Returns `hand` for convenience.
    '''
    for _ in range(5):
        pick = random.randint(0, len(deck) - 1)
        hand.append(deck.pop(pick))
    return hand
def fishForMostExpensivePlay(hand,mana):
    '''Return the index of the most expensive *playable* card within `mana`.

    Ties keep the later index (>= comparison).  Targeted spells are only
    playable when the global field has tiles.
    NOTE(review): appears to be unused in this module.
    '''
    max_index = 0
    max_cost = 0
    for (i,card) in enumerate(hand):
        if int(card['mana_cost']) <= mana and int(card['mana_cost']) >= max_cost:
            playable = True
            if card['type'] == 'Spell':
                if card['target'] == 'FRIENDLY' or card['target'] == 'ENEMY' or card['target'] == 'ANY':
                    playable = checkFieldForCards()
            if playable:
                max_cost = int(card['mana_cost'])
                max_index = i
    return max_index
def makeCollectionOfPlayables(hand,mana,playables):
    '''Append every card affordable with `mana` to `playables` and return it.

    Targeted spells are only included when the global field has tiles.
    '''
    for (i,card) in enumerate(hand):
        if int(card['mana_cost']) <= mana:
            playable = True
            if card['type'] == 'Spell':
                if card['target'] == 'FRIENDLY' or card['target'] == 'ENEMY' or card['target'] == 'ANY':
                    playable = checkFieldForCards()
            if playable:
                playables.append(card)
    return playables
def choseCardForTurn(hand,mana):
    '''Greedily pick a set of playable cards whose total cost fits `mana`.

    Strategy: sort playables by cost; if the most expensive one exactly
    equals the budget take it, otherwise take the cheapest remaining card.
    If the picks overshoot the budget, drop the first pick whose removal
    brings the total back within `mana`.
    '''
    playables = []
    playables = makeCollectionOfPlayables(hand,mana,playables)
    playables = sorted(playables,key=operator.itemgetter('mana_cost'))
    sum_mana = 0
    card = {}
    cards_to_play = []
    while sum_mana < mana:
        if len(playables) > 0:
            top_card = playables.pop()
            if int(top_card['mana_cost']) == mana:
                card = top_card
            else:
                # Put the expensive card back and take the cheapest instead.
                playables.append(top_card)
                card = playables.pop(0)
            sum_mana += int(card['mana_cost'])
            cards_to_play.append(card)
        else:
            break
    # Overshoot correction: drop the first card whose removal fits the budget.
    if sum_mana > mana:
        for card in cards_to_play:
            if sum_mana - int(card['mana_cost']) <= mana:
                odd_card = cards_to_play.pop(0)
                sum_mana -= int(odd_card['mana_cost'])
                break
    return cards_to_play
def findLeastExpensiveCardCost(hand):
    '''Return the lowest mana cost among playable cards in `hand`.

    100 acts as a sentinel meaning "no playable card found".
    NOTE(review): appears to be unused in this module.
    '''
    min_cost = 100
    for card in hand:
        if int(card['mana_cost']) < min_cost:
            playable = True
            if card['type'] == 'Spell':
                if card['target'] == 'FRIENDLY' or card['target'] == 'ENEMY' or card['target'] == 'ANY':
                    playable = checkFieldForCards()
            if playable:
                min_cost = int(card['mana_cost'])
    return min_cost
def makeTurn(deck,hand,mana,replace_count):
    '''Play the cards chosen for this turn, placing minions on random free
    adjacent tiles, and accumulate the global mana/statistics counters.

    If nothing is playable, replaces one card (once) and retries recursively.
    NOTE(review): pops from `hand` while iterating it with enumerate --
    verify duplicate cards are handled as intended.
    '''
    global sum_lost_mana, cur_mana, field, enemy_gen, game_stats_per_mana,played_cards,side
    cards_to_play = choseCardForTurn(hand,mana)
    mana_left = mana
    if len(cards_to_play) > 0 :
        for played_card in cards_to_play:
            for (i,card) in enumerate(hand):
                if card == played_card:
                    card_from_hand = hand.pop(i)
                    if card_from_hand['type'] != 'Spell' and card_from_hand['type'] != 'Artifact':
                        # Treat 0-cost cards as cost 1 to avoid dividing by zero.
                        if card_from_hand['mana_cost'] == '0':
                            card_from_hand['mana_cost'] = '1'
                        game_stats_per_mana += (int(card_from_hand['attack'])+int(card_from_hand['health']))/float(card_from_hand['mana_cost'])
                        played_cards += 1
                        # Place the minion on a random tile adjacent to our side.
                        adj = getAdjForPlacement(side)
                        index = random.randint(0,len(adj)-1)
                        chosen_place = adj[index]
                        for tile in field:
                            if tile == chosen_place:
                                card_from_hand['side'] = side
                                tile['card'] = card_from_hand
                                break
                    mana_left = mana - int(played_card['mana_cost'])
    elif replace_count < 1:
        hand = replace(deck,hand)
        replace_count += 1
        makeTurn(deck,hand,mana_left,replace_count)
    # Weight unspent mana by lateness in the mana curve (cur_mana is global).
    sum_lost_mana += mana_left*(9-cur_mana)
    return hand
def endTurn(deck,hand):
    '''Draw one random card; draws over the 6-card hand limit are discarded.

    Also accumulates the global hand-size statistic.  Returns `hand`.
    '''
    global sum_hand_size
    index = random.randint(0,len(deck)-1)
    card = deck.pop(index)
    if len(hand) < 6:
        hand.append(card)
    sum_hand_size += len(hand)
    return hand
def generateField():
    '''Append all 9x5 empty tiles ({'pos', 'card'}) to the global field.'''
    global field
    for column in range(1, 10):
        for row in range(1, 6):
            field.append({'pos': {'i': column, 'j': row}, 'card': {}})
def initialPlacement():
    '''Place the two generals on their starting tiles ((1,3) and (9,3)).'''
    global field, general
    fp_gen_pos = {'i':1,'j':3}
    sp_gen_pos = {'i':9,'j':3}
    for tile in field:
        if tile['pos'] == fp_gen_pos:
            tile['card'] = general
        if tile['pos'] == sp_gen_pos:
            tile['card'] = enemy_general
def getAdjForMove(card):
    '''Return free tiles adjacent to the tile currently holding `card`.'''
    global field
    adj = []
    for tile in field:
        if tile['card'] == card:
            cur_adj = getAdj(tile)
            for place in cur_adj:
                adj.append(place)
    return adj
def getAdjForPlacement(side):
    '''Return free tiles adjacent to any tile occupied by `side`.

    May contain duplicates when several friendly tiles share a neighbour.
    '''
    global field
    adj = []
    for tile in field:
        if tile['card'] != {} and tile['card']['side'] == side:
            cur_adj = getAdj(tile)
            for place in cur_adj:
                adj.append(place)
    return adj
def getAdj(cur_tile):
    '''Return the unoccupied tiles in the 3x3 neighborhood of `cur_tile`.

    `cur_tile` itself falls inside the neighborhood but is excluded because
    it holds a card.  Tiles are returned in global `field` order.

    FIX: the original built a dict, deleted occupied entries, and returned
    dict.values() -- a non-indexable view on Python 3, which breaks callers
    that index the result.  Return a plain list instead (backward
    compatible: all callers iterate and/or index it).
    '''
    global field
    cur_pos = cur_tile['pos']
    range_i = range(cur_pos['i'] - 1, cur_pos['i'] + 2)
    range_j = range(cur_pos['j'] - 1, cur_pos['j'] + 2)
    return [tile for tile in field
            if tile['pos']['i'] in range_i
            and tile['pos']['j'] in range_j
            and not tile['card']]
def firstIterationTrade():
    '''Resolve one mutual attack between the first friendly unit found and
    the first enemy unit found, removing units whose health drops to 0.

    NOTE(review): both `break`s limit this to at most one trade per call --
    confirm that is the intended "first iteration" behavior.
    '''
    #TODO trade with nearest THEN TODO trade with adj
    global field, side, other_side
    for tile in field:
        if tile['card'] != {} and tile['card']['side'] == side:
            for deep_tile in field:
                if deep_tile['card'] != {} and deep_tile['card']['side'] == other_side:
                    # Simultaneous damage exchange.
                    tile['card']['health'] = int(tile['card']['health']) - int(deep_tile['card']['attack'])
                    deep_tile['card']['health'] = int(deep_tile['card']['health']) - int(tile['card']['attack'])
                    if int(tile['card']['health']) <= 0:
                        tile['card'] = {}
                    if int(deep_tile['card']['health']) <= 0:
                        deep_tile['card'] = {}
                    break
            break
def showField():
    '''Print an ASCII dump of the global 9x5 board: one text row per j
    coordinate (occupied tiles as "|at:hp|"), then a separator line.

    Refactored: the original repeated the same if/elif body five times,
    once per row; a single loop keyed by j produces identical output.
    '''
    global field
    rows = {j: {} for j in range(1, 6)}
    for tile in field:
        row = rows[tile['pos']['j']]
        if tile['card'] != {}:
            row[tile['pos']['i']] = '|%2s:%2s|' % (tile['card']['attack'], tile['card']['health'])
        else:
            row[tile['pos']['i']] = '| : |'
    for j in range(1, 6):
        row = rows[j]
        for i in sorted(row.keys()):
            sys.stdout.write(row[i])
        sys.stdout.write('\n')
    sys.stdout.write('---------------------------------\n')
def placeCard(card):
    '''NOTE(review): unimplemented stub -- declares the global and does nothing.'''
    global field
def moveGeneral(side):
    '''Move the general of `side` to a random free adjacent tile, clearing
    its previous tile.  Does nothing when no free neighbour exists.
    '''
    if side == 'first':
        cur_gen = general
    else:
        cur_gen = enemy_general
    adj = getAdjForMove(cur_gen)
    if len(adj) > 0:
        index = random.randint(0,len(adj)-1)
        chosen_place = adj[index]
        for tile in field:
            if tile == chosen_place:
                tile['card'] = cur_gen
            elif tile['card'] != {} and tile['card']['type'] == 'GENERAL' and tile['card']['side'] == side:
                # Vacate the tile the general is leaving.
                tile['card'] = {}
# --- command-line configuration -----------------------------------------
# argv[1]: number of candidate decks, argv[2]: games simulated per deck,
# argv[3] (any value): render the board every turn.
c_limit = 1000
cc_limit = 1000
show_field = False
if len(sys.argv) > 1:
    c_limit = int(sys.argv[1])
if len(sys.argv) > 2:
    cc_limit = int(sys.argv[2])
if len(sys.argv) > 3:
    show_field = True
# --- best-deck search state ---------------------------------------------
backup_ar = list(ar)
deck_info = []
c = 0
lost_mana = float('inf')
hand_size = 0
best_deck = []
made_turns = 0
stats_per_mana = 0
# Outer loop: build c_limit random single-faction decks and keep the one with
# the best (lost-mana, hand-size, stats-per-mana) profile.
while c < c_limit:
    random.seed()
    i = 0
    backup_deck = []
    deck = []
    factions = ['Abyssian','Lyonar','Songhai','Vetruvian','Magmar','Vanar']
    faction = factions[random.randint(0,len(factions)-1)]
    ar = list(backup_ar)
    # Draw random cards of the chosen faction, 2-3 copies each, 39 total.
    while i < 39:
        index = random.randint(0,len(ar)-1)
        card = ar.pop(index)
        if card['mana_cost'] != '' and card['faction'] == faction:
            k = 0
            limitk = random.randint(2,3)
            while k < limitk:
                card['side'] = 'first'
                deck.append(card.copy())
                i += 1
                k += 1
                if i == 39:
                    break
    backup_deck = list(deck)
    cc = 0
    sum_lost_mana = 0
    sum_hand_size = 0
    sum_stats_per_mana = 0
    hand = []
    field = []
    turns = 0
    # Inner loop: simulate cc_limit games with this deck.
    while cc < cc_limit:
        side = 'first'
        other_side = 'second'
        enemy_general = {'attack':2,'health':25,'type':'GENERAL','side':other_side}
        for card in backup_deck:
            deck.append(card.copy())
        if deck == backup_deck:
            for card in deck:
                sys.stdout.write(str(card['label'])+':'+str(card['health']))
                sys.stdout.write('\n')
            sys.stdout.write('-----------------------\n')
        simulateFirstDraw(deck,hand)
        mullForFirstTurn(deck,hand,2,0)
        general = {'attack':2,'health':25,'type':'GENERAL','side':side}
        generateField()
        initialPlacement()
        played_cards = 0
        game_stats_per_mana = 0
        mana = 2
        # Play turns until the enemy general dies.
        while enemy_general['health'] > 0:
            turns += 1
            cur_mana = mana
            moveGeneral(side)
            makeTurn(deck,hand,mana,0)
            moveGeneral(other_side)
            firstIterationTrade()
            endTurn(deck,hand)
            if mana < 9:
                mana += 1
            if show_field:
                showField()
        if played_cards > 0:
            sum_stats_per_mana += game_stats_per_mana/float(played_cards)
        cc += 1
        hand = []
        field = []
    c += 1
    # Interactive mode only: live progress indicator.
    if len(sys.argv) <= 2:
        sys.stdout.write('%(progress)2.2f%% done\r' % {'progress': (c*c_limit)/float(c_limit*c_limit/100)})
        sys.stdout.flush()
    # Record the deck when it improves on all three tracked metrics.
    if sum_lost_mana < lost_mana and sum_hand_size > hand_size and sum_stats_per_mana > stats_per_mana:
        stats_per_mana = sum_stats_per_mana
        lost_mana = sum_lost_mana
        hand_size = sum_hand_size
        best_deck = list(backup_deck)
        made_turns = turns/float(cc_limit)
    # Interactive mode only: append summary stats and persist progress.
    if len(sys.argv) <= 2:
        best_deck.append({'stats_per_mana':stats_per_mana/cc_limit})
        best_deck.append({'turns':made_turns})
        best_deck.append({'avg_hand_size':hand_size/(made_turns*cc_limit)})
        best_deck.append({'lost_mana':lost_mana/cc_limit})
        deck_info.append(best_deck)
        json.dump(deck_info,open('best_deck.json','w'),indent=4)
| 3.03125 | 3 |
maintenance/alert/imbalance_trigger.py | avenkats/Smart-City-Sample | 1 | 12762669 | #!/usr/bin/python3
from db_query import DBQuery
from trigger import Trigger
import time
import os
# Environment-driven configuration (all three variables are required):
# SERVICE_INTERVAL: comma-separated floats; index [2] is this trigger's period.
service_interval=list(map(float,os.environ["SERVICE_INTERVAL"].split(",")))
# OFFICE: comma-separated coordinates identifying this office.
office=list(map(float, os.environ["OFFICE"].split(",")))
dbhost=os.environ["DBHOST"]
class ImbalanceTrigger(Trigger):
    '''Trigger that reports disconnected sensors and a sensor/analytics
    imbalance, based on counts from the sensors and algorithms indices.'''
    def __init__(self):
        super(ImbalanceTrigger,self).__init__()
        self._dbs=DBQuery(index="sensors",office=office,host=dbhost)
        self._dba=DBQuery(index="algorithms",office=office,host=dbhost)
    def trigger(self):
        '''Sleep one service interval, then return a list of alert dicts
        (empty when everything is balanced or the queries fail).'''
        time.sleep(service_interval[2])
        info=[]
        try:
            nsensors={
                "total": self._dbs.count("sensor:*"),
                "streaming": self._dbs.count("status:'streaming'"),
                "idle": self._dbs.count("status:'idle'"),
            }
            nalgorithms={
                "total": self._dba.count("name:*"),
            }
        except Exception as e:
            # Query failure: log and report nothing this cycle.
            print("Exception: "+str(e), flush=True)
            return info
        # Sensors neither streaming nor idle are considered disconnected.
        if nsensors["total"]>nsensors["streaming"]+nsensors["idle"]:
            info.append({
                "fatal": [{
                    "message": "Check sensor: #disconnected="+str(nsensors["total"]-nsensors["streaming"]-nsensors["idle"]),
                    "args": nsensors,
                }]
            })
        # More analytics instances than connected sensors: imbalance warning.
        if nalgorithms["total"]>nsensors["streaming"]+nsensors["idle"]:
            info.append({
                "warning": [{
                    "message": "Imbalance: #analytics="+str(nalgorithms["total"])+",#sensors="+str(nsensors["streaming"]+nsensors["idle"]),
                    "args": {
                        "nalgorithms": nalgorithms["total"],
                        "nsensors": nsensors["streaming"]+nsensors["idle"],
                    },
                }],
            })
        return info
| 2.359375 | 2 |
wzdat/nbdependresolv.py | haje01/wzdat | 15 | 12762670 | <reponame>haje01/wzdat
# -*- coding: utf-8 -*-
"""Notebook dependency resolver."""
import os
import logging
from wzdat.rundb import check_notebook_error_and_changed, reset_run,\
get_run_info
from wzdat.util import iter_notebook_manifest, get_notebook_dir
from wzdat.ipynb_runner import update_notebook_by_run, NoDataFound
from wzdat.manifest import Manifest
class UnresolvedHDFDependency(Exception):
    '''Raised when a notebook depends on an HDF that no notebook outputs.'''
    pass
class CircularDependency(Exception):
    '''Raised when notebook HDF dependencies form a cycle.'''
    pass
class DependencyTree(object):
    '''Dependency graph of notebooks linked by HDF outputs/inputs.

    Built from manifests found under `nbdir`; resolve() runs the notebooks
    in dependency order.  (Python 2 code: see the `except X, e` syntax and
    `unicode` below.)
    '''
    def __init__(self, nbdir, skip_nbs=None):
        self.notebooks = []
        # collect notebooks with manifest
        for nbpath, manifest in iter_notebook_manifest(nbdir, False, skip_nbs):
            nb = Notebook(nbpath, manifest)
            self.notebooks.append(nb)
        # add dependencies (a manifest's hdf entry is one hdf or a list)
        for nb, hdf in self.iter_notebook_with_dephdf(skip_nbs):
            if type(hdf[0]) is not list:
                self._find_and_add_depend(nb, hdf)
            else:
                for ahdf in hdf:
                    self._find_and_add_depend(nb, ahdf)
    def _find_hdf_out_notebook(self, _hdf):
        '''Return the notebook that outputs `_hdf`, or None.'''
        for nb, hdf in self.iter_notebook_with_outhdf():
            if _hdf == hdf:
                return nb
    def _find_and_add_depend(self, nb, hdf):
        '''Link `nb` to the producer of `hdf`; raise when none exists.'''
        hnb = self._find_hdf_out_notebook(hdf)
        if hnb is None:
            logging.error(u"UnresolvedHDFDependency for {}".format(nb.path))
            raise UnresolvedHDFDependency()
        nb.add_depend(hnb)
    def iter_notebook_with_outhdf(self):
        '''Yield (notebook, output-hdf) for notebooks declaring an hdf output.'''
        for nb in self.notebooks:
            mdata = nb.manifest._data
            if 'output' in mdata and 'hdf' in mdata['output']:
                yield nb, mdata['output']['hdf']
    def iter_notebook_with_dephdf(self, skip_nbs):
        '''Iterate notebook with depending hdf.'''
        for nb in self.notebooks:
            if skip_nbs is not None and nb.path in skip_nbs:
                continue
            if 'depends' not in nb.manifest or 'hdf' not in\
                nb.manifest.depends:
                continue
            yield nb, nb.manifest.depends['hdf']
    def get_notebook_by_fname(self, fname):
        '''Return the first notebook whose path contains `fname`, or None.'''
        for nb in self.notebooks:
            if fname in nb.path:
                return nb
    def iter_noscd_notebook(self):
        '''Iterate non-scheduled notebooks.'''
        for nb in self.notebooks:
            if 'schedule' in nb.manifest:
                continue
            yield nb
    def _clear_externally_stopped(self):
        '''Reset run-db state for notebooks whose previous run died mid-way.'''
        for nb in self.notebooks:
            path = nb.path
            # reset run info if previous run stopped externally
            info = get_run_info(path)
            if info is not None:
                start, elapsed, cur, total, error = info
                if error is None and cur > 0 and elapsed is None:
                    reset_run(path)
    def resolve(self, updaterun=False):
        '''Resolve (and optionally run) all non-scheduled notebooks.

        Returns (resolved, runs) lists, or None when there are no notebooks.
        '''
        if len(self.notebooks) == 0:
            logging.debug("no notebooks to run.")
            return
        self._clear_externally_stopped()
        resolved = []
        runs = []
        for nb in self.iter_noscd_notebook():
            if nb not in resolved:
                self._resolve(updaterun, nb, resolved, runs, [])
        return resolved, runs
    def _resolve(self, updaterun, notebook, resolved, runs, seen):
        '''Depth-first resolution; `seen` detects cycles along the path.'''
        seen.append(notebook)
        # resolve dependencies
        for dnb in notebook.depends:
            if dnb not in resolved:
                if dnb in seen:
                    raise CircularDependency()
                self._resolve(updaterun, dnb, resolved, runs, seen)
        self._run_resolved(updaterun, notebook, resolved, runs)
    def _run_resolved(self, updaterun, notebook, resolved, runs):
        '''Run notebook after all its dependencies resolved.'''
        logging.debug(u"_run_resolved '{}'".format(notebook.path))
        notebook.reload_manifest()
        path = notebook.path
        # Only run when dependecies changed and notebook has no error or
        # changed
        error, changed = check_notebook_error_and_changed(path)
        logging.debug("nb error {}, nb changed {}".format(error, changed))
        if updaterun:
            # run notebook when its depends changed or had fixed after error
            if notebook.manifest._need_run: # or (error and changed):
                try:
                    update_notebook_by_run(path)
                except NoDataFound, e:
                    logging.debug(unicode(e))
                runs.append(notebook)
            elif error and not changed:
                logging.debug(u"_run_resolved - skip unfixed {}".format(path))
        else:
            logging.debug(u"no need to run")
        resolved.append(notebook)
class Notebook(object):
    '''A notebook file plus its Manifest and the notebooks it depends on.'''
    def __init__(self, path, manifest):
        self.path = path
        self.manifest = manifest
        self.depends = []
    def add_depend(self, notebook):
        '''Register `notebook` as a direct dependency of this one.'''
        self.depends.append(notebook)
    def is_depend(self, parent):
        '''Return True if `parent` is a direct dependency.'''
        return parent in self.depends
    def reload_manifest(self):
        '''Reload manifest to check to run'''
        self.manifest = Manifest(True, self.path)
    @property
    def fname(self):
        '''Base file name of the notebook path.'''
        return os.path.basename(self.path)
def update_all_notebooks(skip_nbs=None):
    '''Build the dependency tree for the notebook directory and run every
    out-of-date, non-scheduled notebook in dependency order.

    Returns whatever DependencyTree.resolve(True) returns: a (resolved,
    runs) pair, or None when no notebooks were found.

    FIX: removed the redundant in-function `from wzdat.nbdependresolv
    import DependencyTree` -- this *is* wzdat/nbdependresolv.py, so the
    class is already in scope.
    '''
    logging.debug('update_all_notebooks start')
    nbdir = get_notebook_dir()
    dt = DependencyTree(nbdir, skip_nbs)
    rv = dt.resolve(True)
    logging.debug('update_all_notebooks done')
    return rv
| 2.140625 | 2 |
BAEKJOON/Python/10102.py | cmsong111/NJ_code | 0 | 12762671 | <gh_stars>0
import sys

# First line: number of votes cast; second line: the vote string.
rounds = int(input())
votes = sys.stdin.readline().strip()
# Tally only the first `rounds` characters, exactly as the original loop did.
a_wins = votes[:rounds].count("A")
b_wins = votes[:rounds].count("B")
if a_wins == b_wins:
    print("Tie")
elif a_wins > b_wins:
    print("A")
else:
    print("B")
| 3.265625 | 3 |
examples/example6.py | lg-gonzalez-juarez/guipython | 0 | 12762672 | <filename>examples/example6.py
# https://www.guru99.com/pyqt-tutorial.html
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QMessageBox
def dialog():
    """Show a modal QMessageBox with Ok/Cancel buttons and a details pane."""
    mbox = QMessageBox()
    mbox.setText("Your allegiance has been noted")
    mbox.setDetailedText("You are now a disciple and subject of the all-knowing Guru")
    mbox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
    # exec_() blocks until the user dismisses the dialog.
    mbox.exec_()
if __name__ == "__main__":
    # Minimal PyQt5 demo: a window with a label and a button that opens
    # the dialog defined above.
    app = QApplication(sys.argv)

    window = QWidget()
    window.resize(300, 300)
    window.setWindowTitle('Guru99')

    greeting = QLabel(window)
    greeting.setText("Behold the Guru, Guru99")
    greeting.move(100, 130)
    greeting.show()

    button = QPushButton(window)
    button.setText('Beheld')
    button.move(110, 150)
    button.show()
    button.clicked.connect(dialog)

    window.show()
    sys.exit(app.exec_())
AppDB/test/unit/test_cassandra_interface.py | christianbaun/appscale | 2 | 12762673 | <filename>AppDB/test/unit/test_cassandra_interface.py
#!/usr/bin/env python
# Programmer: <NAME>
import os
import sys
import unittest
from cassandra.cluster import Cluster
from cassandra.query import BatchStatement
from flexmock import flexmock
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from cassandra_env import cassandra_interface
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../lib/"))
import file_io
class TestCassandra(unittest.TestCase):
  """Unit tests for cassandra_interface.DatastoreProxy.

  The Cassandra driver and AppScale file I/O are mocked with flexmock, so no
  live cluster is needed.  FIX: bare ``assert`` statements were replaced
  with unittest assertion methods -- bare asserts are stripped under -O and
  produce unhelpful failure messages.
  """
  def testConstructor(self):
    """Constructing a DatastoreProxy against a mocked cluster succeeds."""
    flexmock(file_io) \
      .should_receive('read') \
      .and_return('127.0.0.1')
    flexmock(Cluster).should_receive('connect').\
      and_return(flexmock(execute=lambda x: None))
    db = cassandra_interface.DatastoreProxy()
    self.assertIsNotNone(db)
  def testGet(self):
    """batch_get_entity with empty keys returns an empty dict."""
    flexmock(file_io) \
      .should_receive('read') \
      .and_return('127.0.0.1')
    flexmock(Cluster).should_receive('connect').\
      and_return(flexmock(execute=lambda x, **y: []))
    db = cassandra_interface.DatastoreProxy()
    self.assertEqual({}, db.batch_get_entity('table', [], []))
  def testPut(self):
    """batch_put_entity with empty input completes and returns None."""
    flexmock(file_io) \
      .should_receive('read') \
      .and_return('127.0.0.1')
    session = flexmock(prepare=lambda x: '', execute=lambda x: None)
    flexmock(BatchStatement).should_receive('add')
    flexmock(Cluster).should_receive('connect').\
      and_return(session)
    db = cassandra_interface.DatastoreProxy()
    self.assertIsNone(db.batch_put_entity('table', [], [], {}))
  def testDeleteTable(self):
    """delete_table completes without raising."""
    flexmock(file_io) \
      .should_receive('read') \
      .and_return('127.0.0.1')
    flexmock(Cluster).should_receive('connect').\
      and_return(flexmock(execute=lambda x: None))
    db = cassandra_interface.DatastoreProxy()
    # Make sure no exception is thrown
    db.delete_table('table')
  def testRangeQuery(self):
    """range_query over a mocked empty result set returns an empty list."""
    flexmock(file_io) \
      .should_receive('read') \
      .and_return('127.0.0.1')
    flexmock(Cluster).should_receive('connect').\
      and_return(flexmock(execute=lambda x, **y: []))
    db = cassandra_interface.DatastoreProxy()
    self.assertListEqual([], db.range_query("table", [], "start", "end", 0))
  def test_batch_mutate(self):
    """batch_mutate with empty mutation lists completes without raising."""
    app_id = 'guestbook'
    transaction = 1
    flexmock(file_io).should_receive('read').and_return('127.0.0.1')
    flexmock(Cluster).should_receive('connect').\
      and_return(flexmock(execute=lambda x, **y: []))
    db = cassandra_interface.DatastoreProxy()
    db.batch_mutate(app_id, [], [], transaction)
db.batch_mutate(app_id, [], [], transaction)
# Allow running the suite directly: ``python test_cassandra_interface.py``.
if __name__ == "__main__":
  unittest.main()
| 2.34375 | 2 |
tools/Video/DeltaColormap/analyze.py | swharden/ephys-projects | 0 | 12762674 | <filename>tools/Video/DeltaColormap/analyze.py
"""
This script starts with a folder of BMP files generated with ImageJ.
It calculates a mean baseline image and uses that to create dF/F images.
Images are then plotted, annotated, saved in another folder, and encoded as a video.
"""
from os import path
import pathlib
import numpy as np
import matplotlib.pyplot as plt
import cv2
def makeFigures(inputFolder: pathlib.Path, outputFolder: pathlib.Path,
                baselineFrame1: int, baselineFrame2: int, secPerFrame: float):
    """Render an annotated dF/F colormap PNG for every BMP in inputFolder.

    F0 is the mean of the frames in [baselineFrame1, baselineFrame2), and
    each output figure is titled with its filename and acquisition time.
    """
    print("GENERATING FIGURES")
    bmpPaths = sorted(inputFolder.glob("*.bmp"))
    stack = np.dstack([plt.imread(p) for p in bmpPaths])

    # Mean of the baseline frames serves as F0 for the dF/F calculation.
    baselineImage = np.mean(stack[:, :, baselineFrame1:baselineFrame2], axis=2)

    dffLimit = 100
    for frameIndex, bmpPath in enumerate(bmpPaths):
        dFF = (stack[:, :, frameIndex] / baselineImage) - 1

        title = f"{bmpPath.name} ({secPerFrame * frameIndex / 60.0 :0.02f} min)"
        subtitle = "10 µM norepinephrine" if frameIndex > 15 else "baseline"
        plt.title(f"{title}\n{subtitle}")

        # Diverging red/blue map centered on zero change, expressed in percent.
        plt.imshow(dFF * 100, cmap=plt.cm.bwr, vmin=-dffLimit, vmax=dffLimit)
        plt.colorbar(label="ΔF/F (%)")

        saveFile = outputFolder.joinpath(bmpPath.name+".png")
        print(f"Saving: {saveFile.name}")
        plt.savefig(saveFile)
        plt.close()
def makeVideo(imageFolder: pathlib.Path, fps: float = 5):
    """Encode every PNG in imageFolder (in sorted filename order) as ../video.mp4.

    Args:
        imageFolder: folder containing the annotated PNG frames.
        fps: playback frame rate of the resulting video.
    """
    print("Encoding video...")
    # Bug fix: glob order is arbitrary; sort so frames appear chronologically.
    imagePaths = sorted(imageFolder.glob("*.png"))
    outputFile = str(imageFolder.joinpath("../video.mp4"))

    # Frame size is taken from the first image; all frames must match it.
    firstFrame = cv2.imread(str(imagePaths[0]))
    height, width, layers = firstFrame.shape

    # Bug fix: the original passed 0 as the fourcc code, which is not a valid
    # codec identifier for an .mp4 container; use the MPEG-4 codec instead.
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video = cv2.VideoWriter(outputFile, fourcc, fps, (width, height))
    for imagePath in imagePaths:
        video.write(cv2.imread(str(imagePath)))

    # Release the writer before tearing down OpenCV windows so the file is
    # flushed to disk.
    video.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    bmpFolder = pathlib.Path(
        R"X:\Data\C57\GRABNE\2021-09-23-ne-washon\TSeries-09232021-1216-1850-ne-washon\Analysis\01-raw-bmp")
    figureFolder = pathlib.Path(
        R"X:\Data\C57\GRABNE\2021-09-23-ne-washon\TSeries-09232021-1216-1850-ne-washon\Analysis\02-annotated")

    # Frames 5-15 are the baseline; each frame spans 22.874986 seconds.
    makeFigures(bmpFolder, figureFolder, 5, 15, 22.874986)
    makeVideo(figureFolder)
| 2.8125 | 3 |
BoundedInteger.py | jafager/python_simulator | 0 | 12762675 | from SimulatorExceptions import ValueOutOfRangeException
class BoundedInteger:
    """An integer value confined to a closed [minimum, maximum] range.

    Attempts to store a value outside the range raise
    ValueOutOfRangeException instead of silently clamping.
    """

    def __init__(self, minimum, maximum, default):
        # The range must be non-empty and must contain the default value.
        assert (minimum <= maximum)
        assert (default >= minimum)
        assert (default <= maximum)
        self.minimum = minimum
        self.maximum = maximum
        self.default = default
        self.value = default

    def get(self):
        """Return the current value; internal invariant keeps it in range."""
        current = self.value
        assert (current >= self.minimum)
        assert (current <= self.maximum)
        return current

    def set(self, value):
        """Store ``value``, rejecting anything outside the allowed range."""
        if not (self.minimum <= value <= self.maximum):
            raise ValueOutOfRangeException(value)
        self.value = value
| 3.484375 | 3 |
coriolisclient/cli/endpoints.py | aznashwan/python-coriolisclient | 0 | 12762676 | # Copyright (c) 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line interface sub-commands related to endpoints.
"""
import json
from cliff import command
from cliff import lister
from cliff import show
from coriolisclient import exceptions
from coriolisclient.cli import formatter
class EndpointFormatter(formatter.EntityFormatter):
    """Tabular (list view) formatter for endpoint objects."""

    columns = ("ID",
               "Name",
               "Type",
               "Description",
               )

    def _get_sorted_list(self, obj_list):
        # Oldest endpoints first.
        return sorted(obj_list, key=lambda endpoint: endpoint.created_at)

    def _get_formatted_data(self, obj):
        # One value per column, in column order.
        return (obj.id,
                obj.name,
                obj.type,
                obj.description or "",
                )
class EndpointDetailFormatter(formatter.EntityFormatter):
    """Detailed (single entity) formatter for endpoint objects."""

    def __init__(self, show_instances_data=False):
        # NOTE(review): ``show_instances_data`` is currently unused but is
        # kept for interface compatibility with existing callers.
        # Bug fix: the data rows produced by ``_get_formatted_data`` contain
        # seven values (both timestamps), but ``columns`` previously declared
        # only six headers, misaligning every row; "created_at" is now listed.
        self.columns = [
            "id",
            "name",
            "type",
            "description",
            "connection_info",
            "created_at",
            "last_updated",
        ]

    def _get_formatted_data(self, obj):
        # One value per column, in column order.
        data = [obj.id,
                obj.name,
                obj.type,
                obj.description or "",
                obj.connection_info.to_dict(),
                obj.created_at,
                obj.updated_at,
                ]
        return data
class CreateEndpoint(show.ShowOne):
    """Creates a new endpoint"""

    def get_parser(self, prog_name):
        parser = super(CreateEndpoint, self).get_parser(prog_name)
        parser.add_argument('--name', required=True,
                            help='The endpoints\'s name')
        parser.add_argument('--provider', required=True,
                            help='The provider, e.g.: '
                            'vmware_vsphere, openstack')
        parser.add_argument('--description',
                            help='A description for this endpoint')
        parser.add_argument('--connection',
                            help='JSON encoded connection data')
        parser.add_argument('--connection-secret',
                            help='The url of the Barbican secret containing '
                            'the connection info')
        parser.add_argument('--skip-validation', dest='skip_validation',
                            action='store_true',
                            help='Whether to skip validating the connection '
                            'when creating the endpoint.')
        return parser

    def take_action(self, args):
        if args.connection_secret and args.connection:
            raise exceptions.CoriolisException(
                "Please specify either --connection or "
                "--connection-secret, but not both")

        # Connection info comes either from a Barbican secret reference or
        # from inline JSON (mutually exclusive; enforced above).
        connection_info = None
        if args.connection_secret:
            connection_info = {"secret_ref": args.connection_secret}
        elif args.connection:
            connection_info = json.loads(args.connection)

        endpoint = self.app.client_manager.coriolis.endpoints.create(
            args.name,
            args.provider,
            connection_info,
            args.description)

        # Unless explicitly skipped, verify the endpoint is reachable.
        if not args.skip_validation:
            valid, message = (
                self.app.client_manager.coriolis.endpoints.validate_connection(
                    endpoint.id))
            if not valid:
                raise exceptions.EndpointConnectionValidationFailed(message)

        return EndpointDetailFormatter().get_formatted_entity(endpoint)
class UpdateEndpoint(show.ShowOne):
    """Updates an endpoint"""

    def get_parser(self, prog_name):
        parser = super(UpdateEndpoint, self).get_parser(prog_name)
        parser.add_argument('id', help='The endpoint\'s id')
        parser.add_argument('--name',
                            help='The endpoints\'s name')
        parser.add_argument('--description',
                            help='A description for this endpoint')
        parser.add_argument('--connection',
                            help='JSON encoded connection data')
        parser.add_argument('--connection-secret',
                            help='The url of the Barbican secret containing '
                            'the connection info')
        return parser

    def take_action(self, args):
        if args.connection_secret and args.connection:
            raise exceptions.CoriolisException(
                "Please specify either --connection or "
                "--connection-secret, but not both")

        # Connection info comes either from a Barbican secret reference or
        # from inline JSON (mutually exclusive; enforced above).
        connection_info = None
        if args.connection_secret:
            connection_info = {"secret_ref": args.connection_secret}
        elif args.connection:
            connection_info = json.loads(args.connection)

        # Only send fields the user explicitly provided.
        updated_values = {}
        if args.name is not None:
            updated_values["name"] = args.name
        if args.description is not None:
            updated_values["description"] = args.description
        if connection_info:
            updated_values["connection_info"] = connection_info

        endpoint = self.app.client_manager.coriolis.endpoints.update(
            args.id, updated_values)

        return EndpointDetailFormatter().get_formatted_entity(endpoint)
class ShowEndpoint(show.ShowOne):
    """Show an endpoint"""

    def get_parser(self, prog_name):
        parser = super(ShowEndpoint, self).get_parser(prog_name)
        parser.add_argument('id', help='The endpoint\'s id')
        return parser

    def take_action(self, args):
        # Fetch the endpoint and render it with the detail formatter.
        manager = self.app.client_manager.coriolis.endpoints
        return EndpointDetailFormatter().get_formatted_entity(
            manager.get(args.id))
class DeleteEndpoint(command.Command):
    """Delete an endpoint"""

    def get_parser(self, prog_name):
        parser = super(DeleteEndpoint, self).get_parser(prog_name)
        parser.add_argument('id', help='The endpoint\'s id')
        return parser

    def take_action(self, args):
        # Deletion returns nothing on success.
        self.app.client_manager.coriolis.endpoints.delete(args.id)
class ListEndpoint(lister.Lister):
    """List endpoints"""

    def get_parser(self, prog_name):
        # No extra arguments beyond the base lister options.
        return super(ListEndpoint, self).get_parser(prog_name)

    def take_action(self, args):
        endpoints = self.app.client_manager.coriolis.endpoints.list()
        return EndpointFormatter().list_objects(endpoints)
class EndpointValidateConnection(command.Command):
    """Validates an endpoint's connection"""

    def get_parser(self, prog_name):
        parser = super(EndpointValidateConnection, self).get_parser(prog_name)
        parser.add_argument('id', help='The endpoint\'s id')
        return parser

    def take_action(self, args):
        valid, message = (
            self.app.client_manager.coriolis.endpoints.validate_connection(
                args.id))
        # Surface validation failures as a dedicated exception.
        if not valid:
            raise exceptions.EndpointConnectionValidationFailed(message)
| 2.140625 | 2 |
h5_file.py | mns1yash/Work_Yash | 0 | 12762677 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 12:17:42 2020

@author: BEL
"""

import tensorflow as tf
import keras  # NOTE(review): unused here; kept in case of import side effects
import h5py

# Inspect the top-level groups of the saved model file.  The context manager
# fixes the original resource leak (the file handle was never closed), and
# printing the keys replaces a discarded expression that had no effect.
with h5py.File('individual_model.h5', 'r') as f:
    print(list(f.keys()))

# Create a multi-GPU mirrored distribution strategy.  The original trailing
# bare ``strategy`` expression (a REPL leftover with no effect) was removed.
strategy = tf.distribute.MirroredStrategy()
| 2.046875 | 2 |
helper_functions.py | dendisuhubdy/pytorch_HMM | 88 | 12762678 | <filename>helper_functions.py<gh_stars>10-100
import torch
def one_hot(letters, S):
    """
    letters : LongTensor of shape (batch size, sequence length)
    S : integer (# of possible letters)

    Convert a batch of integer letter indices to one-hot vectors of
    dimension S.  Returns a float tensor of shape (batch, seq, S).
    """
    # Performance fix: scatter 1.0 at each letter index along the last
    # dimension in one vectorized call, instead of the original per-element
    # Python double loop (O(batch * seq) interpreter iterations).
    out = torch.zeros(letters.shape[0], letters.shape[1], S)
    out.scatter_(2, letters.unsqueeze(-1), 1.0)
    return out
def one_hot_to_string(input, S):
    """
    input : Tensor of shape (T, |Sx|)
    S : list of characters (alphabet, Sx or Sy)

    Decode a one-hot (or score) matrix into a string by taking the argmax
    character per row, then trim trailing whitespace.
    """
    _, indices = input.max(dim=1)
    letters = [S[index] for index in indices]
    return "".join(letters).rstrip()
| 2.84375 | 3 |
ui/about.py | Chereq/cryptex | 0 | 12762679 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_about_dialog(object):
    """pyuic5-generated view class for the "About..." dialog.

    Do not edit by hand — regenerate from ``about.ui`` instead (see the
    warning in this file's header).
    """

    def setupUi(self, about_dialog):
        """Create, configure and lay out every widget of the dialog."""
        # Window chrome: application-modal, 400x331 px, key icon.
        about_dialog.setObjectName("about_dialog")
        about_dialog.setWindowModality(QtCore.Qt.ApplicationModal)
        about_dialog.resize(400, 331)
        about_dialog.setFocusPolicy(QtCore.Qt.NoFocus)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("ui/images/passkey.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        about_dialog.setWindowIcon(icon)
        about_dialog.setModal(True)
        # Vertical layout holding title, text, image, author line and button row.
        self.verticalLayout = QtWidgets.QVBoxLayout(about_dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        # Large title label, centered.
        self.ablout_label = QtWidgets.QLabel(about_dialog)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.ablout_label.setFont(font)
        self.ablout_label.setAlignment(QtCore.Qt.AlignCenter)
        self.ablout_label.setObjectName("ablout_label")
        self.verticalLayout.addWidget(self.ablout_label)
        # Read-only, frameless, transparent description area.
        self.about_field = QtWidgets.QTextBrowser(about_dialog)
        self.about_field.setFocusPolicy(QtCore.Qt.NoFocus)
        self.about_field.setAutoFillBackground(False)
        self.about_field.setStyleSheet("background: rgba(0, 255, 0, 0)")
        self.about_field.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.about_field.setObjectName("about_field")
        self.verticalLayout.addWidget(self.about_field)
        # Centered cryptex illustration.
        self.cryptex_image = QtWidgets.QLabel(about_dialog)
        self.cryptex_image.setText("")
        self.cryptex_image.setPixmap(QtGui.QPixmap("ui/images/cryptex.png"))
        self.cryptex_image.setAlignment(QtCore.Qt.AlignCenter)
        self.cryptex_image.setObjectName("cryptex_image")
        self.verticalLayout.addWidget(self.cryptex_image)
        # Right-aligned author credit.
        self.author_label = QtWidgets.QLabel(about_dialog)
        self.author_label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.author_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.author_label.setObjectName("author_label")
        self.verticalLayout.addWidget(self.author_label)
        # Button row: stretch spacer pushes the Close button to the right.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.close_button = QtWidgets.QPushButton(about_dialog)
        self.close_button.setMaximumSize(QtCore.QSize(75, 16777215))
        self.close_button.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.close_button.setAutoFillBackground(False)
        self.close_button.setObjectName("close_button")
        self.horizontalLayout.addWidget(self.close_button)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(about_dialog)
        QtCore.QMetaObject.connectSlotsByName(about_dialog)

    def retranslateUi(self, about_dialog):
        """Assign all user-visible (translatable) strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        about_dialog.setWindowTitle(_translate("about_dialog", "About..."))
        self.ablout_label.setText(_translate("about_dialog", "CryptEX"))
        self.about_field.setPlaceholderText(_translate("about_dialog", "Blah-blah-blah~"))
        self.author_label.setText(_translate("about_dialog", "Author"))
        self.close_button.setText(_translate("about_dialog", "Close"))
| 1.765625 | 2 |
test/dataset_test.py | LFrancesco/pytorch_geometric_temporal | 0 | 12762680 | <reponame>LFrancesco/pytorch_geometric_temporal
import numpy as np
import networkx as nx
from torch_geometric_temporal.data.dataset import ChickenpoxDatasetLoader, METRLADatasetLoader, PemsBayDatasetLoader, PedalMeDatasetLoader
from torch_geometric_temporal.data.discrete.static_graph_discrete_signal import StaticGraphDiscreteSignal
from torch_geometric_temporal.data.discrete.dynamic_graph_discrete_signal import DynamicGraphDiscreteSignal
from torch_geometric_temporal.data.splitter import discrete_train_test_split
def get_edge_array(n_count):
    """Return a (2, |E|) edge-index array of a random G(n, 0.1) graph."""
    graph = nx.gnp_random_graph(n_count, 0.1)
    return np.array(list(graph.edges())).T
def generate_signal(snapshot_count, n_count, feature_count):
    """Create random per-snapshot edge indices, unit edge weights and features."""
    edge_indices = [get_edge_array(n_count) for _ in range(snapshot_count)]
    edge_weights = [np.ones(index.shape[1]) for index in edge_indices]
    features = [np.random.uniform(0, 1, (n_count, feature_count))
                for _ in range(snapshot_count)]
    return edge_indices, edge_weights, features
def test_dynamic_graph_discrete_signal_real():
    """Iterating a dynamic-graph signal yields correctly shaped snapshots
    for both float and integer targets."""
    snapshot_count = 250
    n_count = 100
    feature_count = 32

    # Consistency fix: pass the named constants instead of repeating the
    # literals 250/100/32 declared just above.
    edge_indices, edge_weights, features = generate_signal(snapshot_count,
                                                           n_count,
                                                           feature_count)

    # Float (regression-style) targets.
    targets = [np.random.uniform(0, 10, (n_count,)) for _ in range(snapshot_count)]
    dataset = DynamicGraphDiscreteSignal(edge_indices, edge_weights, features, targets)
    for epoch in range(2):
        for snapshot in dataset:
            assert snapshot.edge_index.shape[0] == 2
            assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
            assert snapshot.x.shape == (n_count, feature_count)
            assert snapshot.y.shape == (n_count, )

    # Integer (classification-style) targets.
    targets = [np.floor(np.random.uniform(0, 10, (n_count,))).astype(int)
               for _ in range(snapshot_count)]
    dataset = DynamicGraphDiscreteSignal(edge_indices, edge_weights, features, targets)
    for epoch in range(2):
        for snapshot in dataset:
            assert snapshot.edge_index.shape[0] == 2
            assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
            assert snapshot.x.shape == (n_count, feature_count)
            assert snapshot.y.shape == (n_count, )
def test_static_graph_discrete_signal():
    """A static signal built from all-None inputs yields all-None snapshots."""
    dataset = StaticGraphDiscreteSignal(None, None, [None, None], [None, None])
    for snapshot in dataset:
        for attribute in ("edge_index", "edge_attr", "x", "y"):
            assert getattr(snapshot, attribute) is None
def test_dynamic_graph_discrete_signal():
    """A dynamic signal built from all-None inputs yields all-None snapshots."""
    dataset = DynamicGraphDiscreteSignal([None, None], [None, None], [None, None], [None, None])
    for snapshot in dataset:
        for attribute in ("edge_index", "edge_attr", "x", "y"):
            assert getattr(snapshot, attribute) is None
def test_static_graph_discrete_signal_typing():
    """Single-element numpy features/targets keep their (1,) shape."""
    dataset = StaticGraphDiscreteSignal(None, None, [np.array([1])], [np.array([2])])
    for snapshot in dataset:
        assert snapshot.edge_index is None
        assert snapshot.edge_attr is None
        assert snapshot.x.shape == (1,)
        assert snapshot.y.shape == (1,)
def test_chickenpox():
    """The Chickenpox dataset yields snapshots with the documented shapes."""
    dataset = ChickenpoxDatasetLoader().get_dataset()
    for _ in range(3):
        for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 102)
            assert snapshot.edge_attr.shape == (102, )
            assert snapshot.x.shape == (20, 4)
            assert snapshot.y.shape == (20, )
def test_pedalme():
    """The PedalMe dataset yields snapshots with the documented shapes."""
    dataset = PedalMeDatasetLoader().get_dataset()
    for _ in range(3):
        for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 225)
            assert snapshot.edge_attr.shape == (225, )
            assert snapshot.x.shape == (15, 4)
            assert snapshot.y.shape == (15, )
def test_metrla():
    """The METR-LA dataset yields snapshots with the default window shapes."""
    dataset = METRLADatasetLoader(raw_data_dir="/tmp/").get_dataset()
    for _ in range(3):
        for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 1722)
            assert snapshot.edge_attr.shape == (1722, )
            assert snapshot.x.shape == (207, 2, 12)
            assert snapshot.y.shape == (207, 12)
def test_metrla_task_generator():
    """Custom in/out window lengths are reflected in METR-LA snapshot shapes."""
    loader = METRLADatasetLoader(raw_data_dir="/tmp/")
    dataset = loader.get_dataset(num_timesteps_in=6, num_timesteps_out=5)
    for _ in range(3):
        for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 1722)
            assert snapshot.edge_attr.shape == (1722, )
            assert snapshot.x.shape == (207, 2, 6)
            assert snapshot.y.shape == (207, 5)
def test_pemsbay():
    """The PeMS-BAY dataset yields snapshots with the default window shapes."""
    dataset = PemsBayDatasetLoader(raw_data_dir="/tmp/").get_dataset()
    for _ in range(3):
        for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 2694)
            assert snapshot.edge_attr.shape == (2694, )
            assert snapshot.x.shape == (325, 2, 12)
            assert snapshot.y.shape == (325, 2, 12)
def test_pemsbay_task_generator():
    """Custom in/out window lengths are reflected in PeMS-BAY snapshot shapes."""
    loader = PemsBayDatasetLoader(raw_data_dir="/tmp/")
    dataset = loader.get_dataset(num_timesteps_in=6, num_timesteps_out=5)
    for _ in range(3):
        for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 2694)
            assert snapshot.edge_attr.shape == (2694, )
            assert snapshot.x.shape == (325, 2, 6)
            assert snapshot.y.shape == (325, 2, 5)
def test_discrete_train_test_split_static():
    """Both splits of the static Chickenpox signal keep the original shapes."""
    dataset = ChickenpoxDatasetLoader().get_dataset()
    train_dataset, test_dataset = discrete_train_test_split(dataset, 0.8)

    for split in (train_dataset, test_dataset):
        for _ in range(2):
            for snapshot in split:
                assert snapshot.edge_index.shape == (2, 102)
                assert snapshot.edge_attr.shape == (102, )
                assert snapshot.x.shape == (20, 4)
                assert snapshot.y.shape == (20, )
def test_discrete_train_test_split_dynamic():
    """Both splits of a dynamic signal keep the original snapshot shapes."""
    snapshot_count = 250
    n_count = 100
    feature_count = 32

    # Consistency fix: pass the named constants instead of repeating the
    # literals 250/100/32 declared just above.
    edge_indices, edge_weights, features = generate_signal(snapshot_count,
                                                           n_count,
                                                           feature_count)

    targets = [np.random.uniform(0, 10, (n_count,)) for _ in range(snapshot_count)]
    dataset = DynamicGraphDiscreteSignal(edge_indices, edge_weights, features, targets)
    train_dataset, test_dataset = discrete_train_test_split(dataset, 0.8)

    for split in (train_dataset, test_dataset):
        for _ in range(2):
            for snapshot in split:
                assert snapshot.edge_index.shape[0] == 2
                assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
                assert snapshot.x.shape == (n_count, feature_count)
                assert snapshot.y.shape == (n_count, )
shark/policy/ppo.py | 7starsea/shark | 0 | 12762681 | <reponame>7starsea/shark<gh_stars>0
# coding=utf-8
import torch
import torch.nn.functional as F
from collections import namedtuple
from torch.distributions import Categorical
import numpy as np
from .base import BasePGPolicy
from .namedarraytuple import namedarraytuple
# Rollout record stored per batch: observations, actions taken, and the
# bootstrapped value targets used for the critic regression.
PPOTransition = namedarraytuple('PPOTransition', ('obs', 'act', 'v_label'))
def compute_target(v_final, r_lst, done_lst, gamma):
    """Bootstrap discounted returns backwards from the final state value.

    Episode boundaries (done == 1) zero out the bootstrapped term.
    """
    returns = []
    running = v_final
    for reward, done in zip(reversed(r_lst), reversed(done_lst)):
        running = reward + gamma * running * (1 - done)
        returns.append(running)
    returns.reverse()
    return torch.cat(returns)
class PPOPolicy(BasePGPolicy):
    """Proximal Policy Optimization with the clipped surrogate objective.

    Per batch it runs ``k_epochs`` updates of
    ``loss = clip_loss + vf_coef * value_loss - ent_coef * entropy``.
    """

    def __init__(self, policy_net, optimizer, gamma, dist_fn=Categorical,
                 eps_clip=0.2, vf_coef=.8, ent_coef=.01, max_grad_norm=.5, k_epochs=3):
        # policy_net must expose ``pi`` (action probabilities) and ``v``
        # (state-value estimate); see their uses below.
        super().__init__('PPO', policy_net, optimizer, gamma)
        self.dist_fn = dist_fn              # distribution built from pi's output
        self.eps_clip = eps_clip            # clipping radius of the probability ratio
        self.w_vf = vf_coef                 # weight of the value-function loss
        self.w_ent = ent_coef               # weight of the entropy bonus
        self.max_grad_norm = max_grad_norm  # gradient-clipping threshold
        self.k_epochs = k_epochs            # optimization epochs per batch

    def actor(self, s, noise=None):
        """Sample an action from the current policy for state batch ``s``.

        ``noise`` is unused here; presumably kept for interface
        compatibility with sibling policies — TODO confirm.
        """
        prob = self.policy_net.pi(s, softmax_dim=1)
        a = self.dist_fn(prob).sample()
        return a

    def critic(self, s):
        """Return the state-value estimate for state batch ``s``."""
        return self.policy_net.v(s)

    def collect(self, s_final, s_lst, a_lst, r_lst, done_lst):
        """Assemble one rollout into a PPOTransition with value targets."""
        with torch.no_grad():
            v_final = self.policy_net.v(s_final).detach()
        # Bootstrapped discounted returns serve as regression targets for v.
        v_label = compute_target(v_final, r_lst, done_lst, self.gamma)
        s_vec = torch.cat(s_lst)
        a_vec = torch.cat(a_lst)

        # s_vec_next = s_vec.clone()
        # s_vec_next[:-1] = s_vec[1:]
        # s_vec_next[-1] = s_final
        return PPOTransition(s_vec, a_vec, v_label)

    def replay_transition(self):
        """Transition type used by the replay machinery (generic ``object``)."""
        return object

    def learn(self, batch, **kwargs):
        """Run ``k_epochs`` clipped-surrogate updates on one collected batch.

        Returns ``(mean loss, per-sample mean |TD error|)``.
        """
        s_vec, a_vec, v_label = batch.obs, batch.act, batch.v_label
        with torch.no_grad():
            # Old-policy log-probs come from the frozen target network and
            # stay fixed across the k optimization epochs.
            dist_old = self.dist_fn(self.target_net.pi(s_vec).detach())
            log_prob_old = dist_old.log_prob(a_vec)

        losses, td_errors = [], []
        for _ in range(self.k_epochs):
            v_hat = self.policy_net.v(s_vec)
            advantage = (v_label - v_hat).detach()
            td_errors.append(torch.abs(advantage))
            # Standardize advantages for numerical stability.
            advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-5)

            dist = self.dist_fn(self.policy_net.pi(s_vec))
            # Importance ratio pi_new(a|s) / pi_old(a|s).
            ratio = torch.exp(dist.log_prob(a_vec) - log_prob_old)

            # PPO-Clip objective: pessimistic minimum of the unclipped and
            # clipped surrogate terms.
            surr1 = ratio * advantage
            surr2 = ratio.clamp(1. - self.eps_clip, 1 + self.eps_clip) * advantage
            clip_loss = -torch.min(surr1, surr2).mean()
            e_loss = dist.entropy().mean()
            vf_loss = F.mse_loss(v_hat, v_label)
            loss = clip_loss + self.w_vf * vf_loss - self.w_ent * e_loss

            # pi = self.policy_net.pi(s_vec, softmax_dim=1)
            #
            # # # policy loss (Q-network) + critic loss (Critic)
            # # # loss = -(torch.log(pi_a) * advantage).mean() + advantage.pow(2).mean()
            # m = self.dist_fn(pi)
            # loss = -(m.log_prob(a_vec) * advantage).mean() + F.smooth_l1_loss(v_hat, v_label)

            # # policy loss (Q-network) + critic loss (Critic)
            # # loss = -(torch.log(pi_a) * advantage).mean() + advantage.pow(2).mean()
            # pi_a = pi.gather(1, a_vec.unsqueeze(1)).squeeze(1)
            # loss = -(torch.log(pi_a) * advantage).mean() + F.smooth_l1_loss(v_hat, v_label)

            self.optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), self.max_grad_norm)
            self.optimizer.step()
            losses.append(loss.item())

        self.sync_target()
        return np.mean(losses), torch.mean(torch.stack(td_errors, dim=1), dim=1)
| 2.0625 | 2 |
apscheduler/util.py | drunkpig/apscheduler | 1 | 12762682 | <filename>apscheduler/util.py<gh_stars>1-10
"""This module contains several handy functions primarily meant for internal use."""
import re
from datetime import datetime, timedelta
from functools import partial
from inspect import signature, isclass, ismethod
from typing import Tuple, Any
class _Undefined:
    """Falsy sentinel type; see the module-level ``undefined`` instance."""

    def __bool__(self):
        return False

    def __repr__(self):
        return '<undefined>'


undefined = _Undefined()  #: a unique object that only signifies that no value is defined
# Matches ISO-8601-like date strings: the date part is mandatory; the time,
# fractional seconds and timezone offset ("Z" or +-HH:MM) are all optional.
_DATE_REGEX = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'(?:[ T](?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})'
    r'(?:\.(?P<microsecond>\d{1,6}))?'
    r'(?P<timezone>Z|[+-]\d\d:\d\d)?)?$')
def datetime_ceil(dateval: datetime):
    """Round the given datetime object upwards to the next whole second."""
    micro = dateval.microsecond
    if micro:
        # Add the microseconds missing to reach the next full second.
        return dateval + timedelta(microseconds=1000000 - micro)
    return dateval
def get_callable_name(func):
    """
    Returns the best available display name for the given function/callable.

    :rtype: str
    """
    # The easy case (on Python 3.3+).
    qualname = getattr(func, '__qualname__', None)
    if qualname is not None:
        return qualname

    # Class methods, bound and unbound methods (legacy attributes).
    f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
    if f_self and hasattr(func, '__name__'):
        f_class = f_self if isclass(f_self) else f_self.__class__
    else:
        f_class = getattr(func, 'im_class', None)

    if f_class and hasattr(func, '__name__'):
        return '%s.%s' % (f_class.__name__, func.__name__)

    if hasattr(func, '__call__'):
        if hasattr(func, '__name__'):
            # A plain class or function.
            return func.__name__
        # An instance of a class defining __call__.
        return func.__class__.__name__

    raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)
def obj_to_ref(obj):
    """
    Returns the path to the given callable.

    :rtype: str
    :raises TypeError: if the given object is not callable
    :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested
        function

    """
    if isinstance(obj, partial):
        raise ValueError('Cannot create a reference to a partial()')

    name = get_callable_name(obj)
    if '<lambda>' in name:
        raise ValueError('Cannot create a reference to a lambda')
    if '<locals>' in name:
        raise ValueError('Cannot create a reference to a nested function')

    if not ismethod(obj):
        module = obj.__module__
    else:
        # Legacy method attributes: prefer the bound instance's module,
        # then the defining class's module, then the function's own.
        bound_self = getattr(obj, 'im_self', None)
        defining_class = getattr(obj, 'im_class', None)
        if bound_self:
            module = bound_self.__module__
        elif defining_class:
            module = defining_class.__module__
        else:
            module = obj.__module__

    return '%s:%s' % (module, name)
def ref_to_obj(ref):
    """
    Returns the object pointed to by ``ref`` (a ``module.path:attr.path`` string).

    :type ref: str
    """
    if not isinstance(ref, str):
        raise TypeError('References must be strings')
    if ':' not in ref:
        raise ValueError('Invalid reference')

    modulename, _, attrpath = ref.partition(':')
    try:
        obj = __import__(modulename, fromlist=[attrpath])
    except ImportError:
        raise LookupError('Error resolving reference %s: could not import module' % ref)

    try:
        # Walk the dotted attribute path down from the module.
        for attr in attrpath.split('.'):
            obj = getattr(obj, attr)
        return obj
    except Exception:
        raise LookupError('Error resolving reference %s: error looking up object' % ref)
def maybe_ref(ref):
    """
    Returns the object that the given reference points to, if it is indeed a reference.
    If it is not a reference, the object is returned as-is.
    """
    return ref_to_obj(ref) if isinstance(ref, str) else ref
def check_callable_args(func, args, kwargs):
    """
    Ensures that the given callable can be called with the given arguments.

    Raises :exc:`ValueError` describing the first category of mismatch found:
    args/kwargs conflicts, keyword use of positional-only parameters, missing
    required arguments, or surplus positional/keyword arguments.

    :type args: tuple
    :type kwargs: dict
    """
    pos_kwargs_conflicts = []  # parameters that have a match in both args and kwargs
    positional_only_kwargs = []  # positional-only parameters that have a match in kwargs
    unsatisfied_args = []  # parameters in signature that don't have a match in args or kwargs
    unsatisfied_kwargs = []  # keyword-only arguments that don't have a match in kwargs
    unmatched_args = list(args)  # args that didn't match any of the parameters in the signature
    # kwargs that didn't match any of the parameters in the signature
    unmatched_kwargs = list(kwargs)
    # indicates if the signature defines *args and **kwargs respectively
    has_varargs = has_var_kwargs = False

    try:
        sig = signature(func)
    except ValueError:
        # signature() doesn't work against every kind of callable
        # (e.g. some builtins); in that case, skip validation entirely.
        return

    # Consume args/kwargs against each declared parameter, recording any
    # conflict or shortfall in the bookkeeping lists above.
    for param in sig.parameters.values():
        if param.kind == param.POSITIONAL_OR_KEYWORD:
            if param.name in unmatched_kwargs and unmatched_args:
                pos_kwargs_conflicts.append(param.name)
            elif unmatched_args:
                del unmatched_args[0]
            elif param.name in unmatched_kwargs:
                unmatched_kwargs.remove(param.name)
            elif param.default is param.empty:
                unsatisfied_args.append(param.name)
        elif param.kind == param.POSITIONAL_ONLY:
            if unmatched_args:
                del unmatched_args[0]
            elif param.name in unmatched_kwargs:
                unmatched_kwargs.remove(param.name)
                positional_only_kwargs.append(param.name)
            elif param.default is param.empty:
                unsatisfied_args.append(param.name)
        elif param.kind == param.KEYWORD_ONLY:
            if param.name in unmatched_kwargs:
                unmatched_kwargs.remove(param.name)
            elif param.default is param.empty:
                unsatisfied_kwargs.append(param.name)
        elif param.kind == param.VAR_POSITIONAL:
            has_varargs = True
        elif param.kind == param.VAR_KEYWORD:
            has_var_kwargs = True

    # Make sure there are no conflicts between args and kwargs
    if pos_kwargs_conflicts:
        raise ValueError('The following arguments are supplied in both args and kwargs: %s' %
                         ', '.join(pos_kwargs_conflicts))

    # Check if keyword arguments are being fed to positional-only parameters
    if positional_only_kwargs:
        raise ValueError('The following arguments cannot be given as keyword arguments: %s' %
                         ', '.join(positional_only_kwargs))

    # Check that the number of positional arguments minus the number of matched kwargs matches the
    # argspec
    if unsatisfied_args:
        raise ValueError('The following arguments have not been supplied: %s' %
                         ', '.join(unsatisfied_args))

    # Check that all keyword-only arguments have been supplied
    if unsatisfied_kwargs:
        raise ValueError(
            'The following keyword-only arguments have not been supplied in kwargs: %s' %
            ', '.join(unsatisfied_kwargs))

    # Check that the callable can accept the given number of positional arguments
    if not has_varargs and unmatched_args:
        raise ValueError(
            'The list of positional arguments is longer than the target callable can handle '
            '(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args)))

    # Check that the callable can accept the given keyword arguments
    if not has_var_kwargs and unmatched_kwargs:
        raise ValueError(
            'The target callable does not accept the following keyword arguments: %s' %
            ', '.join(unmatched_kwargs))
def marshal_object(obj) -> Tuple[str, Any]:
    """Return a ``(class reference, state)`` pair for later reconstruction
    with :func:`unmarshal_object`."""
    cls = obj.__class__
    return f'{cls.__module__}:{cls.__qualname__}', obj.__getstate__()
def unmarshal_object(ref: str, state):
    """Recreate an object previously serialized with :func:`marshal_object`.

    The class's ``__init__`` is deliberately bypassed via ``__new__``; the
    state is restored through ``__setstate__``.
    """
    cls = ref_to_obj(ref)
    obj = cls.__new__(cls)
    obj.__setstate__(state)
    return obj
| 2.921875 | 3 |
solutions/server/server-09-connect-database/server/routes/task.py | FroeMic/CDTM-Backend-Workshop | 0 | 12762683 | from flask import request, jsonify
from server import app
from server.database import *
from server.utils import json_abort, list_exists, has_json
from server.models import *
# MARK: Task routes
@app.route('/api/lists/<string:list_id>/tasks', methods=['GET'])
@list_exists
def get_tasks(list_id):
    """Return all tasks of the given list as a JSON object."""
    tasks = db_get_tasks_for_list(list_id)
    return jsonify({'tasks': [task.__dict__ for task in tasks]})
# CREATE ROUTE
@app.route('/api/lists/<string:list_id>/tasks', methods=['POST'])
@list_exists
@has_json
def create_task(list_id):
    """Create a new task for a list from the JSON body's ``title`` field."""
    data = request.get_json()
    title = data.get('title', None)

    # Idiom fix: compare against None with ``is`` rather than ``==``.
    if title is None:
        json_abort(400, 'Invalid request parameters')

    new_task = db_create_task(list_id, title)
    if new_task is None:
        json_abort(400, 'Could not create task')

    return jsonify(new_task.__dict__)
# DESTROY ROUTE
@app.route('/api/lists/<string:list_id>/tasks/<string:task_id>', methods=['DELETE'])
@list_exists
def remove_task(list_id, task_id):
    """Delete the given task and report success to the client."""
    db_delete_task(task_id)
    response = {'result': True}
    return jsonify(response)
# UPDATE ROUTE
@app.route('/api/lists/<string:list_id>/tasks/<string:task_id>', methods=['PUT'])
@list_exists
@has_json
def update_task(list_id, task_id):
    """Update an existing task with optimistic-concurrency control.

    Requires ``title``, ``status``, ``description``, ``due`` and
    ``revision`` in the JSON body.  Aborts with 404 if the task does not
    exist, 400 on missing fields, 409 if the server already holds a newer
    revision, and 500 if persisting the update fails.
    """
    data = request.get_json()
    task = db_get_task(list_id, task_id)
    # Idiom fix: compare against None with ``is`` / membership, not ``==``.
    if task is None:
        json_abort(404, 'Task not found')
    title = data.get('title')
    status = data.get('status')
    description = data.get('description')
    due = data.get('due')
    revision = data.get('revision')
    if None in (title, status, description, due, revision):
        json_abort(400, 'Invalid request parameters')
    # Only update tasks when there is no newer version on the server.
    if revision < task.revision:
        json_abort(409, 'Newer version of task available')
    task.title = title
    task.status = status
    task.description = description
    task.due = due
    task.revision += 1
    task = db_update_task(list_id, task)
    if task is None:
        json_abort(500, 'Could not update task')
    return jsonify(task.__dict__)
| 2.6875 | 3 |
scrapers/ANN-antrim-and-newtownabbey/councillors.py | DemocracyClub/LGSF | 4 | 12762684 | <gh_stars>1-10
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from lgsf.councillors.scrapers import HTMLCouncillorScraper
class Scraper(HTMLCouncillorScraper):
    """Scrape councillor details for Antrim and Newtownabbey council."""

    base_url = "https://antrimandnewtownabbey.gov.uk/councillors/"
    list_page = {
        "container_css_selector": "main",
        "councillor_css_selector": ".contact-card",
    }
    # Cached parsed listing page so get_ward_for_person() does not
    # re-fetch it for every councillor.
    raw_html = None

    def get_raw_html(self):
        """Fetch (once) and return the parsed councillor listing page."""
        if not self.raw_html:
            self.raw_html = self.get_page(self.base_url)
        return self.raw_html

    def get_ward_for_person(self, name):
        """Return the ward heading that precedes *name* on the listing page."""
        raw_html = self.get_raw_html()
        # Fix: escape the name before compiling it — names containing regex
        # metacharacters (e.g. parentheses or dots) previously broke the search.
        title_tag = raw_html.find(string=re.compile(re.escape(name)))
        ward = title_tag.find_all_previous("div", {"class": re.compile("wrapper-*")})[
            0
        ].h2.get_text(strip=True)
        return ward.replace(" Councillors", "").strip()

    def get_single_councillor(self, councillor_html):
        """Build and return a councillor record from one ``.contact-card`` element."""
        image_style = councillor_html.select("div.img")[0]["style"]
        # The photo URL is embedded in an inline CSS background-image rule.
        image_url = image_style.split("'")[1].split("?")[0]
        image_url = urljoin(self.base_url, image_url)
        url = image_url
        name = councillor_html.select("p.title")[0].get_text(strip=True)
        party = councillor_html.select("p.title span")[0].get_text(strip=True)
        name = name.replace(party, "")
        division = self.get_ward_for_person(name)
        councillor = self.add_councillor(
            url, identifier=url, name=name, party=party, division=division
        )
        # Fix: the original contained a redundant double assignment
        # (``councillor.email = councillor.email = ...``).
        councillor.email = councillor_html.select("a[href^=mailto]")[0][
            "href"
        ].split(":")[1]
        councillor.photo_url = image_url
        return councillor
| 3.015625 | 3 |
pronto/entity/__init__.py | althonos/pronto | 182 | 12762685 | <reponame>althonos/pronto<filename>pronto/entity/__init__.py<gh_stars>100-1000
import datetime
import operator
import typing
import weakref
from typing import AbstractSet, Any, Dict, FrozenSet, Iterable, Iterator, Optional, Set
from ..definition import Definition
from ..pv import PropertyValue
from ..synonym import Synonym, SynonymData, SynonymType
from ..utils.meta import roundrepr, typechecked
from ..xref import Xref
if typing.TYPE_CHECKING:
from ..ontology import _DataGraph, Ontology
from ..relationship import Relationship, RelationshipSet
from .attributes import Relationships
__all__ = ["EntityData", "Entity", "EntitySet"]
_D = typing.TypeVar("_D", bound="EntityData")
_E = typing.TypeVar("_E", bound="Entity")
_S = typing.TypeVar("_S", bound="EntitySet")
class EntityData:
    """Internal attribute storage backing an `Entity`.

    Instances hold the raw state of a term or relationship; the public
    `Entity` objects are lightweight views over this data through a weak
    reference.
    """
    id: str
    alternate_ids: Set[str]
    annotations: Set[PropertyValue]
    anonymous: bool
    builtin: bool
    comment: Optional[str]
    consider: Set[str]
    created_by: Optional[str]
    creation_date: Optional[datetime.datetime]
    disjoint_from: Set[str]
    definition: Optional[Definition]
    equivalent_to: Set[str]
    name: Optional[str]
    namespace: Optional[str]
    obsolete: bool
    relationships: Dict[str, Set[str]]
    replaced_by: Set[str]
    subsets: Set[str]
    synonyms: Set[SynonymData]
    union_of: Set[str]
    xrefs: Set[Xref]
    if typing.TYPE_CHECKING:
        __annotations__: Dict[str, str]
    # Slots are generated from the annotations above; "__weakref__" is kept
    # so `Entity` instances can hold a weak reference to this data.
    __slots__ = ("__weakref__",) + tuple(__annotations__)  # noqa: E0602
class Entity(typing.Generic[_D, _S]):
    """An entity in the ontology graph.
    With respects to the OBO semantics, an `Entity` is either a term or a
    relationship in the ontology graph. Any entity has a unique identifier as
    well as some common properties.
    """
    # Debug builds hide the weak reference behind the `_data` accessor so a
    # dead reference raises a clear error; release builds store the weakref
    # directly in a slot for speed.
    if __debug__ or typing.TYPE_CHECKING:
        __data: "weakref.ReferenceType[_D]"
        __slots__: Iterable[str] = ()
        def __init__(self, ontology: "Ontology", data: "_D"):
            self.__data = weakref.ref(data)
            self.__id = data.id
            self.__ontology = ontology
        def _data(self) -> "EntityData":
            """Return the backing `EntityData`, raising if it was deallocated."""
            rdata = self.__data()
            if rdata is None:
                raise RuntimeError("internal data was deallocated")
            return rdata
    else:
        __slots__: Iterable[str] = ("_data",)  # type: ignore
        def __init__(self, ontology: "Ontology", data: "_D"):
            self._data = weakref.ref(data)  # type: ignore
            self.__ontology = ontology
            self.__id = data.id
    # Concrete subclasses set `_Set` to their `EntitySet` subclass and
    # `_data_getter` to the accessor returning the matching data graph.
    _Set: typing.ClassVar[typing.Type[_S]] = NotImplemented
    _data_getter: typing.Callable[["Ontology"], "_DataGraph"] = NotImplemented
    # --- Private helpers ----------------------------------------------------
    def _ontology(self) -> "Ontology":
        """Return the `Ontology` this entity belongs to."""
        return self.__ontology
    # --- Magic Methods ------------------------------------------------------
    # Entities compare, order and hash by identifier only.
    def __eq__(self, other: Any) -> bool:
        if isinstance(other, Entity):
            return self.id == other.id
        return False
    def __lt__(self, other):
        if isinstance(other, Entity):
            return self.id < other.id
        return NotImplemented
    def __le__(self, other):
        if isinstance(other, Entity):
            return self.id <= other.id
        return NotImplemented
    def __gt__(self, other):
        if isinstance(other, Entity):
            return self.id > other.id
        return NotImplemented
    def __ge__(self, other):
        if isinstance(other, Entity):
            return self.id >= other.id
        return NotImplemented
    def __hash__(self):
        return hash((self.id))
    def __repr__(self):
        return roundrepr.make(type(self).__name__, self.id, name=(self.name, None))
    # --- Data descriptors ---------------------------------------------------
    @property
    def alternate_ids(self) -> Set[str]:
        """`set` of `str`: A set of alternate IDs for this entity."""
        return self._data().alternate_ids
    @alternate_ids.setter  # type: ignore
    def alternate_ids(self, ids: Iterable[str]):
        self._data().alternate_ids = set(ids)
    @property
    def annotations(self) -> Set[PropertyValue]:
        """`set` of `PropertyValue`: Annotations relevant to the entity."""
        return self._data().annotations
    @annotations.setter
    def annotations(self, value: Iterable[PropertyValue]) -> None:
        self._data().annotations = set(value)
    @property
    def anonymous(self) -> bool:
        """`bool`: Whether or not the entity has an anonymous id.
        Semantics of anonymous entities are the same as B-Nodes in RDF.
        """
        return self._data().anonymous
    @anonymous.setter
    def anonymous(self, value: bool):
        self._data().anonymous = value
    @property
    def builtin(self) -> bool:
        """`bool`: Whether or not the entity is built-in to the OBO format.
        ``pronto`` uses this tag on the ``is_a`` relationship, which is the
        axiomatic to the OBO language but treated as a relationship in the
        library.
        """
        return self._data().builtin
    @builtin.setter  # type: ignore
    @typechecked(property=True)
    def builtin(self, value: bool):
        self._data().builtin = value
    @property
    def comment(self) -> Optional[str]:
        """`str` or `None`: A comment about the current entity.
        Comments in ``comment`` clauses are guaranteed to be conserved by OBO
        parsers and serializers, unlike bang comments. A non `None` `comment`
        is semantically equivalent to a ``rdfs:comment`` in OWL2. When parsing
        from OWL, several RDF comments will be merged together into a single
        ``comment`` clause spanning over multiple lines.
        """
        return self._data().comment
    @comment.setter
    def comment(self, value: Optional[str]):
        self._data().comment = value
    @property
    def consider(self) -> _S:
        """`EntitySet`: A set of potential substitutes for an obsolete term.
        An obsolete entity can provide one or more entities which may be
        appropriate substitutes, but needs to be looked at carefully by a
        human expert before the replacement is done.
        See Also:
            `~Entity.replaced_by`, which provides a set of entities suitable
            for automatic replacement.
        """
        s = self._Set()
        s._ids = self._data().consider
        s._ontology = self._ontology()
        return s
    @consider.setter
    def consider(self, consider: Iterable[_E]) -> None:
        if isinstance(consider, EntitySet):
            data = consider._ids
        else:
            data = {entity.id for entity in consider}
        self._data().consider = data
    @property
    def created_by(self) -> Optional[str]:
        """`str` or `None`: The name of the creator of the entity, if any.
        This property gets translated to a ``dc:creator`` annotation in OWL2,
        which has very broad semantics. Some OBO ontologies may instead use
        other annotation properties such as the ones found in `Information
        Interchange Ontology <http://www.obofoundry.org/ontology/iao.html>`_,
        which can be accessed in the `annotations` attribute of the entity,
        if any.
        """
        return self._data().created_by
    @created_by.setter  # type: ignore
    @typechecked(property=True)
    def created_by(self, value: Optional[str]):
        self._data().created_by = value
    @property
    def creation_date(self) -> Optional[datetime.datetime]:
        """`~datetime.datetime` or `None`: The date the entity was created."""
        return self._data().creation_date
    @creation_date.setter  # type: ignore
    @typechecked(property=True)
    def creation_date(self, value: Optional[datetime.datetime]):
        self._data().creation_date = value
    @property
    def definition(self) -> Optional[Definition]:
        """`Definition` or `None`: The definition of the current entity.
        Definitions in OBO are intended to be human-readable text describing
        the entity, with some additional cross-references if possible.
        Example:
            >>> hp = pronto.Ontology.from_obo_library("hp.obo")
            >>> term = hp["HP:0009882"]
            >>> term.name
            'Short distal phalanx of finger'
            >>> str(term.definition)
            'Short distance from the end of the finger to the most distal...'
            >>> sorted(term.definition.xrefs)
            [Xref('HPO:probinson'), Xref('PMID:19125433')]
        """
        return self._data().definition
    @definition.setter  # type: ignore
    @typechecked(property=True)
    def definition(self, definition: Optional[Definition]):
        self._data().definition = definition
    @property
    def disjoint_from(self) -> _S:
        """`EntitySet`: The entities declared as disjoint from this entity.
        Two entities are disjoint if they have no instances in common. Two
        entities that are disjoint cannot share any subentities, but the
        opposite is not always true.
        """
        s = self._Set()
        s._ids = self._data().disjoint_from
        s._ontology = self._ontology()
        return s
    @disjoint_from.setter
    def disjoint_from(self, disjoint: Iterable[_E]):
        if isinstance(disjoint, EntitySet):
            data = disjoint._ids
        else:
            data = {entity.id for entity in disjoint}
        self._data().disjoint_from = data
    @property
    def equivalent_to(self) -> _S:
        """`EntitySet`: The entities declared as equivalent to this entity."""
        s = self._Set()
        s._ids = self._data().equivalent_to
        s._ontology = self._ontology()
        return s
    @equivalent_to.setter
    def equivalent_to(self, entities: Iterable[_E]):
        if isinstance(entities, EntitySet):
            data = entities._ids
        else:
            data = {entity.id for entity in entities}
        self._data().equivalent_to = data
    @property
    def id(self) -> str:
        """`str`: The OBO identifier of the entity.
        Identifiers can be either prefixed (e.g. ``MS:1000031``), unprefixed
        (e.g. ``part_of``) or given as plain URLs. Identifiers cannot be
        edited.
        """
        return self.__id
    @property
    def name(self) -> Optional[str]:
        """`str` or `None`: The name of the entity.
        Names are formally equivalent to ``rdf:label`` in OWL2. The OBO format
        version 1.4 made names optional to improve OWL interoperability, as
        labels are optional in OWL.
        """
        return self._data().name
    @name.setter  # type: ignore
    @typechecked(property=True)
    def name(self, value: Optional[str]):
        self._data().name = value
    @property
    def namespace(self) -> Optional[str]:
        """`str` or `None`: The namespace this entity is defined in."""
        return self._data().namespace
    @namespace.setter  # type: ignore
    @typechecked(property=True)
    def namespace(self, ns: Optional[str]):
        self._data().namespace = ns
    @property
    def obsolete(self) -> bool:
        """`bool`: Whether or not the entity is obsolete.
        Hint:
            All OBO entities can be made obsolete through a boolean flag, and
            map to one or several replacements. When querying an obsolete
            entity, ``pronto`` will **not** attempt to perform any kind of
            replacement itself ::
                >>> ms = pronto.Ontology.from_obo_library("ms.obo")
                >>> term = ms["MS:1001414"]
                >>> term
                Term('MS:1001414', name='MGF scans')
                >>> term.obsolete
                True
            To always get the up-to-date, non-obsolete entity, you could use
            the following snippet, going through a term replacement if there
            is no ambiguity ::
                >>> while term.obsolete:
                ...     if len(term.replaced_by) != 1:
                ...         raise ValueError(f"no replacement for {term.id}")
                ...     term = term.replaced_by.pop()
                >>> term
                Term('MS:1000797', name='peak list scans')
        See Also:
            `~.Entity.consider` and `~Entity.replaced_by`, storing some
            replacement options for an obsolete entity.
        """
        return self._data().obsolete
    @obsolete.setter  # type: ignore
    @typechecked(property=True)
    def obsolete(self, value: bool):
        self._data().obsolete = value
    @property
    def relationships(self: _E) -> "Relationships[_E, _S]":
        """`~.Relationships`: The links from an entity to other entities.
        This property returns an object that maps a `~.Relationship` to
        an `~.EntitySet` (either a `~.TermSet` for `Term.relationships`, or
        a `~.RelationshipSet` for `Relationship.relationships`).
        Hint:
            The mapping is mutable, so relationships can be created or removed
            using the usual interface of a `~collections.abc.MutableMapping`.
        Example:
            Get the ``MS:1000004`` term (*sample mass*) from the Mass
            Spectrometry ontology::
                >>> ms = pronto.Ontology.from_obo_library("ms.obo")
                >>> sample_mass = ms["MS:1000004"]
            Then use the ``relationships`` property to get the relevant
            unit from the Unit Ontology::
                >>> sorted(sample_mass.relationships.keys())
                [Relationship('has_units', name='has_units')]
                >>> sample_mass.relationships[ms.get_relationship('has_units')]
                TermSet({Term('UO:0000021', name='gram')})
        """
        from .attributes import Relationships
        return Relationships(self)
    @relationships.setter
    def relationships(self, rels: typing.Mapping["Relationship", Iterable[_E]]):
        self._data().relationships = {
            relation.id: set(entity.id for entity in entities)
            for relation, entities in rels.items()
        }
    @property
    def replaced_by(self) -> _S:
        """`EntitySet`: A set of of replacements for an obsolete term.
        An obsolete entity can provide one or more replacement that can
        safely be used to automatically reassign instances to non-obsolete
        classes.
        See Also:
            `~Entity.consider`, which provides a set of entities suitable
            for replacement but requiring expert curation.
        """
        s = self._Set()
        s._ids = self._data().replaced_by
        s._ontology = self._ontology()
        return s
    @replaced_by.setter
    def replaced_by(self, replacements: Iterable[_E]) -> None:
        if isinstance(replacements, EntitySet):
            data = replacements._ids
        else:
            data = set(entity.id for entity in replacements)
        self._data().replaced_by = data
    @property
    def subsets(self) -> FrozenSet[str]:
        """`frozenset` of `str`: The subsets containing this entity."""
        return frozenset(self._data().subsets)
    @subsets.setter  # type: ignore
    @typechecked(property=True)
    def subsets(self, subsets: FrozenSet[str]):
        # Reject subsets that were not declared in the ontology header.
        declared = set(s.name for s in self._ontology().metadata.subsetdefs)
        for subset in subsets:
            if subset not in declared:
                raise ValueError(f"undeclared subset: {subset!r}")
        self._data().subsets = set(subsets)
    @property
    def synonyms(self) -> FrozenSet[Synonym]:
        """`frozenset` of `Synonym`: A set of synonyms for this entity."""
        ontology, termdata = self._ontology(), self._data()
        return frozenset(Synonym(ontology, s) for s in termdata.synonyms)
    @synonyms.setter  # type: ignore
    @typechecked(property=True)
    def synonyms(self, synonyms: Iterable[Synonym]):
        self._data().synonyms = {syn._data() for syn in synonyms}
    @property
    def union_of(self) -> _S:
        """`EntitySet`: The entities this entity is declared a union of."""
        s = self._Set()
        s._ids = self._data().union_of
        s._ontology = self._ontology()
        return s
    @union_of.setter
    def union_of(self, union_of: Iterable[_E]) -> None:
        if isinstance(union_of, EntitySet):
            data = union_of._ids
        else:
            data = set()
            for entity in union_of:
                if not isinstance(entity, Entity):
                    ty = type(entity).__name__
                    raise TypeError(f"expected `Entity`, found {ty}")
                data.add(entity.id)
        if len(data) == 1:
            raise ValueError("'union_of' cannot have a cardinality of 1")
        self._data().union_of = data
    @property
    def xrefs(self) -> FrozenSet[Xref]:
        """`frozenset` of `Xref`: A set of database cross-references.
        Xrefs can be used to describe an analogous entity in another
        vocabulary, such as a database or a semantic knowledge base.
        """
        return frozenset(self._data().xrefs)
    @xrefs.setter  # type: ignore
    @typechecked(property=True)
    def xrefs(self, xrefs: FrozenSet[Xref]):
        self._data().xrefs = set(xrefs)
    # --- Convenience methods ------------------------------------------------
    def add_synonym(
        self,
        description: str,
        scope: Optional[str] = None,
        type: Optional[SynonymType] = None,
        xrefs: Optional[Iterable[Xref]] = None,
    ) -> Synonym:
        """Add a new synonym to the current entity.
        Arguments:
            description (`str`): The alternate definition of the entity, or a
                related human-readable synonym.
            scope (`str` or `None`): An optional synonym scope. Must be either
                **EXACT**, **RELATED**, **BROAD** or **NARROW** if given.
            type (`~pronto.SynonymType` or `None`): An optional synonym type.
                Must be declared in the header of the current ontology.
            xrefs (iterable of `Xref`, or `None`): A collections of database
                cross-references backing the origin of the synonym.
        Raises:
            ValueError: when given an invalid synonym type or scope.
        Returns:
            `~pronto.Synonym`: A new synonym for the terms. The synonym is
            already added to the `Entity.synonyms` collection.
        """
        # check the type is declared in the current ontology
        if type is None:
            type_id: Optional[str] = None
        else:
            try:
                type_id = self._ontology().get_synonym_type(type.id).id
            except KeyError as ke:
                raise ValueError(f"undeclared synonym type {type.id!r}") from ke
        data = SynonymData(description, scope, type_id, xrefs=xrefs)
        self._data().synonyms.add(data)
        return Synonym(self._ontology(), data)
class EntitySet(typing.Generic[_E], typing.MutableSet[_E]):
    """A specialized mutable set to store `Entity` instances."""
    # --- Magic methods ------------------------------------------------------
    def __init__(self, entities: Optional[Iterable[_E]] = None) -> None:
        # Only identifiers are stored; entities are re-materialized from
        # `_ontology` on iteration. `_ontology` is None while the set is empty.
        self._ids: Set[str] = set()
        self._ontology: "Optional[Ontology]" = None
        for entity in entities if entities is not None else ():
            if __debug__ and not isinstance(entity, Entity):
                err_msg = "'entities' must be iterable of Entity, not {}"
                raise TypeError(err_msg.format(type(entity).__name__))
            if self._ontology is None:
                self._ontology = entity._ontology()
            if self._ontology is not entity._ontology():
                raise ValueError("entities do not originate from the same ontology")
            self._ids.add(entity.id)
    def __contains__(self, other: object):
        if isinstance(other, Entity):
            return other.id in self._ids
        return False
    def __iter__(self) -> Iterator[_E]:
        return map(lambda t: self._ontology[t], iter(self._ids))  # type: ignore
    def __len__(self):
        return len(self._ids)
    def __repr__(self):
        ontology = self._ontology
        elements = (repr(ontology[id_]) for id_ in self._ids)
        return f"{type(self).__name__}({{{', '.join(elements)}}})"
    # The binary set operators below take a fast path on the internal id sets
    # when both operands are `EntitySet`s, and fall back to the generic
    # `MutableSet` implementation otherwise.
    def __iand__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if isinstance(other, EntitySet):
            self._ids &= other._ids
        else:
            super().__iand__(other)
        if not self._ids:
            self._ontology = None
        return self
    def __and__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if isinstance(other, EntitySet):
            s = type(self)()
            s._ids = self._ids.__and__(other._ids)
            s._ontology = self._ontology if s._ids else None
        else:
            s = type(self)(super().__and__(other))
        return s
    def __ior__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if not isinstance(other, EntitySet):
            other = type(self)(other)
        self._ids |= other._ids
        self._ontology = self._ontology or other._ontology
        return self
    def __or__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if isinstance(other, EntitySet):
            s = type(self)()
            s._ids = self._ids.__or__(other._ids)
            s._ontology = self._ontology or other._ontology
        else:
            s = type(self)(super().__or__(other))
        return s
    def __isub__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if isinstance(other, EntitySet):
            self._ids -= other._ids
        else:
            super().__isub__(other)
        if not self._ids:
            self._ontology = None
        return self
    def __sub__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if isinstance(other, EntitySet):
            s = type(self)()
            s._ids = self._ids.__sub__(other._ids)
            s._ontology = self._ontology
        else:
            s = type(self)(super().__sub__(other))
        return s
    def __ixor__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if isinstance(other, EntitySet):
            self._ids ^= other._ids
            self._ontology = self._ontology or other._ontology
        else:
            super().__ixor__(other)
        if not self._ids:
            self._ontology = None
        return self
    def __xor__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
        if isinstance(other, EntitySet):
            s = type(self)()
            s._ids = self._ids.__xor__(other._ids)
            s._ontology = self._ontology or other._ontology
        else:
            s = type(self)(super().__xor__(other))
        if not s._ids:
            s._ontology = None
        return s
    # --- Methods ------------------------------------------------------------
    def add(self, entity: _E) -> None:
        """Add *entity* to the set, checking it shares the set's ontology."""
        if self._ontology is None:
            self._ontology = entity._ontology()
        elif self._ontology is not entity._ontology():
            raise ValueError("cannot use `Entity` instances from different `Ontology`")
        self._ids.add(entity.id)
    def clear(self) -> None:
        """Remove all elements and reset the ontology reference."""
        self._ids.clear()
        self._ontology = None
    def discard(self, entity: _E) -> None:
        """Remove *entity* if present; do nothing otherwise."""
        self._ids.discard(entity.id)
    def pop(self) -> _E:
        """Remove and return an arbitrary entity from the set."""
        id_ = self._ids.pop()
        entity = self._ontology[id_]  # type: ignore
        if not self._ids:
            self._ontology = None
        return entity  # type: ignore
    def remove(self, entity: _E):
        """Remove *entity*; raise `KeyError` if absent."""
        if self._ontology is not None and self._ontology is not entity._ontology():
            raise ValueError("cannot use `Entity` instances from different `Ontology`")
        self._ids.remove(entity.id)
    # --- Attributes ---------------------------------------------------------
    @property
    def ids(self) -> FrozenSet[str]:
        """`frozenset` of `str`: The identifiers of the entities in the set."""
        return frozenset(map(operator.attrgetter("id"), iter(self)))
    @property
    def alternate_ids(self) -> FrozenSet[str]:
        """`frozenset` of `str`: All alternate ids of the entities in the set."""
        return frozenset(id for entity in self for id in entity.alternate_ids)
    @property
    def names(self) -> FrozenSet[str]:
        """`frozenset` of `str`: The names of the entities in the set."""
        return frozenset(map(operator.attrgetter("name"), iter(self)))
| 2.03125 | 2 |
api/server.py | littlebenlittle/python-api-starter | 0 | 12762686 |
from config import *
from flask import Flask, request
app = Flask(__name__)
@app.route('/ping', methods=['GET', 'POST'])
def index():
    """Health-check endpoint.

    GET requests are merely logged; POST requests are expected to carry a
    JSON body, which is printed.  Always responds ``b'success'`` / 200.
    """
    if request.method == 'GET':
        print('received a get request')
    else:
        # Bug fix: ``request.json`` is a property in Flask, so the original
        # ``request.json()`` raised TypeError; use get_json() instead.
        print(request.get_json())
    return b'success', 200
| 2.625 | 3 |
wifimanager.py | timhawes/timhawes_circuitpython_misc | 0 | 12762687 | <reponame>timhawes/timhawes_circuitpython_misc<filename>wifimanager.py<gh_stars>0
# SPDX-FileCopyrightText: 2022 <NAME>
#
# SPDX-License-Identifier: MIT
import binascii
import json
import microcontroller
import wifi
class WiFiManager:
    """Maintain a WiFi connection on CircuitPython boards.

    Credentials are read from ``/wifi.json``; `loop()` must be called
    periodically so the (dis)connection callbacks fire on state changes.
    """
    def __init__(self, hostname=None):
        """Configure the radio hostname and start the first connection attempt."""
        self._connected_state = False
        self._connect_count = 0
        self.connected_callback = None
        self.disconnected_callback = None
        if hostname:
            wifi.radio.hostname = hostname
        else:
            # Default hostname derived from the MCU's unique id, e.g. "esp-1a2b...".
            wifi.radio.hostname = "esp-{}".format(
                binascii.hexlify(microcontroller.cpu.uid).decode("ascii")
            )
        self.reconfigure()
    def reconfigure(self):
        """(Re)load credentials from /wifi.json and connect; no-op when missing."""
        try:
            with open("/wifi.json", "r") as f:
                data = json.load(f)
                # timeout=-1 blocks until the connection succeeds.
                wifi.radio.connect(data["ssid"], data["password"], timeout=-1)
        except OSError:
            print("WiFiManager: /wifi.json not found")
    @property
    def connect_count(self):
        """`int`: Number of successful connections observed so far."""
        return self._connect_count
    @property
    def connected(self):
        """`bool`: True when associated with an AP and holding usable IPv4 + DNS."""
        if wifi.radio.ap_info is None:
            return False
        if wifi.radio.ipv4_address is None:
            return False
        if str(wifi.radio.ipv4_address) == "0.0.0.0":
            return False
        if str(wifi.radio.ipv4_dns) == "0.0.0.0":
            return False
        return True
    def loop(self):
        """Poll the connection state and invoke callbacks on transitions."""
        if self._connected_state is False:
            if self.connected:
                print(
                    "WiFiManager: connected ssid={} ipv4={}".format(
                        wifi.radio.ap_info.ssid, wifi.radio.ipv4_address
                    )
                )
                self._connected_state = True
                self._connect_count += 1
                if self.connected_callback:
                    self.connected_callback()
        elif self._connected_state is True:
            if not self.connected:
                print("WiFiManager: disconnected")
                self._connected_state = False
                if self.disconnected_callback:
                    self.disconnected_callback()
| 2.859375 | 3 |
contact/views.py | uktrade/invest-ui | 0 | 12762688 | from directory_components.mixins import CountryDisplayMixin, GA360Mixin
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from django.urls import reverse_lazy
from django.conf import settings
from contact import forms
from core.mixins import LocalisedURLsMixin, InvestEnableTranslationsMixin
class ActiveViewNameMixin:
    """Expose the class's ``active_view_name`` to the template context."""

    def get_context_data(self, *args, **kwargs):
        return super().get_context_data(
            *args, active_view_name=self.active_view_name, **kwargs
        )
class ContactFormView(
    ActiveViewNameMixin,
    InvestEnableTranslationsMixin,
    LocalisedURLsMixin,
    CountryDisplayMixin,
    GA360Mixin,
    FormView,
):
    """Invest contact form page.

    Renders `forms.ContactForm`, forwards UTM tracking data and the
    submission URL into the form, and registers a GA360 analytics payload.
    """
    success_url = reverse_lazy('contact-success')
    template_name = 'contact/contact.html'
    form_class = forms.ContactForm
    active_view_name = 'contact'
    available_languages = settings.LANGUAGES
    def __init__(self):
        super().__init__()
        # Analytics payload consumed by GA360Mixin.
        self.set_ga360_payload(
            page_id='InvestContactForm',
            business_unit='Invest',
            site_section='Contact'
        )
    def get_form_kwargs(self):
        """Extend the default form kwargs with UTM data and the request path."""
        kwargs = super().get_form_kwargs()
        kwargs['utm_data'] = self.request.utm
        kwargs['submission_url'] = self.request.path
        return kwargs
    def form_valid(self, form):
        """Persist/send the form before redirecting to ``success_url``."""
        form.save()
        return super().form_valid(form)
class ContactFormSuccessView(
    ActiveViewNameMixin,
    LocalisedURLsMixin,
    InvestEnableTranslationsMixin,
    CountryDisplayMixin,
    GA360Mixin,
    TemplateView,
):
    """Static "thank you" page shown after a successful contact submission."""
    template_name = 'contact/contact_form_success_page.html'
    active_view_name = 'contact'
    available_languages = settings.LANGUAGES
    def __init__(self):
        super().__init__()
        # Analytics payload consumed by GA360Mixin.
        self.set_ga360_payload(
            page_id='InvestContactFormSuccess',
            business_unit='Invest',
            site_section='Contact',
            site_subsection='ContactSuccess'
        )
| 1.890625 | 2 |
Common Python Code/Code_Ed/3M_2__Sort_Search.py | vibwipro/Machine-Learning-Python | 3 | 12762689 | '''
Data of XYZ company is stored in sorted list.
Write a program for searching specific data from that list.
Hint: Use if/elif to deal with conditions
'''
import re  # NOTE(review): unused in this section; kept in case other code relies on it.

# Read lines until an empty line terminates input.
print('Enter sorted list of lines : ')
lines = []
while True:
    line = input()
    if line:
        lines.append(line)
    else:
        break

text = '\n'.join(lines)
test_1 = text.split("\n")

sr = input('Enter data you wanted to search : ')
sr_spt = sr.split()

for i in sr_spt:
    for j in test_1:
        if i.strip() == j.strip():
            print('Character found in the list is : ' + i)
            break
    else:
        # Bug fix: the original printed "not found" unconditionally, even after
        # a successful match. The for/else only runs when no match broke out.
        print('Character not found in the list is : ' + i)
| 3.90625 | 4 |
monasca-events-api-0.3.0/monasca_events_api/policies/__init__.py | scottwedge/OpenStack-Stein | 13 | 12762690 | <reponame>scottwedge/OpenStack-Stein<gh_stars>10-100
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pkgutil
from oslo_log import log
from oslo_utils import importutils
LOG = log.getLogger(__name__)
_BASE_MOD_PATH = 'monasca_events_api.policies.'
def load_policy_modules():
    """Yield every policy module that exposes a ``list_rules`` callable.

    Iterates over the submodules of :py:mod:`monasca_events_api.policies`,
    imports each one, and keeps only those defining ``list_rules``.
    """
    for module_name in _list_module_names():
        module = importutils.import_module(_BASE_MOD_PATH + module_name)
        if hasattr(module, 'list_rules'):
            yield module
def _list_module_names():
package_path = os.path.dirname(os.path.abspath(__file__))
for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]):
if not (modname == "opts" and ispkg):
yield modname
def list_rules():
    """Aggregate and return the policy rules declared by every policy module."""
    rules = []
    for module in load_policy_modules():
        rules.extend(module.list_rules())
    return rules
| 1.828125 | 2 |
tests/tests/base_tests/__init__.py | zhouhanjiang/aws-device-farm-appium-python-tests-for-android-sample-app | 0 | 12762691 | <reponame>zhouhanjiang/aws-device-farm-appium-python-tests-for-android-sample-app<filename>tests/tests/base_tests/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from native_test import NativeTest
| 1.164063 | 1 |
STI Buttons with Info Display.py | ksu-hmi/STIMythBuster | 1 | 12762692 | import tkinter as tk
chlamydia_info = """
General Facts
A common sexually transmitted infection caused by the bacteria Chlamydia trachomatis.
The infection is transmitted through vaginal, oral, and anal unprotected sex.
It can be passed on from an infected mother to the child during childbirth.
Chlamydia eye infection can occur through genital contact with the eyes.
Risk Factors
Having multiple partners.
Unprotected sex.
History of STI.
Symptoms
Usually, no symptoms during the initial stages of infection.
Women
Vaginal discharge and itching
Bleeding between periods
Painful sexual intercourse
Men
Pain and swelling in testicles
Discharge from penis
Diagnosis
Urine culture for men.
Swab test of cervix for women.
Treatment
Antibiotics to kill bacteria such as Azithromycin or Doxycycline.
Specialist to consult
OB GYN
Urologist
If left untreated:
Pelvic inflammatory disease (PID), infertility and ectopic pregnancy in women.
"""
gonorrhea_info = """
Gonorrhea
General Facts
A sexually transmitted bacterial infection caused by the bacteria Neisseria gonorrhea. It often affects the urethra, rectum, or throat.
Symptoms:
Men:
Frequent urination
Puss-like discharge from the penis or pain in the testicle
Persistent sore throat
Women:
Discharge from the vagina
Pain or burning sensation while urinating
Heavier periods or spotting
Pain during sexual intercourse
Sharp pain in the lower abdomen
Sore throat, fever
Causes:
It is caused by the bacterium Neisseria gonorrhea.
Affects the mouth, throat, eyes, rectum and female reproductive tract.
It spreads through unprotected sex.
Can be passed from an infected mother to her baby during delivery.
Prevention:
Stay away from unprotected sex
Always use a condom
Get tested if suspicious of infection
Complications:
Pelvic inflammatory disease in women (PID)
Blockage or scarring of fallopian tubes
Scarring in the urethra in men
Ectopic pregnancy
Painful abscess may develop in the interior of the penis.
Diagnosis:
Swab test: a sample is collected either from the genitals or mouth and tested for the presence of bacteria.
Treatment:
Antibiotics to kill the bacteria such as Ceftriaxone and Azithromycin
Self-care Strategies:
Avoid sexual intercourse during the treatment period.
Specialist to Consult:
Gynecologist
Urologist
"""
hpv_info = """
ENTER TEXT HERE
"""
syphilis_info = """
ENTER TEXT HERE
"""
trichomoniasis_info = """
ENTER TEXT HERE
"""
genitalHerpes_info = """
ENTER TEXT HERE
"""
#ACTIONS
def click():
    """Open a pop-up window showing the chlamydia information text."""
    print("Chlamydia")
    popup = tk.Toplevel(main_window)
    popup.title("Chlamydia Information")
    label = tk.Label(popup, text=chlamydia_info, foreground="black", font=('Georgia', 12))
    label.grid(row=0, column=0, columnspan=3)
def click1():
print("Gonorrhea")
window = tk.Toplevel(main_window)
window.title("Gonorrhea Information")
info = tk.Label(window, text=gonorrhea_info, foreground="black")
info.config(font=('Georgia', 12))
info.grid(row=0, column=0, columnspan=3)
def click2():
print("HPV")
window = tk.Toplevel(main_window)
window.title("HPV Information")
info = tk.Label(window, text=hpv_info, foreground="black")
info.config(font=('Georgia', 12))
info.grid(row=0, column=0, columnspan=3)
def click3():
print("Syphilis")
window = tk.Toplevel(main_window)
window.title("Syphilis Information")
info = tk.Label(window, text=syphilis_info, foreground="black")
info.config(font=('Georgia', 12))
info.grid(row=0, column=0, columnspan=3)
def click4():
print("Trichomoniasis")
window = tk.Toplevel(main_window)
window.title("Trichomoniasis Information")
info = tk.Label(window, text=trichomoniasis_info, foreground="black")
info.config(font=('Georgia', 12))
info.grid(row=0, column=0, columnspan=3)
def click5():
print("<NAME>")
window = tk.Toplevel(main_window)
window.title("Genital Herpes Information")
info = tk.Label(window, text=genitalHerpes_info, foreground="black")
info.config(font=('Georgia', 12))
info.grid(row=0, column=0, columnspan=3)
#SETUP
def click_setup():
button1 = tk.Button(text='Chlamydia')
button1.config(command=click) # performs call back of function
button1.config(height = 5, width = 25)
button1.config(font=('Comic Sans', 15, 'bold'))
button1.config(bg='orange')
button1.config(fg='white')
button1.grid(row=0, column=0)
def click1_setup():
button2 = tk.Button(text='Gonorrhea')
button2.config(command=click1) # performs call back of function
button2.config(height = 5, width = 25)
button2.config(font=('Comic Sans', 15, 'bold'))
button2.config(bg='#DE1F27')
button2.config(fg='white')
button2.grid(row=1, column=0)
def click2_setup():
button3 = tk.Button(text='Human Papillomavirus')
button3.config(command=click2) # performs call back of function
button3.config(height = 5, width = 25)
button3.config(font=('Comic Sans', 15, 'bold'))
button3.config(bg='#1FDED3')
button3.config(fg='white')
button3.grid(row=2, column=0)
def click3_setup():
button4 = tk.Button(text='Syphilis')
button4.config(command=click3) # performs call back of function
button4.config(height = 5, width = 25)
button4.config(font=('Comic Sans', 15, 'bold'))
button4.config(bg='#B6DE1F')
button4.config(fg='white')
button4.grid(row=3, column=0)
def click4_setup():
print("Trichomoniasis")
button5 = tk.Button(text='Trichomoniasis')
button5.config(command=click4) # performs call back of function
button5.config(height = 5, width = 25)
button5.config(font=('Comic Sans', 15, 'bold'))
button5.config(bg='#1FDED6')
button5.config(fg='white')
button5.grid(row=4, column=0)
def click5_setup():
print("<NAME>")
button5 = tk.Button(text='<NAME>')
button5.config(command=click4) # performs call back of function
button5.config(height = 5, width = 25)
button5.config(font=('Comic Sans', 15, 'bold'))
button5.config(bg='#DE1FBC')
button5.config(fg='white')
button5.grid(row=4, column=0)
main_window = tk.Tk()
main_window.title("STI Educational Health App")
click_setup()
click1_setup()
click2_setup()
click3_setup()
click4_setup()
click5_setup()
main_window.mainloop() | 3.09375 | 3 |
tests/test_utils/test_request.py | daniktl/request-tools | 0 | 12762693 | from meta_requests.utils.request import *
from tests.utils import get_response_with_text
def test_response_detect_blocking_messages():
blocked_message: str = "You got blocked"
text = get_response_with_text(blocked_message)
assert response_detect_blocking_messages(text, [blocked_message])
| 2.125 | 2 |
example/example.py | Teichlab/GPfates | 18 | 12762694 | import pandas as pd
import numpy as np
from GPfates import GPfates
etpm = pd.read_table('tapio_tcell_tpm.txt', index_col=0)
etpm = etpm[(etpm > 2).sum(1) > 2]
logexp = np.log10(etpm + 1)
tcells = pd.read_csv('tcells_rebuttal.csv', index_col=0)
m = GPfates.GPfates(tcells, logexp)
# m.dimensionality_reduction()
#
# m.store_dr()
#
# m.infer_pseudotime(priors=m.s.day_int, s_columns=['bgplvm_0', 'bgplvm_1'])
# m.infer_pseudotime(priors=m.s.day_int, s_columns=['bgplvm_2d_0', 'bgplvm_2d_1'])
# GPfates.plt.scatter(m.s.scaled_pseudotime, m.s.pseudotime); GPfates.plt.show()
# m.model_fates(X=['bgplvm_1'])
m.model_fates(X=['bgplvm_2d_1'])
# p = m.identify_bifurcation_point()
# print(p)
# m.calculate_bifurcation_statistics()
# m.fate_model.plot(); GPfates.plt.show()
m.make_fates_viz(['bgplvm_2d_0', 'bgplvm_2d_1'])
m.fates_viz.plot(); GPfates.plt.show()
| 2.28125 | 2 |
rdkit/ML/Composite/AdjustComposite.py | docking-org/rdk | 1 | 12762695 | # $Id$
#
# Copyright (C) 2003 <NAME> and Rational Discovery LLC
# All Rights Reserved
#
""" functionality to allow adjusting composite model contents
"""
from __future__ import print_function
import copy
import numpy
def BalanceComposite(model, set1, set2, weight, targetSize, names1=None, names2=None):
""" adjusts the contents of the composite model so as to maximize
the weighted classification accuracty across the two data sets.
The resulting composite model, with _targetSize_ models, is returned.
**Notes**:
- if _names1_ and _names2_ are not provided, _set1_ and _set2_ should
have the same ordering of columns and _model_ should have already
have had _SetInputOrder()_ called.
"""
#
# adjust the weights to be proportional to the size of the two data sets
# The normalization we do here assures that a perfect model contributes
# a score of S1+S2 to the final
#
S1 = len(set1)
S2 = len(set2)
weight1 = float(S1 + S2) * (1 - weight) / S1
weight2 = float(S1 + S2) * weight / S2
# print('\t:::', S1, S2, weight1, weight2)
# print('nModels:', len(model))
# start with a copy so that we get all the additional schnick-schnack
res = copy.copy(model)
res.modelList = []
res.errList = []
res.countList = []
res.quantizationRequirements = []
startSize = len(model)
scores = numpy.zeros(startSize, numpy.float)
actQuantBounds = model.GetActivityQuantBounds()
if names1 is not None:
model.SetInputOrder(names1)
for pt in set1:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight1
if names2 is not None:
model.SetInputOrder(names2)
for pt in set2:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight2
# normalize the scores
nPts = S1 + S2
scores /= nPts
# sort them:
bestOrder = list(numpy.argsort(scores))
bestOrder.reverse()
print('\tTAKE:', bestOrder[:targetSize])
# and now take the best set:
for i in range(targetSize):
idx = bestOrder[i]
mdl = model.modelList[idx]
res.modelList.append(mdl)
res.errList.append(1. - scores[idx])
res.countList.append(1)
# FIX: this should probably be more general:
res.quantizationRequirements.append(0)
return res
| 2.421875 | 2 |
stagpy/_step.py | StagPython/StagPy | 12 | 12762696 | <reponame>StagPython/StagPy
"""Implementation of Step objects.
Note:
This module and the classes it defines are internals of StagPy, they
should not be used in an external script. Instead, use the
:class:`~stagpy.stagyydata.StagyyData` class.
"""
from collections.abc import Mapping
from collections import namedtuple
from itertools import chain
import numpy as np
from . import error, phyvars, stagyyparsers, _helpers
from ._helpers import CachedReadOnlyProperty as crop
class _Geometry:
"""Geometry information.
It is deduced from the information in the header of binary field files
output by StagYY.
"""
def __init__(self, header, step):
self._header = header
self._step = step
self._shape = {'sph': False, 'cyl': False, 'axi': False,
'ntot': list(header['nts']) + [header['ntb']]}
self._init_shape()
def _scale_radius_mo(self, radius):
"""Rescale radius for MO runs."""
if self._step.sdat.par['magma_oceans_in']['magma_oceans_mode']:
return self._header['mo_thick_sol'] * (
radius + self._header['mo_lambda'])
return radius
@crop
def nttot(self):
"""Number of grid point along the x/theta direction."""
return self._shape['ntot'][0]
@crop
def nptot(self):
"""Number of grid point along the y/phi direction."""
return self._shape['ntot'][1]
@crop
def nrtot(self):
"""Number of grid point along the z/r direction."""
return self._shape['ntot'][2]
@crop
def nbtot(self):
"""Number of blocks."""
return self._shape['ntot'][3]
nxtot = nttot
nytot = nptot
nztot = nrtot
@crop
def r_walls(self):
"""Position of FV walls along the z/r direction."""
rgeom = self._header.get("rgeom")
if rgeom is not None:
walls = rgeom[:, 0] + self.rcmb
else:
walls = self._header["e3_coord"] + self.rcmb
walls = np.append(walls, self._step.rprofs.bounds[1])
return self._scale_radius_mo(walls)
@crop
def r_centers(self):
"""Position of FV centers along the z/r direction."""
rgeom = self._header.get("rgeom")
if rgeom is not None:
walls = rgeom[:-1, 1] + self.rcmb
else:
walls = self._step.rprofs.centers
return self._scale_radius_mo(walls)
@crop
def t_walls(self):
"""Position of FV walls along x/theta."""
if self.threed or self.twod_xz:
if self.yinyang:
tmin, tmax = -np.pi / 4, np.pi / 4
elif self.curvilinear:
# should take theta_position/theta_center into account
tmin = 0
tmax = min(np.pi,
self._step.sdat.par['geometry']['aspect_ratio'][0])
else:
tmin = 0
tmax = self._step.sdat.par['geometry']['aspect_ratio'][0]
return np.linspace(tmin, tmax, self.nttot + 1)
# twoD YZ
center = np.pi / 2 if self.curvilinear else 0
d_t = (self.p_walls[1] - self.p_walls[0]) / 2
return np.array([center - d_t, center + d_t])
@crop
def t_centers(self):
"""Position of FV centers along x/theta."""
return (self.t_walls[:-1] + self.t_walls[1:]) / 2
@crop
def p_walls(self):
"""Position of FV walls along y/phi."""
if self.threed or self.twod_yz:
if self.yinyang:
pmin, pmax = -3 * np.pi / 4, 3 * np.pi / 4
elif self.curvilinear:
pmin = 0
pmax = min(2 * np.pi,
self._step.sdat.par['geometry']['aspect_ratio'][1])
else:
pmin = 0
pmax = self._step.sdat.par['geometry']['aspect_ratio'][1]
return np.linspace(pmin, pmax, self.nptot + 1)
# twoD YZ
d_p = (self.t_walls[1] - self.t_walls[0]) / 2
return np.array([-d_p, d_p])
@crop
def p_centers(self):
"""Position of FV centers along y/phi."""
return (self.p_walls[:-1] + self.p_walls[1:]) / 2
z_walls = r_walls
z_centers = r_centers
x_walls = t_walls
x_centers = t_centers
y_walls = p_walls
y_centers = p_centers
def _init_shape(self):
"""Determine shape of geometry."""
shape = self._step.sdat.par['geometry']['shape'].lower()
aspect = self._header['aspect']
if self._header['rcmb'] is not None and self._header['rcmb'] >= 0:
# curvilinear
self._shape['cyl'] = self.twod_xz and (shape == 'cylindrical' or
aspect[0] >= np.pi)
self._shape['sph'] = not self._shape['cyl']
elif self._header['rcmb'] is None:
self._header['rcmb'] = self._step.sdat.par['geometry']['r_cmb']
if self._header['rcmb'] >= 0:
if self.twod_xz and shape == 'cylindrical':
self._shape['cyl'] = True
elif shape == 'spherical':
self._shape['sph'] = True
self._shape['axi'] = self.cartesian and self.twod_xz and \
shape == 'axisymmetric'
@crop
def rcmb(self):
"""Radius of CMB, 0 in cartesian geometry."""
return max(self._header["rcmb"], 0)
@property
def cartesian(self):
"""Whether the grid is in cartesian geometry."""
return not self.curvilinear
@property
def curvilinear(self):
"""Whether the grid is in curvilinear geometry."""
return self.spherical or self.cylindrical
@property
def cylindrical(self):
"""Whether the grid is in cylindrical geometry (2D spherical)."""
return self._shape['cyl']
@property
def spherical(self):
"""Whether the grid is in spherical geometry."""
return self._shape['sph']
@property
def yinyang(self):
"""Whether the grid is in Yin-yang geometry (3D spherical)."""
return self.spherical and self.nbtot == 2
@property
def twod_xz(self):
"""Whether the grid is in the XZ plane only."""
return self.nytot == 1
@property
def twod_yz(self):
"""Whether the grid is in the YZ plane only."""
return self.nxtot == 1
@property
def twod(self):
"""Whether the grid is 2 dimensional."""
return self.twod_xz or self.twod_yz
@property
def threed(self):
"""Whether the grid is 3 dimensional."""
return not self.twod
def at_z(self, zval):
"""Return iz closest to given zval position.
In spherical geometry, the bottom boundary is considered to be at z=0.
Use :meth:`at_r` to find a cell at a given radial position.
"""
if self.curvilinear:
zval += self.rcmb
return np.argmin(np.abs(self.z_centers - zval))
def at_r(self, rval):
"""Return ir closest to given rval position.
If called in cartesian geometry, this is equivalent to :meth:`at_z`.
"""
return np.argmin(np.abs(self.r_centers - rval))
Field = namedtuple('Field', ['values', 'meta'])
class _Fields(Mapping):
"""Fields data structure.
The :attr:`Step.fields` attribute is an instance of this class.
:class:`_Fields` inherits from :class:`collections.abc.Mapping`. Keys are
fields names defined in :data:`stagpy.phyvars.[S]FIELD[_EXTRA]`. Each item
is a name tuple ('values', 'meta'), respectively the field itself, and a
:class:`stagpy.phyvars.Varf` instance with relevant metadata.
Attributes:
step (:class:`Step`): the step object owning the :class:`_Fields`
instance.
"""
def __init__(self, step, variables, extravars, files, filesh5):
self.step = step
self._vars = variables
self._extra = extravars
self._files = files
self._filesh5 = filesh5
self._data = {}
super().__init__()
def __getitem__(self, name):
if name in self._data:
return self._data[name]
if name in self._vars:
fld_names, parsed_data = self._get_raw_data(name)
elif name in self._extra:
meta = self._extra[name]
field = meta.description(self.step)
meta = phyvars.Varf(_helpers.baredoc(meta.description), meta.dim)
self._data[name] = Field(field, meta)
return self._data[name]
else:
raise error.UnknownFieldVarError(name)
if parsed_data is None:
raise error.MissingDataError(
f'Missing field {name} in step {self.step.istep}')
header, fields = parsed_data
self._cropped__header = header
for fld_name, fld in zip(fld_names, fields):
self._set(fld_name, fld)
return self._data[name]
def __iter__(self):
return (fld for fld in chain(self._vars, self._extra)
if fld in self)
def __contains__(self, item):
try:
return self[item] is not None
except error.StagpyError:
return False
def __len__(self):
return len(iter(self))
def __eq__(self, other):
return self is other
def _get_raw_data(self, name):
"""Find file holding data and return its content."""
# try legacy first, then hdf5
filestem = ''
for filestem, list_fvar in self._files.items():
if name in list_fvar:
break
fieldfile = self.step.sdat.filename(filestem, self.step.isnap,
force_legacy=True)
if not fieldfile.is_file():
fieldfile = self.step.sdat.filename(filestem, self.step.isnap)
parsed_data = None
if fieldfile.is_file():
parsed_data = stagyyparsers.fields(fieldfile)
elif self.step.sdat.hdf5 and self._filesh5:
# files in which the requested data can be found
files = [(stem, fvars) for stem, fvars in self._filesh5.items()
if name in fvars]
for filestem, list_fvar in files:
if filestem in phyvars.SFIELD_FILES_H5:
xmff = 'Data{}.xmf'.format(
'Bottom' if name.endswith('bot') else 'Surface')
header = self._header
else:
xmff = 'Data.xmf'
header = None
parsed_data = stagyyparsers.read_field_h5(
self.step.sdat.hdf5 / xmff, filestem,
self.step.isnap, header)
if parsed_data is not None:
break
return list_fvar, parsed_data
def _set(self, name, fld):
sdat = self.step.sdat
col_fld = sdat._collected_fields
col_fld.append((self.step.istep, name))
if sdat.nfields_max is not None:
while len(col_fld) > sdat.nfields_max:
istep, fld_name = col_fld.pop(0)
del sdat.steps[istep].fields[fld_name]
self._data[name] = Field(fld, self._vars[name])
def __delitem__(self, name):
if name in self._data:
del self._data[name]
@crop
def _header(self):
binfiles = self.step.sdat._binfiles_set(self.step.isnap)
if binfiles:
return stagyyparsers.fields(binfiles.pop(), only_header=True)
elif self.step.sdat.hdf5:
xmf = self.step.sdat.hdf5 / 'Data.xmf'
return stagyyparsers.read_geom_h5(xmf, self.step.isnap)[0]
@crop
def geom(self):
"""Geometry information.
:class:`_Geometry` instance holding geometry information. It is
issued from binary files holding field information. It is set to
None if not available for this time step.
"""
if self._header is not None:
return _Geometry(self._header, self.step)
class _Tracers:
"""Tracers data structure.
The :attr:`Step.tracers` attribute is an instance of this class.
:class:`_Tracers` implements the getitem mechanism. Items are tracervar
names such as 'Type' or 'Mass'. The position of tracers are the 'x', 'y'
and 'z' items.
Attributes:
step (:class:`Step`): the step object owning the :class:`_Tracers`
instance.
"""
def __init__(self, step):
self.step = step
self._data = {}
def __getitem__(self, name):
if name in self._data:
return self._data[name]
data = stagyyparsers.tracers(
self.step.sdat.filename('tra', timestep=self.step.isnap,
force_legacy=True))
if data is None and self.step.sdat.hdf5:
position = any(axis not in self._data for axis in 'xyz')
self._data.update(
stagyyparsers.read_tracers_h5(
self.step.sdat.hdf5 / 'DataTracers.xmf', name,
self.step.isnap, position))
elif data is not None:
self._data.update(data)
if name not in self._data:
self._data[name] = None
return self._data[name]
def __iter__(self):
raise TypeError('tracers collection is not iterable')
Rprof = namedtuple('Rprof', ['values', 'rad', 'meta'])
class _Rprofs:
"""Radial profiles data structure.
The :attr:`Step.rprofs` attribute is an instance of this class.
:class:`_Rprofs` implements the getitem mechanism. Keys are profile names
defined in :data:`stagpy.phyvars.RPROF[_EXTRA]`. An item is a named tuple
('values', 'rad', 'meta'), respectively the profile itself, the radial
position at which it is evaluated, and meta is a
:class:`stagpy.phyvars.Varr` instance with relevant metadata. Note that
profiles are automatically scaled if conf.scaling.dimensional is True.
Attributes:
step (:class:`Step`): the step object owning the :class:`_Rprofs`
instance
"""
def __init__(self, step):
self.step = step
self._cached_extra = {}
@crop
def _data(self):
step = self.step
return step.sdat._rprof_and_times[0].get(step.istep)
@property
def _rprofs(self):
if self._data is None:
step = self.step
raise error.MissingDataError(
f'No rprof data in step {step.istep} of {step.sdat}')
return self._data
def __getitem__(self, name):
step = self.step
if name in self._rprofs.columns:
rprof = self._rprofs[name].values
rad = self.centers
if name in phyvars.RPROF:
meta = phyvars.RPROF[name]
else:
meta = phyvars.Varr(name, None, '1')
elif name in self._cached_extra:
rprof, rad, meta = self._cached_extra[name]
elif name in phyvars.RPROF_EXTRA:
meta = phyvars.RPROF_EXTRA[name]
rprof, rad = meta.description(step)
meta = phyvars.Varr(_helpers.baredoc(meta.description),
meta.kind, meta.dim)
self._cached_extra[name] = rprof, rad, meta
else:
raise error.UnknownRprofVarError(name)
rprof, _ = step.sdat.scale(rprof, meta.dim)
rad, _ = step.sdat.scale(rad, 'm')
return Rprof(rprof, rad, meta)
@crop
def centers(self):
"""Radial position of cell centers."""
return self._rprofs['r'].values + self.bounds[0]
@crop
def walls(self):
"""Radial position of cell walls."""
rbot, rtop = self.bounds
try:
walls = self.step.fields.geom.r_walls
except error.StagpyError:
# assume walls are mid-way between T-nodes
# could be T-nodes at center between walls
centers = self.centers
walls = (centers[:-1] + centers[1:]) / 2
walls = np.insert(walls, 0, rbot)
walls = np.append(walls, rtop)
return walls
@crop
def bounds(self):
"""Radial or vertical position of boundaries.
Radial/vertical positions of boundaries of the domain.
"""
step = self.step
if step.geom is not None:
rcmb = step.geom.rcmb
else:
rcmb = step.sdat.par['geometry']['r_cmb']
if step.sdat.par['geometry']['shape'].lower() == 'cartesian':
rcmb = 0
rbot = max(rcmb, 0)
thickness = (step.sdat.scales.length
if step.sdat.par['switches']['dimensional_units'] else 1)
return rbot, rbot + thickness
class Step:
"""Time step data structure.
Elements of :class:`~stagpy.stagyydata._Steps` and
:class:`~stagpy.stagyydata._Snaps` instances are all :class:`Step`
instances. Note that :class:`Step` objects are not duplicated.
Examples:
Here are a few examples illustrating some properties of :class:`Step`
instances.
>>> sdat = StagyyData('path/to/run')
>>> istep_last_snap = sdat.snaps[-1].istep
>>> assert(sdat.steps[istep_last_snap] is sdat.snaps[-1])
>>> n = 0 # or any valid time step / snapshot index
>>> assert(sdat.steps[n].sdat is sdat)
>>> assert(sdat.steps[n].istep == n)
>>> assert(sdat.snaps[n].isnap == n)
>>> assert(sdat.steps[n].geom is sdat.steps[n].fields.geom)
>>> assert(sdat.snaps[n] is sdat.snaps[n].fields.step)
Args:
istep (int): the index of the time step that the instance
represents.
sdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData
instance owning the :class:`Step` instance.
Attributes:
istep (int): the index of the time step that the instance
represents.
sdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData
instance owning the :class:`Step` instance.
fields (:class:`_Fields`): fields available at this time step.
sfields (:class:`_Fields`): surface fields available at this time
step.
tracers (:class:`_Tracers`): tracers available at this time step.
"""
def __init__(self, istep, sdat):
self.istep = istep
self.sdat = sdat
self.fields = _Fields(self, phyvars.FIELD, phyvars.FIELD_EXTRA,
phyvars.FIELD_FILES, phyvars.FIELD_FILES_H5)
self.sfields = _Fields(self, phyvars.SFIELD, [],
phyvars.SFIELD_FILES, phyvars.SFIELD_FILES_H5)
self.tracers = _Tracers(self)
self.rprofs = _Rprofs(self)
self._isnap = -1
def __repr__(self):
if self.isnap is not None:
return f'{self.sdat!r}.snaps[{self.isnap}]'
else:
return f'{self.sdat!r}.steps[{self.istep}]'
@property
def geom(self):
"""Geometry information.
:class:`_Geometry` instance holding geometry information. It is
issued from binary files holding field information. It is set to
None if not available for this time step.
"""
return self.fields.geom
@property
def timeinfo(self):
"""Time series data of the time step."""
try:
info = self.sdat.tseries.at_step(self.istep)
except KeyError:
raise error.MissingDataError(f'No time series for {self!r}')
return info
@property
def time(self):
"""Time of this time step."""
steptime = None
try:
steptime = self.timeinfo['t']
except error.MissingDataError:
if self.isnap is not None:
steptime = self.geom._header.get('ti_ad')
return steptime
@property
def isnap(self):
"""Snapshot index corresponding to time step.
It is set to None if no snapshot exists for the time step.
"""
if self._isnap == -1:
istep = None
isnap = -1
# could be more efficient if do 0 and -1 then bisection
# (but loose intermediate <- would probably use too much
# memory for what it's worth if search algo is efficient)
while (istep is None or istep < self.istep) and isnap < 99999:
isnap += 1
try:
istep = self.sdat.snaps[isnap].istep
except KeyError:
pass
# all intermediate istep could have their ._isnap to None
if istep != self.istep:
self._isnap = None
return self._isnap
| 2.390625 | 2 |
test_programs/readConfig.py | hackpsu-tech/hackPSUS2018-rfid | 0 | 12762697 | #!/usr/bin/python
"""
This application simply reads a config file created by the HackPSUconfig module and prints the output to the console
"""
import HackPSUconfig as config
configFile = input('Please enter the name of a configuration file: ')
dict = config.getProperties(configFile)
print('Dictionary:')
for key in dict:
print(key + ':' + dict[key])
print('Dictionary complete') | 3.890625 | 4 |
code/Visualizer/applications/movies/views.py | sidgairo18/IMDB-Movie-Poster-Visualizer | 1 | 12762698 | from django.http import HttpResponse
from django.shortcuts import render
from django.db.models import Q
from applications.movies.models import *
import applications.utils as utils
import visualizer.settings as settings
import json
import os
def index(request):
return render(request, 'movies/index.html', {})
def top_k_neighbours(request):
return render(request, 'movies/top_k_neighbours.html', {})
def feature_visualization(request):
return render(request, 'movies/embeddings.html', {})
def ajax_get_stats(request):
movie_ids = None
genres = []
if 'movie_ids[]' in request.GET:
movie_ids = request.GET.getlist('movie_ids[]')
if movie_ids != None:
queries = [Q(movie__id=movie_id) for movie_id in movie_ids]
query = queries.pop()
for item in queries:
query |= item
genres = list(MovieToGenre.objects.filter(query).values_list('genre__name', flat=True))
return HttpResponse(json.dumps({
'genres': genres
}), content_type="applications/json", status=200)
def ajax_get_embeddings(request):
syear = None
eyear = None
categories = None
andopr = None
feature = None
if 'syear' in request.GET:
syear = request.GET['syear']
if 'eyear' in request.GET:
eyear = request.GET['eyear']
if 'category[]' in request.GET:
categories = request.GET.getlist('category[]')
if 'andopr' in request.GET:
andopr = request.GET['andopr']
andopr = True if (andopr == 'true') else False
if 'feature' in request.GET:
feature = request.GET['feature']
movies = get_movies_range(syear=syear, eyear=eyear, categories=categories, andopr=andopr)
if len(movies) > 0:
# X_t, Y_t, I_t = utils.preprocess_data(settings.FEATURES[feature], settings.DATASET, movies)
# plot = utils.visualize_features(X_t, Y_t, I_t, min(settings.E_PCA, X_t.shape[0]))
X_cor, Y_cor, I_t = utils.get_plot_values(settings.DATASET, movies, feature)
for i in range(len(movies)):
movies[i]['x'] = X_cor[i]
movies[i]['y'] = Y_cor[i]
plot = utils.bokeh_plot(I_t, X_cor, Y_cor)
else:
return HttpResponse(json.dumps({
'error': 'No movies in this category'
}), content_type="application/json", status=200)
return HttpResponse(json.dumps({
'plot': plot,
'movies': movies
}), content_type="application/json", status=200)
def ajax_get_movies(request):
syear = None
eyear = None
categories = None
andopr = None
if 'syear' in request.GET:
syear = request.GET['syear']
if 'eyear' in request.GET:
eyear = request.GET['eyear']
if 'category[]' in request.GET:
categories = request.GET.getlist('category[]')
if 'andopr' in request.GET:
andopr = request.GET['andopr']
andopr = True if (andopr == 'true') else False
movies = get_movies_range(syear=syear, eyear=eyear, categories=categories, andopr=andopr)
return HttpResponse(json.dumps({
'total': len(movies),
'movies': movies
}), content_type="application/json", status=200)
def ajax_get_genres(request):
genres = get_genres()
return HttpResponse(json.dumps({
'total': len(genres),
'genres': genres
}), content_type="application/json", status=200)
def ajax_get_features(request):
features = get_features()
return HttpResponse(json.dumps({
'total': len(features),
'features': features
}), content_type="application/json", status=200)
def ajax_get_top_neighbours(request):
image = None
k = None
feature = None
if 'image' in request.GET:
image = request.GET['image']
if 'k' in request.GET:
k = int(request.GET['k'])
if 'feature' in request.GET:
feature = request.GET['feature']
if feature not in settings.FEATURES:
raise Exception('path for this feature not specified')
movies = get_movies()
movies = utils.get_top_neighbours(settings.FEATURES[feature], image, movies, k)
for movie in movies:
movie['genres'] = get_genres_by_movie(movie)
return HttpResponse(json.dumps({
'total': len(movies),
'movies': movies
}), content_type="application/json", status=200)
def get_genres_by_movie(movie):
movie = Movie.objects.filter(id=movie['id']).first()
items = MovieToGenre.objects.filter(movie=movie)
genres = []
for item in items:
genres.append(item.genre.serialize()['name'])
return genres
def get_movies(year=None, category=None):
items = MovieToGenre.objects.select_related('movie', 'genre')
if year != None:
items = items.filter(movie__year=year)
if category != None:
items = items.filter(genre__name=category)
movies = [item.movie.serialize() for item in items]
movies = utils.filter_unique(movies, 'image')
return movies
def get_genres():
items = Genre.objects.all()
genres = [item.serialize() for item in items]
return genres
def get_features():
items = Feature.objects.all()
features = [item.serialize() for item in items]
return features
def get_movies_range(syear, eyear, categories, andopr):
items = MovieToGenre.objects.select_related('movie', 'genre')
if syear != None:
items = items.filter(movie__year__gte=syear)
if eyear != None:
items = items.filter(movie__year__lte=eyear)
if categories != None and andopr != None:
if andopr == True:
movies = set(items.values_list('movie', flat=True))
for category in categories:
movies = movies.intersection(set(items.filter(genre__name=category).values_list('movie', flat=True)))
movies = list(movies)
if len(movies) > 0:
queries = [Q(id=movie_id) for movie_id in movies]
query = queries.pop()
for item in queries:
query |= item
items = Movie.objects.filter(query)
movies = [item.serialize() for item in items]
return movies
return []
else:
queries = [Q(genre__name=category) for category in categories]
query = queries.pop()
for item in queries:
query |= item
items = items.filter(query)
movies = [item.movie.serialize() for item in items]
movies = utils.filter_unique(movies, 'image')
return movies | 2.171875 | 2 |
neurec/model/item_ranking/IRGAN.py | qiqiding/NeuRec | 24 | 12762699 | '''
Reference: <NAME>, et al., "IRGAN: A Minimax Game for Unifying Generative and
Discriminative Information Retrieval Models." SIGIR 2017.
@author: <NAME>
'''
from neurec.model.AbstractRecommender import AbstractRecommender
import tensorflow as tf
import pickle
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from neurec.util import data_gen, reader
from neurec.evaluation import Evaluate
from neurec.util.properties import Properties
class GEN(object):
def __init__(self, itemNum, userNum, emb_dim, lamda, param=None, initdelta=0.05, learning_rate=0.05):
self.itemNum = itemNum
self.userNum = userNum
self.emb_dim = emb_dim
self.lamda = lamda # regularization parameters
self.param = param
self.initdelta = initdelta
self.learning_rate = learning_rate
self.g_params = []
with tf.variable_scope('generator'):
if self.param == None:
self.user_embeddings = tf.Variable(
tf.random_uniform([self.userNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
dtype=tf.float32))
self.item_embeddings = tf.Variable(
tf.random_uniform([self.itemNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
dtype=tf.float32))
self.item_bias = tf.Variable(tf.zeros([self.itemNum]))
else:
self.user_embeddings = tf.Variable(self.param[0])
self.item_embeddings = tf.Variable(self.param[1])
self.item_bias = tf.Variable(param[2])
self.g_params = [self.user_embeddings, self.item_embeddings, self.item_bias]
self.u = tf.placeholder(tf.int32)
self.i = tf.placeholder(tf.int32)
self.reward = tf.placeholder(tf.float32)
self.u_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.u)
self.i_embedding = tf.nn.embedding_lookup(self.item_embeddings, self.i)
self.i_bias = tf.gather(self.item_bias, self.i)
self.all_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
self.i_prob = tf.gather(
tf.reshape(tf.nn.softmax(tf.reshape(self.all_logits, [1, -1])), [-1]),
self.i)
self.gan_loss = -tf.reduce_mean(tf.log(self.i_prob) * self.reward) + self.lamda * (
tf.nn.l2_loss(self.u_embedding) + tf.nn.l2_loss(self.i_embedding) + tf.nn.l2_loss(self.i_bias))
g_opt = tf.train.GradientDescentOptimizer(self.learning_rate)
self.gan_updates = g_opt.minimize(self.gan_loss, var_list=self.g_params)
# for test stage, self.u: [batch_size]
self.all_rating = tf.matmul(self.u_embedding, self.item_embeddings, transpose_a=False,
transpose_b=True) + self.item_bias
class DIS(object):
    """Discriminator of IRGAN: a matrix-factorization classifier trained
    with a sigmoid cross-entropy loss to separate real user-item
    interactions (label 1) from generator samples (label 0). Its scaled
    sigmoid score `reward` (in (-1, 1)) is fed back to the generator.
    """

    def __init__(self, itemNum, userNum, emb_dim, lamda, param=None, initdelta=0.05, learning_rate=0.05):
        self.itemNum = itemNum
        self.userNum = userNum
        self.emb_dim = emb_dim
        self.lamda = lamda  # regularization parameters
        self.param = param  # optional pretrained [user_emb, item_emb, item_bias]
        self.initdelta = initdelta
        self.learning_rate = learning_rate
        self.d_params = []
        with tf.variable_scope('discriminator'):
            # FIX: identity comparison with None instead of `== None`.
            if self.param is None:
                self.user_embeddings = tf.Variable(
                    tf.random_uniform([self.userNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
                                      dtype=tf.float32))
                self.item_embeddings = tf.Variable(
                    tf.random_uniform([self.itemNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
                                      dtype=tf.float32))
                self.item_bias = tf.Variable(tf.zeros([self.itemNum]))
            else:
                self.user_embeddings = tf.Variable(self.param[0])
                self.item_embeddings = tf.Variable(self.param[1])
                self.item_bias = tf.Variable(self.param[2])
            self.d_params = [self.user_embeddings, self.item_embeddings, self.item_bias]
        # placeholder definition
        self.u = tf.placeholder(tf.int32)
        self.i = tf.placeholder(tf.int32)
        self.label = tf.placeholder(tf.float32)
        self.u_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.u)
        self.i_embedding = tf.nn.embedding_lookup(self.item_embeddings, self.i)
        self.i_bias = tf.gather(self.item_bias, self.i)
        self.pre_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.i_embedding), 1) + self.i_bias
        self.pre_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.label,
                                                                logits=self.pre_logits) + self.lamda * (
            tf.nn.l2_loss(self.u_embedding) + tf.nn.l2_loss(self.i_embedding) + tf.nn.l2_loss(self.i_bias)
        )
        d_opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.d_updates = d_opt.minimize(self.pre_loss, var_list=self.d_params)
        self.reward_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.i_embedding),
                                           1) + self.i_bias
        # Centered score in (-1, 1) used as the generator's reward signal.
        self.reward = 2 * (tf.sigmoid(self.reward_logits) - 0.5)
        # for test stage, self.u: [batch_size]
        self.all_rating = tf.matmul(self.u_embedding, self.item_embeddings, transpose_a=False,
                                    transpose_b=True) + self.item_bias
        self.all_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
        self.NLL = -tf.reduce_mean(tf.log(
            tf.gather(tf.reshape(tf.nn.softmax(tf.reshape(self.all_logits, [1, -1])), [-1]), self.i))
        )
        # for dns sample
        self.dns_rating = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
class IRGAN(AbstractRecommender):
    """IRGAN (Wang et al., SIGIR 2017): adversarial recommendation.

    A generator (GEN) learns to sample plausible items per user while a
    discriminator (DIS) learns to separate sampled items from real
    interactions; the discriminator's score is fed back to the generator
    as a policy-gradient reward.
    """

    # Configuration keys this recommender reads from self.conf.
    properties = [
        "factors_num",
        "lr",
        "g_reg",
        "d_reg",
        "epochs",
        "g_epoch",
        "d_epoch",
        "batch_size",
        "d_tau",
        "topk",
        "pretrain_file"
    ]

    def __init__(self, sess, dataset):
        # BUGFIX: previously `super().__init__(**kwds)` — `kwds` was an
        # undefined name and raised NameError on construction.
        super().__init__()
        # BUGFIX: sess/dataset were never stored although self.sess and
        # self.dataset are used by the training/prediction methods below.
        self.sess = sess
        self.dataset = dataset
        train_matrix = dataset.trainMatrix.tocsr()
        self.num_users, self.num_items = train_matrix.shape
        # NOTE(review): self.conf is presumably populated by
        # AbstractRecommender from `properties` — confirm.
        self.factors_num = self.conf["factors_num"]
        self.lr = self.conf["lr"]
        self.g_reg = self.conf["g_reg"]
        self.d_reg = self.conf["d_reg"]
        self.epochs = self.conf["epochs"]
        self.g_epoch = self.conf["g_epoch"]
        self.d_epoch = self.conf["d_epoch"]
        self.batch_size = self.conf["batch_size"]
        self.d_tau = self.conf["d_tau"]
        self.topK = self.conf["topk"]
        self.pretrain_file = self.conf["pretrain_file"]
        self.loss_function = "None"
        # Map each user index to the array of item indices they interacted with.
        idx_value_dict = {}
        for idx, value in enumerate(train_matrix):
            # BUGFIX: `any(value.indices)` is False for a user whose only
            # interaction is item 0; test for a non-empty row instead.
            if value.indices.size > 0:
                idx_value_dict[idx] = value.indices
        self.user_pos_train = idx_value_dict
        self.num_users, self.num_items = dataset.num_users, dataset.num_items
        self.all_items = np.arange(self.num_items)

    def build_graph(self):
        """Create the generator (warm-started from a pretrain file) and a
        freshly initialized discriminator."""
        file = reader.lines(self.pretrain_file)
        # NOTE(review): pickle.load expects a binary file object — confirm
        # that reader.lines returns one.
        pretrain_params = pickle.load(file, encoding="latin")
        self.generator = GEN(self.num_items, self.num_users, self.factors_num, self.g_reg, param=pretrain_params,
                             learning_rate=self.lr)
        self.discriminator = DIS(self.num_items, self.num_users, self.factors_num, self.d_reg, param=None,
                                 learning_rate=self.lr)

    def get_train_data(self):
        """Build the discriminator's training set: for every user, the
        positive items plus an equal number of generator-sampled negatives
        (users processed in parallel)."""
        users_list, items_list, labels_list = [], [], []
        train_users = list(self.user_pos_train.keys())
        with ThreadPoolExecutor() as executor:
            data = executor.map(self.get_train_data_one_user, train_users)
        data = list(data)
        for users, items, labels in data:
            users_list.extend(users)
            items_list.extend(items)
            labels_list.extend(labels)
        return users_list, items_list, labels_list

    def get_train_data_one_user(self, user):
        """Pair each positive item of `user` (label 1.0) with one negative
        item sampled from the generator's temperature-scaled softmax
        (label 0.0)."""
        user_list, items_list, label_list = [], [], []
        pos = self.user_pos_train[user]
        rating = self.sess.run(self.generator.all_rating, {self.generator.u: [user]})
        rating = np.reshape(rating, [-1])
        rating = np.array(rating) / self.d_tau  # Temperature
        exp_rating = np.exp(rating)
        prob = exp_rating / np.sum(exp_rating)
        neg = np.random.choice(self.all_items, size=len(pos), p=prob)
        for i, j in zip(pos, neg):
            user_list.append(user)
            items_list.append(i)
            label_list.append(1.0)
            user_list.append(user)
            items_list.append(j)
            label_list.append(0.0)
        return (user_list, items_list, label_list)

    def train_model(self):
        """Alternate d_epoch discriminator passes and g_epoch generator
        passes for `epochs` adversarial rounds, evaluating after each."""
        for _ in range(self.epochs):
            for _ in range(self.d_epoch):
                users_list, items_list, labels_list = self.get_train_data()
                self.training_discriminator(users_list, items_list, labels_list)
            for _ in range(self.g_epoch):
                self.training_generator()
            Evaluate.test_model(self, self.dataset)

    def training_discriminator(self, user, item, label):
        """Run one epoch of mini-batch discriminator updates."""
        num_training_instances = len(user)
        for num_batch in np.arange(int(num_training_instances / self.batch_size)):
            bat_users, bat_items, bat_lables = \
                data_gen._get_pointwise_batch_data(user, item, label, num_batch, self.batch_size)
            feed = {self.discriminator.u: bat_users,
                    self.discriminator.i: bat_items,
                    self.discriminator.label: bat_lables}
            self.sess.run(self.discriminator.d_updates, feed_dict=feed)

    def training_generator(self):
        """Policy-gradient update of the generator, one user at a time,
        with importance sampling over a smoothed proposal distribution."""
        for user, pos in self.user_pos_train.items():
            sample_lambda = 0.2
            rating = self.sess.run(self.generator.all_logits, {self.generator.u: user})
            exp_rating = np.exp(rating)
            prob = exp_rating / np.sum(exp_rating)  # prob is generator distribution p_\theta
            # Pn for importance sampling: mix p_\theta with uniform mass on positives.
            pn = (1 - sample_lambda) * prob
            pn[pos] += sample_lambda * 1.0 / len(pos)
            sample = np.random.choice(self.all_items, 2 * len(pos), p=pn)
            # Get reward from D and adapt it with the ratio p_\theta / Pn.
            feed = {self.discriminator.u: user, self.discriminator.i: sample}
            reward = self.sess.run(self.discriminator.reward, feed_dict=feed)
            reward = reward * prob[sample] / pn[sample]
            # Update G.
            feed = {self.generator.u: user, self.generator.i: sample, self.generator.reward: reward}
            self.sess.run(self.generator.gan_updates, feed_dict=feed)

    def predict(self, user_id, items):
        """Score `items` for `user_id` with the generator's learned factors
        (pure numpy, no graph execution beyond fetching the parameters)."""
        user_embedding, item_embedding, item_bias = self.sess.run(self.generator.g_params)
        u_embedding = user_embedding[user_id]
        item_embedding = item_embedding[items]
        item_bias = item_bias[items]
        ratings = np.matmul(u_embedding, item_embedding.T) + item_bias
        return ratings
| 2.4375 | 2 |
create_auto_mount.py | Hofei90/create_automount | 3 | 12762700 | <gh_stars>1-10
#!/usr/bin/python3
import getpass
import os
import platform
import shlex
import shutil
import subprocess
import sys
import toml
# Directory this script lives in (used to locate ping_server.py and configs).
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
# Target locations written by this installer (all require root privileges).
SPEICHERORT_ZUGANGSDATEN = "/etc/smbcredentials"
PFAD_PING_SERVER_SERVICE = "/etc/systemd/system/ping_server.service"
PFAD_PING_SERVER = "/usr/local/sbin/ping_server.py"
PFAD_SYSTEMD_SERVICE_UNIT = "/etc/systemd/system"
def pfadeingabe():
    """Ask for the mount directory name and its parent path (default /media)."""
    neuer_ordner = input("Name für neuen Mountordner: ")
    basis = input("Verzeichnis für den Mountordner, wenn leer: -> /media ") or "/media"
    return os.path.join(basis, neuer_ordner)
def zugangsdaten_eingeben():
    """Prompt for the share credentials and return them as a dict."""
    print("Zugangsdaten für das einzuhängende Gerät - Zugang muss am anderen Gerät freigeben/erstellt werden.")
    return {"username": input("Benutzername: "), "pw": getpass.getpass("Passwort: ")}
def adresse_eingeben():
    """Prompt for the remote share address (e.g. //server/share)."""
    return input("Externe Adresse eingeben: ")
def optionen_eingeben():
    """Collect cifs mount options (uid, gid plus any extras) as one comma-joined string."""
    eingaben = ["uid={}".format(input("uid: Bsp. '1000': ")),
                "gid={}".format(input("gid: Bsp. '1000': "))]
    while True:
        weitere = input("Weitere Optionen eingeben - Bsp: vers=1.0, weiter mit leerer Eingabe: ")
        if not weitere:
            # empty input ends the loop and is not added
            break
        eingaben.append(weitere)
    return ",".join(eingaben)
def zugangsdaten_erstellen(zugangsdaten):
    """Write the mount.cifs credentials file, readable by root only (0600)."""
    inhalt = "username={username}\npassword={pw}".format(**zugangsdaten)
    with open(SPEICHERORT_ZUGANGSDATEN, "w") as datei:
        datei.write(inhalt)
    shutil.chown(SPEICHERORT_ZUGANGSDATEN, "root", "root")
    os.chmod(SPEICHERORT_ZUGANGSDATEN, 0o600)
    print("Zugangsdaten erstellt - Pfad: {}".format(SPEICHERORT_ZUGANGSDATEN))
def ordner_erstellen(pfad):
    """Create the mount directory `pfad` if it does not exist yet.

    Prints a notice when the path already exists.

    Raises:
        OSError: if the directory could not be created.
    """
    if os.path.exists(pfad):
        print("Pfad existiert schon!")
    else:
        os.mkdir(pfad)
        if os.path.exists(pfad):
            print("Ordner {} erstellt".format(pfad))
        else:
            # FIX: raise a specific exception instead of BaseException
            # (BaseException should never be raised directly; OSError is
            # still caught by any pre-existing `except BaseException`).
            raise OSError("Ordner konnte nicht erstellt werden")
def inhalt_systemd_service_mount_unit_generieren(mount_pfad, adresse, optionen, type_="cifs"):
    """Render the content of a systemd .mount unit for the given share."""
    return f"""[Unit]
Description=Mount von {mount_pfad}
Requires=ping_server.service
After=ping_server.service
Conflicts=shutdown.target
ConditionPathExists={mount_pfad}
[Mount]
What={adresse}
Where={mount_pfad}
Options=credentials={SPEICHERORT_ZUGANGSDATEN},{optionen}
Type={type_}
[Install]
WantedBy=multi-user.target
"""
def name_mount_unit_ermitteln(mount_pfad):
    """Return the systemd unit file name for `mount_pfad` via systemd-escape."""
    cmd = shlex.split("systemd-escape --suffix=mount --path {}".format(mount_pfad))
    # FIX: use subprocess.run so the child process is waited for and reaped
    # (the old Popen was never wait()ed).
    ergebnis = subprocess.run(cmd, stdout=subprocess.PIPE)
    return ergebnis.stdout.decode("utf-8").strip()
def mount_unit_erstellen(inhalt, mount_pfad):
    """Write the rendered mount unit into /etc/systemd/system and return its file name."""
    filename = name_mount_unit_ermitteln(mount_pfad)
    ziel = os.path.join(PFAD_SYSTEMD_SERVICE_UNIT, filename)
    with open(ziel, "w") as datei:
        datei.write(inhalt)
    shutil.chown(ziel, "root", "root")
    os.chmod(ziel, 0o644)
    print("Datei {} erstellt".format(ziel))
    return filename
def ping_server_kopieren():
    """Install ping_server.py into /usr/local/sbin, owned by root, mode 0755."""
    quelle = os.path.join(SKRIPTPFAD, "ping_server.py")
    shutil.copy(quelle, PFAD_PING_SERVER)
    shutil.chown(PFAD_PING_SERVER, "root", "root")
    os.chmod(PFAD_PING_SERVER, 0o755)
    print("Datei {} erstellt".format(PFAD_PING_SERVER))
def ip_pingziel_eingeben():
    """Prompt for the IP address used to probe network availability."""
    return input("IP Pingziel zur Überprüfung der Netwerkverfügbarkeit eingeben: ")
def ping_server_service_erstellen(ip_pingziel):
    """Write the ping_server systemd service unit that waits for the server/network."""
    inhalt = f"""[Unit]
Description=serverctl.service: Waiting for Network or Server to be up
After=network.target
[Service]
Type=oneshot
TimeoutStartSec=95
ExecStart=/usr/local/sbin/ping_server.py {ip_pingziel}
[Install]
WantedBy=multi-user.target"""
    with open(PFAD_PING_SERVER_SERVICE, "w") as datei:
        datei.write(inhalt)
    shutil.chown(PFAD_PING_SERVER_SERVICE, "root", "root")
    os.chmod(PFAD_PING_SERVER_SERVICE, 0o644)
    print("Datei {} erstellt".format(PFAD_PING_SERVER_SERVICE))
def mount_unit_aktivieren(mount_unit):
    """Start the given mount unit and optionally `systemctl enable` it."""
    cmd = shlex.split("systemctl start {}".format(mount_unit))
    # FIX: subprocess.run waits for and reaps the child (the old Popen
    # objects were never wait()ed).
    start = subprocess.run(cmd, stdout=subprocess.PIPE)
    print(start.stdout)
    befehl = input("Unit aktivieren? (j|n)")
    if befehl == "j":
        cmd = shlex.split("systemctl enable {}".format(mount_unit))
        start = subprocess.run(cmd, stdout=subprocess.PIPE)
        print(start.stdout)
    else:
        print("Hinweis, wird eine Service Unit verändert muss anschließend 'systemctl daemon-reload' ausgeführt werden")
def eingabe_sichern(pfad_mountpunkt, zugangsdaten, adresse, optionen, ip_pingziel):
    """Save all collected settings (including plain-text credentials!) to <name>_cfg.toml."""
    konfiguration = toml.dumps({
        "pfad_mountpunkt": pfad_mountpunkt,
        "zugangsdaten": zugangsdaten,
        "adresse": adresse,
        "optionen": optionen,
        "ip_pingziel": ip_pingziel,
    })
    name = input("Configname eingeben: ")
    ziel = os.path.join(SKRIPTPFAD, "{}_cfg.toml".format(name))
    with open(ziel, "w") as datei:
        datei.write(konfiguration)
    shutil.chown(ziel, "root", "root")
    os.chmod(ziel, 0o600)
    print("Datei {} erstellt".format(ziel))
def lade_daten(cfg):
    """Load a previously saved configuration file and return it as a dict.

    Raises:
        ValueError: if the file name does not end with 'cfg.toml'.
    """
    # FIX: the old `"cfg.toml" in cfg` substring test also accepted names
    # like "x_cfg.toml.bak"; require the proper suffix.
    if not cfg.endswith("cfg.toml"):
        raise ValueError("Dateiformat falsch")
    datei = os.path.join(SKRIPTPFAD, cfg)
    with open(datei) as file:
        return toml.loads(file.read())
def willkommen():
    """Print the introductory disclaimer."""
    print("""Dieses Skript soll die Einrichtung zum Einhängen von Netzwerkfreigaben beschleunigen.
Es kann nicht das notwendige Wissen zu den einzelnen Punkten während der Erstellung ersetzen.
Verwendung und Benutzung auf eigene Gefahr!""")
def main():
    """Entry point: gather (or load) the settings, then create credentials,
    mount unit and ping-server service, and activate the mount unit.

    Linux only; with a saved *_cfg.toml as first CLI argument the
    interactive questions are skipped.
    """
    willkommen()
    if platform.system() == "Linux":
        if len(sys.argv) > 1:
            # A saved configuration file was passed on the command line.
            daten = lade_daten(sys.argv[1])
            pfad_mountpunkt = daten["pfad_mountpunkt"]
            zugangsdaten = daten["zugangsdaten"]
            adresse = daten["adresse"]
            optionen = daten["optionen"]
            ip_pingziel = daten["ip_pingziel"]
        else:
            # Interactive mode: query every setting from the user.
            pfad_mountpunkt = pfadeingabe()
            zugangsdaten = zugangsdaten_eingeben()
            adresse = adresse_eingeben()
            optionen = optionen_eingeben()
            ip_pingziel = ip_pingziel_eingeben()
            print("Die Konfigruationsdatei enthält wenn sie gespeichert wird alle Eingaben einschließlich Passwörter "
                  "in Klartext!")
            eingabe = input("Eingaben sichern? (j|n)")
            if eingabe == "j":
                eingabe_sichern(pfad_mountpunkt, zugangsdaten, adresse, optionen, ip_pingziel)
        ordner_erstellen(pfad_mountpunkt)
        zugangsdaten_erstellen(zugangsdaten)
        mount_unit = mount_unit_erstellen(inhalt_systemd_service_mount_unit_generieren(pfad_mountpunkt, adresse,
                                                                                      optionen),
                                          pfad_mountpunkt)
        ping_server_kopieren()
        ping_server_service_erstellen(ip_pingziel)
        mount_unit_aktivieren(mount_unit)
    else:
        print("Falsches Betriebssystem")
# Script entry point.
if __name__ == "__main__":
    main()
| 2.375 | 2 |
tests/test_humid_air_inputs.py | portyanikhin/PyFluids | 0 | 12762701 | # PyFluids
# Copyright (c) 2021 <NAME>
import pytest
from pyfluids import *
class TestHAInputs:
    """Unit tests for the HAInput enumeration (humid-air CoolProp keys)."""

    @pytest.mark.parametrize("ha_input", list(HAInput))
    def test_with_value(self, ha_input):
        """with_value(0) must yield a keyed input carrying the value 0."""
        assert ha_input.with_value(0).value == 0

    @pytest.mark.parametrize(
        "ha_input, expected_key",
        [
            (HAInput.Density, "Vha"),
            (HAInput.DewTemperature, "D"),
            (HAInput.Enthalpy, "Hha"),
            (HAInput.Entropy, "Sha"),
            (HAInput.Humidity, "W"),
            (HAInput.PartialPressure, "P_w"),
            (HAInput.Pressure, "P"),
            (HAInput.RelativeHumidity, "R"),
            (HAInput.Temperature, "T"),
            (HAInput.WBTemperature, "B"),
        ],
    )
    def test_coolprop_key(self, ha_input, expected_key):
        """Every enum member maps to its CoolProp HA key string."""
        assert ha_input.coolprop_key == expected_key

    @pytest.mark.parametrize("ha_input", list(HAInput))
    def test_value(self, ha_input):
        """A bare input (no with_value) carries no value."""
        assert ha_input.value is None
| 2.78125 | 3 |
hatefull/apps/tests/admin.py | MauricioDinki/hatefull | 0 | 12762702 | <filename>hatefull/apps/tests/admin.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Test
@admin.register(Test)
class TestAdmin(admin.ModelAdmin):
    """Django admin configuration for the Test model."""

    # Columns shown in the admin change-list view.
    list_display = ('user', 'name',)
| 1.601563 | 2 |
screenplay_pdf_to_json/parse_pdf/processInitialPages.py | SMASH-CUT/screenplay-parser | 16 | 12762703 | <filename>screenplay_pdf_to_json/parse_pdf/processInitialPages.py
import json
import re
from screenplay_pdf_to_json.parse_pdf import cleanPage
def processInitialPages(script):
    """Split off the leading title/front-matter pages of a parsed screenplay.

    A page counts as a "first page" while its number of distinct text rows
    (distinct y positions) stays clearly below the document-wide average
    (avg - 10); the scan stops at the first page at or above that threshold.

    Returns a dict with:
        firstPages: the cleaned first pages, each tagged type FIRST_PAGES
        pageStart:  index of the first regular content page
    """
    if not script:
        # FIX: an empty script previously crashed with ZeroDivisionError.
        return {"firstPages": [], "pageStart": 0}
    # Count the distinct y positions (text rows) per page
    # (a set replaces the old dict-used-as-set).
    rows_per_page = [len({content["y"] for content in page["content"]})
                     for page in script]
    avg = sum(rows_per_page) / len(rows_per_page)
    firstPages = []
    i = 0
    while i < len(rows_per_page):
        if rows_per_page[i] > avg - 10:
            break
        firstPages.append({
            "page": i,
            "content": script[i]["content"]
        })
        i += 1
    # FIX: list(...) instead of the redundant identity comprehension.
    firstPages = list(cleanPage(firstPages, 0))
    for page in firstPages:
        page["type"] = "FIRST_PAGES"
    return {
        "firstPages": firstPages,
        "pageStart": i
    }
| 3.015625 | 3 |
分类代表题目/字符串/最长不含重复字符的子字符串(动态规划).py | ResolveWang/algorithm_qa | 79 | 12762704 | """
问题描述:请从字符串中找出一个最长的不包含重复字符的子字符串,计算
该最长子字符串的长度。假设字符串中只含有'a~z'的字符。例如,在字符
串'arabcacfr'中,最长不含重复字符的子字符串是'acfr',长度为4
思路:
分别求必须以i(0<=i<=len-1)结尾的最长不含重复字符的子串长度
"""
class LongestSubStr:
    """Longest substring without repeated characters (dynamic programming)."""

    def get_longest_substr(self, input_str):
        """Return the length of the longest duplicate-free substring.

        dp[i] is the length of the longest duplicate-free substring that
        ends exactly at index i; the answer is max(dp).
        """
        length = len(input_str)
        if length <= 1:
            return length
        dp = [0 for _ in range(length)]
        dp[0] = 1
        index = 1
        while index < length:
            if input_str[index] not in input_str[:index]:
                dp[index] = dp[index - 1] + 1
            else:
                # BUGFIX: search the whole prefix [0, index); the old upper
                # bound `index-1` missed a duplicate sitting at position
                # index-1 and raised ValueError (e.g. for "aa").
                pre_index = input_str.rindex(input_str[index], 0, index)
                distance = index - pre_index
                if dp[index - 1] < distance:
                    dp[index] = dp[index - 1] + 1
                else:
                    dp[index] = distance
            index += 1
        # BUGFIX: the best substring may end anywhere, not only at the last
        # character (e.g. "abcdeaa"), so take the maximum over all positions.
        return max(dp)
if __name__ == '__main__':
    # Demo: the longest duplicate-free substring of 'arabcacfr' is 'acfr' (4).
    print(LongestSubStr().get_longest_substr('arabcacfr'))
skmob/core/trajectorydataframe.py | LarryShamalama/scikit-mobility | 0 | 12762705 | import pandas as pd
from ..utils import constants, plot, utils
import numpy as np
from warnings import warn
from shapely.geometry import Polygon, Point
import geopandas as gpd
from .flowdataframe import FlowDataFrame
from skmob.preprocessing import routing
class TrajSeries(pd.Series):
    """pandas Series subclass so that slicing/arithmetic on trajectory data
    keeps returning skmob types."""

    @property
    def _constructor(self):
        # pandas hook: Series-shaped results stay TrajSeries.
        return TrajSeries

    @property
    def _constructor_expanddim(self):
        # pandas hook: to_frame()/expansion produces a TrajDataFrame.
        return TrajDataFrame
class TrajDataFrame(pd.DataFrame):
    """pandas DataFrame subclass holding trajectories (lat, lng, datetime
    plus optional uid/tid columns) with attached crs and parameters."""

    # Attributes propagated through pandas operations via __finalize__;
    # must stay in sync with the `metadata` property below.
    _metadata = ['_parameters', '_crs']  # All the metadata that should be accessible must be also in the metadata method
def __init__(self, data, latitude=constants.LATITUDE, longitude=constants.LONGITUDE, datetime=constants.DATETIME,
user_id=constants.UID, trajectory_id=constants.TID,
timestamp=False, crs={"init": "epsg:4326"}, parameters={}):
original2default = {latitude: constants.LATITUDE,
longitude: constants.LONGITUDE,
datetime: constants.DATETIME,
user_id: constants.UID,
trajectory_id: constants.TID}
columns = None
if isinstance(data, pd.DataFrame):
tdf = data.rename(columns=original2default)
columns = tdf.columns
# Dictionary
elif isinstance(data, dict):
tdf = pd.DataFrame.from_dict(data).rename(columns=original2default)
columns = tdf.columns
# List
elif isinstance(data, list) or isinstance(data, np.ndarray):
tdf = data
columns = []
num_columns = len(data[0])
for i in range(num_columns):
try:
columns += [original2default[i]]
except KeyError:
columns += [i]
elif isinstance(data, pd.core.internals.BlockManager):
tdf = data
else:
raise TypeError('DataFrame constructor called with incompatible data and dtype: {e}'.format(e=type(data)))
super(TrajDataFrame, self).__init__(tdf, columns=columns)
# Check crs consistency
if crs is None:
warn("crs will be set to the default crs WGS84 (EPSG:4326).")
if not isinstance(crs, dict):
raise TypeError('crs must be a dict type.')
self._crs = crs
if not isinstance(parameters, dict):
raise AttributeError("parameters must be a dictionary.")
self._parameters = parameters
if self._has_traj_columns():
self._set_traj(timestamp=timestamp, inplace=True)
def _has_traj_columns(self):
if (constants.DATETIME in self) and (constants.LATITUDE in self) and (constants.LONGITUDE in self):
return True
return False
def _is_trajdataframe(self):
if ((constants.DATETIME in self) and pd.core.dtypes.common.is_datetime64_any_dtype(self[constants.DATETIME]))\
and ((constants.LONGITUDE in self) and pd.core.dtypes.common.is_float_dtype(self[constants.LONGITUDE])) \
and ((constants.LATITUDE in self) and pd.core.dtypes.common.is_float_dtype(self[constants.LATITUDE])):
return True
return False
def _set_traj(self, timestamp=False, inplace=False):
if not inplace:
frame = self.copy()
else:
frame = self
if timestamp:
frame[constants.DATETIME] = pd.to_datetime(frame[constants.DATETIME], unit='s')
if not pd.core.dtypes.common.is_datetime64_any_dtype(frame[constants.DATETIME].dtype):
frame[constants.DATETIME] = pd.to_datetime(frame[constants.DATETIME])
if not pd.core.dtypes.common.is_float_dtype(frame[constants.LONGITUDE].dtype):
frame[constants.LONGITUDE] = frame[constants.LONGITUDE].astype('float')
if not pd.core.dtypes.common.is_float_dtype(frame[constants.LATITUDE].dtype):
frame[constants.LATITUDE] = frame[constants.LATITUDE].astype('float')
frame.parameters = self._parameters
frame.crs = self._crs
if not inplace:
return frame
    def to_flowdataframe(self, tessellation, remove_na=False, self_loops=True):
        """
        Aggregate the trajectories into origin-destination flows between the
        tiles of `tessellation`.

        :param tessellation: GeoDataFrame describing the spatial tessellation
        :param remove_na: if True, drop points that fall in no tile
        :param self_loops: if True, it counts self movements (default True)
        :return: FlowDataFrame with origin, destination and flow columns
        """
        # Step 1: order the dataframe by user_id, traj_id, datetime
        self.sort_values(by=self.__operate_on(), ascending=True, inplace=True)
        # Step 2: map the trajectory onto the tessellation
        flow = self.mapping(tessellation, remove_na=remove_na)
        # Step 3: groupby tile_id and sum to obtain the flow
        # (each point's destination is the tile of the *next* point)
        flow.loc[:, constants.DESTINATION] = flow[constants.TILE_ID].shift(-1)
        flow = flow.groupby([constants.TILE_ID, constants.DESTINATION]).size().reset_index(name=constants.FLOW)
        flow.rename(columns={constants.TILE_ID: constants.ORIGIN}, inplace=True)
        if not self_loops:
            flow = flow[flow[constants.ORIGIN] != flow[constants.DESTINATION]]
        return FlowDataFrame(flow, tessellation=tessellation)
def to_geodataframe(self):
gdf = gpd.GeoDataFrame(self.copy(), geometry=gpd.points_from_xy(self[constants.LONGITUDE],
self[constants.LATITUDE]), crs=self._crs)
return gdf
def mapping(self, tessellation, remove_na=False):
"""
Method to assign to each point of the TrajDataFrame a corresponding tile_id of a given tessellation.
:param tessellation: GeoDataFrame containing a tessellation (geometry of points or polygons).
:param remove_na: (default False) it removes points that do not have a corresponding tile_id
:return: TrajDataFrame with an additional column containing the tile_ids.
"""
gdf = self.to_geodataframe()
if all(isinstance(x, Polygon) for x in tessellation.geometry):
if remove_na:
how = 'inner'
else:
how = 'left'
tile_ids = gpd.sjoin(gdf, tessellation, how=how, op='within')[[constants.TILE_ID]]
elif all(isinstance(x, Point) for x in tessellation.geometry):
tile_ids = utils.nearest(gdf, tessellation, constants.TILE_ID)
new_data = self._constructor(self).__finalize__(self)
new_data = new_data.merge(tile_ids, right_index=True, left_index=True)
return new_data
    def __getitem__(self, key):
        """
        If the result contains lat, lng and datetime, return a TrajDataFrame, else a pandas DataFrame.
        """
        result = super(TrajDataFrame, self).__getitem__(key)
        if (isinstance(result, TrajDataFrame)) and result._is_trajdataframe():
            # Mandatory columns survived the selection: keep the subclass
            # and carry the metadata over.
            result.__class__ = TrajDataFrame
            result.crs = self._crs
            result.parameters = self._parameters
        elif isinstance(result, TrajDataFrame) and not result._is_trajdataframe():
            # Mandatory columns were sliced away: downgrade to a plain DataFrame.
            result.__class__ = pd.DataFrame
        return result
def settings_from(self, trajdataframe):
"""
Method to copy attributes from another TrajDataFrame.
:param trajdataframe: TrajDataFrame from which copy the attributes.
"""
for k in trajdataframe.metadata:
value = getattr(trajdataframe, k)
setattr(self, k, value)
    @classmethod
    def from_file(cls, filename, latitude=constants.LATITUDE, longitude=constants.LONGITUDE, datetime=constants.DATETIME,
                  user_id=constants.UID, trajectory_id=constants.TID,
                  usecols=None, header='infer', timestamp=False, crs={"init": "epsg:4326"}, sep=",", parameters=None):
        """Read a CSV file and build a TrajDataFrame from its contents."""
        df = pd.read_csv(filename, sep=sep, header=header, usecols=usecols)
        if parameters is None:
            # Init prop dictionary: remember where the data came from.
            parameters = {'from_file': filename}
        return cls(df, latitude=latitude, longitude=longitude, datetime=datetime, user_id=user_id,
                   trajectory_id=trajectory_id, parameters=parameters, crs=crs, timestamp=timestamp)
@property
def lat(self):
if constants.LATITUDE not in self:
raise AttributeError("The TrajDataFrame does not contain the column '%s.'" % constants.LATITUDE)
return self[constants.LATITUDE]
@property
def lng(self):
if constants.LONGITUDE not in self:
raise AttributeError("The TrajDataFrame does not contain the column '%s.'"%constants.LONGITUDE)
return self[constants.LONGITUDE]
@property
def datetime(self):
if constants.DATETIME not in self:
raise AttributeError("The TrajDataFrame does not contain the column '%s.'"%constants.DATETIME)
return self[constants.DATETIME]
    @property
    def _constructor(self):
        # pandas hook: DataFrame-shaped results stay TrajDataFrame.
        return TrajDataFrame

    @property
    def _constructor_sliced(self):
        # pandas hook: single rows/columns become TrajSeries.
        return TrajSeries

    @property
    def _constructor_expanddim(self):
        # pandas hook: dimension-expanding results are TrajDataFrame again.
        return TrajDataFrame
@property
def metadata(self):
md = ['crs', 'parameters'] # Add here all the metadata that are accessible from the object
return md
    def __finalize__(self, other, method=None, **kwargs):
        """propagate metadata from other to self """
        # merge operation: using metadata of the left object
        if method == 'merge':
            for name in self._metadata:
                object.__setattr__(self, name, getattr(other.left, name, None))
        # concat operation: using metadata of the first object
        elif method == 'concat':
            for name in self._metadata:
                object.__setattr__(self, name, getattr(other.objs[0], name, None))
        else:
            # default: copy straight from the single source object
            for name in self._metadata:
                object.__setattr__(self, name, getattr(other, name, None))
        return self
    def set_parameter(self, key, param):
        """Set a single entry of the parameters dictionary."""
        self._parameters[key] = param

    @property
    def crs(self):
        """Coordinate reference system of the points (dict, e.g. {"init": "epsg:4326"})."""
        return self._crs

    @crs.setter
    def crs(self, crs):
        self._crs = crs

    @property
    def parameters(self):
        """Free-form metadata dictionary attached to this TrajDataFrame."""
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        # Store a copy so later mutation of the argument does not leak in.
        self._parameters = dict(parameters)
def __operate_on(self):
"""
Check which optional fields are present and return a list of them plus mandatory fields to which apply
built-in pandas functions such as sort_values or groupby.
:return: list
"""
cols = []
if constants.UID in self:
cols.append(constants.UID)
if constants.TID in self:
cols.append(constants.TID)
cols.append(constants.DATETIME)
return cols
# Sorting
def sort_by_uid_and_datetime(self):
if constants.UID in self.columns:
return self.sort_values(by=[constants.UID, constants.DATETIME], ascending=[True, True])
else:
return self.sort_values(by=[constants.DATETIME], ascending=[True])
# Plot methods
def plot_trajectory(self, map_f=None, max_users=10, max_points=1000, style_function=plot.traj_style_function,
tiles='cartodbpositron', zoom=12, hex_color=-1, weight=2, opacity=0.75, start_end_markers=True):
"""
:param map_f: folium.Map
`folium.Map` object where the trajectory will be plotted. If `None`, a new map will be created.
:param max_users: int
maximum number of users whose trajectories should be plotted.
:param max_points: int
maximum number of points per user to plot.
If necessary, a user's trajectory will be down-sampled to have at most `max_points` points.
:param style_function: lambda function
function specifying the style (weight, color, opacity) of the GeoJson object.
:param tiles: str
folium's `tiles` parameter.
:param zoom: int
initial zoom.
:param hex_color: str or int
hex color of the trajectory line. If `-1` a random color will be generated for each trajectory.
:param weight: float
thickness of the trajectory line.
:param opacity: float
opacity (alpha level) of the trajectory line.
:param start_end_markers: bool
add markers on the start and end points of the trajectory.
:return: `folium.Map` object with the plotted trajectories.
"""
return plot.plot_trajectory(self, map_f=map_f, max_users=max_users, max_points=max_points,
style_function=style_function, tiles=tiles, zoom=zoom, hex_color=hex_color,
weight=weight, opacity=opacity, start_end_markers=start_end_markers)
def plot_stops(self, map_f=None, max_users=10, tiles='cartodbpositron', zoom=12, hex_color=-1, opacity=0.3,
radius=12, popup=True):
"""
Requires a TrajDataFrame with stops or clusters, output of `preprocessing.detection.stops`
or `preprocessing.clustering.cluster`. The column `constants.LEAVING_DATETIME` must be present.
:param map_f: folium.Map
`folium.Map` object where the stops will be plotted. If `None`, a new map will be created.
:param max_users: int
maximum number of users whose stops should be plotted.
:param tiles: str
folium's `tiles` parameter.
:param zoom: int
initial zoom.
:param hex_color: str or int
hex color of the stop markers. If `-1` a random color will be generated for each user.
:param opacity: float
opacity (alpha level) of the stop makers.
:param radius: float
size of the markers.
:param popup: bool
if `True`, when clicking on a marker a popup window displaying information on the stop will appear.
:return: `folium.Map` object with the plotted stops.
"""
return plot.plot_stops(self, map_f=map_f, max_users=max_users, tiles=tiles, zoom=zoom,
hex_color=hex_color, opacity=opacity, radius=radius, popup=popup)
def plot_diary(self, user, start_datetime=None, end_datetime=None, ax=None):
"""
Requires a TrajDataFrame with clusters, output of `preprocessing.clustering.cluster`.
The column `constants.CLUSTER` must be present.
:param user: str or int
user ID whose diary should be plotted.
:param start_datetime: datetime.datetime
Only stops made after this date will be plotted.
If `None` the datetime of the oldest stop will be selected.
:param end_datetime: datetime.datetime
Only stops made before this date will be plotted.
If `None` the datetime of the newest stop will be selected.
:param ax: matplotlib.axes
axes where the diary will be plotted.
:return: `matplotlib.axes` of the plotted diary.
"""
return plot.plot_diary(self, user, start_datetime=start_datetime, end_datetime=end_datetime, ax=ax)
def route(self, G=None, index_origin=0, index_destin=-1):
return routing.route(self, G=G, index_origin=index_origin, index_destin=index_destin)
def timezone_conversion(self, from_timezone, to_timezone):
"""
:param from_timezone: str
current timezone (e.g. 'GMT')
:param to_timezone: str
new timezone (e.g. 'Asia/Shanghai')
"""
self.rename(columns={'datetime': 'original_datetime'}, inplace=True)
self['datetime'] = self['original_datetime']. \
dt.tz_localize(from_timezone). \
dt.tz_convert(to_timezone). \
dt.tz_localize(None)
self.drop(columns=['original_datetime'], inplace=True)
def nparray_to_trajdataframe(trajectory_array, columns, parameters={}):
    """Wrap a 2-D array of trajectory rows into a TrajDataFrame with the given columns."""
    return TrajDataFrame(pd.DataFrame(trajectory_array, columns=columns), parameters=parameters)
def _dataframe_set_geometry(self, col, timestampe=False, drop=False, inplace=False, crs=None):
    """Monkey-patched onto pandas.DataFrame as `_set_traj` (see below):
    convert a plain DataFrame into a TrajDataFrame.

    NOTE(review): `col`, `timestampe` (sic), `drop` and `crs` are accepted
    but never used — presumably copied from geopandas' set_geometry
    signature; confirm before relying on them.
    """
    if inplace:
        raise ValueError("Can't do inplace setting when converting from"
                         " DataFrame to GeoDataFrame")
    gf = TrajDataFrame(self)
    # this will copy so that BlockManager gets copied
    return gf._set_traj()  # .set_geometry(col, drop=drop, inplace=False, crs=crs)


# Expose the converter on every pandas DataFrame.
pd.DataFrame._set_traj = _dataframe_set_geometry
| 2.234375 | 2 |
tests/composite/examples/prim_composite_full.py | strint/myia | 222 | 12762706 | """Definitions for the primitive `composite_full`."""
from myia.lib import (
SHAPE,
TYPE,
VALUE,
AbstractArray,
AbstractScalar,
AbstractType,
abstract_array,
distribute,
force_pending,
scalar_cast,
u64tup_typecheck,
)
from myia.operations import primitives as P
from myia.xtype import NDArray
def pyimpl_composite_full(shape, fill_value, abstract_scalar_type):
    """Implement `composite_full`: an array of `shape` filled with `fill_value`
    cast to `abstract_scalar_type`."""
    fill = scalar_cast(fill_value, abstract_scalar_type)
    scalar_array = P.scalar_to_array(fill, abstract_array(shape, fill))
    return distribute(scalar_array, shape)
async def infer_composite_full(
    self,
    engine,
    shape: u64tup_typecheck,
    fill_value: AbstractScalar,
    dtype: AbstractType,
):
    """Infer the return type of primitive `composite_full`.

    Returns an AbstractArray whose element carries the (possibly pending)
    scalar type from `dtype` and the fill value, and whose shape is built
    from the constant elements of `shape`.
    """
    return AbstractArray(
        AbstractScalar(
            {
                TYPE: await force_pending(dtype.element.xtype()),
                VALUE: fill_value.xvalue(),
            }
        ),
        {
            # Every shape element must be a compile-time constant.
            SHAPE: tuple(
                self.require_constant(e, argnum=f'"0:shape[{edx}]"')
                for edx, e in enumerate(shape.elements)
            ),
            TYPE: NDArray,
        },
    )
| 2.15625 | 2 |
tests/examples/minlplib/st_qpc-m3a.py | ouyang-w-19/decogo | 2 | 12762707 | # NLP written by GAMS Convert at 04/21/18 13:54:25
#
# Equation counts
# Total E G L N X C B
# 11 1 0 10 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 11 11 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 108 98 10 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
# Auto-generated Pyomo model (GAMS Convert output for st_qpc-m3a):
# a nonconvex QP with 10 nonnegative continuous variables, a quadratic
# objective (minimized), and 10 linear inequality constraints.
model = m = ConcreteModel()

# Decision variables: all continuous, nonnegative, initialized at 0.
m.x1 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)

# Quadratic objective: linear term 10*sum(x) minus a dense quadratic form.
m.obj = Objective(expr=10*m.x1 - 6.8*m.x1*m.x1 - 4.6*m.x1*m.x2 + 10*m.x2 - 7.9*m.x1*m.x3 + 10*m.x3 - 5.1*m.x1*m.x4 + 10*
                  m.x4 - 6.9*m.x1*m.x5 + 10*m.x5 - 6.8*m.x1*m.x6 + 10*m.x6 - 4.6*m.x1*m.x7 + 10*m.x7 - 7.9*m.x1*
                  m.x8 + 10*m.x8 - 5.1*m.x1*m.x9 + 10*m.x9 - 6.9*m.x1*m.x10 + 10*m.x10 - 4.6*m.x2*m.x1 - 5.5*m.x2*
                  m.x2 - 5.8*m.x2*m.x3 - 4.5*m.x2*m.x4 - 6*m.x2*m.x5 - 4.6*m.x2*m.x6 - 5.5*m.x2*m.x7 - 5.8*m.x2*
                  m.x8 - 4.5*m.x2*m.x9 - 6*m.x2*m.x10 - 7.9*m.x3*m.x1 - 5.8*m.x3*m.x2 - 13.3*m.x3*m.x3 - 6.7*m.x3*
                  m.x4 - 8.9*m.x3*m.x5 - 7.9*m.x3*m.x6 - 5.8*m.x3*m.x7 - 13.3*m.x3*m.x8 - 6.7*m.x3*m.x9 - 8.9*m.x3*
                  m.x10 - 5.1*m.x4*m.x1 - 4.5*m.x4*m.x2 - 6.7*m.x4*m.x3 - 6.9*m.x4*m.x4 - 5.8*m.x4*m.x5 - 5.1*m.x4*
                  m.x6 - 4.5*m.x4*m.x7 - 6.7*m.x4*m.x8 - 6.9*m.x4*m.x9 - 5.8*m.x4*m.x10 - 6.9*m.x5*m.x1 - 6*m.x5*
                  m.x2 - 8.9*m.x5*m.x3 - 5.8*m.x5*m.x4 - 11.9*m.x5*m.x5 - 6.9*m.x5*m.x6 - 6*m.x5*m.x7 - 8.9*m.x5*
                  m.x8 - 5.8*m.x5*m.x9 - 11.9*m.x5*m.x10 - 6.8*m.x6*m.x1 - 4.6*m.x6*m.x2 - 7.9*m.x6*m.x3 - 5.1*m.x6
                  *m.x4 - 6.9*m.x6*m.x5 - 6.8*m.x6*m.x6 - 4.6*m.x6*m.x7 - 7.9*m.x6*m.x8 - 5.1*m.x6*m.x9 - 6.9*m.x6*
                  m.x10 - 4.6*m.x7*m.x1 - 5.5*m.x7*m.x2 - 5.8*m.x7*m.x3 - 4.5*m.x7*m.x4 - 6*m.x7*m.x5 - 4.6*m.x7*
                  m.x6 - 5.5*m.x7*m.x7 - 5.8*m.x7*m.x8 - 4.5*m.x7*m.x9 - 6*m.x7*m.x10 - 7.9*m.x8*m.x1 - 5.8*m.x8*
                  m.x2 - 13.3*m.x8*m.x3 - 6.7*m.x8*m.x4 - 8.9*m.x8*m.x5 - 7.9*m.x8*m.x6 - 5.8*m.x8*m.x7 - 13.3*m.x8
                  *m.x8 - 6.7*m.x8*m.x9 - 8.9*m.x8*m.x10 - 5.1*m.x9*m.x1 - 4.5*m.x9*m.x2 - 6.7*m.x9*m.x3 - 6.9*m.x9
                  *m.x4 - 5.8*m.x9*m.x5 - 5.1*m.x9*m.x6 - 4.5*m.x9*m.x7 - 6.7*m.x9*m.x8 - 6.9*m.x9*m.x9 - 5.8*m.x9*
                  m.x10 - 6.9*m.x10*m.x1 - 6*m.x10*m.x2 - 8.9*m.x10*m.x3 - 5.8*m.x10*m.x4 - 11.9*m.x10*m.x5 - 6.9*
                  m.x10*m.x6 - 6*m.x10*m.x7 - 8.9*m.x10*m.x8 - 5.8*m.x10*m.x9 - 11.9*m.x10*m.x10, sense=minimize)

# Linear resource constraints c1..c10 (all "<=" with generated RHS values).
m.c1 = Constraint(expr=   20*m.x1 + 20*m.x2 + 60*m.x3 + 60*m.x4 + 60*m.x5 + 60*m.x6 + 5*m.x7 + 45*m.x8 + 55*m.x9
                        + 65*m.x10 <= 600.1)

m.c2 = Constraint(expr=   5*m.x1 + 7*m.x2 + 3*m.x3 + 8*m.x4 + 13*m.x5 + 13*m.x6 + 2*m.x7 + 14*m.x8 + 14*m.x9 + 14*m.x10
                        <= 310.5)

m.c3 = Constraint(expr=   100*m.x1 + 130*m.x2 + 50*m.x3 + 70*m.x4 + 70*m.x5 + 70*m.x6 + 20*m.x7 + 80*m.x8 + 80*m.x9
                        + 80*m.x10 <= 1800)

m.c4 = Constraint(expr=   200*m.x1 + 280*m.x2 + 100*m.x3 + 200*m.x4 + 250*m.x5 + 280*m.x6 + 100*m.x7 + 180*m.x8
                        + 200*m.x9 + 220*m.x10 <= 3850)

m.c5 = Constraint(expr=   2*m.x1 + 2*m.x2 + 4*m.x3 + 4*m.x4 + 4*m.x5 + 4*m.x6 + 2*m.x7 + 6*m.x8 + 6*m.x9 + 6*m.x10
                        <= 18.6)

m.c6 = Constraint(expr=   4*m.x1 + 8*m.x2 + 2*m.x3 + 6*m.x4 + 10*m.x5 + 10*m.x6 + 5*m.x7 + 10*m.x8 + 10*m.x9 + 10*m.x10
                        <= 198.7)

m.c7 = Constraint(expr=   60*m.x1 + 110*m.x2 + 20*m.x3 + 40*m.x4 + 60*m.x5 + 70*m.x6 + 10*m.x7 + 40*m.x8 + 50*m.x9
                        + 50*m.x10 <= 882)

m.c8 = Constraint(expr=   150*m.x1 + 210*m.x2 + 40*m.x3 + 70*m.x4 + 90*m.x5 + 105*m.x6 + 60*m.x7 + 100*m.x8 + 140*m.x9
                        + 180*m.x10 <= 4200)

m.c9 = Constraint(expr=   80*m.x1 + 100*m.x2 + 6*m.x3 + 16*m.x4 + 20*m.x5 + 22*m.x6 + 20*m.x8 + 30*m.x9 + 30*m.x10
                        <= 40.25)

m.c10 = Constraint(expr= 40*m.x1 + 40*m.x2 + 12*m.x3 + 20*m.x4 + 24*m.x5 + 28*m.x6 + 40*m.x9 + 50*m.x10 <= 327)
# Source: mdstudio/mdstudio/cache/cache.py (NLeSC/LIEStudio)
import abc
import six
from typing import Any, Union, List, Optional, Tuple
@six.add_metaclass(abc.ABCMeta)
class ICache(object):
    """Abstract interface for a key/value cache.

    Implementations store values under string keys with an optional expiry
    (presumably seconds — confirm against implementations). Every method
    returns a dict describing the outcome of the operation.
    """

    @abc.abstractmethod
    def put(self, key, value, expiry=None):
        # type: (str, Any, Optional[int]) -> dict
        """Store *value* under *key*, optionally expiring after *expiry*."""
        raise NotImplementedError

    @abc.abstractmethod
    def put_many(self, values, expiry=None):
        # type: (List[Tuple[str, Any]], Optional[int]) -> dict
        """Store several (key, value) pairs, all with the same *expiry*."""
        raise NotImplementedError

    @abc.abstractmethod
    def get(self, key):
        # type: (str) -> dict
        """Fetch the value stored under *key* without removing it."""
        raise NotImplementedError

    @abc.abstractmethod
    def extract(self, key):
        # type: (str) -> dict
        """Fetch the value stored under *key* and remove the entry."""
        raise NotImplementedError

    @abc.abstractmethod
    def has(self, key):
        # type: (str) -> dict
        """Report whether *key* currently exists in the cache."""
        raise NotImplementedError

    @abc.abstractmethod
    def touch(self, keys):
        # type: (str) -> dict
        # NOTE(review): the parameter is named ``keys`` but the type comment
        # says ``str``; ``forget`` below accepts ``Union[List[str], str]`` —
        # confirm whether ``touch`` should accept the same.
        """Refresh the expiry of the given key(s)."""
        raise NotImplementedError

    @abc.abstractmethod
    def forget(self, keys):
        # type: (Union[List[str], str]) -> dict
        """Remove the given key(s) from the cache."""
        raise NotImplementedError
# Source: backend/api/apps/authentication/serializers.py (vivekthoppil/InsuranceSuite)
from django.contrib.auth import authenticate
from django.core import exceptions as django_exceptions
from rest_framework import exceptions as drf_exceptions
from rest_framework import serializers
from .services import create_user_token
class RegistrationSerializer(serializers.Serializer):
    """Validates the payload for creating a new user account.

    ``password`` is write-only so it is never echoed back in responses.
    """

    username = serializers.CharField(max_length=255, allow_blank=False)
    email = serializers.EmailField(max_length=255, allow_blank=False)
    password = serializers.CharField(
        max_length=128,
        min_length=8,  # enforce a minimal password length at the API edge
        write_only=True
    )
class LoginSerializer(serializers.Serializer):
    """Authenticates an email/password pair and returns user info + token.

    ``email``/``password`` are inputs; ``username``/``token`` are read-only
    outputs populated from the authenticated user.
    """

    email = serializers.CharField(max_length=255)
    username = serializers.CharField(max_length=255, read_only=True)
    password = serializers.CharField(max_length=128, write_only=True)
    token = serializers.CharField(max_length=255, read_only=True)

    def validate(self, data):
        """Authenticate the credentials and build the response payload.

        Raises ValidationError for missing fields and AuthenticationFailed
        for bad credentials, deactivated users, or token-creation failures.
        """
        email = data.get('email', None)
        password = data.get('password', None)

        if email is None:
            raise serializers.ValidationError(
                'An email address is required to log in.'
            )

        if password is None:
            raise serializers.ValidationError(
                'A password is required to log in.'
            )

        # NOTE(review): the email is passed as ``username`` — this assumes
        # the auth backend authenticates by email; confirm backend config.
        user = authenticate(username=email, password=password)

        if user is None:
            raise drf_exceptions.AuthenticationFailed(
                'A user with this email and password was not found.'
            )

        if not user.is_active:
            raise drf_exceptions.AuthenticationFailed(
                'This user has been deactivated.'
            )

        try:
            token = create_user_token(user.email, user.password)
        except django_exceptions.ValidationError as ve:
            # Surface token-service validation problems as auth failures
            raise drf_exceptions.AuthenticationFailed(ve.message)

        return {
            'email': user.email,
            'username': user.username,
            'token': token
        }
class UserSerializer(serializers.Serializer):
    """Serializes a user for read/update: email in/out, username read-only,
    password accepted but never returned (write-only)."""

    email = serializers.CharField(max_length=255)
    username = serializers.CharField(max_length=255, read_only=True)
    password = serializers.CharField(max_length=128, write_only=True)
# Source: ExerciciosPython/ex045.py (MecaFlavio/Exercicios-Python-3-Curso-em-Video)
# Make the computer play rock-paper-scissors ("Jokenpô") against the user.
import random
print(5 * '=', 'Hora do <NAME>', 5 * '=',
      '''\nPEDRA
PAPEL
TESOURA''')
# Read the player's choice, trimmed and upper-cased for comparison.
mão = str(input('Qual a sua escolha: ')).strip().upper()
# The computer picks uniformly at random among the three options.
computador = random.choice(['PEDRA', 'PAPEL', 'TESOURA'])
print(f'Voce escolheu {mão} e eu escolhi {computador}')
if mão == computador:
    print('Empatamos')
# Player wins when: paper beats rock, rock beats scissors, scissors beats paper.
# NOTE(review): any invalid entry falls through to the "lost" branch — no
# input validation is performed.
elif (mão == 'PAPEL' and computador == 'PEDRA') or (mão == 'PEDRA' and computador == 'TESOURA') or \
        (mão == 'TESOURA' and computador == 'PAPEL'):
    print('Voce GANHOU!')
else:
    print('Voce PERDEU!')
# Source: BirdSongToolbox/PreProcFlow.py (Darilbii/BirdSongToolbox)
import numpy as np
import os
import h5py
import sys
import scipy
import scipy.io.wavfile
from scipy.signal import butter
# Reconsider the Handling of SN_L, Gp_L, and Gp in the Freq_Bin Commands
# Command for Initiallizing work space with Access to both: All the Data and Ephysflow Commands
def initiate_path():
    """Build the path to the awake bird-song data folder.

    Side effect: appends the lab's ``ephysflow`` module directory to
    ``sys.path`` so its commands can be imported.

    :return: path to the bird-song data directory (str)
    """
    experiment_folder = '/net/expData/birdSong/'
    # Make the Gentner Lab ephysflow commands importable.
    sys.path.append(os.path.join(experiment_folder, 'ephysflow'))
    # Path to all awake bird data.
    return os.path.join(experiment_folder, 'ss_data')
def get_birds_data(Bird_Id=str, Session=str, ss_data_folder=str):
    """Load one session of awake free-behaving recordings.

    Opens the session's single ``.kwd`` (raw data) and ``.kwe`` (event)
    HDF5 files and collects the per-epoch raw data plus motif metadata.

    NOTE(review): the default values ``=str`` are the *type object*, not a
    usable value — they look like a misuse of defaults as annotations.
    Callers must always pass all three arguments; the signature is kept
    unchanged for backward compatibility.

    :param Bird_Id: identifier of the bird (e.g. subfolder name)
    :param Session: session/recording name
    :param ss_data_folder: root data folder (from ``initiate_path``)
    :return: (Entire_trial, epoch_label, start_time) — a list with one
        numpy array of raw samples per epoch, the epoch index per motif,
        and the motif start samples.
    """
    kwd_file_folder = os.path.join(ss_data_folder, Bird_Id, Session)

    # Locate the single .kwd (raw data) file for this session.
    kwd_files = [f for f in os.listdir(kwd_file_folder) if f.endswith('.kwd')]
    assert (len(kwd_files) == 1)
    kwd_file = kwd_files[0]
    print(kwd_file)  # Sanity check: make sure the correct file is used

    # Open the file in read mode.
    kwd_file = h5py.File(os.path.join(kwd_file_folder, kwd_file), 'r')

    # Number of recording epochs varies per session.
    Num_Members = kwd_file.get('recordings').keys()
    P = len(Num_Members)

    # Isolate and store each epoch's raw data as a numpy array.
    # (Dead counter resets/increments from the original loop removed —
    # ``range`` already drives the index.)
    Entire_trial = []
    for j in range(0, P):
        k = 'recordings/' + str(j) + '/data'
        print(k)  # Sanity check that the correct dataset is accessed
        Entire_trial.append(np.array(kwd_file.get(k)))

    # Locate the single .kwe (event/marker) file for this session.
    kwe_files = [f for f in os.listdir(kwd_file_folder) if f.endswith('.kwe')]
    assert (len(kwe_files) == 1)
    kwe_file = kwe_files[0]
    print(kwe_file)  # Sanity check: make sure the correct file is used

    # Open the file in read mode.
    kwe_file = h5py.File(os.path.join(kwd_file_folder, kwe_file), 'r')

    # Motif labels (epoch index per motif) and motif start samples.
    epoch_label = np.array(kwe_file.get('event_types/singing/motiff_1/recording'))
    print('Number of Motifs:', epoch_label.size)  # Good to know / sanity check
    start_time = np.array(kwe_file.get('event_types/singing/motiff_1/time_samples'))
    print('Number of Start Times:', start_time.size)  # Should equal the motif count
    assert (start_time.size == epoch_label.size)  # Both must have the same length
    print('')
    print(epoch_label)
    print('')
    print(start_time)
    return Entire_trial, epoch_label, start_time
def clip_all_motifs(Entire_trial, Labels=np.ndarray, Starts=np.ndarray, song_length=str, Gaps=str):
    """Clip and store each motif with a gap of context on both sides.

    For every motif, the corresponding epoch is sliced from ``Gaps``
    seconds before its start to ``Gaps`` seconds after its expected end,
    and the audio channel (column 16) is band-pass filtered in place.

    NOTE(review): the default values (``=np.ndarray``, ``=str``) are type
    objects misused as annotations; all arguments must always be passed.
    Signature kept unchanged for backward compatibility.

    :param Entire_trial: list of per-epoch raw-data arrays (samples x channels)
    :param Labels: epoch index for each motif
    :param Starts: start sample of each motif
    :param song_length: expected song duration in seconds
    :param Gaps: context to add before and after, in seconds
    :return: (All_Songs, SN_L, Gp_L, Gp) — clipped motifs, song length in
        samples, total gap length in samples, and one-sided gap in samples.
    """
    fs = 30000.0  # sample rate (30 kHz)
    lowcut = 400.0
    highcut = 10000.0

    Song_length = song_length  # expected song duration in seconds
    Gap = Gaps                 # context before/after in seconds
    SN_L = int(Song_length * 30000)  # song length in samples
    Gp = int(Gap * 30000)            # one-sided gap in samples
    Gp_L = Gp * 2                    # total gap added per motif

    stop_time = Starts + 30000 * Song_length

    All_Songs = []
    for i in range(0, Labels.size):
        j = int(Labels[i])
        Epoch_w_motif = Entire_trial[j]
        # Slice the motif plus the surrounding gap from its epoch.
        Motif_T = Epoch_w_motif[int(Starts[i] - Gp):int(stop_time[i] + Gp), :]
        # Band-pass filter the audio channel (column 16) in place.
        Motif_T[:, 16] = butter_bandpass_filter(Motif_T[:, 16], lowcut, highcut, fs, order=2)
        All_Songs.append(Motif_T[:, :])
    print('Song Motifs Acquired')
    return All_Songs, SN_L, Gp_L, Gp
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    Cutoffs are normalized by the Nyquist frequency (fs/2) as required by
    :func:`scipy.signal.butter`.

    :return: filter coefficients ``(b, a)``
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, btype='bandpass')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Zero-phase band-pass filter *data* with a Butterworth design.

    Uses ``filtfilt`` (forward-backward filtering) so no phase delay is
    introduced into the signal.
    """
    numer, denom = butter_bandpass(lowcut, highcut, fs, order=order)
    return scipy.signal.filtfilt(numer, denom, data)
# Source: src/metrics.py (kundajelab/retina-models)
import tensorflow as tf
from tensorflow import keras
from utils import data_utils, argmanager
from utils.loss import multinomial_nll
import numpy as np
import os
import json
import scipy
import sklearn.metrics
import scipy.stats
from collections import OrderedDict
def softmax(x, temp=1):
    """Temperature-scaled softmax along axis 1 of a 2-D array.

    Fix: subtract the row-wise *max* (rather than the mean) before
    exponentiating. Softmax is shift-invariant, so the result is
    mathematically identical, but subtracting the max guarantees the
    exponent is never positive and cannot overflow for large logits.
    """
    shifted = x - np.max(x, axis=1, keepdims=True)
    exp_x = np.exp(temp * shifted)
    return exp_x / np.sum(exp_x, axis=1, keepdims=True)
def get_jsd(preds, cts, min_tot_cts=10):
    """Per-example Jensen-Shannon distance between predicted profiles and
    observed counts, skipping examples whose counts sum to at most
    *min_tot_cts* (too few reads for a meaningful comparison)."""
    distances = []
    for pred, ct in zip(preds, cts):
        if ct.sum() > min_tot_cts:
            distances.append(scipy.spatial.distance.jensenshannon(pred, ct))
    return np.array(distances)
def main():
    """Evaluate a trained BPNet-style model on held-out test chromosomes.

    Loads the model and test peak/non-peak data, predicts profile logits
    and log-counts, computes count-correlation, peak-vs-nonpeak AUC, and
    profile JSD metrics, and writes them to
    ``<output_prefix>.metrics.json``.
    """
    args = argmanager.fetch_metrics_args()
    print(args)

    # load model (custom loss/tf symbols must be in scope for deserialization)
    with keras.utils.CustomObjectScope({'multinomial_nll':multinomial_nll, 'tf':tf}):
        model = keras.models.load_model(args.model)

    # Input/output window sizes are read off the model itself.
    inputlen = int(model.input_shape[1])
    outputlen = int(model.output_shape[0][1])

    # load data
    test_peaks_seqs, test_peaks_cts, \
        test_nonpeaks_seqs, test_nonpeaks_cts = data_utils.load_test_data(
            args.peaks, args.nonpeaks, args.genome, args.bigwig,
            args.test_chr, inputlen, outputlen
        )

    # predict on peaks and nonpeaks; each predict returns (logits, log-counts)
    test_peaks_pred_logits, test_peaks_pred_logcts = \
        model.predict(test_peaks_seqs,
                      batch_size=args.batch_size,
                      verbose=True)
    test_nonpeaks_pred_logits, test_nonpeaks_pred_logcts = \
        model.predict(test_nonpeaks_seqs,
                      batch_size=args.batch_size,
                      verbose=True)

    # OrderedDict keeps the metric order stable in the JSON output.
    metrics = OrderedDict()

    # counts metrics: correlation of predicted vs observed log(1+total counts)
    all_test_logcts = np.log(1 + np.vstack([test_peaks_cts, test_nonpeaks_cts]).sum(-1))
    cur_pair = (all_test_logcts,
                np.vstack([test_peaks_pred_logcts,
                           test_nonpeaks_pred_logcts]).ravel())
    metrics['bpnet_cts_pearson_peaks_nonpeaks'] = scipy.stats.pearsonr(*cur_pair)[0]
    metrics['bpnet_cts_spearman_peaks_nonpeaks'] = scipy.stats.spearmanr(*cur_pair)[0]

    # How well predicted counts separate peaks (label 1) from nonpeaks (0).
    cur_pair = ([1]*len(test_peaks_pred_logcts) + [0]*len(test_nonpeaks_pred_logcts),
                np.vstack([test_peaks_pred_logcts,
                           test_nonpeaks_pred_logcts]).ravel())
    metrics['binary_auc'] = sklearn.metrics.roc_auc_score(*cur_pair)

    # Count correlations restricted to peak regions only.
    peaks_test_logcts = np.log(1 + test_peaks_cts.sum(-1))
    cur_pair = (peaks_test_logcts, test_peaks_pred_logcts.ravel())
    metrics['bpnet_cts_pearson_peaks'] = scipy.stats.pearsonr(*cur_pair)[0]
    metrics['bpnet_cts_spearman_peaks'] = scipy.stats.spearmanr(*cur_pair)[0]

    # profile metrics (all within peaks)
    cur_pair = (softmax(test_peaks_pred_logits), test_peaks_cts)
    metrics['bpnet_profile_median_jsd_peaks'] = np.median(get_jsd(*cur_pair))

    # Shuffled-positions baseline: JSD against randomly permuted count columns.
    cur_pair = (softmax(test_peaks_pred_logits),
                test_peaks_cts[:, np.random.permutation(test_peaks_cts.shape[1])])
    metrics['bpnet_profile_median_jsd_peaks_randomized'] = np.median(get_jsd(*cur_pair))

    with open(args.output_prefix + ".metrics.json", "w") as f:
        json.dump(metrics, f, ensure_ascii=False, indent=4)

if __name__=="__main__":
    main()
# Source: tr_sys/tr_ars/migrations/0002_actor_active.py (jdr0887/Relay)
# Generated by Django 3.2.1 on 2021-06-01 13:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean ``active`` flag (default True) to the Actor model."""

    dependencies = [
        ('tr_ars', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='actor',
            name='active',
            field=models.BooleanField(default=True, verbose_name='actor is active'),
        ),
    ]
# Source: visdialch/utils/connection_counter.py (gicheonkang/sglkt-visdial)
class ConnectionCounter(object):
    """Accumulates a running count of connections across calls.

    NOTE(review): ``add`` currently ignores its ``mask`` argument and always
    adds 0 (``new_connct`` is hard-coded), so the counter never grows. This
    looks like an unfinished stub — confirm the intended semantics (likely
    the number of active entries in ``mask``) before relying on it.
    """

    def __init__(self):
        # Running total of connections seen so far.
        self.n_connct = 0

    def add(self, mask):
        # mask to the number
        # TODO(review): derive ``new_connct`` from ``mask``; currently a no-op.
        new_connct = 0
        self.n_connct += new_connct

    def retrieve(self):
        """Return the accumulated connection count."""
        return self.n_connct

    def reset(self):
        """Reset the accumulated count back to zero."""
        self.n_connct = 0
# Source: autodriver/src/autodriver/models/ros_subscriber.py (rel1c/robocar)
import rospy
from abc import abstractmethod
from ros_node import ROSNode
class ROSSubscriber(ROSNode):
    """A base class for all ROS Nodes with subscribing functionality."""

    def __init__(self, name, topic, msg_type, q_size=None):
        """Create the node and subscribe to *topic*.

        :param name: node name, forwarded to ``ROSNode``.
        :param topic: name of the topic to subscribe to.
        :param msg_type: ROS message class expected on the topic.
        :param q_size: subscriber queue size (``None`` = rospy's default).
        """
        super(ROSSubscriber, self).__init__(name)
        self.listener = rospy.Subscriber(
            topic, msg_type, self.callback, queue_size=q_size)

    def start(self):
        # Start the node, then block this thread servicing callbacks until
        # the node is shut down.
        super(ROSSubscriber, self).start()
        rospy.spin()

    @abstractmethod
    def callback(self, data):
        """Handle one incoming message; must be implemented by subclasses."""
        pass
# Source: psdconvert/__init__.py (mrstephenneal/psdconvert)
from psdconvert.psdconvert import BatchConvertPSD, ConvertPSD

# Public API of the package: single-file and batch PSD converters.
__all__ = ["BatchConvertPSD", "ConvertPSD"]
# Source: model_zoo/__init__.py (rahulgupta9202/ColossalAI)
from .vit import *
from .mlp_mixer import *
# Source: src/patchy/api.py (adamchainz/patchy)
import __future__
import ast
import inspect
import os
import shutil
import subprocess
import sys
from functools import wraps
from tempfile import mkdtemp
from textwrap import dedent
from types import CodeType, TracebackType
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from weakref import WeakKeyDictionary
from .cache import PatchingCache
if sys.version_info >= (3, 9):
from pkgutil import resolve_name as pkgutil_resolve_name
else:
from pkgutil_resolve_name import resolve_name as pkgutil_resolve_name
__all__ = ("patch", "mc_patchface", "unpatch", "replace", "temp_patch")
# Public API
def patch(func: Union[Callable[..., Any], str], patch_text: str) -> None:
    """Apply *patch_text* (a unified diff) to *func*'s source, in place.

    *func* may be a callable or a dotted-path string resolvable by
    ``pkgutil.resolve_name``.
    """
    _do_patch(func, patch_text, forwards=True)


# Joke alias for ``patch`` (Boaty McBoatface style).
mc_patchface = patch
def unpatch(func: Union[Callable[..., Any], str], patch_text: str) -> None:
    """Reverse a previously applied *patch_text* on *func* (see ``patch``)."""
    _do_patch(func, patch_text, forwards=False)
def replace(
    func: Callable[..., Any],
    expected_source: Optional[str],
    new_source: str,
) -> None:
    """Replace *func*'s source wholesale with *new_source*.

    If *expected_source* is not None, first verify (at the AST level, so
    formatting-insensitively) that the function's current source matches
    it, raising ValueError otherwise.
    """
    if expected_source is not None:
        expected_source = dedent(expected_source)
        current_source = _get_source(func)
        _assert_ast_equal(current_source, expected_source, func.__name__)

    new_source = dedent(new_source)
    _set_source(func, new_source)
AnyFunc = TypeVar("AnyFunc", bound=Callable[..., Any])


class temp_patch:
    """Apply a patch temporarily, as a context manager or as a decorator.

    On entry the patch is applied with ``patch()``; on exit it is always
    reverted with ``unpatch()``.
    """

    def __init__(self, func: Union[Callable[..., Any], str], patch_text: str) -> None:
        self.func = func
        self.patch_text = patch_text

    def __enter__(self) -> None:
        patch(self.func, self.patch_text)

    def __exit__(
        self,
        exc_type: Union[Type[BaseException], None],
        exc_val: Union[BaseException, None],
        exc_tb: Union[TracebackType, None],
    ) -> None:
        unpatch(self.func, self.patch_text)

    def __call__(self, decorable: AnyFunc) -> AnyFunc:
        @wraps(decorable)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            with self:
                # Bug fix: return the wrapped function's result — it was
                # previously discarded, so decorated functions always
                # returned None.
                return decorable(*args, **kwargs)

        return cast(AnyFunc, wrapper)
# Gritty internals
def _do_patch(
    func: Union[Callable[..., Any], str],
    patch_text: str,
    forwards: bool,
) -> None:
    """Shared implementation of ``patch()``/``unpatch()``.

    Resolves *func* if given as a dotted-path string, applies (or, when
    *forwards* is False, reverses) the dedented *patch_text* against the
    function's current source, and installs the resulting code object.
    """
    if isinstance(func, str):
        func = cast(Callable[..., Any], pkgutil_resolve_name(func))
    source = _get_source(func)
    patch_text = dedent(patch_text)

    new_source = _apply_patch(source, patch_text, forwards, func.__name__)

    _set_source(func, new_source)
_patching_cache = PatchingCache(maxsize=100)


def _apply_patch(
    source: str,
    patch_text: str,
    forwards: bool,
    name: str,
) -> str:
    """Run the external ``patch`` tool to apply (*forwards*=True) or revert
    (*forwards*=False) *patch_text* against *source*, returning the result.

    Results are memoized in ``_patching_cache`` keyed on
    (source, patch_text, forwards).

    Raises:
        ValueError: if ``patch`` exits non-zero; the message includes the
            tool's output, the source, and the patch text.
    """
    # Cached?
    try:
        return _patching_cache.retrieve(source, patch_text, forwards)
    except KeyError:
        pass

    # Write out files
    tempdir = mkdtemp(prefix="patchy")
    try:
        source_path = os.path.join(tempdir, name + ".py")
        with open(source_path, "w") as source_file:
            source_file.write(source)

        patch_path = os.path.join(tempdir, name + ".patch")
        with open(patch_path, "w") as patch_file:
            patch_file.write(patch_text)
            # `patch` needs a trailing newline to recognize the final hunk.
            if not patch_text.endswith("\n"):
                patch_file.write("\n")

        # Call `patch` command
        command = ["patch"]
        if not forwards:
            command.append("--reverse")
        command.extend([source_path, patch_path])
        # subprocess.run is simpler and less error-prone than the previous
        # Popen + communicate() pair, with identical behavior here.
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        if result.returncode != 0:
            msg = "Could not {action} the patch {prep} '{name}'.".format(
                action=("apply" if forwards else "unapply"),
                prep=("to" if forwards else "from"),
                name=name,
            )
            msg += " The message from `patch` was:\n{}\n{}".format(
                result.stdout.decode("utf-8"), result.stderr.decode("utf-8")
            )
            msg += "\nThe code to patch was:\n{}\nThe patch was:\n{}".format(
                source, patch_text
            )
            raise ValueError(msg)

        with open(source_path) as source_file:
            new_source = source_file.read()
    finally:
        shutil.rmtree(tempdir)

    _patching_cache.store(source, patch_text, forwards, new_source)
    return new_source
def _get_flags_mask() -> int:
result = 0
for name in __future__.all_feature_names:
result |= getattr(__future__, name).compiler_flag
return result
FEATURE_MASK = _get_flags_mask()
# Stores the source of functions that have had their source changed.
# Weak keys let patched functions be garbage-collected normally.
# Bad type hints because WeakKeyDictionary only indexable on Python 3.9+
_source_map: Dict[Callable[..., Any], str] = cast(
    Dict[Callable[..., Any], str],
    WeakKeyDictionary(),
)


def _get_source(func: Callable[..., Any]) -> str:
    """Return *func*'s current source: the patched source if we modified it
    before (tracked in ``_source_map``), otherwise the dedented source from
    ``inspect.getsource``."""
    real_func = _get_real_func(func)
    try:
        return _source_map[real_func]
    except KeyError:
        source = inspect.getsource(func)
        source = dedent(source)
        return source
def _class_name(func: Callable[..., Any]) -> Optional[str]:
split_name = func.__qualname__.split(".")
try:
class_name = split_name[-2]
except IndexError:
return None
else:
if class_name == "<locals>":
return None
return class_name
def _set_source(func: Callable[..., Any], func_source: str) -> None:
    """Compile *func_source* and install its code object onto *func*.

    The new source is wrapped so it compiles with the same ``__future__``
    flags, the same free variables, and (for methods) the same name
    mangling as the original function, then the resulting code object is
    swapped in via ``real_func.__code__``.
    """
    # Fetch the actual function we are changing
    real_func = _get_real_func(func)

    # Figure out any future headers that may be required
    feature_flags = real_func.__code__.co_flags & FEATURE_MASK

    class_name = _class_name(func)

    def _compile(
        code: Union[str, ast.Module],
        flags: int = 0,
    ) -> Union[CodeType, ast.Module]:
        # dont_inherit=True: apply exactly the original's future flags.
        return compile(
            code, "<patchy>", "exec", flags=feature_flags | flags, dont_inherit=True
        )

    def _parse(code: str) -> ast.Module:
        result = _compile(code, flags=ast.PyCF_ONLY_AST)
        assert isinstance(result, ast.Module)
        return result

    def _process_freevars() -> Tuple[str, ast.AST, List[str]]:
        """
        Wrap the new function in a __patchy_freevars__ method that provides all
        freevars of the original function.

        Because the new function must use exectaly the same freevars as the
        original, also append to the new function with a body of code to force
        use of those freevars (in the case the the patch drops use of any
        freevars):

        def __patchy_freevars__():
            eg_free_var_spam = object()  <- added in wrapper
            eg_free_var_ham = object()  <- added in wrapper

            def patched_func():
                return some_global(eg_free_var_ham)
                eg_free_var_spam  <- appended to new func body
                eg_free_var_ham  <- appended to new func body

            return patched_func
        """
        _def = "def __patchy_freevars__():"
        fvs = func.__code__.co_freevars
        fv_body = [f"    {fv} = object()" for fv in fvs]
        fv_force_use_body = [f"    {fv}" for fv in fvs]
        if fv_force_use_body:
            fv_force_use_ast = _parse("\n".join([_def] + fv_force_use_body))
            fv_force_use = fv_force_use_ast.body[0].body  # type: ignore [attr-defined]
        else:
            fv_force_use = []
        _ast = _parse(func_source).body[0]
        _ast.body = _ast.body + fv_force_use  # type: ignore [attr-defined]
        return _def, _ast, fv_body

    def _process_method() -> ast.Module:
        """
        Wrap the new method in a class to ensure the same mangling as would
        have been performed on the original method:

        def __patchy_freevars__():

            class SomeClass(object):
                def patched_func(self):
                    return some_globals(self.__some_mangled_prop)

            return SomeClass.patched_func
        """
        _def, _ast, fv_body = _process_freevars()
        # Only declare the class global if it is not itself a freevar.
        _global = (
            ""
            if class_name in func.__code__.co_freevars
            else f"    global {class_name}\n"
        )
        class_src = "{_global}    class {name}(object):\n        pass".format(
            _global=_global, name=class_name
        )
        ret = "    return {class_name}.{name}".format(
            class_name=class_name, name=func.__name__
        )
        to_parse = "\n".join([_def] + fv_body + [class_src, ret])
        new_source = _parse(to_parse)
        # Splice the patched method AST into the placeholder class body.
        new_source.body[0].body[-2].body[0] = _ast  # type: ignore [attr-defined]
        return new_source

    def _process_function() -> ast.Module:
        _def, _ast, fv_body = _process_freevars()
        name = func.__name__
        ret = f"    return {name}"
        _global = [] if name in func.__code__.co_freevars else [f"    global {name}"]
        to_parse = "\n".join([_def] + _global + fv_body + ["    pass", ret])
        new_source = _parse(to_parse)
        # Replace the "pass" placeholder with the patched function AST.
        new_source.body[0].body[-2] = _ast  # type: ignore [attr-defined]
        return new_source

    if class_name:
        new_source = _process_method()
    else:
        new_source = _process_function()

    # Compile and retrieve the new Code object
    localz: Dict[str, Any] = {}
    new_code = cast(CodeType, _compile(new_source))
    exec(
        new_code,
        dict(func.__globals__),  # type: ignore [attr-defined]
        localz,
    )
    new_func = localz["__patchy_freevars__"]()

    # Put the new Code object in place
    real_func.__code__ = new_func.__code__
    # Store the modified source. This used to be attached to the function but
    # that is a bit naughty
    _source_map[real_func] = func_source
def _get_real_func(func: Callable[..., Any]) -> Callable[..., Any]:
"""
Duplicates some of the logic implicit in inspect.getsource(). Basically
some function-esque things, such as classmethods, aren't functions but we
can peel back the layers to the underlying function very easily.
"""
if inspect.ismethod(func):
return func.__func__ # type: ignore [attr-defined]
else:
return func
def _assert_ast_equal(current_source: str, expected_source: str, name: str) -> None:
current_ast = ast.parse(current_source)
expected_ast = ast.parse(expected_source)
if not ast.dump(current_ast) == ast.dump(expected_ast):
msg = (
"The code of '{name}' has changed from expected.\n"
"The current code is:\n{current_source}\n"
"The expected code is:\n{expected_source}"
).format(
name=name, current_source=current_source, expected_source=expected_source
)
raise ValueError(msg)
# Source: agilicus/v1/apigenerator/deployment.py (Agilicus/kustomize-plugins)
deployment = """
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {cfg[name_version]}-{cfg[name]}
namespace: {cfg[metadata][namespace]}
spec:
replicas: {cfg[replicas]}
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
selector:
matchLabels:
app: {cfg[name_version]}-{cfg[name]}
template:
metadata:
labels:
app: {cfg[name_version]}-{cfg[name]}
annotations:
fluentbit.io/parser: "json"
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: {cfg[name_version]}-{cfg[name]}
imagePullSecrets:
- name: regcred
containers:
- name: {cfg[name]}
image: {cfg[image]}
imagePullPolicy: Always
ports:
- containerPort: {cfg[port]}
name: http
env: []
envFrom:
- secretRef:
name: {cfg[name_version]}-{cfg[name]}-{cfg[hash]}
livenessProbe:
httpGet:
path: {cfg[liveness_path]}
port: http
timeoutSeconds: 2
failureThreshold: 2
initialDelaySeconds: 10
periodSeconds: 30
readinessProbe:
httpGet:
path: {cfg[readiness_path]}
port: http
initialDelaySeconds: 10
periodSeconds: 2
timeoutSeconds: 2
failureThreshold: 2
resources:
limits:
memory: "{cfg[mem_limit]}"
requests:
memory: "{cfg[mem_request]}"
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
capabilities:
drop:
- all
volumeMounts:
- mountPath: /tmp
name: tmpdir
volumes:
- name: tmpdir
emptyDir:
medium: Memory
"""
# Source: test-framework/test-suites/integration/tests/remove/test_remove_storage_partition.py (knutsonchris/stacki)
import os
import subprocess
import pytest
# Spreadsheets to test: one at global scope, one targeting a backend host.
STORAGE_SPREADSHEETS = ['multi_teradata_global', 'multi_teradata_backend']

@pytest.mark.parametrize("csvfile", STORAGE_SPREADSHEETS)
def test_remove_storage_partition(host, add_host, csvfile, test_file):
    """Load a storage-partition spreadsheet, then remove partition config by
    device, by mountpoint, and finally wholesale, verifying the listing
    after each step."""
    # get filename
    input_file = test_file(f'load/storage_partition_{csvfile}_input.csv')

    # "global" spreadsheets take no scope argument; others target backend-0-0.
    if 'global' in input_file:
        hostname = ''
    else:
        hostname = 'scope=host backend-0-0'

    # check that it has no partition info by default
    result = host.run('stack list storage partition %s' % hostname)
    assert result.rc == 0
    assert result.stdout == ''

    # load the partition file
    result = host.run('stack load storage partition file=%s' % input_file)
    assert result.rc == 0

    # check that it has partition info
    result = host.run('stack list storage partition %s' % hostname)
    assert result.rc == 0
    assert 'sda' in result.stdout
    assert 'sdb' in result.stdout
    assert '/var/opt/teradata' in result.stdout
    assert result.stderr == ''

    # remove the partition info for a single device
    result = host.run('stack remove storage partition %s device=sdb' % hostname)
    assert result.rc == 0
    assert result.stdout == ''
    assert result.stderr == ''

    # Check that it is indeed removed
    result = host.run('stack list storage partition %s' % hostname)
    assert result.rc == 0
    assert 'sda' in result.stdout
    assert 'sdb' not in result.stdout

    # remove the partition info for a single mountpoint
    result = host.run('stack remove storage partition %s mountpoint="/var/opt/teradata"' % hostname)
    assert result.rc == 0
    assert result.stdout == ''
    assert result.stderr == ''

    # Check that it is indeed removed
    result = host.run('stack list storage partition %s' % hostname)
    assert result.rc == 0
    assert '/var/opt/teradata' not in result.stdout

    # remove all the partition info (device="*" wildcard)
    result = host.run('stack remove storage partition %s device="*"' % hostname)
    assert result.rc == 0
    assert result.stdout == ''
    assert result.stderr == ''

    # check that it has no partition info again
    result = host.run('stack list storage partition %s' % hostname)
    assert result.rc == 0
    assert result.stdout == ''
    assert result.stderr == ''
def test_negative_remove_storage_partition(host, add_host):
    """
    Trying to hit the below exceptions. The order is important as it is contextual to the attempted input.
    if scope not in accepted_scopes:
            raise ParamValue(self, '%s' % params, 'one of the following: %s' % accepted_scopes )
    elif scope == 'global' and len(args) >= 1:
            raise ArgError(self, '%s' % args, 'unexpected, please provide a scope: %s' % accepted_scopes)
    elif scope == 'global' and (device is None and mountpoint is None):
            raise ParamRequired(self, 'device OR mountpoint')
    elif scope != 'global' and len(args) < 1:
            raise ArgRequired(self, '%s name' % scope)
    """
    accepted_scopes = ['global', 'os', 'appliance', 'host']

    # Provide extra data on global scope -> ArgError ("argument unexpected")
    result = host.run('stack remove storage partition scope=global backend-0-0')
    assert result.rc == 255
    assert 'argument unexpected' in result.stderr

    # Unknown scope -> ParamValue error echoing the bad scope
    result = host.run('stack remove storage partition scope=garbage backend-0-0')
    assert result.rc == 255
    assert "{'scope': 'garbage'}" in result.stderr

    # Each non-global scope requires a name argument; global requires
    # device OR mountpoint when no args are given.
    for scope in accepted_scopes:
        if scope != 'global':
            result = host.run('stack remove storage partition scope=%s' % scope)
            assert result.rc == 255
            assert '"%s name" argument is required' % scope in result.stderr
        else:
            result = host.run('stack remove storage partition scope=%s' % scope)
            assert result.rc == 255
            assert '"device OR mountpoint" parameter is required' in result.stderr
# Source: Start_Conditions/Node_Maker.py (TorstenPaul/pythrahyper_net-1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 6 16:38:25 2017
@author: top40ub
"""
import numpy as np
from pathlib import Path
import Growth_Classes.Cone_Classes as Cone_Classes
import Growth_Classes.NodeEdge_Classes as NodeEdge_Classes
""" This function constructs the first seeds/nodes of the growth cones """
"""
Function name : cone_construction()
***Description***
The function constructs the initial GrowthCone class objects.
Each cone is initialised by its name 'cn' for each of the n starting positions
and start orientations given in the input arrays start_pos and st_angle.
All needed parameters from the parameter dictionaries internal, growth, multilayer
are transferred to each GrowthCone class object. Each GrowthCone class object
collects its own flavour from the flavour list.
A dictionary cod with key = 'cn' with for each for the n positions
and value = GrowthCone class object for each position is returned
***I/O***
Input parameter:
a) start_pos type(nd.array).shape(n,3) array with all 3D starting postions
of the cone
b) st_angle type(nd.array).shape(n,2) array with all starting orientations
of the cone (pol,az)
c) internal type('dict') dictionary with all internal GrowthCone class and
NodeEdge class object parameter
d) growth type('dict') dictionary with all parameter for the growth simulation
e) multilayer type('dict') dictionary with all parameter of the multilayer
computation field
f) flavour type('list') the flavour list to distribute a flavour to each
GrowthCone class object
Output:
a) cod type('dict') dictionary with all n starting Cone Class objects
Inline output:
Plot output:
Save file:
"""
def cone_construction(start_pos, st_angle, internal, growth, multilayer, flavour, cone_names):
    """Build the initial GrowthCone objects, one per starting position.

    Returns a dict mapping cone names ('c<j>') to configured GrowthCone
    objects.  NOTE(review): positions/angles are indexed with j-1, i.e. this
    assumes cone_names pops the values 1..n in order — confirm with callers.
    Both `flavour` and `cone_names` are consumed destructively via pop(0).
    """
    cone_start_list = {}
    st_angle_list = {}
    if len(start_pos) != len(st_angle):
        raise Exception("Number of start positions don't match with number of start angles")
    'define grothcone dictionary [cod], growth aperture [aperture]'
    cod = {}
    # Validate every start position (3D) and orientation (pol, az) and
    # associate each with its popped cone name.
    for i in range(len(start_pos)):
        if len(start_pos[i]) != 3:
            raise Exception("Three dimensional vector for each start postion is needed (x,y,z)")
        if len(st_angle[i]) != 2:
            raise Exception("Two angles (pol,az) are needed for each the start orientation")
        j = cone_names.pop(0)
        cone_start_list['c' + str(j)] =start_pos[int(j-1)]
        st_angle_list['c' + str(j)] = st_angle[int(j-1)]
    del i
    for key in cone_start_list.keys():
        cod[key] = Cone_Classes.GrowthCone(key, cone_start_list[key][0], cone_start_list[key][1], cone_start_list[key][2])
        "Starting point parameter"
        cod[key].angle_list = np.array([st_angle_list[key]])
        pol = cod[key].angle_list[0][0]
        az = cod[key].angle_list[0][1]
        # Unit direction vector from spherical angles (degrees).
        cod[key].vector_list = np.array([[np.sin(np.deg2rad(pol)) * np.cos(np.deg2rad(az)), np.sin(np.deg2rad(pol)) * np.sin(np.deg2rad(az)), np.cos(np.deg2rad(pol))]])
        cod[key].vec_mem = cod[key].vector_list[0]
        "Internal paramter"
        cod[key].growth_length = internal['stepsize']
        cod[key].aperture = internal['aperture']
        cod[key].covariance_revert = growth['covariance_revert']
        cod[key].branching_angle = internal['branching_angle']
        cod[key].bifurcation_angle = internal['bifurcation_angle']
        cod[key].memory = internal['memory']
        "Propabilities for branching, bifuraction, termination, reactivation..."
        "... and Monte Carlo iterations"
        cod[key].branchingprob = internal['branchingprob']
        cod[key].bifurcationprob = internal['bifurcationprob']
        cod[key].deathprob = internal['deathprob']
        cod[key].reactivationprob = internal['reactivationprob']
        cod[key].montecarlo_iterations = growth['montecarlo_iterations']
        "Memory and imformation about nodes, edges and splitting events"
        "and infomation about the compfield/substrate and search field"
        cod[key].searchfield = internal['searchfield'].reshape(3,3,3)
        cod[key].field_dim = multilayer['dim']
        cod[key].max_drift = multilayer['max_drift']
        cod[key].min_drift = multilayer['min_drift']
        cod[key].max_eig = multilayer['max_eigenvalue']
        cod[key].min_eig = multilayer['min_eigenvalue']
        cod[key].frame_seq.append(0)
        # Each cone consumes one flavour from the shared list.
        cod[key].flavour = flavour.pop(0)
        """Proxy_PDF_Parameter"""
        cod[key].proxy_drift = growth['Proxy_drift']
        cod[key].proxy_tensor = growth['Proxy_tensor']
        cod[key].proxy_corr = growth['Proxy_corr']
        cod[key].proxy_reverse_eig = growth['Proxy_reverse_eig']
    del key
    return cod
"""
Function name : node_construction()
***Description***
The function creates for each unique element in the common_node
array a NodeEdge class object (node) with the starting point for the node at
the position in the start_pos array for the first unique element in
common_node.
The node name is 'n0',...,'nn' where n indicates the unique elements.
A dictionary cod with key = 'n0',...'nn' with for each for the n uniques
and value = NodeEdge class object (node) for each unique position is returned
***I/O***
Input parameter:
a) start_pos type(np.'ndarray').shape(n,3) array with all 3D starting postions
of the cone
b) common_node type('np.ndarray').shape(n,1) array with the name of the starting node
for each GrowthCone class object
Output:
a) nod type('dict') dictionary with all n starting NodeEdge class objects (node)
Inline output:
Plot output:
Save file:
"""
def node_construction(start_pos, common_node, node_names):
    """Create one starting Node per unique entry of ``common_node``.

    For every unique value the position of its first occurrence in
    ``start_pos`` is used; node names are consumed from ``node_names``
    (destructively, via pop) and prefixed with 'n'.
    """
    nod = {}
    positions = {}
    _, first_indices = np.unique(common_node, return_index=True)
    for idx in first_indices:
        label = 'n' + str(int(node_names.pop(0)))
        positions[label] = start_pos[int(idx)]
    for label, pos in positions.items():
        nod[label] = NodeEdge_Classes.Node(label, pos[0], pos[1], pos[2])
    return nod
"""
Function name : cod_nod_edd_match()
***Description***
The function constructs the first edge for each of the n GrowthCone class objects.
With the information in cod, nod and the common_node array all parameters and references
between the three class objects are matched and updated.
Each cone points towards two nodes: its starting node and one at its tip that moves
alongside it.
Each cone starts with one edge connecting those nodes. During growth the edge is
elongated. These edges inherit the flavour of the cone.
Each node lists all edges connecting to it.
Three dictionaries cod, nod, edd for the GrowthCone NodeEdge (node), NodeEdge (edge) class
objects are returned
***I/O***
Input parameter:
a) cod type('dict') dictionary with all n starting Cone Class objects
b) nod type('dict') dictionary with all n starting NodeEdge class objects (node)
c) common_node type('np.ndarray').shape(n,1) array with the name of the starting node
for each GrowthCone class object
Output:
a) cod type('dict') dictionary with all n starting Cone Class objects updated
b) nod type('dict') dictionary with all n starting NodeEdge class objects (node) updated
c) edd type('dict') dictionary with all starting edges
Inline output:
Plot output:
Save file:
"""
def cod_nod_edd_match(cod, nod, common_node, node_names, edge_names):
    """Wire up cones, nodes and edges for the start of the simulation.

    For every cone: register it on its start node, create a tip node at the
    cone position, and create one edge connecting start node and tip node.
    `node_names` and `edge_names` are consumed destructively via pop(0).
    Returns the updated (cod, nod, edd) dictionaries.
    """
    edd = {}
    new_nod = {}
    # keytup = (cone_key, start_node_number); relies on cod iterating in the
    # same order common_node was built.
    for keytup in zip(cod,common_node):
        nod['n' + str(int(keytup[1]))].constructor_cone.append(keytup[0])
        cod[keytup[0]].node_list.append('n' + str(int(keytup[1])))
        # Tip node created by the cone itself, mirrored in new_nod.
        node2 = cod[keytup[0]].node_construction(node_names.pop(0))
        new_nod[node2] = NodeEdge_Classes.Node(node2,*cod[keytup[0]].pos_list[0])
        cod[keytup[0]].node_list.append(node2)
        edge =cod[keytup[0]].edge_construction(edge_names.pop(0))
        cod[keytup[0]].current_edge = [edge]
        # Both endpoints list the connecting edge.
        nod['n' + str(int(keytup[1]))].edges.append(edge)
        new_nod[node2].edges.append(edge)
        # Edge starts degenerate: both endpoints at the cone position.
        edd[edge] = NodeEdge_Classes.Edge(edge,*cod[keytup[0]].pos_list[0],*cod[keytup[0]].pos_list[0])
        edd[edge].flavour=cod[keytup[0]].flavour
        edd[edge].constructor_cone.append(keytup[0])
        edd[edge].nodes.append('n' + str(int(keytup[1])))
        edd[edge].nodes.append(node2)
        # NOTE(review): pos_list is shared by reference with the cone — the
        # edge trace grows as the cone moves; confirm this aliasing is wanted.
        edd[edge].pos_list=cod[keytup[0]].pos_list
    nod.update(new_nod)
    return cod, nod, edd
# Module is import-only; nothing runs when executed directly.
if __name__=='__main__':
    pass
| 2.859375 | 3 |
scipy-example/fitsincos.py | truongduy134/levenberg-marquardt | 6 | 12762722 | """ Given the set of points generated by
f(x) = 0.5 * cos(2 * x) + 2 * sin(0.5 * x) with some noise,
use Levenberg-Marquardt algorithm to find the model of the form
f(x) = a * cos(b * x) + b * sin(a * x) to fit all the points.
"""
import numpy as np
import scipy.optimize as scipy_opt
def sincos_func(x_data, a, b):
    """ Computes the function a * cos(b * x) + b * sin(a * x)

    (Fixed: the old docstring described a * sin(b * x) + b * cos(a * x),
    which contradicted both the implementation and the module docstring.)

    Args:
        x_data : A Numpy array of input data
        a : Real-valued argument of the function
        b : Real-valued argument of the function

    Returns:
        A Numpy array of values of the function a * cos(b * x) + b * sin(a * x)
        evaluated at each x in x_data
    """
    return a * np.cos(b * x_data) + b * np.sin(a * x_data)
def main():
    """ Main function to set up data points and call the Scipy curve-fitting
    routine (whose underlying algorithm is Levenberg-Marquardt) from several
    different initial guesses.
    """
    x_data = np.array([
        1.0, 1.5, -1.0, 2.0, 1.8, 2.5, -0.5, -0.8, -1.1, 2.2, 2.6, 2.8, -2.0,
        -2.2, -1.7, -1.4, 0.05, 0.0, 1.570796, -1.570796, 0.6, -0.6,
        1.67, 2.4, 0.1
    ])
    y_data = np.array([
        0.76, 0.860000, -1.18, 1.356, 1.118, 2.039, -0.224, -0.7934, -1.339,
        1.63, 2.1613, 2.35, -2.009, -1.936, -1.985, -1.759, 0.55, 0.5, 0.914,
        -1.9142, 0.77, -0.4, 1.0, 1.9, 0.59
    ])
    # Several starting points, to show how sensitive LM is to the guess.
    guess_abs = [[0.25, 1.5], [1.7, 3], [10, 5], [0.0, 3.0]]
    for guess_ab in guess_abs:
        ab, covariance = scipy_opt.curve_fit(
            sincos_func, x_data, y_data, guess_ab)
        # Python 3 fix: print is a function (the original used Python 2
        # print statements); also fixed the 'Intial' typo in the output.
        print('Initial guess: %s' % str(guess_ab))
        print('LM results: %s' % str(ab))
# Run the fitting demo only when executed as a script.
if __name__ == "__main__":
    main()
| 3.484375 | 3 |
py/mtree/heap_queue.py | wjcskqygj2015/M-Tree | 48 | 12762723 | <reponame>wjcskqygj2015/M-Tree<filename>py/mtree/heap_queue.py<gh_stars>10-100
from collections import namedtuple
# Internal (key, value) pair stored in the heap; `k` is the precomputed key.
_HeapItem = namedtuple('_HeapItem', 'k, value')


class HeapQueue(object):
    """Binary min-heap priority queue with an optional key function.

    Args:
        content: initial iterable of values (heapified on construction).
        key: callable mapping a value to its priority key.
        max: when True, behave as a max-queue (implemented by negating the
             key).  The name shadows the builtin but is kept for backward
             compatibility with existing callers.

    Fixed: `heapify` used Python 2's `xrange`, which raises NameError on
    Python 3; it now uses `range` (works on both).
    """

    def __init__(self, content=(), key=lambda x: x, max=False):
        if max:
            self.key = lambda x: -key(x)
        else:
            self.key = key
        self._items = [_HeapItem(self.key(value), value) for value in content]
        self.heapify()

    def _items_less_than(self, a, b):
        # True if the item at index a has a strictly smaller key than at b.
        return self._items[a].k < self._items[b].k

    def _swap_items(self, a, b):
        self._items[a], self._items[b] = self._items[b], self._items[a]

    def _make_heap(self, i):
        """Sift the item at index i down until the heap invariant holds."""
        smallest = i
        left = 2 * i + 1
        if left < len(self._items) and self._items_less_than(left, smallest):
            smallest = left
        right = 2 * i + 2
        if right < len(self._items) and self._items_less_than(right, smallest):
            smallest = right
        if smallest != i:
            self._swap_items(i, smallest)
            self._make_heap(smallest)

    def heapify(self):
        """Restore the heap invariant over the whole item list, O(n)."""
        # Python 3 fix: was `xrange(...)`.
        for i in range(len(self._items) // 2, -1, -1):
            self._make_heap(i)

    def head(self):
        """Return (without removing) the smallest item; IndexError if empty."""
        return self._items[0].value

    def push(self, value):
        """Add a value, sifting it up to its proper position, O(log n)."""
        i = len(self._items)
        self._items.append(_HeapItem(self.key(value), value))
        while i > 0:
            parent = (i - 1) // 2
            if self._items_less_than(parent, i):
                break
            self._swap_items(i, parent)
            i = parent

    def pop(self):
        """Remove and return the smallest item, O(log n)."""
        popped = self._items[0].value
        self._items[0] = self._items[-1]
        self._items.pop(-1)
        self._make_heap(0)
        return popped

    def pushpop(self, value):
        """Push `value` then pop the smallest item in a single O(log n) step."""
        k = self.key(value)
        if k <= self._items[0].k:
            # New value would be popped immediately; skip touching the heap.
            return value
        popped = self._items[0].value
        self._items[0] = _HeapItem(k, value)
        self._make_heap(0)
        return popped

    def __len__(self):
        return len(self._items)

    def extractor(self):
        """Yield all items in ascending key order, emptying the queue."""
        while self._items:
            yield self.pop()
| 3.40625 | 3 |
2015/13_seatings_test.py | pchudzik/adventofcode | 0 | 12762724 | <filename>2015/13_seatings_test.py
import importlib
parse_seatings = importlib \
.import_module("13_seatings") \
.parse_seatings
count_happiness = importlib \
.import_module("13_seatings") \
.count_happiness
happines_change = importlib \
.import_module("13_seatings") \
.happines_change
include_me = importlib \
.import_module("13_seatings") \
.include_me
# Happiness rules from the Advent of Code 2015 day 13 example, used as the
# shared fixture for all tests below.
attendees = [
    "Alice would gain 54 happiness units by sitting next to Bob.",
    "Alice would lose 79 happiness units by sitting next to Carol.",
    "Alice would lose 2 happiness units by sitting next to David.",
    "Bob would gain 83 happiness units by sitting next to Alice.",
    "Bob would lose 7 happiness units by sitting next to Carol.",
    "Bob would lose 63 happiness units by sitting next to David.",
    "Carol would lose 62 happiness units by sitting next to Alice.",
    "Carol would gain 60 happiness units by sitting next to Bob.",
    "Carol would gain 55 happiness units by sitting next to David.",
    "David would gain 46 happiness units by sitting next to Alice.",
    "David would lose 7 happiness units by sitting next to Bob.",
    "David would gain 41 happiness units by sitting next to Carol."
]
def test_including_me():
    # include_me should add "me" with the given (0) happiness towards every
    # guest, and every guest with 0 happiness towards "me".
    assert include_me({
        "Alice": {
            "David": -2
        },
        "David": {
            "Alice": 46,
        }
    }, 0) == {
        "Alice": {
            "David": -2,
            "me": 0
        },
        "David": {
            "Alice": 46,
            "me": 0,
        },
        "me": {
            "Alice": 0,
            "David": 0
        }
    }
def test_parse_seatings():
    # The textual rules parse into a nested person -> neighbour -> delta map.
    assert parse_seatings(attendees) == {
        "Alice": {
            "Bob": 54,
            "Carol": -79,
            "David": -2
        },
        "Bob": {
            "Alice": 83,
            "Carol": -7,
            "David": -63
        },
        "Carol": {
            "Alice": -62,
            "Bob": 60,
            "David": 55
        },
        "David": {
            "Alice": 46,
            "Bob": -7,
            "Carol": 41
        }
    }
def test_count_happiness():
    # The optimal circular arrangement from the puzzle example scores 330.
    assert count_happiness(parse_seatings(attendees), ("Alice", "Bob", "Carol", "David")) == 330
def test_count_happines_change():
    # happines_change returns (arrangement, score); the best score is 330.
    best_seatings = parse_seatings(attendees)
    assert happines_change(best_seatings)[1] == 330
| 3.140625 | 3 |
Iris_Data.py | peterdt713/Programming_Scripting_Ex | 0 | 12762725 | # <NAME>, 04 Mar 18
# Exercise 5
# Please complete the following exercise this week.
# Write a Python script that reads the Iris data set in and prints the four numerical values on each row in a nice format.
# That is, on the screen should be printed the petal length, petal width, sepal length and sepal width, and these values should have the decimal places aligned, with a space between the columns.
# Print the four numeric columns of each Iris record, separated by spaces.
with open("iris.data.csv") as t:
    for line in t:
        fields = line.split(',')
        print(fields[0], fields[1], fields[2], fields[3])
| 4.03125 | 4 |
src/test.py | EricSekyere/gtools | 0 | 12762726 | <gh_stars>0
import unittest
class TestScripts(unittest.TestCase):
    """Placeholder test case for the gtools scripts — no assertions yet."""

    def setUp(self):
        # No fixtures needed yet.
        pass
        # self.files = get_files("./mock", filters)

    def teardown(self):
        # NOTE(review): unittest calls `tearDown` (camelCase); this
        # lower-case spelling is never invoked by the test runner.
        pass

    def testget_files(self):
        # TODO: exercise get_files against the ./mock directory.
        pass
| 1.757813 | 2 |
problemas/problema12.py | Yadkee/HPcodewarsMadrid2018 | 1 | 12762727 | <reponame>Yadkee/HPcodewarsMadrid2018
#! python3
"""[12] El General Manager rata - 23 Puntos:
Se recibirán el nombre del equipo, el número de integrantes y las
habilidades de cada uno de los jugadores.
Se deben devolver la alineación y la calificación total del equipo."""
coeficientes = ((0, 0.2, 0.45, 0.15, 0.2, 0),
(0, 0.45, 0.15, 0.35, 0.05, 0),
(0.2, 0.3, 0, 0.3, 0.1, 0.1),
(0.4, 0, 0, 0.05, 0.25, 0.30),
(0.2, 0, 0, 0, 0.3, 0.5))
nombreDelEquipo = input()[1:-1]
numeroDeJugadores = int(input()[1:-1])
jugadores = [input()[1:-1].split(" ", 1) for _ in range(numeroDeJugadores)]
listaAlineacion = []
for puesto in range(5):
opciones = []
for nombre, habilidades in jugadores:
if nombre in (i[0] for i in listaAlineacion): # Ya está cogido
continue
habilidades = [int(i) for i in habilidades.split(" ")]
valor = sum(habilidades[i] * coeficientes[puesto][i] for i in range(6))
opciones.append((nombre, valor))
listaAlineacion.append(max(opciones, key=lambda x: x[1]))
alineacion = " ".join(i[0] for i in listaAlineacion)
calificacion = round(sum(i[1] for i in listaAlineacion) / numeroDeJugadores)
print("Alineación de %s: %s. Calificación %d." % (nombreDelEquipo,
alineacion,
min(calificacion, 5)))
| 2.796875 | 3 |
tasks.py | daffidwilde/blog | 0 | 12762728 | <gh_stars>0
from invoke import task
@task
def test(c):
    # Run the test suite, also collecting doctests from Markdown files.
    c.run("pytest --doctest-glob='*.md'")
@task
def main(c):
    # Build/serve the blog via the project's main entry point.
    c.run("python main.py")
| 1.210938 | 1 |
ckanext/issues/tests/factories.py | apteksdi/ckanext-issues | 1 | 12762729 | from ckanext.issues import model
try:
from ckan.new_tests import factories, helpers
except ImportError:
from ckan.tests import factories, helpers
import factory
class Issue(factory.Factory):
    """Factory for ckanext-issues Issue objects, created through the
    ``issue_create`` action (keyword arguments only)."""

    class Meta:
        model = model.Issue
        abstract = False

    title = factory.Sequence(lambda n: 'Test Issue [{n}]'.format(n=n))
    description = 'Some description'
    dataset_id = factory.LazyAttribute(lambda _: factories.Dataset()['id'])

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        raise NotImplementedError(".build() isn't supported in CKAN")

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        if args:
            assert False, "Positional args aren't supported, use keyword args."
        # Resolve the acting user, then strip it from the payload because
        # issue_create does not accept a 'user' key itself.
        context = {'user': factories._get_action_user_name(kwargs)}
        payload = dict(kwargs)
        payload.pop('user', None)
        return helpers.call_action('issue_create',
                                   context=context,
                                   **payload)
class IssueComment(factory.Factory):
    """Factory for issue comments, created through the
    ``issue_comment_create`` action (keyword arguments only)."""

    class Meta:
        model = model.IssueComment
        abstract = False

    comment = 'some comment'

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        raise NotImplementedError(".build() isn't supported in CKAN")

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        if args:
            assert False, "Positional args aren't supported, use keyword args."
        # The acting user is passed via the context; remaining kwargs form
        # the action payload directly.
        context = {'user': factories._get_action_user_name(kwargs)}
        return helpers.call_action('issue_comment_create',
                                   context=context,
                                   **kwargs)
| 2.265625 | 2 |
ResumeWebsite/resumeApp/migrations/0002_auto_20170707_2027.py | patryan117/Django_Resume_Website | 0 | 12762730 | <reponame>patryan117/Django_Resume_Website<filename>ResumeWebsite/resumeApp/migrations/0002_auto_20170707_2027.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-08 00:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; adds Education/GeneralInfo/Skill models and
    # widens Job.summary. Do not edit the operations by hand.

    dependencies = [
        ('resumeApp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Education',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('university', models.CharField(max_length=64)),
                ('major', models.CharField(max_length=64)),
                ('minor', models.CharField(max_length=64)),
                ('semester', models.CharField(max_length=32)),
                ('year', models.DateField()),
            ],
            options={
                'verbose_name_plural': 'Education',
            },
        ),
        migrations.CreateModel(
            name='GeneralInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('about', models.TextField(max_length=5000)),
                ('executive_summary', models.TextField(max_length=5000)),
            ],
            options={
                'verbose_name_plural': 'General Info',
            },
        ),
        migrations.CreateModel(
            name='Skill',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=5000)),
            ],
        ),
        migrations.AlterField(
            model_name='job',
            name='summary',
            field=models.TextField(max_length=5000),
        ),
    ]
| 1.835938 | 2 |
src/anonymization/utils/constants.py | CrisesUrv/SoBigDataAnonymizationPypi | 0 | 12762731 | <reponame>CrisesUrv/SoBigDataAnonymizationPypi<filename>src/anonymization/utils/constants.py
# Keys used in the anonymization configuration dictionaries.
EPSILON = "epsilon"
K = "k"
MAX_VALUE = "max_value"
MIN_VALUE = "min_value"
ATTRIBUTE = "attribute"
NAME = "name"
SENSITIVITY_TYPE = "sensitivity_type"
ATTRIBUTE_TYPE = "attribute_type"

# window size is used in the disclosure risk calculation
# it indicates the % of the num of records in the dataset
WINDOW_SIZE = 1

# border margin is used in differential privacy anonymization
# it indicates the margin to be applied to the attribute domain
BORDER_MARGIN = 1.5
| 1.414063 | 1 |
pyppl/transforms/ppl_symbol_simplifier.py | bradleygramhansen/PySPPL | 12 | 12762732 | #
# This file is part of PyFOPPL, an implementation of a First Order Probabilistic Programming Language in Python.
#
# License: MIT (see LICENSE.txt)
#
# 20. Mar 2018, <NAME>
# 21. Mar 2018, <NAME>
#
from ..ppl_ast import *
from ..aux.ppl_transform_visitor import TransformVisitor
class SymbolSimplifier(TransformVisitor):
    """AST transform that shortens generated symbol names.

    Names containing a '__'/'____' separator are truncated at the separator
    and suffixed with a per-prefix counter ('_1', '_2', ...).  The mapping is
    cached in `names_map` so every occurrence of an original name is rewritten
    to the same simplified name.
    """

    def __init__(self):
        super().__init__()
        self.names_map = {}    # original name -> simplified name
        self.name_count = {}   # simplified prefix -> times used so far

    def simplify_symbol(self, name: str):
        # Cached result first, so a name is only ever assigned one short form.
        if name in self.names_map:
            return self.names_map[name]
        elif name.startswith('__'):
            # Dunder-prefixed names are only shortened when they contain a
            # '____' separator; keep the leading '__' of the separator.
            if '____' in name:
                short = name[:name.index('____')+2]
                if short not in self.name_count:
                    self.name_count[short] = 1
                else:
                    self.name_count[short] += 1
                short += "_{}".format(self.name_count[short])
                self.names_map[name] = short
                return short
            else:
                # NOTE(review): this branch deliberately does not cache,
                # unlike every other path — confirm that is intended.
                return name
        elif '__' in name:
            # Ordinary generated names: truncate at the first '__'.
            short = name[:name.index('__')]
            if short not in self.name_count:
                self.name_count[short] = 1
            else:
                self.name_count[short] += 1
            short += "_{}".format(self.name_count[short])
            self.names_map[name] = short
            return short
        else:
            # Plain names pass through unchanged but still reserve a counter
            # slot so later generated names cannot collide with them.
            self.names_map[name] = name
            if name not in self.name_count:
                self.name_count[name] = 1
            else:
                self.name_count[name] += 1
            return name

    def visit_def(self, node: AstDef):
        # Visit the value first so inner symbols are simplified before the
        # definition's own name is mapped.
        value = self.visit(node.value)
        name = self.simplify_symbol(node.name)
        if name != node.name or value is not node.value:
            return node.clone(name=name, value=value)
        else:
            return node

    def visit_let(self, node: AstLet):
        source = self.visit(node.source)
        name = self.simplify_symbol(node.target)
        body = self.visit(node.body)
        if name == node.target and source is node.source and body is node.body:
            return node
        else:
            return node.clone(target=name, source=source, body=body)

    def visit_symbol(self, node: AstSymbol):
        name = self.simplify_symbol(node.name)
        if name != node.name:
            return node.clone(name=name)
        else:
            return node
| 2.796875 | 3 |
configs/BostonHousing/mc_dropout.py | Neronjust2017/pytorch-regression-project | 1 | 12762733 | pdrop = 0.1
tau = 0.1
lengthscale = 0.01
N = 364
print(lengthscale ** 2 * (1 - pdrop) / (2. * N * tau)) | 2.328125 | 2 |
treelstm/model.py | navid5792/Tree-Transformer | 2 | 12762734 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from . import Constants
from transformer.models import Transformer
# module for childsumtreelstm
# module for childsumtreelstm
class ChildSumTreeLSTM(nn.Module):
    """Recursive tree encoder. Despite the name, the active code paths
    (forward / forward1) delegate to a Transformer's tree_encode methods;
    the original child-sum LSTM cell (node_forward) is retained but its
    layers are commented out in __init__."""

    def __init__(self, in_dim, mem_dim, opt):
        super(ChildSumTreeLSTM, self).__init__()
        self.in_dim = in_dim
        self.mem_dim = mem_dim
        '''
        self.ioux = nn.Linear(self.in_dim, self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        self.Wv = nn.Linear(self.mem_dim, self.mem_dim)
        '''
        self.transformer = Transformer(opt)
        #self.W_mv = nn.Parameter(torch.randn(50, 100))
        #self.W_mv_M = nn.Parameter(torch.randn(50, 100))

    def node_forward(self, inputs, child_c, child_h):
        # Classic child-sum LSTM cell.
        # NOTE(review): self.ioux/iouh/fx/fh are commented out in __init__,
        # so calling this raises AttributeError — dead code as shipped.
        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)
        iou = self.ioux(inputs) + self.iouh(child_h_sum)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = F.sigmoid(i), F.sigmoid(o), F.tanh(u)
        f = F.sigmoid(
            self.fh(child_h) +
            self.fx(inputs).repeat(len(child_h), 1)
        )
        fc = torch.mul(f, child_c)
        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
        h = torch.mul(o, F.tanh(c))
        return c, h

    def forward(self, tree, inputs, arcs, S, ttype):
        '''
        num_words = 1
        child_words = []
        residual = []
        residual.append(inputs[tree.idx].unsqueeze(0))
        for idx in range(tree.num_children):
            self.forward(tree.children[idx], inputs, arc, S)
            num_words += tree.children[idx].words
            child_words.append(tree.children[idx].words)
            residual.append(inputs[tree.children[idx].idx].unsqueeze(0))
        tree.words = num_words
        child_words.append(tree.words)
        if tree.num_children == 0:
            tree.state = inputs[tree.idx].view(1,-1) #child_h
            tree.words = 1
            return tree.words
        else:
            states = []
            for x in tree.children:
                states.append(x.state)
            child_h = torch.cat(states, dim=0)
            x_hat = inputs[tree.idx].view(1,-1)
            tree.state = self.transformer.tree_encode(x_hat, child_h.unsqueeze(0), S, child_words, residual)
            return tree.state
        '''
        # Post-order traversal: encode all children, then combine them (plus
        # arc-label embeddings) at this node via the transformer.
        # NOTE(review): leaves return a word count while internal nodes
        # return the encoded state — callers use tree.state, not the return.
        num_words = 1
        child_words = []
        residual = []
        residual.append(inputs[tree.idx].unsqueeze(0))
        for idx in range(tree.num_children):
            self.forward(tree.children[idx], inputs, arcs, S, ttype)
            num_words += tree.children[idx].words
            child_words.append(tree.children[idx].words)
            residual.append(inputs[tree.children[idx].idx].unsqueeze(0))
        tree.words = num_words
        child_words.append(tree.words)
        if tree.num_children == 0:
            tree.state = inputs[tree.idx].view(1,-1) #child_h
            tree.arc = arcs[tree.idx].view(1,-1)
            tree.words = 1
            return tree.words
        else:
            states = []
            arc_labels= []
            for x in tree.children:
                states.append(x.state)
                arc_labels.append(x.arc)
            child_h = torch.cat(states, dim=0) #+ self.Wv(torch.cat(arc_labels, dim=0))
            child_arcs = torch.cat(arc_labels, dim=0)
            x_hat = inputs[tree.idx].view(1,-1)
            tree.state = self.transformer.tree_encode(x_hat, child_h.unsqueeze(0), child_arcs.unsqueeze(0), S, child_words, residual, ttype)
            tree.arc = arcs[tree.idx].view(1,-1)
            return tree.state

    def forward1(self, tree, inputs, S):
        # Flat variant: gather all leaf/subtree vectors and encode them as
        # one sequence with tree_encode1; returns a single-element list.
        if tree.num_children == 0:
            tree.state = inputs[tree.idx].view(1,-1) #child_h
            return [tree.state]
        subtree_list = []
        for idx in range(tree.num_children):
            subtree_list += self.forward1(tree.children[idx], inputs, S)
        dummy = torch.cat(subtree_list, dim=0)
        word_vec = self.transformer.tree_encode1(dummy.unsqueeze(0), S)
        return [word_vec.squeeze(0)]

    def forward_MVRNN(self, tree, inputs, Minputs, S): # for dependency RNNs
        # Matrix-Vector RNN composition (Socher-style).
        # NOTE(review): self.W_mv / self.W_mv_M are commented out in
        # __init__, and the composition indexes children [0] and [1] only —
        # assumes binary trees; confirm before use.
        for idx in range(tree.num_children):
            self.forward_MVRNN(tree.children[idx], inputs, Minputs, S)
        if tree.num_children == 0:
            tree.Vstate = inputs[tree.idx].view(1,-1) #child_h
            tree.Mstate = Minputs[tree.idx].view(1,50,-1) #child_h
            return
        else:
            states = []
            matrix = []
            for x in tree.children:
                states.append(x.Vstate.view(1, -1))
                matrix.append(x.Mstate.view(1, 50, -1))
            child_hV = torch.cat(states, dim=0)
            child_hM = torch.cat(matrix, dim=0)
            term1 = torch.mm(child_hM[1].view(50,-1), child_hV[0].view(-1,1)).view(1,-1)
            term2 = torch.mm(child_hM[0].view(50,-1), child_hV[1].view(-1,1)).view(1,-1)
            tree.Vstate = torch.tanh(torch.mm(self.W_mv, torch.cat([term1, term2], dim=1).t()).t())
            tree.Mstate = torch.mm( self.W_mv_M, torch.cat([child_hM[0], child_hM[1]], dim=1).t())
            return tree.Vstate.view(1,-1)
# module for distance-angle similarity
class Similarity(nn.Module):
def __init__(self, mem_dim, hidden_dim, num_classes):
super(Similarity, self).__init__()
self.mem_dim = mem_dim
self.hidden_dim = hidden_dim
self.num_classes = num_classes
self.dpout_fc = 0.1
self.wh = nn.Linear(4 * self.mem_dim, self.hidden_dim)
self.wp = nn.Linear(self.hidden_dim, self.num_classes)
'''
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(4 * self.mem_dim, self.hidden_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.hidden_dim,self.hidden_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.hidden_dim, self.num_classes),
)'''
def forward(self, lvec, rvec):
lvec = lvec
rvec = rvec
mult_dist = torch.mul(lvec, rvec)
abs_dist = torch.abs(torch.add(lvec, -rvec))
vec_dist = torch.cat((mult_dist, abs_dist), 1)
out = F.sigmoid(self.wh(vec_dist))
out = F.log_softmax(self.wp(out), dim=1)
#out = self.classifier(vec_dist)
return out
def position_encoding_init(n_position, d_pos_vec):
    """Build the sinusoidal position-encoding table.

    Row 0 is kept as a zero vector (padding position); for every other
    position, even dimensions get sin and odd dimensions get cos of
    pos / 10000^(2*(j//2)/d_pos_vec).  Returns a FloatTensor of shape
    (n_position, d_pos_vec).
    """
    table = np.zeros((n_position, d_pos_vec))
    for pos in range(1, n_position):
        for j in range(d_pos_vec):
            table[pos, j] = pos / np.power(10000, 2 * (j // 2) / d_pos_vec)

    table[1:, 0::2] = np.sin(table[1:, 0::2])  # dim 2i
    table[1:, 1::2] = np.cos(table[1:, 1::2])  # dim 2i+1
    return torch.from_numpy(table).type(torch.FloatTensor)
# putting the whole model together
class SimilarityTreeLSTM(nn.Module):
def __init__(self, vocab_size, arc_vocab_size, in_dim, mem_dim, hidden_dim, num_classes, sparsity, freeze, opt):
super(SimilarityTreeLSTM, self).__init__()
self.emb = nn.Embedding(vocab_size, in_dim, padding_idx=Constants.PAD, sparse=sparsity)
self.arc_emb = nn.Embedding(arc_vocab_size, in_dim, padding_idx=Constants.PAD, sparse=sparsity)
if freeze:
self.emb.weight.requires_grad = False
self.arc_emb.weight.requires_grad = False
self.childsumtreelstm = ChildSumTreeLSTM(in_dim, mem_dim, opt)
self.similarity = Similarity(mem_dim, hidden_dim, num_classes)
self.n_positions = 100
def forward(self, ltree, linputs, rtree, rinputs, larc, rarc):
linputs = self.emb(linputs)
rinputs = self.emb(rinputs)
linputs_arc = self.arc_emb(larc)
rinputs_arc = self.arc_emb(rarc)
lstate = self.childsumtreelstm(ltree, linputs, linputs_arc, torch.FloatTensor(), 0)
rstate = self.childsumtreelstm(rtree, rinputs, rinputs_arc, torch.FloatTensor(), 0)
lstate1 = self.childsumtreelstm(ltree, linputs, linputs_arc, torch.FloatTensor(), 1)
rstate1 = self.childsumtreelstm(rtree, rinputs, rinputs_arc, torch.FloatTensor(), 1)
output = self.similarity(torch.cat([lstate, lstate1], dim = -1), torch.cat([rstate, rstate1], dim = -1))
#output = self.similarity(lstate, rstate)
return output
| 2.203125 | 2 |
parameters.py | Yale-LILY/QueryReformulator | 3 | 12762735 | <reponame>Yale-LILY/QueryReformulator
import os
from collections import OrderedDict
######################
# General parameters #
######################
# Flat configuration module: all values are read as module attributes by the
# rest of the codebase, so names and defaults must stay stable.
data_folder = '.'
n_words = 374000 # words for the vocabulary
vocab_path = data_folder + '/data/D_cbow_pdw_8B.pkl' # Path to the python dictionary containing the vocabulary.
wordemb_path = data_folder + '/data/D_cbow_pdw_8B.pkl' # Path to the python dictionary containing the word embeddings.
dataset_path = data_folder + '/data/jeopardy_dataset.hdf5' # path to load the hdf5 dataset containing queries and ground-truth documents.
docs_path = data_folder + '/data/jeopardy_corpus.hdf5' # Path to load the articles and links.
docs_path_term = data_folder + '/data/jeopardy_corpus.hdf5' # Path to load the articles and links.

############################
# Search Engine Parameters #
############################
engine = 'lucene' # Search engine used to retrieve documents.
n_threads = 20 # number of parallel process that will execute the queries on the search engine.
index_name = 'index' # index name for the search engine. Used when engine is 'lucene'.
index_name_term = 'index_terms' # index name for the search engine. Used when engine is 'lucene'.
index_folder = data_folder + '/data/' + index_name + '/' # folder to store lucene's index. It will be created in case it does not exist.
index_folder_term = data_folder + '/data/' + index_name_term + '/' # folder to store lucene's index. It will be created in case it does not exist.
local_index_folder = './' + index_name
local_index_folder_term = './' + index_name_term
use_cache = False # If True, cache (query-retrieved docs) pairs. Watch for memory usage.

####################
# Model parameters #
####################
optimizer='adam' # valid options are: 'sgd', 'rmsprop', 'adadelta', and 'adam'.
dim_proj=500 # LSTM number of hidden units.
dim_emb=500 # word embedding dimension.
patience=1000 # Number of epochs to wait before early stop if no progress.
max_epochs=5000 # The maximum number of epochs to run.
dispFreq=100 # Display to stdout the training progress every N updates.
lrate=0.0002 # Learning rate for sgd (not used for adadelta and rmsprop).
erate=0.002 # multiplier for the entropy regularization.
l2reg=0.0 # multiplier for the L2 regularization.
saveto='model.npz' # The best model will be saved there.
validFreq=10000 # Compute the validation error after this number of updates.
saveFreq=10000 # Save the parameters after every saveFreq updates.
batch_size_train=64 # The batch size during training.
batch_size_pred=16 # The batch size during training.
#reload_model='model.npz' # Path to a saved model we want to start from.
reload_model=False # Path to a saved model we want to start from.
train_size=10000 # If >0, we keep only this number of train examples when measuring accuracy.
valid_size=10000 # If >0, we keep only this number of valid examples when measuring accuracy.
test_size=10000 # If >0, we keep only this number of test examples when measuring accuracy.
fixed_wemb = True # set to true if you don't want to learn the word embedding weights.
dropout = -1 # If >0, <dropout> fraction of the units in the fully connected layers will be set to zero at training time.
window_query = [3,3] # Window size for the CNN used on the query.
filters_query = [250,250] # Number of filters for the CNN used on the query.
window_cand = [9,3] # Window size for the CNN used on the candidate words.
filters_cand = [250,250] # Number of filters for the CNN used on the candidate words.
n_hidden_actor = [250] # number of hidden units per scoring layer on the actor.
n_hidden_critic = [250] # number of hidden units per scoring layer on the critic.
max_words_input = 200 # Maximum number of words from the input text.
max_terms_per_doc = 200 # Maximum number of candidate terms from each feedback doc. Must be always less than max_words_input .
max_candidates = 40 # maximum number of candidate documents that will be returned by the search engine.
max_feedback_docs = 7 # maximum number of feedback documents whose words be used to reformulate the query.
max_feedback_docs_train = 1 # maximum number of feedback documents whose words be used to reformulate the query. Only used during training.
n_iterations = 2 # number of query reformulation iterations.
frozen_until = 1 # don't learn and act greedly until this iteration (inclusive). If frozen_until <= 0, learn everything.
reward = 'RECALL' # metric that will be optimized. Valid values are 'RECALL', 'F1', 'MAP', and 'gMAP'.
metrics_map = OrderedDict([('RECALL',0), ('PRECISION',1), ('F1',2), ('MAP',3), ('LOG-GMAP',4)])
q_0_fixed_until = 2 # Original query will be fixed until this iteration (inclusive). If <=0, original query can be modified in all iterations.
src/utils/data_mgmt.py | mohantyaditya/NLPUseCase | 0 | 12762736 |
import logging
from tqdm import tqdm
import random
import re
import xml.etree.ElementTree as ET
def process_posts(fd_in, fd_out_train, fd_out_test, target_tag, split):
    """Split an XML posts dump into train/test TSV files.

    Each line of ``fd_in`` is expected to be one ``<row .../>`` XML element
    (Stack Exchange dump format).  A line is written to ``fd_out_train``
    when ``random.random() > split`` and to ``fd_out_test`` otherwise.
    The emitted TSV columns are: post id, binary label (1 if ``target_tag``
    occurs in the ``Tags`` attribute, else 0), and the whitespace-normalised
    "title body" text.  Broken lines are logged and skipped.
    """
    for line_num, line in enumerate(tqdm(fd_in), start=1):
        try:
            fd_out = fd_out_train if random.random() > split else fd_out_test
            attr = ET.fromstring(line).attrib
            pid = attr.get("Id", "")
            label = 1 if target_tag in attr.get("Tags", "") else 0
            # Bug fix: the dump attribute is "Title", not "Ttile" -- the typo
            # made every title come out empty.
            title = re.sub(r"\s+", " ", attr.get("Title", "")).strip()
            body = re.sub(r"\s+", " ", attr.get("Body", "")).strip()
            text = title + " " + body
            # Bug fix: removed the stray space after the first tab so the
            # output is a well-formed three-column TSV.
            fd_out.write(f"{pid}\t{label}\t{text}\n")
        except Exception as e:
            # enumerate() keeps the counter correct even for broken lines
            # (previously it was only advanced on success, so the reported
            # line number drifted after the first failure).
            logging.exception(f"skipping the broken line {line_num}: {e}")
| 2.5 | 2 |
data/data_scapers/asset_pricing_factors.py | AlainDaccache98/Quantropy | 1 | 12762737 | <reponame>AlainDaccache98/Quantropy
import os
import re
import urllib.request
import zipfile
from datetime import timedelta
import pandas as pd
import config
from data.data_preparation_helpers import save_into_csv
def resample_daily_df(daily_df, path):
    """Downsample a daily returns DataFrame to W/M/Q/Y frequency and save
    each frequency as its own sheet of the same file.

    NOTE(review): the aggregation compounds returns ((x+1).cumprod()-1), so
    it assumes `daily_df` holds simple (not log) returns -- confirm with callers.
    """
    for freq in ['Weekly', 'Monthly', 'Quarterly', 'Yearly']:
        # freq[0] is the pandas offset alias: 'W', 'M', 'Q', 'Y'.
        df = daily_df.resample(freq[0]).apply(lambda x: ((x + 1).cumprod() - 1).last("D"))
        df.index = df.index + timedelta(days=1) - timedelta(seconds=1)  # reindex to EOD
        save_into_csv(filename=path, df=df, sheet_name=freq)
def scrape_AQR_factors():
    """Download AQR's daily QMJ factor workbook and persist the USA factor
    series (daily plus resampled frequencies) as Excel and pickle files.

    Requires network access; overwrites the previous files on disk.
    """
    url = 'https://images.aqr.com/-/media/AQR/Documents/Insights/Data-Sets/Quality-Minus-Junk-Factors-Daily.xlsx'
    path = os.path.join(config.FACTORS_DIR_PATH, "AQR Factors Data.xlsx")  # save it as this name
    urllib.request.urlretrieve(url, path)
    daily_df = pd.DataFrame()
    # One sheet per factor; each contributes its 'USA' column (the RF sheet
    # stores the series under 'Risk Free Rate' instead).
    for sheet_name in ['QMJ Factors', 'MKT', 'SMB', 'HML Devil', 'UMD', 'RF']:
        temp = pd.read_excel(io=pd.ExcelFile(path), sheet_name=sheet_name, skiprows=18, index_col=0)
        temp.index = pd.to_datetime(temp.index)
        usa_series = pd.Series(temp['USA'] if sheet_name != 'RF' else temp['Risk Free Rate'], name=sheet_name)
        # Outer join keeps all dates even when a factor is missing some days.
        daily_df = daily_df.join(usa_series, how='outer') if not daily_df.empty else pd.DataFrame(usa_series)
    daily_df.index = daily_df.index + timedelta(days=1) - timedelta(seconds=1)  # reindex to EOD
    # Normalise column names to the naming used elsewhere in the project.
    daily_df.rename(columns={'MKT': 'MKT-RF', 'QMJ Factors': 'QMJ', 'HML Devil': 'HML'}, inplace=True)
    # Replace the raw AQR workbook with the cleaned single-sheet version.
    os.remove(path)
    daily_df.to_excel(path, sheet_name='Daily')
    resample_daily_df(daily_df=daily_df, path=path)
    daily_df.to_pickle(os.path.join(config.FACTORS_DIR_PATH, 'pickle', "AQR Factors Data.pkl"))
def scrape_Fama_French_factors():
    """Download the daily Fama-French 3-factor, Carhart momentum, and
    Fama-French 5-factor CSV archives from Ken French's data library and
    persist each as Excel (daily + resampled sheets) and pickle files.

    Requires network access.  The Carhart 4-factor set is built by joining
    the momentum factor onto the previously scraped 3-factor data, so the
    order of `factors_urls` matters.
    """
    factors_urls = [
        ('Fama-French 3 Factors Data',
         'http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_Factors_daily_CSV.zip'),
        ('Carhart 4 Factors Data',
         'https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Momentum_Factor_daily_CSV.zip'),
        ('Fama-French 5 Factors Data',
         'https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_5_Factors_2x3_daily_CSV.zip')]
    for idx, url in enumerate(factors_urls):
        urllib.request.urlretrieve(url[1], 'fama_french.zip')
        zip_file = zipfile.ZipFile('fama_french.zip', 'r')
        zip_file.extractall()
        zip_file.close()
        # The extracted CSV's exact name varies, but it always starts 'F-F'.
        file_name = next(file for file in os.listdir('.') if re.search('F-F', file))
        # Each archive has a different preamble length before the header row.
        skiprows = 4 if idx == 0 else 13 if idx == 1 else 3 if idx == 2 else Exception
        ff_factors = pd.read_csv(file_name, skiprows=skiprows, index_col=0)
        ff_factors.dropna(how='all', inplace=True)
        ff_factors.index = pd.to_datetime(ff_factors.index, format='%Y%m%d')
        ff_factors = ff_factors.apply(lambda x: x / 100)  # original is in percent
        ff_factors.rename(columns={'Mkt-RF': 'MKT-RF'}, inplace=True)
        ff_factors.index = ff_factors.index + timedelta(days=1) - timedelta(seconds=1)  # reindex to EOD
        if idx == 1:  # carhart = FF3 + momentum
            ff_factors.rename(columns={'Mom   ': 'UMD'}, inplace=True)
            three_factors = pd.read_pickle(
                os.path.join(config.FACTORS_DIR_PATH, 'pickle', '{}.pkl'.format(factors_urls[0][0])))
            ff_factors = three_factors.join(ff_factors, how='inner')
        os.remove(file_name)
        os.remove('fama_french.zip')
        excel_path = os.path.join(config.FACTORS_DIR_PATH, '{}.xlsx'.format(url[0]))
        ff_factors.to_excel(excel_path, sheet_name='Daily')
        resample_daily_df(daily_df=ff_factors, path=excel_path)
        pickle_path = os.path.join(config.FACTORS_DIR_PATH, 'pickle', '{}.pkl'.format(url[0]))
        ff_factors.to_pickle(pickle_path)
def scrape_Q_factors():
    """Placeholder: scraping of the q-factor model data is not implemented yet."""
    pass
if __name__ == '__main__':
    # Script entry point: refresh the Fama-French factor files on disk
    # (requires network access).  AQR scraping can be enabled as well.
    # scrape_AQR_factors()
    scrape_Fama_French_factors()
| 2.828125 | 3 |
calc/contour.py | pshchelo/vampy | 1 | 12762738 | #!/usr/bin/env python
"""
"""
import numpy as np
from scipy.odr import Model
from scipy.optimize import leastsq
from scipy import ndimage
from scipy.ndimage import gaussian_gradient_magnitude
from scipy.ndimage import map_coordinates
from common import PIX_ERR
from features import line_profile
def contour(img, A0, R0, phi1=-np.pi/2, phi2=np.pi/2, dphi=np.pi/180, DR=0.2,
            sigma=3):
    """Trace a vesicle rim by scanning radial brightness profiles.

    For every angle in [phi1, phi2) a profile is sampled between radii
    R0*(1-DR) and R0*(1+DR) around the centre ``A0`` (numpy (y, x) order),
    and the steepest-gradient position along it is taken as the rim point.

    NOTE(review): still a rough draft (see original comment) -- the returned
    points add a scalar offset to the full (point, error) array; the offset
    should be projected onto the ray direction.
    """
    y0, x0 = A0
    phi = np.arange(phi1, phi2, dphi)
    # Inner and outer end points of each radial scan line.
    x1 = x0+R0*(1-DR)*np.cos(phi)
    y1 = y0+R0*(1-DR)*np.sin(phi)
    x2 = x0+R0*(1+DR)*np.cos(phi)
    y2 = y0+R0*(1+DR)*np.sin(phi)
    rim = []
    Nphi, = phi.shape
    for i in range(Nphi):
        A1 = np.asarray(((y1[i], x1[i]), (PIX_ERR, PIX_ERR)))
        A2 = np.asarray(((y2[i], x2[i]), (PIX_ERR, PIX_ERR)))
        # Bug fix: A1/A2 are already the per-angle ((y, x), (dy, dx)) arrays;
        # the original indexed them with i again, passing single rows to
        # line_profile instead of the point-with-error pairs it expects.
        metrics, metrics_err, profile = line_profile(img, A1, A2)
        rel_rim = find_rim(profile, sigma)*metrics
        real_rim = A1 + rel_rim
        rim.append(real_rim)
    return rim
def find_rim(profile, sigma=3):
    """Return the index of the steepest edge in a 1-D brightness profile.

    The profile is Gaussian-smoothed with width ``sigma`` and the position
    of the maximal Gaussian gradient magnitude (same width) is returned.
    """
    smoothed = ndimage.gaussian_filter1d(profile, sigma)
    edge_strength = ndimage.gaussian_gradient_magnitude(smoothed, sigma)
    return np.argmax(edge_strength)
def line_from_points(point1, point2):
    """Return slope and intercept (k, b) of the line through two points.

    @param point1: array in numpy order = (y,x)
    @param point2: array in numpy order = (y,x)
    """
    rise = point2[0] - point1[0]
    run = point2[1] - point1[1]
    slope = rise / run
    intercept = point1[0] - slope * point1[1]
    return slope, intercept
def line_perpendicular(k, b, x):
    """Return (k_perp, b_perp) of the perpendicular to y = k*x + b at abscissa x.

    @param k: slope of the original line y = k*x + b
    @param b: intercept of the original line
    @param x: abscissa where the perpendicular intersects the line
    """
    slope = -1.0 / k
    # Both lines pass through (x, k*x + b), which fixes the intercept.
    intercept = (k * x + b) - slope * x
    return slope, intercept
def circle_fcn(B, x, y):
    """Implicit circle equation r**2 - (xc-x)**2 - (yc-y)**2, B = (r, xc, yc).

    Evaluates to zero for points (x, y) lying exactly on the circle.
    """
    r, xc, yc = B[0], B[1], B[2]
    return r**2 - (xc - x)**2 - (yc - y)**2
def _circle_fjacb(B,x,y):
fjacb = np.empty((x.shape[0],3))
fjacb[:,0] = 2*B[0]
fjacb[:,1] = -2*(B[1]-x)
fjacb[:,2] = -2*(B[2]-y)
return fjacb
def _circle_fjacd(B,x,y):
fjacd = np.empty((x.shape[0],2))
fjacd[:,0] = 2*(B[1]-x)
fjacd[:,1] = 2*(B[1]-y)
return fjacd
def _circle_est(x,y):
return np.mean((x.ptp(), y.ptp()))/2.0, x.mean(), y.mean()
def _circle_meta():
return {'name':'Equation of a circle'}
# Implicit ODR model of a circle; parameter vector is (radius, xc, yc).
circle_model = Model(circle_fcn, estimate=_circle_est,
                     fjacb=_circle_fjacb, fjacd=_circle_fjacd,
                     meta=_circle_meta, implicit=True)
def FitCircle(x,y):
    '''
    Least-squares circle fit through the points (x, y).

    Uses scipy.optimize.leastsq without observation errors; the analytic
    parameter Jacobian `_circle_fjacb` is supplied via `Dfun`.  Returns the
    full leastsq output tuple; element 0 is the fitted (radius, xc, yc).
    '''
    return leastsq(circle_fcn, _circle_est(x,y), (x, y), Dfun=_circle_fjacb, full_output=1)
def section_profile(img, point1, point2):
    """Brightness profile of ``img`` along the segment between two points.

    Each point is supplied with its error as a numpy array in the notation
    ((y, x), (dy, dx)).  Returns ``(metric, metric_err, profile)`` where
    ``metric`` converts profile-sample spacing into pixel lengths (with the
    propagated uncertainty ``metric_err``) and ``profile`` holds the image
    values interpolated along the line.  It is assumed the line is close
    enough to horizontal/vertical that the longer coordinate span defines
    the sampling density (other map_coordinates options could be exposed).
    """
    y1, x1, dy1, dx1 = point1.flatten()
    y2, x2, dy2, dx2 = point2.flatten()
    # Slope of the section line and its propagated uncertainty.
    slope = (y2 - y1) / (x2 - x1)
    slope_err = np.sqrt(dy1*dy1 + dy2*dy2 + slope*slope*(dx1*dx1 + dx2*dx2)) / np.fabs(x2 - x1)
    # One sample per pixel along the dominant direction.
    n_samples = int(max(np.fabs(y2 - y1), np.fabs(x2 - x1)))
    xs = np.linspace(x1, x2, n_samples)
    ys = np.linspace(y1, y2, n_samples)
    # Pixel length of one profile step, with error propagation; the two
    # branches keep the expression numerically stable for steep lines.
    if np.fabs(slope) <= 1:
        metric = np.sqrt(1 + slope*slope)
        metric_err = np.fabs(slope)*slope_err/metric
    else:
        metric = np.sqrt(1 + 1/(slope*slope))
        metric_err = slope_err/np.fabs(metric*slope*slope*slope)
    # Interpolated image values along the line plus the metric information.
    return metric, metric_err, map_coordinates(img, [ys, xs], output=float)
def CircleFunc(r, N=100):
    """Return N points (x, y) on a circle of radius r centred at the origin.

    The angle sweeps [0, 2*pi] inclusive, so the first and last points
    coincide.
    """
    angles = np.linspace(0, 2*np.pi, N)
    return r*np.cos(angles), r*np.sin(angles)
def VesicleEdge_phc(img, x0, y0, r0, N=100, phi1=0, phi2=2*np.pi, sigma=1):
    """Locate vesicle edge points in a phase-contrast image.

    Casts N rays from the centre (x0, y0) towards a seed circle of radius
    r0, clips each ray end point to the image borders, and takes the
    position of the strongest brightness gradient (width ``sigma``) along
    the ray as the edge.  Returns two length-N arrays (Xedge, Yedge).
    """
    Xedge = np.empty(N)
    Yedge = np.empty(N)
    for i, phi in enumerate(np.linspace(phi1, phi2, N)):
        # Nominal ray end point on the seed circle.
        x = x0+r0*np.cos(phi)
        y = y0+r0*np.sin(phi)
        # Clip the end point to the image borders while keeping it on the ray.
        if x < 0:
            x = 0
            y = y0+(x-x0)*np.tan(phi)
        elif x > img.shape[1]-1:
            x = img.shape[1]-1
            y = y0+(x-x0)*np.tan(phi)
        if y < 0:
            y = 0
            x = x0+(y-y0)/np.tan(phi)
        elif y > img.shape[0]-1:
            # Bug fix: clamp y to the image *height* (shape[0]); the original
            # used shape[1] (the width), mis-clipping non-square images.
            y = img.shape[0]-1
            x = x0+(y-y0)/np.tan(phi)
        point1 = np.asarray(((y0, x0), (PIX_ERR, PIX_ERR)))
        point2 = np.asarray(((y, x), (PIX_ERR, PIX_ERR)))
        metric, metric_err, line = section_profile(img, point1, point2)
        # The edge is the steepest gradient along the sampled profile.
        grad = gaussian_gradient_magnitude(line, sigma)
        pos = np.argmax(grad)
        # Convert the profile index back to image coordinates via the metric.
        Xedge[i] = x0+pos*np.cos(phi)*metric
        Yedge[i] = y0+pos*np.sin(phi)*metric
    return Xedge, Yedge
| 2.171875 | 2 |
apps/inventory/__init__.py | lsdlab/djshop_toturial | 0 | 12762739 | <reponame>lsdlab/djshop_toturial
from django.apps import AppConfig
class InventoryConfig(AppConfig):
    # Django application configuration for the inventory app.
    name = 'apps.inventory'
    verbose_name = "Inventory"

    def ready(self):
        # Import signal handlers so they are registered when the app loads.
        import apps.inventory.signals


# Lets Django find this AppConfig when the app is referenced by module path.
default_app_config = 'apps.inventory.InventoryConfig'
| 1.703125 | 2 |
biblio/stats-and-tops.py | lokal-profil/isfdb_site | 0 | 12762740 | #!_PYTHONLOC
#
# (C) COPYRIGHT 2013-2022 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdb import *
from common import PrintHeader, PrintNavbar, PrintTrailer
from library import ISFDBLink
def print_line(script, query_string, display_name):
    # Emit one HTML list item linking to a CGI script (Python 2 print statement).
    print '<li>%s' % ISFDBLink(script, query_string, display_name)
# Render the "Statistics and Top Lists" landing page as HTML (Python 2 CGI
# script): header/navbar, a sequence of link sections, then the trailer.
PrintHeader('ISFDB Statistics and Top Lists')
PrintNavbar('stats', 0, 0, 'stats-and-tops.cgi', 0)
print '<h4>Database Tables</h4>'
print '<ul>'
print_line('languages.cgi', '', 'Supported Languages')
print_line('verification_sources.cgi', '', 'Secondary Verification Sources')
print '</ul>'
print '<hr>'
print 'The following lists are regenerated weekly'
print '<h4>Database Statistics</h4>'
print '<ul>'
print_line('stats.cgi', '4', 'Summary Database Statistics')
print_line('stats.cgi', '11', 'Submissions per Year')
print '</ul>'
print '<h4>Author Statistics</h4>'
print '<ul>'
print_line('authors_by_debut_year_table.cgi', '', 'Authors By Debut Year')
print_line('stats.cgi', '13', 'Most-Viewed Authors')
print '<li>Authors by Age:'
print '<ul>'
print_line('stats.cgi', '16', 'Oldest Living Authors')
print_line('stats.cgi', '17', 'Oldest Non-Living Authors')
print_line('stats.cgi', '18', 'Youngest Living Authors')
print_line('stats.cgi', '19', 'Youngest Non-Living Authors')
print '</ul>'
print '<li>Authors/Editors Ranked by Awards and Nominations:'
print '<ul>'
print_line('popular_authors_table.cgi', '0', 'All Authors and Editors')
print_line('popular_authors_table.cgi', '1', 'Novel Authors')
print_line('popular_authors_table.cgi', '2', 'Short Fiction Authors')
print_line('popular_authors_table.cgi', '3', 'Collection Authors')
print_line('popular_authors_table.cgi', '4', 'Anthology Editors')
print_line('popular_authors_table.cgi', '5', 'Non-Fiction Authors')
print_line('popular_authors_table.cgi', '6', 'Other Types Authors and Editors')
print '</ul>'
print '</ul>'
print '<h4>Language Statistics</h4>'
print '<ul>'
print_line('stats.cgi', '20', 'Authors by Working Language')
print_line('stats.cgi', '21', 'Titles by Language')
print '</ul>'
print '<h4>Title Statistics</h4>'
print '<ul>'
print_line('stats.cgi', '5', 'Titles by Year of First Publication')
print_line('stats.cgi', '7', 'Titles by Author Age')
print_line('stats.cgi', '8', 'Percent of Titles in Series by Year')
print_line('most_reviewed_table.cgi', '', 'Most-Reviewed Titles (in genre publications)')
print '<li>Titles Ranked by Awards and Nominations:'
print '<ul>'
print_line('most_popular_table.cgi', '0', 'All Titles')
print_line('most_popular_table.cgi', '1', 'Novels')
print_line('most_popular_table.cgi', '2', 'Short Fiction')
print_line('most_popular_table.cgi', '3', 'Collections')
print_line('most_popular_table.cgi', '4', 'Anthologies')
print_line('most_popular_table.cgi', '5', 'Non-Fiction')
print_line('most_popular_table.cgi', '6', 'Other Types')
print '</ul>'
print_line('stats.cgi', '12', 'Top Novels as Voted by ISFDB Users')
print_line('stats.cgi', '25', 'Top Short Fiction Titles as Voted by ISFDB Users')
print '<li>Most-Viewed Titles:'
print '<ul>'
print_line('stats.cgi', '14', 'Most-Viewed Novels')
print_line('stats.cgi', '15', 'Most-Viewed Short Fiction')
print '</ul>'
print '</ul>'
print '<h4>Publication Statistics</h4>'
print '<ul>'
print_line('stats.cgi', '6', 'Publications by Year')
print_line('stats.cgi', '9', 'Percent of Books by Type by Year')
print_line('stats.cgi', '10', 'Percent of Publications by Format by Year')
print '</ul>'
print '<h4>Top ISFDB Editors</h4>'
print '<ul>'
print_line('stats.cgi', '2', 'Top Verifiers')
print_line('stats.cgi', '1', 'Top Moderators')
print_line('stats.cgi', '22', 'Top Taggers')
print_line('stats.cgi', '23', 'Top Voters')
print_line('topcontrib.cgi', '', 'Top Contributors (All Submission Types)')
print '<ul>'
# One "Top Contributors" entry per submission type; SUBMAP[sub_type][3]
# is the display name and is empty for types that should be skipped.
for sub_type in sorted(SUBMAP.keys()):
    if SUBMAP[sub_type][3]:
        print_line('topcontrib.cgi', sub_type, 'Top Contributors (%s)' % SUBMAP[sub_type][3])
print '</ul>'
print '</ul>'
print '<h4>Historical snapshots (not up to date)</h4>'
print '<ul>'
print '<li><a href="%s://%s/degrees.html">Author Communities</a> [as of 2005]' % (PROTOCOL, HTMLHOST)
print '<li><a href="%s://%s/agestuff.html">Award-Winning Titles by Author Age</a> [as of 2005]' % (PROTOCOL, HTMLHOST)
print '<li><a href="%s://%s/index.php/Annual_Page_Views_and_Database_Growth">Database Growth and Annual Page Views</a>' % (PROTOCOL, WIKILOC)
print '</ul>'
PrintTrailer('frontpage', 0, 0)
| 1.84375 | 2 |
frb/tests/test_frbigm.py | KshitijAggarwal/FRB | 0 | 12762741 | <filename>frb/tests/test_frbigm.py
# Module to run tests on FRB calculations using DLAs
from __future__ import print_function, absolute_import, division, unicode_literals
# TEST_UNICODE_LITERALS
import numpy as np
import pytest
from astropy import units as u
from frb import igm
def test_rhoMstar():
    # Average stellar-mass density (including remnants) at z=1, checked
    # against a frozen regression value.
    rho_Mstar_full = igm.avg_rhoMstar(1., remnants=True)
    # Test
    assert rho_Mstar_full.unit == u.Msun/u.Mpc**3
    assert np.isclose(rho_Mstar_full.value, 4.65882439e+08)
def test_rhoISM():
    # Average ISM mass density at z=0, checked against a frozen value.
    rhoISM = igm.avg_rhoISM(0.)
    # Test
    assert rhoISM.unit == u.Msun/u.Mpc**3
    assert np.isclose(rhoISM.value, 2.19389268e+08)
def test_igmDM():
    # Average IGM dispersion measure to z=1: value, unit, and consistency of
    # the cumulative output with the scalar output.
    DM = igm.average_DM(1.)
    # Value and unit
    assert DM.unit == u.pc/u.cm**3
    assert np.isclose(DM.value, 941.13451342, rtol=0.001)
    # Cumulative
    DM_cum, _ = igm.average_DM(1., cumul=True)
    assert DM == DM_cum[-1]
    # Cross through HeII reionization
    DM4 = igm.average_DM(4.)
    assert np.isclose(DM4.value, 3551.37492765, rtol=0.001)
def test_z_from_DM():
    # Inverse mapping: redshift inferred from a given DM.
    # Note this removes 100 DM units of 'nuisance'
    z = igm.z_from_DM(1000.*u.pc/u.cm**3)
    # Test
    assert np.isclose(z, 0.95739493, rtol=0.001)
| 2.1875 | 2 |
jVMC/nets/lstm.py | JunaidAkhter/vmc_jax | 0 | 12762742 | <reponame>JunaidAkhter/vmc_jax
import jax
from jax.config import config
config.update("jax_enable_x64", True)
import flax
import flax.linen as nn
import numpy as np
import jax.numpy as jnp
import jVMC.global_defs as global_defs
from jVMC.util.symmetries import LatticeSymmetry
from functools import partial
class LSTMCell(nn.Module):
    """
    Implementation of a LSTM-cell, that is scanned over an input sequence.
    The LSTMCell therefore receives two inputs, the hidden state (if it is in a deep part of the CellStack) or the
    input (if it is the first cell of the CellStack) aswell as the hidden state of the previous RNN-cell.
    Both inputs are mapped to obtain a new hidden state, which is what the RNNCell implements.

    Arguments:
        * ``inputDim``: size of the input Dimension
        * ``actFun``: non-linear activation function

    Returns:
        new hidden state
    """
    inputDim: int = 2
    actFun: callable = nn.elu

    @nn.compact
    def __call__(self, carry, x):
        # NOTE(review): mixes deprecated ``flax.nn`` initializer paths with
        # flax.linen's nn.LSTMCell -- this only works on older flax versions;
        # confirm against the pinned flax release.
        newCarry, out = nn.LSTMCell(kernel_init=partial(flax.nn.linear.default_kernel_init, dtype=global_defs.tReal),
                                    recurrent_kernel_init=partial(flax.nn.initializers.orthogonal(), dtype=global_defs.tReal),
                                    bias_init=partial(flax.nn.initializers.zeros, dtype=global_defs.tReal))(carry, x)
        # Project the LSTM output back to the input dimension, apply the
        # non-linearity, and flatten so the scan yields a 1-D vector per step.
        out = self.actFun(nn.Dense(features=self.inputDim)(out))
        return newCarry, out.reshape((-1))
# ** end class LSTMCell
class LSTM(nn.Module):
    """
    Implementation of an LSTM which consists of an LSTMCell with an additional output layer.
    This class defines how sequential input data is treated.

    Arguments:
        * ``L``: length of the spin chain
        * ``hiddenSize``: size of the hidden state vector
        * ``inputDim``: dimension of the input
        * ``actFun``: non-linear activation function
        * ``logProbFactor``: factor defining how output and associated sample probability are related. 0.5 for pure states and 1 for POVMs.

    Returns:
        logarithmic wave-function coefficient or POVM-probability
    """
    L: int = 10
    hiddenSize: int = 10
    inputDim: int = 2
    actFun: callable = nn.elu
    logProbFactor: float = 0.5

    def setup(self):
        # Single shared cell reused at every position of the scan.
        self.lstmCell = LSTMCell(inputDim=self.inputDim, actFun=self.actFun)

    def __call__(self, x):
        # Autoregressive evaluation: scan the cell over the one-hot encoded
        # configuration and sum the per-site conditional log-probabilities.
        state = nn.LSTMCell.initialize_carry(jax.random.PRNGKey(0), (1,), self.hiddenSize)
        _, probs = self.lstm_cell((state, jnp.zeros(self.inputDim)), jax.nn.one_hot(x, self.inputDim))
        return self.logProbFactor * jnp.sum(probs, axis=0)

    @partial(nn.transforms.scan,
             variable_broadcast='params',
             split_rngs={'params': False})
    def lstm_cell(self, carry, x):
        # One scan step: feed the previous output back in and read off the
        # log-probability assigned to the observed one-hot value x.
        newCarry, out = self.lstmCell(carry[0], carry[1])
        prob = nn.softmax(out)
        prob = jnp.log(jnp.sum(prob * x, axis=-1))
        return (newCarry, x), prob

    def sample(self, batchSize, key):
        """sampler

        Draws batchSize configurations autoregressively, one site per scan
        step, using one PRNG key per site.
        """
        # NOTE(review): `outputs` is allocated but never used below.
        outputs = jnp.asarray(np.zeros((batchSize, self.L, self.L)))
        state = nn.LSTMCell.initialize_carry(jax.random.PRNGKey(0), (batchSize,), self.hiddenSize)
        keys = jax.random.split(key, self.L)
        _, res = self.lstm_cell_sample((state, jnp.zeros((batchSize, self.inputDim))), keys)
        return jnp.transpose(res[1])

    @partial(nn.transforms.scan,
             variable_broadcast='params',
             split_rngs={'params': False})
    def lstm_cell_sample(self, carry, x):
        # One sampling step: x is this site's PRNG key; draw from the
        # categorical defined by the cell's logits and feed the one-hot
        # sample back as the next input.
        newCarry, logits = jax.vmap(self.lstmCell)(carry[0], carry[1])
        sampleOut = jax.random.categorical(x, logits)
        sample = jax.nn.one_hot(sampleOut, self.inputDim)
        logProb = jnp.log(jnp.sum(nn.softmax(logits) * sample, axis=1))
        return (newCarry, sample), (logProb, sampleOut)
# ** end class LSTM
class LSTMsym(nn.Module):
    """
    Implementation of an LSTM which consists of an LSTMCellStack with an additional output layer.
    It uses the LSTM class to compute probabilities and averages the outputs over all symmetry-invariant configurations.

    Arguments:
        * ``orbit``: collection of maps that define symmetries (instance of ``util.symmetries.LatticeSymmetry``)
        * ``L``: length of the spin chain
        * ``hiddenSize``: size of the hidden state vector
        * ``inputDim``: dimension of the input
        * ``actFun``: non-linear activation function

    Returns:
        Symmetry-averaged logarithmic wave-function coefficient or POVM-probability
    """
    orbit: LatticeSymmetry
    L: int = 10
    hiddenSize: int = 10
    inputDim: int = 2
    actFun: callable = nn.elu
    logProbFactor: float = 0.5

    def setup(self):
        # Bug fix: the previous code called the non-existent ``LSTM.shared``
        # with undefined bare names (L, hiddenSize, ...), raising NameError.
        # In flax.linen, a submodule assigned in setup() is automatically
        # shared between __call__ and sample.
        self.lstm = LSTM(L=self.L, hiddenSize=self.hiddenSize,
                         inputDim=self.inputDim, actFun=self.actFun,
                         logProbFactor=self.logProbFactor)

    def __call__(self, x):
        # Evaluate the wrapped LSTM on every symmetry-transformed copy of x
        # and average the resulting probabilities (the 1/logProbFactor power
        # converts log-amplitudes to probabilities before averaging).
        x = jax.vmap(lambda o, s: jnp.dot(o, s), in_axes=(0, None))(self.orbit.orbit, x)

        def evaluate(x):
            return self.lstm(x)

        logProbs = self.logProbFactor * jnp.log(jnp.mean(jnp.exp((1. / self.logProbFactor) * jax.vmap(evaluate)(x)), axis=0))
        return logProbs

    def sample(self, batchSize, key):
        """Draw batchSize samples, applying a random symmetry map to each."""
        key1, key2 = jax.random.split(key)
        configs = self.lstm.sample(batchSize, key1)
        # Bug fix: ``orbit`` was referenced as a bare (undefined) name; it is
        # an attribute of this module.
        orbitIdx = jax.random.choice(key2, self.orbit.orbit.shape[0], shape=(batchSize,))
        configs = jax.vmap(lambda k, o, s: jnp.dot(o[k], s), in_axes=(0, None, 0))(orbitIdx, self.orbit.orbit, configs)
        return configs
# ** end class LSTMsym
| 2.515625 | 3 |
varsom_flood_client/models/formatted_content_result_list_alert.py | NVE/python-varsom-flood-client | 0 | 12762743 | <filename>varsom_flood_client/models/formatted_content_result_list_alert.py
# coding: utf-8
"""
Flomvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FormattedContentResultListAlert(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'status_code': 'int',
        'content': 'list[Alert]',
        'formatter': 'MediaTypeFormatter',
        'media_type': 'MediaTypeHeaderValue',
        'request': 'object'
    }

    attribute_map = {
        'status_code': 'StatusCode',
        'content': 'Content',
        'formatter': 'Formatter',
        'media_type': 'MediaType',
        'request': 'Request'
    }

    def __init__(self, status_code=None, content=None, formatter=None, media_type=None, request=None):  # noqa: E501
        """FormattedContentResultListAlert - a model defined in Swagger"""  # noqa: E501
        self._status_code = None
        self._content = None
        self._formatter = None
        self._media_type = None
        self._request = None
        self.discriminator = None
        if status_code is not None:
            self.status_code = status_code
        if content is not None:
            self.content = content
        if formatter is not None:
            self.formatter = formatter
        if media_type is not None:
            self.media_type = media_type
        if request is not None:
            self.request = request

    @property
    def status_code(self):
        """Gets the status_code of this FormattedContentResultListAlert.  # noqa: E501


        :return: The status_code of this FormattedContentResultListAlert.  # noqa: E501
        :rtype: int
        """
        return self._status_code

    @status_code.setter
    def status_code(self, status_code):
        """Sets the status_code of this FormattedContentResultListAlert.


        :param status_code: The status_code of this FormattedContentResultListAlert.  # noqa: E501
        :type: int
        """
        # Setter validates against the HTTP status codes allowed by the spec.
        allowed_values = [100, 101, 200, 201, 202, 203, 204, 205, 206, 300, 301, 302, 303, 304, 305, 306, 307, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 426, 500, 501, 502, 503, 504, 505]  # noqa: E501
        if status_code not in allowed_values:
            raise ValueError(
                "Invalid value for `status_code` ({0}), must be one of {1}"  # noqa: E501
                .format(status_code, allowed_values)
            )

        self._status_code = status_code

    @property
    def content(self):
        """Gets the content of this FormattedContentResultListAlert.  # noqa: E501


        :return: The content of this FormattedContentResultListAlert.  # noqa: E501
        :rtype: list[Alert]
        """
        return self._content

    @content.setter
    def content(self, content):
        """Sets the content of this FormattedContentResultListAlert.


        :param content: The content of this FormattedContentResultListAlert.  # noqa: E501
        :type: list[Alert]
        """

        self._content = content

    @property
    def formatter(self):
        """Gets the formatter of this FormattedContentResultListAlert.  # noqa: E501


        :return: The formatter of this FormattedContentResultListAlert.  # noqa: E501
        :rtype: MediaTypeFormatter
        """
        return self._formatter

    @formatter.setter
    def formatter(self, formatter):
        """Sets the formatter of this FormattedContentResultListAlert.


        :param formatter: The formatter of this FormattedContentResultListAlert.  # noqa: E501
        :type: MediaTypeFormatter
        """

        self._formatter = formatter

    @property
    def media_type(self):
        """Gets the media_type of this FormattedContentResultListAlert.  # noqa: E501


        :return: The media_type of this FormattedContentResultListAlert.  # noqa: E501
        :rtype: MediaTypeHeaderValue
        """
        return self._media_type

    @media_type.setter
    def media_type(self, media_type):
        """Sets the media_type of this FormattedContentResultListAlert.


        :param media_type: The media_type of this FormattedContentResultListAlert.  # noqa: E501
        :type: MediaTypeHeaderValue
        """

        self._media_type = media_type

    @property
    def request(self):
        """Gets the request of this FormattedContentResultListAlert.  # noqa: E501


        :return: The request of this FormattedContentResultListAlert.  # noqa: E501
        :rtype: object
        """
        return self._request

    @request.setter
    def request(self, request):
        """Sets the request of this FormattedContentResultListAlert.


        :param request: The request of this FormattedContentResultListAlert.  # noqa: E501
        :type: object
        """

        self._request = request

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively serialises nested models (anything with to_dict),
        # lists of models, and dicts of models.
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(FormattedContentResultListAlert, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FormattedContentResultListAlert):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 2.15625 | 2 |
Adafruit_BluefruitLE/bluez_dbus/device.py | acoomans/Adafruit_Python_BluefruitLE | 415 | 12762744 | <reponame>acoomans/Adafruit_Python_BluefruitLE
# Python object to represent the bluez DBus device object. Provides properties
# and functions to easily interact with the DBus object.
# Author: <NAME>
#
# Copyright (c) 2015 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from past.builtins import map
import threading
import time
import uuid
import dbus
from ..config import TIMEOUT_SEC
from ..interfaces import Device
from ..platform import get_provider
from .adapter import _INTERFACE as _ADAPTER_INTERFACE
from .gatt import BluezGattService, BluezGattCharacteristic, _SERVICE_INTERFACE, _CHARACTERISTIC_INTERFACE
_INTERFACE = 'org.bluez.Device1'
class BluezDevice(Device):
    """Bluez BLE device."""

    def __init__(self, dbus_obj):
        """Create an instance of the bluetooth device from the provided bluez
        DBus object.
        """
        self._device = dbus.Interface(dbus_obj, _INTERFACE)
        self._props = dbus.Interface(dbus_obj, 'org.freedesktop.DBus.Properties')
        # Events signalled from the DBus PropertiesChanged callback thread.
        self._connected = threading.Event()
        self._disconnected = threading.Event()
        self._props.connect_to_signal('PropertiesChanged', self._prop_changed)

    def _prop_changed(self, iface, changed_props, invalidated_props):
        # Handle property changes for the device.  Note this call happens in
        # a separate thread so be careful to make thread safe changes to state!
        # Skip any change events not for this adapter interface.
        if iface != _INTERFACE:
            return
        # If connected then fire the connected event.
        if 'Connected' in changed_props and changed_props['Connected'] == 1:
            self._connected.set()
        # If disconnected then fire the disconnected event.
        if 'Connected' in changed_props and changed_props['Connected'] == 0:
            self._disconnected.set()

    def connect(self, timeout_sec=TIMEOUT_SEC):
        """Connect to the device.  If not connected within the specified timeout
        then an exception is thrown.
        """
        self._connected.clear()
        self._device.Connect()
        if not self._connected.wait(timeout_sec):
            raise RuntimeError('Exceeded timeout waiting to connect to device!')

    def disconnect(self, timeout_sec=TIMEOUT_SEC):
        """Disconnect from the device.  If not disconnected within the specified
        timeout then an exception is thrown.
        """
        self._disconnected.clear()
        self._device.Disconnect()
        if not self._disconnected.wait(timeout_sec):
            raise RuntimeError('Exceeded timeout waiting to disconnect from device!')

    def list_services(self):
        """Return a list of GattService objects that have been discovered for
        this device.
        """
        return map(BluezGattService,
                   get_provider()._get_objects(_SERVICE_INTERFACE,
                                               self._device.object_path))

    def discover(self, service_uuids, char_uuids, timeout_sec=TIMEOUT_SEC):
        """Wait up to timeout_sec for the specified services and characteristics
        to be discovered on the device.  Returns True once at least the
        expected service and characteristic UUIDs have been discovered, or
        False if the timeout expires first (no exception is raised).
        """
        # Turn expected values into a counter of each UUID for fast comparison.
        expected_services = set(service_uuids)
        expected_chars = set(char_uuids)
        # Loop trying to find the expected services for the device.
        start = time.time()
        while True:
            # Find actual services discovered for the device.
            actual_services = set(self.advertised)
            # Find actual characteristics discovered for the device.
            chars = map(BluezGattCharacteristic,
                        get_provider()._get_objects(_CHARACTERISTIC_INTERFACE,
                                                    self._device.object_path))
            actual_chars = set(map(lambda x: x.uuid, chars))
            # Compare actual discovered UUIDs with expected and return true if at
            # least the expected UUIDs are available.
            if actual_services >= expected_services and actual_chars >= expected_chars:
                # Found at least the expected services!
                return True
            # Couldn't find the devices so check if timeout has expired and try again.
            if time.time()-start >= timeout_sec:
                return False
            time.sleep(1)

    @property
    def advertised(self):
        """Return a list of UUIDs for services that are advertised by this
        device.
        """
        uuids = []
        # Get UUIDs property but wrap it in a try/except to catch if the property
        # doesn't exist as it is optional.
        try:
            uuids = self._props.Get(_INTERFACE, 'UUIDs')
        except dbus.exceptions.DBusException as ex:
            # Ignore error if device has no UUIDs property (i.e. might not be
            # a BLE device).
            if ex.get_dbus_name() != 'org.freedesktop.DBus.Error.InvalidArgs':
                raise ex
        return [uuid.UUID(str(x)) for x in uuids]

    @property
    def id(self):
        """Return a unique identifier for this device.  On supported platforms
        this will be the MAC address of the device, however on unsupported
        platforms (Mac OSX) it will be a unique ID like a UUID.
        """
        return self._props.Get(_INTERFACE, 'Address')

    @property
    def name(self):
        """Return the name of this device."""
        return self._props.Get(_INTERFACE, 'Name')

    @property
    def is_connected(self):
        """Return True if the device is connected to the system, otherwise False.
        """
        return self._props.Get(_INTERFACE, 'Connected')

    @property
    def rssi(self):
        """Return the RSSI signal strength in decibels."""
        return self._props.Get(_INTERFACE, 'RSSI')

    @property
    def _adapter(self):
        """Return the DBus path to the adapter that owns this device."""
        return self._props.Get(_INTERFACE, 'Adapter')
| 2.21875 | 2 |
python/tests/unit/test_ledger_create_user.py | DACH-NY/dazl-client | 0 | 12762745 | # Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from dazl import connect
from dazl.ledger import ActAs, Admin, ReadAs, User
import pytest
@pytest.mark.asyncio
async def test_ledger_create_user(sandbox_v2) -> None:
    """Creating a user with no explicit rights should succeed."""
    async with connect(url=sandbox_v2, admin=True) as conn:
        allocation = await conn.allocate_party()
        await conn.create_user(User("testuser1", allocation.party))
@pytest.mark.asyncio
async def test_ledger_create_user_with_rights(sandbox_v2) -> None:
    """Creating a user with ActAs/ReadAs/Admin rights should succeed."""
    async with connect(url=sandbox_v2, admin=True) as conn:
        allocation = await conn.allocate_party()
        rights = [ActAs(allocation.party), ReadAs(allocation.party), Admin]
        await conn.create_user(User("testuser2", allocation.party), rights)
| 1.875 | 2 |
shazi.py | wholesomegarden/WhatsappReminder | 1 | 12762746 | #shazi.py
from ShazamAPI import Shazam
from threading import Thread
import traceback
from pydub import AudioSegment
import time
# class Shazi(object):
def shazam(mp3path, outDict = None, checkFull = False):
    """Non-blocking lookup of title, artist and other Shazam data for a file.

    Spawns a background thread running shazamAsync(); the returned dict is
    filled in asynchronously ("out" with the raw response, plus "title" and
    "artist" when a track is matched).

    FIX: the description above was previously a stray module-level string
    (a no-op statement) with a "None-blocking" typo; it is now the actual
    docstring of this function.

    :param mp3path: path to the mp3 file to identify
    :param outDict: optional dict to receive the results; a fresh
        {"out": None} dict is created (and returned) when omitted
    :param checkFull: when True the worker recognizes the raw file bytes
        instead of a re-exported, trimmed snippet
    :return: the result dict (initially {"out": None})
    """
    if outDict is None:
        outDict = {"out":None}
    # The worker mutates outDict in place; callers poll it for results.
    sT = Thread(target=shazamAsync,args=[[mp3path, outDict, checkFull]])
    sT.start()
    return outDict
def shazamAsync(data, round = 0):
    """Blocking worker behind shazam(): runs the Shazam recognition.

    ``data`` is a 3-item list [mp3path, outDict, checkFull].  On success the
    shared ``outDict`` receives the raw response under "out" and, when a track
    was matched, "title" and "artist" keys.  All exceptions are printed and
    swallowed so the worker thread never propagates errors.

    NOTE(review): the ``round`` parameter is never used and shadows the
    builtin of the same name -- confirm it can be removed.
    """
    print('''%%%%%%%%%%% SHAZAMMING %%%%%%%%%%%''')
    print('''%%%%%%%%%%% SHAZAMMING %%%%%%%%%%%''')
    print('''%%%%%%%%%%% SHAZAMMING %%%%%%%%%%%''')
    # Start time, used only for the elapsed-time print at the end.
    t = time.time()
    try:
        mp3path, outDict, checkFull = data
        if checkFull:
            # Recognize the raw file bytes as-is.
            mp3_file_content_to_recognize = open(mp3path, 'rb').read()
        else:
            # Re-export via pydub, then trim to a short snippet.
            audio = AudioSegment.from_mp3(mp3path)
            mp3_file_content_to_recognize = audio.export(format="mp3").read()
            start = 0
            seconds = 1.2
            length = len(audio)
            # NOTE(review): ``length`` is in milliseconds (pydub) but is
            # compared against ``seconds`` (seconds), and the slice below
            # indexes *bytes* of the exported mp3 with minute-scaled offsets
            # -- the units look inconsistent; confirm intended behavior
            # before changing anything here.
            if length > 0:
                if length > seconds:
                    seconds = seconds
                else:
                    seconds = length/1000
            mp3_file_content_to_recognize = mp3_file_content_to_recognize[start*60*1000:int((start+seconds)*60*1000)]
        # shazam = Shazam(mp3_file_content_to_recognize)
        outDict["out"] = next(Shazam(mp3_file_content_to_recognize).recognizeSong())
        # recognize_generator = shazam.recognizeSong()
        # outDict["out"] = next(recognize_generator)
        if outDict is not None:
            firstRes = None
            try:
                # NOTE(review): this always prints None (firstRes was just
                # set above) -- likely leftover debug output.
                print(firstRes)
                firstRes = outDict["out"][1]["track"]
            except:
                print("EEEEE SHAZAM COULD NOT FIND SONG")
                traceback.print_exc()
            # Only publish title/artist when the response had both fields.
            if firstRes is not None and "title" in firstRes and "subtitle" in firstRes:
                outDict["title"] = firstRes["title"]
                outDict["artist"] = firstRes["subtitle"]
                print(outDict["title"] + " - " + outDict["artist"])
        print('''%%%%%%%%%%% DONE! %%%%%%%%%%%''', "time",time.time()-t)
        # while True:
        #     print(next(recognize_generator)) # current offset & shazam response to recognize requests1
    except:
        traceback.print_exc()
| 3.078125 | 3 |
simulation.py | fugufisch/wholecell | 0 | 12762747 | <reponame>fugufisch/wholecell<gh_stars>0
import state
class Simulation(object):
    """
    Model simulation class
    - Runs simulations
    - Stores and loads simulation data
    """
    def __init__(self, processes, states, steps):
        """
        Sets up simulation and links processes and states.
        :param processes: list of process specification dicts (each with an "ID" key)
        :param states: list of state specification dicts (each with an "ID" key)
        :param steps: number of simulation steps to run (int)
        :return: None
        """
        super(Simulation, self).__init__()
        self.__processes = processes
        self.__states = states
        self.steps = steps
        # Replace the specification lists with dicts of instantiated
        # objects keyed by wholeCellId.
        self._construct_states()
        self._construct_processes()
    def get_state(self, id):
        """
        Get the state object 'id'.
        :param id: wholeCellId
        :return: state object
        """
        assert isinstance(id, str)
        return self.__states[id]
    def get_process(self, id):
        """
        Get the process object 'id'.
        :param id: wholeCellId
        :return: process object
        """
        assert isinstance(id, str)
        return self.__processes[id]
    def evolve_state(self):
        """
        Simulate the next step.
        :rtype : tuple
        :return: (requirements, usages) -- what the processes need and what
                 was used during this step
        """
        requirements = []  # what processes need
        usages = []  # what was used in this step
        # BUG FIX: after _construct_processes, __processes is a dict, so
        # iterate its values -- iterating the dict itself yields ID strings
        # which have no copy_from_state/copy_to_state methods.
        for p in self.__processes.values():
            p.copy_from_state()
            p.copy_to_state()
        return (requirements, usages)
    def run(self, loggers):
        """
        Run and log the simulation.
        :param loggers: loggers for recording output (currently unused)
        :return: None
        """
        metabolite = self.__states["metabolite"]
        # BUG FIX: use range() (xrange does not exist on Python 3) and
        # actually *call* evolve_state -- the original unpacked the bound
        # method object itself, which raised a TypeError.
        for step in range(self.steps):
            req, usages = self.evolve_state()
            metabolite.requirements = req
            metabolite.usages = usages
    def _construct_states(self):
        """
        Instantiate state objects according to the specification.
        :return: None
        """
        state_objects = {}
        for s in self.__states:
            # Dynamically import state.<id> and look up the class named <ID>.
            package_name = "state.{0}".format(s["ID"].lower())
            state_package = __import__(package_name)
            state_module = getattr(state_package, s["ID"].lower())
            state_name = getattr(state_module, s["ID"])
            state_objects[s["ID"]] = state_name(s)
        self.__states = state_objects
    def _construct_processes(self):
        """
        Instantiate process objects according to the specification.
        :return: None
        """
        process_objects = {}
        for s in self.__processes:
            # Dynamically import process.<id> and look up the class named <ID>.
            package_name = "process.{0}".format(s["ID"].lower())
            process_package = __import__(package_name)
            process_module = getattr(process_package, s["ID"].lower())
            process_name = getattr(process_module, s["ID"])
            process_objects[s["ID"]] = process_name(s)
        self.__processes = process_objects
| 3.046875 | 3 |
optidrift/model.py | nicolet5/publicoptidrift | 1 | 12762748 | import os
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from sklearn import preprocessing, svm
from lassofeatsel import Lasso_wrapper, edit_features
#####################
# Wrapper Function
#####################
def model_exploration(df, obj):
    """This function is the wrapper function of changing time slices for
    training, validation, and testing sets. It will perform lasso on the
    training data, allow features to be edited, build a model, and test
    the model. Then it will ask if the user would like to explore different
    time slices - this is useful in finding the optimum amount of data
    necessary to build an adequate model. This takes the entire dataframe
    (df) and the sensor to build a model for (obj)"""
    # Interactive loop: every prompt below reads from stdin via input(), and
    # all plots are shown with blocking plt.show() calls.
    see_another_set = 'y'
    while see_another_set == 'y':
        # this while loop is so we don't have to load and reclean etc every
        # time we want to see a different timeslice of the data
        train_months_start = input('Input the start date of training data: ')
        train_months_end = input('Input the end date of training data: ')
        val_months_start = input('Input the start date of validation data: ')
        val_months_end = input('Input the end date of validation data: ')
        train = df[train_months_start: train_months_end]
        # Training dataframe
        val_set = df[val_months_start: val_months_end]
        # Testing (Validation set)
        feat_mo_og = Lasso_wrapper(val_set, train, obj, 0.1)
        # get features from lasso, with an initial alpha value of 0.1
        # this alpha can be changed by the user during the lasso_wrapper
        # function
        features = edit_features(feat_mo_og, train)
        # this allows the user to change features that don't make sense
        # df_val and df_test might have some NaN values in them for the
        # features selected by LASSO- clean those out
        # val_set = val_set.dropna(subset = features)
        df_val, savepickleas = build_model(train, val_set, obj, features)
        # (ability to catch out of calibration)
        # plot the train, validation:
        fig2 = plt.figure(figsize=(20, 10), facecolor='w', edgecolor='k')
        plt.subplot(211)
        myplot2 = plt.scatter(
            df_val.index,
            df_val[obj],
            color='red',
            label='val data-actual')
        plt.scatter(
            df_val.index,
            df_val.Predicted,
            color='blue',
            label='val data-model',
            alpha=0.5)
        plt.scatter(train.index, train[obj], color='green', label='train data')
        plt.ylabel(obj, fontsize=16)
        plt.xlabel('Index', fontsize=16)
        plt.title('Training, Validation, and Test Model of ' + obj,
                  fontsize=28)
        plt.legend(fontsize=16)
        plt.xlim()
        # plot the absolute error between the model and the test data
        # this is the metric that would be used to "raise an alarm" if sensor
        # begins to drift
        allow_error = input(
            'Please input the allowable error in ' +
            'this sensor (|predicted - actual|): ')
        # this allows the user to set the amount of drift that is acceptable
        # before an alarm should be raised
        plt.subplot(212)
        myplot3 = plt.plot(
            df_val.index,
            df_val['Absolute Error'],
            color='green')
        plt.axhline(y=int(allow_error), color='red', linestyle='dashed',
                    label='Allowable Error')
        plt.ylabel('Absolute Error (sensor dependent unit)', fontsize=16)
        plt.xlabel('Index', fontsize=16)
        plt.legend(fontsize=16)
        plt.show()
        test_yn = input(
            'Would you like to test the model on the month ' +
            'subsequent to the validation data? If that data' +
            ' is not available in the folder, answer "n" (y/n): ')
        if test_yn == 'n':
            # The bare ``None`` below is a no-op placeholder statement
            # (equivalent to ``pass``).
            None
        else:
            test_initial_start = val_set.index[-1] + timedelta(hours=1)
            test_initial_end = val_set.index[-1] + timedelta(days=30)
            # want the first set of testing data to be after the
            # set validation date range
            # subsequent test sets will be after the training data
            df_test = retest_model(
                savepickleas,
                features,
                df,
                obj,
                test_initial_start,
                test_initial_end)
            # this is testing the model on the test dates - using the
            # test_initial_start and the test_initial_end
            # then we plot the test,train, and validation dataframes:
            plt.figure(figsize=(20, 10), facecolor='w', edgecolor='k')
            plt.subplot(211)
            myplot2 = plt.scatter(
                df_val.index,
                df_val[obj],
                color='red',
                label='val data-actual')
            plt.scatter(
                df_val.index,
                df_val.Predicted,
                color='blue',
                label='val data-model',
                alpha=0.5)
            plt.scatter(
                df_test.index,
                df_test[obj],
                color='purple',
                label='test data-actual',
                alpha=0.5)
            plt.scatter(
                df_test.index,
                df_test.Predicted,
                color='yellow',
                label='test data-model',
                alpha=0.5)
            plt.scatter(
                train.index,
                train[obj],
                color='green',
                label='train data',
                alpha=0.5)
            plt.ylabel(obj, fontsize=16)
            plt.xlabel('Index', fontsize=16)
            plt.title('Training, Validation, and Test Model of ' + obj,
                      fontsize=28)
            plt.legend(fontsize=16)
            plt.xlim()
            plt.subplot(212)
            myplot3 = plt.plot(
                df_test.index,
                df_test['Absolute Error'],
                color='green')
            plt.axhline(y=int(allow_error), color='red', linestyle='dashed',
                        label='Allowable Error')
            plt.ylabel('Absolute Error (sensor dependent unit)', fontsize=16)
            plt.xlabel('Index', fontsize=16)
            plt.legend(fontsize=16)
            plt.show()
            y_n = input(
                'Would you like to remove the out-of-calibration data from ' +
                'the training set, re-train, and predict the ' +
                'following month? (y/n):')
            # if the answer is 'y', this while loop starts, removing data.
            while y_n == 'y':
                df_train_raw = pd.concat([train, df_test])
                df_test = df_test[df_test['Absolute Error'] < int(allow_error)]
                # adding the df_test section where the sensor error is below
                # the allowable error
                add_train = df[df.index.isin(df_test.index)]
                train = pd.concat([train, add_train])
                # adding the "in calibration" data to the training dataframe
                plt.figure(figsize=(20, 4), facecolor='w', edgecolor='k')
                plt.scatter(
                    train.index,
                    train[obj],
                    color='green',
                    label='train data')
                plt.show()
                y_n2 = input(
                    'Is there a date range you would like to add ' +
                    'back in? (y/n): ')
                # this allows the user to add back in any date ranges
                # that were removed because they were above the
                # allowable sensor error.
                # this could probably be streamlined to have the date
                # ranges not removed before the user gives input,
                # since it's easier to see if you want to keep any
                # ranges while you can see them, before they
                # are removed.
                while y_n2 == 'y':
                    start = input('Input the start date: ')
                    end = input('Input the end date: ')
                    add_train2 = df[start:end]
                    train = pd.concat([train, add_train2])
                    train = train.sort_index()
                    plt.figure(figsize=(20, 4), facecolor='w', edgecolor='k')
                    plt.scatter(
                        train.index,
                        train[obj],
                        color='green',
                        label='train data')
                    plt.show()
                    y_n2 = input('Another date range? (y/n): ')
                    if y_n2 == 'n':
                        pass
                    # NOTE(review): ``y_n2 != 'y' or 'n'`` is always truthy
                    # (the string 'n' is non-empty), so this elif breaks the
                    # loop for *every* answer other than 'n' -- including 'y',
                    # which means the loop can never actually repeat. Confirm
                    # the intended condition.
                    elif y_n2 != 'y' or 'n':
                        break
                # now we are setting the new test set to thirty days
                # after the training set
                test_nmodel_start = df_train_raw.index[-1] + timedelta(hours=1)
                test_nmodel_end = df_train_raw.index[-1] + timedelta(days=30)
                # leave val set as the same one inputted at first
                feat_mo_og = Lasso_wrapper(val_set, train, obj, 0.1)
                # get the features from LASSO
                features = edit_features(feat_mo_og, train)
                # give the user the option to edit those features from LASSO
                df_val, savepickleas = build_model(
                    train, val_set, obj, features)
                # building the model based off of the training data and those
                # edited features
                df_test = retest_model(
                    savepickleas,
                    features,
                    df,
                    obj,
                    test_nmodel_start,
                    test_nmodel_end)
                # this is testing the model on the test data
                # set bound by test_nmodel_start
                # and test_nmodel_end
                # now we plot the train and test data sets
                plt.figure(figsize=(20, 10), facecolor='w', edgecolor='k')
                plt.subplot(211)
                myplot2 = plt.scatter(
                    df_val.index,
                    df_val[obj],
                    color='red',
                    label='val data-actual')
                plt.scatter(
                    df_val.index,
                    df_val.Predicted,
                    color='blue',
                    label='val data-model',
                    alpha=0.5)
                plt.scatter(
                    df_test.index,
                    df_test[obj],
                    color='purple',
                    label='test data-actual',
                    alpha=0.5)
                plt.scatter(
                    df_test.index,
                    df_test.Predicted,
                    color='yellow',
                    label='test data-model',
                    alpha=0.5)
                plt.scatter(
                    train.index,
                    train[obj],
                    color='green',
                    label='train data',
                    alpha=0.5)
                plt.ylabel(obj, fontsize=16)
                plt.xlabel('Index', fontsize=16)
                plt.title('Training and Testing Model of ' + obj,
                          fontsize=28)
                plt.legend(fontsize=16)
                plt.xlim()
                plt.subplot(212)
                myplot3 = plt.plot(
                    df_test.index,
                    df_test['Absolute Error'],
                    color='green')
                plt.axhline(
                    y=int(allow_error),
                    color='red',
                    linestyle='dashed',
                    label='Allowable Error')
                plt.ylabel(
                    'Absolute Error (sensor dependent unit)',
                    fontsize=16)
                plt.xlabel('Index', fontsize=16)
                plt.legend(fontsize=16)
                plt.show()
                # asking if we would like to repeat, adding on another month
                # of training data and retesting on the next month.
                # can only do this if there is enough data in the
                # given data folder.
                y_n = input('Would you like to repeat? (y/n):')
                if y_n == 'n':
                    pass
        # this is if you want to change where the initial
        # training and validation
        # is - the second and third questions that pop up when the code is ran.
        see_another_set = input(
            'Would you like to see another set of '
            + 'training/validation/testing data? (y/n): ')
#####################
# Component Functions
#####################
def build_model(train, val_set, obj, features):
    """This function takes a train and validation set (train, val_set),
    which are both data frames, builds an SVR model for the
    sensor of interest (obj - a string) using the given
    features (features - a list of strings) and pickles it.
    This returns the validation dataframe with the errors
    and the filename the model was pickled as."""
    # Drop rows missing any model feature -- they cannot be fit or scored.
    val_set = val_set.dropna(subset=features)
    train = train.dropna(subset=features)
    # set the train and val y values - which is the thing
    # we are trying to predict.
    train_y = train[obj]
    val_y = val_set[obj]
    # the train and val _x are the features used to predict
    # the _y
    train_x = train[features]
    val_x = val_set[features]
    # have to normalize the features by l1
    train_x_scaled = preprocessing.normalize(train_x, norm='l1')
    val_x_scaled = preprocessing.normalize(val_x, norm='l1')
    # gather the filname to save the pickled model as, so
    # it can be reloaded and referenced later.
    savepickleas = input(
        'Input the model name to save this as (example.sav): ')
    filenamesaveas = 'svr_model' + savepickleas
    # Change path to save sav files
    # NOTE(review): these chdir calls permanently change the process working
    # directory as a side effect -- callers (and retest_model) rely on it.
    os.chdir(os.path.abspath(os.path.join(os.getcwd(), '..')))
    os.chdir(os.getcwd() + '/saved_models')
    # checks to see if the savepickle as file already exists or not
    # and asks if we should overwrite it if it does - or gives the
    # user the option to use a different .sav filename.
    if os.path.isfile(savepickleas):
        print('There is already a model for this!')
        rewrite = input('Would you like to overwrite the file? (y/n): ')
        if rewrite == 'y':
            # this is where the linear SVR model for the
            # sensor (train_y) is being built based off of the
            # features (train_x)
            lin_svr = svm.LinearSVR().fit(train_x, train_y)
            # then we can use that lin_svr to predict the
            # train and val sets based off of the scaled features
            # NOTE(review): the model is fit on *unscaled* train_x while
            # these predictions use the l1-normalized copies, and
            # trainpred/valpred (and ``filename``) are never used afterwards
            # -- confirm whether scaling was meant to be applied throughout.
            trainpred = lin_svr.predict(train_x_scaled)
            valpred = lin_svr.predict(val_x_scaled)
            filename = filenamesaveas
            # then we pickle the model:
            pickle.dump(lin_svr, open(savepickleas, 'wb'))
        else:
            # this is the same as above - just would be a different
            # filename
            savepickleas_new = input(
                'Input a different name to save this as (example.sav): ')
            filenamesaveas_new = 'svr_model' + savepickleas_new
            lin_svr = svm.LinearSVR().fit(train_x, train_y)
            trainpred = lin_svr.predict(train_x_scaled)
            valpred = lin_svr.predict(val_x_scaled)
            filename = filenamesaveas_new
            pickle.dump(lin_svr, open(savepickleas_new, 'wb'))
            # this could be changed to overwrite the file
    else:
        # this is the same as above - just ran when there
        # is no previous file with the same name.
        lin_svr = svm.LinearSVR().fit(train_x, train_y)
        trainpred = lin_svr.predict(train_x_scaled)
        valpred = lin_svr.predict(val_x_scaled)
        filename = filenamesaveas
        pickle.dump(lin_svr, open(savepickleas, 'wb'))
    # Should be reducing the number of things we need to type in.
    # If only focusing on continuous real-time training, the
    # model will never be reused anyway.
    # Calls the pickled model
    # NOTE(review): when the user chose a *new* filename above, the model was
    # dumped to savepickleas_new, but this reloads (and the function returns)
    # the original savepickleas -- i.e. the OLD model file. Confirm intent.
    loaded_model = pickle.load(open(savepickleas, 'rb'))
    predict = loaded_model.predict(val_x)
    # predicting the validation set.
    result = loaded_model.score(val_x, val_y)
    # the model score is an R^2 value.
    print('the model score is: ' + str(result))
    df_val = pd.DataFrame(val_y)
    df_val['Predicted'] = predict
    df_val['Error'] = (abs(df_val['Predicted'] - df_val[obj])
                       ) / abs(df_val[obj])
    df_val['Absolute Error'] = abs(df_val['Predicted'] - df_val[obj])
    print('the mean absolute error is: ' +
          str(df_val['Absolute Error'].mean()))
    return df_val, savepickleas
def retest_model(
        savepickleas,
        features,
        df,
        obj,
        test_model_start,
        test_model_end):
    """Re-evaluate a previously pickled model on a new slice of the data.

    Use this function to see if the model retains accuracy when the sensor
    begins to drift -- the data in the test window may or may not still be
    calibrated.

    :param savepickleas: filename of the pickled model (.sav) to load
    :param features: list of feature column names the model was trained on
    :param df: full DataFrame to slice the test window out of
    :param obj: name of the sensor column being predicted
    :param test_model_start: start label/timestamp of the test window
    :param test_model_end: end label/timestamp of the test window
    :return: DataFrame with the actual values plus 'Predicted', 'Error'
        (relative) and 'Absolute Error' columns
    """
    df_test = df[test_model_start: test_model_end]
    # Rows with NaN in any model feature cannot be scored -- drop them.
    df_test = df_test.dropna(subset=features)
    test_y = df_test[obj]
    test_x = df_test[features]
    # BUG FIX: load the pickle inside a context manager so the file handle
    # is closed (the original leaked it via pickle.load(open(...))).
    with open(savepickleas, 'rb') as model_file:
        loaded_model = pickle.load(model_file)
    # Predict the test window from its features with the loaded model.
    predict = loaded_model.predict(test_x)
    df_test = pd.DataFrame(test_y)
    df_test['Predicted'] = predict
    df_test['Error'] = (
        abs(df_test['Predicted'] - df_test[obj])) / abs(df_test[obj])
    df_test['Absolute Error'] = abs(df_test['Predicted'] - df_test[obj])
    # calculate the absolute error.
    return df_test
| 3.203125 | 3 |
sapmon/payload/netweaver/soapclient.py | rsponholtz/AzureMonitorForSAPSolutions | 36 | 12762749 | <filename>sapmon/payload/netweaver/soapclient.py
# Python modules
import json
import logging
from datetime import datetime, timedelta, timezone
from time import time
from typing import Any, Callable
import re
import requests
from requests import Session
from threading import Lock
# SOAP Client modules
from zeep import Client
from zeep import helpers
from zeep.transports import Transport
from zeep.exceptions import Fault
# Payload modules
from helper.tools import *
from netweaver.metricclientfactory import NetWeaverSoapClientBase
# Suppress SSLError warning due to missing SAP server certificate
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# timeout to use for all SOAP WSDL fetch and other API calls
SOAP_API_TIMEOUT_SECS = 5
########
# implementation for the NetWeaverSoapClientBase abstract class.
# concrete implementation that initializes a SOAP client based on a WSDL URL to the same instance.
########
class NetWeaverSoapClient(NetWeaverSoapClientBase):
    def __init__(self,
                 tracer: logging.Logger,
                 logTag: str,
                 sapSid: str,
                 sapHostName: str,
                 sapSubdomain: str,
                 httpProtocol: str,
                 httpPort: int):
        # validate required connection parameters up front so we fail fast
        # with a descriptive message instead of a confusing zeep error later
        if not sapHostName or not httpProtocol or not httpPort:
            raise Exception("%s cannot create client with empty SID, hostname, httpProtocol, or port (%s|%s|%s|%s)" % \
                (logTag, sapSid, sapHostName, httpProtocol, httpPort))
        httpProtocol = httpProtocol.lower()
        if httpProtocol != "http" and httpProtocol != "https":
            raise Exception("%s httpProtocol %s is not valid for hostname: %s, port: %s" % \
                (logTag, httpProtocol, sapHostName, httpPort))
        self.tracer = tracer
        self.sapSid = sapSid
        self.wsdlUrl = NetWeaverSoapClient._getFullyQualifiedWsdl(sapHostName, sapSubdomain, httpProtocol, httpPort)
        # fetch WSDL URL to initialize internal SOAP API client
        self.client = self._initSoapClient(logTag=logTag)
    #####
    # public property getter methods
    #####
    """
    fully qualified WSDL url that was used to initialize this SOAP client
    """
    @property
    def Wsdl(self) -> str:
        return self.wsdlUrl
    ##########
    # public methods for NetWeaverSoapClientBase abstract base class interface
    ##########
    """
    invoke GetSystemInstanceList SOAP API - returns list of metadata for all server instances in SAP system,
    including availability status and supported features/functions
    """
    def getSystemInstanceList(self, logTag: str) -> list:
        apiName = 'GetSystemInstanceList'
        result = self._callSoapApi(apiName, logTag)
        return NetWeaverSoapClient._parseResults(result)
    """
    invoke GetProcessList SOAP API - metrics for availability of SAP services running on all machines in SAP system
    applies to all instances within SAP system
    """
    def getProcessList(self, logTag: str) -> list:
        apiName = 'GetProcessList'
        result = self._callSoapApi(apiName, logTag)
        return NetWeaverSoapClient._parseResults(result)
    """
    invoke ABAPGetWPTable SOAP API - metrics for active ABAP worker processes
    applies to hosts with features: ABAP
    """
    def getAbapWorkerProcessTable(self, logTag: str) -> list:
        apiName = 'ABAPGetWPTable'
        result = self._callSoapApi(apiName, logTag)
        return NetWeaverSoapClient._parseResults(result)
    """
    invoke GetQueueStatistic SOAP API - metrics for application server worker process queues
    applies to hosts with features: ABAP, J2EE, JEE
    """
    def getQueueStatistic(self, logTag: str) -> list:
        apiName = 'GetQueueStatistic'
        result = self._callSoapApi(apiName, logTag)
        return NetWeaverSoapClient._parseResults(result)
    """
    invoke EnqGetStatistic SOAP API - metrics from ENQUE server around enqueue lock statistics
    applies to hosts with features: ENQUE
    """
    def getEnqueueServerStatistic(self, logTag: str) -> list:
        apiName = 'EnqGetStatistic'
        result = self._callSoapApi(apiName, logTag)
        # this API returns a single result object (not a list), hence the
        # singular _parseResult which wraps it in a one-element list
        return NetWeaverSoapClient._parseResult(result)
    """
    invoke GetEnvironment SOAP API - host details from SAP instance
    used for mapping all hosts with azure resource id
    """
    def getEnvironment(self, logTag: str) -> list:
        apiName = 'GetEnvironment'
        result = self._callSoapApi(apiName, logTag)
        return NetWeaverSoapClient._parseResults(result)
    ##########
    # private static helper methods
    ##########
    """
    create fully qualified domain name of format {hostname}[.{subdomain}]
    """
    @staticmethod
    def _getFullyQualifiedDomainName(hostname: str, subdomain: str) -> str:
        if subdomain:
            return hostname + "." + subdomain
        else:
            return hostname
    """
    create SOAP WSDL url with fully qualified domain name and the specified protocol+port
    """
    @staticmethod
    def _getFullyQualifiedWsdl(hostname: str,
                               subdomain: str,
                               httpProtocol: str,
                               httpPort: int) -> str:
        fqdn = NetWeaverSoapClient._getFullyQualifiedDomainName(hostname, subdomain).lower()
        return '%s://%s:%d/?wsdl' % (httpProtocol, fqdn, httpPort)
    """
    per SAP documentation, return default HTTP port of form 5XX13, where XX is the SAP Instance Number
    """
    @staticmethod
    def _getHttpPortFromInstanceNr(instanceNr: str) -> str:
        return '5%s13' % str(instanceNr).zfill(2)
    """
    per SAP documentation, return default HTTPS port of form 5XX14, where XX is the SAP Instance Number
    """
    @staticmethod
    def _getHttpsPortFromInstanceNr(instanceNr: str) -> str:
        return '5%s14' % str(instanceNr).zfill(2)
    """
    helper method to deserialize a LIST of zeep SOAP API results and return as list of python dictionary objects
    """
    @staticmethod
    def _parseResults(results: list) -> list:
        return helpers.serialize_object(results, dict)
    """
    helper method to deserialize a SINGLE zeep SOAP API result and return as single-element list of python dictionary objects
    """
    @staticmethod
    def _parseResult(result: object) -> list:
        return [helpers.serialize_object(result, dict)]
    ##########
    # private member methods
    ##########
    """
    private method to initialize internal SOAP API client and return the initialized client object, or throw if initialization fails
    """
    def _initSoapClient(self, logTag: str) -> Client:
        self.tracer.info("%s begin initialize SOAP client for wsdl: %s", logTag, self.wsdlUrl)
        startTime = time()
        client = None
        try:
            session = Session()
            # TLS certificate validation is intentionally disabled (see the
            # note at the top of the module about missing SAP server certs)
            session.verify = False
            client = Client(self.wsdlUrl, transport=Transport(session=session, timeout=SOAP_API_TIMEOUT_SECS, operation_timeout=SOAP_API_TIMEOUT_SECS))
            self.tracer.info("%s initialize SOAP client SUCCESS for wsdl: %s [%d ms]",
                             logTag, self.wsdlUrl, TimeUtils.getElapsedMilliseconds(startTime))
            return client
        except Exception as e:
            self.tracer.error("%s initialize SOAP client ERROR for wsdl: %s [%d ms] %s",
                              logTag, self.wsdlUrl, TimeUtils.getElapsedMilliseconds(startTime), e, exc_info=True)
            raise e
    """
    reflect against internal SOAP API client and return flag indicating if specified API name exists
    """
    def _isSoapApiDefined(self, apiName: str) -> bool:
        # any failure to resolve the operation name on the zeep service proxy
        # is treated as "not defined" (broad except is deliberate here)
        try:
            method = getattr(self.client.service, apiName)
            return True
        except Exception as e:
            return False
    """
    verify against wsdl that the specified SOAP API is defined for the current client,
    and if so we will attempt to call it and return the result
    """
    def _callSoapApi(self, apiName: str, logTag: str) -> str:
        if (not self._isSoapApiDefined(apiName)):
            # NOTE(review): logging-style args passed to Exception() are never
            # %-formatted into the message -- consider "%" interpolation here.
            raise Exception("%s SOAP API not defined: %s, wsdl: %s", logTag, apiName, self.wsdlUrl)
        self.tracer.info("%s SOAP API executing: %s, wsdl: %s", logTag, apiName, self.wsdlUrl)
        startTime = time()
        try:
            method = getattr(self.client.service, apiName)
            result = method()
            self.tracer.info("%s SOAP API success for %s, wsdl: %s [%d ms]",
                             logTag, apiName, self.wsdlUrl, TimeUtils.getElapsedMilliseconds(startTime))
            return result
        except Exception as e:
            self.tracer.error("%s SOAP API error for %s, wsdl: %s [%d ms] %s",
                              logTag, apiName, self.wsdlUrl, TimeUtils.getElapsedMilliseconds(startTime), e, exc_info=True)
            raise e
2020-11/src/201403624.py | ivanLM2310/CoronavirusML_ant | 2 | 12762750 | <reponame>ivanLM2310/CoronavirusML_ant
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
import numpy as np
import random
import pandas as pd
# ---------------------------------------------------------------------------
# Fit a simple linear regression to the Guatemala COVID-19 death counts and
# plot the data together with the fitted line.
# (NOTE: despite the chart title, the model fitted below is plain linear,
# not polynomial.)
# ---------------------------------------------------------------------------
data = pd.read_csv('201403624_data.csv')
# Column 0 is the day number, column 2 the cumulative death count;
# scikit-learn expects 2-D arrays, hence the reshape.
X = data.iloc[:, 0].values.reshape(-1, 1)
Y = data.iloc[:, 2].values.reshape(-1, 1)
linear_regressor = LinearRegression()
linear_regressor.fit(X, Y)
Y_pred = linear_regressor.predict(X)
x_new_min = 0.0
x_new_max = 250.0
plt.xlim(x_new_min, x_new_max)
plt.ylim(0, 100)
title = 'Number of deaths in Guatemala\n'+'Trained Model : Y = ' + str(linear_regressor.coef_[0][0]) + 'X+' + str(linear_regressor.intercept_[0])
plt.title("Polynomial Linear Regression using scikit-learn and python3 \n" + title, fontsize=10)
plt.xlabel('Days')
plt.ylabel('Total of deaths')
# BUG FIX: label each artist explicitly. The original passed the labels
# positionally to plt.legend(), which assigned 'Linear Regression' to the
# scatter (the raw data) and 'Data' to the fitted line -- i.e. swapped.
plt.scatter(X, Y, label='Data')
plt.plot(X, Y_pred, color='cyan', label='Linear Regression')
plt.legend(loc='upper right')
plt.savefig("201403624_img1.png", bbox_inches='tight')
plt.show()
| 2.78125 | 3 |