hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9cc815b9d439805f37f2bd58858a3f7458d191de | 8,155 | py | Python | main.py | boxanm/CityRunHeatMaps | 8df90b86edab7e530c463000067f569f8e657ccf | [
"MIT"
] | 1 | 2019-09-14T06:58:36.000Z | 2019-09-14T06:58:36.000Z | main.py | boxanm/CityRunHeatMaps | 8df90b86edab7e530c463000067f569f8e657ccf | [
"MIT"
] | null | null | null | main.py | boxanm/CityRunHeatMaps | 8df90b86edab7e530c463000067f569f8e657ccf | [
"MIT"
] | null | null | null | import os.path
import datetime
import gpxpy
import osmnx
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import functions as functions
from collections import Counter, OrderedDict
import time
import math
import argparse
from customClasses import Cell
# Command line switches controlling what gets downloaded/saved.
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--activities', help='download new activities from garmin connect', action='store_true')
parser.add_argument('-d', '--downloadMap', help='download map', action='store_true')
parser.add_argument('-s', '--saveMap', help='save map', action='store_true')
parser.add_argument('-f', '--file', help='load existing .graphml file')
args = parser.parse_args()
# Directory holding the GPX activity files to aggregate.
path_gpx = 'data/gpx/'
# Earliest activity date of interest -- NOTE(review): not referenced later
# in this chunk; confirm before removing.
startDate = datetime.datetime(2019, 1, 21)
# Wall-clock timers: `start` for total runtime, `lap` for per-stage timing.
start = time.time()
lap = time.time()
# Colormap used to paint track segments by visit density.
cmap = plt.cm.gist_heat
grid_size = 10 #grid size in meters
# Running min/max of cell densities, updated during normalization below.
max_density = 0
min_density = 1000000
# A new plot segment starts when the color changes by more than this.
color_change_threshold = 0.2
last_color = 0
segment_len = 0
fade_size = 5
# Number of trailing points blended when fading between segments.
fade_last_segment_len = 10
# Segments shorter than this never trigger a color break.
min_segment_len = 100
# Timestamp format -- NOTE(review): unused in this chunk; confirm.
datetime_format = '%Y-%m-%dT%H:%M:%S.%fZ'
# find all gpx files
gpx_files = functions.loadFiles(path_gpx)
print("Done in: ", time.time() - lap, " s")
lap = time.time()
print("================ Extracting gps locations===================")
max_latitude = -500
min_latitude = 500
max_longitude = -500
min_longitude = 500
lon_lat_list = []
for file in gpx_files:
gpx_file = open(file,'r')
gpx = gpxpy.parse(gpx_file)
lon = []
lat = []
last_point = (0,0)
for track in gpx.tracks:
for segment in track.segments:
for gps_point in segment.points:
if(functions.haversine(last_point, (gps_point.longitude, gps_point.latitude)) > 2):#to avoid bias in heat in uphills, when you move slower, the haversine distance between two points has to be at least 2 meters
lon.append(gps_point.longitude)
lat.append(gps_point.latitude)
if(max_latitude < gps_point.latitude):
max_latitude = gps_point.latitude
if(min_latitude > gps_point.latitude):
min_latitude = gps_point.latitude
if(max_longitude < gps_point.longitude):
max_longitude = gps_point.longitude
if(min_longitude > gps_point.longitude):
min_longitude = gps_point.longitude
last_point = (gps_point.longitude, gps_point.latitude)
lon_lat_list.append((lon,lat))
print("Done in: ", time.time() - lap, " s")
lap = time.time()
loc_width = functions.haversine((max_longitude,max_latitude),(min_longitude,max_latitude))
loc_height = functions.haversine((max_longitude,max_latitude),(max_longitude,min_latitude))
num_width = loc_width / grid_size #number of cells in width
num_height = loc_height / grid_size #number of cells in height
cell_size_width = (max_longitude - min_longitude) / num_width
cell_size_height = (max_latitude - min_latitude) / num_height
print("The location's dimensions are: ",round(loc_width,3), "x" , round(loc_height,3) , " m")
print('with ', round(num_width), "x", round(num_height) ," cells in the grid")
print("and cell's sizes of: ", cell_size_width, "x" ,cell_size_height)
#load map
lap = time.time()
figure_tracks, ax1 = functions.loadOSMnxMap(file = args.file, download = args.downloadMap, saveMap = args.saveMap)
print("Done in: ", time.time() - lap, " s")
lap = time.time()
cells_width = np.arange(min_longitude, max_longitude, cell_size_width)
cells_height = np.arange(min_latitude, max_latitude, cell_size_height)
print("================ Adding to Dictionary ===================")
grid_dict = {}
for lon,lat in lon_lat_list:
for point in zip(lon,lat):
x,y = functions.naiveSearch(cells_width, cells_height, point[0], point[1])
if not (x,y) in grid_dict.keys():
grid_dict[(x,y)] = Cell(min_longitude + x*cell_size_width,min_latitude + y*cell_size_height)
grid_dict[(x,y)].addUsage()
grid_dict[(x,y)].addUsage()
print("Done in: ", time.time() - lap, " s")
lap = time.time()
print("============Neighbors adjustement============")
# Smooth each cell: density_norm becomes a weighted sum of the 8 neighbours
# (edge-adjacent cells count 1/4, diagonal cells 1/8). The cell's own
# density is not included in the sum.
for key, point in grid_dict.items():
    # renamed from `sum`, which shadowed the builtin of the same name
    weighted_sum = 0
    # (dx, dy, divisor) for the 4 edge neighbours then the 4 diagonals
    for dx, dy, weight in ((-1, 0, 4), (0, -1, 4), (1, 0, 4), (0, 1, 4),
                           (-1, -1, 8), (-1, 1, 8), (1, -1, 8), (1, 1, 8)):
        neighbour = (key[0] + dx, key[1] + dy)
        if neighbour in grid_dict:
            weighted_sum += grid_dict[neighbour].density / weight
    point.density_norm = weighted_sum
print("Done in: ", time.time() - lap, " s")
lap = time.time()
print("============Density normalization============")
counter = Counter()
for point in grid_dict.values():
counter[point.density_norm] += 1
min_density = min(min_density, point.density_norm)
max_density = max(max_density, point.density_norm)
orderedDict = OrderedDict(sorted(counter.items(), key=lambda t: t[0]))
# min_density = 1
print("Min: ", min_density, " Max:", max_density)
counter = Counter()
for point in grid_dict.values():
point.density_norm = functions.normalize(point.density_norm, min_density, max_density)
point.color = functions.logarithmAsymptotic(point.density_norm)
counter[point.density_norm] += 1
print("Done in: ", time.time() - lap, " s")
lap = time.time()
segment = [[],[]]
all_segments = [] #lon, lat, color
print("============Plotting tracks============")
for lon, lat in lon_lat_list:
for point in zip(lon,lat):
x,y = functions.naiveSearch(cells_width, cells_height, point[0], point[1])
if((x,y) not in grid_dict.keys()):
print("unknown point")
color = grid_dict[(x,y)].color
if(abs(color - last_color) > color_change_threshold and segment_len > min_segment_len): #change of density
all_segments.append([segment[0][:-fade_last_segment_len],segment[1][:-fade_last_segment_len],last_color])
#fade into a new segment
end_last_segment = (segment[0][-fade_last_segment_len-1:],segment[1][-fade_last_segment_len-1:])
end_last_segment[0].append(point[0])
end_last_segment[1].append(point[1])
color_change = (last_color - color)/len(end_last_segment[0])
for i in range(len(end_last_segment[0])-1):
all_segments.append([[end_last_segment[0][i],end_last_segment[0][i+1]],[end_last_segment[1][i],end_last_segment[1][i+1]],
last_color - (i+1)*color_change])
last_point = (segment[0][-1],segment[1][-1])
segment[0] = []
segment[1] = []
segment_len = 0
last_color = color
segment_len+=1
segment[0].append(point[0])
segment[1].append(point[1])
all_segments.append([segment[0], segment[1], last_color])
segment_len = 0
segment[0] = []
segment[1] = []
all_segments.sort(key = lambda x: x[2])
for segment in all_segments:
plt.plot(segment[0], segment[1], color = cmap(segment[2]), linewidth = 7)
print("Done in: ", time.time() - lap, " s")
lap = time.time()
print("============Plotting density histogram============")
figure_histogram = plt.figure(2)
orderedDict2 = OrderedDict(sorted(counter.items(), key=lambda t: t[0]))
plt.bar(orderedDict.keys(), orderedDict.values(), color = 'red')
figure_histogram.show()
print("============Plotting color distribution============")
figure_function = plt.figure(3)
plot_list = [functions.logarithmAsymptotic(x) for x in orderedDict2.keys()]
plt.scatter(orderedDict2.keys(), plot_list)
figure_function.show()
print("====================Total time: ", time.time() - start, " s==========================")
plt.show()
| 36.900452 | 225 | 0.643164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,198 | 0.146904 |
9cc8a27b231af691d3fbf8c4702b4f91f01e88a2 | 6,728 | py | Python | scripts/ftp_mar_data.py | SmithB/SMBcorr | 7c35cf8383058986fd82b28beab3c5580f9f8667 | [
"MIT"
] | null | null | null | scripts/ftp_mar_data.py | SmithB/SMBcorr | 7c35cf8383058986fd82b28beab3c5580f9f8667 | [
"MIT"
] | null | null | null | scripts/ftp_mar_data.py | SmithB/SMBcorr | 7c35cf8383058986fd82b28beab3c5580f9f8667 | [
"MIT"
] | 1 | 2020-08-06T19:48:52.000Z | 2020-08-06T19:48:52.000Z | #!/usr/bin/env python
u"""
ftp_mar_data.py
Written by Tyler Sutterley (05/2020)
Syncs MAR regional climate outputs for a given ftp url
ftp://ftp.climato.be/fettweis
CALLING SEQUENCE:
python ftp_mar_data.py --directory=<path> <ftp://url>
INPUTS:
full ftp url
COMMAND LINE OPTIONS:
--help: list the command line options
-P X, --np=X: Run in parallel with X number of processes
-D X, --directory=X: full path to working data directory
-Y X, --year=X: Reduce files to years of interest
-C, --clobber: Overwrite existing data in transfer
-M X, --mode=X: Local permissions mode of the directories and files synced
UPDATE HISTORY:
Updated 05/2020: added years option to reduce list of files
Updated 11/2019: added multiprocessing option to run in parallel
Written 07/2019
"""
from __future__ import print_function
import sys
import getopt
import os
import re
import calendar, time
import ftplib
import traceback
import posixpath
import multiprocessing
if sys.version_info[0] == 2:
import urlparse
else:
import urllib.parse as urlparse
#-- PURPOSE: check internet connection
def check_connection():
    """Check that the MAR ftp host (ftp.climato.be) is reachable.

    Returns:
        True if the server accepts an anonymous login.

    Raises:
        RuntimeError: if the connection or login fails.
    """
    #-- attempt to connect to ftp host for MAR datasets
    try:
        f = ftplib.FTP('ftp.climato.be')
        f.login()
        f.voidcmd("NOOP")
    except IOError:
        raise RuntimeError('Check internet connection')
    else:
        #-- close the control connection instead of leaking it
        f.quit()
        return True
#-- PURPOSE: sync local MAR files with ftp and handle error exceptions
def ftp_mar_data(netloc, remote_file, local_file, CLOBBER=False, MODE=0o775):
    """Download a single remote MAR file over its own ftp connection.

    Designed to run inside a multiprocessing worker: errors are printed
    (with the worker pid) rather than raised, so one failed file does
    not abort the whole pool.

    Arguments:
        netloc: ftp host name
        remote_file: path of the file on the ftp server
        local_file: destination path on the local file system
        CLOBBER: overwrite existing local files
        MODE: permissions mode of the downloaded file
    """
    ftp = None
    #-- try to download the file
    try:
        #-- connect and login to MAR ftp server
        ftp = ftplib.FTP(netloc)
        ftp.login()
        ftp_mirror_file(ftp,remote_file,local_file,CLOBBER=CLOBBER,MODE=MODE)
    #-- catch Exception (not a bare except) so KeyboardInterrupt and
    #-- SystemExit can still propagate and terminate the pool worker
    except Exception:
        #-- if there has been an error exception
        #-- print the type, value, and stack trace of the
        #-- current exception being handled
        print('process id {0:d} failed'.format(os.getpid()))
        traceback.print_exc()
    finally:
        #-- always close the ftp connection if it was opened
        if ftp is not None:
            try:
                ftp.quit()
            except Exception:
                pass
#-- PURPOSE: pull file from a remote host checking if file exists locally
#-- and if the remote file is newer than the local file
def ftp_mirror_file(ftp, remote_file, local_file, CLOBBER=False, MODE=0o775):
    """Copy remote_file to local_file if it is new, updated, or CLOBBER is set.

    The remote modification time (MDTM) is compared against the local
    file's mtime and preserved on the downloaded copy, so subsequent runs
    can skip files that have not changed.
    """
    #-- MDTM reply is '213 YYYYMMDDHHMMSS'; strip the status code and
    #-- convert the timestamp into unix time
    reply = ftp.sendcmd('MDTM {0}'.format(remote_file))
    remote_mtime = calendar.timegm(time.strptime(reply[4:],"%Y%m%d%H%M%S"))
    #-- decide whether the file needs to be transferred
    needs_copy = False
    suffix = ' (clobber)'
    if not os.access(local_file, os.F_OK):
        #-- no local copy exists yet
        needs_copy = True
        suffix = ' (new)'
    elif remote_mtime > os.stat(local_file).st_mtime:
        #-- remote file is newer than the local copy
        needs_copy = True
        suffix = ' (overwrite)'
    if not (needs_copy or CLOBBER):
        return
    #-- Printing files transferred
    print('{0} -->'.format(posixpath.join('ftp://',ftp.host,remote_file)))
    print('\t{0}{1}\n'.format(local_file,suffix))
    #-- copy remote file contents to local file
    with open(local_file, 'wb') as f:
        ftp.retrbinary('RETR {0}'.format(remote_file), f.write)
    #-- keep remote modification time of file and local access time
    os.utime(local_file, (os.stat(local_file).st_atime, remote_mtime))
    os.chmod(local_file, MODE)
#-- PURPOSE: help module to describe the optional input parameters
def usage():
    """Print the command line options understood by this script."""
    script = os.path.basename(sys.argv[0])
    help_lines = [
        '\nHelp: {}'.format(script),
        ' -P X, --np=X\tRun in parallel with X number of processes',
        ' -D X, --directory=X\tWorking Data Directory',
        ' -Y X, --year=X\tReduce files to years of interest',
        ' -C, --clobber\t\tOverwrite existing data in transfer',
        ' -M X, --mode=X\t\tPermission mode of directories and files\n',
    ]
    for line in help_lines:
        print(line)
#-- This is the main part of the program that calls the individual modules
def main():
    """Parse command line options and sync MAR files from the ftp server.

    Builds the list of remote files (optionally filtered by year), then
    downloads them in parallel with a multiprocessing Pool, one
    ftp_mar_data task per file.
    """
    #-- Read the system arguments listed after the program
    long_options = ['help','np=','directory=','year=','clobber','mode=']
    optlist,arglist = getopt.getopt(sys.argv[1:],'hP:D:Y:CM:',long_options)
    #-- command line parameters
    local_dir = os.getcwd()
    #-- years to sync (default all)
    #-- NOTE(review): should be a raw string r'\d+' to avoid an invalid
    #-- escape sequence warning on Python 3
    YEARS = '\d+'
    #-- number of processes
    PROCESSES = 1
    CLOBBER = False
    #-- permissions mode of the local directories and files (number in octal)
    MODE = 0o775
    for opt, arg in optlist:
        if opt in ('-h','--help'):
            usage()
            sys.exit()
        elif opt in ("-D","--directory"):
            local_dir = os.path.expanduser(arg)
        elif opt in ("-Y","--year"):
            #-- e.g. "2016,2017" becomes the regex alternation "2016|2017"
            YEARS = '|'.join(arg.split(','))
        elif opt in ("-P","--np"):
            PROCESSES = int(arg)
        elif opt in ("-C","--clobber"):
            CLOBBER = True
        elif opt in ("-M","--mode"):
            MODE = int(arg, 8)
    #-- need to input a ftp path
    if not arglist:
        raise IOError('Need to input a path to the MAR ftp server')
    #-- check internet connection
    if check_connection():
        #-- check if local directory exists and recursively create if not
        os.makedirs(local_dir,MODE) if not os.path.exists(local_dir) else None
        #-- connect and login to MAR ftp server
        #-- get list of files to download
        parsed_ftp = urlparse.urlparse(arglist[0])
        ftp = ftplib.FTP(parsed_ftp.netloc)
        ftp.login()
        # find files and reduce to years of interest if specified
        remote_files = sorted([f for f in ftp.nlst(parsed_ftp.path)
            if re.search(YEARS,posixpath.basename(f))])
        ftp.quit()
        #-- run in parallel with multiprocessing Pool
        pool = multiprocessing.Pool(processes=PROCESSES)
        #-- download remote MAR files to local directory
        for j,remote_file in enumerate(remote_files):
            #-- extract filename
            url,fi = posixpath.split(remote_file)
            #-- each worker opens its own ftp connection (see ftp_mar_data)
            args = (parsed_ftp.netloc, remote_file, os.path.join(local_dir,fi),)
            kwds = dict(CLOBBER=CLOBBER, MODE=MODE)
            pool.apply_async(ftp_mar_data, args=args, kwds=kwds)
        #-- start multiprocessing jobs
        #-- close the pool
        #-- prevents more tasks from being submitted to the pool
        pool.close()
        #-- exit the completed processes
        pool.join()
#-- run main program
if __name__ == '__main__':
main()
| 36.765027 | 80 | 0.640012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,340 | 0.496433 |
9ccb29cee770a8f04a2031d1320f314942e54690 | 13,400 | py | Python | sondages/sondages_wiki_scrap.py | verycourt/Elections | b954c2bb23422e85d10074d41f7d2adc537a1766 | [
"MIT"
] | null | null | null | sondages/sondages_wiki_scrap.py | verycourt/Elections | b954c2bb23422e85d10074d41f7d2adc537a1766 | [
"MIT"
] | null | null | null | sondages/sondages_wiki_scrap.py | verycourt/Elections | b954c2bb23422e85d10074d41f7d2adc537a1766 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import warnings
import dateparser
import datetime
import time
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.1f')
warnings.filterwarnings('ignore')
URL = "https://fr.wikipedia.org/wiki/Liste_de_sondages_sur_l'%C3%A9lection_pr%C3%A9sidentielle_fran%C3%A7aise_de_2017#2016"
path1 = "/var/www/html/1ertour/"
path2 = "/var/www/html/2ndtour/"
'''dicoTableMois = {4:"Janvier 2016", 5:"Février 2016", 6:"Mars 2016", 7:"Avril 2016", 8:"Mai 2016", 9:"Juin 2016",\
10:"Juillet 2016", 11:"Septembre 2016", 12:"Octobre 2016", 13:"Novembre 2016", 14:"Décembre 2016", \
15:"Janvier 2017", 16:"Février 2017"}
'''
dicoTableMois = {0:"Mars 2017", 1:"Février 2017", 2:"Janvier 2017"}
dico_couleurs_candidats = {u"Arnaud Montebourg":"#CC0066", u"Benoît Hamon":"#CC3399",u"Cécile Duflot":"#008000", u"Emmanuel Macron":"#A9A9A9",
u"François Bayrou":"#FF6600", u"François Fillon":"#3399FF", u"François Hollande":"#FF9999", u"Jacques Cheminade":"#CC0000",
u"Jean-Luc Mélenchon":"#FF0000", u"Manuel Valls":"#FF6699", u"Marine Le Pen":"#000080", u"Nathalie Arthaud":"#CC0033",
u"Nicolas Dupont-Aignan":"#0000CC", u"Nicolas Hulot":"#66CC00", u"Philippe Poutou":"#990033",
u"Sylvia Pinel":"#FF0066", u"Yannick Jadot":"#339900"}
dico_candidat_parti = {u"Arnaud Montebourg":"PS",u"Benoît Hamon":"PS",u"Cécile Duflot":"eelv",
u"Emmanuel Macron" : "En Marche",
u"François Bayrou" : "MoDem", u"François Fillon":"Les Républicains",
u"François Hollande" : "PS", u"Jacques Cheminade" : "sp",
u"Jean-Luc Mélenchon" : "Parti de Gauche", u"Manuel Valls":"PS",u"Marine Le Pen":"FN",
u"Nathalie Arthaud":"lutte ouvriere",
u"Nicolas Dupont-Aignan":"Debout La France", u"Nicolas Hulot":"empty", u"Philippe Poutou":"NPA",
u"Sylvia Pinel":"ps", u"Yannick Jadot":"eelv"}
def loadHTML(URL):
    """Fetch *URL* over HTTP and return its parsed BeautifulSoup tree."""
    response = requests.get(URL)
    soup = BeautifulSoup(response.text, 'html.parser')
    return soup
def loadPandas(URL):
    """Scrape the wikipedia polling tables at *URL* into two DataFrames.

    Returns a tuple ``(dfF, dfFs)`` where ``dfF`` gathers first-round
    polls (tables 0-2, one per month per dicoTableMois) and ``dfFs``
    gathers second-round head-to-head polls (tables with < 7 columns).
    """
    tables = loadHTML(URL).findAll("table")
    dfF = pd.DataFrame()
    dfFs = pd.DataFrame()
    # For each wikipedia table:
    for idx, table in enumerate(tables) :
        lignes = table.findAll("tr")
        # Collect the name of each column (candidate names live in <a> tags):
        colonnes = []
        for elem in lignes[0].findAll("th"):
            if elem.find("a") is None :
                if elem.text != u'Autres candidats':
                    colonnes.append(elem.text)
            else :
                if(elem.find("a").text != ""):
                    colonnes.append(elem.find("a").text)
        for elem in lignes[1].findAll("th"):
            if elem.find("a") is not None :
                colonnes.append(elem.find("a").text)
        # Second-round tables have fewer columns; take extra headers from row 2
        if len(colonnes) < 7:
            for elem in lignes[2].findAll("th"):
                a=3
                colonnes.append(elem.text)
        # Build a pandas dataframe to store this table:
        df = pd.DataFrame(columns = colonnes)
        #print(len(colonnes))
        nbRowspan = 0
        rowspan = []
        rowspanMil = []
        # For each row of the table:
        for j,ligne in enumerate(lignes[2:]):
            line = list(np.zeros(len(colonnes)))
            line = ["/" for item in line]
            # Cells with rowspan > 1 carry their value over to later rows:
            for i,item in enumerate(rowspanMil):
                if item[0] > 1 :
                    line[item[1]] = item[2]
                    item[0] -= 1
            for i,elem in enumerate(ligne.findAll("td")):
                # Skip positions already filled by a pending rowspan value
                try:
                    while line[i] != "/":
                        i+=1
                except:
                    continue
                if elem.has_attr("rowspan"):
                    nbRowspan = int(elem["rowspan"])
                    if nbRowspan >1:
                        # Remember [remaining rows, column, value] for later rows
                        try :
                            rowspanMil.append([nbRowspan, i, float(elem.text.replace("%", "").replace(",",".").replace("<",""))])
                        except Exception as e :
                            rowspanMil.append([nbRowspan, i, (elem.text.replace("%", "").replace(",",".").replace("<",""))])
                # Percentages like "12,5%" become floats; otherwise keep the text
                try:
                    line[i] = (float(elem.text.replace("%", "").replace(",",".").replace("<","")))
                except Exception as e :
                    line[i] = (elem.text.replace("%", "").replace(",",".").replace("<",""))
            if len(line) > len(colonnes) - 3 :
                df.loc[j] = line
        #print(df)
        try :
            df = df[df["Date"] != "/"]
        except:
            continue
        # Tables 0-2 hold one month each: append the month/year to the day
        if idx >= 0 and idx <= 2:
            df["Date"] = df["Date"].map(lambda x : x.lower().replace(dicoTableMois[idx].lower()[:-5],""))
            df["Date"] = df["Date"].map(lambda x : x+" "+dicoTableMois[idx])
        # Second round:
        if len(colonnes) < 7 :
            dfFs = dfFs.append(df)
        # First round:
        elif idx >= 0 and idx <= 2:
            dfF = dfF.append(df.ix[1:])
    return (dfF, dfFs)
dfF, dfFs = loadPandas(URL)
#######################################################################
########################### 1er tour ##################################
#######################################################################
dfF = dfF.replace(to_replace=["-", "–"], value=" ")
dfF = dfF[dfF["Pourrait changer d'avis"]!="/"]
dfF["Pourrait changer d'avis"] = dfF["Pourrait changer d'avis"].map(lambda x : (str(x).split("[")[0].strip()))
dfF["Pourrait changer d'avis"] = dfF["Pourrait changer d'avis"].map(lambda x : 0 if x == "nan" or x == "" else float(x[:2]))
notCandidats = [u"Date", u"Sondeur", u"Échantillon"]
anciensCandidats = [u"Alain Juppé", u"Bruno Le Maire", u"Jean-François Copé", u"Nicolas Sarkozy", u"Eva Joly", u"Sylvia Pinel", u"Vincent Peillon", u"Arnaud Montebourg"]
for col in dfF.columns:
if col not in notCandidats:
dfF[col] = dfF[col].map(lambda x: x if isinstance(x, float) else np.nan)
dfF2 = dfF
for col in anciensCandidats:
if col in dfF2.columns :
dfF2 = dfF2[dfF2[col].isnull()]
dfF2 = dfF2.drop(col, axis=1)
dfF2["Pourrait changer d'avis"] = dfF2["Pourrait changer d'avis"].map(lambda x : np.nan if x==0 else x)
#print(dfF)
dfF3 = dfF2
dfF3["Date"] = dfF3["Date"].map(lambda x : x.replace("1er", "1").replace("fév.", ""))
dfF3["Date"] = dfF3["Date"].map(lambda x : ' '.join(x.split()))
dfF3["Date"] = dfF3["Date"].map(lambda x : x if len(x.split(" ")) < 4 else " ".join(x.split(" ")[-3:]))
dfF3["Date"] = dfF3["Date"].map(lambda x : dateparser.parse(x).date())
dfF3 = dfF3.groupby(["Date"]).mean().reset_index()
dfF3 = dfF3.sort_values('Date', ascending=1)
def dateToString(date):
    """Return *date* as a compact, sortable YYYYMMDD string (e.g. 20170205).

    Used to build date keys for the first-round poll exports.
    """
    # strftime zero-pads month and day, replacing the manual padding logic.
    return date.strftime("%Y%m%d")
dfF3 = dfF3.round(2)
dfF3 = dfF3[dfF3["Date"] > datetime.date(year=2017,month=01,day=01)]
dfF4 = dfF3
#dfF4 = dfF4.drop([u"Cécile Duflot", u"François Hollande", u"Nicolas Hulot", u"Rama Yade"], axis=1)
for col in dfF4.columns:
if col not in [u"Benoît Hamon", u"Emmanuel Macron", u"Date", u"François Fillon",\
u"Jean-Luc Mélenchon", u"Marine Le Pen", u"Nicolas Dupont-Aignan"]:
dfF4 = dfF4.drop(col, axis=1)
dfF5 = dfF4
dfF4["date"] = dfF4["Date"].map(lambda x: dateToString(x))
dfF4 = dfF4.drop("Date", axis=1)
dfF4 = dfF4.set_index("date")
dfF4 = dfF4.dropna(axis=1, how='all')
dfF4 = dfF4.dropna(axis=0, how='all')
# --- To json --- #
dfF5 = dfF5.dropna(axis=1, how='all')
dfF5 = dfF5.dropna(axis=0, how='all')
dfF5 = dfF5.set_index("Date")
#dfF5.to_csv("table_agrege.csv")
dfF5 = pd.read_csv("table_agrege.csv", encoding="utf-8")
dfF5["Date"] = pd.to_datetime(dfF5["Date"])
dfF5 = dfF5.groupby(["Date", "date"]).mean().reset_index()
dfF5.set_index("Date", inplace=True)
print(dfF5)
idx = pd.date_range(min(dfF5.index), max(dfF5.index))
dfF5 = dfF5.reindex(idx, fill_value="null")
########################
# Agrégats sur 6 jours #
########################
dfF5 = dfF5.drop("date", axis=1)
dfF5 = dfF5.replace(to_replace=["null"], value=np.nan)
diffDaysLast = (datetime.datetime.now()-max(dfF5.index).to_datetime()).days
#dfF5.index = dfF5.index.map(lambda x : x.to_datetime() + datetime.timedelta(days=diffDaysLast))
#dfF5 = dfF5.map(lambda x : )
lastsondages = max(dfF5.index)
to_add = (max(dfF5.index) - (max(dfF5.groupby(pd.TimeGrouper('6D')).mean().index))).days
dfF5.index = dfF5.index.map(lambda x : (x + datetime.timedelta(days=to_add)) )
dfF5 = dfF5.groupby(pd.TimeGrouper('6D')).mean()
#dfF5 = dfF5.index.map(lambda x : x.to_datetime() + datetime.timedelta(days=6))
for col in dfF5.columns :
dfF5[col] = np.round(dfF5[col], 1)
print(dfF5)
to_json = []
dico_sondage = {}
dico_sondage["id"] = 1
dico_sondage["refresh"] = {}
dfF5 = dfF5.fillna("null")
dico_sondage["refresh"]["last"] = time.mktime((lastsondages.to_datetime()).timetuple())
dico_sondage["refresh"]["dayInterval"] = 6
dico_sondage["title"] = "Agrégation des sondages pour le 1er tour de 11 instituts*"
dico_sondage["legende"] = "* Les données de ce graphique sont les moyennes des sondages d'intentions de vote de 11 instituts sur six jours. \
Plus précisément, pour chaque jour affiché, il fait la moyenne sur les six derniers jours. \
Les instituts sont : Ifop-Fiducial, OpinionWay, CSA, Future Thinking - SSI, BVA, Odoxa, Harris Interactive, TNS Sofres, Cevipof Ipsos-Sopra Steria, Elabe, Dedicated Research."
dico_sondage["unit"] = "%"
dico_sondage["dataset"] = []
for col in dfF5.columns:
#On garde les candidats demandés :
dico_temp = {}
dico_temp["title"] = col
if col in dico_candidat_parti.keys():
dico_temp["subtitle"] = dico_candidat_parti[col]
else :
dico_temp["subtitle"] = ""
if col in dico_couleurs_candidats.keys():
dico_temp["color"] = dico_couleurs_candidats[col]
else :
dico_temp["color"] = "#ffffff"
dico_temp["data"] = list(dfF5[col])
dico_sondage["dataset"].append(dico_temp)
to_json.append(dico_sondage)
#dfF4.to_csv(path+"sondages1er.csv", sep="\t", encoding='utf-8')
#dfF4.to_json(path1+"pollster1.json", force_ascii=False)
dfF4.to_csv(path1+"sondages1er.csv", sep="\t", encoding='utf-8')
dfF4.to_csv(path1+"data.tsv", sep="\t", encoding='utf-8')
dfF4.to_csv("data.tsv", sep="\t", encoding='utf-8')
#print(dfF3[["Manuel Valls", "Date"]])
#######################################################################
########################### 2nd tour ##################################
#######################################################################
dfFs2 = dfFs
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x if len(x)>5 else np.nan)
dfFs2 = dfFs2[dfFs2["Date"].notnull()]
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x.replace(u"-", " ").replace(u"–", " "))
dfFs2["Date"] = dfFs2["Date"].map(lambda x : x if len(x.split(" ")) < 4 else " ".join(x.split(" ")[-3:]))
dfFs2["Date"] = dfFs2["Date"].map(lambda x : dateparser.parse(x).date())
#dfFs2 = dfFs2.set_index(["Date"])
#dfFs2.index = pd.to_datetime(dfFs2.index)
notCandidats = [u"Date", u"Sondeur", u"Échantillon"]
def dateToString2(date):
    """Return *date* in DD/MM/YYYY form (e.g. 07/03/2017).

    Used to label second-round poll exports.
    """
    # strftime zero-pads day and month, replacing the manual padding logic.
    return date.strftime("%d/%m/%Y")
def getDuel(df, nom1, nom2):
    """Return the head-to-head poll table for two candidates.

    Keeps only the two candidates' columns indexed by date, dropping any
    row where either candidate has no value.
    """
    duel = df[[nom1, nom2, "date"]]
    duel = duel.set_index("date")
    return duel.dropna(axis=0, how='any')
for col in dfFs2.columns:
if col not in notCandidats:
if col != "Abstention, blanc ou nul":
dfFs2[col] = dfFs2[col].map(lambda x: x if isinstance(x, float) else np.nan)
else :
dfFs2[col] = dfFs2[col].map(lambda x: x if isinstance(x, float) else 0)
#dfFs2["Date"] = pd.to_datetime(dfFs2["Date"])
#dfFs2 = dfFs2.groupby(dfFs2["Date"].dt.month).mean()
#dfFs2 = dfFs2.reset_index()
dfFs2["date"] = dfFs2["Date"].map(lambda x: dateToString2(x))
dfFs2 = dfFs2.drop("Date", axis=1)
getDuel(dfFs2, u"Marine Le Pen", u"François Fillon").to_csv(path2+"mlpVSff.tsv", sep="\t", encoding="utf-8")
getDuel(dfFs2, u"Marine Le Pen", u"Manuel Valls").to_csv(path2+"mlpVSmv.tsv", sep="\t", encoding='utf-8')
getDuel(dfFs2, u"Marine Le Pen", u"Emmanuel Macron").to_csv(path2+"mlpVSem.tsv", sep="\t", encoding='utf-8')
getDuel(dfFs2, u"Emmanuel Macron", u"François Fillon").to_csv(path2+"emvsff.tsv", sep="\t", encoding="utf-8")
'''
getDuel(dfFs2, u"Marine Le Pen", u"Manuel Valls").to_json(path2+"mlpVSmv.json", force_ascii=False)
getDuel(dfFs2, u"Marine Le Pen", u"François Fillon").to_json(path2+"mlpVSff.json", force_ascii=False)
getDuel(dfFs2, u"Marine Le Pen", u"Emmanuel Macron").to_json(path2+"mlpVSem.json", force_ascii=False)
getDuel(dfFs2, u"Emmanuel Macron", u"François Fillon").to_json(path2+"emvsff.json", force_ascii=False)
'''
dfFs2.to_csv(path2+"sondages2e.csv", encoding='utf-8')
#dfFs2.to_json(path2+"sondages2e.json")
print("Done")
| 35.356201 | 175 | 0.589552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,514 | 0.409993 |
9ccc9caa284546838f6e690c84b3287d39799b8f | 7,645 | py | Python | RecoMuon/MuonIsolationProducers/test/isoTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoMuon/MuonIsolationProducers/test/isoTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoMuon/MuonIsolationProducers/test/isoTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | # The following comments couldn't be translated into the new config version:
#
# keep only muon-related info here
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("MISO")
process.load("Configuration.EventContent.EventContent_cff")
# service = MessageLogger {
# untracked vstring destinations = { "cout" }
# untracked vstring debugModules = { "muIsoDepositTk",
# "muIsoDepositCalByAssociatorHits",
# "muIsoDepositCalByAssociatorTowers",
# "muIsoDepositCal" }
# untracked vstring categories = { "RecoMuon" , "MuonIsolation" }
#
# untracked PSet cout = {
# untracked string threshold = "DEBUG"
# untracked int32 lineLength = 132
# untracked bool noLineBreaks = true
# untracked PSet DEBUG = {untracked int32 limit = 0 }
# untracked PSet RecoMuon = { untracked int32 limit = 10000000}
# untracked PSet MuonIsolation = { untracked int32 limit = 10000000}
# }
# }
process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.load("RecoLocalMuon.Configuration.RecoLocalMuon_cff")
#process.load("RecoMuon.Configuration.RecoMuon_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.FakeConditions_cff")
#process.load("Configuration.StandardSequences.RawToDigi_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
#has everything(?) one needs
# pick muIsolation sequence for "standard" iso reco for tracker and global muons
process.load("RecoMuon.MuonIsolationProducers.muIsolation_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/mc/2007/12/7/RelVal-RelValBJets_Pt_50_120-1197045102/0002/0A21A5F4-02A5-DC11-89F5-000423DD2F34.root')
)
process.source = cms.Source ("PoolSource",
fileNames = cms.untracked.vstring (
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/10438122-2A5F-DD11-A77F-000423D985E4.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/12F34420-2A5F-DD11-AB6E-000423D6CA6E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/244E7C0B-315F-DD11-ACFC-001617E30F58.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/2ADD8A12-315F-DD11-8AB8-000423D6C8E6.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/34A291FB-305F-DD11-833E-001617C3B6CC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/383E09CA-2C5F-DD11-9A28-000423D6BA18.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/40F0F8A4-2A5F-DD11-BC72-001617C3B64C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4AD39C8C-2A5F-DD11-B935-001617C3B710.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4C0D4911-315F-DD11-A20D-001617DBD332.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4C32E425-2A5F-DD11-B819-000423D6C8EE.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/50881CBB-2A5F-DD11-92C6-001617C3B6E8.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/52B83F75-2A5F-DD11-AD56-001617C3B6CC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/544DC99A-2A5F-DD11-9160-001617C3B6E2.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/62F7698D-2A5F-DD11-907A-001617C3B6DC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/7C8A2791-2A5F-DD11-814D-001617DBCF6A.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/7EDA5005-315F-DD11-8019-001617C3B706.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/8A91E518-2A5F-DD11-B49A-000423D6B42C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/8CC497AE-2A5F-DD11-AE43-000423DD2F34.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9A469FA8-2A5F-DD11-9909-001617C3B6FE.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9A5BE3A4-2A5F-DD11-A61B-001617DF785A.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9AC2141C-2A5F-DD11-ADF5-000423D6A6F4.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9CCFA319-2A5F-DD11-B0AA-000423D94700.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/A0F6C41D-2A5F-DD11-8685-000423D6BA18.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/B0159DAC-2A5F-DD11-98A8-001617E30D00.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/B05C32FC-305F-DD11-A957-001617C3B70E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/C6ADD999-2A5F-DD11-AF9F-0016177CA7A0.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/C8AEE585-2A5F-DD11-BB37-001617C3B77C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/CC5178C4-2A5F-DD11-BCE6-001617E30F4C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/CE9FE020-2A5F-DD11-9846-000423D6CA72.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/D24BFA7E-2A5F-DD11-8F79-001617C3B70E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/D62761FA-305F-DD11-A108-0016177CA778.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/DA0DDFB6-2A5F-DD11-987A-001617DBD5B2.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/E64386FE-305F-DD11-BA68-0019DB29C614.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/E6BC0D37-2A5F-DD11-9ACB-000423D6B444.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/F251D794-2A5F-DD11-BA5D-00161757BF42.root'
),
secondaryFileNames = cms.untracked.vstring (
)
)
process.RECO = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('file:isoTest.root')
)
process.p1 = cms.Path(process.muIsolation)
process.outpath = cms.EndPath(process.RECO)
process.RECO.outputCommands.append('drop *_*_*_*')
process.RECO.outputCommands.extend(process.RecoMuonRECO.outputCommands)
| 71.448598 | 147 | 0.780379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,505 | 0.850883 |
9cce74ca65c4a70248a44251c279dedff70a0c3a | 220 | py | Python | app/egg/__init__.py | MultyXu/Islandr | 5f965638d0cda68e76f63b0625b2411029e90c2d | [
"MIT"
] | 2 | 2019-06-05T01:36:53.000Z | 2019-06-05T01:37:12.000Z | app/egg/__init__.py | dingding0606/Islandr | dd2277a8a7ceb002af3b28045133797fa9ffed9d | [
"MIT"
] | null | null | null | app/egg/__init__.py | dingding0606/Islandr | dd2277a8a7ceb002af3b28045133797fa9ffed9d | [
"MIT"
] | 1 | 2019-08-28T20:49:22.000Z | 2019-08-28T20:49:22.000Z | '''
@Description: Easter Egg
@Author: Tianyi Lu
@Date: 2019-08-10 10:30:29
@LastEditors: Tianyi Lu
@LastEditTime: 2019-08-10 10:36:24
'''
from flask import Blueprint
egg = Blueprint('egg', __name__)
from . import views | 18.333333 | 34 | 0.722727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.645455 |
9ccf491bea316d314b0ea1d3246d4253a60ec8dc | 1,830 | py | Python | tests/backends/test_sqlalchemy_backend.py | kuc2477/news | 215f87e6ce1a7fc99175596e6fd5b4b50a3179c6 | [
"MIT"
] | 2 | 2016-01-21T04:16:57.000Z | 2016-04-27T04:46:13.000Z | tests/backends/test_sqlalchemy_backend.py | kuc2477/news | 215f87e6ce1a7fc99175596e6fd5b4b50a3179c6 | [
"MIT"
] | null | null | null | tests/backends/test_sqlalchemy_backend.py | kuc2477/news | 215f87e6ce1a7fc99175596e6fd5b4b50a3179c6 | [
"MIT"
] | null | null | null | def test_get_news(sa_session, sa_backend, sa_child_news):
assert(sa_child_news == sa_backend.get_news(sa_child_news.id))
assert(sa_backend.get_news(None) is None)
def test_get_news_list(sa_session, sa_backend, sa_child_news):
assert(sa_child_news in sa_backend.get_news_list())
assert(sa_child_news in sa_backend.get_news_list(
owner=sa_child_news.owner))
assert(sa_child_news in sa_backend.get_news_list(
root_url=sa_child_news.root.url))
assert(sa_child_news in sa_backend.get_news_list(
owner=sa_child_news.owner,
root_url=sa_child_news.root.url
))
def test_news_exists(sa_session, sa_backend, sa_child_news):
assert(sa_backend.news_exists(sa_child_news.id))
sa_backend.delete_news(sa_child_news)
assert(not sa_backend.news_exists(sa_child_news.id))
def test_save_news(sa_session, sa_backend,
sa_schedule, sa_news_model, url_root, content_root):
news = sa_news_model.create_instance(
schedule=sa_schedule,
url=url_root,
title='title',
content=content_root,
summary='summary'
)
assert(news not in sa_backend.get_news_list(sa_schedule.owner, url_root))
sa_backend.save_news(news)
assert(news in sa_backend.get_news_list(sa_schedule.owner, url_root))
def test_delete_news(sa_session, sa_backend, sa_child_news):
assert(sa_backend.news_exists(sa_child_news.id))
sa_backend.delete_news(sa_child_news)
assert(not sa_backend.news_exists(sa_child_news.id))
def test_get_schedule(sa_session, sa_backend, sa_schedule):
assert(sa_schedule == sa_backend.get_schedule(sa_schedule.id))
def test_get_schedules(sa_session, sa_backend, sa_schedule,
sa_owner, url_root):
assert(sa_schedule in sa_backend.get_schedules(sa_owner, url_root))
| 35.882353 | 77 | 0.749727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.008743 |
9ccf4e8bda92837d4e656009c423fc5151a4c436 | 1,024 | py | Python | database/repositories/ModelBlobRepository.py | roblkenn/EECS441-Backend | 96fa0ca6775c13426dacdcbe1da02a8fa6e887ed | [
"RSA-MD"
] | null | null | null | database/repositories/ModelBlobRepository.py | roblkenn/EECS441-Backend | 96fa0ca6775c13426dacdcbe1da02a8fa6e887ed | [
"RSA-MD"
] | null | null | null | database/repositories/ModelBlobRepository.py | roblkenn/EECS441-Backend | 96fa0ca6775c13426dacdcbe1da02a8fa6e887ed | [
"RSA-MD"
] | null | null | null | from azure.storage.blob.baseblobservice import BaseBlobService
from azure.storage.blob.blockblobservice import BlockBlobService
from database.models.Blob import Blob
class ModelBlobRepository:
def __init__(self):
self.blockBlobService = BlockBlobService(account_name='stylesblobstorage', account_key='B4qA7PlPtEk+y/zDsn16+0KXjlLJpcmnZb9C/CLDTbU9PzI294Ithc6j3y+jBz6j4KKAe3LcqadtkKe24JhxIw==')
self.blobService = BaseBlobService(account_name='stylesblobstorage', account_key='B4qA7PlPtEk+y/zDsn16+0KXjlLJpcmnZb9C/CLDTbU9PzI294Ithc6j3y+jBz6j4KKAe3LcqadtkKe24JhxIw==')
self.containerName = 'models'
def create(self, modelBytes, userId):
self.blockBlobService.create_blob_from_text(self.containerName, userId, modelBytes)
return userId
def read(self, blobName):
blob = self.blobService.get_blob_to_text(self.containerName, blobName)
return Blob(blob)
def delete(self, blobName):
return self.blobService.delete_blob(self.containerName, blobName)
| 48.761905 | 186 | 0.783203 | 856 | 0.835938 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.220703 |
9cd03be08a58b5db01aa5e339f8033f2b9c3bad6 | 1,229 | py | Python | day03/puzzle2b.py | techrabbit58/AdventOfCode2020 | 9e0f4a633f50f5dd48186d38fe58ad8d40dff387 | [
"Unlicense"
] | 1 | 2020-12-15T22:27:38.000Z | 2020-12-15T22:27:38.000Z | day03/puzzle2b.py | techrabbit58/AdventOfCode2020 | 9e0f4a633f50f5dd48186d38fe58ad8d40dff387 | [
"Unlicense"
] | null | null | null | day03/puzzle2b.py | techrabbit58/AdventOfCode2020 | 9e0f4a633f50f5dd48186d38fe58ad8d40dff387 | [
"Unlicense"
] | null | null | null | """
Advent Of Code 2020. Day 03. Puzzle 2.
2nd approach, just to see if this may be faster.
It turns out, the first approach had been at least 3 times faster.
This second approach is slower. Hummm!
"""
import time
input_file = 'day03.txt'
TREE = '#'
SLOPES = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
def parse(puzzle_input):
positions = set()
height, width = 0, 0
with open(puzzle_input) as f:
for y, line in enumerate(f):
height, width = y + 1, len(line)
for x, ch in enumerate(line):
if ch == TREE:
positions.add((x, y))
return positions, height, width
def trajectory(forest, height, width, slope):
count, x = 0, 0
dx, dy = slope
for y in range(0, height, dy):
if (x, y) in forest:
count += 1
x = (x + dx) % width
return count
def solution(forest, height, tile_width):
hit_count = 1
for slope in SLOPES:
hit_count *= trajectory(forest, height, tile_width, slope)
return hit_count
if __name__ == '__main__':
forest, height, tile_width = parse(input_file)
start = time.perf_counter()
print(solution(forest, height, tile_width), time.perf_counter() - start)
| 25.081633 | 76 | 0.598047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.183076 |
9cd0d98d0b63b4259d4d0c7661566ba636ce4420 | 20,644 | py | Python | src/mvdef/import_util.py | lmmx/mvdef | c1e5002fbbcb3382025f2c6410767fee3ffe2500 | [
"MIT"
] | null | null | null | src/mvdef/import_util.py | lmmx/mvdef | c1e5002fbbcb3382025f2c6410767fee3ffe2500 | [
"MIT"
] | 42 | 2019-12-31T12:22:10.000Z | 2021-07-11T10:57:06.000Z | src/mvdef/import_util.py | lmmx/mvdef | c1e5002fbbcb3382025f2c6410767fee3ffe2500 | [
"MIT"
] | null | null | null | import ast
from ast import Import as IType, ImportFrom as IFType
from astor import to_source
from asttokens import ASTTokens
from .colours import colour_str as colour
from os import linesep as nl
from sys import stderr
__all__ = [
"get_import_stmt_str",
"multilinify_import_stmt_str",
"colour_imp_stmt",
"get_imported_name_sources",
"get_module_srcs",
"count_imported_names",
"annotate_imports",
"imp_def_subsets",
]
def get_import_stmt_str(alias_list, import_src=None, max_linechars=88):
"""
Construct an import statement by building an AST, convert it to source using
astor.to_source, and then return the string.
alias_list: List of strings to use as ast.alias `name`, and optionally also
`asname entries. If only one name is listed per item in the
alias_list, the `asname` will be instantiated as None.
import_src: If provided, the import statement will be use the
`ast.ImportFrom` class, otherwise it will use `ast.Import`.
Relative imports are permitted for "import from" statements
(such as `from ..foo import bar`) however absolute imports
(such as `from foo import bar`) are recommended in PEP8.
max_linechars: Maximum linewidth, beyond which the import statement string will
be multilined with `multilinify_import_stmt_str`.
"""
alias_obj_list = []
assert type(alias_list) is list, "alias_list must be a list"
for alias_pair in alias_list:
if type(alias_pair) is str:
alias_pair = [alias_pair]
assert len(alias_pair) > 0, "Cannot import using an empty string"
assert type(alias_pair[0]) is str, "Import alias name must be a string"
if len(alias_pair) < 2:
alias_pair.append(None)
al = ast.alias(*alias_pair[0:2])
alias_obj_list.append(al)
if import_src is None:
ast_imp_stmt = ast.Import(alias_obj_list)
else:
import_level = len(import_src) - len(import_src.lstrip("."))
import_src = import_src.lstrip(".")
ast_imp_stmt = ast.ImportFrom(import_src, alias_obj_list, level=import_level)
import_stmt_str = to_source(ast.Module([ast_imp_stmt]))
if len(import_stmt_str.rstrip(nl)) > max_linechars:
return multilinify_import_stmt_str(import_stmt_str)
else:
return import_stmt_str
def multilinify_import_stmt_str(import_stmt_str, indent_spaces=4, trailing_comma=True):
"""
Takes a single line import statement and turns it into a multiline string.
Will raise a `ValueError` if given a multiline string (a newline at the end
of the string is permitted).
This function is written in expectation of the output of `get_import_stmt_str`,
and is not intended to process all potential ways of writing an import statement.
import_stmt_str: String of Python code carrying out an import statement.
indent_spaces: Number of spaces to indent by in multiline format.
trailing_comma: Whether to add a trailing comma to the final alias in a
multiline list of import aliases (default: True)
"""
import_stmt_str = import_stmt_str.rstrip(nl)
n_nl = import_stmt_str.count(nl)
if n_nl > 0:
raise ValueError(f"{import_stmt_str} is not a single line string")
imp_ast = ast.parse(import_stmt_str)
assert type(imp_ast.body[0]) in [IType, IFType], "Not a valid import statement"
tko = ASTTokens(import_stmt_str)
first_tok = tko.tokens[0]
import_tok = tko.find_token(first_tok, tok_type=1, tok_str="import")
assert import_tok.type > 0, f"Unable to find import token in the given string"
imp_preamble_str = import_stmt_str[: import_tok.endpos]
post_import_tok = tko.tokens[import_tok.index + 1]
imp_names_str = import_stmt_str[post_import_tok.startpos :]
aliases = [(x.name, x.asname) for x in imp_ast.body[0].names]
seen_comma_tok = None
multiline_import_stmt_str = imp_preamble_str
multiline_import_stmt_str += " (" + nl
for al_i, (a_n, a_as) in enumerate(aliases):
is_final_alias = al_i + 1 == len(aliases)
if seen_comma_tok is None:
# Get start of alias by either full name or first part of .-separated name
al_n_tok = tko.find_token(import_tok, 1, tok_str=a_n.split(".")[0])
assert al_n_tok.type > 0, f"Unable to find the token for {a_n}"
else:
al_n_tok = tko.find_token(seen_comma_tok, 1, tok_str=a_n.split(".")[0])
assert al_n_tok.type > 0, f"Unable to find the token for {a_n}"
al_startpos = al_n_tok.startpos
if a_as is None:
if is_final_alias:
# There won't be a comma after this (it is the last import name token)
al_endpos = al_n_tok.endpos
else:
comma_tok = tko.find_token(al_n_tok, tok_type=53, tok_str=",")
if comma_tok.type == 0:
# Due to an error in asttokens, sometimes tok_type is given as 54
# although this should be an error (the failure tok_type is 0)
comma_tok = tko.find_token(al_n_tok, tok_type=54, tok_str=",")
assert comma_tok.type > 0, f"Unable to find comma token"
al_endpos = comma_tok.endpos
else:
al_as_tok = tko.find_token(al_n_tok, tok_type=1, tok_str=a_as)
assert al_as_tok.type > 0, f"Unable to find the token for {a_as}"
if is_final_alias:
# There won't be a comma after this (it's the last import asname token)
al_endpos = al_as_tok.endpos
else:
comma_tok = tko.find_token(al_as_tok, tok_type=53, tok_str=",")
if comma_tok.type == 0:
# Due to an error in asttokens, sometimes tok_type is given as 54
# although this should be an error (the failure tok_type is 0)
comma_tok = tko.find_token(al_n_tok, tok_type=54, tok_str=",")
assert comma_tok.type > 0, f"Unable to find comma token"
al_endpos = comma_tok.endpos
alias_chunk = import_stmt_str[al_startpos:al_endpos]
if is_final_alias:
if trailing_comma:
alias_chunk += ","
else:
seen_comma_tok = comma_tok
multiline_import_stmt_str += (" " * indent_spaces) + alias_chunk + nl
# Finally, verify that the end of the tokenised string was reached
assert al_endpos == tko.tokens[-1].endpos, "Did not tokenise to the end of string"
# No need to further slice the input string, return the final result
multiline_import_stmt_str += ")" + nl
return multiline_import_stmt_str
def colour_imp_stmt(imp_stmt, lines):
"""
Summary: get a string which when printed will show the separate parts of an
import statement in different colours (preamble in blue, alias names in red,
alias asnames in purple, the word "as" itself in yellow, commas between import
aliases in light green, and post-matter (a bracket) in light red.
For an import statement within an asttokens-annotated AST, which comes with
all subnodes annotated with first and last token start/end positional information,
access all the tokens corresponding to the import statement name(s) and asname(s).
Do this using a list of lines (i.e. a list of strings, each of which is a line),
the subset of which corresponding to the import statement `imp_stmt` are given
by its `first_token.start` and `last_token.end` attributes (in each case, the
attribute is a tuple of `(line, column)` numbers, and it is conventional to store
these as a 1-based index, so to cross-reference to a 0-based index of the list
of lines we decrement this value and store as `imp_startln` and `imp_endln`).
The subset of lines corresponding to `imp_stmt` is then assigned as `nodelines`,
and we join this into a single string as `nodestring`.
Then a new ASTTokens object, `tko`, can be made by parsing `nodestring`, on which
the `find_tokens` method provides access to each name/asname one at a time, when
matched to the name/asname string. These name/asname strings are available
within the `imp_stmt` object via its `names` attribute, which is a list of
`ast.alias` class instances, each of which has both a `name` and `asname` attribute
(the latter of which is `None` if no asname is given for the import name).
`find_tokens` returns a token with attribute `type` of value `1` for a name (1 is
the index of "NAME" in the `token.tok_name` dictionary), and `startpos`/`endpos`
attributes (integers which indicate the string offsets within `nodestring`).
These `startpos` integers are an efficient way to store this list of tokens
(the "NAME" tokens corresponding to import statement alias names and asnames),
and so even though it would be possible to store all tokens, I choose to simply
re-access them with the `tko.get_token_from_offset(startpos)` method.
At the moment, I only re-access these tokens to retrieve their `endpos` (end
position offset), which is also an integer and could also be stored easily
without much problem, however for the sake of clarity I prefer to re-access
the entire token and not have to construct an arbitrary data structure for
storing the start and end positions (which could get confusing).
Lastly, I construct a colourful string representation of the import statement
by using these start positions and re-retrieved end positions to pull out
and modify (using the `mvdef.colours`⠶`colour_str` function) the names and asnames
(names are coloured red, asnames are coloured purple), and use string slicing
to swap the ranges that the names and asnames were in in the original
`nodestring` for these colourful replacements.
The end result, `modified_nodestring` is then returned, which will then
display in colour on Linux and OSX (I don't think Windows supports ANSI codes,
so I made `colour_str` only apply on these platforms).
"""
assert "first_token" in imp_stmt.__dir__(), "Not an asttokens-annotated AST node"
assert type(imp_stmt) in [IType, IFType], "Not an import statement"
is_from = type(imp_stmt) is IFType
imp_startln = imp_stmt.first_token.start[0] - 1 # Use 0-based line index
imp_endln = imp_stmt.last_token.end[0] - 1 # to match list of lines
nodelines = lines[imp_startln : (imp_endln + 1)]
n_implines = len(nodelines)
nodestring = "".join(nodelines)
tko = ASTTokens(nodestring)
new_nodelines = [list() for _ in range(n_implines)]
# Subtract the import statement start position from the name or asname
# token start position to get the offset, then use the offset to extract
# a range of text from the re-parsed ASTTokens object for the nodestring
# corresponding to the import name or asname in question.
imp_startpos = imp_stmt.first_token.startpos
alias_starts = []
for alias in imp_stmt.names:
al_n, al_as = alias.name, alias.asname
# 1 is the key for "NAME" in Python's tokens.tok_name
s = [tko.find_token(tko.tokens[0], 1, tok_str=al_n).startpos]
if al_as is not None:
s.append(tko.find_token(tko.tokens[0], 1, tok_str=al_as).startpos)
alias_starts.append(s)
assert len(alias_starts) > 0, "An import statement cannot import no names!"
assert alias_starts[0][0] > 0, "An import statement cannot begin with a name!"
modified_nodestring = ""
# -------------------------------------------------------------------------
# Now set up colour definitions for the modified import statement string
name_colour, asname_colour = ["red", "purple"]
pre_colour, post_colour = ["light_blue", "light_red"]
as_string_colour = "yellow"
comma_colour = "light_green"
# -------------------------------------------------------------------------
first_import_name_startpos = alias_starts[0][0]
pre_str = nodestring[:first_import_name_startpos]
modified_nodestring += colour(pre_colour, pre_str)
seen_endpos = first_import_name_startpos
# (Could add a try/except here to verify colours are in colour dict if modifiable)
for al_i, alias_start_list in enumerate(alias_starts):
for al_j, al_start in enumerate(alias_start_list):
if seen_endpos < al_start:
# There is an intervening string, append it to modified_nodestring
intervening_str = nodestring[seen_endpos:al_start]
if al_j > 0:
# This is the word "as", which comes between a name and an asname
modified_nodestring += colour(as_string_colour, intervening_str)
else:
if al_i > 0:
assert "," in intervening_str, "Import aliases not comma-sep.?"
modified_nodestring += colour(comma_colour, intervening_str)
else:
modified_nodestring += intervening_str
# Possible here to distinguish between names and asnames by al_j if needed
is_asname = bool(al_j) # al_j is 0 if name, 1 if asname
name_tok = tko.get_token_from_offset(al_start)
assert name_tok.type > 0, f"No import name at {al_start} in {nodestring}"
al_endpos = name_tok.endpos
imp_name = nodestring[al_start:al_endpos]
cstr_colour = [name_colour, asname_colour][al_j]
cstr = colour(cstr_colour, imp_name)
modified_nodestring += cstr
seen_endpos = al_endpos
end_str = nodestring[seen_endpos:]
modified_nodestring += colour(post_colour, end_str)
return modified_nodestring
def get_imported_name_sources(trunk, report=True):
import_types = [IType, IFType]
imports = [n for n in trunk if type(n) in import_types]
imp_name_lines, imp_name_dict_list = annotate_imports(imports, report=report)
imported_names = {}
for ld in imp_name_lines:
ld_n = imp_name_lines.get(ld).get("n")
line_n = imp_name_dict_list[ld_n]
imp_src = next(x for x in [*line_n.items()] if x[1] == ld)
imported_names[ld] = imp_src
return imported_names
def get_module_srcs(imports):
ifr_srcs = []
for imp in imports:
if type(imp) == IFType:
ifr_srcs.append(imp.module)
else:
ifr_srcs.append(None)
return ifr_srcs
def count_imported_names(nodes):
"""
Return an integer for a single node (0 if not an import statement),
else return a list of integers for a list of AST nodes.
"""
if type(nodes) is not list:
if type(nodes) in [IType, IFType]:
return len(nodes.names)
else:
assert ast.stmt in type(nodes).mro(), f"{nodes} is not an AST statement"
return 0
counts = []
for node in nodes:
if type(node) in [IType, IFType]:
counts.append(len(node.names))
else:
assert ast.stmt in type(nodes).mro(), f"{nodes} is not an AST statement"
counts.append(0)
return counts
def annotate_imports(imports, report=True):
"""
Produce two data structures from the list of import statements (the statements
of type ast.Import and ast.ImportFrom in the source program's AST),
imp_name_linedict: A dictionary whose keys are all the names imported by the
program (i.e. the names which they are imported as: the
asname if one is used), and whose value for each name
is a dictionary of keys (`n`, `line`):
n: [0-based] index of the import statement importing
the name, over the set of all import statements.
line: [1-based] line number of the file of the import
statement importing the name. Note that it may
not correspond to the line number on which the
name is given, only to the import function call.
imp_name_dict_list: List of one dict per import statement, whose keys
are the full import path (with multi-part paths conjoined
by a period `.`) and the values of which are the names
that these import paths are imported as (either the asname
or else just the terminal part of the import path). The
dict preserves the per-line order of the imported
names.
"""
report_VERBOSE = False # Silencing debug print statements
# This dictionary gives the import line it's on for cross-ref with either
# the imports list above or the per-line imported_name_dict
imp_name_linedict = dict() # Stores all names and their asnames
imp_name_dict_list = [] # Stores one dict per AST import statement
for imp_no, imp_line in enumerate(imports):
imp_name_dict = dict()
for imported_names in imp_line.names:
name, asname = imported_names.name, imported_names.asname
if type(imp_line) == IFType:
assert imp_line.level == 0, "I've only encountered level 0 imports"
fullname = ".".join([imp_line.module, name])
else:
fullname = name
if asname is None:
imp_name_dict[fullname] = name
# Store both which import in the list of imports it's in
# and the line number it's found on in the parsed file
imp_name_linedict[name] = {"n": imp_no, "line": imp_line.lineno}
else:
imp_name_dict[fullname] = asname
imp_name_linedict[asname] = {"n": imp_no, "line": imp_line.lineno}
imp_name_dict_list.append(imp_name_dict)
# Ensure that they each got all the names
assert len(imp_name_dict_list) == len(imports)
assert sum([len(d) for d in imp_name_dict_list]) == len(imp_name_linedict)
if report_VERBOSE:
print("The import name line dict is:", file=stderr)
for ld in imp_name_linedict:
# print(f" {ld}: {imp_name_linedict.get(ld)}")
pass
print("The import name dict list is:", file=stderr)
for ln in imp_name_dict_list:
print(ln, file=stderr)
return imp_name_linedict, imp_name_dict_list
def imp_def_subsets(linkfile):
"""
Given the list of mvdef_names and nonmvdef_names, construct the subsets:
mv_imports: imported names used by the functions to move,
nonmv_imports: imported names used by the functions not to move,
mutual_imports: imported names used by both the functions to move and
the functions not to move
"""
# report = linkfile.report
report_VERBOSE = False # Silencing debug print statements
mvdef_dicts = linkfile.mvdef_names # rename to emphasise that these are dicts
mvdef_names = set().union(
*[list(mvdef_dicts[x]) for x in mvdef_dicts]
) # funcdef names
nonmvdef_dicts = linkfile.nonmvdef_names # (as for mvdef_dicts)
nonmvdef_names = set().union(*[list(nonmvdef_dicts[x]) for x in nonmvdef_dicts])
linkfile.mv_imports = mvdef_names - nonmvdef_names
linkfile.nonmv_imports = nonmvdef_names - mvdef_names
linkfile.mutual_imports = mvdef_names.intersection(nonmvdef_names)
assert linkfile.mv_imports.isdisjoint(
linkfile.nonmv_imports
), "mv/nonmv_imports intersect!"
assert linkfile.mv_imports.isdisjoint(
linkfile.mutual_imports
), "mv/mutual imports intersect!"
assert linkfile.nonmv_imports.isdisjoint(
linkfile.mutual_imports
), "nonmv/mutual imports intersect!"
if report_VERBOSE:
print(
f"mv_imports: {linkfile.mv_imports}",
f", nonmv_imports: {linkfile.nonmv_imports}",
f", mutual_imports: {linkfile.mutual_imports}",
sep="",
file=stderr,
)
all_defnames = set().union(*[mvdef_names, nonmvdef_names])
all_def_imports = set().union(
*[linkfile.mv_imports, linkfile.nonmv_imports, linkfile.mutual_imports]
)
assert sorted(all_defnames) == sorted(all_def_imports), "Defnames =/= import names"
return
| 50.722359 | 87 | 0.653749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,481 | 0.507653 |
9cd18766a6cefc07e6ae4aced26c08d612cd472e | 384 | py | Python | tests/bench/test_yahoo_nyse_VRS.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/bench/test_yahoo_nyse_VRS.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/bench/test_yahoo_nyse_VRS.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import pyaf.Bench.YahooStocks as ys
import warnings
symbol_lists = tsds.get_yahoo_symbol_lists();
y_keys = sorted(symbol_lists.keys())
print(y_keys)
k = "nysecomp"
tester = ys.cYahoo_Tester(tsds.load_yahoo_stock_prices(k) , "YAHOO_STOCKS_" + k);
with warnings.catch_warnings():
warnings.simplefilter("error")
tester.testSignals('VRS')
| 29.538462 | 81 | 0.770833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.096354 |
9cd50d98408fbc789fcaead7d9f721fe9a6e105c | 1,376 | py | Python | fabric_utils/ci.py | selfpub-org/fabric-utils | ee5793d242a9525df8fca45a67f0042002f4a6ab | [
"BSD-3-Clause"
] | null | null | null | fabric_utils/ci.py | selfpub-org/fabric-utils | ee5793d242a9525df8fca45a67f0042002f4a6ab | [
"BSD-3-Clause"
] | 1 | 2018-05-31T13:41:38.000Z | 2018-05-31T13:41:38.000Z | fabric_utils/ci.py | selfpub-org/fabric-utils | ee5793d242a9525df8fca45a67f0042002f4a6ab | [
"BSD-3-Clause"
] | 1 | 2020-11-10T16:20:06.000Z | 2020-11-10T16:20:06.000Z | import os
from functools import wraps
from fabric.api import settings, warn
def teamcity(message_name, *params, **kwargs):
    """Emit a TeamCity service message to stdout.

    Messages are printed only when running under TeamCity (detected via the
    ``TEAMCITY_VERSION`` environment variable) or when ``force=True`` is
    passed as a keyword argument.  Unknown message names are reported with
    a warning and produce no output.

    :param message_name: service message type, e.g. ``'buildStatus'``
    :param params: values interpolated into the message template, in order
    :param kwargs: ``force`` (bool) bypasses the TeamCity environment check
    """
    templates = {
        'testSuiteStarted': "testSuiteStarted name='%s'",
        'testSuiteFinished': "testSuiteFinished name='%s'",
        'buildStatus': "buildStatus text='%s'",
        'testStarted': "testStarted name='%s'",
        'testFailed': "testFailed name='%s' message='%s'",
        'testFinished': "testFinished name='%s'",
        'setParameter': "setParameter name='%s' value='%s'",
    }
    # only emit when TeamCity is detected or explicitly forced
    if not (os.environ.get('TEAMCITY_VERSION') or kwargs.get('force') or False):
        return
    template = templates.get(message_name)
    if template is None:
        warn(f'teamcity message {message_name} not supported')
        return
    print("##teamcity[%s]" % (template % params))  # noqa
def with_teamcity(task):
    """Decorator that reports a fabric task's execution as TeamCity test events.

    Emits ``testStarted`` before the task runs, ``testFailed`` (with the
    exception class name) if it raises, and ``testFinished`` in all cases.
    Exceptions are re-raised after being reported.
    """
    @wraps(task)
    def reporting_wrapper(*args, **kwargs):
        name = task.__name__
        teamcity('testStarted', name)
        try:
            # have fabric raise instead of calling sys.exit() on abort
            with settings(abort_exception=Exception):
                return task(*args, **kwargs)
        except Exception as error:
            teamcity('testFailed', name, 'Exception: ' + type(error).__name__)
            raise
        finally:
            teamcity('testFinished', name)
    return reporting_wrapper
| 30.577778 | 85 | 0.62282 | 0 | 0 | 0 | 0 | 411 | 0.298692 | 0 | 0 | 476 | 0.34593 |
9cd79b368d2d4cdb843e42b4dcffedf36436d6a1 | 2,331 | py | Python | sandy/sections/mf35.py | AitorBengoechea/sandy | c55e4ee7b593389947a52e1adb85282dcb73dda3 | [
"MIT"
] | null | null | null | sandy/sections/mf35.py | AitorBengoechea/sandy | c55e4ee7b593389947a52e1adb85282dcb73dda3 | [
"MIT"
] | null | null | null | sandy/sections/mf35.py | AitorBengoechea/sandy | c55e4ee7b593389947a52e1adb85282dcb73dda3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module contains a single public function:
* `read_mf35`
Function `read_mf35` reads a MF35/MT section from a string and produces a
content object with a dictionary-like structure.
The content object can be accessed using most of the keywords specified in
the ENDF6 manual for this specific MF section.
"""
import sandy
__author__ = "Aitor Bengoechea"
def read_mf35(tape, mat, mt):
    """
    Parse MAT/MF=35/MT section from `sandy.Endf6` object and return
    structured content in nested dictionaries.
    Parameters
    ----------
    tape : `sandy.Endf6`
        endf6 object containing requested section
    mat : `int`
        MAT number
    mt : `int`
        MT number
    Returns
    -------
    `dict`
        Content of the ENDF-6 tape structured as nested `dict`.
    Examples
    --------
    >>> import numpy as np
    >>> tape = sandy.get_endf6_file("jeff_33", 'xs', 922380)
    >>> out = read_mf35(tape, mat=9237, mt=18)["SUB"][1]["FKK"][0:15]
    >>> np.round(out, decimals=35)
    array([0.00000e+00, 1.00000e-35, 1.00000e-35, 3.00000e-35, 2.00000e-34,
           2.80000e-34, 9.60000e-34, 6.17000e-33, 8.96000e-33, 3.04700e-32,
           1.95070e-31, 2.83330e-31, 9.63620e-31, 6.16871e-30, 8.95957e-30])
    """
    mf = 35
    df = tape._get_section_df(mat, mf, mt)
    out = {"MAT": mat,
           "MF": mf,
           "MT": mt}
    i = 0
    # first CONT record carries the header data (ZA, AWR, subsection count)
    C, i = sandy.read_cont(df, i)
    out.update({"ZA": C.C1,
                "AWR": C.C2,
                "NK": C.N1, # Number of subsections
                "SUB": {}})
    for k in range(out["NK"]):
        L, i = sandy.read_list(df, i)
        D = {"ELO": L.C1, # Lowest incident neutron energy for this subsection
             "EHI": L.C2, # Highest incident neutron energy for this subsection
             "LS": L.L1, # Flag to indicate if the covariance matrix is symmetric
             "LB": L.L2, # Flag to indicate if the covariance matrix is given in absolute or relative terms
             "NE": L.N2, # Number of entries in the array containing outgoing particle energies
             "EK": L.B[:L.N2], # Array containing outgoing particle energies
             "FKK": L.B[L.N2:]} # Covariance matrix ordered by rows and starting from the diagonal term
        out["SUB"].update({k+1: D})  # subsections are keyed 1-based
    return out
| 34.279412 | 108 | 0.592879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,733 | 0.743458 |
9cd80afed190fb3c356c200fcfd34546c3bed147 | 29,748 | py | Python | src/pycropml/transpiler/antlr_py/parse.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | [
"MIT"
] | 5 | 2020-06-21T18:58:04.000Z | 2022-01-29T21:32:28.000Z | src/pycropml/transpiler/antlr_py/parse.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | [
"MIT"
] | 27 | 2018-12-04T15:35:44.000Z | 2022-03-11T08:25:03.000Z | src/pycropml/transpiler/antlr_py/parse.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | [
"MIT"
] | 7 | 2019-04-20T02:25:22.000Z | 2021-11-04T07:52:35.000Z | import pycropml.transpiler.antlr_py.grammars
from pycropml.transpiler.antlr_py.grammars.CSharpLexer import CSharpLexer
from pycropml.transpiler.antlr_py.grammars.CSharpParser import CSharpParser
from pycropml.transpiler.antlr_py.grammars.Fortran90Lexer import Fortran90Lexer
from pycropml.transpiler.antlr_py.grammars.Fortran90Parser import Fortran90Parser
from pycropml.transpiler.antlr_py.csharp import csharp_generate_tree
from pycropml.transpiler.antlr_py.fortran import fortran_generate_tree
from antlr4 import *
import warnings
import inspect
from typing import Dict, Optional, List, Union, Type, Any, Callable
from functools import reduce
from collections import OrderedDict, namedtuple
from ast import AST, NodeTransformer
from antlr4.Token import CommonToken
from antlr4 import CommonTokenStream, ParseTreeVisitor, ParserRuleContext, RuleContext
from antlr4.tree.Tree import ErrorNode, TerminalNodeImpl, ParseTree
from antlr4.error.ErrorListener import ErrorListener, ConsoleErrorListener
from operator import methodcaller
from antlr4 import InputStream
# raw-source format keys supported by the converters in this module
languages = ['cs',"bioma", 'f90', 'dssat']
# format key -> name of the `<name>_generate_tree` backend module (see `genTree` below)
gen = {'cs':"csharp","bioma":"csharp", 'f90':"fortran", 'dssat':"fortran"}
# format key -> prefix of the generated ANTLR Lexer/Parser class names
NAMES = {'cs':'CSharp','sirius':'CSharp',"bioma":"CSharp", 'f90':'Fortran90', 'dssat':'Fortran90'}
def langLexerParser(ant):
    """Map each supported format key to its generated ANTLR class.

    :param ant: class-name suffix, either ``"Lexer"`` or ``"Parser"``
    :return: dict mapping a key from ``languages`` to the class
        ``pycropml.transpiler.antlr_py.grammars.<Name><ant>.<Name><ant>``
    """
    result = {}
    for fmt in languages:
        cls_name = '%s%s' % (NAMES[fmt], ant)
        # the generated module and the class it contains share the same name
        module = getattr(pycropml.transpiler.antlr_py.grammars, cls_name)
        result[fmt] = getattr(module, cls_name)
    return result
# ANTLR lexer and parser classes for each supported format
LexersGenerators = langLexerParser("Lexer")
ParsersGenerators = langLexerParser("Parser")
# format key -> `<backend>_generate_tree` module used to drive the parse (see `parsef`)
genTree= {
    format: getattr(
        getattr(
            pycropml.transpiler.antlr_py,
            '%s' % (gen[format])),
        '%s_generate_tree' % (gen[format]))
    for format in languages
}
def parsef(code, language,
           start="compilation_unit",
           strict = "False",
           transform: Union[str, Callable] = None,
           error_listener: ErrorListener = None,
           ):
    """Parse *code* for the given *language* key and return the generated tree.

    :param code: source text to parse
    :param language: key into ``LexersGenerators``/``ParsersGenerators``
        ('cs', 'bioma', 'f90' or 'dssat')
    :return: tree produced by the language's ``*_generate_tree.generate``

    NOTE(review): ``start``, ``strict``, ``transform`` and ``error_listener``
    are accepted but not used by this implementation — confirm whether they
    are vestigial before removing them.
    """
    source = InputStream(code)  # encoding="utf-8"
    lexer = LexersGenerators[language](source)
    lexer.removeErrorListeners()
    lexer.addErrorListener(LexerErrorListener())
    tokens = CommonTokenStream(lexer)
    parser = ParsersGenerators[language](tokens)
    # tree = parser.compilation_unit()
    tree = genTree[language].generate(parser)
    parser.buildParseTrees = True  # default
    return tree
"""
from antlr-ast
It allows you to use ANTLR grammars and use the parser output to generate an abstract syntax tree (AST).
https://github.com/datacamp/antlr-ast/blob/master/README.md
"""
class CaseTransformInputStream(InputStream):
    """Input stream supporting case insensitive languages.

    https://github.com/antlr/antlr4/blob/master/doc/case-insensitive-lexing.md#custom-character-streams-approach
    """

    UPPER = "upper"
    LOWER = "lower"

    def __init__(self, *args, transform=None, **kwargs):
        if transform is None:
            # identity transform: leave the input untouched
            self.transform = lambda c: c
        elif transform in (self.UPPER, self.LOWER):
            # the sentinel strings double as the str method name to call
            self.transform = methodcaller(transform)
        elif callable(transform):
            self.transform = transform
        else:
            raise ValueError("Invalid transform")
        super().__init__(*args, **kwargs)

    def _loadString(self):
        # apply the transform character-by-character while loading the data
        self._index = 0
        self.data = [ord(self.transform(char)) for char in self.strdata]
        self._size = len(self.data)

    def __repr__(self):
        return "<{} {}>".format(type(self).__name__, self.transform)
def dump_node(node, node_class=AST):
    """Recursively serialize an AST into nested dicts and lists.

    Nodes become ``{"type": <class name>, "data": <field mapping>}`` where
    fields whose value is ``None`` are omitted; lists are dumped element-wise
    and any other value is returned unchanged.
    """
    if isinstance(node, node_class):
        data = OrderedDict()
        for field in node._fields:
            value = getattr(node, field, None)
            if value is not None:
                data[field] = dump_node(value, node_class=node_class)
        return {"type": type(node).__name__, "data": data}
    if isinstance(node, list):
        return [dump_node(item, node_class=node_class) for item in node]
    return node
# (name, origin): the field's public name plus the attribute path it is read from
FieldSpec = namedtuple("FieldSpec", ["name", "origin"])


def parse_field_spec(spec: str) -> FieldSpec:
    """Parse a field spec like ``"alias=field.sub"`` into a :class:`FieldSpec`.

    Without ``=`` the name doubles as the origin; the origin is split on ``.``
    into an attribute path, e.g. ``"x=a.b"`` -> ``FieldSpec("x", ["a", "b"])``.
    """
    parts = [piece.strip() for piece in spec.split("=")]
    name = parts[0]
    source = parts[1] if len(parts) > 1 else name
    return FieldSpec(name, source.split("."))
class AstNodeMeta(type):
    """Metaclass computing a class's ``_fields`` tuple from its ``_fields_spec``."""

    @property
    def _fields(cls):
        # keep first-occurrence order while dropping duplicate field names
        seen = OrderedDict()
        for spec in cls._fields_spec:
            seen[parse_field_spec(spec).name] = None
        return tuple(seen)
# Speaker class ---------------------------------------------------------------
class Speaker:
    """Translates AST node and field identifiers into human friendly names."""

    def __init__(self, **cfg):
        """Initialize speaker instance, for a set of AST nodes.

        Arguments:
            nodes: dictionary of node names and their human friendly names.
                An entry may also be a dict containing ``name`` (the friendly
                name) and ``fields`` (a dict overriding the field names for
                that node).
            fields: dictionary of human friendly field names, used as a
                default for each node.
        """
        self.node_names = cfg["nodes"]
        self.field_names = cfg.get("fields", {})

    def describe(self, node, fmt="{node_name}", field=None, **kwargs):
        """Render *fmt* with friendly names substituted for *node*/*field*."""
        if field:
            # per-speaker name, falling back to "words from snake_case"
            default_field = self.field_names.get(field) or field.replace("_", " ")
        else:
            default_field = ""
        cls_name = type(node).__name__
        node_cfg = self.node_names.get(cls_name, cls_name)
        node_name, field_overrides = self.get_info(node_cfg)
        values = {
            "node": node,
            "field_name": field_overrides.get(field, default_field),
            "node_name": node_name.format(node=node),
        }
        return fmt.format(**values, **kwargs)

    @staticmethod
    def get_info(node_cfg):
        """Return a tuple with the verbal name of a node, and a dict of field names."""
        if not isinstance(node_cfg, dict):
            node_cfg = {"name": node_cfg}
        return node_cfg.get("name"), node_cfg.get("fields", {})
# Error Listener ------------------------------------------------------------------
# from antlr4.error.Errors import RecognitionException
class AntlrException(Exception):
    """Wraps an ANTLR recognition error with a human readable message."""

    def __init__(self, msg, orig):
        # `orig` keeps the original ANTLR exception (may be None)
        self.msg = msg
        self.orig = orig
class StrictErrorListener(ErrorListener):
    """Error listener that raises :class:`AntlrException` on syntax errors.

    Ambiguity and context-sensitivity reports are deliberately ignored.
    The recognizer passed to the callbacks is the parser instance.
    """

    def syntaxError(self, recognizer, badSymbol, line, col, msg, e):
        raise AntlrException("line {}:{} {}".format(line, col, msg), e)

    def reportAmbiguity(
        self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs
    ):
        return
        # raise Exception("TODO")

    def reportAttemptingFullContext(
        self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs
    ):
        return
        # raise Exception("TODO")

    def reportContextSensitivity(
        self, recognizer, dfa, startIndex, stopIndex, prediction, configs
    ):
        return
        # raise Exception("TODO")
class LexerErrorListener(ConsoleErrorListener):
    """Console listener that appends stream info for case-transformed inputs."""

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        if isinstance(e.input, CaseTransformInputStream):
            # show which case transform was active when the error occurred
            msg = "{} {!r}".format(msg, e.input)
        super().syntaxError(recognizer, offendingSymbol, line, column, msg, e)
# Parse Tree Visitor ----------------------------------------------------------
# TODO: visitor inheritance not really needed, but indicates compatibility
# TODO: make general node (Terminal) accessible in class property (.subclasses)?
# Mapping from a field/label name to the index (or list of indices) of the
# corresponding child in a node's `children` list.
IndexReferences = Dict[str, Union[int, List[int]]]
class BaseNode(AST):
    """AST is subclassed so we can use Python ast module visiting and walking on the custom AST"""
    def __init__(
        self,
        children: list,
        field_references: IndexReferences,
        label_references: IndexReferences,
        ctx: Optional[ParserRuleContext] = None,
        position: Optional[dict] = None,
        text: Optional[str] = None,
    ):
        # children: transformed child nodes; the *_references dicts map
        # field/label names to indices into that list (resolved via materialize)
        self.children = children
        self._field_references = field_references
        self.children_by_field = materialize(self._field_references, self.children)
        self._label_references = label_references
        self.children_by_label = materialize(self._label_references, self.children)
        # _ctx keeps the originating ANTLR context for get_text/get_position
        self._ctx = ctx
        self.position = position
        self.text = text
    _fields = ()
    # whether to descend for selection (greater descends into lower)
    _priority = 2
    # getattr: return None or raise for nonexistent attr
    # in Transformer conditionals:
    # - getattr(obj, attr, None) works with both
    # - hasattr(obj, attr) if strict
    # - obj.attr if not strict
    _strict = False
    @classmethod
    def create(
        cls,
        ctx: ParserRuleContext,
        children: Optional[list] = None,
        registry: Optional["BaseNodeRegistry"] = None,
    ) -> "BaseNode":
        """Build a BaseNode (dynamic subclass) from an ANTLR parse-tree context."""
        if registry is None:
            registry = BaseNodeRegistry()
        if children is None:
            children = ctx.children
        field_names = get_field_names(ctx)
        children_by_field = get_field_references(ctx, field_names)
        label_names = get_label_names(ctx)
        children_by_label = get_field_references(ctx, label_names)
        # e.g. "Select_stmtContext" -> dynamic class "Select_stmt"
        cls_name = type(ctx).__name__.split("Context")[0]
        subclass = registry.get_cls(cls_name, tuple(field_names))
        return subclass(children, children_by_field, children_by_label, ctx)
    @classmethod
    def create_cls(cls, cls_name: str, field_names: tuple) -> Type["BaseNode"]:
        """Create a new BaseNode subclass named *cls_name* with the given _fields."""
        return type(cls_name, (cls,), {"_fields": field_names})
    def __getattr__(self, name):
        # Only called for attributes not found normally; resolves labels first.
        # NOTE: a falsy label value falls through to the field lookup (`or`),
        # and an unknown name raises KeyError from children_by_field below.
        try:
            result = self.children_by_label.get(name) or self.children_by_field[name]
        except KeyError:
            if self._strict:
                raise AttributeError(
                    "{}.{} is invalid.".format(self.__class__.__name__, name)
                )
            else:
                result = None
        return result
    @classmethod
    def combine(cls, *fields: "BaseNode") -> List["BaseNode"]:
        """Combine fields
        Creates a list field from other fields
        Filters None and combines other elements in a flat list
        Use in transformer methods.
        """
        result = reduce(cls.extend_node_list, fields, [])
        return result
    @staticmethod
    def extend_node_list(
        acc: List["BaseNode"], new: Union[List["BaseNode"], "BaseNode"]
    ) -> List["BaseNode"]:
        """Extend accumulator with Node(s) from new"""
        if new is None:
            new = []
        elif not isinstance(new, list):
            new = [new]
        return acc + new
    def get_text(self, full_text: str = None) -> Optional[str]:
        # TODO implement as __str__?
        # + easy to combine with str/Terminal
        # + use Python instead of custom interface
        # (-) very different from repr / json
        # Returns the source text for this node: sliced out of *full_text*
        # when provided, otherwise taken from the ANTLR context / self.text.
        text = None
        if isinstance(self._ctx, (TerminalNodeImpl, RuleContext)):
            if full_text is None:
                text = self._ctx.getText()
            elif getattr(self._ctx, "start", None) and getattr(self._ctx, "stop", None):
                # parser rule context: slice between start and stop tokens
                text = full_text[self._ctx.start.start : self._ctx.stop.stop + 1]
            elif (
                getattr(self._ctx, "symbol", None)
                and getattr(self._ctx.symbol, "start", None)
                and getattr(self._ctx.symbol, "stop", None)
            ):
                # terminal node: slice using its single token's extent
                text = full_text[self._ctx.symbol.start : self._ctx.symbol.stop + 1]
        if text is None and self.text:
            text = self.text
        return text
    def get_position(self) -> Optional[Dict[str, int]]:
        """Return 1-based line / 0-based column extent, derived from the ANTLR ctx."""
        position = None
        ctx = self._ctx
        if ctx is not None:
            if isinstance(ctx, TerminalNodeImpl):
                # a terminal spans a single token (and therefore a single line)
                position = {
                    "line_start": ctx.symbol.line,
                    "column_start": ctx.symbol.column,
                    "line_end": ctx.symbol.line,
                    "column_end": ctx.symbol.column
                    + (ctx.symbol.stop - ctx.symbol.start),
                }
            elif getattr(ctx, "start", None) and getattr(ctx, "stop", None):
                position = {
                    "line_start": ctx.start.line,
                    "column_start": ctx.start.column,
                    "line_end": ctx.stop.line,
                    "column_end": ctx.stop.column + (ctx.stop.stop - ctx.stop.start),
                }
        return position or self.position
    def __repr__(self):
        # labels override same-named fields in the merged view
        return str({**self.children_by_field, **self.children_by_label})
# TODO:
# `AstNode` is kept as a backwards-compatible alias for BaseNode.
AstNode = BaseNode
class Terminal(BaseNode):
    """This is a thin node wrapper for a string.
    The node is transparent when not in debug mode.
    In debug mode, it keeps the link to the corresponding ANTLR node.
    """
    _fields = tuple(["value"])
    DEBUG = True
    # NOTE: class-level registry shared by *all* Terminals created while DEBUG
    # is on; it grows for the lifetime of the process.
    DEBUG_INSTANCES = []
    def __new__(cls, *args, **kwargs):
        instance = super().__new__(cls, *args, **kwargs)
        if cls.DEBUG:
            cls.DEBUG_INSTANCES.append(instance)
            return instance
        else:
            # transparent mode: return the wrapped string itself
            # (args[0] is the `children` list, args[0][0] its only element)
            return args[0][0]
    @classmethod
    def from_text(cls, text: str, ctx: Optional[ParserRuleContext] = None):
        """Create a Terminal whose single "value" field is *text*."""
        return cls([text], {"value": 0}, {}, ctx)
    def __eq__(self, other):
        # compare by wrapped text, so Terminals compare equal to plain strings
        return self.value == other
    def __str__(self):
        # currently just used for better formatting in debugger
        return self.value
    def __repr__(self):
        return "'{}'".format(self.value)
class AliasNode(BaseNode, metaclass=AstNodeMeta):
    """High-level AST node whose fields are derived from a BaseNode via `_fields_spec`."""
    # TODO: look at AstNode methods
    # defines class properties
    # - as a property name to copy from ANTLR nodes
    # - as a property name defined in terms of (nested) ANTLR node properties
    # the field will be set to the first definition that is not undefined
    _fields_spec = []
    _fields = AstNodeMeta._fields
    # Defines which ANTLR nodes to convert to this node. Elements can be:
    # - a string: uses AstNode._from_fields as visitor
    # - a tuple ('node_name', 'ast_node_class_method_name'): uses ast_node_class_method_name as visitor
    # subclasses use _bind_to_visitor to create visit methods for the nodes in _rules on the ParseTreeVisitor
    # using this information
    _rules = []
    _priority = 1
    _strict = True
    def __init__(self, node: BaseNode, fields: Optional[Dict[str, Any]] = None):
        """Copy *node*'s children/references, then set the given field values."""
        # TODO: keep reference to node?
        # TODO: **fields? (easier notation, but hard to name future arguments
        super().__init__(
            node.children, node._field_references, node._label_references, node._ctx
        )
        fields = fields or {}
        for field, value in fields.items():
            if field not in self._fields:
                # unknown keys are set anyway, but flagged for the developer
                warnings.warn("Key not in fields: {}".format(field))
            setattr(self, field, value)
    @classmethod
    def from_spec(cls, node: BaseNode):
        """Build an instance by resolving each entry of ``_fields_spec`` on *node*."""
        # TODO: no fields_spec argument as before
        field_dict = {}
        for field_spec in cls._fields_spec:
            name, path = parse_field_spec(field_spec)
            # _fields_spec can contain field multiple times
            # e.g. x=a and x=b
            if field_dict.get(name):
                # or / elif behaviour
                continue
            # get node -----
            field_dict[name] = cls.get_path(node, path)
        return cls(node, field_dict)
    @classmethod
    def get_path(cls, node: BaseNode, path: List[str]):
        """Follow the attribute *path* from *node*; returns None on a missing link."""
        # TODO: can be defined on FieldNode too
        result = node
        for i in range(len(path)):
            result = getattr(result, path[i], None)
            if result is None:
                break
        return result
    @classmethod
    def bind_to_transformer(
        cls,
        transformer_cls: Type["BaseNodeTransformer"],
        default_transform_method: str = "from_spec",
    ):
        """Register a visit method on *transformer_cls* for each rule in ``_rules``."""
        for rule in cls._rules:
            if isinstance(rule, str):
                cls_method = default_transform_method
            else:
                # (rule_name, class_method_name) tuples select a custom factory
                rule, cls_method = rule[:2]
            transformer_method = cls.get_transformer(cls_method)
            bind_to_transformer(transformer_cls, rule, transformer_method)
    @classmethod
    def get_transformer(cls, method_name: str):
        """Get method to bind to visitor"""
        transform_function = getattr(cls, method_name)
        assert callable(transform_function)
        def transformer_method(self, node):
            kwargs = {}
            # factories that declare a `helper` parameter receive the
            # transformer's TransformerHelper instance
            if inspect.signature(transform_function).parameters.get("helper"):
                kwargs["helper"] = self.helper
            return transform_function(node, **kwargs)
        return transformer_method
class BaseNodeRegistry:
    """Cache of dynamically generated ``BaseNode`` subclasses, keyed by class name."""

    def __init__(self):
        self.dynamic_node_classes = {}

    def get_cls(self, cls_name: str, field_names: tuple) -> Type[BaseNode]:
        """Return the dynamic class for *cls_name*, creating or widening it.

        When the class already exists, its ``_fields`` are extended with any
        new names from *field_names* (set-union, order unspecified).
        """
        existing = self.dynamic_node_classes.get(cls_name)
        if existing is None:
            self.dynamic_node_classes[cls_name] = BaseNode.create_cls(
                cls_name, field_names
            )
        else:
            merged = tuple(set(existing._fields) | set(field_names))
            if len(merged) > len(existing._fields):
                existing._fields = merged
        return self.dynamic_node_classes[cls_name]

    def isinstance(self, instance: BaseNode, class_name: str) -> bool:
        """Check if a BaseNode is an instance of a registered dynamic class."""
        if not isinstance(instance, BaseNode):
            raise TypeError("This function can only be used for BaseNode objects")
        klass = self.dynamic_node_classes.get(class_name)
        # not an instance of a class in the registry -> False
        return isinstance(instance, klass) if klass else False
# TODO: test: if 'visit' in method, it has to be as 'visit_'
class BaseNodeTransformer(NodeTransformer):
    """`ast.NodeTransformer` that replaces BaseNodes with their AliasNode form.

    For a node of dynamic class ``Foo``, a method ``visit_Foo`` (normally
    installed by :meth:`AliasNode.bind_to_transformer`) produces the
    replacement; nodes without a matching method are visited generically.
    """
    def __init__(self, registry: BaseNodeRegistry):
        # helper is handed to transform methods that declare a `helper` param
        self.helper = TransformerHelper(registry)
    def visit(self, node: BaseNode):
        # TODO: I think transform_ + node.__class__.__name__ would be better/clearer then
        # as the node methods don't need to do any visiting (which is completely done by visit and generic_visit)
        method = "visit_" + type(node).__name__
        transformer = getattr(self, method, None)
        if transformer is None:
            return self.generic_visit(node)
        else:
            alias = transformer(node)
            if isinstance(alias, AliasNode) or alias == node:
                # this prevents infinite recursion and visiting
                # AliasNodes with a name that is also the name of a BaseNode
                if isinstance(alias, BaseNode):
                    self.generic_visit(alias)
            else:
                # visit BaseNode (e.g. result of Transformer method)
                if isinstance(alias, list):
                    # Transformer method can return array instead of node
                    alias = [
                        self.visit(el) if isinstance(el, BaseNode) else el
                        for el in alias
                    ] # TODO: test
                elif isinstance(alias, BaseNode):
                    alias = self.visit(alias)
            return alias
    def visit_Terminal(self, terminal: Terminal) -> Terminal:
        """ Handle Terminal the same as other non-node types"""
        return terminal
    @classmethod
    def bind_alias_nodes(cls, alias_classes: List[Type[AliasNode]]):
        """Install visit methods on this class for every AliasNode that defines _rules."""
        for item in alias_classes:
            if getattr(item, "_rules", None) is not None:
                item.bind_to_transformer(cls)
def bind_to_transformer(
    transformer_cls: Type[BaseNodeTransformer],
    rule_name: str,
    transformer_method: Callable,
):
    """Assign AST node class constructors to parse tree visitors."""
    method_name = get_transformer_method_name(rule_name)
    setattr(transformer_cls, method_name, transformer_method)
def get_transformer_method_name(rule_name: str) -> str:
    """Return the visit method name for a rule, e.g. ``expr`` -> ``visit_Expr``.

    Only the first character is uppercased; the rest of the casing is kept.
    """
    capitalized = rule_name[0].upper() + rule_name[1:]
    return "visit_" + capitalized
class TransformerHelper:
    """Small facade over the node registry, handed to transformer methods."""

    def __init__(self, registry: BaseNodeRegistry):
        self.registry = registry

    def isinstance(self, *args):
        # delegate dynamic-class instance checks to the registry
        return self.registry.isinstance(*args)
def get_alias_nodes(items) -> List[Type[AstNode]]:
    """Select the AliasNode subclasses from *items*, ignoring everything else."""
    return [
        item
        for item in items
        if inspect.isclass(item) and issubclass(item, AliasNode)
    ]
def simplify_tree(tree, unpack_lists=True, in_list=False):
    """Recursively unpack single-item lists and objects where fields and labels only reference a single child
    :param tree: the tree to simplify (mutating!)
    :param unpack_lists: whether single-item lists should be replaced by that item
    :param in_list: this is used to prevent unpacking a node in a list as AST visit can't handle nested lists
    """
    # TODO: copy (or (de)serialize)? outside this function?
    if isinstance(tree, BaseNode) and not isinstance(tree, Terminal):
        # fields that actually hold a (truthy) child value
        used_fields = [field for field in tree._fields if getattr(tree, field, False)]
        if len(used_fields) == 1:
            result = getattr(tree, used_fields[0])
        else:
            result = None
        if (
            len(used_fields) != 1
            or isinstance(tree, AliasNode)
            or (in_list and isinstance(result, list))
        ):
            # keep the node itself and simplify each populated field in place
            result = tree
            for field in tree._fields:
                old_value = getattr(tree, field, None)
                if old_value:
                    setattr(
                        result,
                        field,
                        simplify_tree(old_value, unpack_lists=unpack_lists),
                    )
            return result
            assert result is not None  # NOTE(review): unreachable — follows a return; likely meant to precede it
    elif isinstance(tree, list) and len(tree) == 1 and unpack_lists:
        # single-item list: unwrap, then simplify the unwrapped value below
        result = tree[0]
    else:
        if isinstance(tree, list):
            result = [
                simplify_tree(el, unpack_lists=unpack_lists, in_list=True)
                for el in tree
            ]
        else:
            result = tree
        return result
    # reached for a single-field BaseNode or an unwrapped single-item list
    return simplify_tree(result, unpack_lists=unpack_lists)
class BaseAstVisitor(ParseTreeVisitor):
    """Visitor that creates a high level tree
    ~ ANTLR tree serializer
    + automatic node creation using field and label detection
    + alias nodes can work on tree without (ANTLR) visitor
    Used from BaseAstVisitor: visitTerminal, visitErrorNode
    TODO:
    - [done] support labels
    - [done] make compatible with AST: _fields = () (should only every child once)
    - [done] include child_index to filter unique elements + order
    - [done] memoize dynamic classes, to have list + make instance checks work
    - [done] tree simplification as part of AliasNode
    - [done] flatten nested list (see select with dynamic clause ordering)
    - combine terminals / error nodes
    - serialize highlight info
    - [done] make compatible with AstNode & AstModule in protowhat (+ shellwhat usage: bashlex + osh parser)
    - combining fields & labels dicts needed?
    - use exact ANTLR names in _rules (capitalize name without changing other casing)
    - add labels to _fields if not overlapping with fields from rules
    - [done] eliminate overhead of alias parsing (store ref to child index, get children on alias access)
    - [necessary?] grammar must use lexer or grammar rules for elements that should be in the tree
    and literals for elements that cannot
    currently:
    - Use AliasNode to add labels to _fields, define custom fields and omit fields
    - Use Transformer to replace a node by a combination of fields
    - [rejected] alternative dynamic class naming:
    - pass parse start to visitor constructor, use as init for self.current_node
    - set self.current_node to field.__name__ before self.visit_field
    - use self.current_node to create dynamic classes
    (does not use #RuleAlias names in grammar)
    (other approach: transforming returned dict, needs more work for arrays + top level)
    Higher order visitor (or integrated)
    - [alternative] allow node aliases (~ AstNode._rules) by dynamically creating a class inheriting from the dynamic node class
    (multiple inheritance if node is alias for multiple nodes, class has combined _fields for AST compatibility
    - [alternative] allow field aliases using .aliases property with defaultdict(list) (~ AstNode._fields_spec)
    - dynamic fields? (~ visit_path)
    test code in parse:
    tree = parse_ast(grammar, sql_text, start, **kwargs)
    field_tree = BaseAstVisitor().visit(tree)
    alias_tree = AliasVisitor(Transformer()).visit(field_tree)
    import ast
    nodes = [el for el in ast.walk(field_tree)]
    import json
    json_str = json.dumps(field_tree, default=lambda o: o.to_json())
    """
    def __init__(self, registry: BaseNodeRegistry):
        # dynamic node classes created during the visit are cached here
        self.registry = registry
    def visitChildren(
        self, node: ParserRuleContext, predicate=None, simplify=False
    ) -> BaseNode:
        """Visit all children and wrap them in a dynamically created BaseNode."""
        # children is None if all parts of a grammar rule are optional and absent
        children = [self.visit(child) for child in node.children or []]
        instance = BaseNode.create(node, children, self.registry)
        return instance
    def visitTerminal(self, ctx: ParserRuleContext) -> Terminal:
        """Wrap the terminal token's text in a Terminal node.
        NOTE(review): the previous docstring claimed this lowercases
        case-insensitive keywords; no lowercasing happens here — case
        handling is done by CaseTransformInputStream.
        """
        text = ctx.getText()
        return Terminal.from_text(text, ctx)
    def visitErrorNode(self, node: ErrorNode):
        # error nodes are dropped from the serialized tree
        return None
# ANTLR helpers
def get_field(ctx: ParserRuleContext, field: str):
    """Resolve a field on an ANTLR context to its value.

    *field* may be an attribute name (string) or the attribute itself.
    Rule accessors (methods) are called; a ``CommonToken`` (produced when a
    lexer rule is given a label) is replaced by the terminal child node that
    carries it.
    """
    target = field
    if isinstance(target, str):
        target = getattr(ctx, target, None)
    if callable(target):
        # when not alias needs to be called
        target = target()
    elif isinstance(target, CommonToken):
        # giving a name to lexer rules sets it to a token,
        # rather than the terminal node corresponding to that token,
        # so we need to find it in children
        token = target
        target = next(
            filter(lambda child: getattr(child, "symbol", None) is token, ctx.children)
        )
    return target
def get_field_references(
    ctx: ParserRuleContext, field_names: List[str], simplify=False
) -> Dict[str, Any]:
    """
    Create a mapping from fields to corresponding child indices
    :param ctx: ANTLR node
    :param field_names: list of strings
    :param simplify: if True, omits fields with empty lists or None
        this makes it easy to detect nodes that only use a single field
        but it requires more work to combine fields that can be empty
    :return: mapping str -> int | int[]
    """
    references = {}
    for name in field_names:
        value = get_field(ctx, name)
        is_empty = value is None or (isinstance(value, list) and len(value) == 0)
        if simplify and is_empty:
            continue
        if isinstance(value, list):
            references[name] = [ctx.children.index(child) for child in value]
        elif value is not None:
            references[name] = ctx.children.index(value)
        else:
            references[name] = None
    return references
def materialize(reference_dict: IndexReferences, source: List[Any]) -> Dict[str, Any]:
    """
    Replace indices by actual elements in a reference mapping
    :param reference_dict: mapping str -> int | int[]
    :param source: list of elements
    :return: mapping str -> element | element[]
    """
    resolved = {}
    for name, reference in reference_dict.items():
        if isinstance(reference, list):
            resolved[name] = [source[index] for index in reference]
        elif reference is not None:
            resolved[name] = source[reference]
        else:
            resolved[name] = None
    return resolved
def get_field_names(ctx: ParserRuleContext):
    """Get fields defined in an ANTLR context for a parser rule"""
    # Rule/token accessors live on the context *class*. Dunders, the generated
    # housekeeping methods and some stray literal tokens are excluded.
    # TODO: check ANTLR parser template for full exclusion list
    excluded = {
        "accept", "enterRule", "exitRule", "getRuleIndex", "copyFrom",
        'OPEN_BRACE', "CLOSE_BRACE", "COMMA",
    }
    return [
        name
        for name in type(ctx).__dict__
        if not name.startswith("__") and name not in excluded
    ]
def get_label_names(ctx: ParserRuleContext):
    """Get labels defined in an ANTLR context for a parser rule"""
    # Labels are plain instance attributes; skip private ones and the
    # bookkeeping attributes every ParserRuleContext instance carries.
    reserved = {
        "children",
        "exception",
        "invokingState",
        "parentCtx",
        "parser",
        "start",
        "stop",
    }
    return [
        name
        for name in ctx.__dict__
        if not name.startswith("_") and name not in reserved
    ]
| 35.797834 | 128 | 0.624849 | 19,674 | 0.661355 | 0 | 0 | 4,176 | 0.140379 | 0 | 0 | 9,678 | 0.325333 |
9cd8ab1b3376a9a6e91e80aece04a51923c049ca | 10,351 | py | Python | nrgpy/convert/convert_rld.py | kyarazhan/nrgpy | 6a81af79ee4f666892d99a5f0713495bff89214b | [
"MIT"
] | null | null | null | nrgpy/convert/convert_rld.py | kyarazhan/nrgpy | 6a81af79ee4f666892d99a5f0713495bff89214b | [
"MIT"
] | null | null | null | nrgpy/convert/convert_rld.py | kyarazhan/nrgpy | 6a81af79ee4f666892d99a5f0713495bff89214b | [
"MIT"
] | null | null | null | try:
from nrgpy import logger
except ImportError:
pass
from datetime import datetime
import os
import subprocess
import time
import traceback
from nrgpy.api.convert import nrg_api_convert
from nrgpy.utils.utilities import check_platform, windows_folder_path, affirm_directory, count_files
class local(object):
"""For handling NRG SymphoniePRO Data Logger raw data files in the *.rld format.
This method uses locally installed SymphoniePRO Desktop software to convert *.rld files to txt format (tab-delimited-text).
Parameters
----------
rld_dir : str, optional
specify directory. Note for unc values, you
will need to escape all forward slashes, e.g.
rld_dir = "\\\\sol\\techsupport\\data\\"
or use the r'\\path\to\dir' approach
out_dir : str, optional
see note for rld_dir.
encryption_pass : str
specify data encryption password if logger is set up for that.
hex_key : str
specify if using hex data encryption key
sympro_path : str
default is "C:\Program Files (x86)\Renewable NRG Systems\SymPRO Desktop\SymPRODesktop.exe"
process_type : str
[convert], or import
convert_type : str
'meas', alternately specify 'comm', 'diag', 'sample', or 'events'
nec : str
path to nec file
site_filter : str
specify part or all of the file you'd like to filter on, like site_filter='123456_2018-09'
would filter on site 123456 and only the month of September in 2018.
site_file : bool or str
set to True to use local ndb site file, or set to path to an alternate ndb site file
Examples
--------
Convert a folder of RLD files to Text with SymphoniePRO Desktop Software
>>> from nrgpy.convert_rld import local
>>> converter = local(
rld_dir='/path/to/rld/files',
out_dir=/path/to/txt/outputs,
file_filter='123456_2020-01', # for files from January 2020
)
>>> converter.convert()
"""
    def __init__(self, rld_dir='', out_dir='', encryption_pass='', hex_key='', filename='',
                 sympro_path=r'"C:/Program Files (x86)/Renewable NRG Systems/SymPRO Desktop/SymPRODesktop.exe"',
                 process_type='convert', convert_type='meas', nec='', site_filter='', site_file='', **kwargs):
        """Store converter paths/options; on Windows, convert `filename` immediately if given."""
        self.rld_dir = windows_folder_path(rld_dir)
        self.out_dir = windows_folder_path(out_dir)
        self.encryption_pass = encryption_pass
        self.hex_key = hex_key
        self.sympro_path = sympro_path
        self.process_type = process_type
        self.convert_type = convert_type
        self.nec = nec
        self.site_filter = site_filter
        self.site_file = site_file
        # `file_filter` kwarg is accepted as an alias for `site_filter`
        if 'file_filter' in kwargs and site_filter == '':
            self.file_filter = kwargs.get('file_filter')
            self.site_filter = self.file_filter
        if check_platform() == 'win32':
            # single-file mode: convert right away instead of waiting for directory()
            if filename:
                affirm_directory(self.out_dir)
                self.single_file(filepath=filename)
        else:
            print("""
            convert_rld.local() method ONLY compatible with Windows OS.
            Please use nrgpy.cloud_convert() method instead.
            Alternately, follow the instructions for using SymphoniePRO Desktop
            with wine here:
            https://github.com/nrgpy/nrgpy/blob/master/SymPRODeskop_Linux_README.md
            """)
def directory(self):
"""processes all rld files in self.rld_dir, outputs to txt files to out_dir"""
affirm_directory(self.out_dir)
try:
if self.encryption_pass:
encryption = '/pass "{0}"'.format(self.encryption_pass)
else:
encryption = ''
except:
print('could not parse encryption_pass')
try:
if self.hex_key:
encryption_key = '/key "{0}"'.format(self.hex_key)
else:
encryption_key = ''
except:
print('could not parse hex_key')
try:
if self.nec:
nec = '/config "{0}"'.format(self.nec)
else:
nec = ''
except:
print('could not parse encryption_pass')
try:
if self.site_file:
site_file = '/site '
elif self.site_file:
site_file = '/site "{0}"'.format(self.site_file)
else:
site_file = ''
except:
print('could not parse encryption_pass')
try:
rld_count = count_files(self.rld_dir, self.site_filter, 'rld')
self.start_time = time.time()
logger.info('converting {0} files from {1}'.format(rld_count, self.rld_dir))
print('\nConverting {0} files from {1}\n'.format(rld_count, self.rld_dir))
print('Saving outputs to {0}'.format(self.out_dir))
cmd = [
self.sympro_path,
"/cmd", self.process_type,
"/file", '"'+"\\".join([self.rld_dir, '*'+self.site_filter])+'*.rld"',
encryption,
encryption_key,
nec,
site_file,
"/type", '"'+self.convert_type+'"',
"/outputdir", '"'+self.out_dir[:-1]+'"'
]
# print('\nUsing command line script:\n{}'.format(" ".join(cmd)))
self.cmd = cmd
self.start = datetime.now()
subprocess.run(" ".join(cmd), stdout=subprocess.PIPE)
self.end = datetime.now()
self.convert_time = str(self.end - self.start)
logger.info('TXT files saved in {0}'.format(self.out_dir))
print('\nTXT files saved in {0}\n'.format(self.out_dir))
txt_count = count_files(self.out_dir, self.site_filter, 'txt', start_time=self.start_time)
log_count, log_files = count_files(self.out_dir, self.site_filter, 'log', show_files=True, start_time=self.start_time)
print('RLDs in : {}'.format(rld_count))
print('TXTs out : {}'.format(txt_count))
print('LOGs out : {}'.format(log_count))
if len(log_files) > 0:
print('Log files created:')
for _filename in log_files:
print('\t{}'.format(_filename))
print('----------------\nDifference : {}'.format(rld_count - (txt_count + log_count)))
except FileNotFoundError:
logger.error('SymphoniePRO Desktop Application not found: {0}'.format(self.sympro_path))
print("""
No instance of SymphoniePRO Desktop Application found.
Please follow the link below to download and install this software:
https://www.nrgsystems.com/support/product-support/software/symphoniepro-desktop-application
""")
except:
logger.error("unable to process files in {0}".format(self.rld_dir))
logger.debug(traceback.format_exc())
print('Unable to process files in directory')
    def convert(self):
        """Convenience alias for :meth:`directory` (batch conversion)."""
        self.directory()
    def process(self):
        """Convenience alias for :meth:`directory` (batch processing)."""
        self.directory()
def rename_rlds(self, **kwargs):
"""uses SymPRO utility NrgRldSiteSerialRename.exe to rename files with site number and logger serial number.
This function is only compatible with Windows>=7 AND
a local installation of SymphoniePRO Desktop software
"""
try:
renamer_path = kwargs.get('renamer_path', r"C:/Program Files (x86)/Renewable NRG Systems/SymPRO Desktop/Default Application Files/Utilities/NrgRldSiteSerialRename.exe")
for f in os.listdir(self.rld_dir):
filepath = self.rld_dir + f
if f[-4:].lower()==".rld" and self.site_filter in f:
rename_cmd = [renamer_path, '"'+filepath+'"']
try:
subprocess.run(" ".join(rename_cmd), stdout=subprocess.PIPE)
except:
logger.error("unable to rename {0}".format(f))
print("Unable to rename {0}".format(f))
pass
else:
pass
except:
logger.error('Could not rename files')
logger.debug(traceback.format_exc())
print('Could not rename files')
def single_file(self, filepath=''):
self.filepath = filepath.replace('/','\\')
try:
if self.encryption_pass:
encryption = '/pass "{0}"'.format(self.encryption_pass)
else:
encryption = ''
except:
print('could not parse encryption_pass')
try:
if self.hex_key:
encryption_key = '/key "{0}"'.format(self.hex_key)
else:
encryption_key = ''
except:
print('could not parse hex_key')
try:
if self.nec:
nec = '/config "{0}"'.format(self.nec)
else:
nec = ''
except:
print('could not get nec file')
try:
if self.site_file:
site_file = '/site "{0}"'.format(self.site_file)
else:
site_file = ''
except:
print('could not get site file')
cmd = [
self.sympro_path,
"/cmd", "convert",
"/file", '"'+self.filepath+'"',
encryption,
encryption_key,
nec,
site_file,
"/type", '"'+self.convert_type+'"',
"/outputdir", '"'+self.out_dir[:-1]+'"'
]
self.cmd = cmd
try:
print("{0} ... \t\t".format(filepath), end="", flush=True)
subprocess.run(" ".join(cmd), stdout=subprocess.PIPE)
print("[DONE]")
except:
logger.error("processing {0} FAILED".format(filepath))
logger.debug(traceback.format_exc())
print("\n\t processing {0} [FAILED]".format(filepath))
pass
logger.info("files in {0} processed OK".format(self.rld_dir))
logger.info("TXT files saved to {0}".format(self.out_dir))
print("\nQueue processed\n")
nrg_convert_api = nrg_api_convert  # legacy alias — presumably kept so older imports of nrg_convert_api keep working; TODO confirm
| 35.088136 | 180 | 0.562651 | 10,015 | 0.967539 | 0 | 0 | 0 | 0 | 0 | 0 | 4,105 | 0.39658 |
9cd8b09c53d8ecf1b2984937b2ed05ef619c8318 | 23,127 | py | Python | gitea_api/models/timeline_comment.py | r7l/python-gitea-api | 31d3dba27ea7e551e2048a1230c4ab4d73365006 | [
"MIT"
] | 1 | 2022-02-09T23:43:26.000Z | 2022-02-09T23:43:26.000Z | gitea_api/models/timeline_comment.py | r7l/python-gitea-api | 31d3dba27ea7e551e2048a1230c4ab4d73365006 | [
"MIT"
] | null | null | null | gitea_api/models/timeline_comment.py | r7l/python-gitea-api | 31d3dba27ea7e551e2048a1230c4ab4d73365006 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TimelineComment(object):
    """Swagger model for one entry of a Gitea issue/PR timeline.

    NOTE: This class was originally emitted by the swagger code generator;
    the 29 hand-expanded property/setter pairs have been collapsed into
    generated properties with an identical public interface.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'assignee': 'User',
        'assignee_team': 'Team',
        'body': 'str',
        'created_at': 'datetime',
        'dependent_issue': 'Issue',
        'html_url': 'str',
        'id': 'int',
        'issue_url': 'str',
        'label': 'Label',
        'milestone': 'Milestone',
        'new_ref': 'str',
        'new_title': 'str',
        'old_milestone': 'Milestone',
        'old_project_id': 'int',
        'old_ref': 'str',
        'old_title': 'str',
        'project_id': 'int',
        'pull_request_url': 'str',
        'ref_action': 'str',
        'ref_comment': 'Comment',
        'ref_commit_sha': 'str',
        'ref_issue': 'Issue',
        'removed_assignee': 'bool',
        'resolve_doer': 'User',
        'review_id': 'int',
        'tracked_time': 'TrackedTime',
        'type': 'str',
        'updated_at': 'datetime',
        'user': 'User'
    }

    # In this model every attribute maps to the identically named JSON key,
    # so the map is derived instead of hand-maintained.
    attribute_map = {attr: attr for attr in swagger_types}

    def __init__(self, assignee=None, assignee_team=None, body=None, created_at=None, dependent_issue=None, html_url=None, id=None, issue_url=None, label=None, milestone=None, new_ref=None, new_title=None, old_milestone=None, old_project_id=None, old_ref=None, old_title=None, project_id=None, pull_request_url=None, ref_action=None, ref_comment=None, ref_commit_sha=None, ref_issue=None, removed_assignee=None, resolve_doer=None, review_id=None, tracked_time=None, type=None, updated_at=None, user=None):  # noqa: E501
        """TimelineComment - a model defined in Swagger"""  # noqa: E501
        # Snapshot the keyword values before introducing any other locals.
        values = dict(locals())
        self.discriminator = None
        for name in self.swagger_types:
            # Backing slot read/written by the generated property pair.
            setattr(self, '_' + name, None)
            if values[name] is not None:
                # Route through the property setter, exactly as the expanded
                # generated code did.
                setattr(self, name, values[name])

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        # Preserved from the generated code: merge dict items when the model
        # subclasses dict (never true for TimelineComment itself).
        if issubclass(TimelineComment, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TimelineComment):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _timeline_comment_property(name):
    """Build the standard swagger get/set property pair for attribute *name*."""
    private_name = '_' + name

    def fget(self):
        return getattr(self, private_name)

    def fset(self, value):
        setattr(self, private_name, value)

    return property(fget, fset,
                    doc="Gets or sets the {0} of this TimelineComment.".format(name))


# Install one property per swagger attribute; this replaces the 29 identical
# hand-expanded property/setter pairs without changing the public API.
for _attr_name in TimelineComment.swagger_types:
    setattr(TimelineComment, _attr_name, _timeline_comment_property(_attr_name))
del _attr_name
| 27.434164 | 519 | 0.598564 | 22,863 | 0.988585 | 0 | 0 | 14,919 | 0.64509 | 0 | 0 | 11,902 | 0.514637 |
9cd8b67231efc3277946220bc2baea81f196997d | 686 | py | Python | Blasting/Start.py | Erosion2020/SpaceCore | ba81bf1913461a200f9e88acb7d0d91d7deda8e8 | [
"MIT"
] | 4 | 2022-03-22T08:21:52.000Z | 2022-03-23T12:58:17.000Z | Blasting/Start.py | Erosion2020/SpaceCore | ba81bf1913461a200f9e88acb7d0d91d7deda8e8 | [
"MIT"
] | null | null | null | Blasting/Start.py | Erosion2020/SpaceCore | ba81bf1913461a200f9e88acb7d0d91d7deda8e8 | [
"MIT"
] | null | null | null | import Blasting
name = "Blasting"
input_message = f"[{name} #]"
def menu():
    """Print the weak-password brute-force submodule menu to stdout."""
    banner_lines = (
        "-------------------------------弱口令爆破子模块-------------------------------",
        "1、SSH弱口令爆破",
        "2、MySQL弱口令爆破",
        "输入exit退出",
        "-----------------------------------END-----------------------------------",
    )
    for line in banner_lines:
        print(line)
def start():
    """Run the interactive command loop for the brute-force submodule.

    Shows the menu once, then reads commands from stdin until the user
    types ``exit``.
    """
    menu()
    while True:
        choice = input(input_message)
        if choice == 'exit':
            return
        if choice == '1':
            Blasting.SSH.ssh_blasting()
        elif choice == '2':
            Blasting.MySQL.blasting_mysql()
        elif choice == 'help':
            menu()
        else:
            print("无效的输入,请重新输入")
| 23.655172 | 86 | 0.41691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.404509 |
9cd8dd0d174f6ca548c62ed627de0eb3a16baa55 | 379 | py | Python | events/migrations/0040_event_team_size.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 166 | 2019-05-16T23:46:08.000Z | 2022-03-31T05:20:23.000Z | events/migrations/0040_event_team_size.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 92 | 2020-01-18T22:51:53.000Z | 2022-03-12T01:23:57.000Z | events/migrations/0040_event_team_size.py | VadVergasov/clist | 4afcdfe88250d224043b28efa511749347cec71c | [
"Apache-2.0"
] | 23 | 2020-02-09T17:38:43.000Z | 2021-12-09T14:39:07.000Z | # Generated by Django 2.2.10 on 2020-04-03 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the integer ``team_size`` field to the ``event`` model."""

    # Must run after the 0039_event_limits migration of the events app.
    dependencies = [
        ('events', '0039_event_limits'),
    ]
    operations = [
        # New column with default=3; the default is applied to existing rows.
        migrations.AddField(
            model_name='event',
            name='team_size',
            field=models.IntegerField(default=3),
        ),
    ]
| 19.947368 | 49 | 0.591029 | 285 | 0.751979 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.245383 |
9cd9c5db682a634d72e6f3ad900937179bb19d00 | 1,303 | py | Python | src/origin_ledger_sdk/batch.py | project-origin/ledger-sdk-python | 46212e17e19f1d534c1717d3f3714f791731adea | [
"MIT"
] | null | null | null | src/origin_ledger_sdk/batch.py | project-origin/ledger-sdk-python | 46212e17e19f1d534c1717d3f3714f791731adea | [
"MIT"
] | null | null | null | src/origin_ledger_sdk/batch.py | project-origin/ledger-sdk-python | 46212e17e19f1d534c1717d3f3714f791731adea | [
"MIT"
] | null | null | null | from enum import Enum
from typing import List
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.batch_pb2 import Batch as SignedBatch
from .requests import AbstractRequest
from .requests.helpers import get_signer
class BatchStatus(Enum):
    """Lifecycle states of a submitted batch; each member's value equals its name."""
    UNKNOWN = 'UNKNOWN'
    PENDING = 'PENDING'
    COMMITTED = 'COMMITTED'
    INVALID = 'INVALID'
class Batch():
    """Collects requests and signs them into a single Sawtooth batch."""

    def __init__(self, signer_private_key: bytes):
        # Requests are only signed when get_signed_batch() is called.
        self._requests: List[AbstractRequest] = []
        self._signer_private_key = signer_private_key

    def add_request(self, request: AbstractRequest):
        """Queue *request* for inclusion in the batch."""
        self._requests.append(request)

    def get_signed_batch(self) -> SignedBatch:
        """Sign every queued request and wrap the transactions in a signed batch."""
        signer = get_signer(self._signer_private_key)
        transactions = []
        for request in self._requests:
            transactions.extend(request.get_signed_transactions(signer))
        header_bytes = BatchHeader(
            signer_public_key=signer.get_public_key().as_hex(),
            transaction_ids=[txn.header_signature for txn in transactions],
        ).SerializeToString()
        return SignedBatch(
            header=header_bytes,
            header_signature=signer.sign(header_bytes),
            transactions=transactions
        )
| 30.302326 | 101 | 0.707598 | 1,049 | 0.805065 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.029163 |
9cda6fa4db3a1a7b7de2d18f55af0184ab0c8ffc | 1,290 | py | Python | pybud/tests/search_algos.py | Tantan4321/PyBud | 7f9ac026f0b04296ffaa7fad9f905c1374ba7008 | [
"MIT"
] | 11 | 2020-01-23T08:30:15.000Z | 2022-02-11T04:12:30.000Z | pybud/tests/search_algos.py | Tantan4321/PyBud | 7f9ac026f0b04296ffaa7fad9f905c1374ba7008 | [
"MIT"
] | 5 | 2020-02-15T16:44:24.000Z | 2022-01-13T02:07:48.000Z | pybud/tests/search_algos.py | Tantan4321/PyBud | 7f9ac026f0b04296ffaa7fad9f905c1374ba7008 | [
"MIT"
] | 3 | 2020-02-15T16:30:02.000Z | 2020-08-19T06:58:35.000Z | # Implemented from: https://stackoverflow.com/questions/9501337/binary-search-algorithm-in-python
# Implemented from: https://stackoverflow.com/questions/9501337/binary-search-algorithm-in-python
def binary_search(sequence, value):
    """Return an index of *value* in the sorted *sequence*, or None if absent."""
    low = 0
    high = len(sequence) - 1
    while low <= high:
        middle = (low + high) // 2
        probe = sequence[middle]
        if probe < value:
            low = middle + 1
        elif value < probe:
            high = middle - 1
        else:
            return middle
    return None
def dfs(graph, node, visited=None):
    """Depth-first traversal of *graph* (dict: node -> iterable of neighbours).

    Returns the list of nodes reachable from *node*, in visit order. The
    original signature used a mutable default (``visited=[]``), which made the
    accumulator persist across calls; passing an explicit list still works.
    """
    if visited is None:
        visited = []
    if node not in visited:
        visited.append(node)
        for neighbour in graph[node]:
            dfs(graph, neighbour, visited)
    return visited
# Implemented from: https://pythoninwonderland.wordpress.com/2017/03/18/how-to-implement-breadth-first-search-in-python/
def bfs(graph, start):
    """Breadth-first traversal of *graph* (dict: node -> iterable of neighbours).

    Returns the nodes reachable from *start* in BFS discovery order. Nodes
    are marked as seen when enqueued (the original re-enqueued already-queued
    neighbours), and a deque replaces the O(n) ``list.pop(0)``; the returned
    order is unchanged.
    """
    from collections import deque
    explored = []          # nodes in the order they are dequeued
    seen = {start}         # everything ever enqueued, to avoid duplicates
    queue = deque([start])
    while queue:
        node = queue.popleft()
        explored.append(node)
        for neighbour in graph[node]:
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append(neighbour)
    return explored
| 30 | 120 | 0.599225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.351938 |
9cdaf7e31350059d27162184ea87fbd63d36f82d | 409 | py | Python | dust/exe.py | tanico-rikudo/raspi4 | ead6064ee5eaf0fb3459487047903aeb031189b7 | [
"MIT"
] | null | null | null | dust/exe.py | tanico-rikudo/raspi4 | ead6064ee5eaf0fb3459487047903aeb031189b7 | [
"MIT"
] | null | null | null | dust/exe.py | tanico-rikudo/raspi4 | ead6064ee5eaf0fb3459487047903aeb031189b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
from datetime import datetime as dt
import logging
import json
import sys
from particle_counter import ParticleCounter
# Make a counter instance; the constructor argument (30) is passed straight to
# ParticleCounter — presumably a sampling interval/window; TODO confirm against
# particle_counter.py.
device001 = ParticleCounter(30)
# Tell the counter which GPIO pin carries the sensor signal — assumes BCM pin
# numbering; TODO confirm.
device001.set_pin_number(PIN=14)
# Execute the particle count for the requested number of iterations
# (semantics defined in particle_counter — TODO confirm).
device001.particle_count(count_times=1000000)
# Finish: release all GPIO resources claimed by this process.
GPIO.cleanup()
print('==== Done ====')
9cdb60e1aaf4a83afb78be721e50868f4655f336 | 7,548 | py | Python | localization/pipeline/InstLoc.py | cameronwp/MMGIS | 5e6277bd4f0c8ff77cd12dce51d41abffe0ac394 | [
"Apache-2.0",
"Unlicense"
] | 63 | 2019-03-28T18:46:55.000Z | 2022-03-25T02:49:24.000Z | localization/pipeline/InstLoc.py | cameronwp/MMGIS | 5e6277bd4f0c8ff77cd12dce51d41abffe0ac394 | [
"Apache-2.0",
"Unlicense"
] | 75 | 2019-10-14T08:54:18.000Z | 2022-03-30T02:33:29.000Z | localization/pipeline/InstLoc.py | cameronwp/MMGIS | 5e6277bd4f0c8ff77cd12dce51d41abffe0ac394 | [
"Apache-2.0",
"Unlicense"
] | 15 | 2019-12-15T11:26:01.000Z | 2022-03-25T02:49:27.000Z | #! /usr/local/msl/bin/python
#******************************************************************************
# InstLoc.py <image.IMG/VIC>
#
# Project: Instrument Loco String for a given file
# Purpose: Localizations stored in python dictionary
#
# Author: Hallie Gengl
#
# Updated: 8/24/18 Corrine Rojas (crojas6@asu.edu)
# Updated: 6/26/19 Hallie Gengl
#
#******************************************************************************
import os
import sys
import parseVicarLabel
import FixedInstLoc
import MastInstLoc
import ArmInstLoc
import msl.instruments as instruments
import msl.PdsToVic as PdsToVic
import msl.placesTranslation as places
# Template record for a single instrument-localization result — presumably
# filled in downstream by the localization routines (never written in this
# module's visible code; TODO confirm). Keys cover product identity, spacecraft
# pose/clock data and derived map coordinates.
LocArray = {'Data_Product' : '', 'Instrument' : '', 'Spacecraft_Clock(sec)' : '',
        'Rover_Motion_Counter': '', 'Mission': '', 'Site_Frame_Origin_Offset_Vector': '',
        'Spacecraft_Quaternion': '', 'Sol_Number': '','Sequence_ID' : '',
        'Instrument_Azimuth(deg)': '','Instrument_Elevation(deg)': '',
        'Global_Northing(m)': '','Global_Easting(m)': '','Global_Elevation(m)': '',
        'Stereo': '','ObsType': '', 'LocType': '' ,'Frame': '','Method': '', 'APID' : '',
        'APID_Name' : '', 'Local_True_Solar_Time' : '', 'Local_Mean_Solar_Time' : '',
        'Planetary_Radius': '', 'Surface_Intersection_DEM': '', 'Rover_Global_Northing(m)': '',
        'Rover_Global_Easting(m)':'', 'Rover_Global_Elevation(m)': ''}
def runLoco(locType,filen,oLBL,oDAT):
    """Dispatch *filen* to the localizer matching *locType*.

    locType -- 'fixed', 'mast' or 'contact' (from instruments.InstDic).
    Returns (x, y, z, stereo_intersection_dem, p2xyz_status).

    NOTE(review): for any other locType value, x/y/z are never bound and
    the trailing prints raise NameError.
    """
    print "Entering runLoco.InstLoc.py"
    print "Here is str(locType) from " + __name__ + ".InstLoc.py: ", str(locType)
    print "Here is str(filen) from " + __name__ + ".InstLoc.py: ", str(filen)
    print "Here is str(oLBL) from " + __name__ + ".InstLoc.py: ",str(oLBL)
    print "Here is str(oDAT) from " + __name__ + ".InstLoc.py: ",str(oDAT)
    if locType == 'fixed':
        x,y,z,sid,p2xyz_status = FixedInstLoc.allLoc(filen)
    elif locType == 'mast':
        x,y,z,sid,p2xyz_status = MastInstLoc.allLoc(filen)
    elif locType == 'contact':
        # The arm localizer additionally needs the LBL/DAT companion files.
        x,y,z,sid,p2xyz_status = ArmInstLoc.ArmInstLoc(filen,oLBL,oDAT)
    print "Leaving " + __name__ + ".Instloc.py and returning: ", x, y, z
    print "Stereo Intersection DEM: " + sid
    print "p2xyz_status: " + str(p2xyz_status)
    return x,y,z,sid,p2xyz_status
def InstLocDB(filen):
    """Build and return the module-level LocArray localization record for *filen*.

    Parses the VICAR label of the (possibly converted) product, computes the
    rover and instrument positions, and fills LocArray in place.
    Returns None early for recovered/RADSendData APIDs.
    Requires the R2LIB environment variable (exits otherwise).
    """
    print "Entering InstLocDB.InstLoc.py"
    try:
        os.environ['R2LIB']
    except KeyError as e:
        print "%s is not set, run select" % (e)
        raise SystemExit
    print "Here is the filen from " + __name__ + ".InstLoc.py: ", filen
    original = filen
    #print "Split: ", os.path.splitext(filen)[0]
    #print os.path.splitext(filen)[1]
    filen,oDAT,oLBL = getNewProduct(filen)
    inst = parseVicarLabel.getInstrumentId(filen)
    if inst == 'CHEMCAM_SOH' or inst == 'CHEMCAM_PARMS':
        # NOTE(review): bare name expression -- this does NOT exit; it
        # evaluates the SystemExit class and discards it.  Probably meant
        # ``raise SystemExit`` or ``return``.
        SystemExit
    print "filename: ", filen
    print " creating array [" + __name__ + ".InstLoc.py]"
    print "instrument parsing of dictionary [" + __name__ + ".InstLoc.py]"
    #print "file: ",filen
    # NOTE(review): duplicate of the getInstrumentId call above.
    inst = parseVicarLabel.getInstrumentId(filen)
    #print "instrument: ", inst
    rover = parseVicarLabel.getSpacecraftId(filen)
    sol = parseVicarLabel.getPlanetDayNumber(filen)
    sclk = parseVicarLabel.getSclk(filen)
    oov = parseVicarLabel.getOriginOffsetVector(filen)
    q = parseVicarLabel.getOriginRotationQuaternion(filen)
    rmc = parseVicarLabel.getRoverMotionCounter(filen)
    az = parseVicarLabel.getAz(filen)
    el = parseVicarLabel.getEl(filen)
    #c = parseVicarLabel.getCameraCPoint(filen)
    #pId = parseVicarLabel.getProcessID(filen)
    ltst = parseVicarLabel.getLTST(filen)
    print "ltst :" + ltst
    lmst = parseVicarLabel.getLMST(filen)
    print "lmst :" + lmst
    # to do: add APP ID, Planetary Radius, Pointing Vector, ...
    # currently empty dictionaries
    seqID = parseVicarLabel.getSeqId(filen)
    apid = parseVicarLabel.getApId(filen)
    # Recovered products and RADSendData carry no localizable data.
    if parseVicarLabel.getApIdName(filen) == "McamLRecoveredProduct":
        return
    elif parseVicarLabel.getApIdName(filen) == "McamRRecoveredProduct":
        return
    elif parseVicarLabel.getApIdName(filen) == "RADSendData":
        return
    else:
        apidName = parseVicarLabel.getApIdName(filen)
    print rmc
    # Rover position in the orbital frame, looked up by site/drive counters.
    loc_x,loc_y,loc_z = places.getLocoRover('ops',rmc[0],rmc[1],'rover2orbital')
    inst = parseVicarLabel.getInstrumentId(filen)
    locType = instruments.InstDic[inst][1]
    print "Here is locType from " + __name__ + ".InstLoc.py: ",locType
    print "Here is filen from " + __name__ + ".InstLoc.py: ",filen
    print "Here is oLBL from " + __name__ + ".InstLoc.py: ",oLBL
    print "Here is oDAT from " + __name__ + ".InstLoc.py: ",oDAT
    x,y,z,sid,p2xyz_status = runLoco(locType,filen,oLBL,oDAT)
    stereo = parseVicarLabel.frameType(filen)
    LocArray['Stereo'] = stereo
    LocArray['Data_Product'] = original
    LocArray['Instrument'] = inst
    LocArray['Spacecraft_Clock(sec)'] = sclk
    LocArray['Rover_Global_Northing(m)'] = loc_x
    LocArray['Rover_Global_Easting(m)'] = loc_y
    LocArray['Rover_Global_Elevation(m)'] = loc_z
    LocArray['Global_Northing(m)'] = x
    LocArray['Global_Easting(m)'] = y
    LocArray['Global_Elevation(m)'] = z
    LocArray['LocType'] = locType
    LocArray['Rover_Motion_Counter'] = rmc
    LocArray['Site_Origin_Offset_Vector'] = oov
    LocArray['Quaternion'] = q
    LocArray['Instrument_Elevation(deg)'] = el
    LocArray['Instrument_Azimuth(deg)'] = az
    LocArray['Mission'] = rover
    LocArray['Sol_Number'] = sol
    #LocArray['Cpnt'] = c
    LocArray['Sequence_ID'] = seqID
    LocArray['Frame'] = 'orbital'
    LocArray['Local_Mean_Solar_Time'] = str(lmst)
    LocArray['Local_True_Solar_Time'] = str(ltst)
    LocArray['APID'] = apid
    LocArray['APID_Name'] = apidName
    LocArray['Surface_Intersection_DEM'] = sid
    LocArray['p2xyz_status_code'] = p2xyz_status
    #Print out the dictionary entry
    print "Here is dict.items(LocArray) from " + __name__ + ".InstLoc.py: ",dict.items(LocArray)
    print "Leaving " + __name__ + ".InstLoc.py returning: ", LocArray
    return LocArray
#InstLocDB(filename) #filename
def getNewProduct(filen):
    """Normalize *filen* into a VICAR product and its companion file names.

    For .VIC/.IMG inputs the file is used as-is; for .DAT/.LBL inputs the
    matching VICAR text file is generated via PdsToVic.
    Returns (filen, oDAT, oLBL).

    NOTE(review): for any other extension oDAT/oLBL are never bound and the
    trailing prints raise NameError.
    """
    if os.path.splitext(filen)[1] == '.VIC' or os.path.splitext(filen)[1] == '.IMG':
        oDAT = filen
        oLBL = filen
    if os.path.splitext(filen)[1] == '.DAT' or os.path.splitext(filen)[1] == '.LBL':
        # First line lacking 'ODL' means the label lives in the .LBL file.
        if 'ODL' not in open(filen).readline():
            oDAT = os.path.splitext(filen)[0] + '.DAT'
            oLBL = os.path.splitext(filen)[0] + '.LBL'
            filen = os.path.splitext(filen)[0] + '.LBL'
        else:
            oDAT = os.path.splitext(filen)[0] + '.DAT'
            # NOTE(review): oLBL is set to the .DAT name here -- confirm
            # this is intentional for embedded-ODL products.
            oLBL = os.path.splitext(filen)[0] + '.DAT'
            filen = os.path.splitext(filen)[0] + '.DAT'
        print "Creating associated VICAR text file [" + __name__ + ".InstLoc.py]"
        PdsToVic.PdsToVic(filen)
        # The converted product lands in the working directory as <core>.VIC.
        base = os.path.basename(filen)
        print "Base: ", base
        core = os.path.splitext(base)[0]
        print "Core: ",core
        filen = core + '.VIC'
    print "oDAT :", oDAT
    print "oLBL :", oLBL
    print "filename:", filen
    return filen,oDAT,oLBL
def main():
    """Command-line entry point: localize the product named by argv[1]."""
    InstLocDB(sys.argv[1]) #filename

if (__name__ == "__main__"):
    # Blank lines around the run output (Python 2 print statement).
    print
    main()
    print

#print type(LocArray)
#print json.dumps(LocArray, indent=2)
#print dict.header(LocArray)
#print dict.values(LocArray)
#print dict.header(LocArray),dict.values(LocArray)
#print ""
| 36.640777 | 96 | 0.631028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,111 | 0.412162 |
9cdba0c221d3aaee83d43e1867607d88a6d826b5 | 4,758 | py | Python | bdpy/fig/fig.py | kencan7749/bdpy | 75b909742aa4767f09823cc98a588c41848292a9 | [
"MIT"
] | null | null | null | bdpy/fig/fig.py | kencan7749/bdpy | 75b909742aa4767f09823cc98a588c41848292a9 | [
"MIT"
] | null | null | null | bdpy/fig/fig.py | kencan7749/bdpy | 75b909742aa4767f09823cc98a588c41848292a9 | [
"MIT"
] | null | null | null | '''Figure module
This file is a part of BdPy.
Functions
---------
makefigure
Create a figure
box_off
Remove upper and right axes
draw_footnote
Draw footnote on a figure
'''
__all__ = [
'box_off',
'draw_footnote',
'make_violinplots',
'makefigure',
]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def makefigure(figtype='a4landscape'):
    '''Create a figure.

    Parameters
    ----------
    figtype : str
        Either 'a4landscape' or 'a4portrait'.

    Returns
    -------
    matplotlib.figure.Figure

    Raises
    ------
    ValueError
        If *figtype* is not one of the supported page layouts.
    '''
    # Bug fix: the original compared strings with ``is``, which tests
    # object identity and only worked by accident of CPython interning
    # (SyntaxWarning on Python >= 3.8).  Use equality instead.
    if figtype == 'a4landscape':
        figsize = (11.7, 8.3)   # A4 in inches, landscape
    elif figtype == 'a4portrait':
        figsize = (8.3, 11.7)   # A4 in inches, portrait
    else:
        raise ValueError('Unknown figure type %s' % figtype)

    return plt.figure(figsize=figsize)
def box_off(ax):
    '''Hide the top and right spines of *ax*, keeping ticks bottom/left.'''
    for side in ('right', 'top'):
        ax.spines[side].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
def draw_footnote(fig, string, fontsize=9):
    '''Draw *string* centered at the bottom of *fig*; return the overlay axes.'''
    note_ax = fig.add_axes([0., 0., 1., 1.])
    note_ax.text(0.5, 0.01, string,
                 horizontalalignment='center', fontsize=fontsize)
    # Keep the overlay transparent and frame-less so only the text shows.
    note_ax.patch.set_alpha(0.0)
    note_ax.set_axis_off()
    return note_ax
def make_violinplots(df, x=None, y=None, subplot=None, figure=None, x_list=None, subplot_list=None, figure_list=None, title=None, x_label=None, y_label=None, points=100):
    '''Draw one violin-plot figure per value of the *figure* column of *df*.

    df : DataFrame with columns named by *x*, *y*, *subplot* and *figure*;
    each *y* cell is assumed to hold an array-like that is concatenated per
    x-category -- TODO confirm against callers.
    x_list / subplot_list / figure_list restrict and order the categories
    (default: all sorted unique values).
    Returns the figure created for the LAST entry of figure_list.
    '''
    x_keys = sorted(df[x].unique())
    subplot_keys = sorted(df[subplot].unique())
    figure_keys = sorted(df[figure].unique())

    x_list = x_keys if x_list is None else x_list
    subplot_list = subplot_keys if subplot_list is None else subplot_list
    figure_list = figure_keys if figure_list is None else figure_list

    print('X: {}'.format(x_list))
    print('Subplot: {}'.format(subplot_list))
    print('Figures: {}'.format(figure_list))

    # Near-square subplot grid.
    col_num = np.ceil(np.sqrt(len(subplot_list)))
    row_num = int(np.ceil(len(subplot_list) / col_num))
    col_num = int(col_num)
    print('Subplot in {} x {}'.format(row_num, col_num))

    # Figure loop
    for fig_label in figure_list:
        print('Creating figure for {}'.format(fig_label))
        fig = makefigure('a4landscape')
        sns.set()
        sns.set_style('ticks')
        sns.set_palette('gray')

        # Subplot loop
        for i, sp_label in enumerate(subplot_list):
            print('Creating subplot for {}'.format(sp_label))

            # Set subplot position (column-major index, numbered bottom-up)
            col = int(i / row_num)
            row = i - col * row_num
            sbpos = (row_num - row - 1) * col_num + col + 1

            # Get data
            data = []
            for j, x_lbl in enumerate(x_list):
                df_t = df.query('{} == "{}" & {} == "{}" & {} == "{}"'.format(subplot, sp_label, figure, fig_label, x, x_lbl))
                data_t = df_t[y].values
                data_t = np.array([np.nan, np.nan]) if len(data_t) == 0 else np.concatenate(data_t)
                # violinplot requires at least two elements in the dataset
                data.append(data_t)

            # Plot
            ax = plt.subplot(row_num, col_num, sbpos)
            # Solid zero line plus dotted reference lines.
            ax.hlines(0, xmin=-1, xmax=len(x_list), color='k', linestyle='-', linewidth=0.5)
            ax.hlines([-0.4, -0.2, 0.2, 0.4, 0.6, 0.8], xmin=-1, xmax=len(x_list), color='k', linestyle=':', linewidth=0.5)
            xpos = range(len(x_list))
            ax.violinplot(data, xpos, showmeans=True, showextrema=False, showmedians=False, points=points)
            ax.text(-0.5, 0.85, sp_label, horizontalalignment='left', fontsize=16)
            ax.set_xlim([-1, len(x_list)])
            ax.set_xticks(range(len(x_list)))
            # Only the bottom row of subplots gets x tick labels.
            if row == 0:
                ax.set_xticklabels(x_list, rotation=-45)
            else:
                ax.set_xticklabels([])
            ax.set_ylim([-0.4, 1.0])  # FIXME: auto-scaling
            box_off(ax)

        plt.tight_layout()

        # X Label
        if x_label is not None:
            ax = fig.add_axes([0, 0, 1, 1])
            ax.text(0.5, 0, x_label,
                    verticalalignment='center', horizontalalignment='center', fontsize=16)
            ax.patch.set_alpha(0.0)
            ax.set_axis_off()

        # Y label
        if y_label is not None:
            ax = fig.add_axes([0, 0, 1, 1])
            ax.text(0, 0.5, y_label,
                    verticalalignment='center', horizontalalignment='center', fontsize=16, rotation=90)
            ax.patch.set_alpha(0.0)
            ax.set_axis_off()

        # Figure title
        if title is not None:
            ax = fig.add_axes([0, 0, 1, 1])
            ax.text(0.5, 0.99, '{}: {}'.format(title, fig_label),
                    horizontalalignment='center', fontsize=16)
            ax.patch.set_alpha(0.0)
            ax.set_axis_off()

    # NOTE(review): only the last figure is returned to the caller.
    return fig
| 29.924528 | 170 | 0.579235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 835 | 0.175494 |
9cdd5ffeb6cda836bfae0ca4a1ca1728228423f7 | 6,875 | py | Python | brain/mastermind.py | pabvald/chatbot | 17cdbce4c51eebb27dd0aa090fb9634868eb3e64 | [
"MIT"
] | null | null | null | brain/mastermind.py | pabvald/chatbot | 17cdbce4c51eebb27dd0aa090fb9634868eb3e64 | [
"MIT"
] | null | null | null | brain/mastermind.py | pabvald/chatbot | 17cdbce4c51eebb27dd0aa090fb9634868eb3e64 | [
"MIT"
] | 1 | 2020-10-29T13:38:26.000Z | 2020-10-29T13:38:26.000Z | from app import app, nlp
from brain import ACTIONS, LANGUAGES
from dateparser import parse
from datetime import datetime, date
from services import UserService, IntentService, AppointmentService
from utils import get_content
class MasterMind(object):
    """Central dispatcher: turns an incoming user message into bot replies.

    Ties together the user's UserService, language-specific spaCy pipeline
    (``nlp``), intent classification (IntentService) and the actions those
    intents trigger (intent deactivation, appointment booking).
    """

    def __init__(self, user_service, text):
        """Initialize from an existing UserService and the raw message text."""
        self._text = text
        self._user_service = user_service
        self._lang = self._user_service.get_language()
        # Run the language-specific spaCy pipeline on the lowercased text.
        self._doc = nlp[self._lang](text.lower())
        self._user_service.register_user_msg([self._text])  # Register user's message

    @classmethod
    def from_telegram_msg(cls, tg_user, tg_text):
        """Create a MasterMind instance from a Telegram message's user and text."""
        user_service = UserService.from_telegram_user(tg_user)
        return cls(user_service, tg_text)

    def get_response_for_telegram(self):
        """Return the responses wrapped in Telegram sendMessage payloads."""
        responses = self._get_response()
        telegram_responses = list(map(lambda response: {
            'text': response,
            'parse_mode': 'HTML'
        }, responses))
        return telegram_responses

    def _get_response(self):
        """Generate the list of reply strings, falling back to an error message."""
        try:
            if self._user_service.is_new_user:
                responses = [self._welcome_message()]
            else:
                responses = self._intent_driven_message()
        except Exception as e:
            app.logger.error(str(e))
            responses = [self._internal_error_message()]
        finally:
            self._user_service.register_bot_msg(responses)  # Register bot messages
        return responses

    def _welcome_message(self):
        """Generate a welcome message in the corresponding language."""
        return get_content(self._lang, ['welcome'])

    def _internal_error_message(self):
        """Generate an internal error message in the corresponding language."""
        return get_content(self._lang, ['internal_error'])

    def _intent_driven_message(self):
        """Generate an intent-driven response for the message."""
        responses = []
        errors = {}
        # Set intent service
        self._set_intent_service()
        intent_response = self._intent_service.get_response(self._doc)
        # Do action if there is one
        action = self._intent_service.get_action()
        if action:
            intent_params = self._intent_service.get_params()
            errors = self._do_action(action, intent_params)
        if errors:
            # Report every validation error, clear the offending params and
            # re-prompt (entities invalidated so they are not re-extracted).
            responses.extend(list(map(lambda err: err, errors.values())))
            self._intent_service.reset_params(**errors)
            self._invalidate_doc_entities()
            intent_response = self._intent_service.get_response(self._doc)
        # Save intent (if not completed)
        self._intent_service.save_intent()
        responses.append(intent_response)
        return responses

    def _set_intent_service(self):
        """Identify the intent of the message and create an IntentService."""
        # Intent identification: spaCy text-categorizer scores per intent.
        all_intents = self._doc.cats
        # Check if there's an active intent
        active_intent = self._user_service.get_active_intent()
        if active_intent:
            # An in-progress intent is pinned to the maximum score.
            all_intents[active_intent.name] = 1
        # Take intents priorities into account
        all_intents = dict(map(lambda kv: (kv[0], kv[1]*IntentService.priority(kv[0])), all_intents.items()))
        # Select the intent with the highest probability
        intent = max(all_intents, key=all_intents.get)
        # Intent service creation
        if active_intent and active_intent.name == intent:
            self._intent_service = IntentService.from_stored_intent(self._lang, active_intent)
        else:
            self._intent_service = IntentService.from_new_intent(self._lang, intent, self._user_service.user_id)

    def _invalidate_doc_entities(self):
        """Drop all named entities from the current doc."""
        self._doc.ents = []

    def _do_action(self, name, params):
        """Execute the action *name* with *params*; return a dict of errors."""
        errors = {}
        if name not in ACTIONS:
            raise AttributeError("Action '{}' is not a valid action".format(name))
        if name == 'deactivate_intent':
            errors = self._deactivate_intent(params)
        elif name == 'make_appointment':
            errors = self._make_appointment(params)
        return errors

    def _deactivate_intent(self, params):
        """Deactivate the user's current intent."""
        errors = {}
        content = get_content(self._lang, ['deactivate_intent'])
        try:
            self._user_service.deactivate_intent()
        except Exception:
            errors['main'] = content['error'].format(**params)
            # NOTE(review): on failure this falls off the end and returns
            # None (not ``errors``), so the caller's ``if errors:`` check
            # silently drops the error message.
        else:
            return errors

    def _make_appointment(self, params):
        """Make an appointment if the requested date/time is valid.

        Expects params['date'], params['time'] and params['topic'];
        returns a dict of per-field error messages (empty on success).
        """
        errors = {}
        content = get_content(self._lang, ['make_appointment'])
        t_date = parse(params['date'], languages=[self._lang]).date()
        t_time = parse(params['time'], languages=[self._lang]).time()
        t_datetime = datetime.combine(t_date, t_time)
        av_slots = AppointmentService.get_available_slots(t_date.isoformat())
        # Parameters validation (first failing check wins)
        if t_date < date.today():
            errors['date'] = content['past_date'].format(**params)
        elif not AppointmentService.office_is_open_on_date(t_date.isoformat()):
            errors['date'] = content['office_close_date'].format(**params)
        elif not av_slots:
            errors['date'] = content['not_available_date'].format(**params)
        elif t_datetime < datetime.now():
            errors['time'] = content['past_datetime'].format(**params)
        elif not AppointmentService.office_is_open_on_datetime(
                t_datetime.isoformat()):
            errors['time'] = content['office_close_time'].format(**params)
        elif not AppointmentService.is_available(t_datetime.isoformat()):
            errors['time'] = content['not_available_time'].format(**params)

        if not errors:
            # Snap the time to the nearest bookable half-hour slot.
            closest_datetime = AppointmentService.closest_half(t_datetime.isoformat())
            t_time = datetime.fromisoformat(closest_datetime).time()
            try:
                self._user_service.make_appointment(t_date, t_time, params['topic'])
            except Exception as e:
                app.logger.error(str(e))
                errors['main'] = content['error'].format(**params)
        return errors
| 39.285714 | 112 | 0.620945 | 6,647 | 0.966836 | 0 | 0 | 253 | 0.0368 | 0 | 0 | 1,374 | 0.199855 |
9cdd6d64010e9c068a5d368c4a2af2b0acb6a55f | 628 | py | Python | isValidParentheses.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | isValidParentheses.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | isValidParentheses.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | class Solution:
# @param {string} s A string
# @return {boolean} whether the string is a valid parentheses
def isValidParentheses(self, s):
stack = []
dict = {"]": "[", "}": "{", ")": "("}
for char in s:
if char in dict.values():
stack.append(char)
elif char in dict.keys():
if stack == [] or dict[char] != stack.pop():
return False
# If accept other characters "ABCDEF"
else:
return False
return stack == []
# Ad-hoc smoke test (Python 2 print statement).  "(A)[]{}" contains the
# non-bracket character 'A', so this prints False.
test = Solution()
print test.isValidParentheses("(A)[]{}")
9cde9f8cf5534efc1b5cec7d28cf00865e25ee25 | 113 | py | Python | fpakman/core/resource.py | vinifmor/fpakman | a719991b8f7ecf366d44fdf074f5950767bdf121 | [
"Zlib"
] | 39 | 2019-06-15T08:27:12.000Z | 2021-11-08T03:33:01.000Z | fpakman/core/resource.py | vinifmor/fpakman | a719991b8f7ecf366d44fdf074f5950767bdf121 | [
"Zlib"
] | 10 | 2019-06-16T12:16:19.000Z | 2020-06-21T18:49:05.000Z | fpakman/core/resource.py | vinifmor/fpakman | a719991b8f7ecf366d44fdf074f5950767bdf121 | [
"Zlib"
] | 3 | 2019-08-01T12:38:46.000Z | 2020-04-30T20:40:23.000Z |
from fpakman import ROOT_DIR
def get_path(resource_path):
    """Return the path of *resource_path* inside the app's resources directory."""
    return "%s/resources/%s" % (ROOT_DIR, resource_path)
| 16.142857 | 51 | 0.752212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.115044 |
9ce11bf87b067840337b0d13348e056b624caea2 | 602 | py | Python | studygroups/migrations/0122_auto_20190710_0605.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 10 | 2016-05-03T20:41:25.000Z | 2021-09-17T18:42:01.000Z | studygroups/migrations/0122_auto_20190710_0605.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 655 | 2016-05-04T19:00:35.000Z | 2022-03-28T13:09:20.000Z | studygroups/migrations/0122_auto_20190710_0605.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 8 | 2016-05-06T10:24:27.000Z | 2020-10-21T00:56:59.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-07-10 06:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces TeamMembership.communication_opt_in with a new
    # weekly_update_opt_in flag that defaults to opted-in.
    # NOTE: existing opt-in/out choices are not migrated (the old column
    # is dropped outright).

    dependencies = [
        ('studygroups', '0121_auto_20190708_2246'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='teammembership',
            name='communication_opt_in',
        ),
        migrations.AddField(
            model_name='teammembership',
            name='weekly_update_opt_in',
            field=models.BooleanField(default=True),
        ),
    ]
9ce1c5f0534fa4d2972cf58a4d700be2541a140d | 244 | py | Python | rllib/contrib/bandits/envs/__init__.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 21,382 | 2016-09-26T23:12:52.000Z | 2022-03-31T21:47:45.000Z | rllib/contrib/bandits/envs/__init__.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 19,689 | 2016-09-17T08:21:25.000Z | 2022-03-31T23:59:30.000Z | rllib/contrib/bandits/envs/__init__.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 4,114 | 2016-09-23T18:54:01.000Z | 2022-03-31T15:07:32.000Z | from ray.rllib.contrib.bandits.envs.discrete import LinearDiscreteEnv, \
WheelBanditEnv
from ray.rllib.contrib.bandits.envs.parametric import ParametricItemRecoEnv
__all__ = ["LinearDiscreteEnv", "WheelBanditEnv", "ParametricItemRecoEnv"]
| 40.666667 | 75 | 0.82377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.237705 |
9ce27cc5df514eb99788d3adf40ed089c2be01cc | 3,770 | py | Python | cell.py | reachtarunhere/S-LSTM-PyTorch | 72d5ffd0e0b3ac4670d1fd766232253f34b4afa3 | [
"MIT"
] | 5 | 2018-06-18T17:27:54.000Z | 2020-06-15T15:24:06.000Z | cell.py | reachtarunhere/S-LSTM-PyTorch | 72d5ffd0e0b3ac4670d1fd766232253f34b4afa3 | [
"MIT"
] | null | null | null | cell.py | reachtarunhere/S-LSTM-PyTorch | 72d5ffd0e0b3ac4670d1fd766232253f34b4afa3 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class HiddenGate(nn.Module):
    """One gate of the word-level S-LSTM cell.

    Projects the concatenation of the context window ``Xis`` (three
    hidden-sized vectors), the word input ``x_i`` and the previous
    sentence state ``prev_g`` down to ``hidden_size`` and squashes it
    with sigmoid or tanh.
    """

    def __init__(self, hidden_size, input_size, bias=True, nonlinearity="sigmoid"):
        # Bug fix: ``bias`` now defaults to True.  SLSTMCell constructs the
        # u-gate as ``HiddenGate(hidden_size, input_size, nonlinearity="tanh")``,
        # which raised TypeError while ``bias`` was a required positional
        # argument.  The default is backward compatible for all callers.
        super(HiddenGate, self).__init__()
        self.linear = nn.Linear(
            3*hidden_size + input_size + hidden_size, hidden_size, bias=bias)
        # torch.sigmoid/torch.tanh replace the deprecated F.sigmoid/F.tanh;
        # they compute the same functions.
        self.nonlinearity = torch.sigmoid if nonlinearity == "sigmoid" else torch.tanh

    def forward(self, Xis, x_i, prev_g):
        # Inputs are concatenated along dim 0, i.e. the gate operates on
        # unbatched 1-D vectors (3*hidden + input + hidden elements).
        return self.nonlinearity(self.linear(torch.cat([Xis, x_i, prev_g])))
class SentenceStateGate(nn.Module):
    """Gate used when updating the sentence-level state ``g``.

    ``input_size`` is accepted for signature symmetry with HiddenGate but
    is not used: the projection only sees two hidden-sized vectors.
    """

    def __init__(self, hidden_size, input_size, bias):
        super(SentenceStateGate, self).__init__()
        in_features = hidden_size + hidden_size
        self.linear = nn.Linear(in_features, hidden_size, bias=bias)

    def forward(self, prev_g, h):
        """h is either h_av or h_i for different i"""
        gate_input = torch.cat([prev_g, h])
        return F.sigmoid(self.linear(gate_input))
class SLSTMCell(nn.Module):
    """Sentence-State LSTM (S-LSTM) cell: per-word hidden states plus a
    shared sentence-level state g, updated jointly each step.

    NOTE(review): this implementation is unfinished -- see the inline
    notes in __init__, get_Xis and forward.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(SLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # hidden state gates
        self.i_i_op = HiddenGate(hidden_size, input_size, bias)
        self.l_i_op = HiddenGate(hidden_size, input_size, bias)
        self.r_i_op = HiddenGate(hidden_size, input_size, bias)
        self.f_i_op = HiddenGate(hidden_size, input_size, bias)
        self.s_i_op = HiddenGate(hidden_size, input_size, bias)
        self.o_i_op = HiddenGate(hidden_size, input_size, bias)
        # NOTE(review): HiddenGate.__init__ takes (hidden, input, bias,
        # nonlinearity); this call omits ``bias`` and raises TypeError
        # unless HiddenGate gives ``bias`` a default value.
        self.u_i_op = HiddenGate(hidden_size, input_size, nonlinearity="tanh")
        # sentence state gates
        self.g_f_g_op = SentenceStateGate(hidden_size, input_size, bias)
        self.g_f_i_op = SentenceStateGate(hidden_size, input_size, bias)
        self.g_o_op = SentenceStateGate(hidden_size, input_size, bias)

    def reset_params(self):
        # Stub: parameter (re)initialization not implemented yet.
        pass

    def get_Xis(self, prev_h_states):
        """Apply proper index selection mask to get xis"""
        # How do you handle it getting shorter eh??
        # Stub: context-window extraction not implemented; forward() calls
        # the nonexistent self.get_Xi_i instead.
        pass

    def forward(self, prev_h_states, prev_c_states, prev_g_state,
                x_i, prev_c_g):
        # NOTE(review): self.get_Xi_i is not defined anywhere in this class
        # (only the stub get_Xis above exists) -- this raises AttributeError.
        Xi_i = self.get_Xi_i(prev_h_states)
        i_i = self.i_i_op(Xi_i, x_i, prev_g_state)
        l_i = self.l_i_op(Xi_i, x_i, prev_g_state)
        # NOTE(review): the next four assignments all reuse l_i_op; they
        # presumably should call r_i_op, f_i_op, s_i_op and o_i_op.
        r_i = self.l_i_op(Xi_i, x_i, prev_g_state)
        f_i = self.l_i_op(Xi_i, x_i, prev_g_state)
        s_i = self.l_i_op(Xi_i, x_i, prev_g_state)
        o_i = self.l_i_op(Xi_i, x_i, prev_g_state)
        u_i = self.u_i_op(Xi_i, x_i, prev_g_state)

        # Now Get Softmaxed Versions
        i_i, l_i, r_i, f_i, s_i = self.softmaxed_gates(
            [i_i, l_i, r_i, f_i, s_i])
        # what happens to the the last cell here?????? which has no i+1?
        # what happens when first one has no i-1??
        # NOTE(review): get_prev_cs is not defined in this class either.
        prev_c_left, prev_c_right, prev_c = self.get_prev_cs(prev_c_states)
        # Word-level cell: convex combination of neighbour cells, the
        # sentence cell and the candidate u_i (gates sum to 1 via softmax).
        c_i = l_i * prev_c_left + f_i * prev_c + r_i * prev_c_right + \
            s_i * prev_c_g + i_i * u_i
        h_i = o_i * F.tanh(c_i)

        # Now for the sentence level calculations
        h_avg = prev_h_states.mean(dim=0)
        g_f_g = self.g_f_g_op(prev_g_state, h_avg)
        g_f_i = self.g_f_i_op(prev_g_state, prev_h_states)
        g_o = self.g_o_op(prev_g_state, h_avg)
        temp = self.softmaxed_gates(list(torch.unbind(g_f_i)) + [g_f_g])
        g_f_i = torch.stack(temp[:-1], dim=0)
        g_f_g = temp[-1]
        c_g = g_f_g * prev_c_g + torch.sum(g_f_i * prev_c_states, dim=0)
        g = g_o * F.tanh(c_g)
        return h_i, c_i, g, c_g

    def softmaxed_gates(self, gates_list):
        # Normalize the competing gates jointly so they sum to one elementwise.
        softmaxed = F.softmax(torch.stack(gates_list), dim=0)
        return torch.unbind(softmaxed)
| 33.362832 | 78 | 0.644032 | 3,694 | 0.979841 | 0 | 0 | 0 | 0 | 0 | 0 | 380 | 0.100796 |
9ce50f572810c32e69e17aa3bf4ed997493d6ef2 | 94 | py | Python | python/comparatist/gcm/jlloop.py | tkf/comparatist | 44f30077857fc96cb77539f3fe0a7e8112f86c82 | [
"MIT"
] | null | null | null | python/comparatist/gcm/jlloop.py | tkf/comparatist | 44f30077857fc96cb77539f3fe0a7e8112f86c82 | [
"MIT"
] | null | null | null | python/comparatist/gcm/jlloop.py | tkf/comparatist | 44f30077857fc96cb77539f3fe0a7e8112f86c82 | [
"MIT"
] | null | null | null | from ..utils.jl import make_prepare
prepare = make_prepare("Comparatist.Simulators.gcm.loop")
| 31.333333 | 57 | 0.808511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.351064 |
9ce55c50d74882644904a8b52bbb361eba5b3fff | 1,268 | py | Python | 016_3Sum_Closest.py | adwardlee/leetcode_solutions | f386869161181e153e29165d8fff06492bb192f3 | [
"MIT"
] | null | null | null | 016_3Sum_Closest.py | adwardlee/leetcode_solutions | f386869161181e153e29165d8fff06492bb192f3 | [
"MIT"
] | null | null | null | 016_3Sum_Closest.py | adwardlee/leetcode_solutions | f386869161181e153e29165d8fff06492bb192f3 | [
"MIT"
] | null | null | null | '''
Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution.
Example:
Given array nums = [-1, 2, 1, -4], and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
'''
class Solution:
    def threeSumClosest(self, nums: list, target: int) -> int:
        """Return the sum of three elements of *nums* closest to *target*.

        Sorts *nums* in place, then anchors on each element and runs a
        two-pointer scan over the remaining suffix.  An exact match
        returns *target* immediately.
        """
        nums.sort()
        best_gap = 10 ** 10
        best_sum = 0
        for anchor_idx, anchor in enumerate(nums):
            # Duplicate anchors cannot produce a new best sum.
            if anchor_idx > 0 and nums[anchor_idx - 1] == anchor:
                continue
            lo, hi = anchor_idx + 1, len(nums) - 1
            while lo < hi:
                current = anchor + nums[lo] + nums[hi]
                if current == target:
                    return target
                gap = abs(current - target)
                if gap < best_gap:
                    best_gap = gap
                    best_sum = current
                # Move the pointer that shrinks the distance to target.
                if current < target:
                    lo += 1
                else:
                    hi -= 1
        return best_sum
| 34.27027 | 226 | 0.479495 | 906 | 0.714511 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.283123 |
9ce645cd6acd7ce6fe1f285499eaccb37cd50da8 | 7,279 | py | Python | generated-libraries/python/netapp/flexcache/flexcache_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/flexcache/flexcache_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/flexcache/flexcache_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
class FlexcacheInfo(NetAppObject):
    """
    FlexCache Info
    When returned as output, every attribute is reported unless the
    caller restricts the set of desired attributes.  When used as input,
    the elements present select the attributes to return; in queries an
    omitted element places no constraint on that attribute.
    """

    def _attribute(name, doc):
        """Class-body helper: build a validated property stored in ``_<name>``."""
        slot = '_' + name

        def _get(self):
            return getattr(self, slot)

        def _set(self, val):
            if val != None:
                self.validate(name, val)
            setattr(self, slot, val)

        return property(_get, _set, doc=doc)

    _origin_volume = None
    origin_volume = _attribute(
        'origin_volume',
        "Origin volume name that contains the authoritative data.")

    _origin_aggregate = None
    origin_aggregate = _attribute('origin_aggregate', "Origin Aggregate Name")

    _cache_percent_used = None
    cache_percent_used = _attribute('cache_percent_used', "Cache Used Percentage")

    _origin_state = None
    origin_state = _attribute(
        'origin_state',
        "Origin Volume State: online|restricted|offline|force_online|"
        "force_offline|mixed")

    _cache_aggregate = None
    cache_aggregate = _attribute('cache_aggregate', "Cache Aggregate Name")

    _cache_state = None
    cache_state = _attribute(
        'cache_state',
        "Cache Volume State: online|restricted|offline|force_online|"
        "force_offline|mixed")

    _cache_available = None
    cache_available = _attribute('cache_available', "Cache Available Size")

    _cache_size = None
    cache_size = _attribute('cache_size', "Cache Volume Size")

    _vserver = None
    vserver = _attribute(
        'vserver',
        "Name of the Vserver where the created cache is located "
        "(key attribute; at most 255 characters).")

    _connection_status = None
    connection_status = _attribute(
        'connection_status',
        "Status of the cache/origin connection: 'connected', "
        "'disconnected' or 'connecting'.")

    _cache_volume = None
    cache_volume = _attribute('cache_volume', "Cache Volume Name (key attribute)")

    # The factory is only needed while the class body executes.
    del _attribute

    @staticmethod
    def get_api_name():
        return "flexcache-info"

    @staticmethod
    def get_desired_attrs():
        return [
            'origin-volume',
            'origin-aggregate',
            'cache-percent-used',
            'origin-state',
            'cache-aggregate',
            'cache-state',
            'cache-available',
            'cache-size',
            'vserver',
            'connection-status',
            'cache-volume',
        ]

    def describe_properties(self):
        # Sizes and the percentage are ints; everything else is a string.
        int_valued = ('cache_percent_used', 'cache_available', 'cache_size')
        spec = {}
        for attr in ('origin_volume', 'origin_aggregate', 'cache_percent_used',
                     'origin_state', 'cache_aggregate', 'cache_state',
                     'cache_available', 'cache_size', 'vserver',
                     'connection_status', 'cache_volume'):
            value_cls = int if attr in int_valued else basestring
            spec[attr] = {'class': value_cls, 'is_list': False,
                          'required': 'optional'}
        return spec
| 31.647826 | 99 | 0.583322 | 7,231 | 0.993406 | 0 | 0 | 4,898 | 0.672895 | 0 | 0 | 3,376 | 0.4638 |
9ce68b411a96e5e2c7a762c40c8ad6b8d2a605b8 | 5,974 | py | Python | corpus2graph/word_processor.py | zzcoolj/corpus2graph | 0d39f5f845b70a0f42000e3afc507eb7b4b5679c | [
"MIT"
] | 27 | 2018-06-06T16:30:45.000Z | 2022-02-10T15:28:47.000Z | corpus2graph/word_processor.py | zzcoolj/corpus2graph | 0d39f5f845b70a0f42000e3afc507eb7b4b5679c | [
"MIT"
] | 2 | 2019-05-01T15:24:12.000Z | 2019-08-20T20:17:19.000Z | corpus2graph/word_processor.py | zzcoolj/corpus2graph | 0d39f5f845b70a0f42000e3afc507eb7b4b5679c | [
"MIT"
] | 3 | 2019-11-10T15:39:11.000Z | 2020-10-16T12:40:06.000Z | import string
import warnings
import re
from . import util
import spacy
class FileParser(object):
    """Yields stripped sentences from a corpus file.

    Three parsing modes are supported:
      * 'txt'     -- one sentence per line of a UTF-8 text file.
      * 'xml'     -- sentences extracted from nodes found at xml_node_path.
      * 'defined' -- delegate entirely to a caller-supplied parser callable.
    """

    def __init__(self,
                 file_parser='txt',
                 xml_node_path=None, fparser=None):
        """Validate the requested mode and remember the configuration."""
        if file_parser not in ['txt', 'xml', 'defined']:
            msg = 'file_parser should be txt, xml or defined, not "{file_parser}"'
            raise ValueError(msg.format(file_parser=file_parser))
        if file_parser == 'defined' and fparser is None:
            raise ValueError('Please define you own file_parser.')
        self.file_parser = file_parser
        self.xml_node_path = xml_node_path
        self.fparser = fparser

    def xml_parser(self, file_path, xml_node_path):
        """Yield stripped sentences from every matching XML node."""
        for paragraph in util.search_all_specific_nodes_in_xml_known_node_path(file_path, xml_node_path):
            for sent in util.tokenize_informal_paragraph_into_sentences(paragraph):
                yield str.strip(sent)

    def txt_parser(self, file_path):
        """Yield each line of a UTF-8 text file, stripped of whitespace."""
        with open(file_path, 'r', encoding='utf-8') as source:
            for line in source:
                yield line.strip()

    def __call__(self, file_path):
        """Dispatch to the parser selected at construction time."""
        if self.file_parser == 'txt':
            sentences = self.txt_parser(file_path)
        elif self.file_parser == 'xml':
            sentences = self.xml_parser(file_path, self.xml_node_path)
        else:  # 'defined' -- fparser presence guaranteed by __init__
            sentences = self.fparser(file_path)
        for sent in sentences:
            yield sent
class WordPreprocessor(object):
    """Applies a configurable chain of normalisation steps to a single token.

    Steps run in a fixed order: numeric-token removal, digit squashing,
    punctuation-only removal, digit/punctuation-mix removal, stop-word
    removal, stemming, lower-casing, then an optional user callable.
    A step that filters the token out returns the empty string ''.
    """
    # default: config file.
    def __init__(self, remove_stop_words, remove_numbers, replace_digits_to_zeros, remove_punctuations,
                 stem_word, lowercase, wpreprocessor):
        """Store the step toggles.

        Args:
            remove_stop_words: drop spacy stop words (apply() then needs a
                spacy loader passed in).
            remove_numbers: drop purely numeric tokens.
            replace_digits_to_zeros: map every digit to '0'.
            remove_punctuations: drop tokens made only of punctuation.
            stem_word: stem via util.stem_word.
            lowercase: lower-case the token.
            wpreprocessor: optional callable(str) -> str applied last.
        """
        self.remove_stop_words = remove_stop_words
        self.remove_numbers = remove_numbers
        self.replace_digits_to_zeros = replace_digits_to_zeros
        self.remove_punctuations = remove_punctuations
        self.stem_word = stem_word
        self.lowercase = lowercase
        self.wpreprocessor = wpreprocessor

        punctuations = set(string.punctuation)
        punctuations.update({'“', '”', '—'})  # English
        punctuations.update({'...', '«', '»'})  # French
        self.puncs = punctuations

    def apply(self, word, spacy_loader=None):
        """Run the configured pipeline on one token; '' means filtered out."""
        # Removing
        if self.remove_numbers and word.isnumeric():
            return ''
        if self.replace_digits_to_zeros:
            # Fix: use a raw string for the pattern; the bare '\d' escape is
            # deprecated and raises a DeprecationWarning on modern Pythons.
            word = re.sub(r'\d', '0', word)
        if self.remove_punctuations:
            if all(c in self.puncs for c in word):
                return ''
        # Remove combinations of punctuations and digits
        if self.remove_numbers and self.remove_punctuations:
            if all(j.isdigit() or j in self.puncs for j in word):
                return ''
        # remove stop words
        if self.remove_stop_words and spacy_loader.vocab[word].is_stop:
            return ''
        # Stem word
        if self.stem_word:
            word = util.stem_word(word)
        # Make all words in lowercase
        if self.lowercase:
            word = word.lower()
        # customized word preprocessor
        if self.wpreprocessor is not None:
            if not callable(self.wpreprocessor):
                warnings.warn('wpreprocessor should be callable')
            else:
                word = self.wpreprocessor(word)
                if not isinstance(word, str):
                    raise ValueError('The output of wpreprocessor should be string')
        return word

    def __call__(self, word):
        return self.apply(word)
class Tokenizer(object):
    """Wraps one of several word tokenizers behind a uniform apply() call.

    Supported modes: 'Treebank', 'PunktWord', 'WordPunct' (all NLTK),
    'spacy' (tokenizes lazily via a loader passed to apply()), or '' to use
    a caller-supplied callable (or no tokenization at all).
    """

    @staticmethod
    def mytok(s):
        """
        An example of user customized tokenizer.
        :return: list of tokens
        """
        # NOTE: loads the spacy model on every call, which is very slow;
        # kept as-is because this function is only a usage example.
        tk = spacy.load('en')
        return [token.text for token in tk(s)]

    def __init__(self, word_tokenizer='Treebank', wtokenizer=None):
        self.word_tokenizer = None
        if word_tokenizer not in ['Treebank', 'PunktWord', 'WordPunct', 'spacy', '']:
            msg = 'word_tokenizer "{word_tokenizer}" should be Treebank, PunktWord, WordPunct or empty'
            raise ValueError(msg.format(word_tokenizer=word_tokenizer))
        # self.tokenizer stays None for 'spacy' (resolved at apply() time),
        # for an empty mode without a custom callable, and for a non-callable
        # custom tokenizer.
        self.tokenizer = None
        if word_tokenizer == 'spacy':
            self.word_tokenizer = 'spacy'
        elif word_tokenizer == 'Treebank':
            from nltk.tokenize import TreebankWordTokenizer
            self.tokenizer = TreebankWordTokenizer().tokenize
        elif word_tokenizer == 'PunktWord':
            # PunktTokenizer splits on punctuation, but keeps it with the word.
            from nltk.tokenize import PunktWordTokenizer
            self.tokenizer = PunktWordTokenizer().tokenize
        elif word_tokenizer == 'WordPunct':
            # WordPunctTokenizer splits all punctuations into separate tokens.
            from nltk.tokenize import WordPunctTokenizer
            self.tokenizer = WordPunctTokenizer().tokenize
        elif wtokenizer is not None:
            if callable(wtokenizer):
                self.tokenizer = wtokenizer
            else:
                warnings.warn('wtokenizer should be callable')

    def apply(self, text, spacy_loader=None):
        """Tokenize text; falls back to wrapping the whole text in a list."""
        if self.word_tokenizer == 'spacy':
            return [token.text for token in spacy_loader(text)]
        if self.tokenizer is None:
            return [text]
        return self.tokenizer(text)

    def __call__(self, text):
        return self.apply(text)
| 38.541935 | 133 | 0.600603 | 5,941 | 0.986549 | 838 | 0.139156 | 347 | 0.057622 | 0 | 0 | 1,186 | 0.196945 |
9ce7bfb4241f59939bdcb06aabfa03be7b8bc555 | 4,688 | py | Python | gaussian/__init__.py | mattaustin/gaussian | c22ab36fd83e5f0b24587861ef34b872e268c97e | [
"Apache-2.0"
] | 1 | 2017-06-04T23:56:54.000Z | 2017-06-04T23:56:54.000Z | gaussian/__init__.py | mattaustin/gaussian | c22ab36fd83e5f0b24587861ef34b872e268c97e | [
"Apache-2.0"
] | null | null | null | gaussian/__init__.py | mattaustin/gaussian | c22ab36fd83e5f0b24587861ef34b872e268c97e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 Matt Austin
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
from .feeds import Feed
import logging
import requests
__title__ = 'gaussian'
__version__ = '0.1.0'
__url__ = 'https://github.com/mattaustin/gaussian'
__author__ = 'Matt Austin <mail@mattaustin.me.uk>'
__copyright__ = 'Copyright 2014 Matt Austin'
__license__ = 'Apache 2.0'
class NewsBlur(object):
    """Client for the NewsBlur HTTP API.

    http://www.newsblur.com/api
    """

    _user_agent = '{name}/{version} ({name}; +{url})'.format(
        name=__title__, version=__version__, url=__url__)

    # Default API endpoint; overridable per instance for self-hosted servers.
    endpoint = 'https://www.newsblur.com/'

    # Flipped to True by a successful login().
    logged_in = False

    def __init__(self, endpoint=None, username=None, password=None):
        """
        :param str endpoint: API endpoint URL. Defaults to 'www.newsblur.com'.
            Specify this if you have your own NewsBlur server.
        :param str username: Your NewsBlur account username.
        :param str password: Your NewsBlur account password.
        """
        self._logger = self._get_logger()
        self._set_endpoint(endpoint)
        self.username = username
        self.password = password
        self.session = self._create_session()
        # Log in eagerly when full credentials were supplied.
        if self.username and self.password:
            self.login()

    def __repr__(self):
        if self.username:
            shown = self.username.encode('utf-8')
        else:
            shown = b''
        return b'<{0}: {1}>'.format(self.__class__.__name__, shown)

    def _get_logger(self):
        """Return the module-level logger."""
        return logging.getLogger(__name__)

    def _create_session(self):
        """Build a requests session carrying JSON-accept and UA headers."""
        http = requests.Session()
        http.headers = {'Accept': 'application/json',
                        'User-Agent': self._user_agent}
        return http

    def _set_endpoint(self, endpoint):
        """Use the given endpoint, falling back to the class default."""
        self.endpoint = endpoint or self.endpoint
        self._logger.debug('API endpoint set to: {0}'.format(self.endpoint))

    def _construct_url(self, path):
        """Join the endpoint and an API path into a full URL."""
        return '{endpoint}{path}'.format(endpoint=self.endpoint, path=path)

    def get_feeds(self, refresh=False):
        """Get the feeds for this account.

        :param bool refresh: If True, any cached data is ignored and data is
            fetched from the API. Default: False.
        :returns: List of :py:class:`~gaussian.feeds.Feed` instances.
        :rtype: list
        """
        if refresh or not hasattr(self, '_feeds'):
            resp = self.session.get(self._construct_url('/reader/feeds'))
            # TODO: properly check for success, it appears server always
            # returns 200.
            payload = resp.json()
            assert payload['result'] == 'ok'
            self._feeds = [
                Feed(id=feed_id, api_client=self, data=data)
                for feed_id, data in payload['feeds'].items()]
        return self._feeds

    def login(self):
        """Login to NewsBlur, using session (cookie) authentication."""
        resp = self.session.post(self._construct_url('/api/login'),
                                 data={'username': self.username,
                                       'password': self.password})
        # TODO: properly check for success, it appears server always returns
        # 200.
        self._logger.debug(resp.content)
        assert resp.json()['result'] == 'ok'
        self.logged_in = True
        return True

    def mark_as_read(self, days=0):
        """Mark all stories from all feeds as read.

        :param int days: Number of days back to mark as read. Default: 0 (all).
        """
        resp = self.session.post(
            self._construct_url('/reader/mark_all_as_read'),
            data={'days': days})
        return resp.json()['result'] == 'ok'

    def mark_stories_as_read(self, stories):
        """Mark provided stories as read.

        :param list stories: List of :py:class:`~gaussian.stories.Story`
            instances.
        """
        hashes = [story.hash for story in stories]
        resp = self.session.post(
            self._construct_url('/reader/mark_story_hashes_as_read'),
            data={'story_hash': hashes})
        return resp.json()['result'] == 'ok'
| 32.331034 | 79 | 0.62884 | 3,753 | 0.800555 | 0 | 0 | 0 | 0 | 0 | 0 | 2,174 | 0.463737 |
9ce7d60aa1fddaa6c915b248970c026594e52b7a | 614 | py | Python | Exercise_07_RGB_LED.py | NaimFuad/hibiscus-sense-micropython-1 | 7475b6099315b4a12ab00f7ae5d62a2eb8ce5e0c | [
"MIT"
] | 3 | 2021-03-04T08:20:24.000Z | 2022-02-11T10:04:03.000Z | Exercise_07_RGB_LED.py | NaimFuad/hibiscus-sense-micropython-1 | 7475b6099315b4a12ab00f7ae5d62a2eb8ce5e0c | [
"MIT"
] | null | null | null | Exercise_07_RGB_LED.py | NaimFuad/hibiscus-sense-micropython-1 | 7475b6099315b4a12ab00f7ae5d62a2eb8ce5e0c | [
"MIT"
] | 2 | 2021-02-26T10:15:57.000Z | 2021-03-04T08:20:36.000Z | # Hibiscus Sense - Exercise 07 RGB LED
#
# There is 1x RGB LED.
# This RGB LED is connected to GPIO16 and integrated with WS2812.
# WS2812 is an LED controller, which uses a single-wire control protocol to control the LEDs.
from machine import Pin
from neopixel import NeoPixel
from time import sleep
pin = Pin(16, Pin.OUT) # set GPIO16 to output to drive the NeoPixel
RGB = NeoPixel(pin, 1) # create NeoPixel driver on GPIO16 for 1 pixel
# Continuously repaint the single pixel green, twice per second.
while True:
  # set the first pixel to green (tuple order is R, G, B)
  RGB[0] = (0, 255, 0)
  # write data to all pixels
  RGB.write()
  sleep(0.5)
| 27.909091 | 93 | 0.664495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.614007 |
9cea6c24623ad5445462ff6b79263e46ed1d41ac | 4,082 | py | Python | tests/k8s/k8s_test_utilities.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | 132 | 2016-10-19T12:34:44.000Z | 2022-03-16T09:00:39.000Z | tests/k8s/k8s_test_utilities.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | 3,152 | 2016-09-21T23:05:58.000Z | 2022-03-31T23:29:08.000Z | tests/k8s/k8s_test_utilities.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | 563 | 2016-09-20T01:00:15.000Z | 2022-03-31T22:43:54.000Z | import logging
import time
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
def join_master(duthost, master_vip):
    """Join the DUT to the Kubernetes master at the given VIP.

    Args:
        duthost: DUT host object
        master_vip: VIP of high availability Kubernetes master

    Fails the test (via pytest_assert) when the DUT does not reach the
    connected state after the join.
    """
    logger.info("Joining DUT to Kubernetes master")
    join_cmds = [
        'sudo config kube server disable on',
        'sudo config kube server ip {}'.format(master_vip),
        'sudo config kube server disable off',
    ]
    duthost.shell_cmds(cmds=join_cmds)
    pytest_assert(poll_for_status_change(duthost, True),
                  "DUT failed to successfully join Kubernetes master")
def make_vip_unreachable(duthost, master_vip):
    """Block traffic to/from the Kubernetes master VIP with iptables DROP rules.

    Pre-existing iptables rules for the VIP are cleaned first so the DROP
    rules are not duplicated.

    Args:
        duthost: DUT host object
        master_vip: VIP of high availability Kubernetes master
    """
    logger.info("Making Kubernetes master VIP unreachable from DUT")
    clean_vip_iptables_rules(duthost, master_vip)
    for chain, match_flag in (('INPUT', '-s'), ('OUTPUT', '-d')):
        duthost.shell('sudo iptables -A {} {} {} -j DROP'.format(chain, match_flag, master_vip))
def make_vip_reachable(duthost, master_vip):
    """
    Makes Kubernetes master VIP reachable from SONiC DUT by removing any iptables rules associated with the VIP.

    Args:
        duthost: DUT host object
        master_vip: VIP of high availability Kubernetes master
    """
    logger.info("Making Kubernetes master VIP reachable from DUT")
    # Reachability is restored simply by dropping every VIP-related rule;
    # no ACCEPT rules need to be installed.
    clean_vip_iptables_rules(duthost, master_vip)
def clean_vip_iptables_rules(duthost, master_vip):
    """Delete every iptables rule on the DUT that references the master VIP.

    Args:
        duthost: DUT host object
        master_vip: VIP of high availability Kubernetes master
    """
    existing_rules = duthost.shell('sudo iptables -S | grep {} || true'.format(master_vip))["stdout_lines"]
    logger.info('iptables rules: {}'.format(existing_rules))
    for rule in existing_rules:
        if not rule:
            continue
        # Rules are listed as "-A <chain> ..."; strip the leading "-A" and
        # re-issue the remainder as a delete.
        duthost.shell('sudo iptables -D {}'.format(rule[2:]))
def check_connected(duthost):
    """
    Checks if the DUT already shows status 'connected' to Kubernetes master

    Args:
        duthost: DUT host object

    Returns:
        True if connected, False otherwise (including when no status line is
        found in the 'show kube server' output).
    """
    kube_server_status = duthost.shell('show kube server')["stdout_lines"]
    logger.info("Kube server status: {}".format(kube_server_status))
    for line in kube_server_status:
        if line.startswith("KUBERNETES_MASTER SERVER connected"):
            return line.endswith("true")
    logger.info("Kubernetes server check_connected failed to check server status")
    # Fix: previously fell through and implicitly returned None, which is
    # neither == True nor == False in poll_for_status_change's comparison.
    return False
def poll_for_status_change(duthost, exp_status, poll_wait_secs=5, min_wait_time=20, max_wait_time=120):
    """
    Polls to see if kube server connected status updates as expected

    Args:
        duthost: DUT host object
        exp_status: expected server connected status once processes are synced
        poll_wait_secs: seconds between each server connected status poll. Default: 5 seconds
        min_wait_time: seconds before starting poll of server connected status. Default: 20 seconds
        max_wait_time: maximum amount of time to spend polling for status change. Default: 120 seconds

    Returns:
        True if server connected status updates as expected by max_wait_time
        False if server connected status fails to update as expected by max_wait_time
    """
    time.sleep(min_wait_time)
    elapsed_secs = min_wait_time
    while elapsed_secs < max_wait_time:
        if check_connected(duthost) == exp_status:
            # Fix: the old message logged the *remaining* wait budget rather
            # than the elapsed time; report elapsed time and use the module
            # logger for consistency with the rest of this file.
            logger.info("Time taken to update Kube server status: {} seconds".format(elapsed_secs))
            return True
        time.sleep(poll_wait_secs)
        elapsed_secs += poll_wait_secs
    return False
9cea6c5884c806c3032a4309da7131c515de5453 | 2,467 | py | Python | registryServer/apps/account/api.py | HumanDynamics/openPDS-RegistryServer | fe88f513a93c116f6bd4e2d81b29376a45f16249 | [
"MIT"
] | 7 | 2015-01-05T17:25:15.000Z | 2019-06-07T05:19:53.000Z | registryServer/apps/account/api.py | HumanDynamics/openPDS-RegistryServer | fe88f513a93c116f6bd4e2d81b29376a45f16249 | [
"MIT"
] | 1 | 2015-11-10T17:16:01.000Z | 2015-11-18T16:09:43.000Z | registryServer/apps/account/api.py | HumanDynamics/openPDS-RegistryServer | fe88f513a93c116f6bd4e2d81b29376a45f16249 | [
"MIT"
] | 13 | 2015-01-05T17:25:20.000Z | 2021-12-16T12:43:52.000Z | from django.contrib.auth.models import User
from apps.account.models import Profile, Group
from tastypie import fields
from tastypie.authorization import DjangoAuthorization, Authorization
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import BadRequest
from django.db import IntegrityError
from oauth2app.models import Client, AccessRange
import pdb
class UserResource(ModelResource):
    """REST resource for Django auth User objects.

    Sensitive / noisy fields are excluded from the serialized form, and
    creation goes through create_user so the password is hashed.
    """
    class Meta:
        queryset = User.objects.all()
        authorization = Authorization()
        excludes = ['last_login', 'password', 'date_joined']

    def obj_create(self, bundle, request=None, **kwargs):
        """Create a user from the posted username/password pair."""
        username = bundle.data['username']
        password = bundle.data['password']
        try:
            bundle.obj = User.objects.create_user(username, '', password)
        except IntegrityError:
            raise BadRequest('That username already exists.')
        return bundle
class GroupResource(ModelResource):
    """REST resource exposing account Group objects."""
    class Meta:
        queryset = Group.objects.all()
        # NOTE(review): open Authorization() -- any caller may read and write
        # groups; tighten if this API is exposed publicly.
        authorization = Authorization()
class ClientResource(ModelResource):
    """REST resource exposing oauth2app Client registrations."""
    class Meta:
        queryset = Client.objects.all()
        # NOTE(review): open Authorization() exposes OAuth client records;
        # verify no secret material is serialized before making this public.
        authorization = Authorization()
class ScopeResource(ModelResource):
    """REST resource exposing oauth2app AccessRange (OAuth scope) objects."""
    class Meta:
        queryset = AccessRange.objects.all()
        # NOTE(review): open Authorization() -- scopes are world-writable.
        authorization = Authorization()
class ProfileResource(ModelResource):
    """REST resource for Profile objects with the owning User embedded.

    On create, the plaintext password is stripped from the nested user
    payload before the default obj_create runs, then hashed onto the user
    via set_password.
    """
    user = fields.ForeignKey(UserResource, 'user', full=True)
    class Meta:
        queryset = Profile.objects.all()
        authorization = Authorization()
    def obj_create(self, bundle, request=None, **kwargs):
        """Create a Profile plus its nested User, hashing the password.

        Raises BadRequest when the username is already taken (surfaces as an
        IntegrityError from the database layer).
        """
        try:
            # Pop the password first so the plaintext never reaches the ORM.
            password = bundle.data["user"].pop("password")
            bundle = super(ProfileResource, self).obj_create(bundle, request, **kwargs)
            bundle.obj.user.set_password(password)
            bundle.obj.user.save()
        except IntegrityError:
            raise BadRequest('Username already exists')
        return bundle
| 39.15873 | 91 | 0.691934 | 1,808 | 0.732874 | 0 | 0 | 0 | 0 | 0 | 0 | 642 | 0.260235 |
9ceb3e1ccabd70fddbc1a71f663a553a5c834729 | 6,253 | bzl | Python | rules/workspace/def.bzl | viovanov/kubecf | a8e1a1c4775b9cab072c286a99ec6140bc867eff | [
"Apache-2.0"
] | null | null | null | rules/workspace/def.bzl | viovanov/kubecf | a8e1a1c4775b9cab072c286a99ec6140bc867eff | [
"Apache-2.0"
] | null | null | null | rules/workspace/def.bzl | viovanov/kubecf | a8e1a1c4775b9cab072c286a99ec6140bc867eff | [
"Apache-2.0"
] | 1 | 2020-08-18T09:10:43.000Z | 2020-08-18T09:10:43.000Z | """An extension for workspace rules."""
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//:dependencies.bzl", "dependencies")
def _workspace_dependencies_impl(ctx):
    """Fetches each pinned binary dependency for the host platform.

    Every downloaded file is marked executable, and a generated BUILD.bazel
    exports everything so other rules can reference the tools by name.
    """
    platform = "darwin" if ctx.os.name == "mac os x" else ctx.os.name
    for dep in dependencies:
        ctx.download(
            executable = True,
            output = dep["name"],
            sha256 = dep["sha256"][platform],
            url = dep["url"][platform].format(version = dep["version"]),
        )
    ctx.file("BUILD.bazel", 'exports_files(glob(["**/*"]))\n')
# Private repository rule; callers use the workspace_dependencies() macro
# below so the repository name stays fixed.
_workspace_dependencies = repository_rule(_workspace_dependencies_impl)

# Fixed repository name: internal rules reference the downloaded tools as
# "@workspace_dependencies//:<tool>".
_WORKSPACE_DEPENDENCIES_REPOSITORY_NAME = "workspace_dependencies"
def workspace_dependencies():
    """A macro for wrapping the workspace_dependencies repository rule with a hardcoded name.

    The workspace_dependencies repository rule should be called before any of the other rules in
    this Bazel extension.

    Hardcoding the target name is useful for consuming it internally. The targets produced by this
    rule are only used within the workspace rules.
    """
    _workspace_dependencies(
        name = _WORKSPACE_DEPENDENCIES_REPOSITORY_NAME,
    )
def _workspace_status_impl(ctx):
    """Converts and merges Bazel's two status files into workspace_status.json."""
    info_json = _convert_status(ctx, ctx.info_file)
    version_json = _convert_status(ctx, ctx.version_file)
    merger = ctx.actions.declare_file("status_merger.sh")
    merged = ctx.actions.declare_file("workspace_status.json")

    # Instantiate the merger script template with the concrete file paths.
    ctx.actions.expand_template(
        is_executable = True,
        output = merger,
        substitutions = {
            "{info_file}": info_json.path,
            "{version_file}": version_json.path,
            "{workspace_status}": merged.path,
            "{jq}": ctx.executable._jq.path,
        },
        template = ctx.file._status_merger_tmpl,
    )
    ctx.actions.run(
        executable = merger,
        inputs = [info_json, version_json],
        outputs = [merged],
        tools = [ctx.executable._jq],
    )
    return [DefaultInfo(files = depset([merged]))]
def _convert_status(ctx, status_file):
    """Converts one Bazel status file into JSON via a generated jq script.

    Returns the declared JSON output File.
    """
    basename = paths.basename(status_file.path)
    json_file = ctx.actions.declare_file(paths.replace_extension(basename, ".json"))
    converter = ctx.actions.declare_file("{}_converter.sh".format(basename))
    ctx.actions.expand_template(
        is_executable = True,
        output = converter,
        substitutions = {
            "{input}": status_file.path,
            "{output}": json_file.path,
            "{jq}": ctx.executable._jq.path,
        },
        template = ctx.file._status_converter_tmpl,
    )
    ctx.actions.run(
        executable = converter,
        inputs = [status_file],
        outputs = [json_file],
        tools = [ctx.executable._jq],
    )
    return json_file
# Rule producing a single workspace_status.json that merges Bazel's stable
# (info_file) and volatile (version_file) workspace status, converted via jq.
workspace_status = rule(
    _workspace_status_impl,
    attrs = {
        # Shell template converting one status file to JSON.
        "_status_converter_tmpl": attr.label(
            allow_single_file = True,
            default = "//:status_converter.tmpl.sh",
        ),
        # Shell template merging the two JSON files.
        "_status_merger_tmpl": attr.label(
            allow_single_file = True,
            default = "//:status_merger.tmpl.sh",
        ),
        # jq binary downloaded by workspace_dependencies().
        "_jq": attr.label(
            allow_single_file = True,
            cfg = "host",
            default = "@{}//:jq".format(_WORKSPACE_DEPENDENCIES_REPOSITORY_NAME),
            executable = True,
        ),
    },
)
def _yaml_loader(ctx):
    """Implementation of yaml_loader: converts the YAML src into a .bzl file.

    The YAML file is converted to JSON with yq, the JSON is converted to a
    Starlark dictionary by the converter script, and the result is written
    to ctx.attr.out inside the generated repository.
    """
    # Check if the output file name has the .bzl extension.
    # (endswith replaces the previous manual slice `out[len(out)-4:]`.)
    if not ctx.attr.out.endswith(".bzl"):
        fail("Expected output file ({out}) to have .bzl extension".format(out = ctx.attr.out))

    # Get the yq binary path.
    yq = ctx.path(ctx.attr._yq)

    # Get the YAML src absolute path and convert it to JSON.
    src = ctx.path(ctx.attr.src)
    res = ctx.execute([yq, "r", "--tojson", src])
    if res.return_code != 0:
        fail(res.stderr)
    ctx.file("file.json", res.stdout)

    # Convert the JSON file to the Starlark extension.
    converter = ctx.path(ctx.attr._converter)
    res = ctx.execute([_python3(ctx), converter, "file.json"])
    if res.return_code != 0:
        fail(res.stderr)

    # Write the .bzl file with the YAML contents converted.
    ctx.file(ctx.attr.out, res.stdout)

    # An empty BUILD.bazel is only needed to indicate it's a Bazel package.
    ctx.file("BUILD.bazel", "")
yaml_loader = repository_rule(
    _yaml_loader,
    doc = "A repository rule to load a YAML file into a Starlark dictionary",
    attrs = {
        "src": attr.label(
            allow_single_file = True,
            doc = "The YAML file to be loaded into a Starlark dictionary",
            mandatory = True,
        ),
        "out": attr.string(
            doc = "The output file name",
            mandatory = True,
        ),
        # yq binary downloaded by workspace_dependencies(); used for the
        # YAML -> JSON step.
        "_yq": attr.label(
            allow_single_file = True,
            cfg = "host",
            default = "@{}//:yq".format(_WORKSPACE_DEPENDENCIES_REPOSITORY_NAME),
            executable = True,
        ),
        # Python script performing the JSON -> .bzl conversion.
        "_converter": attr.label(
            allow_single_file = True,
            default = "//:json_bzl_converter.py",
        ),
    },
)
def _python3(repository_ctx):
    """Returns the path of a Python 3 interpreter found on PATH, or fails."""
    for candidate in ["python", "python3"]:
        python = repository_ctx.which(candidate)
        if python == None:
            continue
        res = repository_ctx.execute([python, "--version"])
        if res.return_code != 0:
            fail(res.stderr)
        # Python 2 prints its version banner to stderr, Python 3 to stdout.
        version = res.stdout.strip() or res.stderr.strip()
        parts = version.split(" ")
        if len(parts) != 2:
            fail("Unable to parse Python output version: {}".format(parts))
        major_version = parts[1].split(".")[0]
        if int(major_version) == 3:
            return python
    fail("Python 3 is required")
| 34.932961 | 98 | 0.621142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,700 | 0.27187 |
9ceb4adfae2836973eb415f54317fb6036b876ab | 2,010 | py | Python | tests/surftrace_test.py | aliyun/surftrace | c3e7856d69c1f73cec5bb2b0f4d41a35b2ff63b1 | [
"MIT"
] | 32 | 2021-12-27T11:54:37.000Z | 2022-03-25T05:14:51.000Z | tests/surftrace_test.py | aliyun/surftrace | c3e7856d69c1f73cec5bb2b0f4d41a35b2ff63b1 | [
"MIT"
] | 8 | 2022-01-07T02:29:05.000Z | 2022-03-02T08:16:09.000Z | tests/surftrace_test.py | aliyun/surftrace | c3e7856d69c1f73cec5bb2b0f4d41a35b2ff63b1 | [
"MIT"
] | 10 | 2022-01-05T11:41:14.000Z | 2022-03-22T08:46:00.000Z | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: surftrace_test.py
Description :
Author : liaozhaoyan
date: 2022/2/27
-------------------------------------------------
Change Activity:
2022/2/27:
-------------------------------------------------
"""
__author__ = 'liaozhaoyan'
import sys
sys.path.append("../")
from surftrace.surftrace import surftrace, setupParser
def cbShow(line):
    """Output callback handed to surftrace: echo one trace line to stdout."""
    print(line)
def surftraceSetup(cmds):
    """Build a surftrace instance for the given probe expressions and start it."""
    expr_parser = setupParser()
    tracer = surftrace(cmds, expr_parser, show=True, echo=False, cbShow=cbShow)
    tracer.start()
def test_kprobe():
    """Plain entry ('p') and return ('r') probes on wake_up_new_task."""
    surftraceSetup(['p wake_up_new_task', 'r wake_up_new_task'])
def test_kprobeArgs():
    """Probe arguments: register 0 rendered in decimal and hex ('X')."""
    surftraceSetup(['p do_filp_open dfd=%0', 'p do_filp_open dfd=X%0'])
def test_kprobeArgMeber():
    """Struct member dereference through probe arguments.

    NOTE(review): 'uesrs' looks like a typo for 'users', but it is only the
    alias name in the probe expression (a runtime string), so it is left
    unchanged here.
    """
    surftraceSetup(['p wake_up_new_task comm=%0->comm',
                    'p wake_up_new_task uesrs=S%0->mm->mm_users',
                    'p wake_up_new_task node=%0->se.run_node.rb_left'])
def test_kprobeSkb():
    """Network probes casting skb payloads to iphdr/tcphdr, with a filter."""
    surftraceSetup(['p __netif_receive_skb_core proto=@(struct iphdr *)l3%0->protocol ip_src=@(struct iphdr *)%0->saddr ip_dst=@(struct iphdr *)l3%0->daddr data=X@(struct iphdr *)l3%0->sdata[1] f:proto==1&&ip_src==127.0.0.1',
                    'p ip_local_deliver len=@(struct iphdr*)%0->ihl',
                    'p tcp_rcv_established aseq=@(struct tcphdr*)l4%0->ack_seq'
                    ])
def test_globalVars():
    """Global variables ($comm, @jiffies) inside probe expressions."""
    surftraceSetup([
        'p brnf_sysctl_call_tables comm=$comm value=%2 value2=@jiffies',
        'p brnf_sysctl_call_tables comm=$comm value=%2 value2=@(struct tcphdr*)l4@jiffies->ack_seq',
        ]
    )
def test_Events():
    """Tracepoint events ('e'): syscalls and scheduler stat wait."""
    expr = [
        'e syscalls/sys_enter_dup',
        'e syscalls/sys_enter_creat',
        'e syscalls/sys_enter_close',
        'e syscalls/sys_enter_chmod',
        'e sched/sched_stat_wait',
    ]
    surftraceSetup(expr)
if __name__ == "__main__":
pass
| 28.714286 | 225 | 0.571144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,187 | 0.589374 |
9ced7b3c51ed7b3bb5bdd6b331b57b879e9a8dab | 834 | py | Python | backend/secure_backend/models.py | CS-twop/Secure-django-reacthook | 3d7b3e65c7e1f1e015dcd3ddf2838acdab7cab17 | [
"MIT"
] | null | null | null | backend/secure_backend/models.py | CS-twop/Secure-django-reacthook | 3d7b3e65c7e1f1e015dcd3ddf2838acdab7cab17 | [
"MIT"
] | null | null | null | backend/secure_backend/models.py | CS-twop/Secure-django-reacthook | 3d7b3e65c7e1f1e015dcd3ddf2838acdab7cab17 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
class Post(models.Model):
    """A text post authored by a user, with audit timestamps."""

    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='posts')
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save

    def __str__(self):
        return "{0} : {1}".format(self.user, self.content)
class Comment(models.Model):
    """A user comment attached to a Post."""

    # NOTE(review): the field is a relation, so Django's column is actually
    # named post_id_id; renaming would break existing migrations, so kept.
    post_id = models.ForeignKey(Post, related_name='comments', on_delete=models.CASCADE)
    user = models.ForeignKey(User, related_name='comments', on_delete=models.CASCADE)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "{0} >> {1}".format(self.user, self.content)
9ceda4c07b42ed0db1090dd1d2bfcc6805e799d2 | 560 | py | Python | tests/test_database.py | horsewithnoname1985/packit-app | afe83472f092ff1447ae8ae56c99d78792d5bc59 | [
"MIT"
] | null | null | null | tests/test_database.py | horsewithnoname1985/packit-app | afe83472f092ff1447ae8ae56c99d78792d5bc59 | [
"MIT"
] | null | null | null | tests/test_database.py | horsewithnoname1985/packit-app | afe83472f092ff1447ae8ae56c99d78792d5bc59 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `database` module."""
import pytest
import os
import sqlite3
from packit_app.database import Database
def test_database_connection():
    """A fresh Database exposes a live sqlite3 cursor/connection and a file on disk."""
    db = Database()
    assert type(db.cur) is sqlite3.Cursor
    assert type(db.connection) is sqlite3.Connection
    # `is True` on an already-boolean expression was redundant.
    assert os.path.exists(Database.db_location)
    # Close so this test does not leak an open connection into later tests.
    db.close_connection()
def test_database_disconnects():
    """After close_connection(), using the cursor raises ProgrammingError."""
    database = Database()
    database.close_connection()
    with pytest.raises(sqlite3.ProgrammingError):
        database.cur.execute("CREATE TABLE cannot_not_be_created")
| 23.333333 | 60 | 0.725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.203571 |
9ceeadf5aa7e878adeab42926d0d1a41ef275e04 | 3,943 | py | Python | SS-GCNs/gnns/gin_net.py | TAMU-VITA/SS-GCNs | 644f8a5f3b507be6d59be02747be406fabd8b8f9 | [
"MIT"
] | 1 | 2021-06-07T15:18:10.000Z | 2021-06-07T15:18:10.000Z | SS-GCNs/gnns/gin_net.py | TAMU-VITA/SS-GCNs | 644f8a5f3b507be6d59be02747be406fabd8b8f9 | [
"MIT"
] | null | null | null | SS-GCNs/gnns/gin_net.py | TAMU-VITA/SS-GCNs | 644f8a5f3b507be6d59be02747be406fabd8b8f9 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from gnns.gin_layer import GINLayer, ApplyNodeFunc, MLP
class GINNet(nn.Module):
    """Two-layer Graph Isomorphism Network producing class scores.

    net_params is indexed positionally: [input dim, hidden dim, n classes].
    The final score averages the linearly-projected first-layer output with
    the second layer's class-dimensional output.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params[0]
        hidden_dim = net_params[1]
        n_classes = net_params[2]
        dropout = 0.5
        self.n_layers = 2
        n_mlp_layers = 1 # GIN
        learn_eps = True # GIN
        neighbor_aggr_type = 'mean' # GIN
        graph_norm = False
        batch_norm = False
        residual = False
        self.n_classes = n_classes
        # List of MLPs: layer 0 maps in_dim -> hidden_dim, layer 1 maps
        # hidden_dim -> n_classes.
        self.ginlayers = torch.nn.ModuleList()
        for layer in range(self.n_layers):
            if layer == 0:
                mlp = MLP(n_mlp_layers, in_dim, hidden_dim, hidden_dim)
            else:
                mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, n_classes)
            self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
                                           dropout, graph_norm, batch_norm, residual, 0, learn_eps))
        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = nn.Linear(hidden_dim, n_classes, bias=False)
    def forward(self, g, h, snorm_n, snorm_e):
        """Return averaged class scores for the nodes of graph g.

        snorm_e is accepted for interface symmetry but unused here.
        """
        # hidden representation after each GIN layer (the raw input features
        # are NOT stored, despite what an older comment claimed)
        hidden_rep = []
        for i in range(self.n_layers):
            h = self.ginlayers[i](g, h, snorm_n)
            hidden_rep.append(h)
        score_over_layer = (self.linears_prediction(hidden_rep[0]) + hidden_rep[1]) / 2
        return score_over_layer
class GINNet_ss(nn.Module):
    """GIN backbone with an auxiliary self-supervised head.

    Identical to GINNet except that forward() additionally returns the
    output of a linear classifier (num_par outputs) applied to the first
    layer's hidden representation.
    """
    def __init__(self, net_params, num_par):
        super().__init__()
        in_dim, hidden_dim, n_classes = net_params[0], net_params[1], net_params[2]
        dropout = 0.5
        self.n_layers = 2
        n_mlp_layers = 1             # GIN: MLP depth inside each GIN layer
        learn_eps = True             # GIN: learn the epsilon weighting
        neighbor_aggr_type = 'mean'  # GIN: neighborhood aggregation
        graph_norm = False
        batch_norm = False
        residual = False
        self.n_classes = n_classes
        # Stack of GIN layers; the second projects down to class scores.
        # (Module creation order matches GINNet for identical parameter
        # initialization under a fixed seed.)
        self.ginlayers = torch.nn.ModuleList()
        for layer_idx in range(self.n_layers):
            if layer_idx == 0:
                mlp = MLP(n_mlp_layers, in_dim, hidden_dim, hidden_dim)
            else:
                mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, n_classes)
            self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
                                           dropout, graph_norm, batch_norm, residual, 0, learn_eps))
        # Maps the first layer's hidden output to class scores.
        self.linears_prediction = nn.Linear(hidden_dim, n_classes, bias=False)
        # Self-supervised head over the first layer's hidden output.
        self.classifier_ss = nn.Linear(hidden_dim, num_par, bias=False)
    def forward(self, g, h, snorm_n, snorm_e):
        """Return (averaged class scores, self-supervised head output)."""
        layer_outputs = []
        for gin_layer in self.ginlayers:
            h = gin_layer(g, h, snorm_n)
            layer_outputs.append(h)
        first_hidden, last_scores = layer_outputs[0], layer_outputs[1]
        score_over_layer = (self.linears_prediction(first_hidden) + last_scores) / 2
        h_ss = self.classifier_ss(first_hidden)
        return score_over_layer, h_ss
| 33.700855 | 113 | 0.584834 | 3,520 | 0.892721 | 0 | 0 | 0 | 0 | 0 | 0 | 693 | 0.175755 |
9cef8673569c093f97353e11647a929d7f02a79c | 96 | py | Python | venv/lib/python3.8/site-packages/clikit/api/command/command_collection.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/clikit/api/command/command_collection.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/clikit/api/command/command_collection.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/0e/03/a2/8516ce170f58c40a340c994a5cb76273f276d7ad1ea824422b51c9e45c | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9cf04e104c24bd87afecb5f1f99c9dd473ed1508 | 5,080 | py | Python | common/py_vulcanize/third_party/rjsmin/_setup/py2/data.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | common/py_vulcanize/third_party/rjsmin/_setup/py2/data.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | common/py_vulcanize/third_party/rjsmin/_setup/py2/data.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===================
Data distribution
===================
This module provides tools to simplify data distribution.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
from distutils import filelist as _filelist
import os as _os
import posixpath as _posixpath
import sys as _sys
from _setup import commands as _commands
def splitpath(path):
    """ Split a path into its drive and a list of path components """
    drive, path = '', _os.path.normpath(path)
    # UNC handling only exists on platforms that provide splitunc.
    splitunc = getattr(_os.path, 'splitunc', None)
    if splitunc is not None:
        drive, path = splitunc(path)
    if not drive:
        drive, path = _os.path.splitdrive(path)
    # Fall back to deriving the separator if os.path.sep is missing.
    sep = getattr(_os.path, 'sep', _os.path.join('1', '2')[1:-1])
    parts = []
    while True:
        head, tail = _os.path.split(path)
        parts.append(tail)
        if head in ('', sep):
            # Reached the root (or a relative start): fold it into the drive.
            drive = _os.path.join(drive, head)
            break
        path = head
    parts.reverse()
    return drive, parts
def finalizer(installer):
    """ Finalize install_data: expand Data containers into plain entries """
    flattened = []
    for entry in installer.data_files:
        if isinstance(entry, Data):
            flattened.extend(entry.flatten(installer))
        else:
            flattened.append(entry)
    installer.data_files = flattened
class Data(object):
    """ File list container

    Wraps a list of files for distutils' ``install_data`` together with
    options controlling the target layout (``prefix``) and whether the
    files' directory structure is kept (``preserve``/``strip``).
    """
    def __init__(self, files, target=None, preserve=0, strip=0,
                 prefix=None):
        """ Initialization

        :param files: list of file names to distribute
        :param target: target directory (stored, not used by `flatten`)
        :param preserve: keep the files' directory structure?
        :param strip: number of leading path components to drop when
                      preserving the structure
        :param prefix: path prefix prepended to every target directory
        """
        self._files = files
        self._target = target
        self._preserve = preserve
        self._strip = strip
        self._prefix = prefix
        self.fixup_commands()
    def fixup_commands(self):
        """ Hook for subclasses to register extra distutils options """
        pass
    def from_templates(cls, *templates, **kwargs):
        """ Initialize from template

        Each template is a semicolon separated list of MANIFEST.in style
        template lines; files under `.svn`/`.git` directories are dropped.
        """
        files = _filelist.FileList()
        for tpl in templates:
            for line in tpl.split(';'):
                files.process_template_line(line.strip())
        files.sort()
        files.remove_duplicates()
        result = []
        for filename in files.files:
            _, elems = splitpath(filename)
            if '.svn' in elems or '.git' in elems:
                continue
            result.append(filename)
        return cls(result, **kwargs)
    from_templates = classmethod(from_templates)  # pre-decorator classmethod spelling
    def flatten(self, installer):
        """ Flatten the file list to (target, file) tuples """
        # pylint: disable = W0613
        if self._prefix:
            # The prefix components form the base target directory.
            _, prefix = splitpath(self._prefix)
            telems = prefix
        else:
            telems = []
        tmap = {}
        for fname in self._files:
            (_, name), target = splitpath(fname), telems
            if self._preserve:
                if self._strip:
                    # Drop up to `strip` leading components, but always
                    # keep at least the file name itself.
                    name = name[max(0, min(self._strip, len(name) - 1)):]
                if len(name) > 1:
                    # Re-create the (possibly stripped) directory structure
                    # below the prefix.
                    target = telems + name[:-1]
            tmap.setdefault(_posixpath.join(*target), []).append(fname)
        return tmap.items()
class Documentation(Data):
    """ Container for documentation files, skippable via --without-docs """
    def fixup_commands(self):
        """ Register the --without-docs option and the install finalizer """
        _commands.add_option(
            'install_data', 'without-docs',
            help_text='Do not install documentation files',
            inherit='install',
        )
        _commands.add_finalizer('install_data', 'documentation', finalizer)
    def flatten(self, installer):
        """ Flatten to nothing when documentation installation is disabled """
        return [] if installer.without_docs else Data.flatten(self, installer)
class Manpages(Documentation):
    """ Container for manual pages, grouped by man section """
    def dispatch(cls, files):
        """ Group manpages by extension (= man section) and build one
        container per section, targeting share/man/man<section>. """
        by_section = {}
        for page in files:
            ext = _os.path.splitext(_os.path.normpath(page))[1]
            if ext.startswith(_os.path.extsep):
                ext = ext[len(_os.path.extsep):]
            by_section.setdefault(ext, []).append(page)
        containers = []
        for section, pages in by_section.items():
            target = _posixpath.join('share', 'man', 'man%s' % section)
            containers.append(cls(pages, prefix=target))
        return containers
    dispatch = classmethod(dispatch)
    def flatten(self, installer):
        """ Manpages make no sense on win32; otherwise defer to Documentation """
        return [] if _sys.platform == 'win32' else Documentation.flatten(self, installer)
| 30.60241 | 75 | 0.598622 | 3,093 | 0.608858 | 0 | 0 | 0 | 0 | 0 | 0 | 1,379 | 0.271457 |
9cf45647d9d8c0126932e004da932d00ac6a9682 | 189 | py | Python | payments/urls.py | caocmai/maker | bec025c3301a21e75638aca28576c724b44e259f | [
"MIT"
] | null | null | null | payments/urls.py | caocmai/maker | bec025c3301a21e75638aca28576c724b44e259f | [
"MIT"
] | 8 | 2020-06-06T01:12:07.000Z | 2021-06-10T19:38:47.000Z | payments/urls.py | caocmai/Maker | bec025c3301a21e75638aca28576c724b44e259f | [
"MIT"
] | 1 | 2020-03-05T08:11:39.000Z | 2020-03-05T08:11:39.000Z | from django.urls import path
from . import views
# URL routes for the payments app.
urlpatterns = [
    # charge endpoint handled by views.charge; reversible as 'charge'
    path('charge/', views.charge, name='charge'),
    # class-based landing page; reversible as 'payment'
    path('payment/', views.HomePageView.as_view(), name='payment'),
]
| 17.181818 | 67 | 0.671958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.190476 |
9cf644b1b80793072b365bad95258387b0ed2c8b | 122 | py | Python | cdc/src/__init__.py | ZebinKang/cdc | a32fe41892021d29a1d9c534728a92b67f9b6cea | [
"MIT"
] | null | null | null | cdc/src/__init__.py | ZebinKang/cdc | a32fe41892021d29a1d9c534728a92b67f9b6cea | [
"MIT"
] | null | null | null | cdc/src/__init__.py | ZebinKang/cdc | a32fe41892021d29a1d9c534728a92b67f9b6cea | [
"MIT"
] | null | null | null | from NoteDeid import *
from NoteConceptParser import *
from Converter import *
from D2v import *
from MLPipeline import *
| 20.333333 | 31 | 0.795082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9cf660cb297e4f4275f9be8ec5a0164060234d14 | 1,207 | py | Python | src/education/urls.py | compressore/moc | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | [
"MIT"
] | 4 | 2020-10-14T15:35:07.000Z | 2022-01-13T15:31:16.000Z | src/education/urls.py | compressore/moc | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | [
"MIT"
] | null | null | null | src/education/urls.py | compressore/moc | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | [
"MIT"
] | 2 | 2021-01-07T14:39:05.000Z | 2022-01-18T12:31:50.000Z | from django.urls import path
from . import views
from core import views as core
from community import views as community
from library import views as library
from ie.urls_baseline import baseline_urlpatterns
from ie.urls_education_baseline import baseline_education_urlpatterns
# URL namespace used with reverse()/{% url %}, e.g. "education:index".
app_name = "education"
# App routes = shared baseline routes + education-specific pages.
urlpatterns = baseline_urlpatterns + baseline_education_urlpatterns + [
    path("", views.index, name="index"),
    path("theses/", views.theses, name="theses"),
    path("controlpanel/students/", views.controlpanel_students, name="controlpanel_students"),
    path("controlpanel/students/<int:id>/", views.controlpanel_student, name="controlpanel_student"),
    path("controlpanel/courses/", views.controlpanel_courses, name="controlpanel_courses"),
    path("controlpanel/courses/<int:id>/", views.controlpanel_course, name="controlpanel_course"),
    # NOTE(review): the next two routes share the name
    # "controlpanel_course_form"; reverse() resolves only one of them —
    # confirm this is intentional.
    path("controlpanel/courses/<int:id>/edit/", views.controlpanel_course_form, name="controlpanel_course_form"),
    path("controlpanel/courses/create/", views.controlpanel_course_form, name="controlpanel_course_form"),
    path("controlpanel/courses/<int:id>/<int:content>/", views.controlpanel_course_content, name="controlpanel_course_content"),
]
| 52.478261 | 128 | 0.785418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.357084 |
9cf6c6a73f439ca6c05d3204935a74fc4fd76abd | 7,138 | py | Python | flowsa/data_source_scripts/StatCan_IWS_MI.py | JohnAndrewTaylor/flowsa | 21b14b19f08370db574bdd59219a2773983c6f95 | [
"CC0-1.0"
] | null | null | null | flowsa/data_source_scripts/StatCan_IWS_MI.py | JohnAndrewTaylor/flowsa | 21b14b19f08370db574bdd59219a2773983c6f95 | [
"CC0-1.0"
] | null | null | null | flowsa/data_source_scripts/StatCan_IWS_MI.py | JohnAndrewTaylor/flowsa | 21b14b19f08370db574bdd59219a2773983c6f95 | [
"CC0-1.0"
] | null | null | null | # Stat_Canada.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
'''
Pulls Statistics Canada data on water intake and discharge for 3 digit NAICS from 2005 - 2015
'''
import pandas as pd
import io
import zipfile
import pycountry
from flowsa.common import *
def sc_call(url, sc_response, args):
    """
    Convert response for calling url to pandas dataframe, begin parsing df into FBA format
    :param url: string, url
    :param sc_response: response object whose ``content`` holds the zip bytes
    :param args: dictionary, arguments specified when running
    flowbyactivity.py ('year' and 'source')
    :return: pandas dataframe of original source data, or None when the
        archive contains only "MetaData" members
    """
    # The Statistics Canada download is a zip archive; read it from memory.
    with zipfile.ZipFile(io.BytesIO(sc_response.content), "r") as f:
        for name in f.namelist():
            # skip the "MetaData" member(s); the first remaining member
            # holds the actual data table
            if "MetaData" not in name:
                # FIX: open the archive member in a context manager so the
                # handle is closed (the original left it open)
                with f.open(name) as data:
                    return pd.read_csv(data, header=0)
    # make the "nothing found" case explicit instead of an implicit None
    return None
def sc_parse(dataframe_list, args):
    """
    Begin parsing and formatting data into flowbyactivity format
    :param dataframe_list: list of dataframes to concat and format
    :param args: arguments as specified in flowbyactivity.py ('year' and 'source')
    :return: dataframe parsed and partially formatted to flowbyactivity specifications
    """
    # concat dataframes
    df = pd.concat(dataframe_list, sort=False)
    # drop columns not needed downstream
    df = df.drop(columns=['COORDINATE', 'DECIMALS', 'DGUID', 'SYMBOL', 'TERMINATED', 'UOM_ID', 'SCALAR_ID', 'VECTOR'])
    # rename columns to FBA field names
    df = df.rename(columns={'GEO': 'Location',
                            'North American Industry Classification System (NAICS)': 'Description',
                            'REF_DATE': 'Year',
                            'STATUS': 'Spread',
                            'VALUE': "FlowAmount",
                            'Water use parameter': 'FlowName'})
    # extract NAICS as activity column. rename activity based on flowname
    # NOTE(review): the pattern is not a raw string; '\[' relies on Python
    # passing unknown escapes through (emits a warning on newer versions).
    df['Activity'] = df['Description'].str.extract('.*\[(.*)\].*')
    df.loc[df['Description'] == 'Total, all industries', 'Activity'] = '31-33'  # todo: change these activity names
    df.loc[df['Description'] == 'Other manufacturing industries', 'Activity'] = 'Other'
    df['FlowName'] = df['FlowName'].str.strip()
    # intake is attributed to the consuming activity; discharge and
    # recirculation to the producing activity
    df.loc[df['FlowName'] == 'Water intake', 'ActivityConsumedBy'] = df['Activity']
    df.loc[df['FlowName'].isin(['Water discharge', "Water recirculation"]), 'ActivityProducedBy'] = df['Activity']
    # create "unit" column, e.g. "million Cubic metres/year"
    df["Unit"] = "million " + df["UOM"] + "/year"
    # drop columns used to create unit and activity columns
    df = df.drop(columns=['SCALAR_FACTOR', 'UOM', 'Activity'])
    # Modify the assigned RSD letter values to numeric value (range midpoints)
    df.loc[df['Spread'] == 'A', 'Spread'] = 2.5  # given range: 0.01 - 4.99%
    df.loc[df['Spread'] == 'B', 'Spread'] = 7.5  # given range: 5 - 9.99%
    df.loc[df['Spread'] == 'C', 'Spread'] = 12.5  # given range: 10 - 14.99%
    df.loc[df['Spread'] == 'D', 'Spread'] = 20  # given range: 15 - 24.99%
    df.loc[df['Spread'] == 'E', 'Spread'] = 37.5  # given range:25 - 49.99%
    df.loc[df['Spread'] == 'F', 'Spread'] = 75  # given range: > 49.99%
    df.loc[df['Spread'] == 'x', 'Spread'] = withdrawn_keyword
    # hard code data
    df['Class'] = 'Water'
    df['SourceName'] = 'StatCan_IWS_MI'
    # temp hardcode canada iso code
    df['Location'] = call_country_code('Canada')
    df['Year'] = df['Year'].astype(str)
    df['LocationSystem'] = "ISO"
    df["MeasureofSpread"] = 'RSD'
    df["DataReliability"] = '3'
    df["DataCollection"] = '4'
    # subset based on year
    df = df[df['Year'] == args['year']]
    return df
def convert_statcan_data_to_US_water_use(df, attr):
    """
    Use Canadian GDP data to convert 3 digit canadian water use to us water
    use:
    - canadian gdp
    - us gdp
    :param df: FBA dataframe of Canadian water use by 3-digit NAICS
    :param attr: dict, must contain 'allocation_source_year'
    :return: dataframe with FlowAmount converted to estimated US Mgal
    """
    import flowsa
    from flowsa.values_from_literature import get_Canadian_to_USD_exchange_rate
    from flowsa.flowbyfunctions import assign_fips_location_system, aggregator
    from flowsa.common import fba_default_grouping_fields
    from flowsa.dataclean import harmonize_units
    from flowsa.common import US_FIPS, load_bea_crosswalk
    # load Canadian GDP data
    gdp = flowsa.getFlowByActivity(datasource='StatCan_GDP', year=attr['allocation_source_year'], flowclass='Money')
    gdp = harmonize_units(gdp)
    # drop the all-manufacturing aggregate so only 3-digit sectors remain
    gdp = gdp[gdp['ActivityProducedBy'] != '31-33']
    gdp = gdp.rename(columns={"FlowAmount": "CanDollar"})
    # attach Canadian GDP to each water-use row by NAICS
    df_m = pd.merge(df, gdp[['CanDollar', 'ActivityProducedBy']], how='left', left_on='ActivityConsumedBy',
                    right_on='ActivityProducedBy')
    df_m['CanDollar'] = df_m['CanDollar'].fillna(0)
    df_m = df_m.drop(columns=["ActivityProducedBy_y"])
    df_m = df_m.rename(columns={"ActivityProducedBy_x": "ActivityProducedBy"})
    # rows without matching GDP cannot be converted; drop them
    df_m = df_m[df_m['CanDollar'] != 0]
    exchange_rate = get_Canadian_to_USD_exchange_rate(str(attr['allocation_source_year']))
    exchange_rate = float(exchange_rate)
    # convert to mgal/USD (water use per US dollar of Canadian output)
    df_m.loc[:, 'FlowAmount'] = df_m['FlowAmount'] / (df_m['CanDollar'] / exchange_rate)
    df_m.loc[:, 'Unit'] = 'Mgal/USD'
    df_m = df_m.drop(columns=["CanDollar"])
    # convert Location to US
    df_m.loc[:, 'Location'] = US_FIPS
    df_m = assign_fips_location_system(df_m, str(attr['allocation_source_year']))
    # load US GDP (gross output) data
    us_gdp_load = flowsa.getFlowByActivity(datasource='BEA_GDP_GrossOutput', year=attr['allocation_source_year'],
                                           flowclass='Money')
    us_gdp_load = harmonize_units(us_gdp_load)
    # load bea crosswalk, keeping only 3-digit NAICS mappings
    cw_load = load_bea_crosswalk()
    cw = cw_load[['BEA_2012_Detail_Code', 'NAICS_2012_Code']].drop_duplicates()
    cw = cw[cw['NAICS_2012_Code'].apply(lambda x: len(str(x)) == 3)].drop_duplicates().reset_index(drop=True)
    # map BEA detail codes onto NAICS codes
    us_gdp = pd.merge(us_gdp_load, cw, how='left', left_on='ActivityProducedBy', right_on='BEA_2012_Detail_Code')
    us_gdp = us_gdp.drop(columns=['ActivityProducedBy', 'BEA_2012_Detail_Code'])
    # rename columns
    us_gdp = us_gdp.rename(columns={'NAICS_2012_Code': 'ActivityProducedBy'})
    # aggregate GDP by NAICS
    us_gdp = aggregator(us_gdp, fba_default_grouping_fields)
    us_gdp = us_gdp.rename(columns={'FlowAmount': 'us_gdp'})
    # determine annual us water use: intensity (Mgal/USD) * US output (USD)
    df_m2 = pd.merge(df_m, us_gdp[['ActivityProducedBy', 'us_gdp']], how='left', left_on='ActivityConsumedBy',
                     right_on='ActivityProducedBy')
    df_m2.loc[:, 'FlowAmount'] = df_m2['FlowAmount'] * (df_m2['us_gdp'])
    df_m2.loc[:, 'Unit'] = 'Mgal'
    df_m2 = df_m2.rename(columns={'ActivityProducedBy_x': 'ActivityProducedBy'})
    df_m2 = df_m2.drop(columns=['ActivityProducedBy_y', 'us_gdp'])
    return df_m2
# def disaggregate_statcan_to_naics_6(df):
# """
#
# :param df:
# :return:
# """
#
# return df
| 41.260116 | 118 | 0.64738 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,734 | 0.523116 |
9cf6cf2875b70b62f98a1356560f801938cbd27e | 3,952 | py | Python | extra_foam/pipeline/f_zmq.py | scottwedge/EXtra-foam | 9a170e3097987bf8abf30abb64a52439624367b8 | [
"BSD-3-Clause"
] | null | null | null | extra_foam/pipeline/f_zmq.py | scottwedge/EXtra-foam | 9a170e3097987bf8abf30abb64a52439624367b8 | [
"BSD-3-Clause"
] | null | null | null | extra_foam/pipeline/f_zmq.py | scottwedge/EXtra-foam | 9a170e3097987bf8abf30abb64a52439624367b8 | [
"BSD-3-Clause"
] | null | null | null | """
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
from collections import deque
from threading import Event
import zmq
from karabo_bridge import Client
from ..config import config
from ..utils import run_in_thread
class BridgeProxy:
    """A proxy bridge which can connect to more than one server.

    The current implementation has the following limits:

    1. All the connections must be alive;
    2. It is blocked, which means if one connection is fast and the other
       is slow, the overall performance is limited by the slow one.
    """

    POLL_TIMEOUT = 100  # timeout of the poller in milliseconds

    def __init__(self):
        self._context = None           # zmq.Context owned while connected
        self._client = None            # karabo_bridge.Client facing the frontend
        self._frontend = None          # ROUTER socket the client talks to
        self._backend = dict()         # endpoint address -> DEALER socket
        self._backend_ready = deque()  # addresses currently free to take a request
        self._running = False
        self._stopped = Event()        # set while the proxy loop is NOT running
        self._stopped.set()

    @property
    def client(self):
        # karabo_bridge.Client connected to the in-process frontend.
        return self._client

    def connect(self, endpoints):
        """Connect the backend to one or more endpoints.

        :param str/list/tuple endpoints: addresses of endpoints.
        """
        if isinstance(endpoints, str):
            endpoints = [endpoints]
        elif not isinstance(endpoints, (tuple, list)):
            raise ValueError("Endpoints must be either a string or "
                             "a tuple/list of string!")
        context = zmq.Context()
        # One DEALER socket per server endpoint.
        for end in endpoints:
            backend = context.socket(zmq.DEALER)
            backend.connect(end)
            self._backend[end] = backend
        # The client side talks to an in-process ROUTER socket.
        frontendpoint = "inproc://frontend"
        self._frontend = context.socket(zmq.ROUTER)
        self._frontend.bind(frontendpoint)
        self._client = Client(frontendpoint,
                              context=context,
                              timeout=config['BRIDGE_TIMEOUT'])
        self._context = context

    @run_in_thread()
    def start(self):
        """Run the proxy in a thread."""
        if self._running:
            raise RuntimeError(f"{self.__class__} is already running!")
        frontend = self._frontend
        poller = zmq.Poller()
        poller.register(frontend, zmq.POLLIN)
        # Initially every backend is ready to accept a client request.
        for address, bk in self._backend.items():
            poller.register(bk, zmq.POLLIN)
            self._backend_ready.append(address)
        self._stopped.clear()
        self._running = True
        while self._running:
            socks = dict(poller.poll(timeout=self.POLL_TIMEOUT))
            # Forward a client request to the next free backend, if any.
            if socks.get(frontend) == zmq.POLLIN:
                message = frontend.recv_multipart()
                if len(self._backend_ready) > 0:
                    address = self._backend_ready.popleft()
                    self._backend[address].send_multipart(message)
            # Relay backend replies to the client; those backends become
            # ready again.
            for address, bk in self._backend.items():
                if socks.get(bk) == zmq.POLLIN:
                    message = bk.recv_multipart()
                    frontend.send_multipart(message)
                    self._backend_ready.append(address)
        # clean up and close all sockets to avoid problems with buffer
        poller.unregister(frontend)
        for bk in self._backend.values():
            poller.unregister(bk)
        # LINGER 0: discard pending messages instead of blocking on close.
        for bk in self._backend.values():
            bk.setsockopt(zmq.LINGER, 0)
        self._backend.clear()
        self._backend_ready.clear()
        self._frontend.setsockopt(zmq.LINGER, 0)
        self._frontend = None
        self._client = None
        self._context.destroy(linger=0)
        self._context = None
        self._stopped.set()

    def stop(self):
        """Stop the proxy running in a thread."""
        # Flag the loop to exit, then wait for its cleanup to finish.
        self._running = False
        if not self._stopped.is_set():
            self._stopped.wait()
| 30.167939 | 73 | 0.607034 | 3,523 | 0.891447 | 0 | 0 | 1,724 | 0.436235 | 0 | 0 | 1,006 | 0.254555 |
9cf85aaa6823aec72d799dfb11b9d01b66e08e59 | 1,017 | py | Python | src/supervisor/supervisor_loader.py | Telsanyr/Johnny_VI | db9f54457c33b1f70616671611e6bc4fc4c44d1b | [
"WTFPL"
] | 6 | 2018-08-07T14:57:58.000Z | 2020-02-13T18:43:49.000Z | src/supervisor/supervisor_loader.py | Telsanyr/Johnny_VI | db9f54457c33b1f70616671611e6bc4fc4c44d1b | [
"WTFPL"
] | 2 | 2018-08-08T12:12:28.000Z | 2018-08-23T12:46:25.000Z | src/supervisor/supervisor_loader.py | Telsanyr/Johnny_VI | db9f54457c33b1f70616671611e6bc4fc4c44d1b | [
"WTFPL"
] | 1 | 2018-08-07T14:51:15.000Z | 2018-08-07T14:51:15.000Z | #!/usr/bin/env python
# -*- coding: utf8 -*-
# coding: utf8
# This is a module loader. It will reload all the files dedicated to the module
# each time it is reloaded. Therefore, any code modification will be taken into
# account.
# Moreover, because all files are loaded in the same module. All classes and
# global variables are reachable by any other file. When using a module class
# which is outside your file, you do not need to import it.
# However when modifying or adding a new file, you must take care that you do
# not override existing class/variable in the module, outside of your code file.
# Libs imports
import imp
# "supervisor_module" is built from all files in supervisor folder (apart from the loader)
# NOTE(review): the `imp` module is deprecated (removed in Python 3.12);
# migrating to importlib is advisable.
SUPERVISOR_PATH = "./src/supervisor/"
# Each call executes one source file under the module name
# "supervisor_module" (see the header comments above for the intended
# shared-namespace behaviour).
imp.load_source("supervisor_module", SUPERVISOR_PATH + "abstract_service.py")
imp.load_source("supervisor_module", SUPERVISOR_PATH + "service_proxy.py")
# Re-export the Supervisor class defined in supervisor_main.py.
Supervisor = imp.load_source("supervisor_module", SUPERVISOR_PATH + "supervisor_main.py").Supervisor
| 46.227273 | 100 | 0.771878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 833 | 0.819076 |
9cf88f9c3c66dc987145112c9d9843d8bd9f0a49 | 849 | py | Python | todolist/todos/migrations/0002_sharetodolist.py | abdu1aziz/todo-list-app | 698cc046ea4cf9259f8e9830a33166dc6d222abe | [
"MIT"
] | null | null | null | todolist/todos/migrations/0002_sharetodolist.py | abdu1aziz/todo-list-app | 698cc046ea4cf9259f8e9830a33166dc6d222abe | [
"MIT"
] | null | null | null | todolist/todos/migrations/0002_sharetodolist.py | abdu1aziz/todo-list-app | 698cc046ea4cf9259f8e9830a33166dc6d222abe | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-23 06:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the shareTodoList model (auto-generated; avoid hand-editing)."""
    # Must be applied after the initial todos migration and the (swappable)
    # auth user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('todos', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='shareTodoList',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # creation timestamp, set automatically on insert
                ('date', models.DateTimeField(auto_now_add=True)),
                # FK to the user; deleting the user does NOT cascade
                ('userInfo', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
                # FK to the work list; rows are removed when the list is deleted
                ('workList', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='todos.worklist')),
            ],
        ),
    ]
| 32.653846 | 125 | 0.640754 | 692 | 0.815077 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.149588 |
9cfa1227d896963056de524d0c2b24fd87570c59 | 4,350 | py | Python | keyboard/mechanical-button/single-button/keyboard.py | ELE-Clouds/mpy-lib | 76a45072357fed3fc8dde75260585622057896da | [
"MIT"
] | 116 | 2018-07-16T14:48:44.000Z | 2022-03-16T15:24:54.000Z | keyboard/mechanical-button/single-button/keyboard.py | ELE-Clouds/mpy-lib | 76a45072357fed3fc8dde75260585622057896da | [
"MIT"
] | 8 | 2018-07-11T14:00:30.000Z | 2022-01-20T01:30:09.000Z | keyboard/mechanical-button/single-button/keyboard.py | ELE-Clouds/mpy-lib | 76a45072357fed3fc8dde75260585622057896da | [
"MIT"
] | 66 | 2018-07-11T08:50:00.000Z | 2022-03-28T15:36:00.000Z | # -*- coding:UTF-8 -*-
'''
MIT License
Copyright (c) 2018 Robin Chen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
******************************************************************************
* 文 件:keyboard.py
* 概 述:识别单个机械按键的单击、连击(暂未限制连击次数)、长按、短按动作,并返回事件。
* 版 本:V0.10
* 作 者:Robin Chen
* 日 期:2018年7月26日
* 历 史: 日期 编辑 版本 记录
2018年7月26日 Robin Chen V0.10 创建文件
`
******************************************************************************'''
class KEYBOARD:
    """Single mechanical push-button driver (MicroPython-style Pin/Timer API).

    A hardware timer samples the pin every ``clock`` milliseconds and
    classifies presses into long-press and (multi-)click events, reported
    through the user supplied callbacks.
    """
    # Long-press repeat counter; accessed as a *global* inside doBtnScan
    # (see the NOTE there).
    cont = 0
    def __init__(self, _btnKey, _tmBtn, _btnDef = 1, even_djlong = None, even_lj = None, _pull = None):
        """
        :param _btnKey: Pin object wired to the button
        :param _tmBtn: Timer used to drive the periodic scan callback
        :param _btnDef: logic level that means "pressed" (default 1)
        :param even_djlong: callback fired on a long press
        :param even_lj: callback fired on (multi-)clicks; receives the count
        :param _pull: "UP"/"DOWN" to enable the internal pull resistor
        """
        self.btn = _btnKey
        if _pull == "UP":
            self.btn.init(_btnKey.IN, _btnKey.PULL_UP)
        elif _pull == "DOWN":
            self.btn.init(_btnKey.IN, _btnKey.PULL_DOWN)
        else:
            self.btn.init(_btnKey.IN)
        self.btnDef = _btnDef
        self.eve_btnLon = even_djlong
        self.evn_Continuous_Clicks = even_lj
        self.btnLabDown = 0            # scan count while the button is held down
        self.btnLabUp = 0              # scan count while the button is released
        self.Continuous_Clicks = 0     # number of consecutive clicks seen so far
        self.clock = 10                # timer period, in milliseconds
        _tmBtn.init(freq = (1000 / self.clock))
        _tmBtn.callback(self.doBtnScan)
        self.staLon = 1                # long-press mode: 1 = timed one-shot, 0 = repeat with count
        self.tLon = 3000               # hold time for a long press, in milliseconds
        self.TIME_CONT_CLICKS = 50     # click interval: max scans a press/release may last to count as a click
    '''*************************************************************************
    * 功 能:按键扫描
    * 说 明:定时器回调函数,用于识别当前按键是否动作,并判断其动作形式。
    * 输入参数:
    t : 定时器无参回调函数必备,否则调用不成功。
    * 输出参数:None
    * 返 回 值:True
    **************************************************************************'''
    # Scan the key state; invoked from the timer interrupt (parameter t is
    # required by the timer's no-argument-callback convention).
    def doBtnScan(self, t):
        # NOTE(review): only the class attribute KEYBOARD.cont is defined
        # above; the module-level `cont` is first created by the `cont = 0`
        # branch below.  If the counting long-press branch fired before that
        # ever ran, `cont += 1` would raise NameError — verify on the device.
        global cont
        # btnLabUp counts consecutive scans in the released state; the XOR
        # with btnDef resets it to 0 as soon as the pin reads "pressed".
        self.btnLabUp = (self.btnLabUp * int(not(self.btn.value() ^ int(not(self.btnDef))))) + int(not(self.btn.value() ^ int(not(self.btnDef))))
        btdown = self.btnLabDown  # length of the press that may just have ended
        self.btnLabDown = (self.btnLabDown * int(not(self.btn.value() ^ self.btnDef))) + int(not(self.btn.value() ^ self.btnDef))
        # Long press: one-shot timing or repeated counting.
        # (btnLabDown * clock) is how long the key has been held, in ms.
        if (self.btnLabDown * self.clock) == self.tLon:
            if self.staLon == 1:
                if self.eve_btnLon != None:
                    self.eve_btnLon()  # long-press event; keep the handler short or the timer overruns
            elif self.staLon == 0:
                if self.eve_btnLon != None:
                    cont += 1
                    self.eve_btnLon(cont)  # repeated long-press event with its count; keep the handler short
            self.btnLabDown = 0  # restart the hold measurement
        if self.btnLabUp > 5:
            cont = 0  # released long enough: reset the long-press repeat counter
        # Consecutive clicks: a press lasting 6..TIME_CONT_CLICKS-1 scans
        # followed by a release counts as one click.
        if (btdown > 5 and btdown < self.TIME_CONT_CLICKS) and self.btnLabUp > 0:
            self.Continuous_Clicks += 1
        # Report once the key has stayed released (or held) past the
        # click-interval window.
        if (self.btnLabUp > self.TIME_CONT_CLICKS) and (self.Continuous_Clicks > 0) or (self.btnLabDown > self.TIME_CONT_CLICKS) and (self.Continuous_Clicks > 0):
            if self.evn_Continuous_Clicks != None:
                self.evn_Continuous_Clicks(self.Continuous_Clicks)  # click event; count 1 means a single click - keep the handler short
            self.Continuous_Clicks = 0
| 46.276596 | 163 | 0.554713 | 3,436 | 0.672934 | 0 | 0 | 0 | 0 | 0 | 0 | 2,865 | 0.561105 |
9cfa728536bee61e0fd7a0cb5ca6747960450394 | 869 | py | Python | dataanalysis/mopitt_data_analysis.py | s-ai-kia/nasa_stf | cc60cf7f74c77a78dc8fa1140d787614fc4cca7b | [
"MIT"
] | 1 | 2020-10-05T04:11:45.000Z | 2020-10-05T04:11:45.000Z | dataanalysis/mopitt_data_analysis.py | s-ai-kia/nasa_stf | cc60cf7f74c77a78dc8fa1140d787614fc4cca7b | [
"MIT"
] | null | null | null | dataanalysis/mopitt_data_analysis.py | s-ai-kia/nasa_stf | cc60cf7f74c77a78dc8fa1140d787614fc4cca7b | [
"MIT"
] | 3 | 2020-12-13T19:36:09.000Z | 2021-11-25T19:01:51.000Z | # -*- coding: utf-8 -*-
"""mopitt_data_analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bb_9kuO0Suo5761xgJioS84TrEdR5ojj
"""
# NOTE: exported from a Colab notebook.  Bare expressions such as `df.head()`
# were notebook cell outputs; as a script they evaluate and discard.
import pandas as pd
df = pd.read_csv('MOP02J-20200101-L2V18.0.3.csv')
df.head()
df
tx = df[0:5000]
tx
import plotly.express as px
fig4 = px.density_mapbox(tx, lat='# Latitude', lon=' Longitude', z=' COTotalColumn', radius=10,
                         center=dict(lat=0, lon=180), zoom=0,
                         mapbox_style="stamen-terrain")
fig4.show()
import chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
# FIX: the exported notebook cell `pip install chart_studio` was left as a
# bare statement, making the whole file a SyntaxError.  Install the package
# from the shell instead:
#   pip install chart_studio
username = 'saikia'
# SECURITY: a Chart Studio API key is hard-coded and committed to source
# control - rotate it and load it from an environment variable instead.
api_key = 'oIIOTBZOlv8hbx8XorKH'
chart_studio.tools.set_credentials_file(username=username, api_key=api_key)
py.plot(fig4, filename = 'csa-mopitt', auto_open=True)
| 21.195122 | 95 | 0.715765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.38435 |
9cfa74671b912301e99e5c7c844894769aef765f | 5,413 | py | Python | main.py | dkendrick/starterbot | 2b8d14ddf4c18583e8c5656f8268478c07089d41 | [
"BSD-2-Clause"
] | 2 | 2021-11-30T06:33:17.000Z | 2021-12-05T22:50:15.000Z | main.py | dkendrick/starterbot | 2b8d14ddf4c18583e8c5656f8268478c07089d41 | [
"BSD-2-Clause"
] | null | null | null | main.py | dkendrick/starterbot | 2b8d14ddf4c18583e8c5656f8268478c07089d41 | [
"BSD-2-Clause"
] | null | null | null | import bybit
import math
import pandas as pd
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
# settings
num_orders = 3       # number of grid levels per side
order_size = 1       # contract quantity per order
order_distance = 10  # price step between grid levels
sl_risk = 0.03       # stop-loss distance as a fraction of the mid price
tp_distance = 5      # take-profit distance, in multiples of order_distance
# SECURITY: load real credentials from the environment/config; never commit
# live API keys to source control.
api_key = "YOUR_KEY"
api_secret = "YOUR_SECRET"
# live (non-testnet) bybit client used by every function below
client = bybit.bybit(test=False, api_key=api_key, api_secret=api_secret)
# placeholder; overwritten by calculate_mid_price() at the bottom of the script
mid_price = 0
def place_order(price, side, stop_loss, take_profit):
    """Submit a GoodTillCancel BTCUSD limit order of the configured size."""
    request = client.Order.Order_new(
        symbol="BTCUSD",
        side=side,
        order_type="Limit",
        qty=order_size,
        price=price,
        time_in_force="GoodTillCancel",
        stop_loss=stop_loss,
        take_profit=take_profit,
    )
    # resolve the deferred request; the response body is not used
    request.result()
def get_result_from_response(response):
    """Unwrap the 'result' payload from a bybit SDK response tuple."""
    payload = response.result()[0]
    if not payload:
        payload = {}
    return payload.get('result', {})
def ensure_buy_order(price, stop_loss, take_profit):
    """Ensure a resting buy limit exists at *price* with the given TP."""
    # only place buys strictly below the band under the last traded price
    if (last_price - order_distance) < price:
        return
    matches = [order for order in buy_orders if int(order['price']) == price]
    if any(matches):
        current = matches[0]
        if int(float(current['take_profit'])) == take_profit:
            # identical order already resting - nothing to do
            return
        # TP target moved: replace the stale order
        print("cancelling order, tp has moved")
        close_order(current)
    print("> opening buy order at {} with sl: {} and tp: {}".format(
        price, stop_loss, take_profit))
    place_order(price, "Buy", stop_loss, take_profit)
def ensure_sell_order(price, stop_loss, take_profit):
    """Ensure a resting sell limit exists at *price* with the given TP."""
    # only place sells strictly above the band over the last traded price
    if (last_price + order_distance) > price:
        return
    matches = [order for order in sell_orders if int(order['price']) == price]
    if any(matches):
        current = matches[0]
        if int(float(current['take_profit'])) == take_profit:
            # identical order already resting - nothing to do
            return
        # TP target moved: replace the stale order
        print("cancelling order, tp has moved")
        close_order(current)
    print("> opening sell order at {} with sl: {} and tp: {}".format(
        price, stop_loss, take_profit))
    place_order(price, "Sell", stop_loss, take_profit)
def close_order(order):
    """Cancel one resting BTCUSD order identified by its order_id."""
    cancellation = client.Order.Order_cancel(
        symbol="BTCUSD", order_id=order['order_id'])
    cancellation.result()
def close_all_orders(order_list):
    """Cancel every order in *order_list*.

    The original used a list comprehension purely for its side effects,
    building and discarding a throwaway list; a plain loop states the
    intent directly.
    """
    for order in order_list:
        close_order(order)
def check_and_update_orders():
    """Maintain the grid of resting limit orders around the last trade price.

    Reads the module globals client, last_price, mid_price, buy_orders,
    sell_orders and the settings constants.  With an open Buy/Sell position
    only same-side orders are kept (TP anchored to the position's entry
    price) and opposite-side orders are cancelled; otherwise a symmetric
    buy/sell grid is maintained around last_price.
    """
    print("> check and update orders running")
    my_position = get_result_from_response(
        client.Positions.Positions_myPosition(symbol="BTCUSD"))
    position_side = my_position['side']
    # NOTE(review): entry_price is parsed even when no position is open -
    # presumably the API returns a numeric string either way; verify against
    # the bybit SDK.
    entry_price = round(float(my_position['entry_price']))
    # stop-loss offset as a fraction of the mid price
    sl_distance = mid_price * sl_risk
    for n in range(0, num_orders):
        # n-th grid level away from the last traded price
        order_offset = (n + 1) * order_distance
        buy_price = round_to_order_distance(last_price - order_offset)
        sell_price = round_to_order_distance(last_price + order_offset)
        if position_side == "Buy":
            # long position: keep buys only, TP relative to entry price
            buy_tp = round_to_order_distance(
                entry_price + (order_distance * tp_distance))
            ensure_buy_order(buy_price, mid_price - sl_distance, buy_tp)
            close_all_orders(sell_orders)
        elif position_side == "Sell":
            # short position: keep sells only, TP relative to entry price
            sell_tp = round_to_order_distance(
                entry_price - (order_distance * tp_distance))
            ensure_sell_order(sell_price, mid_price + sl_distance, sell_tp)
            close_all_orders(buy_orders)
        else:
            # no open position: maintain both sides, TP relative to last price
            buy_tp = round_to_order_distance(
                last_price + (order_distance * tp_distance))
            ensure_buy_order(buy_price, mid_price - sl_distance, buy_tp)
            sell_tp = round_to_order_distance(
                last_price - (order_distance * tp_distance))
            ensure_sell_order(sell_price, mid_price + sl_distance, sell_tp)
def round_to_order_distance(num):
    """Snap *num* to the nearest multiple of the module-level order_distance.

    Returns None for missing (None) or NaN input, mirroring the pandas
    rolling-mean output which is NaN for the first window.
    """
    if num is None:
        return None
    if math.isnan(num):
        return None
    steps = round(float(num) / order_distance)
    return steps * order_distance
def calculate_mid_price():
    """Compute the grid mid price from a 200-candle moving average.

    Fetches roughly nine hours of 1-minute BTCUSD candles in three
    3-hour pages, computes a rolling 200-candle mean of the close, and
    returns the latest value snapped to the order grid.
    """
    kline = pd.DataFrame([])
    # Page backwards in 3-hour steps (3h, 6h, 9h ago).
    for n in range(1, 4):
        from_date = datetime.now() + relativedelta(hours=-(n*3))
        unix_from_date = time.mktime(from_date.timetuple())
        # 'from' is a Python keyword, hence the **{'from': ...} workaround.
        candle_info = get_result_from_response(client.Kline.Kline_get(
            symbol="BTCUSD", interval="1", **{'from': unix_from_date}))
        # NOTE(review): DataFrame.append was removed in pandas 2.x; this
        # presumably targets an older pandas — verify before upgrading.
        kline = kline.append(candle_info)
    # Pages can overlap, so deduplicate before sorting chronologically.
    kline = kline.drop_duplicates()
    kline["time"] = pd.to_datetime(kline["open_time"], unit='s')
    kline = kline.sort_values(by=["time"])
    kline[["open", "high", "low", "close", "volume"]] = kline[[
        "open", "high", "low", "close", "volume"]].apply(pd.to_numeric)
    kline.drop(columns=["open_time", "symbol",
                        "interval", "turnover"], inplace=True)
    # 200-candle simple moving average of the close, snapped to the grid.
    kline['ma'] = kline['close'].rolling(200).mean()
    kline['rounded_ma'] = kline['ma'].apply(
        lambda n: round_to_order_distance(n))
    mid_price = kline['rounded_ma'].iloc[-1]
    print("> mid price {}".format(mid_price))
    return mid_price
# --- module-level bot state, bootstrapped once at import time ---
# Most recent traded price for BTCUSD.
last_price = get_result_from_response(
    client.Market.Market_tradingRecords(symbol="BTCUSD", limit=1))[0]['price']
# All currently open orders, split by side for the grid logic above.
open_orders = get_result_from_response(
    client.Order.Order_query(symbol="BTCUSD", order_id=""))
buy_orders = list(filter(lambda elem: elem['side'] == "Buy", open_orders))
sell_orders = list(filter(lambda elem: elem['side'] == "Sell", open_orders))
print(buy_orders)
# Establish the grid mid price, then place/refresh the order ladder once.
mid_price = calculate_mid_price()
check_and_update_orders()
| 31.841176 | 78 | 0.661001 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.116017 |
9cfab5f5e7ac823ca82f349ba7f74d5b096b4a5a | 177 | py | Python | Chapter 11/Chapter 06/app1.py | bpbpublications/Building-Web-Apps-with-Python-and-Flask | 4fbbe75fad9629f16ff5bf8bd603aa09dd04f9eb | [
"MIT"
] | 2 | 2021-05-08T23:10:12.000Z | 2021-05-10T14:33:27.000Z | Chapter 11/Chapter 06/app1.py | bpbpublications/Building-Web-Apps-with-Python-and-Flask | 4fbbe75fad9629f16ff5bf8bd603aa09dd04f9eb | [
"MIT"
] | null | null | null | Chapter 11/Chapter 06/app1.py | bpbpublications/Building-Web-Apps-with-Python-and-Flask | 4fbbe75fad9629f16ff5bf8bd603aa09dd04f9eb | [
"MIT"
] | 1 | 2021-07-08T19:17:53.000Z | 2021-07-08T19:17:53.000Z | from flask import Flask, render_template
# Log the import name (script vs. WSGI) at startup.
print (__name__)
app = Flask(__name__)
@app.route('/<name>')
def hello(name):
    """Render btn.html, passing the URL path segment as `name`."""
    return render_template("btn.html", name=name)
| 22.125 | 50 | 0.700565 | 0 | 0 | 0 | 0 | 90 | 0.508475 | 0 | 0 | 19 | 0.107345 |
9cfafb2073c9a3aff02273b967553d1d40af2ae0 | 936 | py | Python | app/models/Article.py | ngedev/hypexblog | 9e4b93ed119ff52afb6b67d00df2b26fe09f7e05 | [
"MIT"
] | null | null | null | app/models/Article.py | ngedev/hypexblog | 9e4b93ed119ff52afb6b67d00df2b26fe09f7e05 | [
"MIT"
] | 5 | 2021-03-08T05:07:37.000Z | 2021-03-12T08:00:41.000Z | app/models/Article.py | ngedev/hypexblog | 9e4b93ed119ff52afb6b67d00df2b26fe09f7e05 | [
"MIT"
] | null | null | null | from datetime import datetime
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
String,
UnicodeText,
)
from sqlalchemy.orm import relationship
from zemfrog.globals import db
class Article(db.Model):
    """Blog article persisted through zemfrog's SQLAlchemy instance."""
    id = Column(Integer, primary_key=True)
    # Author of the article (FK to user.id).
    user_id = Column(ForeignKey("user.id"), nullable=False)
    title = Column(String(255), nullable=False, unique=True)
    # URL-friendly identifier; unique like the title.
    slug = Column(String(255), nullable=False, unique=True)
    image = Column(UnicodeText)
    text = Column(UnicodeText)
    # New articles start as drafts until published.
    drafted = Column(Boolean, default=True)
    # Many-to-many tags through the "tag_links" association table.
    tags = relationship("Tag", secondary="tag_links", lazy="dynamic")
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, onupdate=datetime.utcnow)
class TagLinks(db.Model):
    """Association table linking articles to tags (many-to-many)."""
    id = Column(Integer, primary_key=True)
    article_id = Column(ForeignKey("article.id"))
    tag_id = Column(ForeignKey("tag.id"))
| 28.363636 | 69 | 0.708333 | 704 | 0.752137 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.057692 |
9cfb46ed6b05cd0d4c7a16a81985cf1b4466d0db | 7,079 | py | Python | apps/combine-api/tests/run/test_run_get_simulators.py | freiburgermsu/biosimulations | 1c4f604f67c0924b58e1a3a45378c86bab7ace5b | [
"MIT"
] | 20 | 2021-09-05T02:47:07.000Z | 2022-01-25T10:46:47.000Z | apps/combine-api/tests/run/test_run_get_simulators.py | freiburgermsu/biosimulations | 1c4f604f67c0924b58e1a3a45378c86bab7ace5b | [
"MIT"
] | 1,884 | 2020-08-23T17:40:26.000Z | 2021-09-01T16:29:20.000Z | apps/combine-api/tests/run/test_run_get_simulators.py | freiburgermsu/biosimulations | 1c4f604f67c0924b58e1a3a45378c86bab7ace5b | [
"MIT"
] | 2 | 2019-11-04T15:08:05.000Z | 2020-01-02T21:17:51.000Z | from biosimulators_utils.config import get_config
from src import app
from src.handlers.run.utils import get_simulator_api, get_simulators, exec_in_subprocess
from unittest import mock
import os
import parameterized
import pytest
import requests
import shutil
import tempfile
import unittest
class GetSimulatorsTestCase(unittest.TestCase):
    """Checks the /run/simulators endpoint against a mocked simulator list."""
    def test(self):
        endpoint = '/run/simulators'
        with app.app.app.test_client() as client:
            # Canned registry entries served instead of the real lookup.
            simulators = [
                {
                    'id': 'copasi',
                    'name': "COPASI",
                    'api': {
                        'module': 'biosimulators_copasi',
                        'package': 'biosimulators_copasi',
                    },
                },
                {
                    'id': 'gillespy2',
                    'name': 'GillesPy2',
                    'api': {
                        'module': 'biosimulators_gillespy2',
                        'package': 'biosimulators_gillespy2',
                    },
                },
            ]
            with mock.patch('src.handlers.run.utils.get_simulators', return_value=simulators):
                response = client.get(endpoint)
            self.assertEqual(response.status_code, 200, response.json)
            simulators = response.json
            id = 'copasi'
            name = 'COPASI'
            sim = next(simulator for simulator in simulators if simulator['id'] == id)
            api_name = 'biosimulators_copasi'
            # The endpoint is expected to enrich each entry with _type
            # markers, resolved versions, and a specs URL.
            self.assertEqual(sim, {
                '_type': 'Simulator',
                'id': id,
                'name': name,
                'version': sim['version'],
                'api': {
                    '_type': 'SimulatorApi',
                    'module': api_name,
                    'package': api_name,
                    'version': sim['api']['version'],
                },
                'specs': 'https://api.biosimulators.org/simulators/{}/{}'.format(id, sim['version'])
            })
# Optional comma-separated allow list of simulator ids from the environment.
# None means "no filter"; an empty string yields an empty list.
SIMULATORS = os.environ.get('SIMULATORS', None)
if SIMULATORS is not None:
    if SIMULATORS:
        SIMULATORS = SIMULATORS.split(',')
    else:
        SIMULATORS = []
# Optional comma-separated deny list, parsed the same way.
SKIPPED_SIMULATORS = os.environ.get('SKIPPED_SIMULATORS', None)
if SKIPPED_SIMULATORS is not None:
    if SKIPPED_SIMULATORS:
        SKIPPED_SIMULATORS = SKIPPED_SIMULATORS.split(',')
    else:
        SKIPPED_SIMULATORS = []
# Location of the example COMBINE archives downloaded by the API test below.
EXAMPLES_BASE_URL = 'https://github.com/biosimulators/Biosimulators_test_suite/raw/deploy/examples'
TIMEOUT = 5 * 60  # maximum execution time per test in seconds
class SimulatorsHaveValidApisTestCase(unittest.TestCase):
    """Validates each registered simulator's Python API contract.

    Parameterized over the simulator registry (optionally filtered by the
    SIMULATORS / SKIPPED_SIMULATORS environment variables); each case runs
    in a subprocess with a hard timeout so a hung simulator cannot block
    the whole suite.
    """
    def setUp(self):
        # Fresh scratch directory per test case.
        self.tmp_dirname = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.tmp_dirname)
    @parameterized.parameterized.expand(
        (simulator['id'], simulator)
        for simulator in get_simulators()
        if (
            (SIMULATORS is None or simulator['id'] in SIMULATORS)
            and (SKIPPED_SIMULATORS is None or simulator['id'] not in SKIPPED_SIMULATORS)
        )
    )
    @pytest.mark.timeout(TIMEOUT * 1.25)
    def test(self, id, simulator):
        # The pytest timeout above is a slightly larger backstop for the
        # subprocess-level TIMEOUT.
        exec_in_subprocess(self._test, simulator['api']['module'], simulator['exampleCombineArchive'], self.tmp_dirname,
                           timeout=TIMEOUT)
    @staticmethod
    def _test(simulator_module, example_combine_archive, tmp_dirname):
        """Check one simulator API module against an example archive."""
        api = get_simulator_api(simulator_module, False)
        # __version__
        if not hasattr(api, '__version__'):
            raise NotImplementedError('API must have a `__version__` attribute whose value is a non-empty string (e.g., 1.0.1)')
        if not isinstance(api.__version__, str):
            raise ValueError('API must have a `__version__` attribute whose value is a non-empty string (e.g., 1.0.1), not `{}`'.format(
                api.__version__.__class__.__name__))
        if api.__version__ == '':
            raise ValueError('API must have a `__version__` attribute whose value is a non-empty string (e.g., 1.0.1), not `{}`'.format(
                api.__version__))
        # get_simulator_version
        if not hasattr(api, 'get_simulator_version'):
            raise NotImplementedError('API must have a `get_simulator_version` callable that returns a non-empty string (e.g., 1.0.1)')
        if not callable(api.get_simulator_version):
            raise ValueError('`get_simulator_version` must be a callable that returns a non-empty string (e.g., 1.0.1), not `{}`'.format(
                api.get_simulator_version.__class__.__name__))
        simulator_version = api.get_simulator_version()
        if not isinstance(simulator_version, str):
            raise ValueError('`get_simulator_version` must return a non-empty string (e.g., 1.0.1), not `{}`'.format(
                simulator_version.__class__.__name__))
        if simulator_version == '':
            raise ValueError('`get_simulator_version` must return a non-empty string (e.g., 1.0.1), not `{}`'.format(
                simulator_version))
        # exec_sedml_docs_in_combine_archive
        if not hasattr(api, 'exec_sedml_docs_in_combine_archive'):
            raise NotImplementedError('API must have a `exec_sedml_docs_in_combine_archive` callable')
        if not callable(api.exec_sedml_docs_in_combine_archive):
            raise ValueError('`exec_sedml_docs_in_combine_archive` must be a callable, not `{}`'.format(
                api.exec_sedml_docs_in_combine_archive.__class__.__name__))
        # Download the simulator's example archive and execute it end-to-end.
        response = requests.get(EXAMPLES_BASE_URL + '/' + example_combine_archive)
        response.raise_for_status()
        archive_filename = os.path.join(tmp_dirname, 'archive.omex')
        with open(archive_filename, 'wb') as file:
            file.write(response.content)
        out_dir = os.path.join(tmp_dirname, 'out')
        config = get_config()
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = True
        config.COLLECT_SED_DOCUMENT_RESULTS = True
        config.DEBUG = True
        results, log = api.exec_sedml_docs_in_combine_archive(archive_filename, out_dir, config=config)
        # exec_sed_doc
        if not hasattr(api, 'exec_sed_doc'):
            raise NotImplementedError('API must have a `exec_sed_doc` callable')
        if not callable(api.exec_sed_doc):
            raise ValueError('`exec_sed_doc` must be a callable, not `{}`'.format(
                api.exec_sed_doc.__class__.__name__))
        # exec_sed_task
        if not hasattr(api, 'exec_sed_task'):
            raise NotImplementedError('API must have a `exec_sed_task` callable')
        if not callable(api.exec_sed_task):
            raise ValueError('`exec_sed_task` must be a callable, not `{}`'.format(
                api.exec_sed_task.__class__.__name__))
        # preprocess_sed_task
        if not hasattr(api, 'preprocess_sed_task'):
            raise NotImplementedError('API must have a `preprocess_sed_task` callable')
        if not callable(api.preprocess_sed_task):
            raise ValueError('`preprocess_sed_task` must be a callable, not `{}`'.format(
                api.preprocess_sed_task.__class__.__name__))
9cfc5d4e4bd363a9f3917be257e6f8a8b0661722 | 2,372 | py | Python | simulator/flow_change_gen.py | faywh/read_ez | d26a14aed473291b896664faf284ef3027c8be90 | [
"Apache-2.0"
] | 7 | 2017-05-28T21:00:09.000Z | 2021-04-05T16:14:07.000Z | simulator/flow_change_gen.py | faywh/read_ez | d26a14aed473291b896664faf284ef3027c8be90 | [
"Apache-2.0"
] | null | null | null | simulator/flow_change_gen.py | faywh/read_ez | d26a14aed473291b896664faf284ef3027c8be90 | [
"Apache-2.0"
] | 3 | 2018-12-27T07:59:30.000Z | 2021-05-02T07:47:42.000Z | import argparse
import os
import random
from random import Random
from misc import logger
from domain.network_premitives import *
from flow_gen.link_failure_change_generator import LinkFailureChangeGenerator
from flow_gen.random_change_generator import RandomChangeGenerator
if __name__ == '__main__':
    # Command-line options controlling topology, flow generation and logging.
    parser = argparse.ArgumentParser(description='ez-segway sim.')
    parser.add_argument('--logFolder', nargs='?',
                        type=str, default="logs")
    parser.add_argument('--logLevel', nargs='?',
                        type=str, default="INFO")
    parser.add_argument('--logFile', nargs='?',
                        type=str, default="stdout")
    parser.add_argument('--data_folder', nargs='?',
                        type=str, default="data")
    parser.add_argument('--topology', nargs='?',
                        type=str, default="1755")
    parser.add_argument('--topology_type', nargs='?',
                        type=str, default="adjacency")
    parser.add_argument('--generating_method', nargs='?',
                        type=str, default=constants.RANDOM_GENERATION)
    parser.add_argument('--number_of_flows', nargs='?',
                        type=int, default=1000)
    parser.add_argument('--number_of_tests', nargs='?',
                        type=int, default=10)
    parser.add_argument('--failure_rate', nargs='?',
                        type=float, default=10)
    parser.add_argument('--path_generator', nargs='?',
                        type=str, default=constants.THIRD_SWITCH_GENERATION)
    parser.add_argument('--seed', nargs='?',
                        type=int, default=0)
    args = parser.parse_args()
    # Seed the global RNG for reproducible runs.
    random.seed(args.seed)
    directory = "../%s" % (args.logFolder)
    if not os.path.exists(directory):
        os.makedirs(directory)
    logger.init("../" + args.logFolder + "/" + args.logFile, constants.LOG_LEVEL)
    log = logger.getLogger("data-generator", constants.LOG_LEVEL)
    log.info("---> Log start <---")
    # Pick the change generator; note both use a fixed Random(42) seed,
    # independent of --seed (which only seeds the global `random` module).
    if args.generating_method == constants.LINK_FAILURE_GENERATION:
        flow_change_generator = LinkFailureChangeGenerator(Random(42), args.failure_rate)
    else:
        flow_change_generator = RandomChangeGenerator(Random(42), args.path_generator)
    flow_change_generator.no_of_middleboxes = 1
    flow_change_generator.create_continuously_series_of_flows(args, log)
9cfcd175b4c5f0ab48925d1924fe24b3bfba9048 | 6,020 | py | Python | pyccel/codegen/python_wrapper.py | noushi/pyccel | f20846897ba2418dc0f432e293bcf8b4ddb24915 | [
"MIT"
] | null | null | null | pyccel/codegen/python_wrapper.py | noushi/pyccel | f20846897ba2418dc0f432e293bcf8b4ddb24915 | [
"MIT"
] | null | null | null | pyccel/codegen/python_wrapper.py | noushi/pyccel | f20846897ba2418dc0f432e293bcf8b4ddb24915 | [
"MIT"
] | 1 | 2021-01-08T12:32:51.000Z | 2021-01-08T12:32:51.000Z | # coding: utf-8
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
import sys
import subprocess
import os
import glob
import warnings
from pyccel.ast.bind_c import as_static_function_call
from pyccel.ast.core import SeparatorComment
from pyccel.codegen.printing.fcode import fcode
from pyccel.codegen.printing.cwrappercode import cwrappercode
from pyccel.codegen.utilities import compile_files, get_gfortran_library_dir
from .cwrapper import create_c_setup
from pyccel.errors.errors import Errors
# Shared error reporter used by the code-generation pipeline.
errors = Errors()
__all__ = ['create_shared_library', 'fortran_c_flag_equivalence']
#==============================================================================
# Major/minor version of the running interpreter.
PY_VERSION = sys.version_info[0:2]
# Maps Fortran-only compiler warning flags to their C equivalents, used when
# reusing the user's Fortran flags to compile the generated C wrapper.
fortran_c_flag_equivalence = {'-Wconversion-extra' : '-Wconversion' }
#==============================================================================
def create_shared_library(codegen,
                          language,
                          pyccel_dirpath,
                          compiler,
                          mpi_compiler,
                          accelerator,
                          dep_mods,
                          libs,
                          libdirs,
                          includes='',
                          flags = '',
                          sharedlib_modname=None,
                          verbose = False):
    """Build a CPython extension module wrapping a translated module.

    Generates the C wrapper (plus, for Fortran, a bind(c) interface file),
    writes a setup_<module>.py, runs it with the current interpreter, and
    returns the absolute path of the resulting shared library (.so/.pyd).

    Parameters: `codegen` must represent a module; `language` is 'c' or
    'fortran'; `dep_mods` are previously compiled dependency modules;
    `flags` is a space-separated compiler flag string.  `mpi_compiler` and
    `accelerator` are accepted but not used in this function body.

    Side effects: changes the working directory to `pyccel_dirpath` for the
    duration of the build and writes wrapper/setup files there.
    """
    # Consistency checks
    if not codegen.is_module:
        raise TypeError('Expected Module')
    # Get module name
    module_name = codegen.name
    # Change working directory to '__pyccel__'
    base_dirpath = os.getcwd()
    os.chdir(pyccel_dirpath)
    # Name of shared library
    if sharedlib_modname is None:
        sharedlib_modname = module_name
    sharedlib_folder = ''
    if language in ['c', 'fortran']:
        extra_libs = []
        extra_libdirs = []
        if language == 'fortran':
            # Construct static interface for passing array shapes and write it to file bind_c_MOD.f90
            funcs = [f for f in codegen.routines if not f.is_private]
            sep = fcode(SeparatorComment(40), codegen.parser)
            bind_c_funcs = [as_static_function_call(f, module_name, name=f.name) for f in funcs]
            bind_c_code = '\n'.join([sep + fcode(f, codegen.parser) + sep for f in bind_c_funcs])
            bind_c_filename = 'bind_c_{}.f90'.format(module_name)
            with open(bind_c_filename, 'w') as f:
                f.writelines(bind_c_code)
            compile_files(bind_c_filename, compiler, flags,
                            binary=None,
                            verbose=verbose,
                            is_module=True,
                            output=pyccel_dirpath,
                            libs=libs,
                            libdirs=libdirs,
                            language=language)
            dep_mods = (os.path.join(pyccel_dirpath,'bind_c_{}'.format(module_name)), *dep_mods)
            # The Fortran runtime must be linked into the C extension.
            if compiler == 'gfortran':
                extra_libs.append('gfortran')
                extra_libdirs.append(get_gfortran_library_dir())
            elif compiler == 'ifort':
                extra_libs.append('ifcore')
            if sys.platform == 'win32':
                extra_libs.append('quadmath')
        # Temporarily rename the module so the generated wrapper exports
        # the requested extension-module name, then restore it.
        module_old_name = codegen.expr.name
        codegen.expr.set_name(sharedlib_modname)
        wrapper_code = cwrappercode(codegen.expr, codegen.parser, language)
        if errors.has_errors():
            return
        codegen.expr.set_name(module_old_name)
        wrapper_filename_root = '{}_wrapper'.format(module_name)
        wrapper_filename = '{}.c'.format(wrapper_filename_root)
        with open(wrapper_filename, 'w') as f:
            f.writelines(wrapper_code)
        # Translate Fortran-only flags to C equivalents for the wrapper.
        c_flags = [fortran_c_flag_equivalence[f] if f in fortran_c_flag_equivalence \
                else f for f in flags.strip().split(' ') if f != '']
        # Apple clang needs -Xpreprocessor before each -fopenmp.
        if sys.platform == "darwin" and "-fopenmp" in c_flags and "-Xpreprocessor" not in c_flags:
            idx = 0
            while idx < len(c_flags):
                if c_flags[idx] == "-fopenmp":
                    c_flags.insert(idx, "-Xpreprocessor")
                    idx += 1
                idx += 1
        setup_code = create_c_setup(sharedlib_modname, wrapper_filename,
                dep_mods, compiler, includes, libs + extra_libs, libdirs + extra_libdirs, c_flags)
        setup_filename = "setup_{}.py".format(module_name)
        with open(setup_filename, 'w') as f:
            f.writelines(setup_code)
        setup_filename = os.path.join(pyccel_dirpath, setup_filename)
        # Build the extension with the same interpreter pyccel runs under.
        cmd = [sys.executable, setup_filename, "build"]
        if verbose:
            print(' '.join(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        out, err = p.communicate()
        if verbose:
            print(out)
        if p.returncode != 0:
            err_msg = "Failed to build module"
            if verbose:
                err_msg += "\n" + err
            raise RuntimeError(err_msg)
        if err:
            # Non-fatal compiler warnings are surfaced as Python warnings.
            warnings.warn(UserWarning(err))
        sharedlib_folder += 'build/lib*/'
    # Obtain absolute path of newly created shared library
    # Set file name extension of Python extension module
    if os.name == 'nt': # Windows
        extext = 'pyd'
    else:
        extext = 'so'
    pattern = '{}{}*.{}'.format(sharedlib_folder, sharedlib_modname, extext)
    sharedlib_filename = glob.glob(pattern)[0]
    sharedlib_filepath = os.path.abspath(sharedlib_filename)
    # Change working directory back to starting point
    os.chdir(base_dirpath)
    # Return absolute path of shared library
    return sharedlib_filepath
| 36.932515 | 106 | 0.563123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,327 | 0.220432 |
14007734f5d8729d94671298f144bda00fec3b3f | 151 | py | Python | src/third_party_module/upload_self_define_module/nester/nester.py | HuangHuaBingZiGe/GitHub-Demo | f3710f73b0828ef500343932d46c61d3b1e04ba9 | [
"Apache-2.0"
] | null | null | null | src/third_party_module/upload_self_define_module/nester/nester.py | HuangHuaBingZiGe/GitHub-Demo | f3710f73b0828ef500343932d46c61d3b1e04ba9 | [
"Apache-2.0"
] | null | null | null | src/third_party_module/upload_self_define_module/nester/nester.py | HuangHuaBingZiGe/GitHub-Demo | f3710f73b0828ef500343932d46c61d3b1e04ba9 | [
"Apache-2.0"
] | null | null | null | def print_lol(arr):
def print_lol(arr):
    """Print every item of *arr*, one per line, recursing into nested lists.

    Fix: the original body ended with `print` and `row` as two separate
    bare expression statements (no-ops in Python 3, so nothing was ever
    printed); they are rejoined into a single `print(row)` call.
    """
    for row in arr:
        if isinstance(row, list):
            print_lol(row)
        else:
            print(row)
| 18.875 | 35 | 0.456954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1400bb84d2be3b7726e6ee57bf54f265059abc0e | 8,471 | py | Python | xds_plot_integrate.py | keitaroyam/yam_scripts | aa1346be02050032a96f3ea89e5fc836d05c71ea | [
"BSD-3-Clause"
] | 2 | 2018-04-13T21:35:15.000Z | 2020-04-01T07:59:10.000Z | xds_plot_integrate.py | keitaroyam/yam_scripts | aa1346be02050032a96f3ea89e5fc836d05c71ea | [
"BSD-3-Clause"
] | null | null | null | xds_plot_integrate.py | keitaroyam/yam_scripts | aa1346be02050032a96f3ea89e5fc836d05c71ea | [
"BSD-3-Clause"
] | 1 | 2021-12-20T11:04:01.000Z | 2021-12-20T11:04:01.000Z | #!/usr/bin/env cctbx.python
from __future__ import print_function
"""
xds_plot_integrate.py
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
"""
TODO: plot differences in direct beam and rotation axis
"""
import sys
import re
import collections
from cctbx import sgtbx
class IntegrateLp:
    """Parser for an XDS INTEGRATE.LP file.

    Collects per-frame statistics (scale, overloads, rejects, sigmaD/R)
    and per-block refined parameters (cell, rotation, distance, origin,
    positional deviations, 9-area sigmaB/sigmaR).
    """
    def __init__(self, lpin):
        if lpin is not None:
            self.parse(lpin)
    # __init__()
    def parse(self, int_lp):
        """Read *int_lp* and populate the instance attributes."""
        # Regexes matching the per-image statistics table rows and the
        # refined-parameter lines printed after each processing block.
        re_im = re.compile("^ (.....) 0 +([0-9\.]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9\.]+) +([0-9\.]+)")
        re_cell = re.compile("^ UNIT CELL PARAMETERS *([0-9\.]+) *([0-9\.]+) *([0-9\.]+) *([0-9\.]+) *([0-9\.]+) *([0-9\.]+)")
        re_rotation = re.compile("^ CRYSTAL ROTATION OFF FROM INITIAL ORIENTATION *([-0-9\.]+) *([-0-9\.]+) *([-0-9\.]+)") #
        re_mosaicity = re.compile("^ CRYSTAL MOSAICITY \(DEGREES\) *([0-9\.]+)") #
        re_axis = re.compile("^ LAB COORDINATES OF ROTATION AXIS *([-0-9\.]+) *([-0-9\.]+) *([-0-9\.]+)") #
        re_beam = re.compile("^ DIRECT BEAM COORDINATES \(REC\. ANGSTROEM\) *([-0-9\.]+) *([-0-9\.]+) *([-0-9\.]+)") #
        re_dist = re.compile("^ CRYSTAL TO DETECTOR DISTANCE \(mm\) *([-0-9\.]+)")
        re_dev_spot = re.compile("^ STANDARD DEVIATION OF SPOT POSITION \(PIXELS\) *([0-9\.]+)")
        re_dev_spindle = re.compile("^ STANDARD DEVIATION OF SPINDLE POSITION \(DEGREES\) *([0-9\.]+)")
        re_orig = re.compile("^ DETECTOR ORIGIN \(PIXELS\) AT *([0-9\.]+) *([0-9\.]+)")
        images = [] # as key of params
        self.cell_changes = []
        # Maps a tuple of image numbers (one processing block) -> params.
        self.blockparams = collections.OrderedDict()
        # Set when a parameter line was seen: the next image row starts a
        # new block, so the `images` accumulator is reset.
        clear_flag = False
        self.frames = []
        self.scales, self.overloads, self.rejecteds, self.sigmads, self.sigmars = [], [], [], [], []
        self.space_group = None
        # Read INTEGRATE.LP file
        for l in open(int_lp):
            r_im = re_im.search(l)
            r_cell = re_cell.search(l)
            r_rotation = re_rotation.search(l)
            r_dist = re_dist.search(l)
            r_spot = re_dev_spot.search(l)
            r_spindle = re_dev_spindle.search(l)
            r_orig = re_orig.search(l)
            if l.startswith(" SPACE_GROUP_NUMBER="):
                sgnum = int(l.strip().split()[-1])
                if sgnum > 0:
                    self.space_group = sgtbx.space_group_info(sgnum).group()
            if r_im:
                if clear_flag:
                    images = []
                    clear_flag = False
                image, scale, nbkg, novl, newald, nstrong, nrej, sigmad, sigmar = r_im.groups()
                images.append(int(image))
                # for plot
                self.frames.append(int(image))
                self.scales.append(scale)
                self.overloads.append(int(novl))
                self.rejecteds.append(int(nrej))
                self.sigmads.append(sigmad)
                self.sigmars.append(sigmar)
            if r_cell:
                #a, b, c, alpha, beta, gamma = r_cell.groups()
                self.blockparams.setdefault(tuple(images), {})["cell"] = r_cell.groups()
                self.cell_changes.append((images, r_cell.groups()))
                clear_flag = True
            if r_rotation:
                self.blockparams.setdefault(tuple(images), {})["rotation"] = r_rotation.groups()
                clear_flag = True
            if r_dist:
                self.blockparams.setdefault(tuple(images), {})["dist"] = r_dist.group(1)
                clear_flag = True
            if r_spot:
                self.blockparams.setdefault(tuple(images), {})["spot"] = r_spot.group(1)
                clear_flag = True
            if r_spindle:
                self.blockparams.setdefault(tuple(images), {})["spindle"] = r_spindle.group(1)
                clear_flag = True
            if r_orig:
                self.blockparams.setdefault(tuple(images), {})["orig"] = r_orig.groups()
                clear_flag = True
            if l.startswith(" SIGMAB (degree)"):
                self.blockparams.setdefault(tuple(images), {})["sigmab9"] = l.strip().split()[-9:]
                clear_flag = True
            if l.startswith(" SIGMAR (degree)"):
                self.blockparams.setdefault(tuple(images), {})["sigmar9"] = l.strip().split()[-9:]
                clear_flag = True
    # parse_integrate_lp()
# class IntegrateLp
class CellConstraints:
    """Unit-cell parameter constraints implied by a crystal system.

    Answers which cell edges and angles are fixed by symmetry for the
    crystal system of the given space group.
    """

    # Crystal systems in which b is constrained to equal a.
    _B_EQUALS_A = frozenset(("Tetragonal", "Hexagonal", "Trigonal", "Cubic"))

    def __init__(self, space_group):
        self.cs = space_group.crystal_system()

    def is_b_equal_a(self):
        return self.cs in self._B_EQUALS_A

    def is_c_equal_a_b(self):
        # Only the cubic system forces a = b = c.
        return self.cs == "Cubic"

    def is_angle_constrained(self, angle):
        assert angle in ("alpha", "beta", "gamma")
        if self.cs == "Triclinic":
            # No angle is constrained in triclinic.
            return False
        if self.cs == "Monoclinic":
            # Monoclinic leaves only beta free.
            return angle != "beta"
        return True
# class CellConstraints
def make_plot(lp, log_out):
    """Write loggraph ($TABLE/$GRAPHS) plots of an IntegrateLp to *log_out*.

    Emits three tables: per-frame statistics, per-block refined parameters
    (skipping cell parameters fixed by the space-group symmetry), and the
    9-area sigmaB/sigmaR values per block.
    """
    ofs = open(log_out, "w")
    # Table 1: per-frame statistics.
    ofs.write("$TABLE: Parameters estimated for each frame:\n")
    ofs.write("$GRAPHS\n")
    ofs.write(":scales")
    ofs.write(":A:1,2:\n")
    ofs.write(":number of overloaded reflections")
    ofs.write(":A:1,3:\n")
    ofs.write(":number of unexpected reflections")
    ofs.write(":A:1,4:\n")
    ofs.write(":SIGMAB (beam divergence e.s.d.)")
    ofs.write(":A:1,5:\n")
    ofs.write(":SIGMAR (reflecting range e.s.d.)")
    ofs.write(":A:1,6:\n")
    ofs.write("$$\n")
    ofs.write("Frame scale overlods nrej sigmaD sigmaM $$\n$$\n")
    for f, scale, novl, nrej, sd, sm in zip(lp.frames, lp.scales, lp.overloads, lp.rejecteds, lp.sigmads, lp.sigmars):
        ofs.write("%5d %s %d %d %s %s\n" % (f, scale, novl, nrej, sd, sm))
    ofs.write("$$\n")
    ofs.write("\n\n\n")
    # Table 2: per-block refined parameters; symmetry-constrained cell
    # parameters are not plotted.
    ofs.write("$TABLE: Parameters estimated for each block:\n")
    ofs.write("$GRAPHS\n")
    ofs.write(":unit cell length a")
    ofs.write(":A:1,2:\n")
    cellconstr = CellConstraints(lp.space_group)
    if not cellconstr.is_b_equal_a():
        ofs.write(":unit cell length b")
        ofs.write(":A:1,3:\n")
    if not cellconstr.is_c_equal_a_b():
        ofs.write(":unit cell length c")
        ofs.write(":A:1,4:\n")
    if not cellconstr.is_angle_constrained("alpha"):
        ofs.write(":unit cell angle alpha")
        ofs.write(":A:1,5:\n")
    if not cellconstr.is_angle_constrained("beta"):
        ofs.write(":unit cell angle beta")
        ofs.write(":A:1,6:\n")
    if not cellconstr.is_angle_constrained("gamma"):
        ofs.write(":unit cell angle gamma")
        ofs.write(":A:1,7:\n")
    ofs.write(":rotations off from initial orientation")
    ofs.write(":A:1,8,9,10:\n")
    ofs.write(":distance")
    ofs.write(":A:1,11:\n")
    ofs.write(":deviations from predicted positions")
    ofs.write(":A:1,12,13:\n")
    ofs.write(":beam center")
    ofs.write(":A:1,14,15:\n")
    ofs.write("$$\n")
    ofs.write("#image a b c alpha beta gamma rotx roty rotz dist spot spindle orgx orgy$$\n$$\n")
    for images, param in sorted(lp.blockparams.items()):
        # "D" is a placeholder value for parameters absent in a block.
        for i in images:
            print("%4d " % i, " ".join(param.get("cell", ["D"]*6)), " ".join(param.get("rotation", ["D"]*3)), param.get("dist","D"), param.get("spot","D"), param.get("spindle","D"), " ".join(param.get("orig",["D"]*2)), file=ofs)
    ofs.write("$$\n")
    ofs.write("\n\n\n")
    # Table 3: per-block sigmaB/sigmaR on the 9 detector areas.
    ofs.write("$TABLE: sigmaB and sigmaR on 9 areas for each block:\n")
    ofs.write("$GRAPHS\n")
    ofs.write(":SIGMAB")
    ofs.write(":A:1,2,3,4,5,6,7,8,9,10:\n")
    ofs.write(":SIGMAR")
    ofs.write(":A:1,11,12,13,14,15,16,17,18,19:\n")
    ofs.write("$$\n")
    ofs.write("#image %s %s$$\n$$\n" % (" ".join(["sigmab%d"%x for x in range(1,10)]), " ".join(["sigmar%d"%x for x in range(1,10)])))
    for images, param in sorted(lp.blockparams.items()):
        for i in images:
            print("%4d " % i, " ".join(param["sigmab9"]), " ".join(param["sigmar9"]), file=ofs)
    ofs.write("$$\n")
    ofs.write("\n\n\n")
# make_plot()
def run(int_lp, log_out="plot_integrate.log"):
    """Parse *int_lp* (an INTEGRATE.LP file) and write loggraph plots."""
    make_plot(IntegrateLp(int_lp), log_out)
# run()
if __name__ == "__main__":
    import sys
    # Default to INTEGRATE.LP in the current directory unless a path is given.
    if len(sys.argv) > 1:
        int_lp = sys.argv[1]
    else:
        int_lp = "INTEGRATE.LP"
    log_out = "plot_integrate.log"
    run(int_lp, log_out)
    # Tell the user how to view the result with CCP4's loggraph.
    print()
    print("Run:")
    print("loggraph", log_out)
1405014d276e1aee0a9bda8aef0a811359dd4eed | 732 | py | Python | app/models.py | JoyWambui/habari-gani | 7c5b33b3b1237efe977dd25e1c9f3c8130f4a756 | [
"MIT"
] | null | null | null | app/models.py | JoyWambui/habari-gani | 7c5b33b3b1237efe977dd25e1c9f3c8130f4a756 | [
"MIT"
] | null | null | null | app/models.py | JoyWambui/habari-gani | 7c5b33b3b1237efe977dd25e1c9f3c8130f4a756 | [
"MIT"
] | null | null | null | class Source:
"""Source class to define News Source Objects"""
    def __init__(self,id,name,description,url,category,country):
        """Store the news-source fields verbatim on the instance."""
        self.id = id
        self.name = name
        self.description = description
        self.url = url
        self.category = category
        self.country = country
class Article:
    """A single news article fetched from a news source."""

    def __init__(self, author, article_title, article_description,
                 article_url, image_url, published):
        # Store each field verbatim on the instance under the same name.
        for attr, value in (
            ("author", author),
            ("article_title", article_title),
            ("article_description", article_description),
            ("article_url", article_url),
            ("image_url", image_url),
            ("published", published),
        ):
            setattr(self, attr, value)
| 31.826087 | 96 | 0.670765 | 720 | 0.983607 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.151639 |
14069f69e286ba7e7815086e118f09ac462708fd | 823 | py | Python | sionna/channel/tr38901/__init__.py | NVlabs/sionna | 488e6c3ff6ff2b3313d0ca0f94e4247b8dd6ff35 | [
"Apache-2.0"
] | 163 | 2022-03-22T19:47:47.000Z | 2022-03-31T23:56:45.000Z | sionna/channel/tr38901/__init__.py | Maryammhsnv/sionna | 527d0f7866b379afffad34a6bef7ed3bf6f33ad2 | [
"Apache-2.0"
] | 2 | 2022-03-24T12:43:07.000Z | 2022-03-29T07:17:16.000Z | sionna/channel/tr38901/__init__.py | Maryammhsnv/sionna | 527d0f7866b379afffad34a6bef7ed3bf6f33ad2 | [
"Apache-2.0"
] | 19 | 2022-03-23T02:31:22.000Z | 2022-03-30T06:35:12.000Z | #
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""
Channel sub-package of the Sionna library implementing 3GPP TR39.801 models.
"""
# pylint: disable=line-too-long
from .antenna import AntennaElement, AntennaPanel, PanelArray, Antenna, AntennaArray
from .lsp import LSP, LSPGenerator
from .rays import Rays, RaysGenerator
from .system_level_scenario import SystemLevelScenario
from .rma_scenario import RMaScenario
from .umi_scenario import UMiScenario
from .uma_scenario import UMaScenario
from .channel_coefficients import Topology, ChannelCoefficientsGenerator
from .system_level_channel import SystemLevelChannel
from .rma import RMa
from .uma import UMa
from .umi import UMi
from .tdl import TDL
from .cdl import CDL
| 32.92 | 103 | 0.818955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.312272 |
14093ba2d1d6dad0f8503fb3ff80ec728afb186e | 85 | wsgi | Python | katty.wsgi | gabeg805/KattyWebApplication | 16ecd9715f4da9fd4e82a840969e7edfa259d14c | [
"MIT"
] | null | null | null | katty.wsgi | gabeg805/KattyWebApplication | 16ecd9715f4da9fd4e82a840969e7edfa259d14c | [
"MIT"
] | null | null | null | katty.wsgi | gabeg805/KattyWebApplication | 16ecd9715f4da9fd4e82a840969e7edfa259d14c | [
"MIT"
] | null | null | null | import sys
# Make the application package importable when served by mod_wsgi.
sys.path.insert(0, '/var/www/katty')
# WSGI servers look for a module-level callable named `application`.
from index import app as application
| 21.25 | 36 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.188235 |
14096a47f1fb73f1f3a7e9c4907910efec6fe0b3 | 5,632 | py | Python | P_bot_TOR.py | derhornspieler/Scavenger | f0cc4c12edea871a74512278781cb10287078023 | [
"Apache-2.0"
] | 1 | 2019-12-14T00:28:35.000Z | 2019-12-14T00:28:35.000Z | P_bot_TOR.py | derhornspieler/Scavenger | f0cc4c12edea871a74512278781cb10287078023 | [
"Apache-2.0"
] | null | null | null | P_bot_TOR.py | derhornspieler/Scavenger | f0cc4c12edea871a74512278781cb10287078023 | [
"Apache-2.0"
] | null | null | null | ######
# If you do not want to post results on Twitter remove the lines marked with TWITTER
######
import time
import tweepy
import os
import classes.utility
import requests
from bs4 import BeautifulSoup, SoupStrainer
tools = classes.utility.ScavUtility()
iterator = 1
session = requests.session()
session.proxies = {}
session.proxies["http"] = "socks5h://localhost:9050"
session.proxies["https"] = "socks5h://localhost:9050"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0"}
#Twitter API credentials
consumer_key = "" # TWITTER
consumer_secret = "" # TWITTER
access_key = "" # TWITTER
access_secret = "" # TWITTER
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # TWITTER
auth.set_access_token(access_key, access_secret) # TWITTER
api = tweepy.API(auth) # TWITTER
print("[#] Using website scraping to gather pastes. (TOR cycles to avoid IP blocking)")
# loading notification targets
with open("notification_targets.txt") as f:
notificationtargets = f.readlines()
print("[#] Loaded " + str(len(notificationtargets)) + " notification targets.")
while 1:
# test if ready to archive
archivepath = "data/raw_pastes"
archiveit = tools.testifreadytoarchive(archivepath)
if archiveit == 1:
print("[*] Get all the pastes with credentials...")
tools.getthejuicythings(archivepath, "pastebincom")
print("[*] Archiving old Paste.org pastes...")
tools.archivepastes(archivepath, "pastebincom")
print(str(iterator) + ". iterator:")
iterator += 1
try:
response = session.get("https://pastebin.com/archive", headers=headers)
response = response.text
print("[#] Waiting...")
time.sleep(90)
for link in BeautifulSoup(response, parse_only=SoupStrainer('a'), features="lxml"):
if "HTML" not in link:
if link.has_attr('href'):
if len(link["href"]) == 9 and link["href"][0] == "/" and link["href"] != "/messages" and link["href"] != "/settings" and link["href"] != "/scraping":
print("[*] Crawling " + link["href"])
# I implemented a little fix which currently avoids that your IP gets blocked when simply scraping the website without using the API
binResponse = session.get("https://pastebin.com/raw" + link["href"], headers=headers)
binResponse = binResponse.text
try:
foundPasswords = 0
file_ = open("data/raw_pastes" + link["href"], "wb")
file_.write(binResponse.encode('utf-8').strip())
file_.close()
emailPattern = os.popen("grep -l -E -o \"\\b[a-zA-Z0-9.-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z0-9.-]+\\b\" data/raw_pastes" + link["href"]).read()
emailPattern = emailPattern.split("\n")
for file in emailPattern:
if file != "":
with open("data/raw_pastes" + link["href"]) as f:
pasteContent = f.readlines()
skip = 0
for line in pasteContent:
curLine = line.strip()
if (":" in curLine or ";" in curLine or "," in curLine) and "://" not in curLine and len(curLine) <=100 and "android:" not in curLine and "#EXTINF" not in curLine:
tools.checknotificationtargets(notificationtargets, curLine, apiPaste["key"])
else:
skip = 1
if skip == 0:
foundPasswords = 1
curPasteMySQLi = os.popen("grep mysqli_connect\( data/raw_pastes" + link["href"]).read()
curPasteRSA = os.popen("grep 'BEGIN RSA PRIVATE KEY' data/raw_pastes" + link["href"]).read()
curPasteWP = os.popen("grep 'The name of the database for WordPress' data/raw_pastes" + link["href"]).read()
# search for onion links
containsOnion = 0
containsDocument = 0
with open("data/raw_pastes" + link["href"]) as f:
onionContent = f.readlines()
for line in onionContent:
if ".onion" in line and len(line) <= 150:
containsOnion = 1
if ".pdf" in line or ".doc" in line or ".docx" in line or ".xls" in line or ".xlsx" in line:
containsDocument = 1
if foundPasswords == 1:
foundPasswords = 0
print("Found credentials. Posting on Twitter...")
api.update_status() # TWITTER
tools.statisticsaddpoint()
elif curPasteRSA != "":
print("Found RSA key. Posting on Twitter...")
api.update_status() # TWITTER
tools.statisticsaddpoint()
os.system("cp data/raw_pastes" + link["href"] + " data/rsa_leaks/.")
elif curPasteWP != "":
print("Found Wordpress configuration file. Posting on Twitter...")
api.update_status() # TWITTER
tools.statisticsaddpoint()
os.system("cp data/raw_pastes" + link["href"] + " data/wordpress_leaks/.")
elif curPasteMySQLi != "":
print("Found MySQL connect string. Posting on Twitter...")
api.update_status() # TWITTER
tools.statisticsaddpoint()
os.system("cp data/raw_pastes" + link["href"] + " data/mysql_leaks/.")
elif containsOnion == 1:
if containsDocument == 1:
print("Found .onion link to a document. Posting on Twitter...")
api.update_status() # TWITTER
tools.statisticsaddpoint()
os.system("cp data/raw_pastes" + link["href"] + " data/onion_docs/.")
else:
print("Found .onion link. Posting on Twitter...")
api.update_status() # TWITTER
tools.statisticsaddpoint()
os.system("cp data/raw_pastes" + link["href"] + " data/onion/.")
time.sleep(1)
except Exception as e:
print(e)
continue
print("++++++++++")
print("")
except Exception as e:
print(e)
continue
| 39.111111 | 173 | 0.635298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,068 | 0.367188 |
140ab3e208fd8520fd27c0b61a95fd2da405a3b2 | 428 | py | Python | core/templatetags/extras.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | core/templatetags/extras.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | core/templatetags/extras.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from docutils.core import publish_parts
register = template.Library()
@register.filter(name='rst')
@stringfilter
def rst_to_html5(text):
    """Template filter rendering reStructuredText *text* as HTML5.

    Uses docutils' ``publish_parts`` with the ``html5`` writer; headings
    start at ``<h2>`` so they nest below the page title. Returns the
    document title markup followed by the body, marked safe for templates.
    """
    overrides = {'initial_header_level': 2}
    parts = publish_parts(text, writer_name='html5', settings_overrides=overrides)
    rendered = parts['html_title'] + parts['body']
    return mark_safe(rendered)
| 32.923077 | 100 | 0.794393 | 0 | 0 | 0 | 0 | 225 | 0.525701 | 0 | 0 | 52 | 0.121495 |
140bd4371229916c2d5f56c52a91980370ca0a17 | 35,931 | py | Python | aether/sdk/auth/keycloak/tests/test_keycloak.py | eHealthAfrica/aether-django-sdk-library | fc371af89bfed155d465049320f32bf43860d001 | [
"Apache-2.0"
] | 1 | 2020-05-04T21:05:11.000Z | 2020-05-04T21:05:11.000Z | aether/sdk/auth/keycloak/tests/test_keycloak.py | eHealthAfrica/aether-django-sdk-library | fc371af89bfed155d465049320f32bf43860d001 | [
"Apache-2.0"
] | 3 | 2019-09-30T15:45:43.000Z | 2020-04-29T08:12:37.000Z | aether/sdk/auth/keycloak/tests/test_keycloak.py | eHealthAfrica/aether-django-sdk-library | fc371af89bfed155d465049320f32bf43860d001 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from http.cookies import SimpleCookie
from importlib import import_module
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import RequestFactory, override_settings
from django.urls import reverse, resolve
from aether.sdk.tests import AetherTestCase
from aether.sdk.unittest import MockResponse, UrlsTestCase
from aether.sdk.utils import get_meta_http_name
from aether.sdk.auth.keycloak.utils import _KC_TOKEN_SESSION as TOKEN_KEY
from aether.sdk.auth.keycloak.views import KeycloakLogoutView
user_objects = get_user_model().objects
@override_settings(
    AUTH_URL='accounts',
    KEYCLOAK_BEHIND_SCENES=True,
)
class KeycloakBehindTests(AetherTestCase, UrlsTestCase):
    """Keycloak "behind the scenes" mode: the app itself posts the user's
    credentials to keycloak, so the login form asks for username/password/realm.
    """

    def test__urls__accounts__login(self):
        """Login URL resolves to the plain Django ``LoginView``."""
        from django.contrib.auth import views

        self.assertEqual(reverse('rest_framework:login'), '/accounts/login')
        self.assertEqual(resolve('/accounts/login').func.view_class,
                         views.LoginView.as_view().view_class)

    def test__workflow(self):
        """Full login/refresh/logout workflow with keycloak mocked out."""
        FAKE_TOKEN = {
            'access_token': 'access-keycloak',
            'refresh_token': 'refresh-keycloak',
        }
        REALM = 'testing'
        # login using accounts login entrypoint
        LOGIN_URL = reverse('rest_framework:login')
        SAMPLE_URL = reverse('testmodel-list')

        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()
        self.client.cookies = SimpleCookie({settings.SESSION_COOKIE_NAME: store.session_key})
        self.assertIsNotNone(self.client.session)

        # visit any page that requires authentication (without being logged)
        response = self.client.get(SAMPLE_URL)
        self.assertEqual(response.status_code, 403)

        # make realm check fail
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=404),
                        ]) as mock_req_1:
            response = self.client.post(LOGIN_URL, data={
                'username': 'user',
                'password': 'secretsecret',
                'realm': 'fake',
            })
            content = response.content.decode('utf-8')
            self.assertIn('Please correct the error below.', content)
            self.assertIn('Invalid realm', content)

            session = self.client.session
            self.assertIsNone(session.get(TOKEN_KEY))
            self.assertIsNone(session.get(settings.REALM_COOKIE))

            mock_req_1.assert_called_once_with(
                method='head',
                url=f'{settings.KEYCLOAK_SERVER_URL}/fake/account',
            )

        # no auth yet
        session = self.client.session
        self.assertIsNone(session.get(TOKEN_KEY))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # make get `token` from keycloack fail
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                            # get token from keycloak
                            MockResponse(status_code=400),
                        ]) as mock_req_2:
            response = self.client.post(LOGIN_URL, data={
                'username': 'user',
                'password': 'secretsecret',
                'realm': REALM,
            })
            content = response.content.decode('utf-8')
            self.assertIn('Please enter a correct username and password.', content)
            self.assertIn('Note that both fields may be case-sensitive.', content)

            mock_req_2.assert_has_calls([
                mock.call(
                    method='head',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/account',
                ),
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                    data={
                        'grant_type': 'password',
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'username': 'user',
                        'password': 'secretsecret',
                    },
                ),
            ])

        # no auth yet
        session = self.client.session
        self.assertIsNone(session.get(TOKEN_KEY))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # make get `userinfo` from keyclock fail (unlikely if `token` doesn't)
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                            # get token from keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                            # get userinfo from keycloak
                            MockResponse(status_code=404),
                        ]) as mock_req_3:
            response = self.client.post(LOGIN_URL, data={
                'username': 'user',
                'password': 'secretsecret',
                'realm': REALM,
            })
            content = response.content.decode('utf-8')
            self.assertIn('Please enter a correct username and password.', content)
            self.assertIn('Note that both fields may be case-sensitive.', content)

            mock_req_3.assert_has_calls([
                mock.call(
                    method='head',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/account',
                ),
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                    data={
                        'grant_type': 'password',
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'username': 'user',
                        'password': 'secretsecret',
                    },
                ),
                mock.call(
                    method='get',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/userinfo',
                    headers={'Authorization': 'Bearer {}'.format(FAKE_TOKEN['access_token'])},
                ),
            ])

        # no auth yet
        session = self.client.session
        self.assertIsNone(session.get(TOKEN_KEY))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # finally, logs in
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                            # get token from keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                            # get userinfo from keycloak
                            MockResponse(status_code=200, json_data={
                                'preferred_username': 'user',
                                'given_name': 'given',
                                'family_name': 'family',
                                'email': 'user@example.com',
                            }),
                        ]) as mock_req_4:
            self.assertEqual(user_objects.filter(username='testing__user').count(), 0)

            response = self.client.post(LOGIN_URL, data={
                'username': 'user',
                'password': 'secretsecret',
                'realm': REALM,
            })

            # the local user "<realm>__<username>" is created on first login
            self.assertEqual(user_objects.filter(username='testing__user').count(), 1)
            user = user_objects.get(username='testing__user')
            self.assertEqual(user.first_name, 'given')
            self.assertEqual(user.last_name, 'family')
            self.assertEqual(user.email, 'user@example.com')

            session = self.client.session
            self.assertEqual(session.get(TOKEN_KEY), FAKE_TOKEN)
            self.assertEqual(session.get(settings.REALM_COOKIE), REALM)

            mock_req_4.assert_has_calls([
                mock.call(
                    method='head',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/account',
                ),
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                    data={
                        'grant_type': 'password',
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'username': 'user',
                        'password': 'secretsecret',
                    },
                ),
                mock.call(
                    method='get',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/userinfo',
                    headers={'Authorization': 'Bearer {}'.format(FAKE_TOKEN['access_token'])},
                ),
            ])

        # visit any page that requires authentication
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # refresh token in keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                        ]) as mock_req_5:
            response = self.client.get(SAMPLE_URL)
            self.assertEqual(response.status_code, 200)

            mock_req_5.assert_called_once_with(
                method='post',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                data={
                    'grant_type': 'refresh_token',
                    'client_id': settings.KEYCLOAK_CLIENT_ID,
                    'refresh_token': FAKE_TOKEN['refresh_token'],
                },
            )

        # visit any page that requires authentication and fails
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # refresh token in keycloak
                            MockResponse(status_code=400),
                            # log outs call
                            MockResponse(status_code=204),
                        ]) as mock_req_6:
            response = self.client.get(SAMPLE_URL)
            self.assertEqual(response.status_code, 403)

            mock_req_6.assert_has_calls([
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                    data={
                        'grant_type': 'refresh_token',
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'refresh_token': FAKE_TOKEN['refresh_token'],
                    },
                ),
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/logout',
                    data={
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'refresh_token': FAKE_TOKEN['refresh_token'],
                    },
                ),
            ])

        # side effect of being logged out
        session = self.client.session
        self.assertIsNone(session.get(TOKEN_KEY))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # logs in again
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                            # get token from keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                            # get userinfo from keycloak
                            MockResponse(status_code=200, json_data={
                                'preferred_username': 'user',
                                'given_name': 'John',
                                'family_name': 'Doe',
                                'email': 'john.doe@example.com',
                            }),
                        ]):
            response = self.client.post(LOGIN_URL, data={
                'username': 'user',
                'password': 'secretsecret',
                'realm': REALM,
            })

        # user data is updated
        user = user_objects.get(username='testing__user')
        self.assertEqual(user.first_name, 'John')
        self.assertEqual(user.last_name, 'Doe')
        self.assertEqual(user.email, 'john.doe@example.com')

        # logs out
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request') as mock_req_7:
            self.client.logout()

            mock_req_7.assert_called_once_with(
                method='post',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/logout',
                data={
                    'client_id': settings.KEYCLOAK_CLIENT_ID,
                    'refresh_token': FAKE_TOKEN['refresh_token'],
                },
            )

        session = self.client.session
        self.assertIsNone(session.get(TOKEN_KEY))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # logs out and visit any page again
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request') as mock_req_8:
            self.client.logout()
            self.assertEqual(self.client.get(SAMPLE_URL).status_code, 403)
            mock_req_8.assert_not_called()
@override_settings(
    AUTH_URL='accounts',
    KEYCLOAK_BEHIND_SCENES=False,
)
class KeycloakTests(UrlsTestCase):
    """Keycloak redirect mode: the login form only asks for the realm and the
    browser is redirected to keycloak, which later calls back with a ``code``.
    """

    def test__urls__accounts__login(self):
        """Login URL resolves to the keycloak-aware login view."""
        from aether.sdk.auth.keycloak.views import KeycloakLoginView

        self.assertEqual(reverse('rest_framework:login'), '/accounts/login')
        self.assertEqual(resolve('/accounts/login').func.view_class,
                         KeycloakLoginView.as_view().view_class)

    def test__workflow(self):
        """Full redirect-based login/refresh/logout workflow with keycloak mocked."""
        FAKE_TOKEN = {
            'access_token': 'access-keycloak',
            'refresh_token': 'refresh-keycloak',
        }
        REALM = 'testing'
        # login using accounts login entrypoint
        LOGIN_URL = reverse('rest_framework:login')
        SAMPLE_URL = reverse('testmodel-list')

        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()
        self.client.cookies = SimpleCookie({settings.SESSION_COOKIE_NAME: store.session_key})
        self.assertIsNotNone(self.client.session)

        # visit any page that requires authentication (without being logged)
        response = self.client.get(SAMPLE_URL)
        self.assertEqual(response.status_code, 403)

        # make realm check fail
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=404),
                        ]) as mock_req_1:
            response = self.client.post(LOGIN_URL, data={'realm': 'fake'})
            content = response.content.decode('utf-8')
            self.assertIn('Please correct the error below.', content)
            self.assertIn('Invalid realm', content)

            session = self.client.session
            self.assertIsNone(session.get(TOKEN_KEY))
            self.assertIsNone(session.get(settings.REALM_COOKIE))

            mock_req_1.assert_called_once_with(
                method='head',
                url=f'{settings.KEYCLOAK_SERVER_URL}/fake/account',
            )

        # check that the login response is a redirection to keycloak server
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                        ]) as mock_req_2:
            response = self.client.post(LOGIN_URL, data={'realm': REALM})
            self.assertEqual(response.status_code, 302)
            self.assertIn(
                f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/auth?'
                f'&client_id={settings.KEYCLOAK_CLIENT_ID}'
                '&scope=openid'
                '&response_type=code'
                '&redirect_uri=',
                response.url)

            mock_req_2.assert_called_once_with(
                method='head',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/account',
            )

        # realm is in session but not the token
        session = self.client.session
        self.assertNotIn(TOKEN_KEY, session)
        self.assertEqual(session.get(settings.REALM_COOKIE), REALM)

        # go to login page without the proper params does nothing
        self.client.get(LOGIN_URL)

        # realm is in session but not the token
        session = self.client.session
        self.assertNotIn(TOKEN_KEY, session)
        self.assertEqual(session.get(settings.REALM_COOKIE), REALM)

        # make get `token` from keycloack fail
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # get token from keycloak
                            MockResponse(status_code=404),
                        ]) as mock_req_3:
            # send keycloak response to login page
            response = self.client.get(LOGIN_URL + '?code=123&session_state=abc')

            mock_req_3.assert_called_once_with(
                method='post',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                data={
                    'grant_type': 'authorization_code',
                    'client_id': settings.KEYCLOAK_CLIENT_ID,
                    'client_session_state': 'abc',
                    'client_session_host': mock.ANY,
                    'code': '123',
                    'redirect_uri': mock.ANY,
                },
            )

        # realm is not in session
        session = self.client.session
        self.assertNotIn(TOKEN_KEY, session)
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # make get `userinfo` from keyclock fail (unlikely if `token` doesn't)
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                            # get token from keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                            # get userinfo from keycloak
                            MockResponse(status_code=404),
                        ]) as mock_req_4:
            # first step
            response = self.client.post(LOGIN_URL, data={'realm': REALM})

            # realm is in session but not the token
            session = self.client.session
            self.assertNotIn(TOKEN_KEY, session)
            self.assertEqual(session.get(settings.REALM_COOKIE), REALM)

            # second step
            response = self.client.get(LOGIN_URL + '?code=123&session_state=abc')
            content = response.content.decode('utf-8')
            self.assertIn('An error ocurred while authenticating against keycloak', content)

            # realm is not in session
            session = self.client.session
            self.assertNotIn(TOKEN_KEY, session)
            self.assertIsNone(session.get(settings.REALM_COOKIE))

            mock_req_4.assert_has_calls([
                mock.call(
                    method='head',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/account',
                ),
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                    data={
                        'grant_type': 'authorization_code',
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'client_session_state': 'abc',
                        'client_session_host': mock.ANY,
                        'code': '123',
                        'redirect_uri': mock.ANY,
                    },
                ),
                mock.call(
                    method='get',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/userinfo',
                    headers={'Authorization': 'Bearer {}'.format(FAKE_TOKEN['access_token'])},
                ),
            ])

        # finally, logs in
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                            # get token from keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                            # get userinfo from keycloak
                            MockResponse(status_code=200, json_data={
                                'preferred_username': 'user',
                                'given_name': 'given',
                                'family_name': 'family',
                                'email': 'user@example.com',
                            }),
                        ]) as mock_req_5:
            self.assertEqual(user_objects.filter(username='testing__user').count(), 0)

            # first step
            response = self.client.post(LOGIN_URL, data={'realm': REALM})
            # second step
            response = self.client.get(LOGIN_URL + '?code=123&session_state=abc')

            # the local user "<realm>__<username>" is created on first login
            self.assertEqual(user_objects.filter(username='testing__user').count(), 1)
            user = user_objects.get(username='testing__user')
            self.assertEqual(user.first_name, 'given')
            self.assertEqual(user.last_name, 'family')
            self.assertEqual(user.email, 'user@example.com')

            session = self.client.session
            self.assertEqual(session.get(TOKEN_KEY), FAKE_TOKEN)
            self.assertEqual(session.get(settings.REALM_COOKIE), REALM)

            mock_req_5.assert_has_calls([
                mock.call(
                    method='head',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/account',
                ),
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                    data={
                        'grant_type': 'authorization_code',
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'client_session_state': 'abc',
                        'client_session_host': mock.ANY,
                        'code': '123',
                        'redirect_uri': mock.ANY,
                    },
                ),
                mock.call(
                    method='get',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/userinfo',
                    headers={'Authorization': 'Bearer {}'.format(FAKE_TOKEN['access_token'])},
                ),
            ])

        # visit any page that requires authentication
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # refresh token in keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                        ]) as mock_req_6:
            response = self.client.get(SAMPLE_URL)
            self.assertEqual(response.status_code, 200)

            mock_req_6.assert_called_once_with(
                method='post',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                data={
                    'grant_type': 'refresh_token',
                    'client_id': settings.KEYCLOAK_CLIENT_ID,
                    'refresh_token': FAKE_TOKEN['refresh_token'],
                },
            )

        # visit any page that requires authentication and fails
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # refresh token in keycloak
                            MockResponse(status_code=400),
                            # log outs call
                            MockResponse(status_code=204),
                        ]) as mock_req_7:
            response = self.client.get(SAMPLE_URL)
            self.assertEqual(response.status_code, 403)

            mock_req_7.assert_has_calls([
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/token',
                    data={
                        'grant_type': 'refresh_token',
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'refresh_token': FAKE_TOKEN['refresh_token'],
                    },
                ),
                mock.call(
                    method='post',
                    url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/logout',
                    data={
                        'client_id': settings.KEYCLOAK_CLIENT_ID,
                        'refresh_token': FAKE_TOKEN['refresh_token'],
                    },
                ),
            ])

        # side effect of being logged out
        session = self.client.session
        self.assertIsNone(session.get(TOKEN_KEY))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # logs in again
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # check realm request
                            MockResponse(status_code=204),
                            # get token from keycloak
                            MockResponse(status_code=200, json_data=FAKE_TOKEN),
                            # get userinfo from keycloak
                            MockResponse(status_code=200, json_data={
                                'preferred_username': 'user',
                                'given_name': 'John',
                                'family_name': 'Doe',
                                'email': 'john.doe@example.com',
                            }),
                        ]):
            # first step
            response = self.client.post(LOGIN_URL, data={'realm': REALM})
            # second step
            response = self.client.get(LOGIN_URL + '?code=123&session_state=abc')

        # user data is updated
        user = user_objects.get(username='testing__user')
        self.assertEqual(user.first_name, 'John')
        self.assertEqual(user.last_name, 'Doe')
        self.assertEqual(user.email, 'john.doe@example.com')

        # logs out
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request') as mock_req_8:
            self.client.logout()

            mock_req_8.assert_called_once_with(
                method='post',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/logout',
                data={
                    'client_id': settings.KEYCLOAK_CLIENT_ID,
                    'refresh_token': FAKE_TOKEN['refresh_token'],
                },
            )

        session = self.client.session
        self.assertIsNone(session.get(TOKEN_KEY))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # logs out and visit any page again
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request') as mock_req_9:
            self.client.logout()
            self.assertEqual(self.client.get(SAMPLE_URL).status_code, 403)
            mock_req_9.assert_not_called()
class KeycloakGatewayTests(UrlsTestCase):
    """Gateway mode: an external gateway authenticates the user and forwards
    the keycloak access token in a request header; the app only validates it.
    """

    def test_logout(self):
        """Logout view behavior for the different next-page / realm combinations."""
        logout_url = reverse('logout')
        self.assertEqual(logout_url, '/logout')
        self.assertNotEqual(logout_url, reverse('rest_framework:logout'))

        response = self.client.get(logout_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], settings.LOGGED_OUT_TEMPLATE)

        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()

        request = RequestFactory().get('/')
        setattr(request, 'session', store)

        # No next page: displays logged out template
        response = KeycloakLogoutView.as_view(
            next_page=None,
            template_name=settings.LOGGED_OUT_TEMPLATE,
        )(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], settings.LOGGED_OUT_TEMPLATE)

        # No realm: goes to next page
        response = KeycloakLogoutView.as_view(next_page='/check-app')(request)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/check-app')

        # Public realm: goes to next page
        next_page = f'/{settings.GATEWAY_PUBLIC_REALM}/{settings.GATEWAY_SERVICE_ID}/check-app'
        response = KeycloakLogoutView.as_view(next_page=next_page)(request)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, next_page)

        # No public realm: goes to gateway logout
        next_page = f'/realm-name/{settings.GATEWAY_SERVICE_ID}/check-app'
        response = KeycloakLogoutView.as_view(next_page=next_page)(request)
        self.assertEqual(response.status_code, 302)
        self.assertIn(
            f'/realm-name/{settings.GATEWAY_SERVICE_ID}/logout',
            response.url)

    def test_workflow(self):
        """Token-in-header authentication against realm-scoped gateway URLs."""
        FAKE_TOKEN = 'access-keycloak'
        REALM = 'testing'
        SAMPLE_URL = reverse('testmodel-list', kwargs={'realm': REALM})
        HTTP_HEADER = get_meta_http_name(settings.GATEWAY_HEADER_TOKEN)

        self.assertEqual(SAMPLE_URL, f'/{REALM}/{settings.GATEWAY_SERVICE_ID}/testtestmodel/')

        # visit any page without a valid token
        response = self.client.get(SAMPLE_URL)
        self.assertEqual(response.status_code, 403)

        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # get userinfo from keycloak
                            MockResponse(status_code=404),
                        ]) as mock_req_1:
            response = self.client.get(SAMPLE_URL, **{HTTP_HEADER: FAKE_TOKEN})
            self.assertEqual(response.status_code, 403)

            mock_req_1.assert_called_once_with(
                method='get',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/userinfo',
                headers={'Authorization': f'Bearer {FAKE_TOKEN}'},
            )

        # visit any page with a valid token
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # get userinfo from keycloak
                            MockResponse(status_code=200, json_data={
                                'preferred_username': 'user',
                                'given_name': 'John',
                                'family_name': 'Doe',
                                'email': 'john.doe@example.com',
                            }),
                        ]) as mock_req_2:
            self.assertEqual(user_objects.filter(username='testing__user').count(), 0)

            response = self.client.get(SAMPLE_URL, **{HTTP_HEADER: FAKE_TOKEN})
            self.assertEqual(response.status_code, 200)

            # the local user "<realm>__<username>" is created on first request
            self.assertEqual(user_objects.filter(username='testing__user').count(), 1)
            user = user_objects.get(username='testing__user')
            self.assertEqual(user.first_name, 'John')
            self.assertEqual(user.last_name, 'Doe')
            self.assertEqual(user.email, 'john.doe@example.com')

            mock_req_2.assert_called_once_with(
                method='get',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/userinfo',
                headers={'Authorization': f'Bearer {FAKE_TOKEN}'},
            )

        session = self.client.session
        self.assertTrue(session.get(settings.GATEWAY_HEADER_TOKEN),
                        'flagged as gateway authenticated')
        self.assertEqual(session.get(settings.REALM_COOKIE), REALM)

        # visit same page with a valid token again
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request',
                        side_effect=[
                            # get userinfo from keycloak
                            MockResponse(status_code=200, json_data={
                                'preferred_username': 'user',
                                'given_name': 'John',
                                'family_name': 'Smith',
                                'email': 'john.smith@example.com',
                            }),
                        ]) as mock_req_3:
            self.assertEqual(user_objects.filter(username='testing__user').count(), 1)

            response = self.client.get(SAMPLE_URL, **{HTTP_HEADER: FAKE_TOKEN})
            self.assertEqual(response.status_code, 200)

            # the existing local user is updated, not duplicated
            self.assertEqual(user_objects.filter(username='testing__user').count(), 1)
            user = user_objects.get(username='testing__user')
            self.assertEqual(user.first_name, 'John')
            self.assertEqual(user.last_name, 'Smith')
            self.assertEqual(user.email, 'john.smith@example.com')

            mock_req_3.assert_called_once_with(
                method='get',
                url=f'{settings.KEYCLOAK_SERVER_URL}/{REALM}/protocol/openid-connect/userinfo',
                headers={'Authorization': f'Bearer {FAKE_TOKEN}'},
            )

        # visit any page without a valid token
        response = self.client.get(SAMPLE_URL)
        self.assertEqual(response.status_code, 403)

        # the user is logged out
        session = self.client.session
        self.assertIsNone(session.get(settings.GATEWAY_HEADER_TOKEN))
        self.assertIsNone(session.get(settings.REALM_COOKIE))

        # visit a non gateway page with the token
        with mock.patch('aether.sdk.auth.keycloak.utils.exec_request') as mock_req_4:
            response = self.client.get(reverse('testmodel-list'), **{HTTP_HEADER: FAKE_TOKEN})
            self.assertEqual(response.status_code, 403)
            mock_req_4.assert_not_called()
| 43.238267 | 99 | 0.544627 | 34,421 | 0.957975 | 0 | 0 | 27,790 | 0.773427 | 0 | 0 | 10,421 | 0.290028 |
140dfac64786934c620c3477a54ffeec5d9625f8 | 1,083 | py | Python | ballet/__init__.py | HDI-Project/fhub_core | 9667a47fbd8b4caf2e92118dc5357f34aae2098b | [
"MIT"
] | 19 | 2021-04-06T18:56:39.000Z | 2022-03-15T00:23:00.000Z | ballet/__init__.py | HDI-Project/ballet | 9667a47fbd8b4caf2e92118dc5357f34aae2098b | [
"MIT"
] | 52 | 2018-09-27T01:11:58.000Z | 2021-03-24T19:11:18.000Z | ballet/__init__.py | HDI-Project/ballet | 9667a47fbd8b4caf2e92118dc5357f34aae2098b | [
"MIT"
] | 3 | 2019-12-07T17:55:34.000Z | 2021-02-02T17:58:39.000Z | # -*- coding: utf-8 -*-
"""Top-level package for ballet."""
__author__ = 'Micah Smith'
__email__ = 'micahs@mit.edu'
__version__ = '0.19.5'
# filter warnings
import warnings # noqa E402
warnings.filterwarnings(
action='ignore', module='scipy', message='^internal gelsd')
# silence sklearn deprecation warnings
import logging # noqa E402
logging.captureWarnings(True)
import sklearn # noqa E402
logging.captureWarnings(False)
warnings.filterwarnings(
action='ignore', module='sklearn', category=DeprecationWarning)
warnings.filterwarnings(
action='ignore', module='sklearn', category=FutureWarning)
# configure module-level logging
from ballet.util.log import logger # noqa E402
logger.addHandler(logging.NullHandler())
# re-export some names
from ballet.client import b # noqa E402
from ballet.contrib import collect_contrib_features # noqa E402
from ballet.feature import Feature # noqa E402
from ballet.project import load_config, Project # noqa E402
# for feature development, you really only need these two members
__all__ = (
'b',
'Feature',
)
| 27.769231 | 67 | 0.753463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.401662 |
140e681121ae14e453921573cd7aea566206c1e5 | 8,816 | py | Python | darling_ansible/python_venv/lib/python3.7/site-packages/oci/apigateway/models/cors_policy.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/apigateway/models/cors_policy.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/apigateway/models/cors_policy.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | 1 | 2020-06-25T03:12:58.000Z | 2020-06-25T03:12:58.000Z | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CorsPolicy(object):
    """
    Enable CORS (Cross-Origin-Resource-Sharing) request handling.
    """
    # NOTE(review): this is an auto-generated OCI SDK model; the
    # swagger_types/attribute_map dicts below are introspected by the SDK's
    # (de)serialization machinery, so their keys must mirror the properties.
    def __init__(self, **kwargs):
        """
        Initializes a new CorsPolicy object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param allowed_origins:
            The value to assign to the allowed_origins property of this CorsPolicy.
        :type allowed_origins: list[str]
        :param allowed_methods:
            The value to assign to the allowed_methods property of this CorsPolicy.
        :type allowed_methods: list[str]
        :param allowed_headers:
            The value to assign to the allowed_headers property of this CorsPolicy.
        :type allowed_headers: list[str]
        :param exposed_headers:
            The value to assign to the exposed_headers property of this CorsPolicy.
        :type exposed_headers: list[str]
        :param is_allow_credentials_enabled:
            The value to assign to the is_allow_credentials_enabled property of this CorsPolicy.
        :type is_allow_credentials_enabled: bool
        :param max_age_in_seconds:
            The value to assign to the max_age_in_seconds property of this CorsPolicy.
        :type max_age_in_seconds: int
        """
        # Python attribute name -> declared swagger type (used by serializers).
        self.swagger_types = {
            'allowed_origins': 'list[str]',
            'allowed_methods': 'list[str]',
            'allowed_headers': 'list[str]',
            'exposed_headers': 'list[str]',
            'is_allow_credentials_enabled': 'bool',
            'max_age_in_seconds': 'int'
        }
        # Python attribute name -> JSON field name in the REST payload.
        self.attribute_map = {
            'allowed_origins': 'allowedOrigins',
            'allowed_methods': 'allowedMethods',
            'allowed_headers': 'allowedHeaders',
            'exposed_headers': 'exposedHeaders',
            'is_allow_credentials_enabled': 'isAllowCredentialsEnabled',
            'max_age_in_seconds': 'maxAgeInSeconds'
        }
        # Backing fields for the properties below; populated by the
        # init_model_state_from_kwargs decorator from **kwargs.
        self._allowed_origins = None
        self._allowed_methods = None
        self._allowed_headers = None
        self._exposed_headers = None
        self._is_allow_credentials_enabled = None
        self._max_age_in_seconds = None
    @property
    def allowed_origins(self):
        """
        **[Required]** Gets the allowed_origins of this CorsPolicy.
        The list of allowed origins that the CORS handler will use to respond to CORS requests. The gateway will
        send the Access-Control-Allow-Origin header with the best origin match for the circumstances. '*' will match
        any origins, and 'null' will match queries from 'file:' origins. All other origins must be qualified with the
        scheme, full hostname, and port if necessary.
        :return: The allowed_origins of this CorsPolicy.
        :rtype: list[str]
        """
        return self._allowed_origins
    @allowed_origins.setter
    def allowed_origins(self, allowed_origins):
        """
        Sets the allowed_origins of this CorsPolicy.
        The list of allowed origins that the CORS handler will use to respond to CORS requests. The gateway will
        send the Access-Control-Allow-Origin header with the best origin match for the circumstances. '*' will match
        any origins, and 'null' will match queries from 'file:' origins. All other origins must be qualified with the
        scheme, full hostname, and port if necessary.
        :param allowed_origins: The allowed_origins of this CorsPolicy.
        :type: list[str]
        """
        self._allowed_origins = allowed_origins
    @property
    def allowed_methods(self):
        """
        Gets the allowed_methods of this CorsPolicy.
        The list of allowed HTTP methods that will be returned for the preflight OPTIONS request in the
        Access-Control-Allow-Methods header. '*' will allow all methods.
        :return: The allowed_methods of this CorsPolicy.
        :rtype: list[str]
        """
        return self._allowed_methods
    @allowed_methods.setter
    def allowed_methods(self, allowed_methods):
        """
        Sets the allowed_methods of this CorsPolicy.
        The list of allowed HTTP methods that will be returned for the preflight OPTIONS request in the
        Access-Control-Allow-Methods header. '*' will allow all methods.
        :param allowed_methods: The allowed_methods of this CorsPolicy.
        :type: list[str]
        """
        self._allowed_methods = allowed_methods
    @property
    def allowed_headers(self):
        """
        Gets the allowed_headers of this CorsPolicy.
        The list of headers that will be allowed from the client via the Access-Control-Allow-Headers header.
        '*' will allow all headers.
        :return: The allowed_headers of this CorsPolicy.
        :rtype: list[str]
        """
        return self._allowed_headers
    @allowed_headers.setter
    def allowed_headers(self, allowed_headers):
        """
        Sets the allowed_headers of this CorsPolicy.
        The list of headers that will be allowed from the client via the Access-Control-Allow-Headers header.
        '*' will allow all headers.
        :param allowed_headers: The allowed_headers of this CorsPolicy.
        :type: list[str]
        """
        self._allowed_headers = allowed_headers
    @property
    def exposed_headers(self):
        """
        Gets the exposed_headers of this CorsPolicy.
        The list of headers that the client will be allowed to see from the response as indicated by the
        Access-Control-Expose-Headers header. '*' will expose all headers.
        :return: The exposed_headers of this CorsPolicy.
        :rtype: list[str]
        """
        return self._exposed_headers
    @exposed_headers.setter
    def exposed_headers(self, exposed_headers):
        """
        Sets the exposed_headers of this CorsPolicy.
        The list of headers that the client will be allowed to see from the response as indicated by the
        Access-Control-Expose-Headers header. '*' will expose all headers.
        :param exposed_headers: The exposed_headers of this CorsPolicy.
        :type: list[str]
        """
        self._exposed_headers = exposed_headers
    @property
    def is_allow_credentials_enabled(self):
        """
        Gets the is_allow_credentials_enabled of this CorsPolicy.
        Whether to send the Access-Control-Allow-Credentials header to allow CORS requests with cookies.
        :return: The is_allow_credentials_enabled of this CorsPolicy.
        :rtype: bool
        """
        return self._is_allow_credentials_enabled
    @is_allow_credentials_enabled.setter
    def is_allow_credentials_enabled(self, is_allow_credentials_enabled):
        """
        Sets the is_allow_credentials_enabled of this CorsPolicy.
        Whether to send the Access-Control-Allow-Credentials header to allow CORS requests with cookies.
        :param is_allow_credentials_enabled: The is_allow_credentials_enabled of this CorsPolicy.
        :type: bool
        """
        self._is_allow_credentials_enabled = is_allow_credentials_enabled
    @property
    def max_age_in_seconds(self):
        """
        Gets the max_age_in_seconds of this CorsPolicy.
        The time in seconds for the client to cache preflight responses. This is sent as the Access-Control-Max-Age
        if greater than 0.
        :return: The max_age_in_seconds of this CorsPolicy.
        :rtype: int
        """
        return self._max_age_in_seconds
    @max_age_in_seconds.setter
    def max_age_in_seconds(self, max_age_in_seconds):
        """
        Sets the max_age_in_seconds of this CorsPolicy.
        The time in seconds for the client to cache preflight responses. This is sent as the Access-Control-Max-Age
        if greater than 0.
        :param max_age_in_seconds: The max_age_in_seconds of this CorsPolicy.
        :type: int
        """
        self._max_age_in_seconds = max_age_in_seconds
    def __repr__(self):
        return formatted_flat_dict(self)
    # Value equality over all backing fields (via instance __dict__).
    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| 36.733333 | 245 | 0.674342 | 8,276 | 0.938748 | 0 | 0 | 8,306 | 0.942151 | 0 | 0 | 6,315 | 0.716311 |
140ff5919b9d8c9821b371db5ca4896db28bf7f0 | 1,549 | py | Python | tests/layers.py | jiansowa/PaddleSlim | a45431c99a775782b7fe5633f313d36ff582e797 | [
"Apache-2.0"
] | null | null | null | tests/layers.py | jiansowa/PaddleSlim | a45431c99a775782b7fe5633f313d36ff582e797 | [
"Apache-2.0"
] | 1 | 2020-07-14T09:50:51.000Z | 2020-07-14T09:50:51.000Z | tests/layers.py | jiansowa/PaddleSlim | a45431c99a775782b7fe5633f313d36ff582e797 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
def conv_bn_layer(input,
                  num_filters,
                  filter_size,
                  name,
                  stride=1,
                  groups=1,
                  act=None):
    """Build a conv2d layer immediately followed by batch normalization.

    The convolution itself has no activation and no bias; the activation
    (``act``) is applied by the batch-norm layer. All learnable parameters
    are named after ``name`` so checkpoints stay reproducible.
    """
    # 'same' padding for odd kernel sizes.
    same_padding = (filter_size - 1) // 2
    conv_out = fluid.layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=same_padding,
        groups=groups,
        act=None,
        param_attr=ParamAttr(name=name + "_weights"),
        bias_attr=False,
        name=name + "_out")
    bn_prefix = name + "_bn"
    normalized = fluid.layers.batch_norm(
        input=conv_out,
        act=act,
        name=bn_prefix + '_output',
        param_attr=ParamAttr(name=bn_prefix + '_scale'),
        bias_attr=ParamAttr(bn_prefix + '_offset'),
        moving_mean_name=bn_prefix + '_mean',
        moving_variance_name=bn_prefix + '_variance', )
    return normalized
| 34.422222 | 74 | 0.639768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.426727 |
141276ea795707d07f8dd66621b6cd1a2a1dac24 | 15,825 | py | Python | zpy/db/oracle.py | NoeCruzMW/zpy-flask-msc | 9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a | [
"MIT"
] | null | null | null | zpy/db/oracle.py | NoeCruzMW/zpy-flask-msc | 9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a | [
"MIT"
] | null | null | null | zpy/db/oracle.py | NoeCruzMW/zpy-flask-msc | 9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a | [
"MIT"
] | null | null | null | from zpy.logger import TLogger, c_info
from zpy.db.utils import get_current_schema
from marshmallow_objects import models
from typing import Any, Dict, List, Optional, Union
from zpy.db import DBConnection
from zpy.utils.funcs import exec_ifnt_null, safely_exec
from enum import Enum
from marshmallow import Schema
import cx_Oracle
import json
import logging
# from . import T
__author__ = "Noé Cruz | contactozurckz@gmail.com"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["Noé Cruz", "Zurck'z"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Noé Cruz"
__email__ = "contactozurckz@gmail.com"
__status__ = "Dev"
class OracleType(Enum):
    """Selector for the return type passed to cx_Oracle ``Cursor.callfunc``.

    NOTE(review): ``number``, ``integer`` and ``decimal`` all carry the same
    value (cx_Oracle.NUMBER); under Enum semantics the latter two are
    therefore aliases of ``number`` and compare equal to it.
    """
    cursor = cx_Oracle.CURSOR
    number = cx_Oracle.NUMBER
    string = cx_Oracle.STRING
    integer = cx_Oracle.NUMBER
    decimal = cx_Oracle.NUMBER
class OracleParam(Enum):
    """Names of the Oracle collection types used for list-valued parameters.

    The value is looked up (optionally schema-qualified) via
    ``connection.gettype`` when building a parameter; see
    ``OracleDB.__custom_param``.
    """
    LIST_INTEGR = "LIST_INTEGER"
    LIST_STR = "LIST_VARCHAR"
    LIST_CLOB = "LIST_CLOB"
class ZParam:
    """Wrapper describing one list-valued parameter for an Oracle function call.

    Subclasses (IntList, StrList, ClobList) fix the Oracle collection type.
    """
    def __init__(
        self,
        value: Union[List[int], List[float], List[str], List[Any]],
        paramType: OracleParam,
        key: str,
        origin: str = None,
    ) -> None:
        # Raw Python list; converted to an Oracle collection object later.
        self.value = value
        # Which Oracle collection type to build (see OracleParam).
        self.paramType = paramType
        # Keyword-parameter name expected by the called Oracle function.
        self.key = key
        # Presumably a caller-supplied tag for where the value came from;
        # TODO(review): confirm intended semantics — it is stored but not
        # read anywhere in this file.
        self.origin = origin
class IntList(ZParam):
    """ZParam specialization for a list of integers (Oracle LIST_INTEGER)."""
    def __init__(self, value: List[int], key: str, origin: str = None) -> None:
        super().__init__(value, OracleParam.LIST_INTEGR, key, origin)
class StrList(ZParam):
    """ZParam specialization for a list of strings (Oracle LIST_VARCHAR)."""
    def __init__(self, value: List[str], key: str, origin: str = None) -> None:
        super().__init__(value, OracleParam.LIST_STR, key, origin)
class ClobList(ZParam):
    """ZParam specialization for a list of CLOB payloads (Oracle LIST_CLOB).

    ``origin`` now defaults to None for consistency with IntList/StrList;
    existing callers that passed it explicitly are unaffected.
    """
    def __init__(self, value: List[Any], key: str, origin: str = None) -> None:
        super().__init__(value, OracleParam.LIST_CLOB, key, origin)
class OracleDB(DBConnection):
    """cx_Oracle-backed implementation of DBConnection.

    Manages either a single connection (``connect``/``close``) or a session
    pool (``initialize_pool``/``get_pool_connection``/``release_connection``),
    and provides ``execute``/``call``/``exec`` helpers that invoke Oracle
    stored functions and map their results to dicts, marshmallow schemas or
    marshmallow-objects models.
    """
    __local_client_initialized: bool = False
    __local_client_path: str = None
    __config_connection: dict = None
    __connection = None
    __is_connected: bool = False
    __pool = None
    __pool_created: bool = False
    __schemas: List[Dict] = None
    __env: str = None
    __verbose: bool = False
    # * Pool configurations
    __max: int = 5
    __min: int = 1
    __threading: bool = False
    __homogeneus: bool = True

    def __init__(
        self,
        config: dict = None,
        local_client_path: str = None,
        schemas: List[Dict] = None,
        env: str = None,
        verbose: bool = False,
    ) -> None:
        self.__local_client_path = local_client_path
        self.__config_connection = config
        self.__schemas = schemas
        self.__env = env
        self.__verbose = verbose

    def init_local_client(self, path: str):
        """Initialize the Oracle instant client libraries exactly once."""
        if self.__local_client_initialized:
            return
        value = path if self.__local_client_path is None else self.__local_client_path
        try:
            if value is None:
                raise Exception("Local client path not provided.")
            cx_Oracle.init_oracle_client(lib_dir=value)
            self.__local_client_initialized = True
        except Exception as e:
            self.__local_client_initialized = False
            logging.exception(e)

    def __data_connection_checker(self, config: dict = None, mode="TSN") -> str:
        """Build a TSN ('user/pwd@host:port/service') or DSN ('host:port/service') string.

        The instance-level config (constructor argument) wins over the call
        argument when both are present.
        """
        values = (
            config if self.__config_connection is None else self.__config_connection
        )
        if values is None:
            raise Exception("The data for the connection was not provided")
        server = values["DB_URI"]
        port = values["DB_PORT"]
        service = values["DB_SERVICE"]
        user = values["DB_USER"]
        password = values["DB_PASSWORD"]
        if mode == "DSN":
            return "{0}:{1}/{2}".format(server, port, service)
        return "{0}/{1}@{2}:{3}/{4}".format(user, password, server, port, service)

    def get_tsn_dsn_conenction(self, config: dict, mode="TSN") -> str:
        # NOTE(review): public name keeps the historical typo ('conenction')
        # because external callers depend on it.
        return self.__data_connection_checker(config, mode)

    def connect(self, config: dict = None):
        """
        Start oracle connection
        """
        if self.__is_connected:
            return True
        str_connection = self.__data_connection_checker(config)
        self.__connection = cx_Oracle.connect(str_connection)
        self.__is_connected = True
        return True

    def close(self):
        """Close the single connection if one is open.

        BUG FIX: the original additionally executed ``self.is_connected = False``,
        which replaced the ``is_connected()`` method with a bool on this
        instance and broke any later call to it.
        """
        if self.__is_connected:
            self.__connection.close()
            self.__is_connected = False

    def is_connected(self) -> bool:
        return self.__is_connected

    def get_connection(self):
        return self.__connection

    def initialize_pool(
        self,
        dns=None,
        homogeneous: bool = False,
        max: int = 5,
        user=None,
        pwd=None,
        min: int = 1,
        threaded: bool = False,
    ) -> bool:
        """Create the cx_Oracle session pool; no-op if already created or dns is None."""
        if self.__pool_created or dns is None:
            return False
        print("Initializing Pool")
        self.__pool = cx_Oracle.SessionPool(
            user=user,
            password=pwd,
            dsn=dns,
            homogeneous=homogeneous,
            encoding="UTF-8",
            max=max,
            min=min,
            threaded=threaded,
        )
        # Remember the effective pool settings so the pool can be rebuilt
        # lazily by get_pool_connection().
        self.__pool_created = True
        self.__min = min
        self.__max = max
        self.__threading = threaded
        self.__homogeneus = homogeneous
        print("Pool Started Successfuly")
        return True

    def close_pool(self, force: bool = False):
        try:
            if self.__pool_created:
                self.__pool.close(force=force)
                self.__pool_created = False
                print("Pool Closed Successfuly")
        except Exception as e:
            logging.exception(e)

    def get_pool_connection(self, logger: TLogger = None):
        """Acquire a connection from the pool, lazily (re)creating the pool.

        Returns None when the pool cannot be created.
        """
        if self.__pool_created:
            return self.__pool.acquire()
        exec_ifnt_null(
            lambda l: l.add_info("DB POOL NOT INITILIZED, TRY INITIALIZE AGAIN"),
            args=[logger],
        )
        try:
            self.initialize_pool(
                dns=self.get_tsn_dsn_conenction(self.__config_connection, "DSN"),
                homogeneous=self.__homogeneus,
                user=self.__config_connection["DB_USER"],
                pwd=self.__config_connection["DB_PASSWORD"],
                max=self.__max,
                min=self.__min,
                threaded=self.__threading,
            )
            self.__pool_created = True
            return self.__pool.acquire()
        except Exception as e:
            # BUG FIX: guard the logger — it defaults to None and the original
            # called logger.add_exception unconditionally.
            if logger is not None:
                logger.add_exception(e)
            self.__pool_created = False

    def release_connection(self, connection) -> bool:
        """Return a pooled connection; True on success."""
        try:
            if self.__pool_created:
                self.__pool.release(connection)
                return True
        except Exception as e:
            print("CATCH EXCEPTION WHEN TRY RELEASE POOL CONNECTION")
            logging.exception(e)
        return False

    def __proccess_result(self, result_set, type: OracleType, schema: Schema = None):
        """Map a raw callfunc result to a dict payload, schema objects or a scalar."""
        if type == OracleType.cursor:
            columns = [field[0] for field in result_set.description]
            if schema is None:
                rows = [d for d in result_set]
                data = [dict(zip(columns, row)) for row in rows]
                for d in data:
                    for key, value in d.items():
                        # CLOB columns are assumed to hold JSON documents.
                        if isinstance(d[key], cx_Oracle.LOB):
                            d[key] = json.loads(str(value))
                return {"hasError": False, "data": json.dumps(data, default=str)}
            return [schema.load(dict(zip(columns, r))) for r in result_set]
        # BUG FIX: the original tested `elif OracleType.number:` — an enum
        # member is always truthy, so the string branch below was unreachable.
        if type == OracleType.number:
            try:
                return float(result_set)
            except (TypeError, ValueError):
                return result_set
        return str(result_set)

    def execute(
        self,
        function: str,
        type: OracleType,
        parameters: dict = None,
        pool_connection=None,
    ):
        """
        Execute or call oracle functions - FN v0.0.1 | Core v0.0.1

        Returns a dict {"hasError": bool, "data": ...}; errors are captured
        rather than raised.
        """
        if pool_connection is not None:
            cursor = pool_connection.cursor()
        else:
            cursor = self.__connection.cursor()
        if self.__verbose:
            self.show_info(function, parameters, type, None, None, None)
        try:
            db_execute = (
                cursor.callfunc(function, type.value, keywordParameters=parameters)
                if parameters != None
                else cursor.callfunc(function, type.value)
            )
            if type == OracleType.cursor:
                columns = [field[0] for field in db_execute.description]
                rows = [d for d in db_execute]
                data = [dict(zip(columns, row)) for row in rows]
                for d in data:
                    for key, value in d.items():
                        if isinstance(d[key], cx_Oracle.LOB):
                            d[key] = json.loads(str(value))
                db_dto = {"hasError": False, "data": json.dumps(data, default=str)}
            # BUG FIX: was `elif OracleType.number:` (always truthy), which
            # made the raw-result else branch unreachable.
            elif type == OracleType.number:
                db_dto = {"hasError": False, "data": str(db_execute)}
            else:
                db_dto = {"hasError": False, "data": db_execute}
        except Exception as e:
            db_dto = {"hasError": True, "data": str(e)}
        safely_exec(lambda c: c.close(), args=[cursor]) # * Close cursor
        return db_dto

    def call(
        self,
        fn: str,
        type: OracleType,
        params: dict,
        schema: Schema = None,
        pool_connection=None,
    ):
        """
        Execute or call oracle functions - FN v0.0.1 | Core v0.0.2
        """
        if pool_connection is not None:
            cursor = pool_connection.cursor()
        else:
            cursor = self.__connection.cursor()
        if self.__verbose:
            self.show_info(fn, params, type, schema, None, None)
        result_set = (
            cursor.callfunc(fn, type.value, keywordParameters=params)
            if params != None
            else cursor.callfunc(fn, type.value)
        )
        safely_exec(lambda c: c.close(), args=[cursor]) # * Close cursor
        return self.__proccess_result(result_set, type, schema)

    def exec(
        self,
        fn: str,
        ret_type: OracleType,
        params: Optional[Dict] = None,
        custom_params: Optional[List[ZParam]] = None,
        model: Optional[models.Model] = None,
        connection=None,
        db_schema: str = None,
        env: str = None,
    ):
        """
        Execute or call oracle functions - FN v0.0.1 | Core v0.0.7
        New feature for call oracle db functions
        Use this function instead function 'call'
        Parameters
        ----------
        fn : str | required
            Function name with package name: PO_LINES_PKG.FN_GET_LINE
        ret_type : OracleType | required
            The return type of oracle db function
        params : Dict | Optional
            Set parameter that the oracle funtion expects
        custom_params : Optional[List[ZParam, IntList, StrList, ClobList]] | Optional
            Custom Set parameter that the oracle funtion expects, see avaialble custom types
        model : marshmallow_objects.models.Model | Optional
            Model specification where the db data will be volcated
        connection : DB Connection | Optional
            The db connection object, if it is not passed by params, it tries to get a global instance
        Raises
        ------
        NotValueProvided
            Connection
        Returns
        -------
        result set : Union[List[Model],int,float,str]
            The result set of oracle db function
        """
        # BUG FIX: the original created a cursor from self.__connection and
        # then raised unconditionally whenever `connection` was None. Fall
        # back to the instance connection and raise only if neither exists.
        if connection is None:
            connection = self.__connection
        if connection is None:
            raise Exception("Can't get db connection")
        cursor = connection.cursor()
        if db_schema is None and self.__schemas is not None:
            db_schema = get_current_schema(self.__schemas, env, self.__env)
        if custom_params != None and len(custom_params) > 0:
            if params == None:
                params = {}
            # * Build Oracle collection objects for each custom parameter,
            # * using the resolved connection (BUG FIX: the original could
            # * pass None here when falling back to self.__connection).
            for custom in custom_params:
                params[custom.key] = self.__custom_param(
                    connection,
                    paramType=custom.paramType,
                    value=custom.value,
                    schema=db_schema,
                )
        fn = (
            fn
            if db_schema is None or db_schema.replace(" ", "") == ""
            else f"{db_schema}.{fn}"
        )
        if self.__verbose:
            self.show_info(fn, params, ret_type, model, db_schema, env)
        result_set = (
            cursor.callfunc(fn, ret_type.value, keywordParameters=params)
            if params != None
            else cursor.callfunc(fn, ret_type.value)
        )
        safely_exec(lambda c: c.close(), args=[cursor])
        return self.__proccess_result_set(result_set, ret_type, model)

    def __proccess_result_set(
        self, result_set, ret_type: OracleType, model: models.Model = None
    ):
        """
        New version of result set processor
        """
        if ret_type == OracleType.cursor:
            columns = [field[0] for field in result_set.description]
            if model is None:
                rows = [d for d in result_set]
                data = [dict(zip(columns, row)) for row in rows]
                for d in data:
                    for key, value in d.items():
                        if isinstance(d[key], cx_Oracle.LOB):
                            d[key] = json.loads(str(value))
                return {"data": json.dumps(data, default=str)}
            return [model(**dict(zip(columns, r))) for r in result_set]
        # BUG FIX: the original chain used `elif OracleType.number:` (always
        # truthy), so the integer/decimal/string branches were unreachable.
        # NOTE(review): number/integer/decimal share the value
        # cx_Oracle.NUMBER, so under Enum semantics integer/decimal are
        # aliases of number and cannot be told apart here; coerce numeric
        # results to float and fall back to the raw value.
        if ret_type in (OracleType.number, OracleType.integer, OracleType.decimal):
            try:
                return float(result_set)
            except (TypeError, ValueError):
                return result_set
        return str(result_set)

    def __custom_param(
        self,
        connection: Any,
        paramType: OracleParam,
        value: Union[List[int], List[float], List[str], List[Any]],
        schema: str = None,
    ):
        """
        Make custom param
        """
        db_schema = (
            "" if (schema is None or schema.replace(" ", "") == "") else f"{schema}."
        )
        list_type = connection.gettype(f"{db_schema}{paramType.value}")
        return list_type.newobject(value)

    def show_info(self, fn, params, ret_type, v_model, curr_schema, l_env):
        """Print a verbose summary of a function call (enabled via verbose=True)."""
        c_info("\n|-------------------------------------------------|\n", True)
        c_info(f" Function Called: {fn} ", True)
        c_info(" Params: {}".format(params), True)
        c_info(" Return Type: {}".format(ret_type.value), True)
        c_info(f" Ref Volcated Model: {v_model}", True)
        c_info(f" DB Schema: {curr_schema}", True)
        c_info(f" Environment: P: {l_env} G: {self.__env}", True)
        c_info("\n|-------------------------------------------------|\n", True)
| 33.814103 | 102 | 0.561769 | 15,155 | 0.95748 | 0 | 0 | 0 | 0 | 0 | 0 | 2,725 | 0.172163 |
14144f1b8badd85eb67ed4d9f4ba6dac819651a5 | 2,469 | py | Python | sol/frag_sol.py | hebrewsnabla/pyAutoMR | 8e81ed7fd780abd94f8b51e48ee4b980a868c204 | [
"Apache-2.0"
] | 5 | 2021-06-03T07:49:02.000Z | 2022-02-21T11:35:20.000Z | sol/frag_sol.py | hebrewsnabla/pyAutoMR | 8e81ed7fd780abd94f8b51e48ee4b980a868c204 | [
"Apache-2.0"
] | 2 | 2022-01-20T08:33:59.000Z | 2022-03-26T12:21:15.000Z | sol/frag_sol.py | hebrewsnabla/pyAutoMR | 8e81ed7fd780abd94f8b51e48ee4b980a868c204 | [
"Apache-2.0"
] | 1 | 2022-02-21T11:35:34.000Z | 2022-02-21T11:35:34.000Z | from pyscf import gto
import radii
def from_frag(xyz, frags, chgs, spins, gjfhead='', scrfhead='', gjfname='', basis=None, wfnpath=None):
# mol = gto.Mole()
# mol.atom = xyz
# mol.basis = bas
# mol.verbose = 1
# mol.build()
#
if isinstance(frags[0], str):
frags = str2list(frags)
guess_frag(xyz, frags, chgs, spins, gjfhead.lstrip('\n'), scrfhead, gjfname, basis, wfnpath)
def spin_p2g(spin):
if spin >= 0:
spin = spin + 1
elif spin < 0:
spin = spin - 1
return spin
def str2list(frags):
flist = []
for frag in frags:
alist = []
for s in frag.split(','):
if '-' in s:
start = int(s.split('-')[0])
end = int(s.split('-')[1])
else:
start = int(s)
end = int(s)
alist += range(start, end+1)
flist.append(alist)
return flist
def guess_frag(xyz, frags, chgs, spins, gjfhead, scrfhead, gjfname, basis, wfnpath):
'''
frags: e.g. [[1], [2]] for N2
chgs: e.g. [0, 0] for N2
spins: e.g. [3, -3] for N2
'''
#mol.build()
print('**** generating fragments ****')
atom = gto.format_atom(xyz, unit=1)
#print(atom)
#fraga, fragb = frags
#chga, chgb = chgs
#spina, spinb = spins
allatom = range(1,len(atom)+1)
for k in range(len(frags)):
frag = frags[k]
chg = chgs[k]
spin = spins[k]
g_spin = spin_p2g(spin)
atomk = [atom[i-1] for i in frag]
atomother = [atom[i-1] for i in allatom if i not in frag]
print('fragment %d, chg %d, spin %d' % (k, chg, spin))
#print(atomk)
with open(gjfname+'%d.gjf'%k, 'w') as f:
f.write(gjfhead)
f.write('%d %d\n' % (chg, g_spin))
for a in atomk:
f.write('%s %10.5f %10.5f %10.5f\n' % (a[0], a[1][0], a[1][1], a[1][2]))
#f.write('\n')
if basis is not None:
f.write(basis)
#f.write('\n')
f.write(scrfhead)
f.write('ExtraSph=%d\n\n' % len(atomother))
for b in atomother:
rad = radii.uff_radii[b[0]] / 2.0
f.write(' %10.5f %10.5f %10.5f %10.5f\n' % (b[1][0], b[1][1], b[1][2], rad))
f.write('\n')
if wfnpath is not None:
f.write(wfnpath + '%d.wfn'%k + '\n')
f.write('\n')
| 30.109756 | 102 | 0.475901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 530 | 0.214662 |
141678d1dfdf2f7da83acc70199631b909f224bc | 7,485 | py | Python | SITE/main_file.py | S-Thaler/SITE | 3a055ec70eaed2c8701857dc36d279bc7bc987f6 | [
"MIT"
] | 5 | 2019-11-20T22:24:54.000Z | 2022-03-30T18:29:36.000Z | SITE/main_file.py | S-Thaler/SITE | 3a055ec70eaed2c8701857dc36d279bc7bc987f6 | [
"MIT"
] | null | null | null | SITE/main_file.py | S-Thaler/SITE | 3a055ec70eaed2c8701857dc36d279bc7bc987f6 | [
"MIT"
] | 1 | 2019-06-05T22:24:35.000Z | 2019-06-05T22:24:35.000Z | """
File to control the parameters of the SITE approach and to specify the postprocessing functionality.
The parameters for each equation are the ones used in the paper. All results of the paper
'Sparse Identification of Trunction Errors' of Thaler, Paehler and Adams, 2019 can be replicated only be
setting the appropriate parameters in this control file. The exceptions are the method of manufactured solutions
in the file 'ManufacturedSolutions', the derivations of the analytic modified differential equations (MDEs) in
the respective files and a few plots are generated in 'Postprocessing_Util'. For an understanding of the parameters
below in this file, we assume knowledge from the preprint of our paper.
"""
import SITE
import Postprocessing_Util
if __name__ == '__main__':
    # ########################### User input ##########################################################
    # discretization parameters:
    equation = 'Advection'  # other choices: 'Burgers' ; 'KdV'
    a = None  # initialize, such that input exists for Burgers, KdV, will be overwritten in Advection case
    x_min = 0.  # calculation domain Omega = [x_min, x_max]
    x_max = 1.
    # define discretization parameters and library design for each equation separately
    if equation == 'Advection':
        a = 1.
        x_nodes_list = [300]  # default choice
        # x_nodes_list = [200, 300, 400, 500]  # calculate term orders
        # to calculate resolution properties:
        # x_nodes_list = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 900, 1000]
        t_steps = 17  # 12 steps are padded
        cfl = 0.01
        # library parameters:
        D = 6  # highest derivative to be included in library
        P = 6  # highest polynomial order to multiply u with derivative basis functions
        # cumulative orders to include in the library; for definition see paper or 'findCombinations' in PDE_FIND_lib
        combinations = [1, 2, 3, 4, 5, 6]  # large library
        # combinations = None  # small library
        hotime = 0  # no higher order time drivatives
    elif equation == 'Burgers':
        x_nodes_list = [10000]  # default choice
        # x_nodes_list = [6000, 8000, 10000, 12000]  # calculate term orders
        # to calculate resolution properties:
        # x_nodes_list = [1000, 2000, 4000, 6000, 8000, 10000, 12000, 14000, 16000, 18000, 20000, 22000, 25000]
        t_steps = 17
        cfl = 0.5
        # library parameters:
        D = 3  # highest derivative to be included in library
        P = 3  # highest polynomial order to multiply u with derivative basis functions
        # cumulative orders to include in the library; for definition see paper or 'findCombinations' in PDE_FIND_lib
        combinations = [1, 2, 3]
        hotime = 0  # no higher order time drivatives
    elif equation == 'KdV':
        x_nodes_list = [100]  # default choice
        # x_nodes_list = [87, 100, 112, 125]  # calculate term orders
        # to calculate resolution properties:
        # x_nodes_list = [50, 60, 75, 87, 100, 110, 125, 135, 150, 175]
        t_steps = 19  # 14 are padded
        cfl = 1.e-6
        # library parameters:
        D = 7  # highest derivative to be included in library
        P = 5  # highest polynomial order to multiply u with derivative basis functions
        # cumulative orders to include in the library; for definition see paper or 'findCombinations' in PDE_FIND_lib
        combinations = [2, 3]
        # (removed an unused 'pairwise = 0' flag that was never read)
        hotime = 3  # higher order time derivatives appended up to 3rd order
    else:
        raise Exception('Equation not implemented! (or typo)')
    # accuracy orders of finite difference stencils used in PDE-FIND to build the library Theta(u) and u_t:
    acc_time = 8
    acc_space = 8
    # spline parameters:
    # n_ctr: number of NURBS control points within Omega
    n_ctr_train = 15  # needs to be larger than curve_degree + 3 to be able to enforce periodicity
    n_ctr_test = 11  # needs to be larger than curve_degree + 3 to be able to enforce periodicity
    curve_degree = 8  # degree of NURBS; should be high enough to ensure high order differentiability
    # number of points of spline per grid node used to interpolate spline values on grid points
    eval_points_per_node = 30
    # spline optimization parameters:
    bound_amplitude = 1.  # maximum allowed y-value of each control point
    particles = 50  # number of particles for particle swarm optimization
    iterations = 100  # number of iterations for particle swarm optimization
    # default particle swarm parameters (see documentation of pyswarms for its definitions):
    c1 = 0.5
    c2 = 0.3
    w_pso = 0.9
    # Preconditioner choices:
    # 'norm_precondition': scale the system matrix and apply a puffer transformation afterwards
    # 'norm': only scale the system matrix (robust default)
    # 'precondition': applies puffer transform without scaling first (depreciated)
    # None: use system matrix as obtained from PDE-FIND (depreciated)
    preconditioner = 'norm'
    # Initial condition choices:
    use_spline = True  # if True uses spline initialization; else the Gauss initial condition is used
    optimize_spline = False  # if True re-runs the particle swarm optimization of the spline; else loads saved spline
    # Specify which functionality to be used; setting both true does not make a lot of sense:
    # comparison of sparse regression algorithms for given preconditioner and discretization parameters
    comparison_sparse_regression_algorithms = False
    # study of resolution dependency for given preconditioner and sparse regression algorithm 'BIC_algo'
    plot_resolution_dependency = False
    BIC_algo = 'FoBa'  # sparse regression algorithm for resolution dependency and BIC model selection
    # whether to calculate the term orders:
    # the function assumes all models from the optimal choice to have the same non-zero parameters
    calculate_term_orders = False
    # ############################### End user input #######################################################
    # Runs SITE for given Parameters:
    # list initializations for evaluation of resolution properties
    best_model_list = []
    BIC_list = []
    for x_nodes in x_nodes_list:
        # BUG FIX: acc_time was previously passed as acc_space
        # ('acc_time=acc_space'), silently ignoring the acc_time setting.
        # Both default to 8, so results are unchanged for the shipped setup.
        BIC_model, best_model, rhs_description = SITE.site(
            equation, x_nodes, t_steps, D, P, combinations, optimize_spline=optimize_spline, x_min=x_min,
            x_max=x_max, acc_space=acc_space, acc_time=acc_time, preconditioner=preconditioner, a=a, cfl=cfl,
            n_ctr_train=n_ctr_train, n_ctr_test=n_ctr_test, curve_degree=curve_degree,
            eval_points_per_node=eval_points_per_node, bound_amplitude=bound_amplitude, particles=particles,
            iterations=iterations, c1=c1, c2=c2, w_pso=w_pso,
            comparison_sparse_regression_algorithms=comparison_sparse_regression_algorithms,
            use_spline=use_spline, hotime=hotime, BIC_algo=BIC_algo)
        # save BIC choice and optimal choice to evaluate resolution properties
        BIC_list.append(BIC_model)
        best_model_list.append(best_model)
    # Postprocessing
    if calculate_term_orders:
        Postprocessing_Util.calculate_orders(best_model_list, x_nodes_list)
    if plot_resolution_dependency:
        Postprocessing_Util.plot_resolution(best_model_list, BIC_list, x_nodes_list, equation, preconditioner, t_steps)
| 51.267123 | 119 | 0.689512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,851 | 0.648096 |
1416acb60382daea27591407e8abb38bce48a012 | 1,347 | py | Python | skin_detector/scripts.py | version0chiro/xilinx_Code | ae8a274d2f0c44d591ef4dd1d3a677556b31de41 | [
"Apache-2.0"
] | 154 | 2015-04-20T08:17:45.000Z | 2022-03-10T20:22:44.000Z | skin_detector/scripts.py | version0chiro/xilinx_Code | ae8a274d2f0c44d591ef4dd1d3a677556b31de41 | [
"Apache-2.0"
] | 10 | 2015-07-30T01:21:11.000Z | 2020-06-22T19:40:23.000Z | skin_detector/scripts.py | version0chiro/xilinx_Code | ae8a274d2f0c44d591ef4dd1d3a677556b31de41 | [
"Apache-2.0"
] | 47 | 2016-02-24T12:23:28.000Z | 2022-01-25T12:27:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Will Brennan'
import os
import cv2
import numpy
def find_images(path, recursive=False, ignore=True):
    """Yield paths of image files found at *path*.

    :param path: a single image file or a directory to scan
    :param recursive: descend into subdirectories (applied at every depth)
    :param ignore: if False, skip filenames containing a '-'
    :raises ValueError: if *path* is neither a file nor a directory
    """
    if os.path.isfile(path):
        # A direct file path is yielded as-is, without extension filtering.
        yield path
    elif os.path.isdir(path):
        assert isinstance(recursive, bool), 'FileIO - get_images: recursive must be a boolean variable'
        ext = ['png', 'jpg', 'jpeg']
        for path_a in os.listdir(path):
            path_a = path + '/' + path_a
            if os.path.isdir(path_a) and recursive:
                # Propagate the flags: previously the recursive call used the
                # defaults, so recursion stopped after the first level.
                for path_b in find_images(path_a, recursive=recursive, ignore=ignore):
                    yield path_b
            check_a = path_a.split('.')[-1] in ext
            check_b = ignore or ('-' not in path_a.split('/')[-1])
            if check_a and check_b:
                yield path_a
    else:
        raise ValueError('error! path is not a valid path or directory')
def display(title, img, max_size=200000):
    """Show *img* in an OpenCV window named *title*.

    The image is downscaled (aspect ratio preserved) so that its pixel
    count does not exceed *max_size*; smaller images are left unscaled.
    """
    assert isinstance(img, numpy.ndarray), 'img must be a numpy array'
    assert isinstance(title, str), 'title must be a string'
    num_pixels = img.shape[0] * img.shape[1]
    scale = numpy.sqrt(min(1.0, float(max_size) / num_pixels))
    # cv2.resize expects (width, height), i.e. (cols, rows).
    new_size = (int(scale * img.shape[1]), int(scale * img.shape[0]))
    cv2.imshow(title, cv2.resize(img, new_size))
| 35.447368 | 103 | 0.605791 | 0 | 0 | 850 | 0.631032 | 0 | 0 | 0 | 0 | 289 | 0.214551 |
1416c17873c1a9581d387ea49ff4f96596f2b2dc | 955 | py | Python | test/test_1.py | rgooler/AOC2019 | f761881240a5fe8711f730887f0f5033ea287e3d | [
"Apache-2.0"
] | null | null | null | test/test_1.py | rgooler/AOC2019 | f761881240a5fe8711f730887f0f5033ea287e3d | [
"Apache-2.0"
] | null | null | null | test/test_1.py | rgooler/AOC2019 | f761881240a5fe8711f730887f0f5033ea287e3d | [
"Apache-2.0"
] | null | null | null | from aoc2019 import *
import unittest
class Day1(unittest.TestCase):
    """Tests for the Rocket fuel-mass calculations (AoC 2019, day 1)."""

    def _check_simple(self, mass, expected):
        # One-shot fuel requirement for a given module mass.
        self.assertEqual(Rocket().calc_fuel_weight(mass), expected)

    def _check_recursive(self, mass, expected):
        # Recursive requirement: the added fuel itself also needs fuel.
        self.assertEqual(Rocket().calc_fuel_weight_recursive(mass), expected)

    def test_mass_12(self):
        self._check_simple(12, 2)

    def test_mass_14(self):
        self._check_simple(14, 2)

    def test_mass_1969(self):
        self._check_simple(1969, 654)

    def test_mass_100756(self):
        self._check_simple(100756, 33583)

    def test_mass2_12(self):
        self._check_recursive(12, 2)

    def test_mass2_1969(self):
        self._check_recursive(1969, 966)

    def test_mass2_100756(self):
        self._check_recursive(100756, 50346)
if __name__ == '__main__':
unittest.main() | 28.088235 | 73 | 0.668063 | 868 | 0.908901 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.010471 |
14184fc039d4f10e48794f5f948294e438d060ed | 1,397 | py | Python | xdtools/artwork/compound.py | tjcjc/xdtools | fbfa0b9ea52ab76594adea8c25157dc7b5360ed0 | [
"MIT"
] | 43 | 2018-01-19T22:14:37.000Z | 2022-01-10T22:53:17.000Z | xdtools/artwork/compound.py | tjcjc/xdtools | fbfa0b9ea52ab76594adea8c25157dc7b5360ed0 | [
"MIT"
] | 3 | 2018-01-03T03:26:11.000Z | 2021-12-31T17:21:14.000Z | xdtools/artwork/compound.py | tjcjc/xdtools | fbfa0b9ea52ab76594adea8c25157dc7b5360ed0 | [
"MIT"
] | 9 | 2019-08-01T14:02:08.000Z | 2021-11-30T16:23:02.000Z | """
Contains the definition of Compound.
"""
from xdtools.artwork import Artwork
from xdtools.utils import Point
class Compound(Artwork):
    """A compound shape built from several child artworks.

    === Attributes ===
    uid - the unique id of this Compound shape.
    name - the name of this Compound shape as it appears in the Layers panel.
    position - the position of this Compound shape.
    path - the path of this Compound shape.
    children - the children contained in this Compound shape.
    operation - the operation performed on the paths of this Compound shape.
    """

    def __init__(self, uid: int, path: str, operation: str, children=None,
                 name='Compound', x=0, y=0) -> None:
        """Instantiate a new Compound."""
        super().__init__(uid, 'compound', name)
        self.position = Point(x, y)
        self.operation = operation
        self.path = path
        # Never share a mutable default: each instance gets its own list.
        self.children = children if children is not None else []

    def __repr__(self) -> str:
        """Return a constructor-style representation of this Compound."""
        template = ("Compound(uid={}, type={}, path={}, operation={}, "
                    "children={}, name={}, position={}, styles={})")
        # NOTE(review): self.styles is presumably provided by Artwork — confirm.
        return template.format(
            repr(self.uid), repr(self.type), repr(self.path),
            repr(self.operation), repr(self.children), repr(self.name),
            repr(self.position), repr(self.styles))
| 34.925 | 88 | 0.623479 | 1,280 | 0.916249 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.502505 |
14195614c335ed63b791f528ce5c516b74a89827 | 9,084 | py | Python | appGUI/preferences/tools/ToolsCutoutPrefGroupUI.py | DannyPol/flatcam | 25a8634d0658e98b7fae31a095f8bef40c1b3067 | [
"MIT"
] | 1 | 2022-02-11T06:19:34.000Z | 2022-02-11T06:19:34.000Z | appGUI/preferences/tools/ToolsCutoutPrefGroupUI.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | appGUI/preferences/tools/ToolsCutoutPrefGroupUI.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | from PyQt5 import QtWidgets
from PyQt5.QtCore import QSettings
from appGUI.GUIElements import FCDoubleSpinner, FCCheckBox, RadioSet, FCComboBox, FCLabel
from appGUI.preferences import machinist_setting
from appGUI.preferences.OptionsGroupUI import OptionsGroupUI
import gettext
import appTranslation as fcTranslate
import builtins
fcTranslate.apply_language('strings')
# Fall back to plain gettext when the translation layer did not install '_'.
if '_' not in builtins.__dict__:
    _ = gettext.gettext
# Persisted "machinist" flag: when non-zero, the spinner ranges built below
# are widened (e.g. positive cut depths become allowed).
settings = QSettings("Open Source", "FlatCAM")
if settings.contains("machinist"):
    machinist_setting = settings.value('machinist', type=int)
else:
    machinist_setting = 0
class ToolsCutoutPrefGroupUI(OptionsGroupUI):
    """Preferences page with the default parameters of the Cutout Tool.

    The constructor builds the whole Qt widget tree for the cutout
    options: tool diameter, cut depth, multi-depth, object kind, margin,
    gap size/type, thin-gap depth, mouse-bites parameters, gap placement,
    convex-shape and big-cursor flags.  The widgets are read/written later
    through their attribute names by the preferences machinery.
    """
    def __init__(self, decimals=4, parent=None):
        """Create all option widgets.

        :param decimals: precision used by the numeric spinner entries
        :param parent: optional Qt parent widget
        """
        # OptionsGroupUI.__init__(self, "Cutout Tool Options", parent=parent)
        # NOTE(review): `self` is passed in the first positional slot of the
        # parent __init__; the title is overwritten by setTitle() below —
        # confirm against OptionsGroupUI.__init__.
        super(ToolsCutoutPrefGroupUI, self).__init__(self, parent=parent)
        self.setTitle(str(_("Cutout Tool Options")))
        self.decimals = decimals
        # ## Board cutout
        self.board_cutout_label = FCLabel("<b>%s:</b>" % _("Parameters"))
        self.board_cutout_label.setToolTip(
            _("Create toolpaths to cut around\n"
              "the PCB and separate it from\n"
              "the original board.")
        )
        self.layout.addWidget(self.board_cutout_label)
        # All option rows live in one grid layout.
        grid0 = QtWidgets.QGridLayout()
        self.layout.addLayout(grid0)
        tdclabel = FCLabel('%s:' % _('Tool Diameter'))
        tdclabel.setToolTip(
            _("Diameter of the tool used to cutout\n"
              "the PCB shape out of the surrounding material.")
        )
        self.cutout_tooldia_entry = FCDoubleSpinner()
        self.cutout_tooldia_entry.set_range(0.000001, 10000.0000)
        self.cutout_tooldia_entry.set_precision(self.decimals)
        self.cutout_tooldia_entry.setSingleStep(0.1)
        grid0.addWidget(tdclabel, 0, 0)
        grid0.addWidget(self.cutout_tooldia_entry, 0, 1)
        # Cut Z
        cutzlabel = FCLabel('%s:' % _('Cut Z'))
        cutzlabel.setToolTip(
            _(
                "Cutting depth (negative)\n"
                "below the copper surface."
            )
        )
        self.cutz_entry = FCDoubleSpinner()
        self.cutz_entry.set_precision(self.decimals)
        # Machinist mode also allows positive (normally forbidden) depths.
        if machinist_setting == 0:
            self.cutz_entry.setRange(-10000.0000, 0.0000)
        else:
            self.cutz_entry.setRange(-10000.0000, 10000.0000)
        self.cutz_entry.setSingleStep(0.1)
        grid0.addWidget(cutzlabel, 1, 0)
        grid0.addWidget(self.cutz_entry, 1, 1)
        # Multi-pass
        self.mpass_cb = FCCheckBox('%s:' % _("Multi-Depth"))
        self.mpass_cb.setToolTip(
            _(
                "Use multiple passes to limit\n"
                "the cut depth in each pass. Will\n"
                "cut multiple times until Cut Z is\n"
                "reached."
            )
        )
        self.maxdepth_entry = FCDoubleSpinner()
        self.maxdepth_entry.set_precision(self.decimals)
        self.maxdepth_entry.setRange(0, 10000.0000)
        self.maxdepth_entry.setSingleStep(0.1)
        self.maxdepth_entry.setToolTip(_("Depth of each pass (positive)."))
        grid0.addWidget(self.mpass_cb, 2, 0)
        grid0.addWidget(self.maxdepth_entry, 2, 1)
        # Object kind
        kindlabel = FCLabel('%s:' % _('Kind'))
        kindlabel.setToolTip(
            _("Choice of what kind the object we want to cutout is.\n"
              "- Single: contain a single PCB Gerber outline object.\n"
              "- Panel: a panel PCB Gerber object, which is made\n"
              "out of many individual PCB outlines.")
        )
        self.obj_kind_combo = RadioSet([
            {"label": _("Single"), "value": "single"},
            {"label": _("Panel"), "value": "panel"},
        ])
        grid0.addWidget(kindlabel, 3, 0)
        grid0.addWidget(self.obj_kind_combo, 3, 1)
        marginlabel = FCLabel('%s:' % _('Margin'))
        marginlabel.setToolTip(
            _("Margin over bounds. A positive value here\n"
              "will make the cutout of the PCB further from\n"
              "the actual PCB border")
        )
        self.cutout_margin_entry = FCDoubleSpinner()
        self.cutout_margin_entry.set_range(-10000.0000, 10000.0000)
        self.cutout_margin_entry.set_precision(self.decimals)
        self.cutout_margin_entry.setSingleStep(0.1)
        grid0.addWidget(marginlabel, 4, 0)
        grid0.addWidget(self.cutout_margin_entry, 4, 1)
        # Gap Size
        gaplabel = FCLabel('%s:' % _('Gap size'))
        gaplabel.setToolTip(
            _("The size of the bridge gaps in the cutout\n"
              "used to keep the board connected to\n"
              "the surrounding material (the one \n"
              "from which the PCB is cutout).")
        )
        self.cutout_gap_entry = FCDoubleSpinner()
        self.cutout_gap_entry.set_range(0.000001, 10000.0000)
        self.cutout_gap_entry.set_precision(self.decimals)
        self.cutout_gap_entry.setSingleStep(0.1)
        grid0.addWidget(gaplabel, 5, 0)
        grid0.addWidget(self.cutout_gap_entry, 5, 1)
        # Gap Type
        self.gaptype_label = FCLabel('%s:' % _("Gap type"))
        self.gaptype_label.setToolTip(
            _("The type of gap:\n"
              "- Bridge -> the cutout will be interrupted by bridges\n"
              "- Thin -> same as 'bridge' but it will be thinner by partially milling the gap\n"
              "- M-Bites -> 'Mouse Bites' - same as 'bridge' but covered with drill holes")
        )
        self.gaptype_radio = RadioSet(
            [
                {'label': _('Bridge'), 'value': 'b'},
                {'label': _('Thin'), 'value': 'bt'},
                {'label': "M-Bites", 'value': 'mb'}
            ],
            stretch=True
        )
        grid0.addWidget(self.gaptype_label, 7, 0)
        grid0.addWidget(self.gaptype_radio, 7, 1)
        # Thin gaps Depth
        self.thin_depth_label = FCLabel('%s:' % _("Depth"))
        self.thin_depth_label.setToolTip(
            _("The depth until the milling is done\n"
              "in order to thin the gaps.")
        )
        self.thin_depth_entry = FCDoubleSpinner()
        self.thin_depth_entry.set_precision(self.decimals)
        # Machinist mode again widens the permitted depth range.
        if machinist_setting == 0:
            self.thin_depth_entry.setRange(-10000.0000, -0.00001)
        else:
            self.thin_depth_entry.setRange(-10000.0000, 10000.0000)
        self.thin_depth_entry.setSingleStep(0.1)
        grid0.addWidget(self.thin_depth_label, 9, 0)
        grid0.addWidget(self.thin_depth_entry, 9, 1)
        # Mouse Bites Tool Diameter
        self.mb_dia_label = FCLabel('%s:' % _("Tool Diameter"))
        self.mb_dia_label.setToolTip(
            _("The drill hole diameter when doing mouse bites.")
        )
        self.mb_dia_entry = FCDoubleSpinner()
        self.mb_dia_entry.set_precision(self.decimals)
        self.mb_dia_entry.setRange(0, 100.0000)
        grid0.addWidget(self.mb_dia_label, 11, 0)
        grid0.addWidget(self.mb_dia_entry, 11, 1)
        # Mouse Bites Holes Spacing
        self.mb_spacing_label = FCLabel('%s:' % _("Spacing"))
        self.mb_spacing_label.setToolTip(
            _("The spacing between drill holes when doing mouse bites.")
        )
        self.mb_spacing_entry = FCDoubleSpinner()
        self.mb_spacing_entry.set_precision(self.decimals)
        self.mb_spacing_entry.setRange(0, 100.0000)
        grid0.addWidget(self.mb_spacing_label, 13, 0)
        grid0.addWidget(self.mb_spacing_entry, 13, 1)
        gaps_label = FCLabel('%s:' % _('Gaps'))
        gaps_label.setToolTip(
            _("Number of gaps used for the cutout.\n"
              "There can be maximum 8 bridges/gaps.\n"
              "The choices are:\n"
              "- None - no gaps\n"
              "- lr - left + right\n"
              "- tb - top + bottom\n"
              "- 4 - left + right +top + bottom\n"
              "- 2lr - 2*left + 2*right\n"
              "- 2tb - 2*top + 2*bottom\n"
              "- 8 - 2*left + 2*right +2*top + 2*bottom")
        )
        self.gaps_combo = FCComboBox()
        grid0.addWidget(gaps_label, 15, 0)
        grid0.addWidget(self.gaps_combo, 15, 1)
        gaps_items = ['None', 'LR', 'TB', '4', '2LR', '2TB', '8']
        for it in gaps_items:
            self.gaps_combo.addItem(it)
            # self.gaps_combo.setStyleSheet('background-color: rgb(255,255,255)')
        # Surrounding convex box shape
        self.convex_box = FCCheckBox('%s' % _("Convex Shape"))
        self.convex_box.setToolTip(
            _("Create a convex shape surrounding the entire PCB.\n"
              "Used only if the source object type is Gerber.")
        )
        grid0.addWidget(self.convex_box, 17, 0, 1, 2)
        self.big_cursor_cb = FCCheckBox('%s' % _("Big cursor"))
        self.big_cursor_cb.setToolTip(
            _("Use a big cursor when adding manual gaps."))
        grid0.addWidget(self.big_cursor_cb, 19, 0, 1, 2)
        self.layout.addStretch()
| 36.926829 | 96 | 0.593901 | 8,475 | 0.932959 | 0 | 0 | 0 | 0 | 0 | 0 | 2,487 | 0.273778 |
14198076681f0298be4f5ae40bc2ae194fb807eb | 1,094 | py | Python | venv/lib/python2.7/site-packages/SimpleGUICS2Pygame/test/test_sound.py | CrystalCodes01/python-pong | 3d812f53928ecfb791fa5ab47585f02ac68ec2a0 | [
"BSD-3-Clause"
] | null | null | null | venv/lib/python2.7/site-packages/SimpleGUICS2Pygame/test/test_sound.py | CrystalCodes01/python-pong | 3d812f53928ecfb791fa5ab47585f02ac68ec2a0 | [
"BSD-3-Clause"
] | null | null | null | venv/lib/python2.7/site-packages/SimpleGUICS2Pygame/test/test_sound.py | CrystalCodes01/python-pong | 3d812f53928ecfb791fa5ab47585f02ac68ec2a0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: latin-1 -*-
"""
Test play sounds. (January 1st, 2015)
Piece of SimpleGUICS2Pygame.
https://bitbucket.org/OPiMedia/simpleguics2pygame
GPLv3 --- Copyright (C) 2015 Olivier Pirson
http://www.opimedia.be/
"""
import time
try:
import simplegui
SIMPLEGUICS2PYGAME = False
except ImportError:
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
SIMPLEGUICS2PYGAME = True
TEST = 'test sound'  # NOTE(review): appears unused in this script
sound_jump = simplegui.Sound('http://commondatastorage.googleapis.com/codeskulptor-assets/jump.ogg')
if SIMPLEGUICS2PYGAME:
    # _LocalSound exists only in the SimpleGUICS2Pygame implementation.
    local_sound_chirp = simplegui._LocalSound('_snd/chirp_1s.wav')
def wait(seconds):
    """
    Busy-wait during `seconds` seconds.
    :param seconds: (int or float) >= 0
    """
    assert isinstance(seconds, int) or isinstance(seconds, float), \
        type(seconds)
    start = time.time()
    # Compare against `seconds`; the original hard-coded 1, so every call
    # waited one second regardless of the argument.
    while time.time() - start < seconds:
        pass
# Main
wait(1)  # pause one second before playing the first sound
print('Play "jump.ogg"')
sound_jump.play()
wait(1)  # keep the script alive while the sound plays
if SIMPLEGUICS2PYGAME:
    print('Play local "chirp_1s.wav"')
    local_sound_chirp.play()
    wait(1)
| 18.542373 | 100 | 0.691042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.436929 |
141b52bbd8190072c77e920996e410dd59fb3378 | 2,270 | py | Python | oslo_config/_list_opts.py | CyrilRoelandteNovance/oslo.config | 7e202148022a289d934bb27b3397ca5b49b52a96 | [
"Apache-1.1"
] | 110 | 2015-01-29T20:10:46.000Z | 2022-03-21T12:29:21.000Z | oslo_config/_list_opts.py | CyrilRoelandteNovance/oslo.config | 7e202148022a289d934bb27b3397ca5b49b52a96 | [
"Apache-1.1"
] | 1 | 2019-03-16T18:35:42.000Z | 2019-03-16T19:40:14.000Z | oslo_config/_list_opts.py | CyrilRoelandteNovance/oslo.config | 7e202148022a289d934bb27b3397ca5b49b52a96 | [
"Apache-1.1"
] | 115 | 2015-01-14T03:25:05.000Z | 2021-12-02T16:49:06.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import inspect
from oslo_config import cfg
import stevedore
def list_opts():
    """Build the option listing used for sample-config generation.

    Returns a list of ``(group, options)`` pairs: first the default
    discovery options (group ``None``), then one sample group per
    registered ``oslo.config.driver`` source plugin.
    """
    config_files = [
        '~/.project/project.conf',
        '~/project.conf',
        '/etc/project/project.conf',
        '/etc/project.conf',
    ]
    config_dirs = [
        '~/.project/project.conf.d/',
        '~/project.conf.d/',
        '/etc/project/project.conf.d/',
        '/etc/project.conf.d/',
    ]
    discovery_opts = cfg.ConfigOpts._list_options_for_discovery(
        config_files,
        config_dirs,
    )
    options = [(None, discovery_opts)]
    ext_mgr = stevedore.ExtensionManager(
        "oslo.config.driver",
        invoke_on_load=True)
    for name in ext_mgr.names():
        driver = ext_mgr[name].obj
        driver_opts = copy.deepcopy(driver.list_options_for_discovery())
        description = inspect.getdoc(driver)
        # Every sample group starts with the mandatory 'driver' option.
        driver_opts.insert(0, cfg.StrOpt(
            name='driver',
            sample_default=name,
            help=cfg._SOURCE_DRIVER_OPTION_HELP,
        ))
        help_text = 'Example of using a {} source'.format(name)
        if description:
            help_text = '{}\n\n{}: {}'.format(help_text, name, description)
        group = cfg.OptGroup(
            name='sample_{}_source'.format(name),
            help=help_text,
            driver_option='driver',
            dynamic_group_owner='config_source',
        )
        options.append((group, driver_opts))
    return options
| 31.09589 | 78 | 0.614537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.378855 |
141b77a16ef75fb27fd558b08965a19174a77d27 | 2,202 | py | Python | dialogue/moviebot_data/batch.py | waynewu6250/ChatBoxer | ae73604d4778b3b5223049e73e696ad66239c0ff | [
"MIT"
] | 7 | 2019-04-18T14:40:37.000Z | 2021-05-11T08:36:21.000Z | dialogue/moviebot_data/batch.py | waynewu6250/ChatBoxer | ae73604d4778b3b5223049e73e696ad66239c0ff | [
"MIT"
] | 6 | 2020-06-05T20:20:50.000Z | 2021-06-10T17:48:56.000Z | dialogue/moviebot_data/batch.py | waynewu6250/ChatBoxer | ae73604d4778b3b5223049e73e696ad66239c0ff | [
"MIT"
] | 2 | 2019-07-26T06:07:00.000Z | 2020-06-25T17:34:47.000Z | import torch as t
from torch.autograd import Variable
import numpy as np
import re
import pickle
import h5py
from .load import Data
from .load_chinese import ChineseData
class TrainData:
def __init__(self, data_path, conversation_path, results_path, chinese, prev_sent=2, load=True):
if chinese:
self.data = ChineseData(data_path, results_path, load)
else:
self.data = Data(data_path, conversation_path, results_path, prev_sent, load)
self.chinese = chinese
def _mini_batches(self, batch_size):
self.indices_sequences = [(i,j) for i,j in zip(self.data.encoder_input_data, self.data.decoder_input_data)]
np.random.shuffle(self.indices_sequences)
mini_batches = [self.indices_sequences[k: k + batch_size] for k in range(0, len(self.indices_sequences), batch_size)]
for batch in mini_batches:
seq_pairs = sorted(batch, key=lambda seqs: len(seqs[0]), reverse=True) # sorted by input_lengths
input_seqs = [pair[0] for pair in seq_pairs]
target_seqs = [pair[1] for pair in seq_pairs]
input_var = Variable(t.LongTensor(input_seqs)).transpose(0, 1) # time * batch
target_var = Variable(t.LongTensor(target_seqs)).transpose(0, 1) # time * batch
yield (input_var, target_var)
# For evaluation state
def tokenize_seq(self, input_data, mxlen):
if self.chinese:
token_data = ["<START>"]+input_data[:mxlen-2]+["<EOS>"]
encoder_data = np.zeros((1, mxlen), dtype='float32')
else:
token_data = self.data.text_prepare(input_data)[:mxlen]
encoder_data = np.zeros((1, mxlen), dtype='float32')
for t, word in enumerate(token_data):
if word in self.data.word2id:
encoder_data[0, t] = self.data.word2id[word]
else:
encoder_data[0, t] = 3
return encoder_data
def _test_batch(self, input_data, mxlen):
encoder_data = self.tokenize_seq(input_data, mxlen)
input_var = Variable(t.LongTensor(encoder_data)).transpose(0, 1)
return input_var | 38.631579 | 125 | 0.637148 | 2,031 | 0.922343 | 838 | 0.380563 | 0 | 0 | 0 | 0 | 109 | 0.0495 |
141dce44aa13b06a05bddc7ee3b9b4a853b80ebc | 2,935 | py | Python | mediawikiapi/config.py | srevinsaju/MediaWikiAPI | b65dfa3d6a1798ddaa8569c9dbab3b397f305431 | [
"MIT"
] | null | null | null | mediawikiapi/config.py | srevinsaju/MediaWikiAPI | b65dfa3d6a1798ddaa8569c9dbab3b397f305431 | [
"MIT"
] | null | null | null | mediawikiapi/config.py | srevinsaju/MediaWikiAPI | b65dfa3d6a1798ddaa8569c9dbab3b397f305431 | [
"MIT"
] | null | null | null | import requests
from datetime import timedelta
from .language import Language
class Config(object):
    """Global configuration for the MediaWiki API client.

    Holds the active language, the request user agent, the optional rate
    limit between requests and the request timeout.
    """
    DEFAULT_USER_AGENT = 'mediawikiapi (https://github.com/lehinevych/MediaWikiAPI/)'
    DONATE_URL = 'https://donate.wikimedia.org/w/index.php?title=Special:FundraiserLandingPage'
    API_URL = 'https://{}.wikipedia.org/w/api.php'

    def __init__(self, language=None, user_agent=None, rate_limit=None):
        self.__lang = Language(language) if language is not None else Language()
        self.__rate_limit = rate_limit
        self.__rate_limit_last_call = None
        self.timeout = None
        self.user_agent = user_agent or self.DEFAULT_USER_AGENT

    @classmethod
    def donate_url(cls):
        '''Return media wiki donate url'''
        return cls.DONATE_URL

    @property
    def language(self):
        '''Return current global language'''
        return self.__lang.language

    @language.setter
    def language(self, language):
        '''Set a new language
        Arguments:
        * language - (string or Language instance) specifying the language
        '''
        if isinstance(language, Language):
            self.__lang = language
        else:
            self.__lang.language = language

    def get_api_url(self, language=None):
        '''Return api for specified language
        Arguments:
        * language - (string or Language instance) specifying the language
        '''
        if language is None:
            return self.API_URL.format(self.__lang.language)
        if isinstance(language, Language):
            return self.API_URL.format(language.language)
        # Constructing Language() performs the language verification.
        return self.API_URL.format(Language(language).language)

    @property
    def rate_limit(self):
        return self.__rate_limit

    @rate_limit.setter
    def rate_limit(self, rate_limit):
        '''
        Enable or disable rate limiting on requests to the Mediawiki servers.
        If rate limiting is not enabled, under some circumstances (depending on
        load on Wikipedia, the number of requests you and other `wikipedia` users
        are making, and other factors), Wikipedia may return an HTTP timeout error.
        Enabling rate limiting generally prevents that issue, but please note that
        HTTPTimeoutError still might be raised.
        Arguments:
        * min_wait - (integer or timedelta) describes the minimum time to wait in miliseconds before requests.
               Example timedelta(milliseconds=50). If None, rate_limit won't be used.
        '''
        if rate_limit is None:
            self.__rate_limit = None
        elif isinstance(rate_limit, timedelta):
            self.__rate_limit = rate_limit
        else:
            self.__rate_limit = timedelta(milliseconds=rate_limit)
        self.__rate_limit_last_call = None

    @property
    def rate_limit_last_call(self):
        return self.__rate_limit_last_call

    @rate_limit_last_call.setter
    def rate_limit_last_call(self, last_call):
        self.__rate_limit_last_call = last_call
| 30.894737 | 106 | 0.716865 | 2,854 | 0.972402 | 0 | 0 | 1,705 | 0.58092 | 0 | 0 | 1,211 | 0.412606 |
141e206d570003abeaf4105dda08506c129a99b2 | 403 | py | Python | abc/abc107/abc107b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc107/abc107b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc107/abc107b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | from sys import stdout
H, W = map(int, input().split())
a = [input() for _ in range(H)]
# h[i] is True when row i consists only of '.' (the row gets removed).
h = [all(c == '.' for c in a[i]) for i in range(H)]
# w[j] ends up True when column j consists only of '.' (column removed).
w = [True] * W
for i in range(H):
    for j in range(W):
        w[j] = w[j] and a[i][j] == '.'
# Print the grid with the all-white rows and columns compressed away.
for i in range(H):
    if h[i]:
        continue
    for j in range(W):
        if w[j]:
            continue
        stdout.write(a[i][j])
    stdout.write('\n')
| 19.190476 | 51 | 0.48139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.024814 |
141efbd7b29a4f99f66f1a42fcfecd8aca71c828 | 175 | py | Python | courses/E375004/fastapi/src/warehouse_app/dto.py | CVUT-FS-12110/Python | 9faef8bba09d1b3382c28f29bdb8eaf144132d7d | [
"MIT"
] | 16 | 2020-03-24T10:08:58.000Z | 2022-03-08T17:18:46.000Z | courses/E375004/fastapi/src/warehouse_app/dto.py | CVUT-FS-12110/Python | 9faef8bba09d1b3382c28f29bdb8eaf144132d7d | [
"MIT"
] | 2 | 2021-05-05T08:13:24.000Z | 2022-02-28T13:22:15.000Z | courses/E375004/fastapi/src/warehouse_app/dto.py | CVUT-FS-12110/Python | 9faef8bba09d1b3382c28f29bdb8eaf144132d7d | [
"MIT"
] | 27 | 2020-03-14T12:51:36.000Z | 2022-03-22T18:47:23.000Z | """ Data Transfer Objects """
from pydantic import BaseModel
class WarehouseDto(BaseModel):
    """Warehouse payload validated by pydantic at the API boundary."""
    name: str  # this is our unique identifier!
    location: str  # human-readable location of the warehouse
    capacity: int  # maximum capacity; units not specified here — confirm
141f274293bd89340f4b94ad63ff87c3d7c2590d | 155 | py | Python | ecom/carts/admin.py | Bhavitg/FashionKart-Ecommerce | 27327e074effad54d15decae81f41cb722792ab6 | [
"MIT"
] | null | null | null | ecom/carts/admin.py | Bhavitg/FashionKart-Ecommerce | 27327e074effad54d15decae81f41cb722792ab6 | [
"MIT"
] | null | null | null | ecom/carts/admin.py | Bhavitg/FashionKart-Ecommerce | 27327e074effad54d15decae81f41cb722792ab6 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Cart,CartItem
admin.site.register(Cart)  # expose Cart in the Django admin site
admin.site.register(CartItem)  # expose CartItem in the Django admin site
| 17.222222 | 33 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.180645 |
141fc184c8d85e8a094bac9ac900fe97cd870755 | 2,865 | py | Python | BboxToolkit/datasets/io.py | qilei123/BboxToolkit | 97f61ae97449009c7952f648be57a28d35c2f39b | [
"Apache-2.0"
] | 38 | 2021-02-05T14:39:11.000Z | 2022-03-28T07:41:41.000Z | BboxToolkit/datasets/io.py | qilei123/BboxToolkit | 97f61ae97449009c7952f648be57a28d35c2f39b | [
"Apache-2.0"
] | 5 | 2021-11-01T08:17:09.000Z | 2022-03-28T12:40:06.000Z | BboxToolkit/datasets/io.py | qilei123/BboxToolkit | 97f61ae97449009c7952f648be57a28d35c2f39b | [
"Apache-2.0"
] | 13 | 2021-08-20T12:26:04.000Z | 2022-03-18T12:45:31.000Z | import os
import os.path as osp
import pickle
import time
import numpy as np
from multiprocessing import Pool
from ..utils import get_bbox_dim
from .misc import read_img_info, change_cls_order, get_classes
def load_imgs(img_dir, ann_dir=None, classes=None, nproc=10,
              def_bbox_type='poly'):
    """Collect image information for every file in ``img_dir``.

    :param img_dir: directory containing the images
    :param ann_dir: unused, kept for interface symmetry with other loaders
    :param classes: class names; defaults to an empty tuple
    :param nproc: number of worker processes used to read image headers
    :param def_bbox_type: bbox style for the empty default annotations
                          ('hbb', 'obb', 'poly') or None to skip them
    :return: ``(infos, classes)`` where ``infos`` is a list of per-image dicts
    """
    assert def_bbox_type in ['hbb', 'obb', 'poly', None]
    assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
    if ann_dir is not None:
        print('ann_dir is no use in load_imgs function')
    print('Starting loading images information')
    start_time = time.time()
    imgpaths = [osp.join(img_dir, imgfile)
                for imgfile in os.listdir(img_dir)]
    if nproc > 1:
        # Context manager shuts the pool down even when map() raises.
        with Pool(nproc) as pool:
            infos = pool.map(read_img_info, imgpaths)
    else:
        infos = list(map(read_img_info, imgpaths))
    if def_bbox_type is not None:
        # bbox_dim is loop-invariant; compute it once instead of per image.
        bbox_dim = get_bbox_dim(def_bbox_type)
        for info in infos:
            if info is None:
                continue
            bboxes = np.zeros((0, bbox_dim), dtype=np.float32)
            labels = np.zeros((0, ), dtype=np.int64)
            info['ann'] = dict(bboxes=bboxes, labels=labels)
    classes = () if classes is None else classes
    end_time = time.time()
    print(f'Finishing loading images, get {len(infos)} images,',
          f'using {end_time-start_time:.3f}s.')
    return infos, classes
def load_pkl(ann_dir, img_dir=None, classes=None, nproc=10):
    """Load a dataset pkl and optionally refresh image info from disk.

    :param ann_dir: path of the pkl file (a dict with 'cls' and 'content')
    :param img_dir: if given, re-read image info for every content entry
    :param classes: override class names; content is remapped to this order
    :param nproc: number of worker processes used to read image headers
    :return: ``(contents, classes)``
    """
    assert osp.isfile(ann_dir), f'The {ann_dir} is not an existing pkl file!'
    assert img_dir is None or osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
    print('Starting loading pkl information')
    start_time = time.time()
    # Close the file deterministically instead of leaking the handle.
    with open(ann_dir, 'rb') as f:
        data = pickle.load(f)
    old_classes, contents = data['cls'], data['content']
    if img_dir is not None:
        imgpaths = [osp.join(img_dir, content['filename'])
                    for content in contents]
        if nproc > 1:
            # Context manager shuts the pool down even when map() raises.
            with Pool(nproc) as pool:
                infos = pool.map(read_img_info, imgpaths)
        else:
            infos = list(map(read_img_info, imgpaths))
        for info, content in zip(infos, contents):
            content.update(info)
    if classes is None:
        classes = old_classes
    else:
        classes = get_classes(classes)
        change_cls_order(contents, old_classes, classes)
    end_time = time.time()
    print(f'Finishing loading pkl, get {len(contents)} images,',
          f'using {end_time-start_time:.3f}s.')
    return contents, classes
def save_pkl(save_dir, contents, classes):
    """Dump ``(classes, contents)`` into a pkl file, creating parent dirs.

    :param save_dir: target file path; must end with '.pkl'
    :param contents: list of per-image dicts
    :param classes: class names stored under the 'cls' key
    """
    assert save_dir.endswith('.pkl')
    filepath = osp.split(save_dir)[0]
    # Guard against '' so a bare filename does not trigger os.makedirs('').
    if filepath and not osp.exists(filepath):
        os.makedirs(filepath)
    data = dict(cls=classes, content=contents)
    # Close the file deterministically instead of leaking the handle.
    with open(save_dir, 'wb') as f:
        pickle.dump(data, f)
| 32.931034 | 90 | 0.635951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.167539 |
141ff955c77ded0e08d843f987f0b62eb3d4713d | 562 | py | Python | Fusion/tests/cwin.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | Fusion/tests/cwin.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | Fusion/tests/cwin.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | def cwinstart(callobj, *args, **kwargs):
print('cwinstart')
print(' args', repr(args))
for arg in args:
print(' ', arg)
print(' kwargs', len(kwargs))
for k, v in kwargs.items():
print(' ', k, v)
w = callobj(*args, **kwargs)
print(' callobj()->', w)
return w
def cwincall(req1, req2, *args, **kwargs):
  """Debug stub: echo the received arguments to stdout, return 'tomorrow'."""
  print('cwincall')
  print('  req1=', req1, 'req2=', req2)
  print('  args', repr(args))
  for extra in args:
    print('    ', extra)
  print('kwargs')
  for key, value in kwargs.items():
    print('    ', key, value)
  return 'tomorrow'
| 21.615385 | 42 | 0.55694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.213523 |
1420f29df1fa70eb2e3757666daa61e2e39f87c0 | 358 | py | Python | setup.py | rchurch4/emoji_map | f924e6d8d059c4705d3360a690eaac2825de3e49 | [
"MIT"
] | null | null | null | setup.py | rchurch4/emoji_map | f924e6d8d059c4705d3360a690eaac2825de3e49 | [
"MIT"
] | null | null | null | setup.py | rchurch4/emoji_map | f924e6d8d059c4705d3360a690eaac2825de3e49 | [
"MIT"
] | null | null | null | from setuptools import setup
# Standard setuptools packaging metadata for the emoji_map distribution.
setup(name='emoji_map',
      version='0.1',
      description='Maps unicode emoji to its description',
      url='http://github.com/rchurch4/emoji_map',
      author='Rob Churchill',
      author_email='rchurch4@bu.edu',
      license='MIT',
      packages=['emoji_map'],
      include_package_data=True,
      zip_safe=False)
| 27.538462 | 58 | 0.656425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.393855 |
142176f2250823447a68c6f5a9ee79511f0fb3b0 | 1,034 | py | Python | tests/conftest.py | EvanKomp/molscore | e21c2dcba467228e498827dd5218ac8c9202bb5e | [
"MIT"
] | null | null | null | tests/conftest.py | EvanKomp/molscore | e21c2dcba467228e498827dd5218ac8c9202bb5e | [
"MIT"
] | 1 | 2021-09-15T20:27:43.000Z | 2021-09-15T20:27:43.000Z | tests/conftest.py | EvanKomp/molscore | e21c2dcba467228e498827dd5218ac8c9202bb5e | [
"MIT"
] | null | null | null | import json
import os
import pytest
import tempfile
@pytest.fixture(scope='session', autouse=True)
def working_test_dir():
    """Session-scoped temporary directory used by all tests.

    Yields the directory path; the directory is removed when the test
    session ends.
    """
    # The context manager guarantees cleanup even if teardown is
    # interrupted, unlike a bare TemporaryDirectory + manual cleanup().
    with tempfile.TemporaryDirectory() as tmp_dir_name:
        yield tmp_dir_name
@pytest.fixture(scope='session', autouse=True)
def patched_config_file(working_test_dir):
    """Point molscore's config.json at the session tmp dir for the tests.

    The original configuration is read before the session and written
    back afterwards.
    """
    this_dir, _this_filename = os.path.split(__file__)
    config_path = os.path.join(this_dir, "../molscore/config.json")
    with open(config_path, 'r') as fin:
        config_save = json.load(fin)
    # json.dump handles quoting/escaping of arbitrary directory names,
    # unlike the previous manual string concatenation.
    with open(config_path, 'w') as fout:
        json.dump({"DEFAULT_DATABASE_ROOT": str(working_test_dir) + '/data'}, fout)
    yield None
    # Restore the original configuration after the session.
    with open(config_path, 'w') as fout:
        json.dump(config_save, fout)
| 27.945946 | 73 | 0.692456 | 0 | 0 | 882 | 0.852998 | 976 | 0.943907 | 0 | 0 | 296 | 0.286267 |
1421a793494f3eba53446dfb7b7c27fbcbbb120e | 6,348 | py | Python | rlcard3/games/gin_rummy/utils/melding.py | cogitoergoread/muszi-macrohard.hu | e9bbd36b789e670f96622a3a2ba8327f0d897561 | [
"MIT"
] | 1 | 2021-05-27T03:40:37.000Z | 2021-05-27T03:40:37.000Z | rlcard3/games/gin_rummy/utils/melding.py | cogitoergoread/muszi-macrohard.hu | e9bbd36b789e670f96622a3a2ba8327f0d897561 | [
"MIT"
] | null | null | null | rlcard3/games/gin_rummy/utils/melding.py | cogitoergoread/muszi-macrohard.hu | e9bbd36b789e670f96622a3a2ba8327f0d897561 | [
"MIT"
] | 1 | 2022-02-24T11:25:22.000Z | 2022-02-24T11:25:22.000Z | '''
File name: gin_rummy/melding.py
Author: William Hale
Date created: 2/12/2020
'''
from rlcard3.games.gin_rummy.card import Card
import rlcard3.games.gin_rummy.utils.utils as utils
from typing import List
from typing import Set
import copy
import random
"""
Terminology:
run_meld - three or more cards of same suit in sequence
set_meld - three or more cards of same rank
meld_pile - a run_meld or a set_meld
meld_piles - a list of meld_pile
meld_cluster - same as meld_piles, but usually with the piles being mutually disjoint
meld_clusters - a list of meld_cluster
"""
def get_meld_clusters(hand: List[Card],
                      going_out_deadwood_count: int,
                      is_going_out: bool = False,
                      opponent_meld_piles: List[List[Card]] = None) -> List[List[Set[Card]]]:
    """Return all clusters of up to three mutually disjoint melds in hand.

    Args:
        hand: Cards to search for melds.
        going_out_deadwood_count: Deadwood threshold applied when going out.
        is_going_out: When True, keep only clusters whose deadwood count is
            at most ``going_out_deadwood_count``.
        opponent_meld_piles: Meld piles of the opponent who has knocked, for
            laying off cards (currently unused by this implementation).

    Returns:
        A list of meld clusters; each cluster is a list of one to three
        pairwise-disjoint melds (each meld a set of cards).
    """
    def accept(meld_cluster):
        # A cluster always qualifies unless the player is going out, in
        # which case its remaining deadwood must be under the threshold.
        if not is_going_out:
            return True
        deadwood_count = utils.get_deadwood_count(hand=hand,
                                                  meld_cluster=meld_cluster)
        return deadwood_count <= going_out_deadwood_count

    all_run_melds = [set(x) for x in _get_all_run_melds(hand)]
    all_set_melds = [set(x) for x in _get_all_set_melds(hand)]
    all_melds = all_run_melds + all_set_melds
    all_melds_count = len(all_melds)
    result = []
    # Enumerate singles, disjoint pairs and disjoint triples of melds in
    # index order (a 10/11-card gin rummy hand holds at most three melds).
    for i in range(all_melds_count):
        first_meld = all_melds[i]
        if accept([first_meld]):
            result.append([first_meld])
        for j in range(i + 1, all_melds_count):
            second_meld = all_melds[j]
            if not second_meld.isdisjoint(first_meld):
                continue
            if accept([first_meld, second_meld]):
                result.append([first_meld, second_meld])
            for k in range(j + 1, all_melds_count):
                third_meld = all_melds[k]
                if not third_meld.isdisjoint(first_meld) \
                        or not third_meld.isdisjoint(second_meld):
                    continue
                if accept([first_meld, second_meld, third_meld]):
                    result.append([first_meld, second_meld, third_meld])
    return result
def get_best_meld_clusters(hand: List[Card]) -> List[List[Set[Card]]]:
    """Return every meld cluster that achieves the minimum deadwood count.

    All candidate clusters are generated without a going-out constraint;
    the ones tied for the lowest deadwood count are returned in their
    original generation order.
    """
    clusters = get_meld_clusters(hand=hand,
                                 going_out_deadwood_count=100,
                                 is_going_out=False)
    if not clusters:
        return []
    counts = [
        utils.get_deadwood_count(hand=hand, meld_cluster=cluster)
        for cluster in clusters
    ]
    best = min(counts)
    return [cluster for cluster, count in zip(clusters, counts)
            if count == best]
# private methods
def _get_all_run_melds(hand: List[Card]) -> List[List[Card]]:
    """Return every run meld (3+ same-suit cards in rank sequence) in hand.

    Shorter runs that start further right are produced by later iterations
    of the outer loop, so only left-anchored prefixes need to be added at
    the end.
    """
    card_count = len(hand)
    # Sorting by card_id groups the hand by suit with ranks ordered inside
    # each suit (assumes card_id is suit-major, rank-minor -- TODO confirm
    # against the Card class).
    hand_by_suit = sorted(hand, key=lambda x: x.card_id)
    # max_run_melds_from_left[i]: the longest run starting at position i.
    max_run_melds_from_left = [[] for _ in hand_by_suit]
    for i in range(card_count):
        card = hand_by_suit[i]
        max_run_melds_from_left[i].append(card)
        for j in range(i + 1, card_count):
            next_card = hand_by_suit[j]
            # Extend only while the suit matches and ranks stay consecutive
            # relative to the run's starting card.
            if next_card.suit != card.suit or next_card.rank_id != card.rank_id + (j - i):
                break
            else:
                max_run_melds_from_left[i].append(next_card)
    # Keep only genuine melds (length >= 3).
    max_run_melds_from_left = [run_meld for run_meld in max_run_melds_from_left if len(run_meld) >= 3]
    result = copy.deepcopy(max_run_melds_from_left)
    for max_run_meld in max_run_melds_from_left:
        max_run_meld_count = len(max_run_meld)
        if max_run_meld_count > 3:
            # Add every prefix of length >= 3 of this maximal run; suffixes
            # are already covered by runs starting at later indices.
            for i in range(max_run_meld_count - 3):
                result.append(max_run_meld[:-(i + 1)])
    return result
def _get_all_set_melds(hand: List[Card]) -> List[List[Card]]:
    """Return every set meld (3+ equal-rank cards) in hand.

    For a four-of-a-kind, the four possible three-card subsets are included
    in addition to the full four-card set.
    """
    max_set_melds = []
    # Sort by rank so equal-rank cards are adjacent; groups are flushed as
    # the rank changes.
    hand_by_rank = sorted(hand, key=lambda x: x.rank)
    set_meld = []
    current_rank = None
    for card in hand_by_rank:
        if current_rank is None or current_rank == card.rank:
            set_meld.append(card)
        else:
            # Rank changed: keep the finished group only if it is a meld.
            if len(set_meld) >= 3:
                max_set_melds.append(set_meld)
            set_meld = [card]
        current_rank = card.rank
    # Flush the final group after the loop ends.
    if len(set_meld) >= 3:
        max_set_melds.append(set_meld)
    # NOTE(review): deepcopy means the full melds in the result hold copies
    # of the Card objects, while the 3-card subsets appended below hold the
    # original cards -- this relies on Card comparing/hashing by value;
    # confirm against the Card class.
    result = copy.deepcopy(max_set_melds)
    for max_set_meld in max_set_melds:
        if len(max_set_meld) == 4:
            # Leaving out each card of a four-of-a-kind yields one 3-card meld.
            for meld_card in max_set_meld:
                result.append([card for card in max_set_meld if card != meld_card])
    return result
# For test
#def test01():
# deck = Card.init_standard_deck()
# print(f"deck: {[str(card) for card in deck]}")
# hand = random.sample(deck, 20)
# print(f"hand: {[str(card) for card in hand]}")
# all_set_melds = _get_all_set_melds(hand)
# print(f"all_set_melds={[[str(card) for card in meld_pile] for meld_pile in all_set_melds]}")
# all_run_melds = _get_all_run_melds(hand)
# print(f"all_run_melds={[[str(card) for card in run] for run in all_run_melds]}")
# going_out_deadwood_count = 10
# meld_clusters = get_meld_clusters(hand=hand, going_out_deadwood_count=going_out_deadwood_count, is_going_out=True)
# for meld_cluster in meld_clusters:
# deadwood = utils.get_deadwood(hand=hand, meld_cluster=meld_cluster)
# meld_cluster_text = f"meld_cluster={[[str(card) for card in meld_pile] for meld_pile in meld_cluster]}"
# deadwood_text = f"deadwood={[str(card) for card in deadwood]}"
# print(f"{meld_cluster_text} {deadwood_text}")
if __name__ == '__main__':
    # Bug fix: test01() is commented out above, so calling it here raised
    # NameError whenever this module was run as a script. Uncomment test01
    # above before restoring the call.
    pass
| 40.433121 | 119 | 0.650284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,650 | 0.259924 |
14225abd1af0a2752df316fa83f0866ab01e19a6 | 17,255 | py | Python | src/garage/tf/algos/rl2.py | jaekyeom/garage | be20a5ed29017dc079d9b46ee7dbd2a2fe89e90e | [
"MIT"
] | 1 | 2021-01-11T18:40:52.000Z | 2021-01-11T18:40:52.000Z | src/garage/tf/algos/rl2.py | kiminh/garage | c0fd41d73da7e7a71d6054e87370be35ca708e67 | [
"MIT"
] | null | null | null | src/garage/tf/algos/rl2.py | kiminh/garage | c0fd41d73da7e7a71d6054e87370be35ca708e67 | [
"MIT"
] | null | null | null | """Module for RL2.
This module contains RL2, RL2Worker and the environment wrapper for RL2.
"""
# yapf: disable
import abc
import collections
import akro
from dowel import logger
import numpy as np
from garage import (EnvSpec,
EnvStep,
EpisodeBatch,
log_multitask_performance,
StepType,
Wrapper)
from garage.np import concat_tensor_dict_list, discount_cumsum
from garage.np.algos import MetaRLAlgorithm
from garage.sampler import DefaultWorker
from garage.tf.algos._rl2npo import RL2NPO
# yapf: enable
class RL2Env(Wrapper):
    """Environment wrapper for RL2.
    In RL2, the observation is concatenated with the previous action,
    reward and terminal signal to form the new observation.
    Args:
        env (Environment): An env that will be wrapped.
    """
    def __init__(self, env):
        super().__init__(env)
        # Augmented space: obs ++ prev_action ++ prev_reward ++ done flag.
        self._observation_space = self._create_rl2_obs_space()
        self._spec = EnvSpec(
            action_space=self.action_space,
            observation_space=self._observation_space,
            max_episode_length=self._env.spec.max_episode_length)
    @property
    def observation_space(self):
        """akro.Space: The observation space specification."""
        return self._observation_space
    @property
    def spec(self):
        """EnvSpec: The environment specification."""
        return self._spec
    def reset(self):
        """Call reset on wrapped env.
        Returns:
            numpy.ndarray: The first observation conforming to
                `observation_space`.
            dict: The episode-level information.
                Note that this is not part of `env_info` provided in `step()`.
                It contains information of he entire episode, which could be
                needed to determine the first action (e.g. in the case of
                goal-conditioned or MTRL.)
        """
        first_obs, episode_info = self._env.reset()
        # There is no previous step yet, so pad with a zero action, zero
        # reward and a cleared terminal flag.
        first_obs = np.concatenate(
            [first_obs,
             np.zeros(self._env.action_space.shape), [0], [0]])
        return first_obs, episode_info
    def step(self, action):
        """Call step on wrapped env.
        Args:
            action (np.ndarray): An action provided by the agent.
        Returns:
            EnvStep: The environment step resulting from the action.
        Raises:
            RuntimeError: if `step()` is called after the environment has been
                constructed and `reset()` has not been called.
        """
        es = self._env.step(action)
        next_obs = es.observation
        # Append the action just taken, the reward it earned and whether the
        # episode terminated, per the RL2 observation convention.
        next_obs = np.concatenate([
            next_obs, action, [es.reward], [es.step_type == StepType.TERMINAL]
        ])
        return EnvStep(env_spec=self.spec,
                       action=action,
                       reward=es.reward,
                       observation=next_obs,
                       env_info=es.env_info,
                       step_type=es.step_type)
    def _create_rl2_obs_space(self):
        """Create observation space for RL2.
        Returns:
            akro.Box: Augmented observation space.
        """
        obs_flat_dim = np.prod(self._env.observation_space.shape)
        action_flat_dim = np.prod(self._env.action_space.shape)
        # "+ 1 + 1" accounts for the scalar reward and the terminal flag.
        return akro.Box(low=-np.inf,
                        high=np.inf,
                        shape=(obs_flat_dim + action_flat_dim + 1 + 1, ))
class RL2Worker(DefaultWorker):
    """Initialize a worker for RL2.
    In RL2, the policy does not reset between episodes in each meta batch.
    The policy only resets once at the beginning of a trial/meta batch.
    Args:
        seed (int): The seed to use to intialize random number generators.
        max_episode_length (int or float): The maximum length of episodes to
            sample. Can be (floating point) infinity.
        worker_number (int): The number of the worker where this update is
            occurring. This argument is used to set a different seed for each
            worker.
        n_episodes_per_trial (int): Number of episodes sampled per
            trial/meta-batch. Policy resets in the beginning of a meta batch,
            and obtain `n_episodes_per_trial` episodes in one meta batch.
    Attributes:
        agent(Policy or None): The worker's agent.
        env(Environment or None): The worker's environment.
    """
    def __init__(
            self,
            *,  # Require passing by keyword, since everything's an int.
            seed,
            max_episode_length,
            worker_number,
            n_episodes_per_trial=2):
        self._n_episodes_per_trial = n_episodes_per_trial
        super().__init__(seed=seed,
                         max_episode_length=max_episode_length,
                         worker_number=worker_number)
    def start_episode(self):
        """Begin a new episode.

        Note: the agent is intentionally NOT reset here -- rollout()
        resets it exactly once per trial, which is the defining behavior
        of RL2 sampling.
        """
        self._eps_length = 0
        self._prev_obs = self.env.reset()[0]
    def rollout(self):
        """Sample a single trial of the agent in the environment.
        Returns:
            EpisodeBatch: The collected episode.
        """
        # Reset the policy's state once, then run several episodes back to
        # back so the recurrent state carries across episode boundaries.
        self.agent.reset()
        for _ in range(self._n_episodes_per_trial):
            self.start_episode()
            while not self.step_episode():
                pass
        # Tag every step with this worker's number so samples can later be
        # grouped by the task/environment they came from.
        self._agent_infos['batch_idx'] = np.full(len(self._env_steps),
                                                 self._worker_number)
        return self.collect_episode()
class NoResetPolicy:
    """Wrapper that turns ``reset()`` into a no-op.

    During RL2 meta-test the recurrent policy's hidden state is exactly the
    state produced by meta-RL adaptation, so resets requested by samplers
    must be ignored; every other call passes straight through to the
    wrapped policy.

    Args:
        policy (garage.tf.policies.Policy): Policy itself.

    Returns:
        garage.tf.policies.Policy: The wrapped policy that does not reset.
    """

    def __init__(self, policy):
        self._policy = policy

    def reset(self):
        """Environment reset function -- deliberately does nothing."""

    def get_action(self, obs):
        """Get a single action from this policy for the input observation.

        Args:
            obs (numpy.ndarray): Observation from environment.

        Returns:
            numpy.ndarray: Predicted action.
            dict: Agent info.
        """
        return self._policy.get_action(obs)

    def get_param_values(self):
        """Return values of params.

        Returns:
            np.ndarray: Policy parameters values.
        """
        return self._policy.get_param_values()

    def set_param_values(self, params):
        """Set param values.

        Args:
            params (np.ndarray): A numpy array of parameter values.
        """
        self._policy.set_param_values(params)
# pylint: disable=protected-access
class RL2AdaptedPolicy:
    """A RL2 policy after adaptation.

    Snapshots the wrapped policy's hidden state at construction time and
    restores that snapshot on every ``reset()``, so each meta-test episode
    starts from the adapted hidden state rather than a blank one.

    Args:
        policy (garage.tf.policies.Policy): Policy itself.
    """

    def __init__(self, policy):
        # Copy the hidden state now; the wrapped policy may mutate its own.
        self._initial_hiddens = policy._prev_hiddens[:]
        self._policy = policy

    def reset(self):
        """Environment reset function -- restore the adapted hidden state."""
        self._policy._prev_hiddens = self._initial_hiddens

    def get_action(self, obs):
        """Get a single action from this policy for the input observation.

        Args:
            obs (numpy.ndarray): Observation from environment.

        Returns:
            numpy.ndarray: Predicted action.
            dict: Agent info.
        """
        return self._policy.get_action(obs)

    def get_param_values(self):
        """Return values of params.

        Returns:
            np.ndarray: Policy parameter values.
            np.ndarray: Initial hidden state, which will be set every time
                the policy is used for meta-test.
        """
        return (self._policy.get_param_values(), self._initial_hiddens)

    def set_param_values(self, params):
        """Set param values.

        Args:
            params (Tuple[np.ndarray, np.ndarray]): Two numpy arrays of
                parameter values: the network parameters and the initial
                hidden state.
        """
        network_params, hiddens = params
        self._policy.set_param_values(network_params)
        self._initial_hiddens = hiddens
class RL2(MetaRLAlgorithm, abc.ABC):
    """RL^2.
    Reference: https://arxiv.org/pdf/1611.02779.pdf.
    When sampling for RL^2, there is more than one environment to be
    sampled from. In the original implementation, within each task/environment,
    all episodes sampled will be concatenated into one single episode, and fed
    to the inner algorithm. Thus, returns and advantages are calculated across
    the episode.
    RL2Worker is required in sampling for RL2.
    See example/tf/rl2_ppo_halfcheetah.py for reference.
    User should not instantiate RL2 directly.
    Currently garage supports PPO and TRPO as inner algorithm. Refer to
    garage/tf/algos/rl2ppo.py and garage/tf/algos/rl2trpo.py.
    Args:
        env_spec (EnvSpec): Environment specification.
        episodes_per_trial (int): Used to calculate the max episode length for
            the inner algorithm.
        meta_batch_size (int): Meta batch size.
        task_sampler (TaskSampler): Task sampler.
        meta_evaluator (MetaEvaluator): Evaluator for meta-RL algorithms.
        n_epochs_per_eval (int): If meta_evaluator is passed, meta-evaluation
            will be performed every `n_epochs_per_eval` epochs.
        inner_algo_args (dict): Arguments for inner algorithm.
    """
    def __init__(self, env_spec, episodes_per_trial, meta_batch_size,
                 task_sampler, meta_evaluator, n_epochs_per_eval,
                 **inner_algo_args):
        self._env_spec = env_spec
        # The inner algorithm sees one trial (episodes_per_trial episodes
        # concatenated back to back) as a single long episode.
        _inner_env_spec = EnvSpec(
            env_spec.observation_space, env_spec.action_space,
            episodes_per_trial * env_spec.max_episode_length)
        self._inner_algo = RL2NPO(env_spec=_inner_env_spec, **inner_algo_args)
        self._rl2_max_episode_length = self._env_spec.max_episode_length
        self._n_epochs_per_eval = n_epochs_per_eval
        self._policy = self._inner_algo.policy
        self._discount = self._inner_algo._discount
        self._meta_batch_size = meta_batch_size
        self._task_sampler = task_sampler
        self._meta_evaluator = meta_evaluator
    def train(self, trainer):
        """Obtain samplers and start actual training for each epoch.
        Args:
            trainer (Trainer): Experiment trainer, which provides services
                such as snapshotting and sampler control.
        Returns:
            float: The average return in last epoch.
        """
        last_return = None
        for _ in trainer.step_epochs():
            # Run meta-evaluation periodically, before sampling new data.
            if trainer.step_itr % self._n_epochs_per_eval == 0:
                if self._meta_evaluator is not None:
                    self._meta_evaluator.evaluate(self)
            # Sample a fresh batch of tasks for each epoch.
            trainer.step_episode = trainer.obtain_samples(
                trainer.step_itr,
                env_update=self._task_sampler.sample(self._meta_batch_size))
            last_return = self.train_once(trainer.step_itr,
                                          trainer.step_episode)
            trainer.step_itr += 1
        return last_return
    def train_once(self, itr, paths):
        """Perform one step of policy optimization given one batch of samples.
        Args:
            itr (int): Iteration number.
            paths (list[dict]): A list of collected paths.
        Returns:
            numpy.float64: Average return.
        """
        episodes, average_return = self._process_samples(itr, paths)
        logger.log('Optimizing policy...')
        self._inner_algo.optimize_policy(episodes)
        return average_return
    def get_exploration_policy(self):
        """Return a policy used before adaptation to a specific task.
        Each time it is retrieved, this policy should only be evaluated in one
        task.
        Returns:
            Policy: The policy used to obtain samples that are later used for
                meta-RL adaptation.
        """
        # Reset once here; NoResetPolicy then ignores further resets so the
        # hidden state persists across the exploration episodes.
        self._policy.reset()
        return NoResetPolicy(self._policy)
    # pylint: disable=protected-access
    def adapt_policy(self, exploration_policy, exploration_episodes):
        """Produce a policy adapted for a task.
        Args:
            exploration_policy (Policy): A policy which was returned from
                get_exploration_policy(), and which generated
                exploration_episodes by interacting with an environment. The
                caller may not use this object after passing it into this
                method.
            exploration_episodes (EpisodeBatch): episodes to adapt to,
                generated by exploration_policy exploring the
                environment.
        Returns:
            Policy: A policy adapted to the task represented by the
                exploration_episodes.
        """
        # Adaptation is implicit in the recurrent state accumulated while
        # exploring; snapshot it by wrapping the underlying policy.
        return RL2AdaptedPolicy(exploration_policy._policy)
    # pylint: disable=protected-access
    def _process_samples(self, itr, paths):
        # pylint: disable=too-many-statements
        """Return processed sample data based on the collected paths.
        Args:
            itr (int): Iteration number.
            paths (OrderedDict[dict]): A list of collected paths for each
                task. In RL^2, there are n environments/tasks and paths in
                each of them will be concatenated at some point and fed to
                the policy.
        Returns:
            EpisodeBatch: Processed batch of episodes for feeding the inner
                algorithm.
            numpy.float64: The average return.
        Raises:
            ValueError: If 'batch_idx' is not found.
        """
        concatenated_paths = []
        paths_by_task = collections.defaultdict(list)
        for path in paths:
            path['returns'] = discount_cumsum(path['rewards'], self._discount)
            path['lengths'] = [len(path['rewards'])]
            # 'batch_idx' was stamped by RL2Worker and identifies which
            # task/environment the path came from.
            if 'batch_idx' in path:
                paths_by_task[path['batch_idx']].append(path)
            elif 'batch_idx' in path['agent_infos']:
                paths_by_task[path['agent_infos']['batch_idx'][0]].append(path)
            else:
                raise ValueError(
                    'Batch idx is required for RL2 but not found, '
                    'Make sure to use garage.tf.algos.rl2.RL2Worker '
                    'for sampling')
        # all path in paths_by_task[i] are sampled from task[i]
        for _paths in paths_by_task.values():
            concatenated_path = self._concatenate_paths(_paths)
            concatenated_paths.append(concatenated_path)
        # Build an index -> task-name map when the sampled envs expose task
        # names, so per-task performance can be logged separately.
        name_map = None
        if hasattr(self._task_sampler, '_envs') and hasattr(
                self._task_sampler._envs[0]._env, 'all_task_names'):
            names = [
                env._env.all_task_names[0] for env in self._task_sampler._envs
            ]
            name_map = dict(enumerate(names))
        undiscounted_returns = log_multitask_performance(
            itr,
            EpisodeBatch.from_list(self._env_spec, paths),
            self._inner_algo._discount,
            name_map=name_map)
        average_return = np.mean(undiscounted_returns)
        episodes = EpisodeBatch.from_list(self._env_spec, concatenated_paths)
        return episodes, average_return
    def _concatenate_paths(self, paths):
        """Concatenate paths.
        The input paths are from different episodes but same task/environment.
        In RL^2, paths within each meta batch are all concatenate into a single
        path and fed to the policy.
        Args:
            paths (dict): Input paths. All paths are from different episodes,
                but the same task/environment.
        Returns:
            dict: Concatenated paths from the same task/environment. Shape of
                values: :math:`[max_episode_length * episode_per_task, S^*]`
            list[dict]: Original input paths. Length of the list is
                :math:`episode_per_task` and each path in the list has
                values of shape :math:`[max_episode_length, S^*]`
        """
        observations = np.concatenate([path['observations'] for path in paths])
        actions = np.concatenate([
            self._env_spec.action_space.flatten_n(path['actions'])
            for path in paths
        ])
        # Every collected step is valid; baselines start at zero.
        # NOTE(review): presumably the inner algorithm fills in real
        # baselines during optimization -- confirm against RL2NPO.
        valids = np.concatenate(
            [np.ones_like(path['rewards']) for path in paths])
        baselines = np.concatenate(
            [np.zeros_like(path['rewards']) for path in paths])
        concatenated_path = concat_tensor_dict_list(paths)
        concatenated_path['observations'] = observations
        concatenated_path['actions'] = actions
        concatenated_path['valids'] = valids
        concatenated_path['baselines'] = baselines
        return concatenated_path
    @property
    def policy(self):
        """Policy: Policy to be used."""
        return self._inner_algo.policy
    @property
    def max_episode_length(self):
        """int: Maximum length of an episode."""
        return self._rl2_max_episode_length
| 33.635478 | 79 | 0.619009 | 16,601 | 0.961986 | 0 | 0 | 500 | 0.028974 | 0 | 0 | 9,027 | 0.523092 |
1422d17a09d1a24e370225b68f6258e43dc906f1 | 2,059 | py | Python | cct/core2/algorithms/orderforleastmotormovement.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | 1 | 2015-11-04T16:37:39.000Z | 2015-11-04T16:37:39.000Z | cct/core2/algorithms/orderforleastmotormovement.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | null | null | null | cct/core2/algorithms/orderforleastmotormovement.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | 1 | 2020-03-05T02:50:43.000Z | 2020-03-05T02:50:43.000Z | from typing import Tuple, List, TypeVar
T = TypeVar("T")


def orderForLeastMotorMovement(positions: List[Tuple[T, Tuple[float, float]]], startposition: Tuple[float, float]) -> List[T]:
    """Order objects so a two-axis stage visits them with little travel.

    Performs a boustrophedon-style sweep: the axis with fewer unique
    coordinates becomes the "slow" axis and is traversed monotonically,
    starting from the end closest to ``startposition``; within each slow
    coordinate the "fast" axis is walked from whichever end is nearest to
    the previous stop.

    :param positions: list of ``(object, (x, y))`` pairs to visit.
    :param startposition: the ``(x, y)`` coordinate the stage starts from.
    :return: the objects, reordered for low total motor movement.
    """
    unique_x = {pos[0] for _, pos in positions} | {startposition[0]}
    unique_y = {pos[1] for _, pos in positions} | {startposition[1]}
    # Sweep along the axis with fewer distinct values ("slow" axis); the
    # other axis is traversed back and forth ("fast" axis).
    if len(unique_x) < len(unique_y):
        slow, fast = 0, 1
    else:
        slow, fast = 1, 0
    # Distinct slow-axis stops, excluding the starting coordinate itself.
    slow_coords = sorted({pos[slow] for _, pos in positions
                          if pos[slow] != startposition[slow]})
    # Begin the sweep at whichever end lies closer to the start position.
    if slow_coords and abs(slow_coords[-1] - startposition[slow]) < abs(slow_coords[0] - startposition[slow]):
        slow_coords.reverse()
    previous_fast = startposition[fast]
    ordered = []
    for slow_value in [startposition[slow]] + slow_coords:
        # All objects sharing this slow coordinate, by fast coordinate.
        row = sorted((item for item in positions
                      if item[1][slow] == slow_value),
                     key=lambda item: item[1][fast])
        if not row:
            continue
        # Enter the row from the end nearest to where we currently are.
        if abs(row[0][1][fast] - previous_fast) > abs(row[-1][1][fast] - previous_fast):
            row.reverse()
        ordered.extend(row)
        previous_fast = ordered[-1][1][fast]
    assert len(ordered) == len(positions)
    return [obj for obj, _ in ordered]
| 45.755556 | 126 | 0.64643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.259835 |
142313749cfd9431bfe0ceeb096274e6b438b71c | 350 | py | Python | openpnm/topotools/__init__.py | lixuekai2001/OpenPNM | 9026f0fed427d37f4caf1a79e4a7684490d52cf6 | [
"MIT"
] | 2 | 2019-08-24T09:17:40.000Z | 2020-07-05T07:21:21.000Z | openpnm/topotools/__init__.py | xu-kai-xu/OpenPNM | 61d5fc4729a0a29291cf6c53c07c4246e7a13714 | [
"MIT"
] | 2 | 2020-06-26T19:58:23.000Z | 2021-12-14T07:16:41.000Z | openpnm/topotools/__init__.py | xu-kai-xu/OpenPNM | 61d5fc4729a0a29291cf6c53c07c4246e7a13714 | [
"MIT"
] | null | null | null | r"""
Collection of functions for manipulating network topology
=========================================================
This module contains a selection of functions that deal specifically with
network topology.
"""
from ._topotools import *
from ._perctools import *
from ._graphtools import *
from ._plottools import *
from . import generators
| 23.333333 | 73 | 0.66 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.622857 |
1423487aa9ea63d89b16cc57da5de0d83c612a84 | 7,560 | py | Python | tests/gdata_tests/analytics/data_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | tests/gdata_tests/analytics/data_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | tests/gdata_tests/analytics/data_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for Google Analytics Account Feed and Data Feed.
AccountFeedTest: All unit tests for AccountFeed class.
DataFeedTest: All unit tests for DataFeed class.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import unittest
from gdata import test_data
import gdata.analytics.data
import atom.core
import gdata.test_config as conf
class AccountFeedTest(unittest.TestCase):
    """Unit test for all custom elements in the Account Feed."""

    def setUp(self):
        """Retrieves the test XML feed into an AccountFeed object."""
        self.feed = atom.core.parse(test_data.ANALYTICS_ACCOUNT_FEED,
                                    gdata.analytics.data.AccountFeed)

    def testAccountEntryTableId(self):
        """Tests custom classes in Google Analytics Account Feed."""
        entry = self.feed.entry[0]
        # assertEqual replaces the deprecated assertEquals alias throughout.
        self.assertEqual(entry.table_id.text, 'ga:1174')

    def testAccountEntryProperty(self):
        """Tests the property classes in Google Analytics Account Feed."""
        # Renamed local from `property` to avoid shadowing the builtin.
        props = self.feed.entry[0].property
        self.assertEqual(props[0].name, 'ga:accountId')
        self.assertEqual(props[0].value, '30481')
        self.assertEqual(props[1].name, 'ga:accountName')
        self.assertEqual(props[1].value, 'Google Store')
        self.assertEqual(props[2].name, 'ga:profileId')
        self.assertEqual(props[2].value, '1174')
        self.assertEqual(props[3].name, 'ga:webPropertyId')
        self.assertEqual(props[3].value, 'UA-30481-1')
        self.assertEqual(props[4].name, 'ga:currency')
        self.assertEqual(props[4].value, 'USD')
        self.assertEqual(props[5].name, 'ga:timezone')
        self.assertEqual(props[5].value, 'America/Los_Angeles')

    def testAccountEntryGetProperty(self):
        """Tests the GetProperty helper on the AccountEntry class."""
        entry = self.feed.entry[0]
        self.assertEqual(entry.GetProperty('ga:accountId').value, '30481')
        self.assertEqual(entry.GetProperty('ga:accountName').value,
                         'Google Store')
        self.assertEqual(entry.GetProperty('ga:profileId').value, '1174')
        self.assertEqual(entry.GetProperty('ga:webPropertyId').value,
                         'UA-30481-1')
        self.assertEqual(entry.GetProperty('ga:currency').value, 'USD')
        self.assertEqual(entry.GetProperty('ga:timezone').value,
                         'America/Los_Angeles')
class DataFeedTest(unittest.TestCase):
    """Unit test for all custom elements in the Data Feed."""

    def setUp(self):
        """Retrieves the test XML feed into a DataFeed object."""
        self.feed = atom.core.parse(test_data.ANALYTICS_DATA_FEED,
                                    gdata.analytics.data.DataFeed)

    def testDataFeed(self):
        """Tests custom classes in Google Analytics Data Feed."""
        # assertEqual replaces the deprecated assertEquals alias throughout.
        self.assertEqual(self.feed.start_date.text, '2008-10-01')
        self.assertEqual(self.feed.end_date.text, '2008-10-31')

    def testAggregates(self):
        """Tests Aggregates class in Google Analytics Data Feed."""
        # assertIsNotNone replaces the deprecated assert_ alias.
        self.assertIsNotNone(self.feed.aggregates)

    def testAggregatesElements(self):
        """Tests Metrics class in Aggregates class."""
        metric = self.feed.aggregates.metric[0]
        self.assertEqual(metric.confidence_interval, '0.0')
        self.assertEqual(metric.name, 'ga:visits')
        self.assertEqual(metric.type, 'integer')
        self.assertEqual(metric.value, '136540')
        # The same metric must also be reachable by name.
        metric = self.feed.aggregates.GetMetric('ga:visits')
        self.assertEqual(metric.confidence_interval, '0.0')
        self.assertEqual(metric.name, 'ga:visits')
        self.assertEqual(metric.type, 'integer')
        self.assertEqual(metric.value, '136540')

    def testDataSource(self):
        """Tests DataSources class in Google Analytics Data Feed."""
        self.assertIsNotNone(self.feed.data_source[0])

    def testDataSourceTableId(self):
        """Tests TableId class in the DataSource class."""
        table_id = self.feed.data_source[0].table_id
        self.assertEqual(table_id.text, 'ga:1174')

    def testDataSourceTableName(self):
        """Tests TableName class in the DataSource class."""
        table_name = self.feed.data_source[0].table_name
        self.assertEqual(table_name.text, 'www.googlestore.com')

    def testDataSourceProperty(self):
        """Tests Property class in the DataSource class."""
        # Renamed local from `property` to avoid shadowing the builtin.
        props = self.feed.data_source[0].property
        self.assertEqual(props[0].name, 'ga:profileId')
        self.assertEqual(props[0].value, '1174')
        self.assertEqual(props[1].name, 'ga:webPropertyId')
        self.assertEqual(props[1].value, 'UA-30481-1')
        self.assertEqual(props[2].name, 'ga:accountName')
        self.assertEqual(props[2].value, 'Google Store')

    def testDataSourceGetProperty(self):
        """Tests GetProperty utility method in the DataSource class."""
        ds = self.feed.data_source[0]
        self.assertEqual(ds.GetProperty('ga:profileId').value, '1174')
        self.assertEqual(ds.GetProperty('ga:webPropertyId').value,
                         'UA-30481-1')
        self.assertEqual(ds.GetProperty('ga:accountName').value,
                         'Google Store')

    def testEntryDimension(self):
        """Tests Dimension class in Entry class."""
        dim = self.feed.entry[0].dimension[0]
        self.assertEqual(dim.name, 'ga:source')
        self.assertEqual(dim.value, 'blogger.com')

    def testEntryGetDimension(self):
        """Tests GetDimension utility method in the Entry class."""
        dim = self.feed.entry[0].GetDimension('ga:source')
        self.assertEqual(dim.name, 'ga:source')
        self.assertEqual(dim.value, 'blogger.com')
        # Unknown dimension names must return None, not raise.
        error = self.feed.entry[0].GetDimension('foo')
        self.assertIsNone(error)

    def testEntryMetric(self):
        """Tests Metric class in Entry class."""
        met = self.feed.entry[0].metric[0]
        self.assertEqual(met.confidence_interval, '0.0')
        self.assertEqual(met.name, 'ga:visits')
        self.assertEqual(met.type, 'integer')
        self.assertEqual(met.value, '68140')

    def testEntryGetMetric(self):
        """Tests GetMetric utility method in the Entry class."""
        met = self.feed.entry[0].GetMetric('ga:visits')
        self.assertEqual(met.confidence_interval, '0.0')
        self.assertEqual(met.name, 'ga:visits')
        self.assertEqual(met.type, 'integer')
        self.assertEqual(met.value, '68140')
        error = self.feed.entry[0].GetMetric('foo')
        self.assertIsNone(error)

    def testEntryGetObject(self):
        """Tests GetObject utility method in the Entry class."""
        entry = self.feed.entry[0]
        dimension = entry.GetObject('ga:source')
        self.assertEqual(dimension.name, 'ga:source')
        self.assertEqual(dimension.value, 'blogger.com')
        metric = entry.GetObject('ga:visits')
        self.assertEqual(metric.name, 'ga:visits')
        self.assertEqual(metric.value, '68140')
        self.assertEqual(metric.type, 'integer')
        self.assertEqual(metric.confidence_interval, '0.0')
        error = entry.GetObject('foo')
        self.assertIsNone(error)
def suite():
    """Build the combined suite for the Account Feed and Data Feed tests."""
    feed_test_cases = [AccountFeedTest, DataFeedTest]
    return conf.build_suite(feed_test_cases)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 34.678899 | 84 | 0.714153 | 6,435 | 0.85119 | 0 | 0 | 0 | 0 | 0 | 0 | 2,797 | 0.369974 |
14235464064dbcb80d081c8ba50a4d1f0167651c | 3,299 | py | Python | parkself_mbed/scripts/parkself_run.py | SweiLz/Parkself | 5df83967926dda849a73ff33d8ce1f8c6eb54d4f | [
"BSD-3-Clause"
] | null | null | null | parkself_mbed/scripts/parkself_run.py | SweiLz/Parkself | 5df83967926dda849a73ff33d8ce1f8c6eb54d4f | [
"BSD-3-Clause"
] | null | null | null | parkself_mbed/scripts/parkself_run.py | SweiLz/Parkself | 5df83967926dda849a73ff33d8ce1f8c6eb54d4f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import Queue
from collections import deque
from math import sqrt

import rospy
from geometry_msgs.msg import Twist, PoseStamped, Pose2D, PointStamped, PoseWithCovarianceStamped
from std_msgs.msg import Empty, String
# Publishers shared by the callbacks below: navigation goals and the
# servo trigger topic.
goal_point = rospy.Publisher(
    "move_base_simple/goal", PoseStamped, queue_size=1)
servo = rospy.Publisher("servo", Empty, queue_size=1)
# Name of the graph node the robot currently occupies.
robot_state = "In"
# Waypoint rows p0..p5: each row holds three (x, y) map coordinates
# [left bay, center corridor, right bay].
p0 = [(0, 0), (0.53, -0.03), (1.09, -0.07)]
p1 = [(0.00, -0.46), (0.53, -0.50), (1.00, -0.55)]
p2 = [(0, -0.90), (0.53, -0.90), (1.00, -0.90)]
p3 = [(0, -1.30), (0.53, -1.30), (1.00, -1.30)]
p4 = [(0, -1.72), (0.53, -1.70), (1.00, -1.72)]
p5 = [(0, -2.10), (0.53, -2.10), (1.00, -2.10)]
# Named nodes: "In"/"Out" are the entry/exit points; "B*" are left-column
# bays, "A*" are right-column bays.
graphN = {
    "In": p0[0],
    "Out": p0[2],
    "B1": p1[0],
    "B2": p2[0],
    "B3": p3[0],
    "B4": p4[0],
    "B5": p5[0],
    "A1": p1[2],
    "A2": p2[2],
    "A3": p3[2],
    "A4": p4[2],
    "A5": p5[2]
}
# Adjacency list over the coordinate tuples above: the middle column of
# each row forms the corridor linking the rows; side bays connect only to
# their row's corridor point.
graph = {
    p0[0]: [p0[1]],
    p0[1]: [p0[0], p0[2], p1[1]],
    p0[2]: [p0[1]],
    p1[0]: [p1[1]],
    p1[1]: [p0[1], p1[0], p1[2], p2[1]],
    p1[2]: [p1[1]],
    p2[0]: [p2[1]],
    p2[1]: [p1[1], p2[0], p2[2], p3[1]],
    p2[2]: [p2[1]],
    p3[0]: [p3[1]],
    p3[1]: [p2[1], p3[0], p3[2], p4[1]],
    p3[2]: [p3[1]],
    p4[0]: [p4[1]],
    p4[1]: [p3[1], p4[0], p4[2], p5[1]],
    p4[2]: [p4[1]],
    p5[0]: [p5[1]],
    p5[1]: [p4[1], p5[0], p5[2]],
    p5[2]: [p5[1]]
}
def bfs(start, goal, adjacency=None):
    """Breadth-first search for the shortest path from start to goal.

    Args:
        start: node to search from.
        goal: node to reach.
        adjacency: optional adjacency dict mapping node -> list of
            neighbours; defaults to the module-level ``graph`` for
            backward compatibility.

    Returns:
        list: the node sequence from start to goal (inclusive). If the
        goal is unreachable the historical behavior is preserved and
        ``[goal]`` is returned.
    """
    if adjacency is None:
        adjacency = graph
    # deque gives O(1) FIFO behavior without Queue.Queue's locking overhead.
    frontier = deque([start])
    came_from = {start: None}
    while frontier:
        current = frontier.popleft()
        if current == goal:
            break
        for neighbour in adjacency[current]:
            if neighbour not in came_from:
                frontier.append(neighbour)
                came_from[neighbour] = current
    # Walk the parent links back from the goal to reconstruct the path.
    path = [goal]
    parent = came_from.get(goal)
    while parent is not None:
        path.insert(0, parent)
        parent = came_from.get(parent)
    return path
# Nominal travel speed (m/s) used to estimate how long each leg takes.
_TRAVEL_SPEED = 0.08

def _drive_along(path):
    """Publish each waypoint of *path* (after the starting node) as a nav
    goal, sleeping long enough for the robot to cover each leg, then pulse
    the servo and pause to let the robot settle.
    """
    goal = PoseStamped()
    goal.header.frame_id = "map"
    for prev, waypoint in zip(path, path[1:]):
        goal.pose.position.x = waypoint[0]
        goal.pose.position.y = waypoint[1]
        goal_point.publish(goal)
        dx = waypoint[0] - prev[0]
        dy = waypoint[1] - prev[1]
        # Wait out the estimated travel time before publishing the next goal.
        rospy.sleep(sqrt(dx**2 + dy**2) / _TRAVEL_SPEED)
    servo.publish(Empty())
    rospy.sleep(2.0)

def userCB(msg):
    """Handle a "start,goal" request from the "user" topic.

    Drives from the robot's current node to the requested start node, then
    on to the requested goal node, and records the new position in
    ``robot_state``. (Previously the two legs were duplicated inline; the
    shared waypoint-following loop now lives in ``_drive_along``.)
    """
    global graphN, robot_state
    msgStart, msgGoal = msg.data.split(',')
    rospy.loginfo("from {} -> {}".format(msgStart, msgGoal))
    # Leg 1: current position -> requested start node.
    path = bfs(graphN[robot_state], graphN[msgStart])
    rospy.loginfo("path: {}".format(path))
    _drive_along(path)
    # Leg 2: start node -> requested goal node.
    path = bfs(graphN[msgStart], graphN[msgGoal])
    rospy.loginfo("path: {}".format(path))
    _drive_along(path)
    robot_state = msgGoal
def main():
    """Start the ROS node, subscribe to user requests and idle until shutdown."""
    rospy.init_node("parkself_runner")
    rospy.loginfo("Parkself Runner")
    # Incoming "start,goal" strings trigger the navigation sequence in userCB.
    rospy.Subscriber("user", String, userCB)
    # Publish a zeroed initial goal (pose at the map origin) on startup.
    goal = PoseStamped()
    goal.header.frame_id = "map"
    goal_point.publish(goal)
    rospy.spin()
if __name__ == '__main__':
    main()
| 24.992424 | 97 | 0.529858 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.061231 |
14239ea5aa010abca8f3a50c93850774794a7cf3 | 5,587 | py | Python | tests/test_session.py | brainsplinter/pylibsrtp | f684f93f3b1013a7ef7dfb6a863c885ae5761854 | [
"BSD-3-Clause"
] | 11 | 2018-03-02T12:21:48.000Z | 2022-01-31T12:58:06.000Z | tests/test_session.py | brainsplinter/pylibsrtp | f684f93f3b1013a7ef7dfb6a863c885ae5761854 | [
"BSD-3-Clause"
] | 18 | 2019-02-26T16:03:19.000Z | 2022-03-06T17:36:42.000Z | tests/test_session.py | brainsplinter/pylibsrtp | f684f93f3b1013a7ef7dfb6a863c885ae5761854 | [
"BSD-3-Clause"
] | 5 | 2019-11-08T08:31:02.000Z | 2022-02-11T15:21:04.000Z | from unittest import TestCase
from pylibsrtp import Error, Policy, Session
# A minimal 172-byte RTP packet: 12-byte header followed by 160 payload bytes.
RTP = (
    b"\x80\x08\x00\x00"  # version, packet type, sequence number
    b"\x00\x00\x00\x00"  # timestamp
    b"\x00\x00\x30\x39"  # ssrc: 12345
) + (b"\xd4" * 160)
# A minimal 28-byte RTCP packet used for the protect_rtcp round-trip tests.
RTCP = (
    b"\x80\xc8\x00\x06\xf3\xcb\x20\x01\x83\xab\x03\xa1\xeb\x02\x0b\x3a"
    b"\x00\x00\x94\x20\x00\x00\x00\x9e\x00\x00\x9b\x88"
)
# Set key to predetermined value
# 30 bytes of key material shared by every policy in these tests.
KEY = (
    b"\x00\x01\x02\x03\x04\x05\x06\x07"
    b"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    b"\x10\x11\x12\x13\x14\x15\x16\x17"
    b"\x18\x19\x1a\x1b\x1c\x1d"
)
class PolicyTest(TestCase):
    """Exercise the attribute accessors of the pylibsrtp Policy object."""

    def test_allow_repeat_tx(self):
        """allow_repeat_tx defaults to False and coerces assignments to bool."""
        policy = Policy()
        self.assertEqual(policy.allow_repeat_tx, False)
        for assigned, expected in ((True, True), (False, False), (1, True), (0, False)):
            policy.allow_repeat_tx = assigned
            self.assertEqual(policy.allow_repeat_tx, expected)

    def test_key(self):
        """The key accepts bytes or None and rejects every other type."""
        policy = Policy()
        self.assertEqual(policy.key, None)
        policy.key = KEY
        self.assertEqual(policy.key, KEY)
        policy.key = None
        self.assertEqual(policy.key, None)
        # A non-bytes assignment raises and leaves the key unchanged.
        with self.assertRaises(TypeError) as context:
            policy.key = 1234
        self.assertEqual(policy.key, None)
        self.assertEqual(str(context.exception), "key must be bytes")

    def test_ssrc_type(self):
        """ssrc_type starts undefined and stores the assigned constant."""
        policy = Policy()
        self.assertEqual(policy.ssrc_type, Policy.SSRC_UNDEFINED)
        policy.ssrc_type = Policy.SSRC_ANY_INBOUND
        self.assertEqual(policy.ssrc_type, Policy.SSRC_ANY_INBOUND)

    def test_ssrc_value(self):
        """ssrc_value starts at 0 and round-trips an assigned integer."""
        policy = Policy()
        self.assertEqual(policy.ssrc_value, 0)
        policy.ssrc_value = 12345
        self.assertEqual(policy.ssrc_value, 12345)

    def test_window_size(self):
        """window_size starts at 0 and round-trips an assigned integer."""
        policy = Policy()
        self.assertEqual(policy.window_size, 0)
        policy.window_size = 1024
        self.assertEqual(policy.window_size, 1024)
class SessionTest(TestCase):
    """End-to-end protect/unprotect round trips through pylibsrtp Sessions.

    NOTE: SRTP sessions are stateful (sequence numbers, replay protection),
    so the order of protect/unprotect calls within each test matters.
    """
    def test_no_key(self):
        """A session cannot be created from a policy without key material."""
        policy = Policy(ssrc_type=Policy.SSRC_ANY_OUTBOUND)
        with self.assertRaises(Error) as cm:
            Session(policy=policy)
        self.assertEqual(str(cm.exception), "unsupported parameter")
    def test_add_remove_stream(self):
        """Streams can be added to and removed from an existing session."""
        # protect RTP
        tx_session = Session(
            policy=Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
        )
        protected = tx_session.protect(RTP)
        # ciphertext is the 172-byte packet plus trailing authentication data
        self.assertEqual(len(protected), 182)
        # add stream and unprotect RTP
        rx_session = Session()
        rx_session.add_stream(
            Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
        )
        unprotected = rx_session.unprotect(protected)
        self.assertEqual(len(unprotected), 172)
        self.assertEqual(unprotected, RTP)
        # remove stream
        rx_session.remove_stream(12345)
        # removing the same stream a second time must fail
        with self.assertRaises(Error) as cm:
            rx_session.remove_stream(12345)
        self.assertEqual(str(cm.exception), "no appropriate context found")
    def test_rtp_any_ssrc(self):
        """RTP round trip with wildcard (any outbound/inbound) SSRC policies."""
        # protect RTP
        tx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_OUTBOUND))
        protected = tx_session.protect(RTP)
        self.assertEqual(len(protected), 182)
        # bad type
        with self.assertRaises(TypeError) as cm:
            tx_session.protect(4567)
        self.assertEqual(str(cm.exception), "packet must be bytes")
        # bad length
        with self.assertRaises(ValueError) as cm:
            tx_session.protect(b"0" * 1500)
        self.assertEqual(str(cm.exception), "packet is too long")
        # unprotect RTP
        rx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_INBOUND))
        unprotected = rx_session.unprotect(protected)
        self.assertEqual(len(unprotected), 172)
        self.assertEqual(unprotected, RTP)
    def test_rtcp_any_ssrc(self):
        """RTCP round trip with wildcard SSRC policies."""
        # protect RTCP
        tx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_OUTBOUND))
        protected = tx_session.protect_rtcp(RTCP)
        self.assertEqual(len(protected), 42)
        # bad type
        with self.assertRaises(TypeError) as cm:
            tx_session.protect_rtcp(4567)
        self.assertEqual(str(cm.exception), "packet must be bytes")
        # bad length
        with self.assertRaises(ValueError) as cm:
            tx_session.protect_rtcp(b"0" * 1500)
        self.assertEqual(str(cm.exception), "packet is too long")
        # unprotect RTCP
        rx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_INBOUND))
        unprotected = rx_session.unprotect_rtcp(protected)
        self.assertEqual(len(unprotected), 28)
        self.assertEqual(unprotected, RTCP)
    def test_rtp_specific_ssrc(self):
        """RTP round trip where both policies pin the specific SSRC 12345."""
        # protect RTP
        tx_session = Session(
            policy=Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
        )
        protected = tx_session.protect(RTP)
        self.assertEqual(len(protected), 182)
        # unprotect RTP
        rx_session = Session(
            policy=Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
        )
        unprotected = rx_session.unprotect(protected)
        self.assertEqual(len(unprotected), 172)
        self.assertEqual(unprotected, RTP)
| 32.864706 | 88 | 0.653839 | 5,000 | 0.894935 | 0 | 0 | 0 | 0 | 0 | 0 | 788 | 0.141042 |
1425326030a36f7088c87f42da842ce1d53ade94 | 7,974 | py | Python | tripleo_ansible/ansible_plugins/modules/tripleo_baremetal_expand_roles.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 22 | 2018-08-29T12:33:15.000Z | 2022-03-30T00:17:25.000Z | tripleo_ansible/ansible_plugins/modules/tripleo_baremetal_expand_roles.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 1 | 2020-02-07T20:54:34.000Z | 2020-02-07T20:54:34.000Z | tripleo_ansible/ansible_plugins/modules/tripleo_baremetal_expand_roles.py | beagles/tripleo-ansible | 7faddd87cffc8903a9cdedc7a6454cdf44aeed67 | [
"Apache-2.0"
] | 19 | 2019-07-16T04:42:00.000Z | 2022-03-30T00:17:29.000Z | #!/usr/bin/python
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Ensure new-style classes when running under Python 2.
__metaclass__ = type
from ansible.module_utils import baremetal_deploy as bd
from ansible.module_utils.basic import AnsibleModule
import yaml
# Standard Ansible module metadata describing the module's support status.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tripleo_baremetal_expand_roles
short_description: Manage baremetal nodes with metalsmith
version_added: "2.9"
author: "Steve Baker (@stevebaker)"
description:
- Takes a baremetal deployment description of roles and node instances
and transforms that into an instance list and a heat environment file
for deployed-server.
options:
stack_name:
description:
- Name of the overcloud stack which will be deployed on these instances
default: overcloud
state:
description:
- Build instance list for the desired provision state, "present" to
provision, "absent" to unprovision, "all" for a combination of
"present" and "absent".
default: present
choices:
- present
- absent
- all
baremetal_deployment:
description:
- Data describing roles and baremetal node instances to provision for
those roles
type: list
elements: dict
suboptions:
name:
description:
- Mandatory role name
type: str
required: True
hostname_format:
description:
- Overrides the default hostname format for this role.
The default format uses the lower case role name.
For example, the default format for the Controller role is
%stackname%-controller-%index%. Only the Compute role does not
follow the role name rule. The Compute default format is
%stackname%-novacompute-%index%
type: str
count:
description:
- Number of instances to create for this role.
type: int
default: 1
defaults:
description:
- A dictionary of default values for instances entry properties.
An instances entry property overrides any defaults that you specify
in the defaults parameter.
type: dict
instances:
description:
- Values that you can use to specify attributes for specific nodes.
The length of this list must not be greater than the value of the
count parameter.
type: list
elements: dict
default_network:
description:
- Default nics entry when none are specified
type: list
suboptions: dict
default:
- network: ctlplane
vif: true
default_image:
description:
- Default image
type: dict
default:
href: overcloud-full
ssh_public_keys:
description:
- SSH public keys to load
type: str
user_name:
description:
- Name of the admin user to create
type: str
'''
RETURN = '''
instances:
description: Expanded list of instances to perform actions on
returned: changed
type: list
sample: [
{
"hostname": "overcloud-controller-0",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-controller-1",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-controller-2",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-novacompute-0",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-novacompute-1",
"image": {
"href": "overcloud-full"
}
},
{
"hostname": "overcloud-novacompute-2",
"image": {
"href": "overcloud-full"
}
}
]
environment:
description: Heat environment data to be used with the overcloud deploy.
This is only a partial environment, further changes are
required once instance changes have been made.
returned: changed
type: dict
sample: {
"parameter_defaults": {
"ComputeDeployedServerCount": 3,
"ComputeDeployedServerHostnameFormat": "%stackname%-novacompute-%index%",
"ControllerDeployedServerCount": 3,
"ControllerDeployedServerHostnameFormat": "%stackname%-controller-%index%",
"HostnameMap": {
"overcloud-controller-0": "overcloud-controller-0",
"overcloud-controller-1": "overcloud-controller-1",
"overcloud-controller-2": "overcloud-controller-2",
"overcloud-novacompute-0": "overcloud-novacompute-0",
"overcloud-novacompute-1": "overcloud-novacompute-1",
"overcloud-novacompute-2": "overcloud-novacompute-2"
}
}
}
''' # noqa
EXAMPLES = '''
- name: Expand roles
tripleo_baremetal_expand_roles:
baremetal_deployment:
- name: Controller
count: 3
defaults:
image:
href: overcloud-full
networks: []
- name: Compute
count: 3
defaults:
image:
href: overcloud-full
networks: []
state: present
stack_name: overcloud
register: tripleo_baremetal_instances
'''
def main():
    """Entry point: expand role definitions into concrete instance lists.

    Parses this module's DOCUMENTATION block to build the argument spec,
    expands the requested roles via baremetal_deploy.expand(), and exits
    with the instance list, (partial) Heat environment, role/network map
    and hostname/role map.
    """
    argument_spec = yaml.safe_load(DOCUMENTATION)['options']
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
    )
    state = module.params['state']
    try:
        # Defaults so a pure "absent" run still returns well-formed (empty)
        # environment/maps. BUGFIX: previously the "absent" branch reset
        # env/role_net_map/hostname_role_map to {} *after* the "present"
        # branch had computed them, so state='all' always returned an
        # empty environment; initializing up front preserves the computed
        # values for 'all' while keeping 'absent' behaviour unchanged.
        present = []
        absent = []
        env = {}
        role_net_map = {}
        hostname_role_map = {}
        if state in ('present', 'all'):
            present, env, role_net_map, hostname_role_map = bd.expand(
                roles=module.params['baremetal_deployment'],
                stack_name=module.params['stack_name'],
                expand_provisioned=True,
                default_image=module.params['default_image'],
                default_network=module.params['default_network'],
                user_name=module.params['user_name'],
                ssh_public_keys=module.params['ssh_public_keys'],
            )
        if state in ('absent', 'all'):
            absent, _, _, _ = bd.expand(
                roles=module.params['baremetal_deployment'],
                stack_name=module.params['stack_name'],
                expand_provisioned=False,
                default_image=module.params['default_image'],
            )
        if state == 'present':
            instances = present
        elif state == 'absent':
            instances = absent
        else:  # state == 'all' (choices are validated by the argument spec)
            instances = present + absent
        module.exit_json(
            changed=True,
            msg='Expanded to %d instances' % len(instances),
            instances=instances,
            environment=env,
            role_net_map=role_net_map,
            hostname_role_map=hostname_role_map,
        )
    except Exception as e:
        # Surface any expansion/parsing error as a module failure
        # (standard Ansible module convention).
        module.fail_json(msg=str(e))
if __name__ == '__main__':
    main()
| 30.787645 | 91 | 0.577878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,231 | 0.781415 |
1425809d2b5fe657e70492cd714e8e6e266db850 | 2,207 | py | Python | hexa/user_management/migrations/0003_feature_flags.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | hexa/user_management/migrations/0003_feature_flags.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | hexa/user_management/migrations/0003_feature_flags.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | # Generated by Django 3.2.6 on 2021-09-10 11:56
import uuid
import django.db.models.deletion
import django.db.models.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("user_management", "0002_remove_username"),
]
operations = [
migrations.CreateModel(
name="Feature",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("code", models.CharField(max_length=200)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="FeatureFlag",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("config", models.JSONField(blank=True, null=True)),
(
"feature",
models.ForeignKey(
on_delete=django.db.models.fields.CharField,
to="user_management.feature",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
]
| 30.232877 | 72 | 0.421386 | 2,004 | 0.90802 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.107839 |
14273ef2fcf5b08c4c4fe145580e3eb565476537 | 624 | py | Python | btclib/__init__.py | btclib-org/btclib | d19b45edca068118eb1d509536a1a459660d7306 | [
"MIT"
] | 40 | 2020-04-07T12:10:34.000Z | 2022-03-18T18:10:48.000Z | btclib/__init__.py | btclib-org/btclib | d19b45edca068118eb1d509536a1a459660d7306 | [
"MIT"
] | 30 | 2020-03-20T00:52:37.000Z | 2022-02-10T12:54:30.000Z | btclib/__init__.py | btclib-org/btclib | d19b45edca068118eb1d509536a1a459660d7306 | [
"MIT"
] | 16 | 2020-03-19T15:33:35.000Z | 2022-02-24T21:47:39.000Z | #!/usr/bin/env python3
# Copyright (C) 2020-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"__init__ module for the btclib package."
name = "btclib"
__version__ = "2021.1"
__author__ = "The btclib developers"
__author_email__ = "devs@btclib.org"
__copyright__ = "Copyright (C) 2017-2021 The btclib developers"
__license__ = "MIT License"
| 32.842105 | 77 | 0.761218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.838141 |
1428fc857bf985d08474b17410858d44bd518123 | 1,009 | py | Python | Lettuce_Tests/old_tests/testFeatureCategoricalGridCoverage.py | jread-usgs/pyGDP | 72c8464934fec6643f111bf326b5c12bf47925fb | [
"CC0-1.0"
] | null | null | null | Lettuce_Tests/old_tests/testFeatureCategoricalGridCoverage.py | jread-usgs/pyGDP | 72c8464934fec6643f111bf326b5c12bf47925fb | [
"CC0-1.0"
] | null | null | null | Lettuce_Tests/old_tests/testFeatureCategoricalGridCoverage.py | jread-usgs/pyGDP | 72c8464934fec6643f111bf326b5c12bf47925fb | [
"CC0-1.0"
] | null | null | null | import pyGDP
import os
from nose.tools import assert_equal
from nose.tools import assert_not_equal
class TestFeatureCategoricalGridCoverage(object):
    """Integration test for pyGDP's Feature Categorical Grid Coverage (FCGC)
    submission against the public USGS Geo Data Portal services (requires
    network access)."""
    def test_submit_FCGC(self):
        """Submit an FCGC job for Rhode Island and check the output file size."""
        pyGDP.WPS_URL='http://cida.usgs.gov/gdp/process/WebProcessingService'
        testPyGDP = pyGDP.pyGDPwebProcessing()
        shapefile = 'sample:CONUS_states'
        attribute = 'STATE'
        value = 'Rhode Island'
        dataSetURI = 'http://cida.usgs.gov/ArcGIS/services/statsgo_muid/MapServer/WCSServer'
        dataType = '1'
        outputFile_handle = testPyGDP.submitFeatureCategoricalGridCoverage(shapefile, dataSetURI, dataType, attribute, value, verbose=True)
        # NOTE(review): This test is not currently working because the value
        # returned from submitFeatureCategoricalGridCoverage() is None, even
        # though it has been verified that the call consistently writes a
        # file of the size asserted below. A file path string is expected
        # back from this function.
        assert_equal(os.path.getsize(outputFile_handle), 650)
| 38.807692 | 136 | 0.739346 | 907 | 0.89891 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.425173 |
142bca0d242d6c11c1ad4347da2e3a6249ad9ac2 | 16,521 | py | Python | Machine-Learning-in-Production/02-Model-Management/02-Model-Registry.py | databricks-academy/ml-in-production | 1fd6713e18cfc36357f3a98d75fedc8ffbf9eedc | [
"CC0-1.0"
] | 14 | 2021-09-21T19:48:02.000Z | 2022-03-09T19:22:39.000Z | Machine-Learning-in-Production/02-Model-Management/02-Model-Registry.py | databricks-academy/ml-in-production | 1fd6713e18cfc36357f3a98d75fedc8ffbf9eedc | [
"CC0-1.0"
] | null | null | null | Machine-Learning-in-Production/02-Model-Management/02-Model-Registry.py | databricks-academy/ml-in-production | 1fd6713e18cfc36357f3a98d75fedc8ffbf9eedc | [
"CC0-1.0"
] | 5 | 2021-08-22T12:12:49.000Z | 2022-02-28T15:47:43.000Z | # Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md # Model Registry
# MAGIC
# MAGIC MLflow Model Registry is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. This lesson explores how to manage models using the MLflow model registry.
# MAGIC
# MAGIC ##  In this lesson you:<br>
# MAGIC - Register a model using MLflow
# MAGIC - Deploy that model into production
# MAGIC - Update a model in production to new version including a staging phase for testing
# MAGIC - Archive and delete models
# COMMAND ----------
# MAGIC %run ../Includes/Classroom-Setup
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Model Registry
# MAGIC
# MAGIC The MLflow Model Registry component is a centralized model store, set of APIs, and UI, to collaboratively manage the full lifecycle of an MLflow Model. It provides model lineage (which MLflow Experiment and Run produced the model), model versioning, stage transitions (e.g. from staging to production), annotations (e.g. with comments, tags), and deployment management (e.g. which production jobs have requested a specific model version).
# MAGIC
# MAGIC Model registry has the following features:<br><br>
# MAGIC
# MAGIC * **Central Repository:** Register MLflow models with the MLflow Model Registry. A registered model has a unique name, version, stage, and other metadata.
# MAGIC * **Model Versioning:** Automatically keep track of versions for registered models when updated.
# MAGIC * **Model Stage:** Assigned preset or custom stages to each model version, like “Staging” and “Production” to represent the lifecycle of a model.
# MAGIC * **Model Stage Transitions:** Record new registration events or changes as activities that automatically log users, changes, and additional metadata such as comments.
# MAGIC * **CI/CD Workflow Integration:** Record stage transitions, request, review and approve changes as part of CI/CD pipelines for better control and governance.
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry.png" style="height: 400px; margin: 20px"/></div>
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_note_24.png"/> See <a href="https://mlflow.org/docs/latest/registry.html" target="_blank">the MLflow docs</a> for more details on the model registry.
# COMMAND ----------
# MAGIC %md ### Registering a Model
# MAGIC
# MAGIC The following workflow will work with either the UI or in pure Python. This notebook will use pure Python.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_note_24.png"/> Explore the UI throughout this lesson by clicking the "Models" tab on the left-hand side of the screen.
# COMMAND ----------
# MAGIC %md Train a model and log it to MLflow.
# COMMAND ----------
import mlflow
import mlflow.sklearn
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from mlflow.models.signature import infer_signature
# Load the cleaned Airbnb listings dataset; "price" is the regression target.
df = pd.read_parquet("/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.parquet")
X_train, X_test, y_train, y_test = train_test_split(df.drop(["price"], axis=1), df["price"], random_state=42)
n_estimators = 100
max_depth = 5
rf = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)
rf.fit(X_train, y_train)
# Capture an input example and model signature so the logged model records
# its expected input schema alongside the artifact.
input_example = X_train.head(3)
signature = infer_signature(X_train, pd.DataFrame(y_train))
# Log model, evaluation metric and hyperparameters under a single MLflow run.
with mlflow.start_run(run_name="RF Model") as run:
    mlflow.sklearn.log_model(rf, "model", input_example=input_example, signature=signature)
    mlflow.log_metric("mse", mean_squared_error(y_test, rf.predict(X_test)))
    mlflow.log_param("n_estimators", n_estimators)
    mlflow.log_param("max_depth", max_depth)
    run_id = run.info.run_id  # kept for registering the model in later cells
# COMMAND ----------
# MAGIC %md Create a unique model name so you don't clash with other workspace users.
# COMMAND ----------
import uuid
# A short random hex suffix keeps the registered model name unique per user.
model_name = f"airbnb_rf_model_{uuid.uuid4().hex[:6]}"
model_name
# COMMAND ----------
# MAGIC %md Register the model.
# COMMAND ----------
# Register the logged "model" artifact of the run above as version 1.
model_uri = f"runs:/{run_id}/model"
model_details = mlflow.register_model(model_uri=model_uri, name=model_name)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC **Open the *Models* tab on the left of the screen to explore the registered model.** Note the following:<br><br>
# MAGIC
# MAGIC * It logged who trained the model and what code was used
# MAGIC * It logged a history of actions taken on this model
# MAGIC * It logged this model as a first version
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/301/registered_model_new.png" style="height: 600px; margin: 20px"/></div>
# COMMAND ----------
# MAGIC %md Check the status. It will initially be in `PENDING_REGISTRATION` status.
# COMMAND ----------
from mlflow.tracking.client import MlflowClient
client = MlflowClient()
# Registration is asynchronous; the version starts in PENDING_REGISTRATION.
model_version_details = client.get_model_version(name=model_name, version=1)
model_version_details.status
# COMMAND ----------
# MAGIC %md Now add a model description
# COMMAND ----------
# Model-level description: applies to the registered model as a whole.
client.update_registered_model(
    name=model_details.name,
    description="This model forecasts Airbnb housing list prices based on various listing inputs."
)
# COMMAND ----------
# MAGIC %md Add a version-specific description.
# COMMAND ----------
# Version-level description: applies only to this specific model version.
client.update_model_version(
    name=model_details.name,
    version=model_details.version,
    description="This model version was built using sklearn."
)
# COMMAND ----------
# MAGIC %md ### Deploying a Model
# MAGIC
# MAGIC The MLflow Model Registry defines several model stages: `None`, `Staging`, `Production`, and `Archived`. Each stage has a unique meaning. For example, `Staging` is meant for model testing, while `Production` is for models that have completed the testing or review processes and have been deployed to applications.
# MAGIC
# MAGIC Users with appropriate permissions can transition models between stages. In private preview, any user can transition a model to any stage. In the near future, administrators in your organization will be able to control these permissions on a per-user and per-model basis.
# MAGIC
# MAGIC If you have permission to transition a model to a particular stage, you can make the transition directly by using the `MlflowClient.update_model_version()` function. If you do not have permission, you can request a stage transition using the REST API; for example: ```%sh curl -i -X POST -H "X-Databricks-Org-Id: <YOUR_ORG_ID>" -H "Authorization: Bearer <YOUR_ACCESS_TOKEN>" https://<YOUR_DATABRICKS_WORKSPACE_URL>/api/2.0/preview/mlflow/transition-requests/create -d '{"comment": "Please move this model into production!", "model_version": {"version": 1, "registered_model": {"name": "power-forecasting-model"}}, "stage": "Production"}'
# MAGIC ```
# COMMAND ----------
# MAGIC %md Now that you've learned about stage transitions, transition the model to the `Production` stage.
# COMMAND ----------
import time
time.sleep(10) # In case the registration is still pending
# COMMAND ----------
# Move version 1 of the model into the "Production" stage.
client.transition_model_version_stage(
    name=model_details.name,
    version=model_details.version,
    stage="Production"
)
# COMMAND ----------
# MAGIC %md Fetch the model's current status.
# COMMAND ----------
# Re-fetch the version metadata to confirm the stage transition took effect.
model_version_details = client.get_model_version(
    name=model_details.name,
    version=model_details.version,
)
print(f"The current model stage is: '{model_version_details.current_stage}'")
# COMMAND ----------
# MAGIC %md Fetch the latest model using a `pyfunc`. Loading the model in this way allows us to use the model regardless of the package that was used to train it.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_note_24.png"/> You can load a specific version of the model too.
# COMMAND ----------
import mlflow.pyfunc
# Load version 1 via the generic pyfunc flavor, independent of the training
# library that produced the model.
model_version_uri = f"models:/{model_name}/1"
print(f"Loading registered model version from URI: '{model_version_uri}'")
model_version_1 = mlflow.pyfunc.load_model(model_version_uri)
# COMMAND ----------
# MAGIC %md Apply the model.
# COMMAND ----------
# Score the held-out features with the loaded model.
model_version_1.predict(X_test)
# COMMAND ----------
# MAGIC %md ### Deploying a New Model Version
# MAGIC
# MAGIC The MLflow Model Registry enables you to create multiple model versions corresponding to a single registered model. By performing stage transitions, you can seamlessly integrate new model versions into your staging or production environments.
# COMMAND ----------
# MAGIC %md Create a new model version and register that model when it's logged.
# COMMAND ----------
# Retrain with a larger forest to produce a second model version.
n_estimators = 300
max_depth = 10
rf = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)
rf.fit(X_train, y_train)
input_example = X_train.head(3)
signature = infer_signature(X_train, pd.DataFrame(y_train))
with mlflow.start_run(run_name="RF Model") as run:
    # Specify the `registered_model_name` parameter of the `mlflow.sklearn.log_model()`
    # function to register the model with the MLflow Model Registry. This automatically
    # creates a new model version
    mlflow.sklearn.log_model(
        sk_model=rf,
        artifact_path="sklearn-model",
        registered_model_name=model_name,
        input_example=input_example,
        signature=signature
    )
    mlflow.log_metric("mse", mean_squared_error(y_test, rf.predict(X_test)))
    mlflow.log_param("n_estimators", n_estimators)
    mlflow.log_param("max_depth", max_depth)
    run_id = run.info.run_id
# COMMAND ----------
# MAGIC %md-sandbox Check the UI to see the new model version.
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/301/model_version_new.png" style="height: 600px; margin: 20px"/></div>
# COMMAND ----------
# MAGIC %md Use the search functionality to grab the latest model version.
# COMMAND ----------
# Find the highest (newest) version number registered under this model name.
model_version_infos = client.search_model_versions(f"name = '{model_name}'")
new_model_version = max([model_version_info.version for model_version_info in model_version_infos])
print(f"New model version: {new_model_version}")
# COMMAND ----------
# MAGIC %md Add a description to this new version.
# COMMAND ----------
client.update_model_version(
    name=model_name,
    version=new_model_version,
    description="This model version is a random forest containing 300 decision trees and a max depth of 10 that was trained in scikit-learn."
)
# COMMAND ----------
# MAGIC %md Put this new model version into `Staging`
# COMMAND ----------
time.sleep(10) # In case the registration is still pending
# Move the new version into "Staging" for testing before production.
client.transition_model_version_stage(
    name=model_name,
    version=new_model_version,
    stage="Staging"
)
# COMMAND ----------
# MAGIC %md Since this model is now in staging, you can execute an automated CI/CD pipeline against it to test it before going into production. Once that is completed, you can push that model into production.
# COMMAND ----------
client.transition_model_version_stage(
    name=model_name,
    version=new_model_version,
    stage="Production",
    archive_existing_versions=True # Archive old versions of this model
)
# COMMAND ----------
# MAGIC %md ### Deleting
# MAGIC
# MAGIC You can now delete old versions of the model.
# COMMAND ----------
# MAGIC %md Delete version 1.
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_note_24.png"/> You cannot delete a model that is not first archived.
# COMMAND ----------
# Delete version 1 (it was auto-archived by the production transition above;
# only archived versions can be deleted).
client.delete_model_version(
    name=model_name,
    version=1
)
# COMMAND ----------
# MAGIC %md Archive version 2 of the model too.
# COMMAND ----------
client.transition_model_version_stage(
    name=model_name,
    version=2,
    stage="Archived"
)
# COMMAND ----------
# MAGIC %md Now delete the entire registered model.
# COMMAND ----------
client.delete_registered_model(model_name)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC
# MAGIC ## Central Model Registry
# MAGIC
# MAGIC In this lesson, we have explored how to use your workspace's local model registry. Databricks also supports sharing models across multiple workspaces.
# MAGIC
# MAGIC Typically multiple workspaces are used for different stages of the deployment lifecycle, such as development, staging, and production.
# MAGIC
# MAGIC Having a central model registry allows us to pass artifacts into our production environment. This keeps the production environment as separate as possible from our other environments.
# MAGIC
# MAGIC We recommend the use of a **Central Model Registry** to help with this.
# MAGIC
# MAGIC This architecture uses a separate Databricks workspace for the sole purpose of hosting a model registry. This acts as a swap point for transitioning models. Once a model is ready to be sent to a new stage of deployment, it is pushed to the central model registry. Other environments then pull the artifacts into the workspace dedicated to the next stage of the deployment.
# MAGIC
# MAGIC The diagram below shows how this process works:
# MAGIC
# MAGIC <img src="https://docs.databricks.com/_images/multiworkspace1.png" style="height: 450px">
# MAGIC
# MAGIC This separates environments so that issues in development don't affect production systems. It also plays a critical role in CI/CD infrastructure where models are tested in the staging branch of this central model registry before being promoted to production. See [this documentation for more information.](https://docs.databricks.com/applications/machine-learning/manage-model-lifecycle/multiple-workspaces.html)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review
# MAGIC **Question:** How does MLflow tracking differ from the model registry?
# MAGIC **Answer:** Tracking is meant for experimentation and development. The model registry is designed to take a model from tracking and put it through staging and into production. This is often the point that a data engineer or a machine learning engineer takes responsibility for the deployment process.
# MAGIC
# MAGIC **Question:** Why do I need a model registry?
# MAGIC **Answer:** Just as MLflow tracking provides end-to-end reproducibility for the machine learning training process, a model registry provides reproducibility and governance for the deployment process. Since production systems are mission critical, components can be isolated with ACL's so only specific individuals can alter production models. Version control and CI/CD workflow integration is also a critical dimension of deploying models into production.
# MAGIC
# MAGIC **Question:** What can I do programatically versus using the UI?
# MAGIC **Answer:** Most operations can be done using the UI or in pure Python. A model must be tracked using Python, but from that point on everything can be done either way. For instance, a model logged using the MLflow tracking API can then be registered using the UI and can then be pushed into production.
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Next Steps
# MAGIC
# MAGIC Next we'll take a look at, [Webhooks and Testing]($./03a-Webhooks-and-Testing)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC **Q:** Where can I find out more information on MLflow Model Registry?
# MAGIC **A:** Check out <a href="https://mlflow.org/docs/latest/registry.html" target="_blank">the MLflow documentation</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2021 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="http://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="http://help.databricks.com/">Support</a>
| 40.692118 | 645 | 0.736699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,065 | 0.790429 |
142d5302e65382732b38002b4366ded5bfe975f2 | 7,151 | py | Python | corehq/apps/callcenter/utils.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/callcenter/utils.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | corehq/apps/callcenter/utils.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from collections import namedtuple
from datetime import datetime, timedelta
import pytz
from casexml.apps.case.dbaccessors import get_open_case_docs_in_domain
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
import uuid
from xml.etree import ElementTree
from corehq.apps.app_manager.const import USERCASE_TYPE
from corehq.apps.domain.models import Domain
from corehq.apps.es.domains import DomainES
from corehq.apps.es import filters
from corehq.apps.hqcase.utils import submit_case_blocks, get_case_by_domain_hq_user_id
from corehq.feature_previews import CALLCENTER
from corehq.util.quickcache import quickcache
from corehq.util.timezones.conversions import UserTime, ServerTime
from dimagi.utils.couch import CriticalSection
class DomainLite(namedtuple('DomainLite', 'name default_timezone cc_case_type use_fixtures')):
    """Lightweight view of a domain's call-center configuration."""

    def midnights(self, utcnow=None):
        """Return a list containing two datetimes in UTC that correspond to
        midnight in the domain's timezone on either side of the current UTC
        datetime, i.e. [<previous midnight in TZ>, <next midnight in TZ>]

        >>> d = DomainLite('', 'Asia/Kolkata', '', True)
        >>> d.midnights(datetime(2015, 8, 27, 18, 30, 0 ))
        [datetime.datetime(2015, 8, 26, 18, 30), datetime.datetime(2015, 8, 27, 18, 30)]
        >>> d.midnights(datetime(2015, 8, 27, 18, 31, 0 ))
        [datetime.datetime(2015, 8, 27, 18, 30), datetime.datetime(2015, 8, 28, 18, 30)]
        """
        now_utc = utcnow or datetime.utcnow()
        timezone = pytz.timezone(self.default_timezone)
        local_now = ServerTime(now_utc).user_time(timezone).done()
        local_midnight = local_now.replace(hour=0, minute=0, second=0, microsecond=0)
        midnight_utc = UserTime(local_midnight).server_time().done()
        # Step one day forward if that midnight is already in the past,
        # otherwise one day back, to get the midnight on the other side
        # of ``now_utc``.
        offset = timedelta(days=1 if midnight_utc < now_utc else -1)
        return sorted([midnight_utc, midnight_utc + offset])
CallCenterCase = namedtuple('CallCenterCase', 'case_id hq_user_id')
def sync_user_case(commcare_user, case_type, owner_id):
    """
    Create or update the case of type ``case_type`` that mirrors
    ``commcare_user``'s details.

    Each time a CommCareUser is saved this method gets called; it is also
    called to create user cases when the usercase is used for the first
    time.  A case block is only submitted when something actually changed
    (or when no case exists yet).
    """
    lock_key = 'user_case_%s_for_%s' % (case_type, commcare_user._id)
    with CriticalSection([lock_key]):
        domain = commcare_user.project

        def _is_valid_element_name(candidate):
            # Case properties are submitted as XML elements, so the key must
            # be a legal XML element name.
            try:
                ElementTree.fromstring('<{}/>'.format(candidate))
            except ElementTree.ParseError:
                return False
            return True

        # Drop any user-data keys that aren't valid XML element names.
        case_fields = {
            key: value
            for key, value in commcare_user.user_data.items()
            if _is_valid_element_name(key)
        }
        # language or phone_number can be null and will break case submission
        case_fields.update({
            'name': commcare_user.name or commcare_user.raw_username,
            'username': commcare_user.raw_username,
            'email': commcare_user.email,
            'language': commcare_user.language or '',
            'phone_number': commcare_user.phone_number or ''
        })

        case = get_case_by_domain_hq_user_id(domain.name, commcare_user._id, case_type)
        should_close = commcare_user.to_be_deleted() or not commcare_user.is_active

        caseblock = None
        if case:
            existing_props = dict(case.dynamic_case_properties())
            # Short-circuits: the per-field scan only runs when none of the
            # cheaper top-level checks detected a difference.
            needs_update = (
                should_close != case.closed
                or case.type != case_type
                or case.name != case_fields['name']
                or case.owner_id != owner_id
                or any(
                    existing_props.get(field) != value
                    for field, value in case_fields.items()
                    if field != 'name'
                )
            )
            if needs_update:
                caseblock = CaseBlock(
                    create=False,
                    case_id=case._id,
                    owner_id=owner_id,
                    case_type=case_type,
                    close=should_close,
                    update=case_fields
                )
        else:
            case_fields['hq_user_id'] = commcare_user._id
            caseblock = CaseBlock(
                create=True,
                case_id=uuid.uuid4().hex,
                owner_id=owner_id,
                user_id=owner_id,
                case_type=case_type,
                update=case_fields
            )

        if caseblock is not None:
            submit_case_blocks(ElementTree.tostring(caseblock.as_xml()), domain.name)
def sync_call_center_user_case(user):
    """Sync the call center case for ``user`` if the user's domain has the
    call center feature enabled."""
    domain = user.project
    if not (domain and domain.call_center_config.enabled):
        return
    config = domain.call_center_config
    if config.use_user_location_as_owner:
        owner_id = user.location_id
    else:
        owner_id = config.case_owner_id
    sync_user_case(user, config.case_type, owner_id)
def sync_usercase(user):
    """Sync the usercase for ``user`` (owned by the user itself) if the
    user's domain has usercases enabled."""
    domain = user.project
    if domain and domain.usercase_enabled:
        sync_user_case(user, USERCASE_TYPE, user.get_id)
def is_midnight_for_domain(midnight_form_domain, error_margin=15, current_time=None):
    """Return True if ``current_time`` (defaults to ``datetime.utcnow()``)
    falls within ``error_margin`` minutes at-or-after
    ``midnight_form_domain``."""
    now = current_time or datetime.utcnow()
    elapsed = now - midnight_form_domain
    # timedelta is normalized, so ``elapsed.days >= 0`` is exactly
    # ``elapsed >= timedelta(0)``; express both bounds as one chained check.
    return timedelta(0) <= elapsed < timedelta(minutes=error_margin)
def get_call_center_domains():
    """Return a ``DomainLite`` for every active, non-snapshot domain that
    has the call center feature enabled."""
    query = (
        DomainES()
        .is_active()
        .is_snapshot(False)
        .filter(filters.term('call_center_config.enabled', True))
        .fields(['name', 'default_timezone', 'call_center_config.case_type', 'call_center_config.use_fixtures'])
    )
    return [
        DomainLite(
            name=hit['name'],
            default_timezone=hit['default_timezone'],
            cc_case_type=hit.get('call_center_config.case_type', ''),
            use_fixtures=hit.get('call_center_config.use_fixtures', True)
        )
        for hit in query.run().hits
    ]
def get_call_center_cases(domain_name, case_type, user=None):
    """Return ``CallCenterCase`` tuples for all open cases of ``case_type``
    in the domain that carry a truthy ``hq_user_id`` property.

    When ``user`` is given, only cases owned by one of that user's owner
    ids are considered; otherwise all open cases of the type are scanned.
    """
    if user:
        case_docs = (
            doc
            for owner_id in user.get_owner_ids()
            for doc in get_open_case_docs_in_domain(domain_name, case_type,
                                                    owner_id=owner_id)
        )
    else:
        case_docs = get_open_case_docs_in_domain(domain_name, case_type)

    return [
        CallCenterCase(case_id=doc['_id'], hq_user_id=doc.get('hq_user_id'))
        for doc in case_docs
        if doc.get('hq_user_id')
    ]
@quickcache(['domain'])
def get_call_center_case_type_if_enabled(domain):
    """Return the domain's configured call center case type, or None when
    the CALLCENTER feature preview is not enabled.  Cached per domain."""
    if not CALLCENTER.enabled(domain):
        return None
    return Domain.get_by_name(domain).call_center_config.case_type
| 37.244792 | 116 | 0.638932 | 1,212 | 0.169487 | 0 | 0 | 179 | 0.025031 | 0 | 0 | 1,391 | 0.194518 |