Dataset columns (type and observed min/max, as reported by the viewer):

hexsha                                     stringlengths   40 .. 40
size                                       int64           5 .. 2.06M
ext                                        stringclasses   11 values
lang                                       stringclasses   1 value
max_stars_repo_path                        stringlengths   3 .. 251
max_stars_repo_name                        stringlengths   4 .. 130
max_stars_repo_head_hexsha                 stringlengths   40 .. 78
max_stars_repo_licenses                    listlengths     1 .. 10
max_stars_count                            int64           1 .. 191k
max_stars_repo_stars_event_min_datetime    stringlengths   24 .. 24
max_stars_repo_stars_event_max_datetime    stringlengths   24 .. 24
max_issues_repo_path                       stringlengths   3 .. 251
max_issues_repo_name                       stringlengths   4 .. 130
max_issues_repo_head_hexsha                stringlengths   40 .. 78
max_issues_repo_licenses                   listlengths     1 .. 10
max_issues_count                           int64           1 .. 116k
max_issues_repo_issues_event_min_datetime  stringlengths   24 .. 24
max_issues_repo_issues_event_max_datetime  stringlengths   24 .. 24
max_forks_repo_path                        stringlengths   3 .. 251
max_forks_repo_name                        stringlengths   4 .. 130
max_forks_repo_head_hexsha                 stringlengths   40 .. 78
max_forks_repo_licenses                    listlengths     1 .. 10
max_forks_count                            int64           1 .. 105k
max_forks_repo_forks_event_min_datetime    stringlengths   24 .. 24
max_forks_repo_forks_event_max_datetime    stringlengths   24 .. 24
content                                    stringlengths   1 .. 1.05M
avg_line_length                            float64         1 .. 1.02M
max_line_length                            int64           3 .. 1.04M
alphanum_fraction                          float64         0 .. 1
f407eb6974ae23f62280d5ff068afc9b35ea9eeb
984
py
Python
cli.py
palazzem/elmo-server
b2e02d600a431dc1db31090f0d8dd09a8d586373
[ "BSD-3-Clause" ]
null
null
null
cli.py
palazzem/elmo-server
b2e02d600a431dc1db31090f0d8dd09a8d586373
[ "BSD-3-Clause" ]
8
2019-05-20T19:26:01.000Z
2019-05-26T13:02:45.000Z
cli.py
palazzem/elmo-server
b2e02d600a431dc1db31090f0d8dd09a8d586373
[ "BSD-3-Clause" ]
null
null
null
import click

APP_YAML_TEMPLATE = """runtime: python37

env_variables:
  ELMO_BASE_URL: '{BASE_URL}'
  ELMO_VENDOR: '{VENDOR}'

handlers:
- url: /.*
  script: auto
  secure: always
  redirect_http_response_code: 301
"""

if __name__ == "__main__":
    generate_app_yaml()
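The row above is truncated before the function it calls, so `generate_app_yaml()` is never defined. A minimal sketch, assuming a click-based CLI that renders `APP_YAML_TEMPLATE` (the option names and output path are hypothetical):

@click.command()
@click.option("--base-url", prompt=True, help="Value for ELMO_BASE_URL (assumed option).")
@click.option("--vendor", prompt=True, help="Value for ELMO_VENDOR (assumed option).")
def generate_app_yaml(base_url, vendor):
    # Hypothetical reconstruction: fill the App Engine template and write app.yaml.
    with open("app.yaml", "w") as f:
        f.write(APP_YAML_TEMPLATE.format(BASE_URL=base_url, VENDOR=vendor))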
25.230769
76
0.704268
f408463fbafd0299afebe10a70bf543c07547fe8
4,541
py
Python
utils/data/dataset_catalog.py
rs9899/Parsing-R-CNN
a0c9ed8850abe740eedf8bfc6e1577cc0aa3fc7b
[ "MIT" ]
289
2018-10-25T09:42:57.000Z
2022-03-30T08:31:50.000Z
utils/data/dataset_catalog.py
qzane/Parsing-R-CNN
8c4d940dcd322bf7a8671f8b0faaabb3259bd384
[ "MIT" ]
28
2019-01-07T02:39:49.000Z
2022-01-25T08:54:36.000Z
utils/data/dataset_catalog.py
qzane/Parsing-R-CNN
8c4d940dcd322bf7a8671f8b0faaabb3259bd384
[ "MIT" ]
44
2018-12-20T07:36:46.000Z
2022-03-16T14:30:20.000Z
import os.path as osp

# Root directory of project
ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))

# Path to data dir
_DATA_DIR = osp.abspath(osp.join(ROOT_DIR, 'data'))

# Required dataset entry keys
_IM_DIR = 'image_directory'
_ANN_FN = 'annotation_file'

# Available datasets
COMMON_DATASETS = {
    'coco_2017_train': {
        _IM_DIR: _DATA_DIR + '/coco/images/train2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/instances_train2017.json',
    },
    'coco_2017_val': {
        _IM_DIR: _DATA_DIR + '/coco/images/val2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/instances_val2017.json',
    },
    'coco_2017_test': {
        _IM_DIR: _DATA_DIR + '/coco/images/test2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/image_info_test2017.json',
    },
    'coco_2017_test-dev': {
        _IM_DIR: _DATA_DIR + '/coco/images/test2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
    },
    'keypoints_coco_2017_train': {
        _IM_DIR: _DATA_DIR + '/coco/images/train2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/person_keypoints_train2017.json'
    },
    'keypoints_coco_2017_val': {
        _IM_DIR: _DATA_DIR + '/coco/images/val2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/person_keypoints_val2017.json'
    },
    'keypoints_coco_2017_test': {
        _IM_DIR: _DATA_DIR + '/coco/images/test2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/image_info_test2017.json'
    },
    'keypoints_coco_2017_test-dev': {
        _IM_DIR: _DATA_DIR + '/coco/images/test2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
    },
    'dense_coco_2017_train': {
        _IM_DIR: _DATA_DIR + '/coco/images/train2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_train2017.json',
    },
    'dense_coco_2017_val': {
        _IM_DIR: _DATA_DIR + '/coco/images/val2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_val2017.json',
    },
    'dense_coco_2017_test': {
        _IM_DIR: _DATA_DIR + '/coco/images/test2017',
        _ANN_FN: _DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_test.json',
    },
    'CIHP_train': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/CIHP/train_img',
        _ANN_FN: _DATA_DIR + '/CIHP/annotations/CIHP_train.json',
    },
    'CIHP_val': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/CIHP/val_img',
        _ANN_FN: _DATA_DIR + '/CIHP/annotations/CIHP_val.json',
    },
    'CIHP_test': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/CIHP/test_img',
        _ANN_FN: _DATA_DIR + '/CIHP/annotations/CIHP_test.json',
    },
    'MHP-v2_train': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/MHP-v2/train_img',
        _ANN_FN: _DATA_DIR + '/MHP-v2/annotations/MHP-v2_train.json',
    },
    'MHP-v2_val': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/MHP-v2/val_img',
        _ANN_FN: _DATA_DIR + '/MHP-v2/annotations/MHP-v2_val.json',
    },
    'MHP-v2_test': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/MHP-v2/test_img',
        _ANN_FN: _DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_all.json',
    },
    'MHP-v2_test_inter_top10': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/MHP-v2/test_img',
        _ANN_FN: _DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_inter_top10.json',
    },
    'MHP-v2_test_inter_top20': {  # new addition by wzh
        _IM_DIR: _DATA_DIR + '/MHP-v2/test_img',
        _ANN_FN: _DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_inter_top20.json',
    },
    'PASCAL-Person-Part_train': {  # new addition by soeaver
        _IM_DIR: _DATA_DIR + '/PASCAL-Person-Part/train_img',
        _ANN_FN: _DATA_DIR + '/PASCAL-Person-Part/annotations/pascal_person_part_train.json',
    },
    'PASCAL-Person-Part_test': {  # new addition by soeaver
        _IM_DIR: _DATA_DIR + '/PASCAL-Person-Part/test_img',
        _ANN_FN: _DATA_DIR + '/PASCAL-Person-Part/annotations/pascal_person_part_test.json',
    }
}
31.978873
88
0.580269
f408a9fe238e011fdbd51d60d3da477f1a193548
1,713
py
Python
prepareDataSet.py
Dakewe-DS1000/LapRSNet
47e630acd3f0523ee5ac698566ff45e645681b23
[ "Apache-2.0" ]
6
2019-11-14T12:12:43.000Z
2021-07-10T13:05:14.000Z
prepareDataSet.py
Dakewe-DS1000/LapRSNet
47e630acd3f0523ee5ac698566ff45e645681b23
[ "Apache-2.0" ]
null
null
null
prepareDataSet.py
Dakewe-DS1000/LapRSNet
47e630acd3f0523ee5ac698566ff45e645681b23
[ "Apache-2.0" ]
1
2021-05-18T06:41:11.000Z
2021-05-18T06:41:11.000Z
# Prepare my dataset for Digital Pathology
import os
import math
import cv2
import pdb

# Raw string so the backslashes are not interpreted as escape sequences
rootFolder = r"F:\DataBase\LymphnodePathology"
trainFolder = rootFolder + "\\trainDataSet"
testFolder = rootFolder + "\\testDataSet"

srcTrainFilePath = trainFolder + "\\20X\\"
dstTrainFilePath = trainFolder + "\\5X\\"
srcTestFilePath = testFolder + "\\20X\\"
dstTestFilePath = testFolder + "\\5X\\"

factor = 4

if __name__ == '__main__':
    srcTrainFileNameList = os.listdir(srcTrainFilePath)
    srcTestFileNameList = os.listdir(srcTestFilePath)

    for srcTrainFileName in srcTrainFileNameList:
        srcTrainImage = cv2.imread(srcTrainFilePath + srcTrainFileName)
        imgHeight, imgWidth, _ = srcTrainImage.shape
        newWidth = int(imgWidth / factor)
        newHeight = int(imgHeight / factor)
        newSize = (newWidth, newHeight)
        dstTrainImage = cv2.resize(srcTrainImage, newSize, interpolation=cv2.INTER_AREA)
        print("Train File Name : %s, (%d, %d) => (%d, %d)" % (srcTrainFileName, imgWidth, imgHeight, newSize[0], newSize[1]))
        cv2.imwrite(dstTrainFilePath + srcTrainFileName, dstTrainImage)

    for srcTestFileName in srcTestFileNameList:
        srcTestImage = cv2.imread(srcTestFilePath + srcTestFileName)
        imgHeight, imgWidth, _ = srcTestImage.shape
        newWidth = int(imgWidth / factor)
        newHeight = int(imgHeight / factor)
        newSize = (newWidth, newHeight)
        dstTestImage = cv2.resize(srcTestImage, newSize, interpolation=cv2.INTER_AREA)
        print("Test File Name : %s, (%d, %d) => (%d, %d)" % (srcTestFileName, imgWidth, imgHeight, newSize[0], newSize[1]))
        cv2.imwrite(dstTestFilePath + srcTestFileName, dstTestImage)
34.959184
124
0.694688
f40992ff6f047f5e4c5a436cd251bdd645155f4b
424
py
Python
sample_project/sample_content/serializers.py
zentrumnawi/solid-backend
0a6ac51608d4c713903856bb9b0cbf0068aa472c
[ "MIT" ]
1
2021-01-24T11:54:01.000Z
2021-01-24T11:54:01.000Z
sample_project/sample_content/serializers.py
zentrumnawi/solid-backend
0a6ac51608d4c713903856bb9b0cbf0068aa472c
[ "MIT" ]
112
2020-04-22T10:07:03.000Z
2022-03-29T15:25:26.000Z
sample_project/sample_content/serializers.py
zentrumnawi/solid-backend
0a6ac51608d4c713903856bb9b0cbf0068aa472c
[ "MIT" ]
null
null
null
from rest_framework import serializers

from solid_backend.photograph.serializers import PhotographSerializer
from solid_backend.media_object.serializers import MediaObjectSerializer

from .models import SampleProfile
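The row is truncated right after the imports. A minimal sketch of a serializer that could follow, assuming SampleProfile is a regular Django model; the related name `media_objects` is an assumption, not taken from the source:

class SampleProfileSerializer(serializers.ModelSerializer):
    # Hypothetical reconstruction: expose the profile with its related media.
    media_objects = MediaObjectSerializer(many=True, read_only=True)

    class Meta:
        model = SampleProfile
        fields = "__all__"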
28.266667
72
0.794811
f40b92b97d4fadcb832680913b2744036577bcf3
782
py
Python
tests/reshape_4/generate_pb.py
wchsieh/utensor_cgen
1774f0dfc0eb98b274271e7a67457dc3593b2593
[ "Apache-2.0" ]
null
null
null
tests/reshape_4/generate_pb.py
wchsieh/utensor_cgen
1774f0dfc0eb98b274271e7a67457dc3593b2593
[ "Apache-2.0" ]
null
null
null
tests/reshape_4/generate_pb.py
wchsieh/utensor_cgen
1774f0dfc0eb98b274271e7a67457dc3593b2593
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf8 -*-
import os

from utensor_cgen.utils import save_consts, save_graph, save_idx
import numpy as np
import tensorflow as tf

if __name__ == "__main__":
    generate()
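The `generate()` the row calls is cut off by truncation. A minimal sketch under TF1 semantics, assuming it builds a tiny reshape graph and saves fixtures with the imported helpers; the `save_consts`/`save_graph`/`save_idx` argument lists and the output names are assumptions:

def generate():
    # Hypothetical reconstruction: constant -> reshape graph for the test case.
    graph = tf.Graph()
    with graph.as_default():
        x = tf.constant(np.arange(12, dtype=np.float32), name="x")
        output = tf.reshape(x, [3, 4], name="output")
        with tf.Session() as sess:
            save_consts(sess, ".")                     # assumed signature
            save_graph(graph, "test_reshape_4", ".")   # assumed signature
            np_output = sess.run(output)
            save_idx(np_output, os.path.join(".", "output_reshape_4.idx"))  # assumed signature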
28.962963
64
0.673913
f40be0fa2a141ea92705b94cef65862a1f2be235
2,619
py
Python
junn-predict/junn_predict/common/logging.py
modsim/junn
a40423b98c6a3739dd0b2ba02d546a5db91f9215
[ "BSD-2-Clause" ]
null
null
null
junn-predict/junn_predict/common/logging.py
modsim/junn
a40423b98c6a3739dd0b2ba02d546a5db91f9215
[ "BSD-2-Clause" ]
null
null
null
junn-predict/junn_predict/common/logging.py
modsim/junn
a40423b98c6a3739dd0b2ba02d546a5db91f9215
[ "BSD-2-Clause" ]
null
null
null
"""Logging helpers.""" import logging import sys import colorlog import tqdm def setup_logging(level): """ Set the logging up to the specified level. :param level: Log level :return: None """ name_to_log_level = get_name_to_log_level_dict() if level in name_to_log_level: level = name_to_log_level[level] tqdm_log_handler = TqdmLoggingHandler() log_format = ( "%(asctime)-15s.%(msecs)03d %(process)d %(levelname)s %(name)s %(message)s" ) log_datefmt = '%Y-%m-%d %H:%M:%S' tqdm_log_handler.setFormatter( colorlog.TTYColoredFormatter( fmt='%(log_color)s' + log_format, datefmt=log_datefmt, stream=sys.stdout ) ) buffer = DelayedFileLog() log_handlers = [tqdm_log_handler, buffer] # noinspection PyArgumentList logging.basicConfig( level=level, format=log_format, datefmt=log_datefmt, handlers=log_handlers ) def get_name_to_log_level_dict(): """ Return a dict with a mapping of log levels. :return: The dict """ # noinspection PyProtectedMember name_to_log_level = logging._nameToLevel.copy() return name_to_log_level def get_log_levels(): """ Return supported log levels. :return: List of log levels """ log_levels = [ k for k, v in sorted(get_name_to_log_level_dict().items(), key=lambda ab: ab[1]) ] log_levels.remove('NOTSET') return log_levels
25.930693
88
0.640321
f40cae84710b69af5184821f31d2608460ea3b50
2,284
py
Python
subpartcode/ultrasonic_basic_code.py
LesterYHZ/Automated-Bridge-Inspection-Robot-Project
c3f4e12f9b60a8a6b041bf2b6d0461a1bb39c726
[ "MIT" ]
1
2020-04-15T01:17:06.000Z
2020-04-15T01:17:06.000Z
subpartcode/ultrasonic_basic_code.py
LesterYHZ/Automated-Bridge-Inspection-Robot-Project
c3f4e12f9b60a8a6b041bf2b6d0461a1bb39c726
[ "MIT" ]
null
null
null
subpartcode/ultrasonic_basic_code.py
LesterYHZ/Automated-Bridge-Inspection-Robot-Project
c3f4e12f9b60a8a6b041bf2b6d0461a1bb39c726
[ "MIT" ]
1
2020-04-13T16:45:06.000Z
2020-04-13T16:45:06.000Z
# Basic Ultrasonic sensor (HC-SR04) code

import RPi.GPIO as GPIO  # GPIO RPI library
import time              # makes sure Pi waits between steps

GPIO.setmode(GPIO.BCM)   # sets GPIO pin numbering
#GPIO.setmode(GPIO.BOARD)

# Remove warnings
GPIO.setwarnings(False)

# Create loop variable
#loop = 1

# BCM
TRIG = 23  # output pin - triggers the sensor
ECHO = 24  # input pin - reads the return signal from the sensor

# BOARD
#TRIG=16
#ECHO=18

# Looping not necessary

# Print a message to let the user know that distance measurement is in progress
print("Distance Measurement In Progress")

# Set two GPIO ports as inputs/outputs
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)

#while loop == 1:  # Looping forever
while True:  # Looping forever
    # Ensure the trigger pin is set low
    GPIO.output(TRIG, False)

    # Give the sensor a second to settle
    print("Waiting for Sensor to Settle")
    #time.sleep(2)
    time.sleep(1)

    # Create trigger pulse
    GPIO.output(TRIG, True)  # Set trigger pin high for 10uS
    time.sleep(0.00001)
    # Set it low again
    GPIO.output(TRIG, False)

    # Record the last low timestamp for ECHO (just before the return signal
    # is received and the pin goes high)
    while GPIO.input(ECHO) == 0:
        pulse_start = time.time()

    # Once a signal is received, the value changes from low to high, and the
    # signal will remain high for the duration of the echo pulse
    while GPIO.input(ECHO) == 1:
        pulse_end = time.time()

    # speed = distance/time; speed of sound at sea level = 343 m/s
    # 34300 = distance/(time/2)
    # 17150 = distance/time
    # 17150*time = distance
    # Calculating...
    pulse_duration = pulse_end - pulse_start
    distance_cm = pulse_duration * 17150
    #distance_cm = pulse_duration*0.034/2
    distance_cm = round(distance_cm, 2)

    distance_inch = distance_cm / 2.54  # 2.54 cm in 1 inch
    #distance_inch = pulse_duration*0.0133/2
    distance_inch = round(distance_inch, 2)

    distance_feet = distance_inch / 12
    distance_feet = round(distance_feet, 2)

    # Print distance
    #print("Distance:", distance_cm, "cm")
    #print("Distance:", distance_inch, "in")
    print("Distance:", distance_feet, "ft")

    # Delay
    time.sleep(2)

# Clean GPIO pins to ensure all inputs/outputs are reset
GPIO.cleanup()
26.252874
134
0.700088
f40ced5dbf4e657687527c5b0ff47c429e361805
868
py
Python
Mentorama/Modulo 3 - POO/Retangulo.py
MOURAIGOR/python
b267f8ef277a385e3e315e88a22390512bf1e101
[ "MIT" ]
null
null
null
Mentorama/Modulo 3 - POO/Retangulo.py
MOURAIGOR/python
b267f8ef277a385e3e315e88a22390512bf1e101
[ "MIT" ]
null
null
null
Mentorama/Modulo 3 - POO/Retangulo.py
MOURAIGOR/python
b267f8ef277a385e3e315e88a22390512bf1e101
[ "MIT" ]
null
null
null
# Executando
comprimento = int(input('Valor do comprimento: '))
altura = int(input('Valor da altura: '))

retangulo = Retangulo(comprimento, altura)

print('A area do retangulo : %d' % retangulo.calculaArea())
print('O perimetro do retangulo : %d' % retangulo.calculaPerimetro())
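The snippet uses a `Retangulo` (rectangle) class that the truncated row never shows. A minimal sketch consistent with the calls made, keeping the original Portuguese identifiers; the class body is a hypothetical reconstruction:

class Retangulo:
    # Hypothetical reconstruction: comprimento = length, altura = height.
    def __init__(self, comprimento, altura):
        self.comprimento = comprimento
        self.altura = altura

    def calculaArea(self):
        return self.comprimento * self.altura

    def calculaPerimetro(self):
        return 2 * (self.comprimento + self.altura)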
28
71
0.679724
f40f0e5d0f6c305a62e87232ab24691dc4b36cbe
4,053
py
Python
DEMs/denmark/download_dk_dem.py
PeterFogh/digital_elevation_model_use_cases
0e72cc6238ca5217a73d06dc3e8c3229024112c3
[ "MIT" ]
null
null
null
DEMs/denmark/download_dk_dem.py
PeterFogh/digital_elevation_model_use_cases
0e72cc6238ca5217a73d06dc3e8c3229024112c3
[ "MIT" ]
null
null
null
DEMs/denmark/download_dk_dem.py
PeterFogh/digital_elevation_model_use_cases
0e72cc6238ca5217a73d06dc3e8c3229024112c3
[ "MIT" ]
null
null
null
""" Fetch all files from Kortforsyningen FTP server folder. Copyright (c) 2021 Peter Fogh See also command line alternative in `download_dk_dem.sh` """ from ftplib import FTP, error_perm import os from pathlib import Path import time import operator import functools import shutil # TODO: use logging to std instead of print(time.ctime()) from environs import Env # Functions def download_FTP_tree(ftp, remote_dir, local_dir): """ Download FTP directory and all content to local directory. Inspired by https://stackoverflow.com/a/55127679/7796217. Parameters: ftp : ftplib.FTP Established FTP connection after login. remote_dir : pathlib.Path FTP directory to download. local_dir : pathlib.Path Local directory to store downloaded content. """ # Set up empty local dir and FTP current work dir before tree traversal. shutil.rmtree(local_dir) ftp.cwd(remote_dir.parent.as_posix()) local_dir.mkdir(parents=True, exist_ok=True) return _recursive_download_FTP_tree(ftp, remote_dir, local_dir) def _is_ftp_dir(ftp, name): """ Check if FTP entry is a directory. Modified from here https://www.daniweb.com/programming/software-development/threads/243712/ftplib-isdir-or-isfile to accommodate not necessarily being in the top-level directory. Parameters: ftp : ftplib.FTP Established FTP connection after login. name: str Name of FTP file system entry to check if directory or not. """ try: current_dir = ftp.pwd() ftp.cwd(name) #print(f'File system entry "{name=}" is a directory.') ftp.cwd(current_dir) return True except error_perm as e: #print(f'File system entry "{name=}" is a file.') return False def _recursive_download_FTP_tree(ftp, remote_dir, local_dir): """ Download FTP directory and all content to local directory. Inspired by https://stackoverflow.com/a/55127679/7796217. Parameters: ftp : ftplib.FTP Established FTP connection after login. remote_dir : pathlib.Path FTP directory to download. local_dir : pathlib.Path Local directory to store downloaded content. """ print(f'{remote_dir=}') print(f'{local_dir=}') ftp.cwd(remote_dir.name) local_dir.mkdir(exist_ok=True) print(f'{time.ctime()}: Fetching file & directory names within "{remote_dir}".') dir_entries = ftp.nlst() print(f'{time.ctime()}: Fetched file & directory names within "{remote_dir}".') dirs = [] for filename in sorted(dir_entries)[-5:]: # TODO: remove restriction on downloaded of entries if _is_ftp_dir(ftp, filename): dirs.append(filename) else: local_file = local_dir/filename print(f'{time.ctime()}: Downloading "{local_file}".') ftp.retrbinary( cmd=f'RETR {filename}', callback=local_file.open('wb').write) print(f'{time.ctime()}: Downloaded "{local_file}".') print(f'Traverse dir tree to "{dirs=}"') map_download_FTP_tree = map(lambda dir: _recursive_download_FTP_tree( ftp, remote_dir/dir, local_dir/dir), dirs) return functools.reduce(operator.iand, map_download_FTP_tree, True) if __name__ == '__main__': # Load environment variables from local `.env` file. env = Env() env.read_env() # Set up server and source/destination paths. ftp_host = 'ftp.kortforsyningen.dk' dem_ftp_dir = Path('dhm_danmarks_hoejdemodel/DTM') local_ftp_dir = env.path('LOCAL_FTP_DIR', './') local_dem_ftp_dir = local_ftp_dir/'kortforsyningen'/dem_ftp_dir # Perform FTP download. print(f'{time.ctime()}: Connect to {ftp_host}') ftp = FTP(ftp_host) ftp.login(env('KORTFORSYNING_USERNAME'), env('KORTFORSYNING_PASSWORD')) download_FTP_tree(ftp, dem_ftp_dir, local_dem_ftp_dir) ftp.close() print(f'{time.ctime()}: Finished')
32.166667
117
0.66642
f40ff4bc5a583d0c231681fd8bba22b2aa827939
3,481
py
Python
6_refin_widgets.py
jiaxinjiang2919/Refinance-Calculator
f4bb0c536b88692ef90f504fdb2d9bed85588b7c
[ "Apache-2.0" ]
14
2019-05-01T05:03:20.000Z
2022-01-08T03:18:05.000Z
6_refin_widgets.py
jiaxinjiang2919/Refinance-Calculator
f4bb0c536b88692ef90f504fdb2d9bed85588b7c
[ "Apache-2.0" ]
null
null
null
6_refin_widgets.py
jiaxinjiang2919/Refinance-Calculator
f4bb0c536b88692ef90f504fdb2d9bed85588b7c
[ "Apache-2.0" ]
8
2019-05-19T11:24:28.000Z
2022-02-16T20:19:30.000Z
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 24 15:02:37 2019

@author: Matt Macarty
"""

from tkinter import *
import numpy as np

LoanCalculator()
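The `LoanCalculator` class itself falls outside the truncated excerpt. A minimal sketch of what such a tkinter class might look like; the widget layout and field names are hypothetical, only the amortized-payment formula P*r / (1 - (1+r)^-n) is standard:

class LoanCalculator:
    # Hypothetical reconstruction of a simple payment-calculator GUI.
    def __init__(self):
        window = Tk()
        window.title("Loan Calculator")

        Label(window, text="Annual Rate (%)").grid(row=1, column=1, sticky=W)
        Label(window, text="Years").grid(row=2, column=1, sticky=W)
        Label(window, text="Amount").grid(row=3, column=1, sticky=W)
        Label(window, text="Monthly Payment").grid(row=4, column=1, sticky=W)

        self.rate = StringVar()
        self.years = StringVar()
        self.amount = StringVar()
        self.payment = StringVar()
        Entry(window, textvariable=self.rate, justify=RIGHT).grid(row=1, column=2)
        Entry(window, textvariable=self.years, justify=RIGHT).grid(row=2, column=2)
        Entry(window, textvariable=self.amount, justify=RIGHT).grid(row=3, column=2)
        Label(window, textvariable=self.payment).grid(row=4, column=2, sticky=E)
        Button(window, text="Compute", command=self.compute).grid(row=5, column=2, sticky=E)

        window.mainloop()

    def compute(self):
        # Monthly payment for a fully amortized loan.
        r = float(self.rate.get()) / 1200
        n = int(self.years.get()) * 12
        p = float(self.amount.get())
        self.payment.set(round(p * r / (1 - (1 + r) ** -n), 2))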
36.642105
118
0.578857
f4117b390cbdb79866a23c18436a60de53454ed6
19,224
py
Python
ndctl.py
davelarsen58/pmemtool
a7acb0991cbcd683f761d4b108d018d7d2d10aeb
[ "MIT" ]
3
2021-12-17T04:26:30.000Z
2022-03-30T06:32:21.000Z
ndctl.py
davelarsen58/pmemtool
a7acb0991cbcd683f761d4b108d018d7d2d10aeb
[ "MIT" ]
9
2021-12-21T17:14:58.000Z
2022-02-12T00:45:11.000Z
ndctl.py
davelarsen58/pmemtool
a7acb0991cbcd683f761d4b108d018d7d2d10aeb
[ "MIT" ]
1
2022-01-18T23:26:02.000Z
2022-01-18T23:26:02.000Z
#!/usr/bin/python3
#
# PMTOOL NDCTL Python Module
# Copyright (C) David P Larsen
# Released under MIT License

import os
import json
from common import message, get_linenumber, pretty_print
from common import V0, V1, V2, V3, V4, V5, D0, D1, D2, D3, D4, D5
import common as c
import time

DEFAULT_FSTAB_FILE = "/etc/fstab"
DEFAULT_NDCTL_FILE = "/tmp/ndctl_list_NDRH.txt"

DEBUG = 0
VERBOSE = c.VERBOSE

tmp_dir = '/tmp'
timers = []

# If working in a test sandbox, change paths
# to start with path to sandbox
#
if not os.getenv('SANDBOX'):
    SANDBOX = ''
else:
    SANDBOX = os.environ['SANDBOX']
    print('Enabling Sandbox at:', SANDBOX)
#
FSTAB = SANDBOX + '/etc/fstab'
DEVDIR = SANDBOX + '/dev'
DEV_UUID = DEVDIR + '/disk/by-uuid/'
NDCTL_FILE = SANDBOX + "/tmp/ndctl_list_NDRH.txt"

ndctl = {}

# ---------------------------------------------------------------------
def clean_up():
    '''clean up all tmp files associated with this module'''
    name = 'clean_up()'
    tic = time.perf_counter()

    status = False
    file_name = '/tmp/ndctl*.txt'
    status = c.clean_up(file_name)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return status


def get_nmem_dev_list(node):
    '''
    returns list of nmems['nmem0' 'nmem1' 'nmem2' 'nmem3' 'nmem4' 'nmem5']

    ndctl list -D -U 0
    {
      "dev":"nmem2",
      "id":"8089-a2-1836-00002716",
      "handle":33,
      "phys_id":42,
      "flag_failed_flush":true,
      "flag_smart_event":true,
      "security":"disabled"
    }
    '''
    name = 'get_nmem_dev_list()'
    tic = time.perf_counter()

    file_name = '/tmp/ndctl_list_-D_-U_node' + str(node) + '.txt'
    cmd = "/usr/bin/ndctl list -D -U " + str(node) + " > " + file_name

    if not os.path.exists(file_name):
        os.system(cmd)

    tmp = {}
    my_list = []
    with open(file_name, 'r') as f:
        tmp = json.load(f)

    for t in range(len(tmp)):
        my_list.append(tmp[t]['dev'])  # NB: original indexed tmp[0] here, repeating the first entry

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return my_list


# ---------------------------------------------------------------------
def get_region_dev_list(node):
    '''
    returns list of regions devices, ie: "region0"

    ndctl list -U 0
    [
      {
        "dev":"region0",
        "size":1623497637888,
        "available_size":0,
        "max_available_extent":0,
        "type":"pmem",
        "iset_id":-7155516910447809332,
        "persistence_domain":"memory_controller"
      }
    ]
    '''
    name = 'get_region_dev_list()'
    tic = time.perf_counter()

    file_name = '/tmp/ndctl_list_-R_-U_node' + str(node) + '.txt'
    cmd = "/usr/bin/ndctl list -R -U " + str(node) + " > " + file_name

    if not os.path.exists(file_name):
        os.system(cmd)
    #
    tmp = {}
    with open(file_name, 'r') as f:
        tmp = json.load(f)

    my_list = []
    for t in range(len(tmp)):
        my_list.append(tmp[t]['dev'])  # NB: original indexed tmp[0] here

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return my_list


# ---------------------------------------------------------------------
def get_ns_dev(node):
    '''
    returns list of namespace names, ie: "namespace0.0"

    ndctl list -U 0
    [
      {
        "dev":"namespace0.0",
        "mode":"fsdax",
        "map":"dev",
        "size":1598128390144,
        "uuid":"115ff8e8-bd52-47b8-a678-9b200902d864",
        "sector_size":512,
        "align":2097152,
        "blockdev":"pmem0"
      }
    ]
    '''
    name = 'get_ns_dev()'
    tic = time.perf_counter()

    file_name = '/tmp/ndctl_list_-N_-U' + str(node) + '.txt'
    cmd = "/usr/bin/ndctl list -N -U " + str(node) + " > " + file_name
    os.system(cmd)
    #
    tmp = {}
    with open(file_name, 'r') as f:
        tmp = json.load(f)
    #
    my_list = []
    for t in range(len(tmp)):
        my_list.append(tmp[t]['dev'])  # NB: original indexed tmp[0] here
    #
    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return my_list


# ---------------------------------------------------------------------
def get_ns_block_dev(node):
    '''
    returns list of ns blockdevs, ie: "pmem0"

    ndctl list -U 0
    [
      {
        "dev":"namespace0.0",
        "mode":"fsdax",
        "map":"dev",
        "size":1598128390144,
        "uuid":"115ff8e8-bd52-47b8-a678-9b200902d864",
        "sector_size":512,
        "align":2097152,
        "blockdev":"pmem0"
      }
    ]
    '''
    name = 'get_ns_block_dev()'
    tic = time.perf_counter()

    file_name = '/tmp/ndctl_list_-N_-U' + str(node) + '.txt'
    cmd = "/usr/bin/ndctl list -N -U " + str(node) + " > " + file_name
    os.system(cmd)
    #
    tmp = {}
    with open(file_name, 'r') as f:
        tmp = json.load(f)
    #
    my_list = []
    for t in range(len(tmp)):
        my_list.append(tmp[t]['blockdev'])  # NB: original indexed tmp[0] here
    #
    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return my_list


# ---------------------------------------------------------------------
def dump(file_name = NDCTL_FILE):
    """ dump the config to a file to parse """
    name = 'dump()'
    tic = time.perf_counter()
    # message("Function:", __name__, "File:", file_name )
    # if VERBOSE: print('  Querying ndctl data:', file_name, end="...")

    # ndctl list -NDRH
    cmd = "/usr/bin/ndctl list -NDRH > " + file_name
    os.system(cmd)
    # if VERBOSE: print('Done')


def parse(file_name = NDCTL_FILE):
    """ parse ndctl dump file into dict: ndctl """
    name = 'parse()'
    tic = time.perf_counter()

    global ndctl
    # if DEBUG: print("DEBUG: Function:", __name__, "File:", file_name )
    # if VERBOSE: print('  Parsing ndctl data:', file_name, end="...")

    with open(file_name, 'r') as f:
        ndctl = json.load(f)
    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, ":", ndctl)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return ndctl


# - +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +
# Accessor Functions
#
def get_region_dimm_list(region):
    """ returns list of pmem dimms associated with pmem region """
    name = 'get_region_dimm_list()'
    tic = time.perf_counter()

    global ndctl
    dimm_list = []
    # if DEBUG: print("DEBUG: Function:", __name__, "Region:", region )
    # if VERBOSE: print('  getting:', __name__, end="...")

    for r in range(len(ndctl['regions'])):
        # if this region matches arg, get DIMM mappings
        if ndctl['regions'][r]['dev'] == region:
            for d in range(len(ndctl['regions'][r]['mappings'])):
                if DEBUG: print('  ndctl[regions][r]mappings', ndctl['regions'][r]['mappings'][d]['dimm'])
                dimm_list.append(ndctl['regions'][r]['mappings'][d]['dimm'])
            continue

    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, region, "DIMMS", dimm_list)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return dimm_list


def get_region_list():
    """ Region List
        returns list of all pmem regions
    """
    name = 'get_region_list()'
    tic = time.perf_counter()

    global ndctl
    region_list = []
    # if DEBUG: print("DEBUG: Function:", __name__ )
    # if VERBOSE: print('  getting:', __name__, end="...")

    for r in range(len(ndctl['regions'])):
        region_list.append(ndctl['regions'][r]['dev'])

    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, ":", region_list)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return region_list


def get_region_ns_device_list(region):
    """ Region Namespace Device List
        returns list of all pmem namespaces names associated w/ pmem region
    """
    name = 'get_region_ns_device_list()'
    tic = time.perf_counter()

    ns_list = []
    # if DEBUG: print("DEBUG: Function:", __name__, "Region:", region )
    # if VERBOSE: print('  getting:', __name__, end="...")

    for r in range(len(ndctl['regions'])):
        # if this region matches arg, get DIMM mappings
        if ndctl['regions'][r]['dev'] == region:
            for d in range(len(ndctl['regions'][r]['namespaces'])):
                if DEBUG: print('  ndctl[regions][r]mappings', ndctl['regions'][r]['mappings'][d]['dimm'])
                ns_list.append(ndctl['regions'][r]['namespaces'][d]['blockdev'])
            continue

    # if VERBOSE: print('Done')

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return ns_list


def get_region_ns_name_list(region):
    """ Region Namespace List
        returns list of all pmem namespaces names associated w/ pmem region
    """
    name = 'get_region_ns_name_list()'
    tic = time.perf_counter()

    ns_list = []
    # if DEBUG: print("DEBUG: Function:", __name__, "Region:", region )
    # if VERBOSE: print('  getting:', __name__, end="...")

    for r in range(len(ndctl['regions'])):
        # if this region matches arg, get DIMM mappings
        if ndctl['regions'][r]['dev'] == region:
            for d in range(len(ndctl['regions'][r]['namespaces'])):
                if DEBUG: print('  ndctl[regions][r]mappings', ndctl['regions'][r]['mappings'][d]['dimm'])
                ns_list.append(ndctl['regions'][r]['namespaces'][d]['dev'])
            continue

    # if VERBOSE: print('Done')

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return ns_list


def get_dimm_status(dimm):
    """ DIMM List
        returns status of given dimm
    """
    name = 'get_dimm_status()'
    tic = time.perf_counter()
    #
    # dimm_list = []
    # if DEBUG: print("DEBUG: Function:", __name__ )
    # if VERBOSE: print('  getting:', __name__, end="...")

    for d in range(len(ndctl['dimms'])):
        if DEBUG: print(ndctl['dimms'][d]['dev'], ndctl['dimms'][d]['health']['health_state'])
        if ndctl['dimms'][d]['dev'] == dimm:
            status = ndctl['dimms'][d]['health']['health_state']
            break

    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, ":", dimmList)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return status


def get_dimm_list():
    """ DIMM List
        returns list of all pmem devices in system
    """
    name = 'get_dimm_list()'
    tic = time.perf_counter()

    dimm_list = []
    # if DEBUG: print("DEBUG: Function:", __name__ )
    # if VERBOSE: print('  getting:', __name__, end="...")

    for d in range(len(ndctl['dimms'])):
        dimm_list.append(ndctl['dimms'][d]['dev'])

    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, ":", dimmList)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return dimm_list


def get_region_by_dimm(dimm):
    """ Get Region by DIMM
        returns region associated with PMEM device
    """
    name = 'get_region_by_dimm()'
    tic = time.perf_counter()

    region = "regionX"
    # if DEBUG: print("DEBUG: Function:", __name__ )
    # if VERBOSE: print('  getting:', __name__, end="...")

    # loop through regions, get dimmList for each, check if match
    for r in range(len(ndctl['regions'])):
        region = ndctl['regions'][r]['dev']
        dimmList = get_region_dimm_list(region)
        # print("get_region_by_dimm.r", r, region, dimmList )
        if dimm in dimmList:
            break

    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, ":", region)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return region


def get_ns_name_list_by_dimm(dimm):
    """ Get PMEM Namespace name by DIMM
        returns list of pmem namespaces associated with name
    """
    name = 'get_ns_name_list_by_dimm()'
    tic = time.perf_counter()

    nsNameList = []
    # if DEBUG: print("DEBUG: Function:", __name__ )
    # if VERBOSE: print('  getting:', __name__, end="...")

    # loop through regions, get dimmList for each, check if match
    for r in range(len(ndctl['regions'])):
        region = ndctl['regions'][r]['dev']
        dimmList = get_region_dimm_list(region)
        # we should have a region to lookup namespaces
        nsNameList = get_region_ns_name_list(region)
        if dimm in dimmList:
            break

    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, ":", nsNameList)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return nsNameList


def get_ns_device_list_by_dimm(dimm):
    """ Get Namespace Devices by DIMM
        returns pmem namespace device for given DIMM
    """
    name = 'get_ns_device_list_by_dimm()'
    tic = time.perf_counter()

    ns_device_list = []
    dimm_list = []
    # if DEBUG: print("DEBUG: Function:", __name__ )
    # if VERBOSE: print('  getting:', __name__, end="...")

    # loop through regions, get dimmList for each, check if match
    for r in range(len(ndctl['regions'])):
        region = ndctl['regions'][r]['dev']
        dimm_list = get_region_dimm_list(region)
        # we should have a region to lookup namespaces
        ns_device_list = get_region_ns_device_list(region)
        if dimm in dimm_list:
            break

    # if VERBOSE: print('Done')
    # if DEBUG: print("Debug:", __name__, ":", ns_device_list)

    toc = time.perf_counter()
    delta_t = toc - tic
    td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
    timers.append(td)
    return ns_device_list


def print_timers(t = timers):
    '''
    ------------ndctl function timers---------------------
      Function              Elapsed       Start         End
      -------------------- --------- ----------- ------------
      show_socket()           0.5140 941291.4208 941291.9348
      parse_socket()          0.0004 941291.9348 941291.9352
      show_dimm()             2.0074 941291.9352 941293.9426
      parse_dimm()            0.0068 941293.9426 941293.9494
      show_region()           3.8237 941293.9494 941297.7731
      parse_region()          0.0006 941297.7732 941297.7737
      show_dimm()             2.5911 941297.7781 941300.3692
      parse_dimm()            0.0051 941300.3692 941300.3743
      get_dimms()             2.5962 941297.7781 941300.3744
      list_dimms()            0.0004 941300.3744 941300.3748
    '''
    print('------------Start ndctl function timers---------------')
    print('%30s %8s %11s %11s' % ('Function', 'Elapsed', 'Start', 'End') )
    print('%30s %8s %11s %11s' % ('------------------------------', '---------', '-----------', '------------') )

    first = t[0]['tic']
    last = t[len(t) -1]['toc']

    for i in t:
        print('%30s %9.4f %11.4f %11.4f' % (i['name'], i['elapsed'], i['tic'], i['toc']) )

    print('%30s %9.4f %11.4f %11.4f' % ('NDCTL Overall', last - first, first, last) )
    print()
    print('------------End ndctl function timers-----------------')


if __name__ == "__main__":
    main()
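The module ends by calling a `main()` that the truncated excerpt never defines. A minimal sketch, assuming it simply exercises the module's own accessors:

def main():
    # Hypothetical reconstruction: refresh the ndctl inventory, then walk it.
    dump()
    parse()
    for region in get_region_list():
        print(region, get_region_dimm_list(region), get_region_ns_name_list(region))
    for dimm in get_dimm_list():
        print(dimm, get_dimm_status(dimm))
    print_timers()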
27.700288
120
0.5567
f4125249bade8003f4245ff65531e572d79b0160
653
py
Python
tb/sources/__init__.py
DronMDF/manabot
b412e8cb9b5247f05487bed4cbf4967f7b58327f
[ "MIT" ]
1
2017-11-29T11:51:12.000Z
2017-11-29T11:51:12.000Z
tb/sources/__init__.py
DronMDF/manabot
b412e8cb9b5247f05487bed4cbf4967f7b58327f
[ "MIT" ]
109
2017-11-28T20:51:59.000Z
2018-02-02T13:15:29.000Z
tb/sources/__init__.py
DronMDF/manabot
b412e8cb9b5247f05487bed4cbf4967f7b58327f
[ "MIT" ]
null
null
null
from .admin import ReviewListAdmin, SoAdminReviewIsOut, SoReviewForAdmin
from .admin_commands import (
    AdminCommands,
    AdminFilteredCommands,
    ReviewListByCommands,
    SoIgnoreReview,
    SoSubmitReview
)
from .gerrit import ReviewOnServer, SoNewReview, SoOutReview, SoUpdateReview
from .reaction import (
    ReactionAlways,
    ReactionChoiced,
    ReactionRestrict,
    ReactionReview
)
from .review_list import (
    ReviewDifference,
    ReviewForUpdate,
    ReviewIgnored,
    ReviewIsNeed,
    ReviewOne,
    ReviewUnderControl,
    ReviewVerified
)
from .telegram import (
    SoNoTelegramTimeout,
    SoTelegram,
    TelegramBot,
    TelegramOffsetFromDb
)
from .utility import SoJoin, SoSafe
20.40625
76
0.82389
f412b42dfc85a5a206a8dd5d9f02a0078c055cdd
60,615
py
Python
sdk/python/pulumi_gcp/accesscontextmanager/service_perimeter.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
121
2018-06-18T19:16:42.000Z
2022-03-31T06:06:48.000Z
sdk/python/pulumi_gcp/accesscontextmanager/service_perimeter.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
492
2018-06-22T19:41:03.000Z
2022-03-31T15:33:53.000Z
sdk/python/pulumi_gcp/accesscontextmanager/service_perimeter.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
43
2018-06-19T01:43:13.000Z
2022-03-23T22:43:37.000Z
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *

__all__ = ['ServicePerimeterArgs', 'ServicePerimeter']


class ServicePerimeter(pulumi.CustomResource):
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(ServicePerimeterArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None,
                 perimeter_type: Optional[pulumi.Input[str]] = None,
                 spec: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']]] = None,
                 status: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']]] = None,
                 title: Optional[pulumi.Input[str]] = None,
                 use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ServicePerimeterArgs.__new__(ServicePerimeterArgs)

            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            if parent is None and not opts.urn:
                raise TypeError("Missing required property 'parent'")
            __props__.__dict__["parent"] = parent
            __props__.__dict__["perimeter_type"] = perimeter_type
            __props__.__dict__["spec"] = spec
            __props__.__dict__["status"] = status
            if title is None and not opts.urn:
                raise TypeError("Missing required property 'title'")
            __props__.__dict__["title"] = title
            __props__.__dict__["use_explicit_dry_run_spec"] = use_explicit_dry_run_spec
            __props__.__dict__["create_time"] = None
            __props__.__dict__["update_time"] = None
        super(ServicePerimeter, __self__).__init__(
            'gcp:accesscontextmanager/servicePerimeter:ServicePerimeter',
            resource_name,
            __props__,
            opts)
52.480519
147
0.640502
f41456d2af09359f55da03d5a94e013a18221147
3,935
py
Python
core/swift3.1.1Action/swift3runner.py
ianpartridge/incubator-openwhisk-runtime-swift
5aacba1435f46b13cbb0a70874afb4b53c1a78bc
[ "Apache-2.0" ]
2
2017-08-18T23:02:29.000Z
2018-01-20T22:44:33.000Z
core/swift3.1.1Action/swift3runner.py
ianpartridge/incubator-openwhisk-runtime-swift
5aacba1435f46b13cbb0a70874afb4b53c1a78bc
[ "Apache-2.0" ]
4
2017-02-03T17:01:33.000Z
2017-03-27T01:29:56.000Z
core/swift3.1.1Action/swift3runner.py
ianpartridge/incubator-openwhisk-runtime-swift
5aacba1435f46b13cbb0a70874afb4b53c1a78bc
[ "Apache-2.0" ]
4
2019-10-08T13:43:47.000Z
2021-11-10T15:36:35.000Z
"""Python proxy to run Swift action. /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ """ import os import glob import sys import subprocess import codecs import json sys.path.append('../actionProxy') from actionproxy import ActionRunner, main, setRunner # noqa SRC_EPILOGUE_FILE = '/swift3Action/epilogue.swift' DEST_SCRIPT_FILE = '/swift3Action/spm-build/main.swift' DEST_SCRIPT_DIR = '/swift3Action/spm-build' DEST_BIN_FILE = '/swift3Action/spm-build/.build/release/Action' BUILD_PROCESS = ['./swiftbuildandlink.sh'] if __name__ == '__main__': setRunner(Swift3Runner()) main()
34.217391
104
0.636595
f4166388f315b81cfe6df485234fcfe561b8ac22
251
py
Python
src/ychaos/utils/types.py
vanderh0ff/ychaos
5148c889912b744ee73907e4dd30c9ddb851aeb3
[ "Apache-2.0" ]
8
2021-07-21T15:37:48.000Z
2022-03-03T14:43:09.000Z
src/ychaos/utils/types.py
vanderh0ff/ychaos
5148c889912b744ee73907e4dd30c9ddb851aeb3
[ "Apache-2.0" ]
102
2021-07-20T16:08:29.000Z
2022-03-25T07:28:37.000Z
src/ychaos/utils/types.py
vanderh0ff/ychaos
5148c889912b744ee73907e4dd30c9ddb851aeb3
[ "Apache-2.0" ]
8
2021-07-20T13:37:46.000Z
2022-02-18T01:44:52.000Z
from typing import Dict, List, TypeVar, Union

JsonTypeVar = TypeVar("JsonTypeVar")

JsonPrimitive = Union[str, float, int, bool, None]

JsonDict = Dict[str, JsonTypeVar]
JsonArray = List[JsonTypeVar]

Json = Union[JsonPrimitive, JsonDict, JsonArray]
22.818182
50
0.760956
f416d0a9f34ba173050cd0b0ffffe6b5fee17622
6,006
py
Python
yandex_market_language/models/promo.py
stefanitsky/yandex_market_language
e17595b556fc55e183cf366227b2739c5c6178dc
[ "MIT" ]
7
2020-03-28T22:35:52.000Z
2021-09-16T10:50:10.000Z
yandex_market_language/models/promo.py
stefanitsky/yandex_market_language
e17595b556fc55e183cf366227b2739c5c6178dc
[ "MIT" ]
192
2020-03-29T12:38:53.000Z
2021-09-01T14:12:07.000Z
yandex_market_language/models/promo.py
stefanitsky/yandex_market_language
e17595b556fc55e183cf366227b2739c5c6178dc
[ "MIT" ]
6
2020-06-05T09:07:02.000Z
2021-11-28T14:37:58.000Z
import typing as t

from yandex_market_language import models
from yandex_market_language.models.abstract import XMLElement, XMLSubElement
27.934884
78
0.580087
f4173149ff496f494a4326e1f0ac4dc7014b0225
3,834
py
Python
src/testCmd.py
skogsbaer/check-assignments
cda8208c10644eecfe0bb988bee61098485aa6c4
[ "BSD-3-Clause" ]
null
null
null
src/testCmd.py
skogsbaer/check-assignments
cda8208c10644eecfe0bb988bee61098485aa6c4
[ "BSD-3-Clause" ]
null
null
null
src/testCmd.py
skogsbaer/check-assignments
cda8208c10644eecfe0bb988bee61098485aa6c4
[ "BSD-3-Clause" ]
1
2021-03-26T14:00:14.000Z
2021-03-26T14:00:14.000Z
import shell
from dataclasses import dataclass
from utils import *
from ownLogging import *
from typing import *
from ansi import *
import re
import os

import testHaskell
import testPython
import testJava

INSPECT_COMMAND = 'inspect'
RERUN_COMMAND = 'rerun'
CONTINUE_COMMAND = 'continue'
HELP_COMMAND = 'help'

TEST_DICT = {
    'python': testPython.runPythonTests,
    'java': testJava.runJavaTests,
    'haskell': testHaskell.runHaskellTests
}
30.919355
103
0.594158
f4189a148892e47a3efe2ef760b39a4a07630dfd
14,098
py
Python
kipoi_containers/singularityhelper.py
kipoi/kipoi-containers
5978cf1563dcc1072170f28a0a956cc28aa3c406
[ "MIT" ]
null
null
null
kipoi_containers/singularityhelper.py
kipoi/kipoi-containers
5978cf1563dcc1072170f28a0a956cc28aa3c406
[ "MIT" ]
11
2021-11-30T19:30:50.000Z
2022-03-29T17:06:15.000Z
kipoi_containers/singularityhelper.py
kipoi/kipoi-containers
5978cf1563dcc1072170f28a0a956cc28aa3c406
[ "MIT" ]
null
null
null
from collections import Counter
from datetime import datetime
import os
import requests
from subprocess import Popen, PIPE
from pathlib import Path
import json
from typing import Dict, Union, TYPE_CHECKING

from kipoi_utils.external.torchvision.dataset_utils import download_url

if TYPE_CHECKING:
    import zenodoclient

ZENODO_BASE = "https://zenodo.org"
ZENODO_DEPOSITION = f"{ZENODO_BASE}/api/deposit/depositions"

PathType = Union[str, Path]


def cleanup(singularity_file_path: PathType) -> None:
    """
    Deletes the singularity image that was created by build_singularity_image
    """
    if isinstance(singularity_file_path, str):
        singularity_file_path = Path(singularity_file_path)
    if singularity_file_path.exists():
        singularity_file_path.unlink()


def build_singularity_image(
    name_of_docker_image: str,
    singularity_image_name: str,
    singularity_image_folder: PathType,
) -> PathType:
    """
    This function builds a singularity image from a dockerhub image
    using singularity pull. The resulting .sif is stored in
    <singularity_image_folder> and the filepath is returned.
    """
    if isinstance(singularity_image_folder, Path):
        singularity_image_folder = str(singularity_image_folder)
    pull_cmd = [
        "singularity",
        "pull",
        "--name",
        f"{singularity_image_folder}/{singularity_image_name}",
        "--force",
        f"docker://{name_of_docker_image}",
    ]
    print(f"Building {singularity_image_name} - {' '.join(pull_cmd)}")
    process = Popen(pull_cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        print(stderr)
        print(stdout)
        raise ValueError(
            f"Singularity image {singularity_image_name} can not be built"
        )
    singularity_image_path = (
        f"{singularity_image_folder}/{singularity_image_name}"
    )
    return singularity_image_path


def test_singularity_image(
    singularity_image_folder: PathType, singularity_image_name: str, model: str
) -> None:
    """Tests a singularity image residing in singularity_image_folder
    with kipoi test <model> --source=kipoi

    Raises:
        ValueError: Raised if the test is not successful
    """
    print(
        f"Testing {model} with {singularity_image_folder}/{singularity_image_name}"
    )
    if model == "Basenji":
        test_cmd = [
            "kipoi",
            "test",
            f"{model}",
            "--source=kipoi",
            "--batch_size=2",
        ]
    else:
        test_cmd = ["kipoi", "test", f"{model}", "--source=kipoi"]
    if isinstance(singularity_image_folder, str):
        singularity_image_folder = Path(singularity_image_folder)
    if isinstance(singularity_image_name, str):
        singularity_image_name = Path(singularity_image_name)
    exec_cmd = [
        "singularity",
        "exec",
        f"{singularity_image_folder}/{singularity_image_name}",
    ]
    exec_cmd.extend(test_cmd)
    process = Popen(exec_cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        print(stdout)
        print(stderr)
        raise ValueError(
            f"Singularity image {singularity_image_name} for {model} did not pass relevant tests"
        )


def create_new_deposition(
    zenodo_client: "zenodoclient.Client", deposition_id: str
) -> str:
    """Creates a new version of an existing deposition on zenodo and
    returns the corresponding id"""
    status_code, response = zenodo_client.post_content(
        f"{ZENODO_DEPOSITION}/{deposition_id}/actions/newversion"
    )
    return response["links"]["latest_draft"].split("/")[-1]


def get_deposit(
    zenodo_client: "zenodoclient.Client", deposition_id: str
) -> Dict:
    """Returns the response body of a get request for an existing deposition"""
    response = zenodo_client.get_content(
        f"{ZENODO_DEPOSITION}/{deposition_id}"
    )
    return response


def upload_file(
    zenodo_client: "zenodoclient.Client",
    url: str,
    singularity_image_folder: PathType,
    filename: str,
) -> None:
    """Upload singularity_image_folder/filename to a url"""
    path = Path(singularity_image_folder) / Path(filename)
    zenodo_client.put_content(url, data=path)


def upload_metadata(
    zenodo_client: "zenodoclient.Client",
    url: str,
    model_group: str = "",
    shared_env: str = "",
) -> None:
    """Upload metadata for a model group to a given url"""
    if not model_group and not shared_env:
        raise ValueError(
            "You need to provide at least a shared env name or a model group name"
        )
    if model_group:
        data = {
            "metadata": {
                "title": f"{model_group} singularity container",
                "upload_type": "physicalobject",
                "description": "This is a singularity container for models "
                f"under https://kipoi.org/models/{model_group}/",
                "creators": [
                    {"name": "Haimasree, Bhattacharya", "affiliation": "EMBL"}
                ],
                "publication_date": datetime.today().strftime("%Y-%m-%d"),
                "license": "MIT",
            }
        }
    elif shared_env:
        if "shared" in shared_env:
            data = {
                "metadata": {
                    "title": f"{shared_env} singularity container",
                    "upload_type": "physicalobject",
                    "description": "Singularity container with conda environment "
                    f"https://github.com/kipoi/kipoi-containers/blob/main/envfiles/{shared_env}.yml",
                    "creators": [
                        {
                            "name": "Haimasree, Bhattacharya",
                            "affiliation": "EMBL",
                        }
                    ],
                    "publication_date": datetime.today().strftime("%Y-%m-%d"),
                    "license": "MIT",
                }
            }
        elif shared_env == "mmsplice":
            data = {
                "metadata": {
                    "title": "MMSplice singularity container except mtsplice",
                    "upload_type": "physicalobject",
                    "description": "Singularity container for MMSplice models except mtsplice "
                    "under http://kipoi.org/models/MMSplice/",
                    "creators": [
                        {
                            "name": "Haimasree, Bhattacharya",
                            "affiliation": "EMBL",
                        }
                    ],
                    "publication_date": datetime.today().strftime("%Y-%m-%d"),
                    "license": "MIT",
                }
            }
        else:
            raise ValueError(
                "Available options are - mmsplice, sharedpy3keras2tf1, sharedpy3keras2tf2, sharedpy3keras1.2"
            )
    zenodo_client.put_content(url, data=data)


def push_deposition(
    zenodo_client: "zenodoclient.Client", deposition_id: str
) -> Dict:
    """Pushes a deposition to zenodo. An additional get request is made to the
    newly pushed deposition and a response body is returned"""
    status_code, response = zenodo_client.post_content(
        f"{ZENODO_DEPOSITION}/{deposition_id}/actions/publish"
    )
    response = get_deposit(zenodo_client, deposition_id)
    return response


def update_existing_singularity_container(
    zenodo_client: "zenodoclient.Client",
    singularity_dict: Dict,
    singularity_image_folder: PathType,
    model_group: str,
    file_to_upload: str = "",
    push: bool = True,
) -> Dict:  # NB: originally annotated -> None, but the function returns a dict
    """This function creates a new draft version of an existing image's
    zenodo entry with updated metadata and file after deleting the old file.
    If push is True, the draft version is finalized and the url, name and md5
    fields are updated and the new deposition id and file id is added to
    singularity dict which contains information about the existing image.
    Otherwise, only the new deposition id and file id is added to the
    dictionary. This modified dictionary is returned"""
    # Create a new version of an existing deposition
    deposition_id = singularity_dict["url"].split("/")[4]
    new_deposition_id = create_new_deposition(zenodo_client, deposition_id)
    response = get_deposit(zenodo_client, new_deposition_id)
    bucket_url = response["links"]["bucket"]
    filename = (
        file_to_upload if file_to_upload else f"{singularity_dict['name']}.sif"
    )
    file_id = ""
    for fileobj in response["files"]:
        if fileobj["filename"] == filename:
            file_id = fileobj["id"]  # Assuming only 1 version is added

    # Delete existing file from this new version
    if file_id:
        zenodo_client.delete_content(
            f"{ZENODO_DEPOSITION}/{new_deposition_id}/files/{file_id}"
        )

    # Add a new file to this new version
    upload_file(
        zenodo_client,
        f"{bucket_url}/{filename}",
        singularity_image_folder,
        filename,
    )

    url = f"{ZENODO_DEPOSITION}/{new_deposition_id}"
    if (
        "shared" in singularity_dict["name"]
        or singularity_dict["name"] == "kipoi-docker_mmsplice-slim"
    ):
        shared_env_name = (
            singularity_dict["name"]
            .replace("kipoi-docker_", "")
            .replace("-slim", "")
        )
        upload_metadata(zenodo_client, url, shared_env=shared_env_name)
    else:
        upload_metadata(zenodo_client, url, model_group=model_group)

    # publish the newly created revision
    if push:
        response = push_deposition(zenodo_client, new_deposition_id)
        record_id = response["metadata"]["prereserve_doi"]["recid"]
        file_id, file_name, file_md5 = "", "", ""
        for fileobj in response["files"]:
            if fileobj["filename"] == filename:
                file_id = fileobj["id"]  # Assuming only 1 version is added
                file_name = fileobj["filename"].replace(".sif", "")
                file_md5 = fileobj["checksum"]
        return {
            "new_deposition_id": new_deposition_id,
            "file_id": file_id,
            "url": f"{ZENODO_BASE}/record/{record_id}/files/{filename}?download=1",
            "name": file_name,
            "md5": file_md5,
        }
    else:
        return singularity_dict | {
            "new_deposition_id": new_deposition_id,
            "file_id": "",
        }


def push_new_singularity_image(
    zenodo_client: "zenodoclient.Client",
    singularity_image_folder: PathType,
    singularity_dict: Dict,
    model_group: str,
    file_to_upload: str = "",
    path: str = "",
    push: bool = True,
) -> Dict:  # NB: originally annotated -> None, but the function returns a dict
    """This function creates a draft version of a new zenodo entry with the
    metadata and singularity image. If push is True, the draft version is
    finalized and the url, name and md5 fields are updated and the new
    deposition id and file id is added to singularity dict which contains
    empty strings as url and md5. Otherwise, only the new deposition id and
    file id is added to the dictionary. This modified dictionary is returned"""
    status_code, response = zenodo_client.post_content(f"{ZENODO_DEPOSITION}")
    deposition_id = response["id"]
    bucket_url = response["links"]["bucket"]

    filename = (
        file_to_upload if file_to_upload else f"{singularity_dict['name']}.sif"
    )
    upload_file(
        zenodo_client,
        f"{bucket_url}/{filename}",
        singularity_image_folder,
        filename,
    )

    url = f"{ZENODO_DEPOSITION}/{deposition_id}"
    if "shared" in singularity_dict["name"]:
        shared_env_name = (
            singularity_dict["name"]
            .replace("kipoi-docker_", "")
            .replace("-slim", "")
        )
        upload_metadata(zenodo_client, url, shared_env=shared_env_name)
    else:
        upload_metadata(zenodo_client, url, model_group=model_group)

    if push:
        push_deposition(zenodo_client, deposition_id)
        response = get_deposit(zenodo_client, deposition_id)
        record_id = response["metadata"]["prereserve_doi"]["recid"]
        return {
            "new_deposition_id": deposition_id,
            "file_id": response["files"][0]["id"],
            "url": f"{ZENODO_BASE}/record/{record_id}/files/{filename}?download=1",
            "name": response["files"][0]["filename"].replace(".sif", ""),
            "md5": response["files"][0]["checksum"],
        }
    else:
        return singularity_dict | {
            "new_deposition_id": deposition_id,
            "file_id": "",
        }


def get_singularity_image(
    singularity_image_folder: PathType,
    singularity_image_dict: Dict,
    model_or_model_group: str,
) -> PathType:
    """This function downloads the singularity image corresponding to the
    given model or model group from zenodo to singularity_image_folder and
    returns the name of the image"""
    if (
        model_or_model_group in singularity_image_dict
    ):  # Special case for MMSPlice/mtsplice, APARENT/veff
        image_name = (
            f"{singularity_image_dict[model_or_model_group]['name']}.sif"
        )
        image_url = f"{singularity_image_dict[model_or_model_group]['url']}"
        image_md5 = f"{singularity_image_dict[model_or_model_group]['md5']}"
    else:
        model_group = model_or_model_group.split("/")[0]
        image_name = f"{singularity_image_dict[model_group]['name']}.sif"
        image_url = f"{singularity_image_dict[model_group]['url']}"
        image_md5 = f"{singularity_image_dict[model_group]['md5']}"

    if isinstance(singularity_image_folder, str):
        singularity_image_folder = Path(singularity_image_folder)
    if isinstance(image_name, str):
        image_name = Path(image_name)

    if not (singularity_image_folder / image_name).exists():
        download_url(
            url=image_url,
            root=singularity_image_folder,
            filename=image_name,
            md5=image_md5,
        )
    return image_name
36.148718
109
0.62633
f418aa86180868641545c7ca6a350482c74458ed
1,152
py
Python
policy/_cache.py
garenchan/policy
fbd056c0474e62252d1fe986fe029cacde6845d8
[ "Apache-2.0" ]
5
2018-10-17T21:06:07.000Z
2021-12-31T01:33:09.000Z
policy/_cache.py
garenchan/policy
fbd056c0474e62252d1fe986fe029cacde6845d8
[ "Apache-2.0" ]
1
2018-09-07T09:00:41.000Z
2018-09-07T11:06:14.000Z
policy/_cache.py
garenchan/policy
fbd056c0474e62252d1fe986fe029cacde6845d8
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""
    policy._cache
    ~~~~~~~~~~~~~~~

    Cache for policy file.
"""

import os
import logging

LOG = logging.getLogger(__name__)

# Global file cache
CACHE = {}


def read_file(filename: str, force_reload=False):
    """Read a file if it has been modified.

    :param filename: File name to read from.
    :param force_reload: Whether to reload the file.
    :returns: A tuple of (fresh, data): whether the data was freshly read,
        and the file contents.
    """
    if force_reload:
        _delete_cached_file(filename)

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        LOG.debug('Reloading cached file %s', filename)
        with open(filename) as fp:
            cache_info['data'] = fp.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return reloaded, cache_info['data']


def _delete_cached_file(filename: str):
    """Delete cached file if present.

    :param filename: Filename to delete
    """
    try:
        del CACHE[filename]
    except KeyError:
        pass
21.735849
76
0.631944
f419167b819e5ee174fbe6b84ca88ef1f496b659
10,858
py
Python
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
samn/opencensus-python
d8709f141b67f7f5ba011c440b8ba8fb9cbc419a
[ "Apache-2.0" ]
null
null
null
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
samn/opencensus-python
d8709f141b67f7f5ba011c440b8ba8fb9cbc419a
[ "Apache-2.0" ]
null
null
null
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
samn/opencensus-python
d8709f141b67f7f5ba011c440b8ba8fb9cbc419a
[ "Apache-2.0" ]
null
null
null
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Django middleware helper to capture and trace a request."""
import logging

from opencensus.ext.django.config import (settings, convert_to_import)
from opencensus.trace import attributes_helper
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
from opencensus.trace import tracer as tracer_module
from opencensus.trace import utils
from opencensus.trace.samplers import probability

try:
    from django.utils.deprecation import MiddlewareMixin
except ImportError:  # pragma: NO COVER
    MiddlewareMixin = object

HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']

REQUEST_THREAD_LOCAL_KEY = 'django_request'
SPAN_THREAD_LOCAL_KEY = 'django_span'

BLACKLIST_PATHS = 'BLACKLIST_PATHS'
GCP_EXPORTER_PROJECT = 'GCP_EXPORTER_PROJECT'
SAMPLING_RATE = 'SAMPLING_RATE'
TRANSPORT = 'TRANSPORT'
SERVICE_NAME = 'SERVICE_NAME'
ZIPKIN_EXPORTER_SERVICE_NAME = 'ZIPKIN_EXPORTER_SERVICE_NAME'
ZIPKIN_EXPORTER_HOST_NAME = 'ZIPKIN_EXPORTER_HOST_NAME'
ZIPKIN_EXPORTER_PORT = 'ZIPKIN_EXPORTER_PORT'
ZIPKIN_EXPORTER_PROTOCOL = 'ZIPKIN_EXPORTER_PROTOCOL'
JAEGER_EXPORTER_HOST_NAME = 'JAEGER_EXPORTER_HOST_NAME'
JAEGER_EXPORTER_PORT = 'JAEGER_EXPORTER_PORT'
JAEGER_EXPORTER_AGENT_HOST_NAME = 'JAEGER_EXPORTER_AGENT_HOST_NAME'
JAEGER_EXPORTER_AGENT_PORT = 'JAEGER_EXPORTER_AGENT_PORT'
JAEGER_EXPORTER_SERVICE_NAME = 'JAEGER_EXPORTER_SERVICE_NAME'
OCAGENT_TRACE_EXPORTER_ENDPOINT = 'OCAGENT_TRACE_EXPORTER_ENDPOINT'
BLACKLIST_HOSTNAMES = 'BLACKLIST_HOSTNAMES'

log = logging.getLogger(__name__)


def _get_django_request():
    """Get Django request from thread local.

    :rtype: str
    :returns: Django request.
    """
    return execution_context.get_opencensus_attr(REQUEST_THREAD_LOCAL_KEY)


def _get_django_span():
    """Get Django span from thread local.

    :rtype: str
    :returns: Django request.
    """
    return execution_context.get_opencensus_attr(SPAN_THREAD_LOCAL_KEY)


def _get_current_tracer():
    """Get the current request tracer."""
    return execution_context.get_opencensus_tracer()


def _set_django_attributes(span, request):
    """Set the django related attributes."""
    django_user = getattr(request, 'user', None)

    if django_user is None:
        return

    user_id = django_user.pk
    try:
        user_name = django_user.get_username()
    except AttributeError:
        # AnonymousUser in some older versions of Django doesn't implement
        # get_username
        return

    # User id is the django autofield for User model as the primary key
    if user_id is not None:
        span.add_attribute('django.user.id', str(user_id))

    if user_name is not None:
        span.add_attribute('django.user.name', str(user_name))
36.682432
79
0.667618
f419f2c87349548809cd06192323167246871ccd
1,322
py
Python
codeblockCar/codingPage/tests.py
ICT2x01-p2-4/ICT2x01-p2-4
6249c0a807354b33db80f367344fe14cb5512840
[ "MIT" ]
null
null
null
codeblockCar/codingPage/tests.py
ICT2x01-p2-4/ICT2x01-p2-4
6249c0a807354b33db80f367344fe14cb5512840
[ "MIT" ]
24
2021-09-29T02:46:17.000Z
2021-11-06T13:32:11.000Z
codeblockCar/codingPage/tests.py
ICT2x01-p2-4/Codeblock-car
6249c0a807354b33db80f367344fe14cb5512840
[ "MIT" ]
null
null
null
from typing import Reversible
from django.test import TestCase, Client
from challenge.models import Challenge
from codingPage.models import Command, Log
from django.core.exceptions import ValidationError
from django.urls import reverse
38.882353
113
0.630862
f41a1ce9bbfb9a3f65c33e9986100ab487ba7015
537
py
Python
app/hint/models.py
vigov5/oshougatsu2015
38cbf325675ee2c08a6965b8689fad8308eb84eb
[ "MIT" ]
null
null
null
app/hint/models.py
vigov5/oshougatsu2015
38cbf325675ee2c08a6965b8689fad8308eb84eb
[ "MIT" ]
null
null
null
app/hint/models.py
vigov5/oshougatsu2015
38cbf325675ee2c08a6965b8689fad8308eb84eb
[ "MIT" ]
null
null
null
import os
import datetime

from app import app, db
26.85
68
0.666667
f41ab377d1e1427d9115db2eb2f0758b5461fed9
204
py
Python
base/urls.py
almustafa-noureddin/Portfolio-website
67462c98fec65e74183ae057e8b31b5bdff1402c
[ "MIT" ]
null
null
null
base/urls.py
almustafa-noureddin/Portfolio-website
67462c98fec65e74183ae057e8b31b5bdff1402c
[ "MIT" ]
null
null
null
base/urls.py
almustafa-noureddin/Portfolio-website
67462c98fec65e74183ae057e8b31b5bdff1402c
[ "MIT" ]
null
null
null
from django.urls import path
from . import views


app_name = "base"

urlpatterns = [
    path('', views.IndexView.as_view(), name="home"),
    path('contact/', views.ContactView.as_view(), name="contact"),
]
22.666667
67
0.686275
f41b63806a18c6ea9b6ee2484bb3111d3bc16034
33,899
py
Python
app/main/views/templates.py
cds-snc/notification-admin
d4056798bf889ad29893667bbb67ead2f8e466e4
[ "MIT" ]
16
2019-11-05T21:35:49.000Z
2022-01-12T15:00:32.000Z
app/main/views/templates.py
cds-snc/notification-admin
d4056798bf889ad29893667bbb67ead2f8e466e4
[ "MIT" ]
509
2019-07-11T22:03:19.000Z
2022-03-30T15:19:26.000Z
app/main/views/templates.py
cds-snc/notification-admin
d4056798bf889ad29893667bbb67ead2f8e466e4
[ "MIT" ]
8
2020-02-21T20:19:29.000Z
2022-03-31T14:17:02.000Z
from datetime import datetime, timedelta
from string import ascii_uppercase

from dateutil.parser import parse
from flask import abort, flash, jsonify, redirect, render_template, request, url_for
from flask_babel import _
from flask_babel import lazy_gettext as _l
from flask_login import current_user
from markupsafe import Markup
from notifications_python_client.errors import HTTPError
from notifications_utils.formatters import nl2br
from notifications_utils.recipients import first_column_headings

from app import (
    current_service,
    service_api_client,
    template_api_prefill_client,
    template_folder_api_client,
    template_statistics_client,
)
from app.main import main
from app.main.forms import (
    CreateTemplateForm,
    EmailTemplateForm,
    LetterTemplateForm,
    LetterTemplatePostageForm,
    SearchByNameForm,
    SetTemplateSenderForm,
    SMSTemplateForm,
    TemplateAndFoldersSelectionForm,
    TemplateFolderForm,
)
from app.main.views.send import get_example_csv_rows, get_sender_details
from app.models.service import Service
from app.models.template_list import TemplateList, TemplateLists
from app.template_previews import TemplatePreview, get_page_count_for_letter
from app.utils import (
    email_or_sms_not_enabled,
    get_template,
    should_skip_template_page,
    user_has_permissions,
    user_is_platform_admin,
)

form_objects = {
    "email": EmailTemplateForm,
    "sms": SMSTemplateForm,
    "letter": LetterTemplateForm,
}


def process_folder_management_form(form, current_folder_id):
    current_service.get_template_folder_with_user_permission_or_403(current_folder_id, current_user)
    new_folder_id = None

    if form.is_add_folder_op:
        new_folder_id = template_folder_api_client.create_template_folder(
            current_service.id, name=form.get_folder_name(), parent_id=current_folder_id
        )

    if form.is_move_op:
        # if we've just made a folder, we also want to move there
        move_to_id = new_folder_id or form.move_to.data
        current_service.move_to_folder(ids_to_move=form.templates_and_folders.data, move_to=move_to_id)

    return redirect(request.url)


def get_template_nav_label(value):
    return {
        "all": _l("All"),
        "sms": _l("Text message"),
        "email": _l("Email"),
        "letter": _l("Letter"),
    }[value]


def get_template_nav_items(template_folder_id, sending_view):
    return [
        (
            get_template_nav_label(key),
            key,
            url_for(
                ".choose_template",
                service_id=current_service.id,
                template_type=key,
                template_folder_id=template_folder_id,
                view="sending" if sending_view else None,
            ),
            "",
        )
        for key in ["all"] + current_service.available_template_types
    ]


def _view_template_version(service_id, template_id, version, letters_as_pdf=False):
    return dict(
        template=get_template(
            current_service.get_template(template_id, version=version),
            current_service,
            letter_preview_url=url_for(
                ".view_template_version_preview",
                service_id=service_id,
                template_id=template_id,
                version=version,
                filetype="png",
            )
            if not letters_as_pdf
            else None,
        )
    )


def _add_template_by_type(template_type, template_folder_id):
    if template_type == "copy-existing":
        return redirect(
            url_for(
                ".choose_template_to_copy",
                service_id=current_service.id,
            )
        )

    if template_type == "letter":
        blank_letter = service_api_client.create_service_template(
            "New letter template",
            "letter",
            "Body",
            current_service.id,
            "Main heading",
            "normal",
            template_folder_id,
        )
        return redirect(
            url_for(
                ".view_template",
                service_id=current_service.id,
                template_id=blank_letter["data"]["id"],
            )
        )

    if email_or_sms_not_enabled(template_type, current_service.permissions):
        return redirect(
            url_for(
                ".action_blocked",
                service_id=current_service.id,
                notification_type=template_type,
                return_to="add_new_template",
                template_id="0",
            )
        )
    else:
        return redirect(
            url_for(
                ".add_service_template",
                service_id=current_service.id,
                template_type=template_type,
                template_folder_id=template_folder_id,
            )
        )


def _get_template_copy_name(template, existing_templates):
    template_names = [existing["name"] for existing in existing_templates]
    for index in reversed(range(1, 10)):
        if "{} (copy {})".format(template["name"], index) in template_names:
            return "{} (copy {})".format(template["name"], index + 1)
    if "{} (copy)".format(template["name"]) in template_names:
        return "{} (copy 2)".format(template["name"])
    return "{} (copy)".format(template["name"])


def abort_403_if_not_admin_user():
    if not current_user.platform_admin:
        abort(403)


def get_template_sender_form_dict(service_id, template):
    context = {
        "email": {"field_name": "email_address"},
        "letter": {"field_name": "contact_block"},
        "sms": {"field_name": "sms_sender"},
    }[template["template_type"]]

    sender_format = context["field_name"]
    service_senders = get_sender_details(service_id, template["template_type"])
    context["default_sender"] = next((x["id"] for x in service_senders if x["is_default"]), "Not set")
    if not service_senders:
        context["no_senders"] = True

    context["value_and_label"] = [(sender["id"], Markup(nl2br(sender[sender_format]))) for sender in service_senders]
    context["value_and_label"].insert(0, ("", "Blank"))  # Add blank option to start of list

    context["current_choice"] = template["service_letter_contact"] if template["service_letter_contact"] else ""
    return context


def get_human_readable_delta(from_time, until_time):
    delta = until_time - from_time
    if delta < timedelta(seconds=60):
        return "under a minute"
    elif delta < timedelta(hours=1):
        minutes = int(delta.seconds / 60)
        return "{} minute{}".format(minutes, "" if minutes == 1 else "s")
    elif delta < timedelta(days=1):
        hours = int(delta.seconds / 3600)
        return "{} hour{}".format(hours, "" if hours == 1 else "s")
    else:
        days = delta.days
        return "{} day{}".format(days, "" if days == 1 else "s")
36.927015
127
0.661081
f41c237f71cc3272ed38dd3e63b60d183d0e2aa0
7,999
py
Python
linearRegression_gradientDescent/linearRegression_gradientDescent.py
MarcelloVendruscolo/DeepLearningForImageAnalysis
0f57d63510d0f7b2729d214b3729a21a663794b5
[ "MIT" ]
null
null
null
linearRegression_gradientDescent/linearRegression_gradientDescent.py
MarcelloVendruscolo/DeepLearningForImageAnalysis
0f57d63510d0f7b2729d214b3729a21a663794b5
[ "MIT" ]
null
null
null
linearRegression_gradientDescent/linearRegression_gradientDescent.py
MarcelloVendruscolo/DeepLearningForImageAnalysis
0f57d63510d0f7b2729d214b3729a21a663794b5
[ "MIT" ]
null
null
null
import numpy as np
from load_auto import load_auto
import matplotlib.pyplot as plt
import math

PATH_DATASET = '/Users/marcellovendruscolo/Documents/vscode-workspace/DeepLearningForImageAnalysis/linearRegression_gradientDescent/Auto.csv'

train_dataset, train_labels = load_auto(PATH_DATASET)
train_dataset = np.array(train_dataset)
non_normalised_dataset = np.array(np.transpose(train_dataset))
non_normalised_horsepower = non_normalised_dataset[2,:].reshape(1,-1)
train_labels = np.array(train_labels)

mean = np.mean(train_dataset, axis=0)
sd = np.std(train_dataset, axis=0)
for col in range(0, train_dataset.shape[1]):
    train_dataset[:,col] = (train_dataset[:,col] - mean[col])/sd[col]

normalised_dataset = np.transpose(train_dataset)
horsepower_dataset = normalised_dataset[2,:].reshape(1,-1)

# Exercise 1.4.1 and Exercise 1.4.2:
# learning_rate = 0.1
# number_iterations = 1000
# print('\nChoice of input dataset: (i) Only horsepower feature.')
# weights, offset_b, cost_function_value = train_linear_model(horsepower_dataset, train_labels, number_iterations, learning_rate)
# print('Number of iterations: ' +str(number_iterations))
# print('Learning rate: ' +str(learning_rate))
# print('Cost function value: ' +str(cost_function_value[len(cost_function_value) - 1]))
# print('Weights: ' +str(weights))
# print('Offset: ' +str(offset_b))
# print('\nChoice of input dataset: (ii) All features except name.')
# weights, offset_b, cost_function_value = train_linear_model(normalised_dataset, train_labels, number_iterations, learning_rate)
# print('Number of iterations: ' +str(number_iterations))
# print('Learning rate: ' +str(learning_rate))
# print('Cost function value: ' +str(cost_function_value[len(cost_function_value) - 1]))
# print('Weights: ' +str(weights))
# print('Offset: ' +str(offset_b) + '\n')

# Exercise 1.4.3:
# learning_rates = [1, 1e-1, 1e-2, 1e-3, 1e-4]
# number_iterations = 1000
# cost_consolidated = np.ndarray(shape=(len(learning_rates), number_iterations))
# for counter in range(0, len(learning_rates)):
#     weights, offset_b, cost_consolidated[counter,:] = train_linear_model(normalised_dataset, train_labels, number_iterations, learning_rates[counter])
# plotting_cost_iteration(learning_rates, cost_consolidated)

# Exercise 1.4.4:
# learning_rate = [1, 1e-1, 1e-2, 1e-3, 1e-4]
# number_iterations = 1000
# cost_consolidated = np.ndarray(shape=(len(learning_rate), number_iterations))
# for counter in range(0, len(learning_rate)):
#     weights, offset_b, cost_consolidated[counter,:] = train_linear_model(non_normalised_dataset, train_labels, number_iterations, learning_rate[counter])
# plotting_cost_iteration(learning_rate, cost_consolidated)

# Exercise 1.4.5:
# learning_rate = 0.1
# number_iterations = 1000
# weights, offset_b, cost_function_value = train_linear_model(horsepower_dataset, train_labels, number_iterations, learning_rate)
# plotting_horsepower_mpg(horsepower_dataset, train_labels, weights, offset_b)
48.478788
169
0.739717
f41df8a9a5f75d57ee4443306eca56bc32c0d2b4
3,426
py
Python
unit_tests/test_hr_calculations.py
mdholbrook/heart_rate_sentinel_server
927b59ad6d2078bd6e3491014fdebbc610d25e63
[ "MIT" ]
null
null
null
unit_tests/test_hr_calculations.py
mdholbrook/heart_rate_sentinel_server
927b59ad6d2078bd6e3491014fdebbc610d25e63
[ "MIT" ]
null
null
null
unit_tests/test_hr_calculations.py
mdholbrook/heart_rate_sentinel_server
927b59ad6d2078bd6e3491014fdebbc610d25e63
[ "MIT" ]
null
null
null
import pytest

from functions.hr_calculations import *
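# Added sketch (assumption): the original test bodies were elided from this
# record. A minimal pytest-style example of the shape such a test could take;
# it deliberately computes the expected value inline instead of guessing at
# names inside functions.hr_calculations.
def test_average_heart_rate_example():
    heart_rates = [60, 70, 80]
    assert sum(heart_rates) / len(heart_rates) == 70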
32.018692
78
0.600117
f41e1e3571049d96370122828fa85b57484158ca
2,492
py
Python
selfdrive/boardd/tests/test_boardd_api.py
919bot/Tessa
9b48ff9020e8fb6992fc78271f2720fd19e01093
[ "MIT" ]
114
2020-02-24T14:18:01.000Z
2022-03-19T03:42:00.000Z
selfdrive/boardd/tests/test_boardd_api.py
919bot/Tessa
9b48ff9020e8fb6992fc78271f2720fd19e01093
[ "MIT" ]
15
2020-02-25T03:37:44.000Z
2021-09-08T01:51:15.000Z
selfdrive/boardd/tests/test_boardd_api.py
919bot/Tessa
9b48ff9020e8fb6992fc78271f2720fd19e01093
[ "MIT" ]
73
2018-12-03T19:34:42.000Z
2020-07-27T05:10:23.000Z
import random
import numpy as np

import selfdrive.boardd.tests.boardd_old as boardd_old
import selfdrive.boardd.boardd as boardd

from common.realtime import sec_since_boot
from cereal import log
import unittest


if __name__ == '__main__':
    unittest.main()
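# Added sketch (assumption): the original TestCase classes were elided from
# this record, so unittest.main() above has nothing to discover. A minimal
# example of the shape such a test could take (it would sit above the
# __main__ guard in the real file); the assertion is illustrative only.
class TestBoarddApiSketch(unittest.TestCase):
    def test_sec_since_boot_is_monotonic(self):
        t1 = sec_since_boot()
        t2 = sec_since_boot()
        self.assertLessEqual(t1, t2)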
31.948718
104
0.668539
f41f9b1b5316c6d5a7a52a8e3e8227d25b183272
2,037
py
Python
py_types/static/parse.py
zekna/py-types
ec39da1277986f0ea44830dfb0da9d906deb13e1
[ "MIT" ]
5
2015-06-18T20:04:56.000Z
2016-03-15T15:32:44.000Z
py_types/static/parse.py
sarlianna/py-types
ec39da1277986f0ea44830dfb0da9d906deb13e1
[ "MIT" ]
1
2016-01-19T01:39:54.000Z
2016-01-27T19:17:31.000Z
py_types/static/parse.py
zekna/py-types
ec39da1277986f0ea44830dfb0da9d906deb13e1
[ "MIT" ]
null
null
null
import ast
import inspect
import sys
import argparse

from ..runtime.asserts import typecheck


if __name__ == "__main__":
    parse("static/example_parse_me.py")
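# Added sketch (assumption): the original `parse` implementation was elided
# from this record. A minimal version consistent with the imports above (it
# would sit above the __main__ guard in the real file): read the source file,
# build an AST, and report the functions found.
def parse(filename):
    """Parse a Python source file and print its top-level function names."""
    with open(filename) as f:
        tree = ast.parse(f.read(), filename=filename)
    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef):
            print(node.name)
    return tree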
29.955882
122
0.587138
f420912bbaeaef68549b8a153f2087a527d8302c
475
py
Python
example/example/urls.py
pmaccamp/django-tastypie-swagger
d51ef3ea8e33791617edba8ed55a1be1f16e4ccc
[ "Apache-2.0" ]
2
2020-04-13T13:26:42.000Z
2021-10-30T17:56:15.000Z
example/example/urls.py
pmaccamp/django-tastypie-swagger
d51ef3ea8e33791617edba8ed55a1be1f16e4ccc
[ "Apache-2.0" ]
null
null
null
example/example/urls.py
pmaccamp/django-tastypie-swagger
d51ef3ea8e33791617edba8ed55a1be1f16e4ccc
[ "Apache-2.0" ]
5
2020-04-15T07:05:13.000Z
2021-11-01T20:36:10.000Z
from django.conf.urls import include, url
from django.contrib import admin

from demo.apis import api

urlpatterns = [
    url(r'^api/', include(api.urls)),
    url(r'^api/doc/', include(('tastypie_swagger.urls', 'tastypie_swagger'),
                              namespace='demo_api_swagger'),
        kwargs={
            "tastypie_api_module": "demo.apis.api",
            "namespace": "demo_api_swagger",
            "version": "0.1"}
        ),
    url(r'^admin/', admin.site.urls),
]
29.6875
76
0.6
f420c7cad07b73b890ce9019d4a200470cb1bcbf
948
py
Python
scrapy_framework/midwares/download_midware.py
savor007/scrapy_framework
9f1266eb2d4bb7e181d1c5352b05298e77040980
[ "MIT" ]
null
null
null
scrapy_framework/midwares/download_midware.py
savor007/scrapy_framework
9f1266eb2d4bb7e181d1c5352b05298e77040980
[ "MIT" ]
null
null
null
scrapy_framework/midwares/download_midware.py
savor007/scrapy_framework
9f1266eb2d4bb7e181d1c5352b05298e77040980
[ "MIT" ]
null
null
null
from scrapy_framework.html.request import Request
from scrapy_framework.html.response import Response
import random
28.727273
85
0.635021
f420caa0d727e8d433f67df3503f8152d7e6f2e7
2,294
py
Python
tracardi/process_engine/action/v1/pro/scheduler/plugin.py
bytepl/tracardi
e8fa4684fa6bd3d05165fe48aa925fc6c1e74923
[ "MIT" ]
null
null
null
tracardi/process_engine/action/v1/pro/scheduler/plugin.py
bytepl/tracardi
e8fa4684fa6bd3d05165fe48aa925fc6c1e74923
[ "MIT" ]
null
null
null
tracardi/process_engine/action/v1/pro/scheduler/plugin.py
bytepl/tracardi
e8fa4684fa6bd3d05165fe48aa925fc6c1e74923
[ "MIT" ]
null
null
null
from pydantic import BaseModel

from tracardi.domain.entity import Entity
from tracardi.domain.scheduler_config import SchedulerConfig
from tracardi.domain.resource import ResourceCredentials
from tracardi.service.storage.driver import storage
from tracardi.service.plugin.runner import ActionRunner
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Form, FormGroup, FormField, FormComponent
from tracardi.service.plugin.domain.result import Result
29.792208
117
0.610724
f4211dfd13f13cb0b576625ee36371455d4c829c
568
py
Python
tests/test_covid_daily.py
alvarobartt/covid-daily
cb4506a007ac206e85409a13281028f6f82441a6
[ "MIT" ]
13
2020-05-23T12:25:04.000Z
2021-12-09T04:56:06.000Z
tests/test_covid_daily.py
alvarobartt/covid-daily
cb4506a007ac206e85409a13281028f6f82441a6
[ "MIT" ]
6
2020-06-02T12:18:12.000Z
2021-06-20T07:59:11.000Z
tests/test_covid_daily.py
alvarobartt/covid-daily
cb4506a007ac206e85409a13281028f6f82441a6
[ "MIT" ]
5
2020-07-02T16:48:19.000Z
2022-03-21T01:52:17.000Z
# Copyright 2020 Alvaro Bartolome, alvarobartt @ GitHub
# See LICENSE for details.

import pytest

import covid_daily


if __name__ == "__main__":
    test_overview()
    test_data()
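# Added sketch (assumption): the original test bodies were elided from this
# record. Minimal stand-ins matching the names called in the __main__ block;
# the exact covid_daily API (signatures, return types) is not confirmed here,
# so the tests only assert that the public entry points exist.
def test_overview():
    assert hasattr(covid_daily, 'overview')


def test_data():
    assert hasattr(covid_daily, 'data')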
15.777778
55
0.568662
f4214fc3ae97e545eaf80e0585a829da218ecbdc
6,132
py
Python
2021/HANFS/fence-agents/fence/agents/zvm/fence_zvmip.py
BryanWhitehurst/HPCCEA
54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa
[ "MIT" ]
10
2019-08-12T23:00:20.000Z
2021-08-06T17:06:48.000Z
2021/HANFS/fence-agents/fence/agents/zvm/fence_zvmip.py
BryanWhitehurst/HPCCEA
54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa
[ "MIT" ]
5
2020-06-18T23:51:58.000Z
2021-07-28T17:50:34.000Z
2021/HANFS/fence-agents/fence/agents/zvm/fence_zvmip.py
BryanWhitehurst/HPCCEA
54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa
[ "MIT" ]
21
2019-06-10T21:03:03.000Z
2021-08-06T17:57:25.000Z
#!@PYTHON@ -tt

import sys
import atexit
import socket
import struct
import logging

sys.path.append("@FENCEAGENTSLIBDIR@")
from fencing import *
from fencing import fail, fail_usage, run_delay, EC_LOGIN_DENIED, EC_TIMED_OUT

#BEGIN_VERSION_GENERATION
RELEASE_VERSION=""
REDHAT_COPYRIGHT=""
BUILD_DATE=""
#END_VERSION_GENERATION

INT4 = 4

if __name__ == "__main__":
    main()
31.285714
139
0.693575
f4217689eb43722ace5f25924ae5b537893153d9
668
py
Python
2.5.9/test_splash/test_splash/spiders/with_splash.py
feel-easy/myspider
dcc65032015d7dbd8bea78f846fd3cac7638c332
[ "Apache-2.0" ]
1
2019-02-28T10:16:00.000Z
2019-02-28T10:16:00.000Z
2.5.9/test_splash/test_splash/spiders/with_splash.py
wasalen/myspider
dcc65032015d7dbd8bea78f846fd3cac7638c332
[ "Apache-2.0" ]
null
null
null
2.5.9/test_splash/test_splash/spiders/with_splash.py
wasalen/myspider
dcc65032015d7dbd8bea78f846fd3cac7638c332
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest  # scrapy_splashrequest
35.157895
70
0.606287
f421c73bcef415f2ea33d9a7d630fcbbb3f2dac1
180
py
Python
run.py
iudaichi/iu_linebot
d3f5a7b0227b175963d51d62bcd5894366bde35c
[ "MIT" ]
null
null
null
run.py
iudaichi/iu_linebot
d3f5a7b0227b175963d51d62bcd5894366bde35c
[ "MIT" ]
null
null
null
run.py
iudaichi/iu_linebot
d3f5a7b0227b175963d51d62bcd5894366bde35c
[ "MIT" ]
null
null
null
from main import app
import os
import uvicorn

if __name__ == '__main__':
    port = int(os.getenv("PORT"))
    uvicorn.run(app, host="0.0.0.0", port=port, workers=1, reload=True)
22.5
71
0.677778
f422b787a305cf7e7c9786d86bf5d8569355733a
5,889
py
Python
fastestimator/architecture/pytorch/unet.py
DwijayDS/fastestimator
9b288cb2bd870f971ec4cee09d0b3205e1316a94
[ "Apache-2.0" ]
57
2019-05-21T21:29:26.000Z
2022-02-23T05:55:21.000Z
fastestimator/architecture/pytorch/unet.py
vbvg2008/fastestimator
6061a4fbbeb62a2194ef82ba8017f651710d0c65
[ "Apache-2.0" ]
93
2019-05-23T18:36:07.000Z
2022-03-23T17:15:55.000Z
fastestimator/architecture/pytorch/unet.py
vbvg2008/fastestimator
6061a4fbbeb62a2194ef82ba8017f651710d0c65
[ "Apache-2.0" ]
47
2019-05-09T15:41:37.000Z
2022-03-26T17:00:08.000Z
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple

import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.init import kaiming_normal_ as he_normal
43.301471
110
0.609611
f422e0910bbd8a7ecf986379f467205dc93f05c0
5,660
py
Python
generalfile/path.py
Mandera/generalfile
5e476a1c075fa072c7e52e62455feeb78b9bb298
[ "MIT" ]
null
null
null
generalfile/path.py
Mandera/generalfile
5e476a1c075fa072c7e52e62455feeb78b9bb298
[ "MIT" ]
null
null
null
generalfile/path.py
Mandera/generalfile
5e476a1c075fa072c7e52e62455feeb78b9bb298
[ "MIT" ]
null
null
null
import pathlib
import os

from generallibrary import VerInfo, TreeDiagram, Recycle, classproperty, deco_cache

from generalfile.errors import InvalidCharacterError
from generalfile.path_lock import Path_ContextManager
from generalfile.path_operations import Path_Operations
from generalfile.path_strings import Path_Strings
from generalfile.optional_dependencies.path_spreadsheet import Path_Spreadsheet
from generalfile.optional_dependencies.path_text import Path_Text
from generalfile.optional_dependencies.path_cfg import Path_Cfg
from generalfile.optional_dependencies.path_pickle import Path_Pickle


setattr(Path, "Path", Path)
28.3
137
0.649117
f423a60c36497f5bf95253c92fffc3d805f3c461
11,128
py
Python
src/genui/models/models.py
Tontolda/genui
c5b7da7c5a99fc16d34878e2170145ac7c8e31c4
[ "0BSD" ]
15
2021-05-31T13:39:17.000Z
2022-03-30T12:04:14.000Z
src/genui/models/models.py
martin-sicho/genui
ea7f1272030a13e8e253a7a9b6479ac6a78552d3
[ "MIT" ]
3
2021-04-08T22:02:22.000Z
2022-03-16T09:10:20.000Z
src/genui/models/models.py
Tontolda/genui
c5b7da7c5a99fc16d34878e2170145ac7c8e31c4
[ "0BSD" ]
5
2021-03-04T11:00:54.000Z
2021-12-18T22:59:22.000Z
import os

from django.db import models
import uuid

# Create your models here.
from djcelery_model.models import TaskMixin
from polymorphic.models import PolymorphicModel

from genui.utils.models import NON_POLYMORPHIC_CASCADE, OverwriteStorage
from genui.utils.extensions.tasks.models import TaskShortcutsMixIn, PolymorphicTaskManager
from genui.projects.models import DataSet


PARAM_VALUE_CTYPE_TO_MODEL_MAP = {
    ModelParameter.STRING : ModelParameterStr,
    ModelParameter.INTEGER : ModelParameterInt,
    ModelParameter.FLOAT : ModelParameterFloat,
    ModelParameter.BOOL : ModelParameterBool
}
34.030581
190
0.668494
f4242dbae6b7b1f46a3706ed85d4e577d7b0d160
106,765
py
Python
projectroles/tests/test_views_api.py
bihealth/sodar_core
a6c22c4f276b64ffae6de48779a82d59a60a9333
[ "MIT" ]
11
2019-06-26T14:05:58.000Z
2020-12-05T02:20:11.000Z
projectroles/tests/test_views_api.py
bihealth/sodar_core
a6c22c4f276b64ffae6de48779a82d59a60a9333
[ "MIT" ]
11
2019-07-01T06:17:44.000Z
2021-04-20T07:19:40.000Z
projectroles/tests/test_views_api.py
bihealth/sodar_core
a6c22c4f276b64ffae6de48779a82d59a60a9333
[ "MIT" ]
4
2019-06-26T07:49:50.000Z
2020-05-19T21:58:10.000Z
"""REST API view tests for the projectroles app""" import base64 import json import pytz from django.conf import settings from django.core import mail from django.forms.models import model_to_dict from django.test import override_settings from django.urls import reverse from django.utils import timezone from knox.models import AuthToken from test_plus.test import APITestCase from projectroles import views_api from projectroles.models import ( Project, Role, RoleAssignment, ProjectInvite, SODAR_CONSTANTS, ) from projectroles.plugins import change_plugin_status, get_backend_api from projectroles.remote_projects import RemoteProjectAPI from projectroles.tests.test_models import ( ProjectMixin, RoleAssignmentMixin, ProjectInviteMixin, RemoteSiteMixin, RemoteProjectMixin, AppSettingMixin, ) from projectroles.tests.test_views import ( TestViewsBase, PROJECT_TYPE_CATEGORY, PROJECT_TYPE_PROJECT, PROJECT_ROLE_OWNER, PROJECT_ROLE_DELEGATE, PROJECT_ROLE_CONTRIBUTOR, PROJECT_ROLE_GUEST, REMOTE_SITE_NAME, REMOTE_SITE_URL, SITE_MODE_SOURCE, SITE_MODE_TARGET, REMOTE_SITE_DESC, REMOTE_SITE_SECRET, ) from projectroles.utils import build_secret CORE_API_MEDIA_TYPE_INVALID = 'application/vnd.bihealth.invalid' CORE_API_VERSION_INVALID = '9.9.9' INVALID_UUID = '11111111-1111-1111-1111-111111111111' NEW_CATEGORY_TITLE = 'New Category' NEW_PROJECT_TITLE = 'New Project' UPDATED_TITLE = 'Updated Title' UPDATED_DESC = 'Updated description' UPDATED_README = 'Updated readme' INVITE_USER_EMAIL = 'new1@example.com' INVITE_USER2_EMAIL = 'new2@example.com' INVITE_MESSAGE = 'Message' # Base Classes ----------------------------------------------------------------- class TestAPIViewsBase( ProjectMixin, RoleAssignmentMixin, SODARAPIViewTestMixin, APITestCase ): """Base API test view with knox authentication""" class TestCoreAPIViewsBase(TestAPIViewsBase): """Override of TestAPIViewsBase to be used with SODAR Core API views""" media_type = views_api.CORE_API_MEDIA_TYPE api_version = views_api.CORE_API_DEFAULT_VERSION # Tests ------------------------------------------------------------------------ # TODO: To be updated once the legacy API view is redone for SODAR Core v1.0
35.166337
93
0.607577
f4244d996a4c380f34dcf151872e78afdd5ea5e0
7,569
py
Python
src/model/model.py
kwasnydam/animal_disambiguation
1dba0a2f40ca952a3adab925ff9ef54238cf7c1c
[ "MIT" ]
null
null
null
src/model/model.py
kwasnydam/animal_disambiguation
1dba0a2f40ca952a3adab925ff9ef54238cf7c1c
[ "MIT" ]
5
2020-03-24T17:52:45.000Z
2021-08-23T20:28:40.000Z
src/model/model.py
kwasnydam/animal_disambiguation
1dba0a2f40ca952a3adab925ff9ef54238cf7c1c
[ "MIT" ]
null
null
null
"""Contains the classification model I am going to use in my problem and some utility functions. Functions build_mmdisambiguator - build the core application object with the collaborators info Classes MMDisambiguator - core class of the application """ import pickle import os import numpy as np from sklearn.linear_model import LogisticRegression import sklearn.metrics as metrics from src.data import dataset DEFAULT_CLASSIFIER_SETTINGS = { 'solver': 'liblinear', 'class_weight': 'balanced', 'C': 1. } up = os.path.dirname DEFAULT_ROOT_DIRECTORY = up(up(up(__file__))) # Get directory two levels above DEFAULT_MODEL_DIRECTORY = os.path.join(DEFAULT_ROOT_DIRECTORY, 'models') def build_mmdisambiguator(data_model_params, data_model_path, classificator_parameters, classificator_path=None): """Given collaborator parameters and /or load paths, build the MMDisambiguator""" if classificator_path is None: data_model = dataset.TextLabelsVectorizer(data_model_params) data_model_saved = try_opening_file_pickle(data_model_path) data_model.deserialize(data_model_saved) classificator = LogisticRegression(**classificator_parameters) disambiguator = MMDisambiguator(data_model, classificator) else: disambiguator_pieces = try_opening_file_pickle(classificator_path) data_model = dataset.TextLabelsVectorizer(data_model_params) data_model.deserialize(disambiguator_pieces['data_model']) classificator = disambiguator_pieces['classificator'] disambiguator = MMDisambiguator(data_model, classificator) return disambiguator
39.836842
118
0.680011
f425ac3324f9ff67c7cc522a90e36c4d71da699a
2,848
py
Python
v0.5.0/nvidia/submission/code/recommendation/pytorch/load.py
myelintek/results
11c38436a158c453e3011f8684570f7a55c03330
[ "Apache-2.0" ]
44
2018-11-07T18:52:33.000Z
2019-07-06T12:48:18.000Z
v0.5.0/nvidia/submission/code/recommendation/pytorch/load.py
myelintek/results
11c38436a158c453e3011f8684570f7a55c03330
[ "Apache-2.0" ]
12
2018-12-13T18:04:36.000Z
2019-06-14T20:49:33.000Z
v0.5.0/nvidia/submission/code/recommendation/pytorch/load.py
myelintek/results
11c38436a158c453e3011f8684570f7a55c03330
[ "Apache-2.0" ]
44
2018-11-09T21:04:52.000Z
2019-06-24T07:40:28.000Z
# Copyright (c) 2018, deepakn94. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import namedtuple

import pandas as pd

RatingData = namedtuple('RatingData',
                        ['items', 'users', 'ratings', 'min_date', 'max_date'])


DATASETS = [k.replace('load_', '') for k in locals().keys() if "load_" in k]
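# Added sketch (assumption): the concrete `load_*` readers were elided from
# this record; the DATASETS list above is derived from their names, so they
# would sit above that line in the real file. One illustrative loader in the
# expected shape (separator and column names are hypothetical):
def load_ml_100k(filename, sort=True):
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    ratings = pd.read_csv(filename, sep='\t', names=names)
    if sort:
        ratings = ratings.sort_values(by='timestamp')
    return RatingData(items=ratings.item_id.nunique(),
                      users=ratings.user_id.nunique(),
                      ratings=len(ratings),
                      min_date=ratings.timestamp.min(),
                      max_date=ratings.timestamp.max())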
34.313253
78
0.670997
f42643ddcdfa49204eb89ec1d689fa4a85b4b22e
38,947
py
Python
rpython/jit/backend/llsupport/test/test_rewrite.py
jptomo/pypy-lang-scheme
55edb2cec69d78f86793282a4566fcbc1ef9fcac
[ "MIT" ]
1
2019-11-25T10:52:01.000Z
2019-11-25T10:52:01.000Z
rpython/jit/backend/llsupport/test/test_rewrite.py
jptomo/pypy-lang-scheme
55edb2cec69d78f86793282a4566fcbc1ef9fcac
[ "MIT" ]
null
null
null
rpython/jit/backend/llsupport/test/test_rewrite.py
jptomo/pypy-lang-scheme
55edb2cec69d78f86793282a4566fcbc1ef9fcac
[ "MIT" ]
null
null
null
from rpython.jit.backend.llsupport.descr import get_size_descr,\
     get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\
     SizeDescr, get_interiorfield_descr
from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\
     GcLLDescr_framework
from rpython.jit.backend.llsupport import jitframe
from rpython.jit.metainterp.gc import get_description
from rpython.jit.tool.oparser import parse
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.metainterp.history import JitCellToken, FLOAT
from rpython.jit.metainterp.history import AbstractFailDescr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper import rclass
from rpython.jit.backend.x86.arch import WORD

o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)

## should ideally be:
## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
##                     %(unicodedescr.basesize + \
##                       10 * unicodedescr.itemsize)d, \
##                     descr=malloc_fixedsize_descr)
## setfield_gc(p0, 10, descr=unicodelendescr)
36.811909
90
0.518885
f427c8d1c78db5257b6c365066dd8f7483686e6c
10,390
py
Python
hummingbot/client/command/history_command.py
sanchaymittal/hummingbot
f8d1c19dfd0875bd12717f9c46ddbe20cc7b9a0d
[ "Apache-2.0" ]
null
null
null
hummingbot/client/command/history_command.py
sanchaymittal/hummingbot
f8d1c19dfd0875bd12717f9c46ddbe20cc7b9a0d
[ "Apache-2.0" ]
null
null
null
hummingbot/client/command/history_command.py
sanchaymittal/hummingbot
f8d1c19dfd0875bd12717f9c46ddbe20cc7b9a0d
[ "Apache-2.0" ]
null
null
null
from decimal import Decimal
import pandas as pd
from typing import (
    Any,
    Dict,
    Set,
    Tuple,
    TYPE_CHECKING)

from hummingbot.client.performance_analysis import PerformanceAnalysis
from hummingbot.core.utils.exchange_rate_conversion import ExchangeRateConversion
from hummingbot.market.market_base import MarketBase
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple

ERC = ExchangeRateConversion.get_instance()
s_float_0 = float(0)

if TYPE_CHECKING:
    from hummingbot.client.hummingbot_application import HummingbotApplication
52.474747
123
0.624254
f427f297c82ca0ccff892cae6ccdb0651100e3ef
3,271
py
Python
scripts/bin2asm.py
sami2316/asm2vec-pytorch
5de1351aeda61d7467b3231e48437fd8d34a970c
[ "MIT" ]
null
null
null
scripts/bin2asm.py
sami2316/asm2vec-pytorch
5de1351aeda61d7467b3231e48437fd8d34a970c
[ "MIT" ]
null
null
null
scripts/bin2asm.py
sami2316/asm2vec-pytorch
5de1351aeda61d7467b3231e48437fd8d34a970c
[ "MIT" ]
null
null
null
import re
import os
import click
import r2pipe
import hashlib
from pathlib import Path
import _pickle as cPickle


if __name__ == '__main__':
    cli()
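# Added sketch (assumption): the click command body was elided from this
# record. A minimal entry point consistent with the imports above (it would
# sit above the __main__ guard in the real file); option names and the output
# layout are hypothetical.
@click.command()
@click.option('-i', '--input', 'ipath', required=True, help='input binary path')
@click.option('-o', '--output', 'opath', default='asm', help='output directory')
def cli(ipath, opath):
    """Disassemble a binary with radare2 and dump one file per function."""
    Path(opath).mkdir(parents=True, exist_ok=True)
    r = r2pipe.open(ipath)
    r.cmd('aaaa')  # run radare2's full analysis pass
    for fn in r.cmdj('aflj') or []:  # list analysed functions as JSON
        r.cmd('s {}'.format(fn['offset']))  # seek to the function
        asm = r.cmd('pdf')  # capture its disassembly
        name = hashlib.sha3_256(asm.encode()).hexdigest()
        with open(os.path.join(opath, name), 'w') as out:
            out.write(asm)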
26.811475
123
0.566188
f4283fe6df2818523658c305534af2e5905a9186
180
py
Python
6/4.py
Chyroc/homework
b1ee8e9629b4dbb6c46a550d710157702d57b00b
[ "MIT" ]
null
null
null
6/4.py
Chyroc/homework
b1ee8e9629b4dbb6c46a550d710157702d57b00b
[ "MIT" ]
1
2018-05-23T02:12:16.000Z
2018-05-23T02:12:31.000Z
6/4.py
Chyroc/homework
b1ee8e9629b4dbb6c46a550d710157702d57b00b
[ "MIT" ]
null
null
null
import re


if __name__ == '__main__':
    print(remove_not_alpha_num('a000 aa-b') == 'a000aab')
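# Added sketch (assumption): the original implementation was elided from this
# record, but the expected-output check above pins the behaviour: drop every
# character that is not a letter or a digit. It would sit above the __main__
# guard in the real file.
def remove_not_alpha_num(s):
    return re.sub(r'[^0-9a-zA-Z]', '', s)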
18
57
0.65
f4284681ecf92df1bb97ccccca1bcb0558c6d8a3
1,763
py
Python
LazyAngus/Assets/Extensions/IOSDeploy/Scripts/Editor/post_process.py
DougLazyAngus/lazyAngus
485a8d5061ab740ab055abfc7fc5b86b864a5c7e
[ "Apache-2.0" ]
null
null
null
LazyAngus/Assets/Extensions/IOSDeploy/Scripts/Editor/post_process.py
DougLazyAngus/lazyAngus
485a8d5061ab740ab055abfc7fc5b86b864a5c7e
[ "Apache-2.0" ]
null
null
null
LazyAngus/Assets/Extensions/IOSDeploy/Scripts/Editor/post_process.py
DougLazyAngus/lazyAngus
485a8d5061ab740ab055abfc7fc5b86b864a5c7e
[ "Apache-2.0" ]
null
null
null
import os
from sys import argv
from mod_pbxproj import XcodeProject
#import appcontroller

path = argv[1]
frameworks = argv[2].split(' ')
libraries = argv[3].split(' ')
cflags = argv[4].split(' ')
ldflags = argv[5].split(' ')
folders = argv[6].split(' ')

print('Step 1: add system frameworks ')
#if framework is optional, add `weak=True`
project = XcodeProject.Load(path + '/Unity-iPhone.xcodeproj/project.pbxproj')
for frwrk in frameworks:
    files = project.get_files_by_name(frwrk)
    for f in files:
        project.remove_file(f)
    if len(frwrk) > 0:
        fo = frwrk.split('|')
        if int(fo[1]):
            project.add_file('System/Library/Frameworks/' + fo[0], tree='SDKROOT', weak=True)
        else:
            project.add_file('System/Library/Frameworks/' + fo[0], tree='SDKROOT')

print('Step 2: add system libraries ')
for lib in libraries:
    files = project.get_files_by_name(lib)
    for f in files:
        project.remove_file(f)
    if len(lib) > 0:
        lo = lib.split('|')
        if int(lo[1]):
            project.add_file('usr/lib/' + lo[0], tree='SDKROOT', weak=True)
        else:
            project.add_file('usr/lib/' + lo[0], tree='SDKROOT')

print('Step 3: add CFLAGS ')
for cf in cflags:
    if len(cf) > 0:
        project.add_other_cflags(cf)

print('Step 4: add LDFLAGS ')
for ldf in ldflags:
    if len(ldf) > 0:
        project.add_other_ldflags(ldf)

print('Step 5: add language folders')
for langFolder in folders:
    if len(langFolder) > 0:
        project.add_folder(path + '/' + langFolder + '.lproj')

print('Step 6: save our change to xcode project file')
if project.modified:
    project.backup()
    project.saveFormat3_2()
29.383333
94
0.608622
f428973b7e9156b1b01843493a65c906c5b5ba52
996
py
Python
judge/migrations/0024_auto_20200705_0246.py
TheAvidDev/pnoj-site
63299e873b1fb654667545222ce2b3157e78acd9
[ "MIT" ]
2
2020-04-02T19:50:03.000Z
2020-08-06T18:30:25.000Z
judge/migrations/0024_auto_20200705_0246.py
TheAvidDev/pnoj-site
63299e873b1fb654667545222ce2b3157e78acd9
[ "MIT" ]
28
2020-03-19T16:29:58.000Z
2021-09-22T18:47:30.000Z
judge/migrations/0024_auto_20200705_0246.py
TheAvidDev/pnoj-site
63299e873b1fb654667545222ce2b3157e78acd9
[ "MIT" ]
2
2020-08-09T06:23:12.000Z
2020-10-13T00:13:25.000Z
# Generated by Django 3.0.8 on 2020-07-05 02:46

from django.db import migrations, models
41.5
278
0.564257
f4299097184e1727c715f499e066d9e69de9e523
26,771
py
Python
src/badge_hub.py
stottlerhenke-seattle/openbadge-hub-py
d0eb1772eb1250862041cc50071252f46d4c4771
[ "MIT" ]
null
null
null
src/badge_hub.py
stottlerhenke-seattle/openbadge-hub-py
d0eb1772eb1250862041cc50071252f46d4c4771
[ "MIT" ]
null
null
null
src/badge_hub.py
stottlerhenke-seattle/openbadge-hub-py
d0eb1772eb1250862041cc50071252f46d4c4771
[ "MIT" ]
null
null
null
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import os
import re
import shlex
import subprocess
import signal
import csv
import logging
import json
import time
from datetime import datetime as dt
from requests.exceptions import RequestException
import glob
import traceback
import random

from badge import *
from badge_discoverer import BadgeDiscoverer, BeaconDiscoverer
from badge_manager_server import BadgeManagerServer
from beacon_manager_server import BeaconManagerServer
from badge_manager_standalone import BadgeManagerStandalone
from beacon_manager_standalone import BeaconManagerStandalone
import hub_manager
from settings import DATA_DIR, LOG_DIR

log_file_name = LOG_DIR + 'hub.log'
scans_file_name = DATA_DIR + 'scan.txt'

pending_file_prefix = DATA_DIR + 'pending_'
audio_archive_file_name = DATA_DIR + 'audio_archive.txt'
proximity_archive_file_name = DATA_DIR + 'proximity_archive.txt'

standalone_audio_file = DATA_DIR + 'audio_data.txt'
standalone_proximity_file = DATA_DIR + 'proximity_data.txt'

AUDIO = "audio"
PROXIMITY = "proximity"

SCAN_DURATION = 3  # seconds

#NOTE try to keep under 100MB or so due to memory constraints
MAX_PENDING_FILE_SIZE = 15000000  # in bytes, so 15MB

# create logger with 'badge_server'
logger = logging.getLogger('badge_server')
logger.setLevel(logging.DEBUG)

# create file handler which logs even debug messages
fh = logging.FileHandler(log_file_name)
fh.setLevel(logging.DEBUG)

# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)

# create formatter and add it to the handlers
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - [%(mac)s] %(message)s')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)

# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)


def has_chunks(filename):
    """
    Returns true if there is data in the file, and false otherwise
    """
    return os.path.exists(filename) and os.path.getsize(filename) > 0


def offload_data():
    """
    Send pending files to server and move pending to archive
    Return True on success, False on failure
    """
    #TODO test with standalone
    #NOTE not currently doing anything with the True/False
    # return values, might decide to do something later
    pending_files = sorted(glob.glob(pending_file_prefix + "*"))
    for pending_file_name in pending_files:
        logger.debug("Sending {} to server".format(pending_file_name))

        if not has_chunks(pending_file_name):
            continue

        chunks = []
        with open(pending_file_name, "r") as pending_file:
            for line in pending_file:
                chunks.append(json.loads(line))

        # real quick grab the data type from the first data entry
        data_type = "audio" if "audio" in chunks[0]["type"] else "proximity"

        # fire away!
        try:
            chunks_written = hub_manager.send_data_to_server(logger, data_type, chunks)
            if chunks_written == len(chunks):
                logger.debug("Successfully wrote {} data entries to server"
                             .format(len(chunks)))
            else:
                # this seems unlikely to happen but is good to keep track of i guess
                logger.error("Data mismatch: {} data entries were not written to server"
                             .format(len(chunks) - chunks_written))
                logger.error("Error sending data from file {} to server!"
                             .format(pending_file_name))
                return False

            # write to archive and erase pending file
            with open(get_archive_name(data_type), "a") as archive_file:
                for chunk in chunks:
                    archive_file.write(json.dumps(chunk) + "\n")
            os.remove(pending_file_name)
        except RequestException as e:
            s = traceback.format_exc()
            logger.error("Error sending data from file {} to server!"
                         .format(pending_file_name))
            logger.error("{},{}".format(e, s))
            return False
    return True


def get_archive_name(data_type):
    """
    Return the name of the archive file for the passed data type
    """
    if data_type == AUDIO:
        return audio_archive_file_name
    else:
        return proximity_archive_file_name


def get_proximity_name(mode="server"):
    """
    return the name of the existing pending proximity file,
    or a new one if either one doesn't exist or if
    the existing file is > MAX_PENDING_FILE_SIZE
    """
    if mode == "server":
        return _get_pending_file_name(PROXIMITY)
    else:
        return standalone_proximity_file


def _get_pending_file_name(data_type):
    """
    If there are no current pending files < MAX_PENDING_FILE_SIZE in size,
    return a new pending filename
    Else, return an existing one.
    """
    filenames = filter(
        lambda x: os.path.getsize(x) < MAX_PENDING_FILE_SIZE,
        glob.glob("{}*{}*".format(pending_file_prefix, data_type)))
    if len(filenames) == 0:
        return _create_pending_file_name(data_type)
    else:
        return filenames[0]


def _create_pending_file_name(data_type):
    """
    Create a pending file name for the given data_type

    Uses the current date/time to create a unique filename
    """
    now = dt.now().strftime("%Y%m%d%H%M%S")
    filename = "{}{}_{}.txt".format(pending_file_prefix, now, data_type)
    if os.path.exists(filename):
        # this seems unlikely to happen, but just in case :)
        # get the number of pending files that match this time and add one
        files = glob.glob("{}{}*{}*".format(pending_file_prefix, now, data_type))
        now = '_'.join((now, str(len(files) + 1)))
        filename = "{}{}_{}.txt".format(pending_file_prefix, now, data_type)
    return filename


def dialogue(bdg, activate_audio, activate_proximity, mode="server"):
    """
    Attempts to read data from the device specified by the address. Reading is handled by gatttool.
    :param bdg:
    :return:
    """
    ret = bdg.pull_data(activate_audio, activate_proximity)
    addr = bdg.addr
    if ret == 0:
        logger.info("Successfully pulled data")
        # if we were able to pull data, we saw the badge again
        bdg.last_seen_ts = time.time()
    else:
        logger.info("Errors pulling data.")

    if bdg.dlg.chunks:
        logger.info("Chunks received: {}".format(len(bdg.dlg.chunks)))
        logger.info("saving chunks to file")

        # store in JSON file
        with open(get_audio_name(mode), "a") as fout:
            for chunk in bdg.dlg.chunks:
                ts_with_ms = round_float_for_log(ts_and_fract_to_float(chunk.ts, chunk.fract))
                log_line = {
                    'type': "audio received",
                    'log_timestamp': round_float_for_log(time.time()),
                    'log_index': -1,  # need to find a good accumulator.
                    'data': {
                        'voltage': round_float_for_log(chunk.voltage),
                        'timestamp': ts_with_ms,
                        'sample_period': chunk.sampleDelay,
                        'num_samples': len(chunk.samples),
                        'samples': chunk.samples,
                        'badge_address': addr,
                        'member': bdg.key,
                        'member_id': bdg.badge_id
                    }
                }

                logger.debug("Chunk timestamp: {0:.3f}, Voltage: {1:.3f}, Delay: {2}, Samples in chunk: {3}".format(
                    ts_with_ms, chunk.voltage, chunk.sampleDelay, len(chunk.samples)))
                #logger.debug(json.dumps(log_line))
                json.dump(log_line, fout)
                fout.write('\n')

        logger.info("done writing")

        # update badge object to hold latest timestamps
        last_chunk = bdg.dlg.chunks[-1]
        last_chunk_ts_pretty = dt.fromtimestamp(last_chunk.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
        if bdg.is_newer_audio_ts(last_chunk.ts, last_chunk.fract):
            logger.debug("Setting last badge audio timestamp to {} {} ({})".format(
                last_chunk.ts, last_chunk.fract, last_chunk_ts_pretty))
            bdg.set_audio_ts(last_chunk.ts, last_chunk.fract)
        else:
            logger.debug("Keeping existing timestamp ({}.{}) for {}. Last chunk timestamp was: {}.{} ({})"
                         .format(bdg.last_audio_ts_int, bdg.last_audio_ts_fract, bdg.addr,
                                 last_chunk.ts, last_chunk.fract, last_chunk_ts_pretty))
    else:
        logger.info("No mic data ready")

    if bdg.dlg.scans:
        logger.info("Proximity scans received: {}".format(len(bdg.dlg.scans)))
        logger.info("saving proximity scans to file")
        with open(get_proximity_name(mode), "a") as fout:
            for scan in bdg.dlg.scans:
                ts_with_ms = round_float_for_log(scan.ts)
                log_line = {
                    'type': "proximity received",
                    'log_timestamp': round_float_for_log(time.time()),
                    'log_index': -1,  # need to find a good accumulator.
                    'data': {
                        'voltage': round_float_for_log(scan.voltage),
                        'timestamp': ts_with_ms,
                        'badge_address': addr,
                        'rssi_distances': {
                            device.ID: {'rssi': device.rssi, 'count': device.count} for device in scan.devices
                        },
                        'member': bdg.key,
                        'member_id': bdg.badge_id
                    }
                }

                logger.debug("SCAN: scan timestamp: {0:.3f}, voltage: {1:.3f}, Devices in scan: {2}".format(
                    ts_with_ms, scan.voltage, scan.numDevices))
                #logger.info(json.dumps(log_line))
                json.dump(log_line, fout)
                fout.write('\n')

        # update badge object to hold latest timestamps
        last_scan = bdg.dlg.scans[-1]
        last_scan_ts_pretty = dt.fromtimestamp(last_scan.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
        logger.debug("Setting last badge proximity timestamp to {} ([])".format(
            last_scan.ts, last_scan_ts_pretty))
        bdg.last_proximity_ts = last_scan.ts
    else:
        logger.info("No proximity scans ready")


def reset():
    '''
    Resets and reconfigures Bluetooth parameters. The specific parameters
    affect connection speed negotiation. It's not pretty, but safer to change
    the conn params this way
    :return:
    '''
    # Resets BLE hci
    logger.info("Resetting bluetooth")
    reset_command = "hciconfig hci0 reset"
    args = shlex.split(reset_command)
    p = subprocess.Popen(args)

    # israspberry pi?
    logger.info("Setting bluetooth connection parameters")
    if os.uname()[4][:3] == 'arm':
        logger.info("Raspberry Pi detected, changing bluetooth connection parameters")
        with open("/sys/kernel/debug/bluetooth/hci0/conn_min_interval", "w") as connparam:
            connparam.write("16")
        with open("/sys/kernel/debug/bluetooth/hci0/conn_max_interval", "w") as connparam:
            connparam.write("17")
    else:
        logger.warn("Not a Raspberry Pi, Bluetooth connection parameters remain untouched (communication may be slower)")

    time.sleep(2)  # requires sleep after reset
    logger.info("Done resetting bluetooth")


def kill_bluepy():
    """
    Kill orphaned/leftover/defunct bluepy-helper processes

    I'd like to move this to a separate utility file or something when
    we refactor
    """
    # get all the bluepy-helper processes
    CMD = "/bin/ps ax | grep bluepy-helper | grep -v grep | awk '{ print $1 }'"
    p = subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE)
    pidstr = p.communicate()[0]
    pids = pidstr.split("\n")
    pids = [int(pid) for pid in pids if pid.isdigit()]
    mypid = os.getpid()

    # dont wanna kill our process by accident :)
    if mypid in pids:
        pids.remove(mypid)

    for pid in pids:
        # KILL KILL KILL
        try:
            os.kill(int(pid), signal.SIGKILL)
            # we waitpid to clean up defunct processes
            os.waitpid(int(pid), 0)
            logger.info("Process with PID {} killed".format(pid))
        except OSError as err:
            logger.error("Unable to kill process with pid {}".format(pid))
            logger.error(err)


if __name__ == "__main__":
    import time
    import argparse

    parser = argparse.ArgumentParser(description="Run scans, send dates, or continuously pull data")
    parser.add_argument('-dr', '--disable_reset_ble', action='store_true', default=False,
                        help="Do not reset BLE")
    parser.add_argument('-m', '--hub_mode', choices=('server', 'standalone'),
                        default='standalone', dest='hub_mode',
                        help="Operation mode - standalone (using a configuration file) or a server")
    parser.add_argument('-t', '--timestamp',
                        type=int, required=False,
                        dest='timestamp', help='UTC timestamp to start pulling data from (int)')

    subparsers = parser.add_subparsers(help='Program mode (e.g. Scan, send dates, pull, scan etc.)', dest='mode')
    add_pull_command_options(subparsers)
    add_scan_command_options(subparsers)
    add_sync_all_command_options(subparsers)
    add_start_all_command_options(subparsers)
    add_print_badges_command_options(subparsers)

    args = parser.parse_args()

    mgr = create_badge_manager_instance(args.hub_mode, args.timestamp)
    mgrb = create_beacon_manager_instance(args.hub_mode, args.timestamp)

    if not args.disable_reset_ble:
        reset()

    if args.mode == "sync_all":
        sync_all_devices(mgr)

    # scan for devices
    if args.mode == "scan":
        devices_scanner(mgr, mgrb, args.show_all)

    # pull data from all devices
    if args.mode == "pull":
        pull_devices(mgr, mgrb, args.start_recording)

    if args.mode == "start_all":
        start_all_devices(mgr)

    if args.mode == "print_badges":
        print_badges(mgr, mgrb)

    exit(0)
38.298999
121
0.614956
f429caab4270145beb2ac6d0e280f42b19535df8
53
py
Python
python/compile.py
liamgam/gdkit
e9d419ff916f15dbd8ec6d7cc59b0a3d8f636a95
[ "BSD-2-Clause" ]
1
2019-01-16T05:59:53.000Z
2019-01-16T05:59:53.000Z
python/compile.py
liamgam/gdkit
e9d419ff916f15dbd8ec6d7cc59b0a3d8f636a95
[ "BSD-2-Clause" ]
null
null
null
python/compile.py
liamgam/gdkit
e9d419ff916f15dbd8ec6d7cc59b0a3d8f636a95
[ "BSD-2-Clause" ]
null
null
null
import compileall

compileall.compile_dir(".", force=1)
26.5
35
0.811321
f429e1e71ee50e3cb36b3bd6d0606c845af7b2a3
3,010
py
Python
saleor/product/migrations/0141_update_descritpion_fields.py
fairhopeweb/saleor
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
[ "CC-BY-4.0" ]
15,337
2015-01-12T02:11:52.000Z
2021-10-05T19:19:29.000Z
saleor/product/migrations/0141_update_descritpion_fields.py
fairhopeweb/saleor
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
[ "CC-BY-4.0" ]
7,486
2015-02-11T10:52:13.000Z
2021-10-06T09:37:15.000Z
saleor/product/migrations/0141_update_descritpion_fields.py
aminziadna/saleor
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
[ "CC-BY-4.0" ]
5,864
2015-01-16T14:52:54.000Z
2021-10-05T23:01:15.000Z
# Generated by Django 3.1.5 on 2021-02-17 11:04

from django.db import migrations

import saleor.core.db.fields
import saleor.core.utils.editorjs
31.354167
78
0.580066
f42aede445a90e085482590f47cc1c5cb9b7e7e5
5,215
py
Python
local_search/sat_isfayer.py
arnaubena97/SatSolver-sat_isfayer
db7edc83547786deb7bf6b1c5d75b406f877ca15
[ "MIT" ]
null
null
null
local_search/sat_isfayer.py
arnaubena97/SatSolver-sat_isfayer
db7edc83547786deb7bf6b1c5d75b406f877ca15
[ "MIT" ]
null
null
null
local_search/sat_isfayer.py
arnaubena97/SatSolver-sat_isfayer
db7edc83547786deb7bf6b1c5d75b406f877ca15
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import sys
import random


def read_file(file_name):
    """Read and parse a DIMACS CNF file: the number of variables,
    the number of clauses, and the clauses themselves as a list."""
    clauses = []
    with open(file_name) as all_file:
        for line in all_file:
            if line.startswith('c'):
                continue  # ignore comments
            if line.startswith('p'):
                num_variables = int(line.split()[2])  # set num_variables
                continue
            if line.strip() == "":
                continue
            clause = list(map(int, line.split()))
            clause.pop()  # drop the trailing 0 terminator
            clauses.append(clause)
    return num_variables, clauses


def print_sol(solution):
    """Print a solution that satisfies all the clauses."""
    print("s SATISFIABLE")
    print("v %s 0" % " ".join(map(str, solution)))
    exit(0)


# Main
if __name__ == "__main__":
    if len(sys.argv) == 2:
        file_name = sys.argv[1]
    else:
        print("\n Command: python %s <file_name.cnf> \n" % sys.argv[0])
        exit(0)

    num_variables, clauses = read_file(file_name)
    sat = walksat_solver(clauses, num_variables)
    sat.solve()
    exit(0)
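# Added sketch (assumption): the walksat_solver class body was elided from
# this record. A compact WalkSAT with the same interface as the call in the
# __main__ block (it would sit above that block in the real file); the flip
# budget and noise probability are illustrative.
class walksat_solver:
    def __init__(self, clauses, num_variables, max_flips=100000, noise=0.5):
        self.clauses = clauses
        self.num_variables = num_variables
        self.max_flips = max_flips
        self.noise = noise

    def _is_true(self, literal, assignment):
        value = assignment[abs(literal)]
        return value if literal > 0 else not value

    def _unsatisfied(self, assignment):
        return [c for c in self.clauses
                if not any(self._is_true(l, assignment) for l in c)]

    def _cost_if_flipped(self, variable, assignment):
        # number of clauses left unsatisfied after flipping `variable`
        assignment[variable] = not assignment[variable]
        cost = len(self._unsatisfied(assignment))
        assignment[variable] = not assignment[variable]
        return cost

    def solve(self):
        # index 0 is a dummy so literals map directly onto positions
        assignment = [False] + [random.random() < 0.5
                                for _ in range(self.num_variables)]
        for _ in range(self.max_flips):
            unsatisfied = self._unsatisfied(assignment)
            if not unsatisfied:
                model = [v if assignment[v] else -v
                         for v in range(1, self.num_variables + 1)]
                print_sol(model)  # prints the model and exits
            clause = random.choice(unsatisfied)
            if random.random() < self.noise:
                variable = abs(random.choice(clause))
            else:
                variable = min((abs(l) for l in clause),
                               key=lambda v: self._cost_if_flipped(v, assignment))
            assignment[variable] = not assignment[variable]
        print("s UNKNOWN")  # flip budget exhausted without finding a model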
36.985816
100
0.607095
f42c731576acb55056eef2a6a2b894f6ff9cf5c6
656
py
Python
torch/_VF.py
Hacky-DH/pytorch
80dc4be615854570aa39a7e36495897d8a040ecc
[ "Intel" ]
60,067
2017-01-18T17:21:31.000Z
2022-03-31T21:37:45.000Z
torch/_VF.py
Hacky-DH/pytorch
80dc4be615854570aa39a7e36495897d8a040ecc
[ "Intel" ]
66,955
2017-01-18T17:21:38.000Z
2022-03-31T23:56:11.000Z
torch/_VF.py
Hacky-DH/pytorch
80dc4be615854570aa39a7e36495897d8a040ecc
[ "Intel" ]
19,210
2017-01-18T17:45:04.000Z
2022-03-31T23:51:56.000Z
""" This makes the functions in torch._C._VariableFunctions available as torch._VF.<funcname> without mypy being able to find them. A subset of those functions are mapped to ATen functions in torch/jit/_builtins.py See https://github.com/pytorch/pytorch/issues/21478 for the reason for introducing torch._VF """ import torch import sys import types sys.modules[__name__] = VFModule(__name__)
21.866667
70
0.73628
f42c89b9ad4a67ef2088d23901ec3eee27d8dfed
1,426
py
Python
sparse_causal_model_learner_rl/annealer/threshold_projection.py
sergeivolodin/causality-disentanglement-rl
5a41b4a2e3d85fa7e9c8450215fdc6cf954df867
[ "CC0-1.0" ]
2
2020-12-11T05:26:24.000Z
2021-04-21T06:12:58.000Z
sparse_causal_model_learner_rl/annealer/threshold_projection.py
sergeivolodin/causality-disentanglement-rl
5a41b4a2e3d85fa7e9c8450215fdc6cf954df867
[ "CC0-1.0" ]
9
2020-04-30T16:29:50.000Z
2021-03-26T07:32:18.000Z
sparse_causal_model_learner_rl/annealer/threshold_projection.py
sergeivolodin/causality-disentanglement-rl
5a41b4a2e3d85fa7e9c8450215fdc6cf954df867
[ "CC0-1.0" ]
null
null
null
import gin
import torch
import logging

from sparse_causal_model_learner_rl.metrics import find_value, find_key
32.409091
108
0.670407
f42cd1526653837e6ebdebb62cc32ac0a5f88b7c
15,684
py
Python
numpyro/contrib/control_flow/scan.py
ucals/numpyro
566a5311d660d28a630188063c03a018165a38a9
[ "Apache-2.0" ]
2
2021-01-10T06:27:51.000Z
2021-01-10T06:27:55.000Z
numpyro/contrib/control_flow/scan.py
ucals/numpyro
566a5311d660d28a630188063c03a018165a38a9
[ "Apache-2.0" ]
null
null
null
numpyro/contrib/control_flow/scan.py
ucals/numpyro
566a5311d660d28a630188063c03a018165a38a9
[ "Apache-2.0" ]
1
2020-12-23T13:27:39.000Z
2020-12-23T13:27:39.000Z
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

from collections import OrderedDict
from functools import partial

from jax import lax, random, tree_flatten, tree_map, tree_multimap, tree_unflatten
import jax.numpy as jnp
from jax.tree_util import register_pytree_node_class

from numpyro import handlers
from numpyro.primitives import _PYRO_STACK, Messenger, apply_stack
from numpyro.util import not_jax_tracer


def scan(f, init, xs, length=None, reverse=False):
    """
    This primitive scans a function over the leading array axes of
    `xs` while carrying along state. See :func:`jax.lax.scan` for more
    information.

    **Usage**:

    .. doctest::

       >>> import numpy as np
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.contrib.control_flow import scan
       >>>
       >>> def gaussian_hmm(y=None, T=10):
       ...     def transition(x_prev, y_curr):
       ...         x_curr = numpyro.sample('x', dist.Normal(x_prev, 1))
       ...         y_curr = numpyro.sample('y', dist.Normal(x_curr, 1), obs=y_curr)
       ...         return x_curr, (x_curr, y_curr)
       ...
       ...     x0 = numpyro.sample('x_0', dist.Normal(0, 1))
       ...     _, (x, y) = scan(transition, x0, y, length=T)
       ...     return (x, y)
       >>>
       >>> # here we do some quick tests
       >>> with numpyro.handlers.seed(rng_seed=0):
       ...     x, y = gaussian_hmm(np.arange(10.))
       >>> assert x.shape == (10,) and y.shape == (10,)
       >>> assert np.all(y == np.arange(10))
       >>>
       >>> with numpyro.handlers.seed(rng_seed=0):  # generative
       ...     x, y = gaussian_hmm()
       >>> assert x.shape == (10,) and y.shape == (10,)

    .. warning:: This is an experimental utility function that allows users to use
        JAX control flow with NumPyro's effect handlers. Currently, `sample` and
        `deterministic` sites within the scan body `f` are supported. If you notice
        that any effect handlers or distributions are unsupported, please file an issue.

    .. note:: It is ambiguous to align `scan` dimension inside a `plate` context.
        So the following pattern won't be supported

        .. code-block:: python

            with numpyro.plate('N', 10):
                last, ys = scan(f, init, xs)

        All `plate` statements should be put inside `f`. For example, the corresponding
        working code is

        .. code-block:: python

            def g(*args, **kwargs):
                with numpyro.plate('N', 10):
                    return f(*arg, **kwargs)

            last, ys = scan(g, init, xs)

    .. note:: Nested scan is currently not supported.

    .. note:: We can scan over discrete latent variables in `f`. The joint density is
        evaluated using parallel-scan (reference [1]) over time dimension, which
        reduces parallel complexity to `O(log(length))`.

        Currently, only the equivalence to
        :class:`~numpyro.contrib.funsor.enum_messenger.markov(history_size=1)`
        is supported. A :class:`~numpyro.handlers.trace` of `scan` with discrete latent
        variables will contain the following sites:

            + init sites: those sites belong to the first trace of `f`. Each of them
              will have name prefixed with `_init/`.
            + scanned sites: those sites collect the values of the remaining scan
              loop over `f`. An addition time dimension `_time_foo` will be
              added to those sites, where `foo` is the name of the first site
              appeared in `f`.

        Not all transition functions `f` are supported. All of the restrictions from
        Pyro's enumeration tutorial [2] still apply here. In addition, there should
        not have any site outside of `scan` depend on the first output of `scan`
        (the last carry value).

    ** References **

    1. *Temporal Parallelization of Bayesian Smoothers*,
       Simo Sarkka, Angel F. Garcia-Fernandez
       (https://arxiv.org/abs/1905.13002)

    2. *Inference with Discrete Latent Variables*
       (http://pyro.ai/examples/enumeration.html#Dependencies-among-plates)

    :param callable f: a function to be scanned.
    :param init: the initial carrying state
    :param xs: the values over which we scan along the leading axis. This can
        be any JAX pytree (e.g. list/dict of arrays).
    :param length: optional value specifying the length of `xs`
        but can be used when `xs` is an empty pytree (e.g. None)
    :param bool reverse: optional boolean specifying whether to run the scan iteration
        forward (the default) or in reverse
    :return: output of scan, quoted from :func:`jax.lax.scan` docs:
        "pair of type (c, [b]) where the first element represents the final loop
        carry value and the second element represents the stacked outputs of the
        second output of f when scanned over the leading axis of the inputs".
    """
    # if there are no active Messengers, we just run and return it as expected:
    if not _PYRO_STACK:
        (length, rng_key, carry), (pytree_trace, ys) = scan_wrapper(
            f, init, xs, length=length, reverse=reverse)
    else:
        # Otherwise, we initialize a message...
        initial_msg = {
            'type': 'control_flow',
            'fn': scan_wrapper,
            'args': (f, init, xs, length, reverse),
            'kwargs': {'rng_key': None,
                       'substitute_stack': []},
            'value': None,
        }

        # ...and use apply_stack to send it to the Messengers
        msg = apply_stack(initial_msg)
        (length, rng_key, carry), (pytree_trace, ys) = msg['value']

    if not msg["kwargs"].get("enum", False):
        for msg in pytree_trace.trace.values():
            apply_stack(msg)
    else:
        from numpyro.contrib.funsor import to_funsor
        from numpyro.contrib.funsor.enum_messenger import LocalNamedMessenger

        for msg in pytree_trace.trace.values():
            with LocalNamedMessenger():
                dim_to_name = msg["infer"].get("dim_to_name")
                to_funsor(msg["value"],
                          dim_to_name=OrderedDict([(k, dim_to_name[k]) for k in sorted(dim_to_name)]))
                apply_stack(msg)

    return carry, ys
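# Added usage sketch: a second, self-contained example of the primitive
# documented above (an AR(1) prior scanned over 5 steps), kept as a comment
# because this record ends mid-module; illustrative only.
#
#     import numpyro
#     import numpyro.distributions as dist
#     from numpyro.contrib.control_flow import scan
#
#     def step(x_prev, _):
#         x = numpyro.sample("x", dist.Normal(0.9 * x_prev, 1.0))
#         return x, x
#
#     def model():
#         x0 = numpyro.sample("x_0", dist.Normal(0.0, 1.0))
#         _, xs = scan(step, x0, None, length=5)
#         return xs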
44.939828
116
0.605011
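The record above documents the `(carry, ys)` contract that `scan` inherits from `jax.lax.scan`. A minimal pure-JAX sketch of that contract, outside any NumPyro effect handlers (the cumulative-sum body is illustrative, not from the record):

import jax.numpy as jnp
from jax import lax

def cumsum_step(carry, x):
    carry = carry + x      # updated carry passed to the next step
    return carry, carry    # (new carry, per-step stacked output)

final, ys = lax.scan(cumsum_step, 0.0, jnp.arange(4.0))
# final == 6.0, ys == [0., 1., 3., 6.]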
f42d1600d0b6bc46f53578838228c289c55fcb61
342
py
Python
src/catalog/migrations/0003_remove_productattributevalue_name.py
earth-emoji/dennea
fbabd7d9ecc95898411aba238bbcca8b5e942c31
[ "BSD-3-Clause" ]
null
null
null
src/catalog/migrations/0003_remove_productattributevalue_name.py
earth-emoji/dennea
fbabd7d9ecc95898411aba238bbcca8b5e942c31
[ "BSD-3-Clause" ]
13
2019-12-09T02:38:36.000Z
2022-03-12T00:33:57.000Z
src/catalog/migrations/0003_remove_productattributevalue_name.py
earth-emoji/dennea
fbabd7d9ecc95898411aba238bbcca8b5e942c31
[ "BSD-3-Clause" ]
null
null
null
# Generated by Django 2.2.12 on 2020-06-10 01:11 from django.db import migrations
19
48
0.608187
f42e0214aa8abe8fa4ef98083bd64acd6f94ca90
1,245
py
Python
e2xgrader/preprocessors/overwritecells.py
divindevaiah/e2xgrader
19eb4662e4eee5ddef673097517e4bd4fb469e62
[ "MIT" ]
2
2021-10-02T10:48:47.000Z
2022-03-02T14:00:48.000Z
e2xgrader/preprocessors/overwritecells.py
divindevaiah/e2xgrader
19eb4662e4eee5ddef673097517e4bd4fb469e62
[ "MIT" ]
70
2020-10-23T16:42:01.000Z
2022-03-14T16:33:54.000Z
e2xgrader/preprocessors/overwritecells.py
divindevaiah/e2xgrader
19eb4662e4eee5ddef673097517e4bd4fb469e62
[ "MIT" ]
10
2020-11-22T16:36:16.000Z
2022-03-02T15:51:24.000Z
import json from nbformat.notebooknode import NotebookNode from nbconvert.exporters.exporter import ResourcesDict from typing import Tuple from nbgrader.api import MissingEntry from nbgrader.preprocessors import OverwriteCells as NbgraderOverwriteCells from ..utils.extra_cells import is_singlechoice, is_multiplechoice
35.571429
75
0.706024
f42ea50cd75ed3588bee01251935be095b9cd852
9,261
py
Python
tools/pdf2txt.py
ehtec/pdfminer.six
5b1823f25ab998e904fc5d81687732580f23e3b9
[ "MIT" ]
null
null
null
tools/pdf2txt.py
ehtec/pdfminer.six
5b1823f25ab998e904fc5d81687732580f23e3b9
[ "MIT" ]
1
2022-01-31T22:58:07.000Z
2022-01-31T22:58:07.000Z
tools/pdf2txt.py
phantomcyber/pdfminer.six
e35a9319a6ae5d310f08f07a5edf16aadc529c1e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
"""A command line tool for extracting text and images from PDF and
outputting them to plain text, html, xml or tags."""
import argparse
import logging
import sys
from typing import Any, Container, Iterable, List, Optional

import pdfminer.high_level
from pdfminer.layout import LAParams
from pdfminer.utils import AnyIO

logging.basicConfig()

OUTPUT_TYPES = ((".htm", "html"),
                (".html", "html"),
                (".xml", "xml"),
                (".tag", "tag"))


if __name__ == '__main__':
    sys.exit(main())
41.34375
85
0.632005
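The `OUTPUT_TYPES` table in the record maps file extensions to output formats. A hedged sketch of how such a table is typically consulted; the `guess_output_type` helper and the plain-text default are hypothetical, not part of the record:

OUTPUT_TYPES = ((".htm", "html"), (".html", "html"), (".xml", "xml"), (".tag", "tag"))

def guess_output_type(path: str) -> str:
    # return the first format whose extension matches; fall back to plain text
    for ext, fmt in OUTPUT_TYPES:
        if path.lower().endswith(ext):
            return fmt
    return "text"

assert guess_output_type("report.html") == "html"
assert guess_output_type("report.txt") == "text"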
f42eca67de3f090707cbdfd6324c3cd84ee5458f
2,757
py
Python
nython/nythonize.py
agungnasik57/nython
cf499fe20f86e2685671495bd941b411fa066813
[ "MIT" ]
53
2020-02-11T15:10:23.000Z
2021-10-05T12:47:14.000Z
nython/nythonize.py
agungnasik57/nython
cf499fe20f86e2685671495bd941b411fa066813
[ "MIT" ]
null
null
null
nython/nythonize.py
agungnasik57/nython
cf499fe20f86e2685671495bd941b411fa066813
[ "MIT" ]
4
2020-02-12T07:03:06.000Z
2020-08-15T14:53:39.000Z
"""Compile Nim libraries as Python Extension Modules. If you want your namespace to coexist with your pthon code, name this ponim.nim and then your import will look like `from ponim.nim import adder` and `from ponim import subtractor`. There must be a way to smooth that out in the __init__.py file somehow. Note that the file must be in the included source code dir. Currently it is easiest to just put this in with your python code. """ from os import listdir, mkdir from os.path import join, expanduser from setuptools import Extension from shutil import copyfile, rmtree from typing import Sequence, Dict, List import subprocess import sys import pathlib # class NimLib(TypedDict): # """Wrapper around a lib name and path for nim cdoe""" # name: str # path: str def nythonize(nimbase: str, modules: Sequence[Dict[str, str]]) -> List[Extension]: """Compile a Nim library as a Python Extension Module. `nimbase` is the path to `nimbase.h` on your system, which is needed for Python to compile gene Nim generated C code. This builds a set of Extenstions, which are then passed back to setuptools. """ extensions = [] # Create a top level working dir rmtree(join("build", "nim_build"), ignore_errors=True) pathlib.Path(join("build", "nim_build")).mkdir(parents=True) for module in modules: module_dir = join("build", "nim_build", f"{module['name']}_build") rmtree(module_dir, ignore_errors=True) mkdir(module_dir) subprocess.run( [ "nim", "compileToC", "--compileOnly", "-d:release", "-d:ssl", "--app:lib", "--opt:speed", "--gc:markAndSweep", f"--nimcache:{module_dir}", module["path"], ], check=True, stderr=sys.stdout.buffer, ) copyfile( nimbase, join(module_dir, "nimbase.h"), ) sources = [] for c_source_file in listdir(module_dir): if c_source_file.endswith(".c"): sources.append(join(module_dir, c_source_file)) extensions.append( Extension( name=module["name"], sources=sources, extra_compile_args=[ "-flto", "-ffast-math", "-march=native", "-mtune=native", "-O3", "-fno-ident", "-fsingle-precision-constant", ], extra_link_args=["-s"], include_dirs=[module_dir], ) ) return extensions
31.689655
82
0.564382
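A hedged sketch of how `nythonize` would be wired into a `setup.py`; the import path is inferred from the repo layout above, and the module name/path pair plus the `nimbase.h` location are placeholder assumptions:

from setuptools import setup
from nython.nythonize import nythonize  # import path assumed from the repo layout

setup(
    name="ponim",
    # one dict per Nim module: the extension name and the .nim source path (hypothetical values)
    ext_modules=nythonize("/usr/include/nim/nimbase.h",
                          [{"name": "ponim.nim", "path": "ponim/ponim.nim"}]),
)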
f42fc38f6dae49e6659d55730c3133cb884a1c0e
3,591
py
Python
tests/contrib/test_util.py
lixinso/pyro
ca0d6417bed3882a47cb8cbb01b36f403ee903d5
[ "MIT" ]
10
2020-03-18T14:41:25.000Z
2021-07-04T08:49:57.000Z
tests/contrib/test_util.py
lixinso/pyro
ca0d6417bed3882a47cb8cbb01b36f403ee903d5
[ "MIT" ]
19
2018-10-30T13:45:31.000Z
2019-09-27T14:16:57.000Z
tests/contrib/test_util.py
lixinso/pyro
ca0d6417bed3882a47cb8cbb01b36f403ee903d5
[ "MIT" ]
5
2020-06-21T23:40:35.000Z
2021-11-09T16:18:42.000Z
from collections import OrderedDict

import pytest
import torch

import pyro.distributions as dist
from pyro.contrib.util import (
    get_indices, tensor_to_dict, rmv, rvv, lexpand, rexpand, rdiag, rtril, hessian
)
from tests.common import assert_equal


def test_lexpand():
    A = torch.tensor([[1., 2.], [-2., 0]])
    assert_equal(lexpand(A), A, prec=1e-8)
    assert_equal(lexpand(A, 4), A.expand(4, 2, 2), prec=1e-8)
    assert_equal(lexpand(A, 4, 2), A.expand(4, 2, 2, 2), prec=1e-8)


def test_rexpand():
    A = torch.tensor([[1., 2.], [-2., 0]])
    assert_equal(rexpand(A), A, prec=1e-8)
    assert_equal(rexpand(A, 4), A.unsqueeze(-1).expand(2, 2, 4), prec=1e-8)
    assert_equal(rexpand(A, 4, 2), A.unsqueeze(-1).unsqueeze(-1).expand(2, 2, 4, 2), prec=1e-8)


def test_rtril():
    A = torch.tensor([[1., 2.], [-2., 0]])
    assert_equal(rtril(A), torch.tril(A), prec=1e-8)
    expanded = lexpand(A, 5, 4)
    expected = lexpand(torch.tril(A), 5, 4)
    assert_equal(rtril(expanded), expected, prec=1e-8)


def test_rdiag():
    v = torch.tensor([1., 2., -1.])
    assert_equal(rdiag(v), torch.diag(v), prec=1e-8)
    expanded = lexpand(v, 5, 4)
    expected = lexpand(torch.diag(v), 5, 4)
    assert_equal(rdiag(expanded), expected, prec=1e-8)


def test_hessian_mvn():
    tmp = torch.randn(3, 10)
    cov = torch.matmul(tmp, tmp.t())
    mvn = dist.MultivariateNormal(cov.new_zeros(3), cov)
    x = torch.randn(3, requires_grad=True)
    y = mvn.log_prob(x)
    assert_equal(hessian(y, x), -mvn.precision_matrix)


def test_hessian_multi_variables():
    x = torch.randn(3, requires_grad=True)
    z = torch.randn(3, requires_grad=True)
    y = (x ** 2 * z + z ** 3).sum()
    H = hessian(y, (x, z))
    Hxx = (2 * z).diag()
    Hxz = (2 * x).diag()
    Hzz = (6 * z).diag()
    target_H = torch.cat([torch.cat([Hxx, Hxz]), torch.cat([Hxz, Hzz])], dim=1)
    assert_equal(H, target_H)
34.528846
95
0.588972
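The `hessian` tests above check second derivatives against closed forms. A minimal sketch of the same pattern on a plain quadratic, assuming `pyro.contrib.util.hessian` as imported in the record:

import torch
from pyro.contrib.util import hessian

x = torch.randn(3, requires_grad=True)
y = (x ** 2).sum()          # gradient is 2x, so the Hessian is 2*I
H = hessian(y, x)
assert torch.allclose(H, 2 * torch.eye(3))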
f43046969834dd88f5e6bf5c76a2ad1571e187a4
87
py
Python
emodul/apps.py
HarisHijazi/mojarnik-server
bee7266609cc0bca7cc6a4059086fc0ba7219a33
[ "MIT" ]
null
null
null
emodul/apps.py
HarisHijazi/mojarnik-server
bee7266609cc0bca7cc6a4059086fc0ba7219a33
[ "MIT" ]
null
null
null
emodul/apps.py
HarisHijazi/mojarnik-server
bee7266609cc0bca7cc6a4059086fc0ba7219a33
[ "MIT" ]
null
null
null
from django.apps import AppConfig
14.5
33
0.747126
f4313551859e5b967cf0a91de7f015a788b3e06f
20,473
py
Python
Diffnet++/class/DataModule.py
mIXs222/diffnet
1f580332254a5113ed7b88b9b2e0aa467344e94d
[ "MIT" ]
null
null
null
Diffnet++/class/DataModule.py
mIXs222/diffnet
1f580332254a5113ed7b88b9b2e0aa467344e94d
[ "MIT" ]
null
null
null
Diffnet++/class/DataModule.py
mIXs222/diffnet
1f580332254a5113ed7b88b9b2e0aa467344e94d
[ "MIT" ]
null
null
null
from __future__ import division from collections import defaultdict import numpy as np from time import time import random import tensorflow.compat.v1 as tf tf.disable_v2_behavior() # import tensorflow as tf
43.652452
166
0.652176
f4315741709ca1828a0cd87b2111a7735ecd6a23
2,656
py
Python
src/models/VanillaTransformer.py
iosurodri/annotated-transformer
e5a7e27067d08c09f51b57bbf2824fbcd80ae4d9
[ "MIT" ]
null
null
null
src/models/VanillaTransformer.py
iosurodri/annotated-transformer
e5a7e27067d08c09f51b57bbf2824fbcd80ae4d9
[ "MIT" ]
null
null
null
src/models/VanillaTransformer.py
iosurodri/annotated-transformer
e5a7e27067d08c09f51b57bbf2824fbcd80ae4d9
[ "MIT" ]
null
null
null
import torch.nn as nn
import torch.nn.functional as F
import copy

from src.layers.layers import Encoder, EncoderLayer, Decoder, DecoderLayer, PositionwiseFeedForward
from src.layers.preprocessing import Embeddings, PositionalEncoding
from src.layers.attention import MultiHeadedAttention

### Generic EncoderDecoder structure:

def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1, alpha=0.5):
    "Helper: Construct a model from hyperparameters."
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model, alpha=alpha)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
        Generator(d_model, tgt_vocab)
    )

    # This was important in the original code:
    # initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)  # in-place variant; the plain xavier_uniform alias is deprecated
    return model


if __name__ == '__main__':
    # Small example model
    tmp_model = make_model(10, 10, 2)
    print(tmp_model)
35.413333
99
0.6875
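The `c = copy.deepcopy` idiom in `make_model` exists so each cloned layer gets its own weights. A small standalone PyTorch sketch of why the copy matters (not from the record):

import copy
import torch.nn as nn

lin = nn.Linear(4, 4)
shared = nn.ModuleList([lin, lin])                  # the same module twice: weights are tied
cloned = nn.ModuleList([lin, copy.deepcopy(lin)])   # independent parameters per layer
print(shared[0].weight is shared[1].weight)   # True
print(cloned[0].weight is cloned[1].weight)   # False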
f432670cd6f74e0a57b036d2bab7509d31c45355
34,716
py
Python
venv/lib/python3.8/site-packages/arch/tests/univariate/test_recursions.py
YileC928/finm-portfolio-2021
3fa1e97423fa731bce0cad3457807e1873120891
[ "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/arch/tests/univariate/test_recursions.py
YileC928/finm-portfolio-2021
3fa1e97423fa731bce0cad3457807e1873120891
[ "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/arch/tests/univariate/test_recursions.py
YileC928/finm-portfolio-2021
3fa1e97423fa731bce0cad3457807e1873120891
[ "MIT" ]
null
null
null
import os import timeit from typing import List import numpy as np from numpy.random import RandomState from numpy.testing import assert_allclose, assert_almost_equal import pytest from scipy.special import gamma import arch.univariate.recursions_python as recpy CYTHON_COVERAGE = os.environ.get("ARCH_CYTHON_COVERAGE", "0") in ("true", "1", "True") try: import arch.univariate.recursions as rec_cython missing_extension = False except ImportError: missing_extension = True if missing_extension: rec = recpy else: rec = rec_cython try: import numba # noqa missing_numba = False except ImportError: missing_numba = True pytestmark = pytest.mark.filterwarnings("ignore::arch.compat.numba.PerformanceWarning") def test_asym_aparch_smoke(self): sigma2 = np.empty(1000) p = o = q = 1 parameters = np.array([0.1, 0.1, 0.1, 0.8, 1.3]) sigma2[:] = np.nan sigma2_delta = np.empty_like(sigma2) recpy.aparch_recursion_python( parameters, self.resids, np.abs(self.resids), sigma2, sigma2_delta, p, o, q, self.nobs, self.backcast, self.var_bounds, ) assert np.all(np.isfinite(sigma2)) sigma2_py = sigma2.copy() sigma2[:] = np.nan recpy.aparch_recursion( parameters, self.resids, np.abs(self.resids), sigma2, sigma2_delta, p, o, q, self.nobs, self.backcast, self.var_bounds, ) assert np.all(np.isfinite(sigma2)) assert_allclose(sigma2_py, sigma2) sigma2[:] = np.nan rec.aparch_recursion( parameters, self.resids, np.abs(self.resids), sigma2, sigma2_delta, p, o, q, self.nobs, self.backcast, self.var_bounds, ) assert np.all(np.isfinite(sigma2)) assert_allclose(sigma2_py, sigma2) def test_bounds_check(): var_bounds = np.array([0.1, 10]) assert_almost_equal(recpy.bounds_check_python(-1.0, var_bounds), 0.1) assert_almost_equal( recpy.bounds_check_python(20.0, var_bounds), 10 + np.log(20.0 / 10.0) ) assert_almost_equal(recpy.bounds_check_python(np.inf, var_bounds), 1010.0)
28.857855
97
0.514863
f43380760e72e46d79cbcf3d20f37e8eb8257947
3,215
py
Python
hermetrics/damerau_levenshtein.py
SoldAI/hermetrics
5e07a4f40376779015ef2f5b964d7ac060ed6e25
[ "MIT" ]
3
2020-01-18T02:37:49.000Z
2022-01-27T19:24:15.000Z
hermetrics/damerau_levenshtein.py
SoldAI/hermetrics
5e07a4f40376779015ef2f5b964d7ac060ed6e25
[ "MIT" ]
null
null
null
hermetrics/damerau_levenshtein.py
SoldAI/hermetrics
5e07a4f40376779015ef2f5b964d7ac060ed6e25
[ "MIT" ]
2
2020-01-26T20:40:19.000Z
2021-08-11T12:05:01.000Z
from .levenshtein import Levenshtein


if __name__ == '__main__':
    print("Damerau-Levenshtein distance")
40.696203
118
0.51353
f434676fc528e9c88694b6e2adf610fc78d5e377
13,130
py
Python
etna/analysis/outliers/hist_outliers.py
Carlosbogo/etna
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
[ "Apache-2.0" ]
1
2021-11-11T21:18:42.000Z
2021-11-11T21:18:42.000Z
etna/analysis/outliers/hist_outliers.py
Carlosbogo/etna
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
[ "Apache-2.0" ]
null
null
null
etna/analysis/outliers/hist_outliers.py
Carlosbogo/etna
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
[ "Apache-2.0" ]
null
null
null
import typing
from copy import deepcopy
from typing import TYPE_CHECKING
from typing import List

import numba
import numpy as np
import pandas as pd

if TYPE_CHECKING:
    from etna.datasets import TSDataset


def compute_f(series: np.ndarray, k: int, p: np.ndarray, pp: np.ndarray) -> typing.Tuple[np.ndarray, list]:
    """
    Compute F.
    F[a][b][k] - minimum approximation error on series[a:b+1] with k outliers.

    http://www.vldb.org/conf/1999/P9.pdf

    Parameters
    ----------
    series:
        array to count F
    k:
        number of outliers
    p:
        array of sums of elements, p[i] - sum from 0th to i elements
    pp:
        array of sums of squares of elements, pp[i] - sum of squares from 0th to i elements

    Returns
    -------
    result: Tuple[np.ndarray, list]
        array F and the outliers_indices bookkeeping structure
    """
    f = np.zeros((len(series), len(series), k + 1))
    s: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]
    ss: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]
    outliers_indices: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]

    for right_border in range(0, len(series)):
        f[0][right_border][0] = optimal_sse(0, right_border, p, pp)
        s[0][right_border][0] = [p[right_border]]
        ss[0][right_border][0] = [pp[right_border]]

    for left_border in range(1, len(series)):
        for right_border in range(left_border, len(series)):
            f[left_border][right_border][0] = optimal_sse(left_border, right_border, p, pp)
            s[left_border][right_border][0] = [p[right_border] - p[left_border - 1]]
            ss[left_border][right_border][0] = [pp[right_border] - pp[left_border - 1]]

    for left_border in range(0, len(series)):
        for right_border in range(left_border, min(len(series), left_border + k)):
            s[left_border][right_border][right_border - left_border + 1] = [0]
            ss[left_border][right_border][right_border - left_border + 1] = [0]
            outliers_indices[left_border][right_border][right_border - left_border + 1] = [
                list(np.arange(left_border, right_border + 1))
            ]

    for left_border in range(len(series)):
        for right_border in range(left_border + 1, len(series)):
            for outlier_number in range(1, min(right_border - left_border + 1, k + 1)):
                f1 = f[left_border][right_border - 1][outlier_number - 1]
                tmp_ss = []
                tmp_s = []
                f2 = []
                now_min = np.inf
                now_outliers_indices = []
                where = 0
                for i in range(len(ss[left_border][right_border - 1][outlier_number])):
                    tmp_ss.append(ss[left_border][right_border - 1][outlier_number][i] + series[right_border] ** 2)
                    tmp_s.append(s[left_border][right_border - 1][outlier_number][i] + series[right_border])
                    now_outliers_indices.append(
                        deepcopy(outliers_indices[left_border][right_border - 1][outlier_number][i])
                    )
                    f2.append(tmp_ss[-1] - tmp_s[-1] ** 2 / (right_border - left_border + 1 - outlier_number))
                    if f2[-1] < now_min:
                        now_min = f2[-1]
                        where = i

                if f1 < now_min:
                    f[left_border][right_border][outlier_number] = f1
                    s[left_border][right_border][outlier_number] = deepcopy(
                        s[left_border][right_border - 1][outlier_number - 1]
                    )
                    ss[left_border][right_border][outlier_number] = deepcopy(
                        ss[left_border][right_border - 1][outlier_number - 1]
                    )
                    outliers_indices[left_border][right_border][outlier_number] = deepcopy(
                        outliers_indices[left_border][right_border - 1][outlier_number - 1]
                    )
                    if len(outliers_indices[left_border][right_border][outlier_number]):
                        for i in range(len(outliers_indices[left_border][right_border][outlier_number])):
                            outliers_indices[left_border][right_border][outlier_number][i].append(right_border)
                    else:
                        outliers_indices[left_border][right_border][outlier_number].append([right_border])
                elif f1 > now_min:
                    f[left_border][right_border][outlier_number] = f2[where]
                    s[left_border][right_border][outlier_number] = tmp_s
                    ss[left_border][right_border][outlier_number] = tmp_ss
                    outliers_indices[left_border][right_border][outlier_number] = now_outliers_indices
                else:
                    f[left_border][right_border][outlier_number] = f1
                    tmp_s.extend(s[left_border][right_border - 1][outlier_number - 1])
                    tmp_ss.extend(ss[left_border][right_border - 1][outlier_number - 1])
                    s[left_border][right_border][outlier_number] = tmp_s
                    ss[left_border][right_border][outlier_number] = tmp_ss

                    tmp = deepcopy(outliers_indices[left_border][right_border - 1][outlier_number - 1])
                    if len(tmp):
                        for i in range(len(tmp)):
                            tmp[i].append(right_border)
                    else:
                        tmp = [[right_border]]
                    outliers_indices[left_border][right_border][outlier_number].extend(now_outliers_indices)
                    outliers_indices[left_border][right_border][outlier_number].extend(deepcopy(tmp))
    return f, outliers_indices


def hist(series: np.ndarray, bins_number: int) -> np.ndarray:
    """
    Compute outliers indices according to hist rule.

    http://www.vldb.org/conf/1999/P9.pdf

    Parameters
    ----------
    series:
        array to count F
    bins_number:
        number of bins

    Returns
    -------
    indices: np.ndarray
        outliers indices
    """
    approximation_error = np.zeros((len(series), bins_number + 1, bins_number))
    anomalies: list = [[[[] for i in range(bins_number)] for j in range(bins_number + 1)] for s in range(len(series))]

    p, pp = np.empty_like(series), np.empty_like(series)
    p[0] = series[0]
    pp[0] = series[0] ** 2

    for i in range(1, len(series)):
        p[i] = p[i - 1] + series[i]
        pp[i] = pp[i - 1] + series[i] ** 2

    f, outliers_indices = compute_f(series, bins_number - 1, p, pp)

    approximation_error[:, 1:, 0] = v_optimal_hist(series, bins_number, p, pp)

    approximation_error[:, 1, :] = f[0]
    for right_border in range(len(series)):
        for outlier_number in range(1, bins_number):
            if len(outliers_indices[0][right_border][outlier_number]):
                anomalies[right_border][1][outlier_number] = deepcopy(
                    outliers_indices[0][right_border][outlier_number][0]
                )

    for right_border in range(1, len(series)):
        for tmp_bins_number in range(2, min(bins_number + 1, right_border + 2)):
            for outlier_number in range(1, min(bins_number, right_border + 2 - tmp_bins_number)):
                # minimum errors of the shorter prefixes; used two statements below
                tmp_approximation_error = approximation_error[:right_border, tmp_bins_number - 1, : outlier_number + 1]
                tmp_f = f[1 : right_border + 1, right_border, : outlier_number + 1][:, ::-1]

                approximation_error[right_border][tmp_bins_number][outlier_number] = np.min(
                    tmp_approximation_error + tmp_f
                )

                where = np.where(
                    tmp_approximation_error + tmp_f
                    == approximation_error[right_border][tmp_bins_number][outlier_number]
                )

                if where[1][0] != outlier_number:
                    anomalies[right_border][tmp_bins_number][outlier_number].extend(
                        deepcopy(outliers_indices[1 + where[0][0]][right_border][outlier_number - where[1][0]][0])
                    )
                anomalies[right_border][tmp_bins_number][outlier_number].extend(
                    deepcopy(anomalies[where[0][0]][tmp_bins_number - 1][where[1][0]])
                )

    count = 0
    now_min = approximation_error[-1][-1][0]
    for outlier_number in range(1, min(approximation_error.shape[1], approximation_error.shape[2])):
        if approximation_error[-1][approximation_error.shape[1] - 1 - outlier_number][outlier_number] <= now_min:
            count = outlier_number
            now_min = approximation_error[-1][approximation_error.shape[1] - 1 - outlier_number][outlier_number]

    return np.array(sorted(anomalies[-1][approximation_error.shape[1] - 1 - count][count]))


def get_anomalies_hist(
    ts: "TSDataset", in_column: str = "target", bins_number: int = 10
) -> typing.Dict[str,
List[pd.Timestamp]]: """ Get point outliers in time series using histogram model. Outliers are all points that, when removed, result in a histogram with a lower approximation error, even with the number of bins less than the number of outliers. Parameters ---------- ts: TSDataset with timeseries data in_column: name of the column in which the anomaly is searching bins_number: number of bins Returns ------- dict of outliers: typing.Dict[str, typing.List[pd.Timestamp]] dict of outliers in format {segment: [outliers_timestamps]} """ outliers_per_segment = {} segments = ts.segments for seg in segments: segment_df = ts.df[seg].reset_index() values = segment_df[in_column].values timestamp = segment_df["timestamp"].values anomalies = hist(values, bins_number) outliers_per_segment[seg] = [timestamp[i] for i in anomalies] return outliers_per_segment
39.667674
119
0.591394
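A hedged usage sketch of the `hist` entry point defined above on a synthetic series. It assumes the module's remaining helpers (`optimal_sse`, `v_optimal_hist`), which fall outside this record, are available via the import path implied by the record's file path:

import numpy as np
from etna.analysis.outliers.hist_outliers import hist  # assumed import path

series = np.sin(np.linspace(0, 6, 100))
series[42] += 5.0                       # inject one point outlier
print(hist(series, bins_number=10))     # expected to include index 42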
f435ce04c79c1712e8cc0abcb3f67f2a72425976
134
py
Python
virtual/lib/python3.6/site-packages/django_pusher/context_processors.py
petermirithu/hooby_lab
ffd641948bc2d2539649ec747114c78b5ad105e7
[ "MIT" ]
2
2020-01-26T15:09:48.000Z
2020-05-10T05:31:05.000Z
virtual/lib/python3.6/site-packages/django_pusher/context_processors.py
petermirithu/hooby_lab
ffd641948bc2d2539649ec747114c78b5ad105e7
[ "MIT" ]
10
2020-06-06T01:10:07.000Z
2022-03-12T00:12:22.000Z
virtual/lib/python3.6/site-packages/django_pusher/context_processors.py
petermirithu/hooby_lab
ffd641948bc2d2539649ec747114c78b5ad105e7
[ "MIT" ]
null
null
null
from django.conf import settings
16.75
58
0.634328
f43612b155ef29350dd3f083a77ca91ae4d8fa46
7,537
py
Python
inconnu/character/update/parse.py
tiltowait/inconnu
6cca5fed520899d159537701b695c94222d8dc45
[ "MIT" ]
4
2021-09-06T20:18:13.000Z
2022-02-05T17:08:44.000Z
inconnu/character/update/parse.py
tiltowait/inconnu
6cca5fed520899d159537701b695c94222d8dc45
[ "MIT" ]
7
2021-09-13T00:46:57.000Z
2022-01-11T06:38:50.000Z
inconnu/character/update/parse.py
tiltowait/inconnu
6cca5fed520899d159537701b695c94222d8dc45
[ "MIT" ]
2
2021-11-27T22:24:53.000Z
2022-03-16T21:05:00.000Z
"""character/update/parse.py - Defines an interface for updating character traits.""" # pylint: disable=too-many-arguments import re import discord from discord_ui.components import LinkButton from . import paramupdate from ..display import display from ... import common, constants from ...log import Log from ...vchar import VChar __MATCHES = {} __KEYS = { "name": "The character's name", "health": "The character's max Health", "willpower": "The character's max Willpower", "humanity": "The character's Humanity", "splat": "The type of character: `vampire`, `mortal`, or `ghoul`", "sh": "+/- Superficial Health damage", "ah": "+/- Aggravated Health damage", "sw": "+/- Superficial Willpower damage", "aw": "+/- Aggravated Willpower damage", "stains": "+/- Stains", "unspent_xp": "+/- Unspent XP", "lifetime_xp": "+/- Total Lifetime XP", "hunger": "+/- The character's Hunger", "potency": "+/- The character's Blood Potency" } __HELP_URL = "https://www.inconnu-bot.com/#/character-tracking?id=tracker-updates" def __parse_arguments(*arguments): """ Parse the user's arguments. Raises ValueErrors and KeyErrors on exceptions. """ if len(arguments) == 0: raise ValueError("You must supply some parameters!") parameters = {} for argument in arguments: split = argument.split("=") key = split[0].lower() if len(split) != 2: err = "Parameters must be in `key = value` pairs." if key not in __KEYS: err += f" Also, `{key}` is not a valid option." raise SyntaxError(err) if key in parameters: raise ValueError(f"You cannot use `{key}` more than once.") if key not in __MATCHES: raise ValueError(f"Unknown parameter: `{key}`.") key = __MATCHES[key] # Get the canonical key value = split[1] if len(value) == 0: raise ValueError(f"No value given for `{key}`.") parameters[key] = value # Don't do any validation here return parameters def __update_character(character: VChar, param: str, value: str) -> str: """ Update one of a character's parameters. Args: character (VChar): The character being updated param (str): The parameter to update value (str): The parameter's new value Raises ValueError if the parameter's value is invalid. """ return getattr(paramupdate, f"update_{param}")(character, value) # We do flexible matching for the keys. Many of these are the same as RoD's # keys, while others have been observed in syntax error logs. This should be # a little more user-friendly. 
def __setup_matches(): """Register all the update keys.""" __register_keys("name") __register_keys("health", "hp") __register_keys("willpower", "wp", "w") __register_keys("humanity", "hm") __register_keys("splat", "type") __register_keys( "sh", "sd", "shp", "suphp", "suph", "supd", "superficialhealth", "superficialdamage" ) __register_keys("ah", "ad", "ahp", "agghp", "aggd", "aggh", "agghealth", "aggdamage") __register_keys("sw", "swp", "supwp", "supw", "superficialwillpower") __register_keys("aw", "awp", "aggwp", "aggw", "aggwillpower") __register_keys("stains", "stain", "s") __register_keys( "current_xp", "xp_current", "current_exp", "exp_current", "currentxp", "currentexp", "xpcurrent", "expcurrent", "cxp", "unspent_xp", "xp_unspent", "unspent_exp", "exp_unspent", "unspentxp", "unspentexp", "xpunspent", "expunspent", "uxp" ) __register_keys( "total_xp", "xp_total", "total_exp", "exp_total", "totalxp", "totalexp", "xptotal", "exptotal", "txp", "lifetimexp", "xplifetime", "explifetime", "lxp", "lifetime_xp", "life_time_xp" ) __register_keys("hunger", "h") __register_keys("potency", "bp", "p") def __register_keys(canonical, *alternates): """Register an update key along with some alternates.""" __MATCHES[canonical] = canonical for alternate in alternates: if alternate in __MATCHES: raise KeyError(f"{alternate} is already an update parameter.") __MATCHES[alternate] = canonical __setup_matches()
34.573394
99
0.629959
f436146fcd68e0fffec8d89af9ff63a4a2a79aad
7,980
py
Python
src/models/train_search_multi_deep.py
smadha/MlTrio
a7269fc4c6d77b2f71432ab9d2ab8fe4e28234d5
[ "Apache-2.0" ]
null
null
null
src/models/train_search_multi_deep.py
smadha/MlTrio
a7269fc4c6d77b2f71432ab9d2ab8fe4e28234d5
[ "Apache-2.0" ]
null
null
null
src/models/train_search_multi_deep.py
smadha/MlTrio
a7269fc4c6d77b2f71432ab9d2ab8fe4e28234d5
[ "Apache-2.0" ]
null
null
null
'''
Uses flattened features in the feature directory and trains a neural network on them
'''
from keras.layers import Dense
from keras.models import Sequential
import keras.regularizers as Reg
from keras.optimizers import SGD, RMSprop
from keras.callbacks import EarlyStopping

import cPickle as pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score

import theano
from models.down_sampling import balanced_subsample

theano.config.openmp = True
OMP_NUM_THREADS=16

users_va_te_dict = dict([ (v,idx) for (idx,v) in enumerate(pickle.load(open("../../bytecup2016data/users_va_te.p"))) ])
print "users_va_te_dict created ", len(users_va_te_dict)

def normalize(X_tr):
    '''
    Normalize training and test data features
    Args:
        X_tr: Unnormalized training features
    Output:
        X_tr: Normalized training features
    '''
    X_mu = np.mean(X_tr, axis=0)
    X_tr = X_tr - X_mu
    X_sig = np.std(X_tr, axis=0)
    X_tr = X_tr/X_sig
    return X_tr, X_mu, X_sig

def genmodel(num_units, actfn='relu', reg_coeff=0.0, last_act='softmax'):
    '''
    Generate a neural network model of appropriate architecture
    Args:
        num_units: architecture of network in the format [n1, n2, ... , nL]
        actfn: activation function for hidden layers ('relu'/'sigmoid'/'linear'/'softmax')
        reg_coeff: L2-regularization coefficient
        last_act: activation function for final layer ('relu'/'sigmoid'/'linear'/'softmax')
    Output:
        model: Keras sequential model with appropriate fully-connected architecture
    '''
    model = Sequential()
    for i in range(1, len(num_units)):
        if i == 1 and i < len(num_units) - 1:
            model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=actfn,
                            W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
        elif i == 1 and i == len(num_units) - 1:
            model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=last_act,
                            W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
        elif i < len(num_units) - 1:
            model.add(Dense(output_dim=num_units[i], activation=actfn,
                            W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
        elif i == len(num_units) - 1:
            model.add(Dense(output_dim=num_units[i], activation=last_act,
                            W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
    return model

def get_transform_label():
    '''
    Returns the list of labels as a list of [0/1, 1/0] pairs:
    if label = 1 -> [0, 1]
    if label = 0 -> [1, 0]
    '''
    count = 0
    users_order = []
    ## features to be deleted
    del_rows = []
    with open("../../bytecup2016data/invited_info_train_PROC.txt","r") as f:
        training_data = f.readline().strip().split("\t")
        while training_data and len(training_data) >= 2 :
            user_id = training_data[1]
            label = training_data[2]
            if user_id in users_va_te_dict:
                users_order.append((user_id,label) )
            else:
                del_rows.append(count)
            count += 1
            training_data = f.readline().strip().split("\t")
    f.close()
    print "users_order created ", len(users_order), len(del_rows)
    return transform_label(users_order), del_rows

features = pickle.load( open("../feature_engg/feature/all_features.p", "rb") )
labels, del_rows = get_transform_label()
# features = np.random.normal(size=(26796,3))
# labels, del_rows = get_transform_label()

print len(features),len(features[0])
print len(labels),len(labels[0])

features = np.array(features)
features = np.delete(features, del_rows, axis=0)

col_deleted = np.nonzero((features==0).sum(axis=0) > (len(features)-1000))
# col_deleted = col_deleted[0].tolist() + range(6,22) + range(28,44)
print col_deleted
features = np.delete(features, col_deleted, axis=1)

print len(features),len(features[0])
print len(labels),len(labels[0])

features, X_mu, X_sig = normalize(features)
save_res = {"col_deleted":col_deleted,"X_mu":X_mu,"X_sig":X_sig}

with open("model/train_config", 'wb') as pickle_file:
    pickle.dump(save_res, pickle_file, protocol=2)

print "Dumped config"

momentum = 0.99
eStop = True
sgd_Nesterov = True
sgd_lr = 1e-5
batch_size=5000
nb_epoch=100
verbose=True

features_tr, features_te,labels_tr, labels_te = train_test_split(features,labels, train_size = 0.85)
# free the full copies only after the split; emptying them beforehand would wipe the training data
features,labels = [] , []
print "Using separate test data", len(features_tr), len(features_te)

arch_range = [[len(features_tr[0]),1024,len(labels_tr[0])], [len(features_tr[0]),1024,512,len(labels_tr[0])],
              [len(features_tr[0]),1024,1024,len(labels_tr[0])],[len(features_tr[0]),1024,512,256,len(labels_tr[0])]]
reg_coeffs_range = [1e-6, 5e-6, 1e-5, 5e-5, 5e-4 ]
sgd_decays_range = [1e-6, 1e-5, 5e-5, 1e-4, 5e-4 ]
class_weight_0_range = [1]
# subsample_size_range = [2,2.5,3]

#GRID SEARCH ON BEST PARAM
for arch in arch_range:
    for reg_coeff in reg_coeffs_range:
        for sgd_decay in sgd_decays_range:
            # for subsample_size in subsample_size_range:
            run_NN(arch, reg_coeff, sgd_decay)

# arch = [len(features[0]),1024,512,2]
# reg_coeff = 1e-05
# sgd_decay = 1e-05
# class_weight_0 = 0.5
34.847162
213
0.663033
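`normalize` above returns `(X_tr, X_mu, X_sig)`, and the script pickles `X_mu`/`X_sig` into `model/train_config`. A sketch of the matching transform at inference time; the loading code and the placeholder test matrix are assumptions, not shown in the record:

import cPickle as pickle
import numpy as np

config = pickle.load(open("model/train_config", "rb"))
X_te = np.random.rand(5, 10)  # placeholder test features
# apply the training-set statistics so test data lives in the same normalized space
X_te = (X_te - config["X_mu"]) / config["X_sig"]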
f43697d11efae6dda37ec02c7a022ad4d3dc4330
11,009
py
Python
formation.py
graham-kim/pygremlin-graph-visualiser
65cb4d4fb71c8dde46ff1a36a40adcbdf233448c
[ "MIT" ]
null
null
null
formation.py
graham-kim/pygremlin-graph-visualiser
65cb4d4fb71c8dde46ff1a36a40adcbdf233448c
[ "MIT" ]
39
2020-07-25T10:58:19.000Z
2020-08-28T15:02:12.000Z
formation.py
graham-kim/pygremlin-graph-visualiser
65cb4d4fb71c8dde46ff1a36a40adcbdf233448c
[ "MIT" ]
null
null
null
import sys import os sys.path.append( os.path.dirname(__file__) ) import numpy as np import typing as tp import angles from model import Node, Link, Label from spec import ArrowDraw, NodeSpec
42.670543
121
0.592788
f438ca5f15375beccb9a1833b80357a7835e309b
454
py
Python
opencv_camera/parameters/utils.py
MomsFriendlyRobotCompany/opencv_camera
046d779a853ef0117c0177c03a6fd81f361a9dd3
[ "MIT" ]
6
2020-08-17T04:36:24.000Z
2022-02-22T13:54:20.000Z
opencv_camera/parameters/utils.py
MomsFriendlyRobotCompany/opencv_camera
046d779a853ef0117c0177c03a6fd81f361a9dd3
[ "MIT" ]
1
2020-12-28T20:45:17.000Z
2021-01-01T02:39:02.000Z
opencv_camera/parameters/utils.py
MomsFriendlyRobotCompany/opencv_camera
046d779a853ef0117c0177c03a6fd81f361a9dd3
[ "MIT" ]
1
2020-12-15T04:04:54.000Z
2020-12-15T04:04:54.000Z
##############################################
# The MIT License (MIT)
# Copyright (c) 2014 Kevin Walchko
# see LICENSE for full details
##############################################
# -*- coding: utf-8 -*-

from math import atan, pi


def fov(w, f):
    """
    Returns the FOV in degrees, given:
    w: image width (or height) in pixels
    f: focalLength (fx or fy) in pixels
    """
    return 2*atan(w/2/f) * 180/pi
26.705882
48
0.451542
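A quick worked example of `fov` with plausible camera numbers (640 px width, fx = 525 px, roughly a Kinect-class RGB camera; the values are illustrative only):

from math import atan, pi

w, fx = 640, 525.0
horizontal_fov = 2 * atan(w / 2 / fx) * 180 / pi
print(round(horizontal_fov, 1))  # ~62.8 degrees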
f439222f5a9cee3a82981ad6666b33d56810e907
3,571
py
Python
Code_Hybrid_SLIMBPR_CBF_RP3Beta.py
SamanFekri/BookRecommendation
07dfa875154af39546cb263d4407339ce26d47e8
[ "MIT" ]
null
null
null
Code_Hybrid_SLIMBPR_CBF_RP3Beta.py
SamanFekri/BookRecommendation
07dfa875154af39546cb263d4407339ce26d47e8
[ "MIT" ]
null
null
null
Code_Hybrid_SLIMBPR_CBF_RP3Beta.py
SamanFekri/BookRecommendation
07dfa875154af39546cb263d4407339ce26d47e8
[ "MIT" ]
null
null
null
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sps
import time

RM_train=pd.read_csv('./input/data_train.csv')
R_test=pd.read_csv('./input/data_target_users_test.csv')
URM=pd.read_csv('./input/data_train.csv')
ICM = pd.read_csv('./input/data_ICM_title_abstract.csv')

##### URM
URM_tuples = [tuple(x) for x in URM.to_numpy()]

userList, itemList, ratingList = zip(*URM_tuples)

userList = list(userList)
userList=np.array(userList,dtype=np.int64)
itemList = list(itemList)
itemList=np.array(itemList,dtype=np.int64)
ratingList = list(ratingList)                    #not needed
ratingList=np.array(ratingList,dtype=np.int64)   #not needed

URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()

#### ICM
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]
itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)

itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm,dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm,dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm,dtype=np.float64)

ICM_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))

#### Test
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]

#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample

URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage = 0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)

evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])

### hybrid recommender

### Using TF-IDF
ICM_all = ICM_all.tocsr()
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature
# (np.ediff1d on a CSR indptr counts the non-zeros per row; the +1 also prevents division by zero)
items_per_feature = np.ediff1d(ICM_all.indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))

from scipy.sparse import diags

ICM_idf = ICM_all.copy()
ICM_idf = diags(IDF)*ICM_idf

############## top pop
item_popularity = np.ediff1d(URM_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
popular_items = np.flip(popular_items, axis=0)
popular_items = popular_items[0:10]

###########
from HybridRecommender import HybridRecommender

recommender = HybridRecommender(URM_all)
recommender.fit([0.2, 0.3, 0.2], ICM_idf)
recoms = recommender.recommend(userTestList, cutoff=10)

recomList = []
for i in range(len(recoms)):
    user_id = userTestList[i]
    start_pos = URM_train.indptr[user_id]
    end_pos = URM_train.indptr[user_id + 1]
    if start_pos == end_pos:
        recomList.append(' '.join(str(e) for e in popular_items))
    else:
        recomList.append(' '.join(str(e) for e in recoms[i]))

# print(recomList)
res = {"user_id": userTestList, "item_list": recomList}
result = pd.DataFrame(res, columns= ['user_id', 'item_list'])
result.to_csv('outputs/hybrid_slim_cbf_rp3v1.csv', index = False, header=True)
31.324561
122
0.758891
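The TF-IDF block above applies the weights by left-multiplying with `diags(IDF)`. A tiny self-contained sketch of that pattern on a 2x3 sparse matrix (the values are made up):

import numpy as np
import scipy.sparse as sps
from scipy.sparse import diags

M = sps.csr_matrix(np.array([[1.0, 0.0, 2.0],
                             [0.0, 3.0, 0.0]]))
weights = np.array([0.5, 2.0])
# left-multiplying by a diagonal matrix rescales each ROW by the matching weight,
# which is exactly the weighting used in the record above
W = diags(weights) * M
print(W.toarray())  # [[0.5, 0., 1.], [0., 6., 0.]]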
f43a93adbb44a173a83f3be2da8ae94b9ee5a0d3
989
py
Python
dodge/config.py
MoyTW/7DRL2016_Rewrite
99e092dcb8797a25caa3c8a989a574efae19e4d4
[ "MIT" ]
2
2020-05-10T02:16:28.000Z
2021-04-05T21:54:10.000Z
dodge/config.py
MoyTW/7DRL2016_Rewrite
99e092dcb8797a25caa3c8a989a574efae19e4d4
[ "MIT" ]
null
null
null
dodge/config.py
MoyTW/7DRL2016_Rewrite
99e092dcb8797a25caa3c8a989a574efae19e4d4
[ "MIT" ]
null
null
null
import json
41.208333
66
0.605662
f43be3dcb74991918120ac726f26bac6d8cff63f
524
py
Python
incal_lib/create_dataframe.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
incal_lib/create_dataframe.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
incal_lib/create_dataframe.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
import pandas as pd
import numpy as np


def create_calr_example_df(n_rows, start_date):
    '''
    Build a small example CalR-style DataFrame with two subjects and two
    features (one random, one cumulative), indexed by minute timestamps.
    '''
    np.random.seed(20)
    array = np.random.rand(n_rows)
    cumulative = np.cumsum(array)
    d = {
        'feature1_subject_1': array,
        'feature1_subject_2': array,
        'feature2_subject_1': cumulative,
        'feature2_subject_2': cumulative*2
    }
    idx = pd.date_range(start_date,
                        periods=n_rows,
                        freq="MIN",
                        name='Date_Time_1')
    return pd.DataFrame(data=d, index=idx)
24.952381
55
0.622137
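A quick usage check of `create_calr_example_df` as defined above:

df = create_calr_example_df(n_rows=5, start_date="2021-01-01")
print(df.shape)        # (5, 4): two features for each of two subjects
print(df.index.name)   # 'Date_Time_1', one row per minute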
f43cde7e64305b95ccb8abd4674e469455ce57e1
4,663
py
Python
HybridSN/DataLoadAndOperate.py
lms-07/HybridSN
7580d67a5879d5b53ced75a653d4f198a8aefde2
[ "MIT" ]
null
null
null
HybridSN/DataLoadAndOperate.py
lms-07/HybridSN
7580d67a5879d5b53ced75a653d4f198a8aefde2
[ "MIT" ]
null
null
null
HybridSN/DataLoadAndOperate.py
lms-07/HybridSN
7580d67a5879d5b53ced75a653d4f198a8aefde2
[ "MIT" ]
null
null
null
import os
import numpy as np
import scipy.io as sio
import tifffile

from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split

# Load dataset
# Use the tifffile package to read the hyperspectral image.
# Load the .tiff data set and convert it to .mat data.

### Using PCA to remove the spectral redundancy
### Reduce the spectral dimension, from high-dimensional to low-dimensional.

### Padding zeros

### Create data cube, 3D-patch.

# Dataset split.
43.579439
123
0.671885
f43e3816708a9a04921f14baa15850bfa0137251
1,873
py
Python
alipay/aop/api/domain/AlipayOpenIotmbsDooropenresultSyncModel.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/AlipayOpenIotmbsDooropenresultSyncModel.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/AlipayOpenIotmbsDooropenresultSyncModel.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import *
26.380282
71
0.584624
f43f0adac87483d74d65bc876a1b45c40eb3778c
958
py
Python
setup.py
ghost58400/marlin-binary-protocol
fb93603866ecfce84e887c159bbbb9f9d2f01f17
[ "MIT" ]
null
null
null
setup.py
ghost58400/marlin-binary-protocol
fb93603866ecfce84e887c159bbbb9f9d2f01f17
[ "MIT" ]
null
null
null
setup.py
ghost58400/marlin-binary-protocol
fb93603866ecfce84e887c159bbbb9f9d2f01f17
[ "MIT" ]
null
null
null
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="marlin_binary_protocol", version="0.0.7", author="Charles Willis", author_email="charleswillis3@users.noreply.github.com", description="Transfer files with Marlin 2.0 firmware using Marlin Binary Protocol Mark II", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/charleswillis3/marlin-binary-protocol", packages=setuptools.find_packages(), install_requires=["heatshrink2>=0.9", "pyserial>=3.4", "backports.time_perf_counter; python_version < '3.3'"], classifiers=[ "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4', )
38.32
114
0.662839
f44057beff2cbba250db617a96a21c14300e3ae1
18,028
py
Python
taut_euler_class.py
henryseg/Veering
50ebdcd5bde582726aefdd564c43e17890651282
[ "CC0-1.0" ]
2
2020-08-17T21:38:16.000Z
2021-08-29T21:38:43.000Z
taut_euler_class.py
henryseg/Veering
50ebdcd5bde582726aefdd564c43e17890651282
[ "CC0-1.0" ]
null
null
null
taut_euler_class.py
henryseg/Veering
50ebdcd5bde582726aefdd564c43e17890651282
[ "CC0-1.0" ]
null
null
null
#
# taut_euler_class.py
#

from file_io import parse_data_file, write_data_file

from taut import liberal, isosig_to_tri_angle
from transverse_taut import is_transverse_taut

from sage.matrix.constructor import Matrix
from sage.modules.free_module_element import vector
from sage.arith.misc import gcd
from sage.arith.functions import lcm

#
# Goal - given a transverse taut triangulation, decide if the
# associated "absolute" euler class is torsion or not.  If it is
# torsion, determine its order.
#

# Contents and overview:

# 1. References.
#
# 2. Background.
#
# 3. Helper functions.
#
# 4. Truncate.  We build the correct "truncated" cell structure \calT'
# from (M, \calT) and give generators for the cochain groups
# C^k(\calT', \ZZ) (k = 1, 2).
#
# 5. Representative.  We find a two-cocycle E \in Z^2(\calT', \ZZ)
# that represents E(\calT) \in H^2(M, \ZZ).
#
# 6. Coboundary.  We find the matrix for the coboundary operator
# \delta^1.
#
# 7. Linear algebra.  We solve the linear problem to decide if E is a
# coboundary - that is, if E lies in B^2(\calT', \ZZ) - that is, if E
# is in the image of \delta^1.
#
# 8. Remarks.
#
# 9. Calling code

# 1. References.
#
# Culler, Dunfield - Orderability and Dehn filling
# Ghys - Groups acting on the circle
# Thurston - A norm for the homology of three-manifolds
# Candel, Conlon - Foliations, chapter four

# 2. Background:

# Suppose that (M, \calT) is a transverse taut triangulation.  Then
# \calT^{2} is the "horizontal branched surface".  This carries various
# laminations, which extend to foliations on M.  All of these have the
# same Euler class, which we will denote E(\calT) \in H^2(M, \ZZ).

# Suppose that \calF is a carried foliation and let UT\calF be the
# unit tangent bundle over \calF.  The Euler class E vanishes exactly
# when UT\calF has a section; that is, when the unit tangent bundle is
# trivialisable.

# Recall:

# Suppose that X is an F-bundle over B.  We have

#         i
#  F -------> X  <--.
#             |     |
#             |     |
#            p|     |s
#             |     |
#             v     |
#             B ---'

# So s \from B \to X is a \emph{section} if p \circ s = Id_B

# 3. Helper functions

# 4. Truncate.

# Suppose that M is a connected, cusped, oriented three-manifold.  Let
# C = C(M) \geq 1 be the number of cusps of M.  Suppose that \calT is a
# transverse taut ideal triangulation of M.  Let T = T(\calT) \geq 1
# be the number of tetrahedra of \calT.

# We use Regina to number and orient the edges \{e_i\}_{i = 0}^{T-1},
# the faces \{f_i\}_{i = 0}^{2T-1}, and the tetrahedra \{t_i\}_{i =
# 0}^{T-1} of \calT.  We call all of these \emph{ideal} cells.  Note
# that the transverse structure also gives us co-orientations of the
# e_i and the f_i, called "upwards"

# We remove a small open neighbourhood of all ideal vertices of all
# model tetrahedra.  This gives the \emph{truncated} cell structure
# \calT'.  The remains of the ideal cells are called \emph{truncated}
# cells; we abuse and reuse the notations e_i and f_i for these.  The
# truncated cells inherit orientations and co-orientations.  The new
# cells are called \emph{peripheral} cells.  We number these as
# follows:

# e_{ij} is the peripheral edge cutting vertex v_j off of ideal face f_i
# f_{ij} is the peripheral face cutting vertex v_j off of ideal tetrahedron t_i

# Note that every truncated face is combinatorially a hexagon.  The
# boundary of this hexagon contains three truncated edges alternating
# with three peripheral edges.  We orient each peripheral edge e_{ij}
# so that the orientation of e_{ij} agrees with the orientation
# induced by \bdy f_i.
We orient each peripheral face f_{ij} # anti-clockwise, as viewed from infinity (that is, from outside of # M). Also, we equip e_{ij} and f_{ij} with co-orientations pointing # out of M, called "outward". # e_{i0} # --- # / \ # e_2 / \ e_1 # / \ # / f_i \ # \ / # e_{i1} --------- e_{i2} # e_0 # For an edge e or a face f we use e^* and f^* to denote the dual in # C^1(\calT', \ZZ) or C^2(\calT', \ZZ). Thus \{e^*_i\} \cup # \{e^*_{ij}\} generates C^1(\calT', \ZZ) while \{f^*_i\} \cup # \{f^*_{ij}\} generates C^2(\calT', \ZZ). # For more pictures, see # /Veering_code/NotesPictures/euler_notes_from_nathan.jpg # 5. Representative # We now construct a two-cocycle E \in Z^2(\calT', \ZZ). For every # peripheral face f we take # E(f) = 0. # \begin{remark} # To see that this is correct, let \calF be any foliation of M, # transverse to the boundary. Suppose that f is the given peripheral # triangle. We have a section of the restriction of UT\calF to \bdy # f; namely the outward field. This extends over f to give a section # of UT\calF restricted to f. So there is no obstruction to the # extension. See below for a more precise discussion in terms of # "Poincar\'e-Hopf index". # \end{remark} # Now suppose that f is a truncated face. Suppose that e_0, e_1, e_2 # are its three truncated edges. Recall that these are all oriented. # Let AC(f) be the number of the edges e_0, e_1, e_2 that are # oriented anti-clockwise (that is, agree with their induced # orientation coming from f). We take # E(f) = AC(f) - 2 # If we flip the transverse direction: AC(f') = 3 - AC(f), # so E(f') = AC(f') - 2 = 1 - AC(f) = 2 - AC(f) - 1 = -E(f) - 1 # \begin{remark} # Here is one way to remember (and explain!) this rule. Suppose that # f is the given truncated face. Suppose that s is a section of UTf | # \bdy f. Then index(s) is the total rotation of s with respect to # the tangent field, _plus_ one. This can be rephrased in terms of # the index of tangent vector fields extending s over all of f. # Our choices of orientations of edges determine a section of UTf | # \bdy f. Since all of the boundary edges e_{ij} of f are oriented # the same way, we choose a standard framing there; Nathan tells us to # just use the outward pointing section on all of the e_{ij}. Our # choice of section on e_0 (say) has to (a) depend only on the # orientation of e_0 and (b) has to be outward at the endpoints of # e_0. The simplest choice is the section that rotates by +\pi with # respect to the tangent along \bdy f_i, as we move forward along e_0. # So s points _back_ at the beginning of e_0, points _right_ in the # middle of e_0, and points _forwards_ at the end of e_0. The total # rotation of the resulting field (with respect to the tangent field) # is AC(f) - 3. Thus E(f) = AC(f) - 2 is the index. You can check # this works by drawing the four possible pictures and computing the index # of any extension of s over f. # \end{remark} # Claim: \delta^2 E = 0. # That is, E is a cocycle. # Proof of claim: Fix a truncated tetrahedron t and fix some oriention # of its truncated edges. A direct calculation shows that # \delta E (t) = E \bdy t = 0. # Likewise, a direct computation shows that switching the orientation # of a single edge leaves E \bdy t unchanged. QED. ### It would be nice to have a less computational proof! def euler_cocycle(tri, angle): """ Given a regina triangulation "tri", with oriented edges, and a transverse taut angle structure "angle", returns the associated two-cocycle E representing the Euler class E(tri). 
""" assert is_transverse_taut(tri, angle) face_coorientations = is_transverse_taut(tri, angle, return_type = "face_coorientations") # E will be a _row_ vector, because it eats column vectors. E = [] # First deal with the truncated faces for face in tri.faces(2): # 2 = dimension # First we compute the number of Regina oriented edges that agree with the Regina orientation on face AC = 0 for i in range(3): perm = face.faceMapping(1, i) # print perm[0], perm[1] if perm[1] == ((perm[0] + 1) % 3): # the edge and face orientations agree so, AC = AC + 1 # print "AC", AC # Now we condition on whether or not Regina and angle agree on the (co-)orientation of the face. if face_coorientations[face.index()] == 1: E.append(AC - 2) else: E.append(1 - AC) # Now deal with the peripheral faces for tet in tri.tetrahedra(): for j in range(4): E.append(0) return E # 6. Coboundary # Suppose that e is a truncated edge. Let LF be the set of truncated # faces to the left of e and let RF be the set of faces to the right. Then # \delta e^* = \sum_{f \in LF} f^* - \sum_{f \in RF} f^*. # Suppose that e is a peripheral edge. So there is a unique truncated # face f meeting e. Note that f is to the left of e. There are # also a pair of boundary faces meeting e: say f' _above_ e and f'' # _below_ e. Then # \delta e^* = f^* + (f')^* - (f'')^*. def coboundary(tri, angle): """ Given a triangulation "tri" (T), with oriented edges, and a transverse taut angle structure "angle", returns the co-boundary operator delta^1 \from C^1(T', ZZ) \to C^2(T', ZZ), as a matrix, for the truncated triangulation T'. Note that, strictly speaking, we don't need to use "angle" for this, but we use it to determine orientation on faces for the Euler class, so we might as well use it again here. """ # \delta^1 takes row vectors (functions on edges) and spits out # row vectors (functions on faces). So, if c is a one-cochain # then c \cdot \delta is a two-cochain. delta = [] assert is_transverse_taut(tri, angle) tet_vert_coorientations = is_transverse_taut(tri, angle, return_type = "tet_vert_coorientations") face_coorientations = is_transverse_taut(tri, angle, return_type = "face_coorientations") for edge in tri.edges(): # A row for every truncated edge row = [] for face in tri.triangles(): # A row entry for every truncated face count = 0 for i in range(3): if face.edge(i) == edge: perm = face.faceMapping(1, i) if perm[1] == ((perm[0] + 1) % 3): # the edge and face orientations agree so, count += 1 else: count -= 1 row.append(count * face_coorientations[face.index()]) # +1 if face is to the left of the edge, -1 if face is to # the right of the edge, using Regina's edge orientation # when viewed from above (using the transverse taut notion # of up) # ,'| # ,' | # ,' | # ,' CCW | gets a +1 # `. ^ # `. | # `. 
| # `.| for tet in tri.simplices(): for i in range(4): row.append(0) delta.append(row) for face in tri.triangles(): face_embeddings = [] for j in range(2): face_embeddings.append( face.embedding(j) ) for i in range(3): # vertices of the face # A row for every peripheral edge row = [] for face2 in tri.triangles(): # A row entry for every truncated face if face2 == face: row.append(1) else: row.append(0) for tet in tri.simplices(): for k in range(4): # A row entry for every peripheral face count = 0 for j in range(2): if (tet == face_embeddings[j].simplex()) and (face_embeddings[j].vertices()[i] == k): # the tetrahedron is on the jth side of the # face and the ith vertex of face is the kth # vertex of tet face_num_in_tet = face_embeddings[j].vertices()[3] count -= tet_vert_coorientations[tet.index()][face_num_in_tet] # tet_vert_coorientations is +1 if # coorientation on face points out of the # tetrahedron, and we want count += 1 if # the peripheral face is above the # peripheral edge row.append(count) delta.append(row) return delta # 7. Linear algebra # We ask: is there a one-cocycle C \in C^1(\calT', \ZZ) so that # \delta C = E? If so, then [E] = E(\calT) is zero in H^2, as # desired. # This is a linear algebra problem, so can be solved by, say, sage. def order_of_euler_class(delta, E): """ Given the coboundary operator delta and an Euler two-cocycle E, returns k if [E] is k--torsion. By convention, returns zero if [E] is non-torsion. Note that the trivial element is 1--torsion. """ delta = Matrix(delta) E = vector(E) # Note that E is a coboundary if there is a one-cocycle C solving # # E = C*delta # # We can find C (if it exists at all) using Smith normal form. D, U, V = delta.smith_form() assert D == U*delta*V # So we are trying to solve # # C*delta = C*U.inverse()*D*V.inverse() = E # # for a one-cochain C. Multiply by V to get # # C*delta*V = C*U.inverse()*D = E*V # # Now set # # B = C*U.inverse(), and so B*U = C # # and rewrite to get # # B*U*delta*V = B*D = E*V # # So define E' by: Ep = E*V # Finally we attempt to solve B * D = Ep. Note that D is # diagonal: so if we can solve all of the equations # B[i] * D[i][i] == Ep[i] # with B[i] integers, then [E] = 0 in cohomology. diag = diagonal(D) if any( (diag[i] == 0 and Ep[i] != 0) for i in range(len(Ep)) ): return 0 # All zeros are at the end in Smith normal form. Since we've # passed the above we can now remove them. first_zero = diag.index(0) diag = diag[:first_zero] Ep = Ep[:first_zero] # Since diag[i] is (now) never zero we can divide to get the # fractions Ep[i]/diag[i] and then find the scaling that makes # them simultaneously integral. denoms = [ diag[i] / gcd(Ep[i], diag[i]) for i in range(len(Ep)) ] return lcm(denoms) # 8. Remarks # a) Here is a nice trick that proves [E] = 0 in some cases. Suppose # that \gamma is an oriented path in \bdy M. Suppose that \gamma is # transverse to the one-skeleton of \calT'. We form a one-cocycle # D_\gamma by adding up the boundary edges that \gamma crosses, with # sign. The sign is positive if \gamma crosses from below to above, # and negative otherwise. Note that \delta D_\gamma vanishes on all # boundary faces. # b) Marc Lackenby says that we should take the paths that go up # through the centres of tetrahedra and take the Poincare dual. BUT I # think this is not what we want... Marc is thinking of the relative # Euler class as discussed on page 390 of his paper "Taut ideal # triangulations of three-manifolds". 
The relative Euler class lives # in H^2(M, \bdy M), so is Poincare dual to an element of H_1(M), # represented by a collection of loops. # c) [2019-03-31] It seems that, for transverse veering triangulations # in the 16 census, the Euler class is always zero or two-torsion. # Note that there are manifolds M in the census where H^2(M, \ZZ) has # positive rank... What about odd torsion? # Question: If the veering triangulation is edge-orientable, does the # Euler class vanish? # Answer: Yes. Here is a version of a discussion with Nathan # [2020-04-03] - he says the following: # Suppose that F is a foliation carried by the horizontal branched # surface. Let UTF be the unit tangent bundle to F. We think of # e(UTF) as being the obstruction to UTF having a section. Let G be # the foliation carried by the upper (aka green) branched surface. If # G is transversely orientable (aka edge-orientability of the veering # triangulation) then G \cap F gives the desired section, and e(UTF) = # 0. Note that G \cap F gives, for every point, a pair of points in # the unit tangent circle. So let PUTF be the projective unit tangent # bundle to F. This definitely has a section, so e(PUTF) = 0. Now, # the bundle UTF is a double cover of the bundle PUTF. # Claim: The euler class is multiplicative with respect to covers (in # both senses). # With the claim in hand, we have # 2 * e(UTF) = e(PUTF) = 0 # We deduce that e(UTF) is either zero or two-torsion. # 9. Calling code
35.279843
109
0.625194
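`order_of_euler_class` in the record above calls a `diagonal` helper that belongs to its "Helper functions" section but falls outside this record. A minimal sketch of what it presumably does, assuming Sage matrices; the implementation is an assumption, not the author's code:

def diagonal(M):
    # return the diagonal entries of a (possibly non-square) Sage matrix as a list
    return [M[i, i] for i in range(min(M.nrows(), M.ncols()))]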
f44062d81d380655736648a227bdbe096d8db999
110
py
Python
mailing/urls.py
ananyamalik/Railway-Concession-Portal
295264ccb50bc4750bf0a749c8477384407d51ad
[ "MIT" ]
null
null
null
mailing/urls.py
ananyamalik/Railway-Concession-Portal
295264ccb50bc4750bf0a749c8477384407d51ad
[ "MIT" ]
10
2020-02-11T23:58:12.000Z
2022-03-11T23:43:58.000Z
mailing/urls.py
ananyamalik/Railway-Concession-Portal
295264ccb50bc4750bf0a749c8477384407d51ad
[ "MIT" ]
1
2019-03-26T10:43:34.000Z
2019-03-26T10:43:34.000Z
from django.urls import path

from .views import (
    student_list, student_add, student_profile, student_delete
)
36.666667
80
0.827273
f4418c7fe5090cc1ad72d42e956421d4fcbc0d8c
5,253
py
Python
transformers/tests/tokenization_xlnet_test.py
deepbluesea/transformers
11a2317986aad6e9a72f542e31344cfb7c94cbab
[ "Apache-2.0" ]
270
2020-04-26T17:54:36.000Z
2022-03-24T20:47:11.000Z
transformers/tests/tokenization_xlnet_test.py
deepbluesea/transformers
11a2317986aad6e9a72f542e31344cfb7c94cbab
[ "Apache-2.0" ]
27
2020-06-03T17:34:41.000Z
2022-03-31T01:17:34.000Z
transformers/tests/tokenization_xlnet_test.py
deepbluesea/transformers
11a2317986aad6e9a72f542e31344cfb7c94cbab
[ "Apache-2.0" ]
61
2020-04-25T21:48:11.000Z
2022-03-23T02:39:10.000Z
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest

from transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE)

from .tokenization_tests_commons import CommonTestCases

SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'fixtures/test_sentencepiece.model')


if __name__ == '__main__':
    unittest.main()
49.093458
128
0.61146
f442619ffa1142c65bd44ce29ca3a9c6c0e0aea7
5,153
py
Python
preprocess/utils/liftOver_vcf.py
Rongtingting/xcltk
2e86207c45a1caa7f905a89e1c121c3c203eab2d
[ "Apache-2.0" ]
null
null
null
preprocess/utils/liftOver_vcf.py
Rongtingting/xcltk
2e86207c45a1caa7f905a89e1c121c3c203eab2d
[ "Apache-2.0" ]
null
null
null
preprocess/utils/liftOver_vcf.py
Rongtingting/xcltk
2e86207c45a1caa7f905a89e1c121c3c203eab2d
[ "Apache-2.0" ]
2
2021-01-26T02:07:32.000Z
2021-02-03T03:56:55.000Z
# forked from https://github.com/single-cell-genetics/cellSNP
## A python wrap of UCSC liftOver function for vcf file
## UCSC liftOver binary and hg19 to hg38 chain file:
## https://genome.ucsc.edu/cgi-bin/hgLiftOver
## http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/liftOver
## http://hgdownload.soe.ucsc.edu/goldenPath/hg19/liftOver/hg19ToHg38.over.chain.gz

import sys
import gzip
import subprocess
from optparse import OptionParser

LIFTOVER_INFO = '##INFO=<ID=OLD,Number=1,Type=Integer,'
LIFTOVER_INFO += 'Description="position before liftover">\n'


if __name__ == "__main__":
    main()
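# The module body (including the main() invoked above) is not part of this
# record.  As a rough sketch only: a wrapper like this typically shells out
# to the UCSC liftOver binary, whose documented CLI is
# `liftOver oldFile map.chain newFile unMapped`.  The function below is an
# assumption for illustration, not the module's actual code.
def run_liftover(bed_in, chain_file, bed_out, unmapped):
    # liftOver operates on BED-like positions; a VCF wrapper converts
    # variant positions to BED, lifts them, then rewrites the VCF.
    subprocess.run(["liftOver", bed_in, chain_file, bed_out, unmapped],
                   check=True)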
33.901316
83
0.581603
f442ad3274e1d03978bf00cca2923623d11978bb
8,842
py
Python
pomodorr/frames/tests/test_consumers.py
kamil559/pomodorr
232e6e98ff3481561dd1235794b3960066713210
[ "MIT" ]
null
null
null
pomodorr/frames/tests/test_consumers.py
kamil559/pomodorr
232e6e98ff3481561dd1235794b3960066713210
[ "MIT" ]
15
2020-04-11T18:30:57.000Z
2020-07-05T09:37:43.000Z
pomodorr/frames/tests/test_consumers.py
kamil559/pomodorr
232e6e98ff3481561dd1235794b3960066713210
[ "MIT" ]
null
null
null
import json

import pytest
from channels.db import database_sync_to_async
from channels.testing import WebsocketCommunicator
from pytest_lazyfixture import lazy_fixture

from pomodorr.frames import statuses
from pomodorr.frames.models import DateFrame
from pomodorr.frames.routing import frames_application
from pomodorr.frames.selectors.date_frame_selector import get_finished_date_frames_for_task

pytestmark = [pytest.mark.django_db(transaction=True), pytest.mark.asyncio]
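# The actual test functions are not included in this record.  A sketch of the
# usual shape of a Channels consumer test using the imports above; the
# websocket path is hypothetical.  The module-level pytestmark already applies
# the asyncio and django_db marks, so no per-test decorator is needed.
async def test_connect_sketch():
    communicator = WebsocketCommunicator(frames_application, "/ws/date_frames/")
    connected, _ = await communicator.connect()
    assert connected
    await communicator.disconnect()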
39.123894
120
0.774259
f4430a5ed7a70794aa650554ee2233f1a76e4ce7
1,362
py
Python
Bot/db_aps.py
FaHoLo/Fish_shop
b08018223705bca169dab9f39ec5a55f62822f0b
[ "MIT" ]
null
null
null
Bot/db_aps.py
FaHoLo/Fish_shop
b08018223705bca169dab9f39ec5a55f62822f0b
[ "MIT" ]
null
null
null
Bot/db_aps.py
FaHoLo/Fish_shop
b08018223705bca169dab9f39ec5a55f62822f0b
[ "MIT" ]
null
null
null
import logging
import os

import redis

import moltin_aps


_database = None
db_logger = logging.getLogger('db_logger')
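# The rest of the module is not part of this record.  The `_database = None`
# global suggests the usual lazy-singleton pattern for a Redis connection;
# a sketch of that pattern follows (the env var names are assumptions).
def get_database_connection():
    global _database
    if _database is None:
        _database = redis.Redis(
            host=os.getenv('REDIS_HOST', 'localhost'),
            port=int(os.getenv('REDIS_PORT', '6379')),
            password=os.getenv('REDIS_PASSWORD'),
        )
    return _database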
29.608696
99
0.737885
f4431b68372b44ad4517e0ab87e6c368a124ad83
142
py
Python
backend/server/tables/__init__.py
shiv12095/realtimeviz
ee2bf10b5f9467212f9a9ce8957d80456ebd0259
[ "MIT" ]
1
2021-03-03T13:54:15.000Z
2021-03-03T13:54:15.000Z
backend/server/tables/__init__.py
shiv12095/realtimeviz
ee2bf10b5f9467212f9a9ce8957d80456ebd0259
[ "MIT" ]
null
null
null
backend/server/tables/__init__.py
shiv12095/realtimeviz
ee2bf10b5f9467212f9a9ce8957d80456ebd0259
[ "MIT" ]
1
2021-03-03T13:59:48.000Z
2021-03-03T13:59:48.000Z
from .lime_bike_feed import LimeBikeFeed
from .lime_bike_trips import LimeBikeTrips
from .lime_bike_trips_analyze import LimeBikeTripsAnalyze
35.5
57
0.894366
f444f9703d175494884baaba0472ab27a4d9a8a1
75,692
py
Python
sapmon/payload/provider/sapnetweaver.py
gummadirajesh/AzureMonitorForSAPSolutions
9f8e9dbd38141b5de4782d40556c4368f6ad8d0b
[ "MIT" ]
null
null
null
sapmon/payload/provider/sapnetweaver.py
gummadirajesh/AzureMonitorForSAPSolutions
9f8e9dbd38141b5de4782d40556c4368f6ad8d0b
[ "MIT" ]
null
null
null
sapmon/payload/provider/sapnetweaver.py
gummadirajesh/AzureMonitorForSAPSolutions
9f8e9dbd38141b5de4782d40556c4368f6ad8d0b
[ "MIT" ]
null
null
null
# Python modules
import json
import logging
from datetime import datetime, timedelta, timezone
from time import time
from typing import Any, Callable
import re

import requests
from requests import Session
from threading import Lock

# SOAP Client modules
from zeep import Client
from zeep import helpers
from zeep.transports import Transport
from zeep.exceptions import Fault

# Payload modules
from const import *
from helper.azure import AzureStorageAccount
from helper.context import *
from helper.tools import *
from provider.base import ProviderInstance, ProviderCheck
from netweaver.metricclientfactory import NetWeaverMetricClient, MetricClientFactory
from netweaver.rfcsdkinstaller import PATH_RFC_SDK_INSTALL, SapRfcSdkInstaller
from typing import Dict

# Suppress SSLError warning due to missing SAP server certificate
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# wait time in between attempts to re-download and install RFC SDK package if we have a download blob
# URL defined and previous install attempt was not successful
MINIMUM_RFC_INSTALL_RETRY_INTERVAL = timedelta(minutes=30)

# timeout to use for all SOAP WSDL fetch and other API calls
SOAP_API_TIMEOUT_SECS = 5

# soap client cache expiration, after which amount of time both successful + failed soap client instantiation attempts will be refreshed
SOAP_CLIENT_CACHE_EXPIRATIION = timedelta(minutes=10)

###########################
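# Sketch only (not from this file): how a timeout constant like
# SOAP_API_TIMEOUT_SECS is typically wired into a zeep client via a
# requests Session and zeep Transport.  The WSDL URL is hypothetical.
def make_soap_client_sketch(wsdl_url: str) -> Client:
    session = Session()
    session.verify = False  # matches the suppressed-certificate warning above
    transport = Transport(session=session,
                          timeout=SOAP_API_TIMEOUT_SECS,
                          operation_timeout=SOAP_API_TIMEOUT_SECS)
    return Client(wsdl_url, transport=transport)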
51.702186
175
0.624716
f4458a3941886161e8e7b509e9445b16e1094e76
24
py
Python
docker_squash/version.py
pombredanne/docker-scripts
ecee9f921b22cd44943197635875572185dd015d
[ "MIT" ]
513
2016-04-04T21:44:14.000Z
2022-03-27T06:18:26.000Z
docker_squash/version.py
pombredanne/docker-scripts
ecee9f921b22cd44943197635875572185dd015d
[ "MIT" ]
106
2016-04-01T11:53:20.000Z
2022-03-31T00:35:31.000Z
docker_squash/version.py
pombredanne/docker-scripts
ecee9f921b22cd44943197635875572185dd015d
[ "MIT" ]
75
2016-05-11T01:08:47.000Z
2022-03-25T01:20:06.000Z
version = "1.0.10.dev0"
12
23
0.625
f4480752faba119871fef4e77c8c713728e07b1e
3,294
py
Python
example_usage/example_list_errors.py
oceanprotocol/plecos
ae532df8539e5c327cca57fbc1ea1b1193916cd1
[ "Apache-2.0" ]
1
2019-03-15T14:43:38.000Z
2019-03-15T14:43:38.000Z
example_usage/example_list_errors.py
oceanprotocol/plecos
ae532df8539e5c327cca57fbc1ea1b1193916cd1
[ "Apache-2.0" ]
26
2019-06-04T08:49:42.000Z
2022-02-07T02:06:42.000Z
example_usage/example_list_errors.py
oceanprotocol/Plecos
25b9a3f1698ab2c65ca82ac69ecd1f461c55a581
[ "Apache-2.0" ]
1
2019-03-12T18:31:55.000Z
2019-03-12T18:31:55.000Z
from pathlib import Path
import plecos
import json

print(plecos.__version__)

#%%
path_to_json_local = Path("~/ocn/plecos/plecos/samples/sample_metadata_local.json").expanduser()
path_to_json_remote = Path("~/ocn/plecos/plecos/samples/sample_metadata_remote.json").expanduser()
path_to_broken_json = Path("~/ocn/plecos/plecos/samples/metadata_local_broken.json").expanduser()
path_to_schema_local = Path("~/ocn/plecos/plecos/schemas/metadata_local_v0_3.json").expanduser()
path_to_schema_remote = Path("~/ocn/plecos/plecos/schemas/metadata_remote_v0_3.json").expanduser()

# Select remote or local metadata
LOCAL = True
if LOCAL:
    path_json_file = path_to_json_local
    path_schema_file = path_to_schema_local
    with open(path_to_json_local) as f:
        json_dict = json.load(f)
else:
    path_json_file = path_to_json_remote
    path_schema_file = path_to_schema_remote
    with open(path_to_json_remote) as f:
        json_dict = json.load(f)

print("Json file:", path_json_file)
print("Schema file:", path_schema_file)

#%%
del json_dict['base']['files'][0]['index']
# del json_dict['base']['files'][0]['url']
# json_dict['base']['extra'] = 1
plecos.is_valid_dict(json_dict)
# json_dict['base']['files'][0]['url']
# json_dict['base']['EXTRA ATTRIB!'] = 0
# json_dict['base']['files'][0]['EXTRA_ATTR'] = "????"
# json_dict['base']['price'] = "A string is not allowed!"

errors = plecos.list_errors(json_dict, path_schema_file)
if errors:
    print("ERRORS:")
    for e in errors:
        print(e)
else:
    print("No errors")

raise  # deliberate stop: a bare raise here halts the script before the next cell

#%%
json_dict = {
    "base": {
        "name": "10 Monkey Species Small",
        "author": "Mario",
        "license": "CC0: Public Domain",
        "contentType": "jpg/txt",
        "price": 5,
        "categories": [
            "image"
        ],
        "tags": [
            "image data",
            " animals"
        ],
        "type": "dataset",
        "description": "Example description",
        "copyrightHolder": "",
        "encoding": "",
        "compression": "",
        "workExample": "",
        "inLanguage": "en",
        "files": [
            {
                "url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/training.zip"
            },
            {
                "url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/monkey_labels.txt"
            },
            {
                "url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/validation.zip"
            }
        ],
        "links": [
            {
                "url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/sample/sample.zip",
                "name": "sample.zip",
                "type": "sample"
            },
            {
                "url": "https://github.com/slothkong/CNN_classification_10_monkey_species",
                "name": "example code",
                "type": "example code"
            },
            {
                "url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/discovery/n5151.jpg",
                "name": "n5151.jpg",
                "type": "discovery"
            }
        ],
        "checksum": "0",
    },
}

#%%
path_to_schema_local = Path("~/ocn/Plecos/plecos/schemas/metadata_local_190305.json").expanduser()

errors = plecos.list_errors(json_dict, path_to_schema_local)
if errors:
    print("ERRORS:")
    for e in errors:
        print(e)
else:
    print("No errors")
26.564516
120
0.649059
f448729d42d0a606df0321be7509a9b2530f28d6
2,180
py
Python
pangloss/backend.py
CLRafaelR/pangloss
920c509381a8d7831471fc3f22a07e58b53b8c0e
[ "MIT" ]
null
null
null
pangloss/backend.py
CLRafaelR/pangloss
920c509381a8d7831471fc3f22a07e58b53b8c0e
[ "MIT" ]
1
2020-06-11T21:08:30.000Z
2020-09-20T03:36:06.000Z
pangloss/backend.py
CLRafaelR/pangloss
920c509381a8d7831471fc3f22a07e58b53b8c0e
[ "MIT" ]
1
2021-03-11T21:11:34.000Z
2021-03-11T21:11:34.000Z
import re

import panflute as pf
from functools import partial

from pangloss.util import smallcapify, break_plain

# regular expression for label formats
label_re = re.compile(r'\{#ex:(\w+)\}')

gb4e_fmt_labelled = """
\\ex\\label{{ex:{label}}}
\\gll {} \\\\
{} \\\\
\\trans {}
"""

gb4e_fmt = """
\\ex
\\gll {} \\\\
{} \\\\
\\trans {}
"""

def gb4e(lst):
    """
    Convert an example list into a series of gb4e-formatted interlinear
    glosses.

    Because example list references are replaced at parsing by Pandoc, the
    normal syntax of (@foo) cannot be used for labels; instead, a label
    syntax similar to that used for headers (and tables and figures with
    pandoc-crossref) is used, namely a {#ex:foo} inserted after the
    translation, which will be stripped and replaced with a LaTeX label on
    the relevant example.
    """
    latex = "\\begin{exe}\n"
    for li in lst.content:
        lines = break_plain(li.content[0])
        if len(lines) != 3:
            continue

        orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
        gloss = smallcapify(gloss)

        label_match = label_re.search(trans)
        if label_match:
            label = label_match.group(1)
            trans = trans[:label_match.start() - 1]
            latex += gb4e_fmt_labelled.format(orig, gloss, trans, label=label)
        else:
            latex += gb4e_fmt.format(orig, gloss, trans)

    latex += "\\end{exe}"
    return pf.RawBlock(latex, format='latex')

leipzigjs_fmt = """
<div data-gloss>
<p>{}</p>
<p>{}</p>
<p>{}</p>
</div>
"""

def leipzigjs(lst):
    """
    Convert an example list into a series of div's suitable for use with
    Leipzig.js.
    """
    html = ''
    for li in lst.content:
        lines = break_plain(li.content[0])
        if len(lines) != 3:
            continue

        orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
        html += leipzigjs_fmt.format(orig, gloss, trans)

    return pf.RawBlock(html, format='html')

# available formats and backends
formats = {
    'latex': {
        'gb4e': gb4e
    },
    'html': {
        'leipzigjs': leipzigjs
    }
}
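# Illustration (not from this file): what gb4e_fmt_labelled expands to for a
# hypothetical three-line example whose translation carried the label
# {#ex:dog} before it was stripped by gb4e() above.
print(gb4e_fmt_labelled.format("Der Hund bellt", "the dog barks",
                               "The dog barks.", label="dog"))
# \ex\label{ex:dog}
# \gll Der Hund bellt \\
# the dog barks \\
# \trans The dog barks.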
23.44086
78
0.601835
f4496e9806f5e5781ad656efc22821170a6cd22c
3,702
py
Python
tests/unit/discovery/test_py_spec.py
xavfernandez/virtualenv
dd37c7d2af8a21026f4d4b7f43142e4e1e0faf86
[ "MIT" ]
1
2020-02-25T15:08:59.000Z
2020-02-25T15:08:59.000Z
tests/unit/discovery/test_py_spec.py
xavfernandez/virtualenv
dd37c7d2af8a21026f4d4b7f43142e4e1e0faf86
[ "MIT" ]
null
null
null
tests/unit/discovery/test_py_spec.py
xavfernandez/virtualenv
dd37c7d2af8a21026f4d4b7f43142e4e1e0faf86
[ "MIT" ]
null
null
null
from __future__ import absolute_import, unicode_literals

import itertools
import os
import sys
from copy import copy

import pytest

from virtualenv.discovery.py_spec import PythonSpec
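# The test functions themselves are not included in this record.  A sketch of
# the kind of spec-string parsing these tests exercise; the PythonSpec
# attribute names are assumptions based on the virtualenv discovery API.
def test_spec_sketch():
    spec = PythonSpec.from_string_spec("python3.8")
    assert spec.major == 3 and spec.minor == 8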
31.913793
117
0.679633
f44a0eaa8a605413b612c82d10265dde71bd9d5d
2,251
py
Python
plugins/module_utils/definitions/trigger_image_activation.py
robertcsapo/dnacenter-ansible
33f776f8c0bc7113da73191c301dd1807e6b4a43
[ "MIT" ]
null
null
null
plugins/module_utils/definitions/trigger_image_activation.py
robertcsapo/dnacenter-ansible
33f776f8c0bc7113da73191c301dd1807e6b4a43
[ "MIT" ]
null
null
null
plugins/module_utils/definitions/trigger_image_activation.py
robertcsapo/dnacenter-ansible
33f776f8c0bc7113da73191c301dd1807e6b4a43
[ "MIT" ]
null
null
null
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json

module_definition = json.loads(
    """{
    "family": "software_image_management_swim",
    "name": "trigger_image_activation",
    "operations": {
        "post": [
            "trigger_software_image_activation"
        ]
    },
    "parameters": {
        "trigger_software_image_activation": [
            {
                "name": "schedule_validate",
                "required": false,
                "type": "boolean"
            },
            {
                "array_type": "object",
                "name": "payload",
                "required": true,
                "schema": [
                    {
                        "name": "activateLowerImageVersion",
                        "required": false,
                        "type": "boolean"
                    },
                    {
                        "name": "deviceUpgradeMode",
                        "required": false,
                        "type": "string"
                    },
                    {
                        "name": "deviceUuid",
                        "required": false,
                        "type": "string"
                    },
                    {
                        "name": "distributeIfNeeded",
                        "required": false,
                        "type": "boolean"
                    },
                    {
                        "array_type": "string",
                        "name": "imageUuidList",
                        "required": false,
                        "schema": [],
                        "type": "array"
                    },
                    {
                        "array_type": "string",
                        "name": "smuImageUuidList",
                        "required": false,
                        "schema": [],
                        "type": "array"
                    }
                ],
                "type": "array"
            }
        ]
    },
    "responses": {
        "trigger_software_image_activation": {
            "properties": [
                "response",
                "version"
            ],
            "type": "object"
        }
    }
}"""
)
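# Illustration (not from this file): pulling the required-parameter names out
# of the definition above.
_params = module_definition["parameters"]["trigger_software_image_activation"]
print([p["name"] for p in _params if p.get("required")])  # ['payload']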
29.618421
66
0.330964
f44ab2c0f0cd8c386e07e21d67f94743e0fb707b
3,966
py
Python
minecraft_launcher_lib/fabric.py
bopchik/Simple-minecraft-mod-launcher
52e4e8ec351b0bac7eb4fe707f21de8da14b9ac9
[ "BSD-2-Clause" ]
1
2021-06-17T18:19:41.000Z
2021-06-17T18:19:41.000Z
minecraft_launcher_lib/fabric.py
bopchik/Simple-minecraft-mod-launcher
52e4e8ec351b0bac7eb4fe707f21de8da14b9ac9
[ "BSD-2-Clause" ]
null
null
null
minecraft_launcher_lib/fabric.py
bopchik/Simple-minecraft-mod-launcher
52e4e8ec351b0bac7eb4fe707f21de8da14b9ac9
[ "BSD-2-Clause" ]
3
2021-06-17T18:19:44.000Z
2021-06-17T22:18:34.000Z
from .helper import download_file, get_user_agent
from .install import install_minecraft_version
from typing import List, Dict, Union
from xml.dom import minidom
import subprocess
import requests
import tempfile
import random
import os


def get_all_minecraft_versions() -> List[Dict[str, Union[str, bool]]]:
    """
    Returns all available Minecraft versions for fabric
    """
    FABRIC_MINECARFT_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/game"
    return requests.get(FABRIC_MINECARFT_VERSIONS_URL, headers={"user-agent": get_user_agent()}).json()


def get_stable_minecraft_versions() -> List[str]:
    """
    Returns a list which only contains the stable Minecraft versions that support fabric
    """
    minecraft_versions = get_all_minecraft_versions()
    stable_versions = []
    for i in minecraft_versions:
        if i["stable"] == True:
            stable_versions.append(i["version"])
    return stable_versions


def get_latest_minecraft_version() -> str:
    """
    Returns the latest unstable Minecraft version that supports fabric. This could be a snapshot.
    """
    minecraft_versions = get_all_minecraft_versions()
    return minecraft_versions[0]["version"]


def get_latest_stable_minecraft_version() -> str:
    """
    Returns the latest stable Minecraft version that supports fabric
    """
    stable_versions = get_stable_minecraft_versions()
    return stable_versions[0]


def is_minecraft_version_supported(version: str) -> bool:
    """
    Checks if a Minecraft version is supported by fabric
    """
    minecraft_versions = get_all_minecraft_versions()
    for i in minecraft_versions:
        if i["version"] == version:
            return True
    return False


def get_all_loader_versions() -> List[Dict[str, Union[str, bool, int]]]:
    """
    Returns all loader versions
    """
    FABRIC_LOADER_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/loader"
    return requests.get(FABRIC_LOADER_VERSIONS_URL, headers={"user-agent": get_user_agent()}).json()


def get_latest_loader_version() -> str:
    """
    Get the latest loader version
    """
    loader_versions = get_all_loader_versions()
    return loader_versions[0]["version"]


def get_latest_installer_version() -> str:
    """
    Returns the latest installer version
    """
    FABRIC_INSTALLER_MAVEN_URL = "https://maven.fabricmc.net/net/fabricmc/fabric-installer/maven-metadata.xml"
    r = requests.get(FABRIC_INSTALLER_MAVEN_URL, headers={"user-agent": get_user_agent()})
    xml_data = minidom.parseString(r.text)
    release = xml_data.getElementsByTagName("release")
    return release.item(0).lastChild.data


def install_fabric(path: str, minecraft_version: str, loader_version: str = None):
    """
    Install a fabric version
    """
    # Get latest loader version if not given
    if not loader_version:
        loader_version = get_latest_loader_version()
    # Make sure the Minecraft version is installed
    install_minecraft_version(path, minecraft_version)
    # Get installer version
    installer_version = get_latest_installer_version()
    installer_download_url = f"https://maven.fabricmc.net/net/fabricmc/fabric-installer/{installer_version}/fabric-installer-{installer_version}.jar"
    # Generate a temporary path for downloading the installer
    installer_path = os.path.join(tempfile.gettempdir(), f"fabric-installer-{random.randrange(100, 10000)}.tmp")
    # Download the installer
    download_file(installer_download_url, installer_path)
    # Run the installer, see https://fabricmc.net/wiki/install#cli_installation
    subprocess.run(["java", "-jar", installer_path, "client", "-dir", path, "-mcversion", minecraft_version, "-loader", loader_version, "-noprofile", "-snapshot"])
    # Delete the installer, we don't need it anymore
    os.remove(installer_path)
    # Install all libs of fabric
    fabric_minecraft_version = f"fabric-loader-{loader_version}-{minecraft_version}"
    install_minecraft_version(path, fabric_minecraft_version)
39.66
152
0.739536
f44b19520f8c0f088d9bcd431d1e1bf360a73146
2,354
py
Python
Strand Sort.py
Nishkarsh-Tripathi/Sorting-algorithms-
cda25f1a8e7fb5e25e59e69e78f000421b0e4eb0
[ "Apache-2.0" ]
5
2020-03-29T16:26:18.000Z
2020-11-23T15:37:23.000Z
Strand Sort.py
Nishkarsh-Tripathi/Sorting-algorithms
cda25f1a8e7fb5e25e59e69e78f000421b0e4eb0
[ "Apache-2.0" ]
null
null
null
Strand Sort.py
Nishkarsh-Tripathi/Sorting-algorithms
cda25f1a8e7fb5e25e59e69e78f000421b0e4eb0
[ "Apache-2.0" ]
null
null
null
# STRAND SORT

# It is a recursive comparison-based sorting technique which sorts in
# increasing order.

# It works by repeatedly pulling sorted sub-lists out of the list to be
# sorted and merging them with a result array.

# Algorithm:
# Create an empty strand (list) and append the first element to it, popping
# it from the input array.
# Compare this element with the rest of the elements of the input array.
# If a greater element is found, pop it and append it to the strand;
# otherwise skip it.
# Now merge this strand into the final output array.
# Recur for the remaining items in the strand and the input array.

# Utility Function to merge two arrays

# Function to return the strand (sorted sub-list)

# Strand Sort Function

# Driver Code
arr = [1, 6, 3, 8, 2, 0, 9]
print(strand_sort(arr))

# Time Complexity : O(n^2) [Worst]
#                   O(n*log(n)) [Average]
# Space Complexity : O(n)
# Stable : Yes
# Inplace : No
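# The three function bodies flagged by the placeholder comments above are not
# included in this record; they would normally sit before the driver code.
# A sketch consistent with the algorithm described in the comments:
def merge(a, b):
    # Utility function: merge two sorted lists into one sorted list.
    out = []
    while a and b:
        out.append(a.pop(0) if a[0] <= b[0] else b.pop(0))
    out.extend(a or b)
    return out


def strand(arr):
    # Pull one increasing sub-list (a "strand") out of arr, removing its items.
    sub = [arr.pop(0)]
    i = 0
    while i < len(arr):
        if arr[i] >= sub[-1]:
            sub.append(arr.pop(i))
        else:
            i += 1
    return sub


def strand_sort(arr):
    # Repeatedly extract strands and merge them into the result.
    result = []
    while arr:
        result = merge(result, strand(arr))
    return result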
26.449438
101
0.657179
f44cdd7cc2616d5398119b8bf5c750adca9d4192
10,915
py
Python
gamestonk_terminal/cryptocurrency/overview/pycoingecko_model.py
minhhoang1023/GamestonkTerminal
195dc19b491052df080178c0cc6a9d535a91a704
[ "MIT" ]
null
null
null
gamestonk_terminal/cryptocurrency/overview/pycoingecko_model.py
minhhoang1023/GamestonkTerminal
195dc19b491052df080178c0cc6a9d535a91a704
[ "MIT" ]
null
null
null
gamestonk_terminal/cryptocurrency/overview/pycoingecko_model.py
minhhoang1023/GamestonkTerminal
195dc19b491052df080178c0cc6a9d535a91a704
[ "MIT" ]
null
null
null
"""CoinGecko model""" __docformat__ = "numpy" # pylint: disable=C0301, E1101 import logging import re from typing import Any, List import numpy as np import pandas as pd from pycoingecko import CoinGeckoAPI from gamestonk_terminal.cryptocurrency.dataframe_helpers import ( create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names, ) from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import get_coins from gamestonk_terminal.decorators import log_start_end logger = logging.getLogger(__name__) HOLD_COINS = ["ethereum", "bitcoin"] NEWS_FILTERS = ["Index", "Title", "Author", "Posted"] CATEGORIES_FILTERS = [ "Rank", "Name", "Change_1h", "Change_24h", "Change_7d", "Market_Cap", "Volume_24h", "Coins", ] STABLES_FILTERS = [ "Rank", "Name", "Symbol", "Price", "Change_24h", "Exchanges", "Market_Cap", "Change_30d", ] PRODUCTS_FILTERS = [ "Rank", "Platform", "Identifier", "Supply_Rate", "Borrow_Rate", ] PLATFORMS_FILTERS = ["Rank", "Name", "Category", "Centralized"] EXCHANGES_FILTERS = [ "Rank", "Trust_Score", "Id", "Name", "Country", "Year Established", "Trade_Volume_24h_BTC", ] EXRATES_FILTERS = ["Index", "Name", "Unit", "Value", "Type"] INDEXES_FILTERS = ["Rank", "Name", "Id", "Market", "Last", "MultiAsset"] DERIVATIVES_FILTERS = [ "Rank", "Market", "Symbol", "Price", "Pct_Change_24h", "Contract_Type", "Basis", "Spread", "Funding_Rate", "Volume_24h", ] COINS_COLUMNS = [ "symbol", "name", "current_price", "market_cap", "market_cap_rank", "price_change_percentage_7d_in_currency", "price_change_percentage_24h_in_currency", "total_volume", ] SORT_VALUES = [ "market_cap_desc", "market_cap_asc", "name_desc", "name_asc", "market_cap_change_24h_desc", "market_cap_change_24h_asc", ] # TODO: add string with overview
25.034404
316
0.624462
f44d14e6df3a58dd087e5855ff51ca5785dc0dff
20,399
py
Python
docker/messein/board-import-app/app.py
sourceperl/tk-dashboard
015ececc670902b02284749ac59f354db4304e48
[ "MIT" ]
null
null
null
docker/messein/board-import-app/app.py
sourceperl/tk-dashboard
015ececc670902b02284749ac59f354db4304e48
[ "MIT" ]
null
null
null
docker/messein/board-import-app/app.py
sourceperl/tk-dashboard
015ececc670902b02284749ac59f354db4304e48
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

from configparser import ConfigParser
from datetime import datetime
import urllib.parse
import hashlib
import io
import json
import logging
import os
import re
import time
from xml.dom import minidom

import feedparser
import requests
import schedule
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
from metar.Metar import Metar
import pytz
import pdf2image

from board_lib import CustomRedis, catch_log_except, dt_utc_to_local
from webdav import WebDAV


# some const
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'

# some var
owc_doc_dir_last_sync = 0
owc_car_dir_last_sync = 0

# read config
cnf = ConfigParser()
cnf.read('/data/conf/board.conf')
# redis
main_redis_user = cnf.get('redis', 'user')
main_redis_pass = cnf.get('redis', 'pass')
# redis-loos for share
loos_redis_user = cnf.get('redis-loos', 'user')
loos_redis_pass = cnf.get('redis-loos', 'pass')
# gmap img traffic
gmap_img_url = cnf.get('gmap_img', 'img_url')
# gsheet
gsheet_url = cnf.get('gsheet', 'url')
# openweathermap
ow_app_id = cnf.get('openweathermap', 'app_id')
# webdav
webdav_url = cnf.get('owncloud_dashboard', 'webdav_url')
webdav_user = cnf.get('owncloud_dashboard', 'webdav_user')
webdav_pass = cnf.get('owncloud_dashboard', 'webdav_pass')
webdav_reglement_doc_dir = cnf.get('owncloud_dashboard', 'webdav_reglement_doc_dir')
webdav_carousel_img_dir = cnf.get('owncloud_dashboard', 'webdav_carousel_img_dir')

# some class

# some function

# main
if __name__ == '__main__':
    # logging setup
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    logging.getLogger('PIL').setLevel(logging.INFO)
    logging.info('board-import-app started')
    # init webdav client
    wdv = WebDAV(webdav_url, username=webdav_user, password=webdav_pass)
    # init scheduler
    schedule.every(5).minutes.do(owc_updated_job)
    schedule.every(1).hours.do(owc_sync_carousel_job)
    schedule.every(1).hours.do(owc_sync_doc_job)
    schedule.every(2).minutes.do(loos_redis_import_job)
    schedule.every(60).minutes.do(air_quality_atmo_ge_job)
    schedule.every(5).minutes.do(dir_est_img_job)
    schedule.every(5).minutes.do(gsheet_job)
    schedule.every(2).minutes.do(img_gmap_traffic_job)
    schedule.every(5).minutes.do(local_info_job)
    schedule.every(5).minutes.do(vigilance_job)
    schedule.every(5).minutes.do(weather_today_job)
    # first call
    air_quality_atmo_ge_job()
    dir_est_img_job()
    gsheet_job()
    img_gmap_traffic_job()
    local_info_job()
    loos_redis_import_job()
    vigilance_job()
    weather_today_job()
    owc_updated_job()
    # main loop
    while True:
        schedule.run_pending()
        time.sleep(1)
40.076621
110
0.622776
f44d3d2bcc982ad4f8edfb7eb180227db0f5fa05
19,687
py
Python
fsleyes_widgets/widgetlist.py
pauldmccarthy/fsleyes-widgets
cb27899a0f665efe3f1c6ca1f89349507e004378
[ "Apache-2.0" ]
1
2018-11-04T11:18:46.000Z
2018-11-04T11:18:46.000Z
fsleyes_widgets/widgetlist.py
pauldmccarthy/fsleyes-widgets
cb27899a0f665efe3f1c6ca1f89349507e004378
[ "Apache-2.0" ]
2
2018-09-24T15:01:56.000Z
2020-01-20T10:39:37.000Z
fsleyes_widgets/widgetlist.py
pauldmccarthy/fsleyes-widgets
cb27899a0f665efe3f1c6ca1f89349507e004378
[ "Apache-2.0" ]
1
2017-12-09T09:02:07.000Z
2017-12-09T09:02:07.000Z
#!/usr/bin/env python
#
# widgetlist.py - A widget which displays a list of groupable widgets.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides the :class:`WidgetList` class, which displays a list
of widgets.
"""

import wx
import wx.lib.newevent as wxevent
import wx.lib.scrolledpanel as scrolledpanel

import fsleyes_widgets.togglepanel as togglepanel


_WidgetListChangeEvent, _EVT_WL_CHANGE_EVENT = wxevent.NewEvent()


WidgetListChangeEvent = _WidgetListChangeEvent
"""Event emitted by a :class:`WidgetList` when its contents change. """


EVT_WL_CHANGE_EVENT = _EVT_WL_CHANGE_EVENT
"""Identifier for the :data:`WidgetListChangeEvent`. """


WL_ONE_EXPANDED = 1
""":class:`WidgetList` style flag. When applied, at most one group will be
expanded at any one time.
"""
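# The WidgetList class itself is not included in this record.  A sketch of
# how a caller would use the event binder and style flag defined above; the
# constructor signature is an assumption, and WidgetList is assumed to be
# defined later in this module.
def _bind_change_sketch(parent):
    wlist = WidgetList(parent, style=WL_ONE_EXPANDED)
    wlist.Bind(EVT_WL_CHANGE_EVENT, lambda ev: print("contents changed"))
    return wlist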
31.05205
79
0.584701
f44da748e4ab13e359126b052ffbda6e65cd72ff
1,441
py
Python
setup.py
TransactPRO/gw3-python-client
77a9395c13f75467385227461b57ce85f4730ce5
[ "MIT" ]
1
2018-03-13T00:10:05.000Z
2018-03-13T00:10:05.000Z
setup.py
TransactPRO/gw3-python-client
77a9395c13f75467385227461b57ce85f4730ce5
[ "MIT" ]
1
2020-08-05T08:25:14.000Z
2020-08-05T08:25:14.000Z
setup.py
TransactPRO/gw3-python-client
77a9395c13f75467385227461b57ce85f4730ce5
[ "MIT" ]
null
null
null
#!/usr/bin/env python
import setuptools

MAINTAINER_NAME = 'Transact Pro'
MAINTAINER_EMAIL = 'support@transactpro.lv'
URL_GIT = 'https://github.com/TransactPRO/gw3-python-client'

try:
    import pypandoc
    LONG_DESCRIPTION = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
    LONG_DESCRIPTION = ''

CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Topic :: Software Development :: Libraries :: Python Modules'
]

required = [
    'requests',
]

setuptools.setup(
    name='transactpro-gw3-client',
    version='1.7.6',
    description='Transact PRO Gateway3 implementation in Python.',
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    author='Transact Pro',
    author_email='support@transactpro.net',
    install_requires=required,
    url=URL_GIT,
    packages=setuptools.find_packages(),
    license='MIT',
    classifiers=CLASSIFIERS,
    keywords='GW3 gateway3 integration gateway TransactPRO python python3',
    python_requires='>=3.6',
)
28.82
75
0.693963
f44db94e38c8e52a26896847a590eaee7cd80693
2,359
py
Python
social_auth_mitxpro/backends_test.py
mitodl/social-auth-mitxpro
8cae8bbe900b25f724b24f783d06de7b853a1366
[ "BSD-3-Clause" ]
null
null
null
social_auth_mitxpro/backends_test.py
mitodl/social-auth-mitxpro
8cae8bbe900b25f724b24f783d06de7b853a1366
[ "BSD-3-Clause" ]
37
2019-03-06T17:43:26.000Z
2022-03-21T05:18:10.000Z
social_auth_mitxpro/backends_test.py
mitodl/social-auth-mitxpro
8cae8bbe900b25f724b24f783d06de7b853a1366
[ "BSD-3-Clause" ]
null
null
null
"""Tests for our backend""" from urllib.parse import urljoin import pytest from social_auth_mitxpro.backends import MITxProOAuth2 # pylint: disable=redefined-outer-name def test_user_data(backend, strategy, mocked_responses): """Tests that the backend makes a correct appropriate request""" access_token = "user_token" api_root = "http://xpro.example.com/" response = {"username": "abc123", "email": "user@example.com", "name": "Jane Doe"} mocked_responses.add( mocked_responses.GET, urljoin(api_root, "/api/users/me"), json=response ) settings = {"API_ROOT": api_root} def _setting(name, *, backend, default=None): # pylint: disable=unused-argument """Dummy setting func""" return settings.get(name, default) strategy.setting.side_effect = _setting assert backend.user_data(access_token) == response request, _ = mocked_responses.calls[0] assert request.headers["Authorization"] == "Bearer user_token" strategy.setting.assert_any_call("API_ROOT", default=None, backend=backend) def test_authorization_url(backend, strategy): """Test authorization_url()""" strategy.setting.return_value = "abc" assert backend.authorization_url() == "abc" strategy.setting.assert_called_once_with( "AUTHORIZATION_URL", default=None, backend=backend ) def test_access_token_url(backend, strategy): """Test access_token_url()""" strategy.setting.return_value = "abc" assert backend.access_token_url() == "abc" strategy.setting.assert_called_once_with( "ACCESS_TOKEN_URL", default=None, backend=backend )
29.4875
86
0.680797
f450452cbcef41209866e35540c53f785f67820d
1,183
py
Python
Scripts/simulation/careers/detective/detective_crime_scene.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
Scripts/simulation/careers/detective/detective_crime_scene.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
Scripts/simulation/careers/detective/detective_crime_scene.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\careers\detective\detective_crime_scene.py
# Compiled at: 2015-02-08 03:00:54
# Size of source mod 2**32: 1608 bytes
from careers.career_event_zone_director import CareerEventZoneDirector
import sims4.log
logger = sims4.log.Logger('Crime Scene', default_owner='bhill')
42.25
107
0.72612
f450583ef2fc87d70603f2a691c77577371d8626
11,640
py
Python
classifier/interpretation_exp.py
methylgrammarlab/proj_scwgbs
287196898796eb617fef273bfaf9e978a57047dc
[ "MIT" ]
null
null
null
classifier/interpretation_exp.py
methylgrammarlab/proj_scwgbs
287196898796eb617fef273bfaf9e978a57047dc
[ "MIT" ]
null
null
null
classifier/interpretation_exp.py
methylgrammarlab/proj_scwgbs
287196898796eb617fef273bfaf9e978a57047dc
[ "MIT" ]
null
null
null
""" Code adapted from https://github.com/ohlerlab/DeepRiPe with changes Extract information and graphs from the Integrated gradients output """ import argparse import os import sys import matplotlib.pyplot as plt import numpy as np import seaborn as sns from classifier.plotseqlogo import seqlogo_fig from commons import files_tools sns.set() sns.set_style('whitegrid') def plot_multi_seq(sequences_dict, number_of_seq, output_folder=None): """ Plot the multiple sequences in one figure :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param number_of_seq: number of sequences in one figure :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k][:number_of_seq] fig = seqlogo_fig(np.transpose(ex_seq[:, :, :], axes=(1, 2, 0)), vocab="DNA", figsize=(8, 4), ncol=1, yl=0.1, plot_name="seq for top %s of type %s" % (number_of_seq, k)) if output_folder: fig.savefig(os.path.join(output_folder, "seq_for_top_%s_of_type_%s" % (number_of_seq, k))) else: plt.show() plt.close() def plot_avg_sequence(sequences_dict, output_folder=None): """ Plot the average sequence across 30 letters and all the sequence :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0)) name = k fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1, plot_name="Average attribution score for prediction %s" % name) ax = fig.axes[0] ax.set_title("Average sequence for prediction %s" % name, fontsize=16) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s30.png" % k)) else: plt.show() plt.close() for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s.png" % k)) else: plt.show() plt.close() def plot_avg_sequence_sw(sequences_dict, output_folder=None): """ plot the avg sequence using SW, flatten the AT to W and CG to S :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3] new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2] fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30.png" % k)) else: plt.show() for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3] new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2] fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw.png" % k)) else: plt.show() 
plt.close() def plot_avg_sequence_sw_flatten_values(sequences_dict, output_folder=None): """ plot the avg sequence using SW, flatten the AT to W and CG to S and combining both options to get one number per sequence place :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): w = mean_seq[i][0] + mean_seq[i][3] s = mean_seq[i][1] + mean_seq[i][2] delta = s - w sw_index = 1 if delta > 0 else 0 new_seq[i][sw_index] = abs(delta) fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(8, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30_flatten.png" % k)) else: fig.show() for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): w = mean_seq[i][0] + mean_seq[i][3] s = mean_seq[i][1] + mean_seq[i][2] delta = s - w sw_index = 1 if delta > 0 else 0 new_seq[i][sw_index] = abs(delta) fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw_flatten.png" % k)) else: plt.show() plt.close() def plot_distance_weight_two_sides(sequences_dict, output_folder=None): """ Plot the integrated gradient value of each feature based on distance from center, two ways graph(-74->74) We wanted to see if there are indexes and some periodicity :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: class_type = k ex_seq = np.abs(sequences_dict[k]) mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) seq_weight = np.sum(mean_seq, axis=1) middle = int(seq_weight.shape[0] / 2) - 1 seq_weight[middle] = None seq_weight[middle + 1] = None x = np.arange(-74, 1).astype(np.int) x = np.append(x, x[::-1] * -1) x_ticks = [i for i in range(-70, 80, 10)] plt.xticks(x_ticks) plt.plot(x, seq_weight, '.-') plt.legend() plt.grid(axis="y") plt.xlabel("Distance from CpG Site", fontsize=12) plt.ylabel("Attribute score", fontsize=12) plt.title("Attribute score base on distance from CpG site for %s" % class_type, fontsize=14) if output_folder: plt.savefig( os.path.join(output_folder, "distance_importance_of_flanking_letters_type_%s_two_way.png" % k)) else: plt.show() plt.close() def plot_distance_weight_one_side(sequences_dict, output_folder=None): """ Plot the integrated gradient value of each feature based on distance from center, one way graph (0->74) We wanted to see if there are indexes and some periodicity :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: class_type = k ex_seq = np.abs(sequences_dict[k]) mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) seq_weight = np.sum(mean_seq, axis=1) std_seq = np.std(mean_seq, axis=1) middle = int(seq_weight.shape[0] / 2) - 1 seq_to_values = np.flip(seq_weight[:middle]) seq_from_values = 
seq_weight[middle + 2:] seq_to_std = np.flip(std_seq[:middle]) seq_from_std = std_seq[middle + 2:] x = np.arange(1, seq_from_values.shape[0] + 1) plt.errorbar(x, seq_to_values, seq_to_std, marker='^', label="to", alpha=0.5) plt.errorbar(x, seq_from_values, seq_from_std, marker='^', label="from", alpha=0.5) plt.legend() x_ticks = [i for i in range(1, 5)] + [i for i in range(5, 75, 5)] plt.xticks(x_ticks) plt.xlabel("Distance from CG") plt.ylabel("Importance shannon values") plt.title("Importance of flanking letters - %s" % (class_type)) if output_folder: plt.savefig(os.path.join(output_folder, "distance_importance_of_flanking_letters_type_%s_one_way.png" % k)) else: plt.show() plt.close() def print_each_seq(sequences_dict, output_folder): """ Plot all the sequences on after the other :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ cl_list = [] pl_list = [] # Remove duplicates seq = None for i in range(sequences_dict["cl"].shape[0]): new_seq = sequences_dict["cl"][i] if np.all(new_seq == seq): continue else: cl_list.append(new_seq) seq = new_seq seq = None for i in range(sequences_dict["pl"].shape[0]): new_seq = sequences_dict["pl"][i] if np.all(new_seq == seq): continue else: pl_list.append(new_seq) seq = new_seq for i in range(1000): plot_one_seq(seq=cl_list[i], output=os.path.join(output_folder, "cl_seq_%s.png" % i), title="CL seq num %s" % i, yl=0.1) for i in range(1000): plot_one_seq(seq=pl_list[i], output=os.path.join(output_folder, "pl_seq_%s.png" % i), title="PL seq num %s" % i, yl=0.1) if __name__ == '__main__': main()
36.489028
109
0.617526
f452f54dd600820476b6e9842531fd00913972e2
3,921
py
Python
scripts/pythonutils/autorepr.py
shulinye/dotfiles
a342512c33ca102d03921cc653ee4605d0cf9617
[ "MIT" ]
2
2015-01-16T22:07:10.000Z
2015-11-09T06:45:44.000Z
scripts/pythonutils/autorepr.py
shulinye/dotfiles
a342512c33ca102d03921cc653ee4605d0cf9617
[ "MIT" ]
4
2015-07-08T19:13:47.000Z
2015-08-31T16:04:36.000Z
scripts/pythonutils/autorepr.py
shulinye/dotfiles
a342512c33ca102d03921cc653ee4605d0cf9617
[ "MIT" ]
null
null
null
#!/usr/bin/python3

from collections import OrderedDict
from functools import partial
from ordered_set import OrderedSet
import inspect
import itertools
import types

from .utils import walk_getattr

__all__ = ['autoinit', 'autorepr', 'TotalCompareByKey']


def autoinit(obj=None, *args, params=None, **kwargs):
    """Takes __slots__ and _slots and writes an __init__
    Can be used as a class decorator, or by setting __init__ = autoinit"""
    if obj is None:
        return partial(autoinit, params=params)
    if params:
        pass
    elif hasattr(obj, '__slots__'):
        params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__')))
    elif hasattr(obj, '_slots'):
        params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots')))
    else:
        raise RuntimeError("Can't autocreate __init__, please supply '__slots__' or '_slots'")
    if inspect.isclass(obj):  # I'm being used as a decorator
        s = ["def __init__(self,{}):".format(", ".join(i for i in params))]
        s.extend("self.{0} = {0}".format(i) for i in params)
        scope = {}
        exec('\n    '.join(s), scope)
        setattr(obj, '__init__', scope['__init__'])
        return obj
    else:
        signature = inspect.Signature(inspect.Parameter(i, inspect.Parameter.POSITIONAL_OR_KEYWORD) for i in params)
        signature.bind(*args, **kwargs)
        for p, val in itertools.chain(zip(params, args), kwargs.items()):
            setattr(obj, p, val)


def autorepr(obj=None, *, params=None):
    """Function that automagically gives you a __repr__.
    If no params are given, uses __slots__, _slots, and at last resort,
    inspects __init__

    Can be used as a class decorator or by setting __repr__ = autorepr"""
    if obj is None:
        return partial(autorepr, params=params)
    discard_first = False
    if params:
        pass
    elif hasattr(obj, '__slots__'):
        params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__')))
    elif hasattr(obj, '_slots'):
        params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots')))
    else:
        sig = inspect.signature(obj.__init__)
        params = sig.parameters
        discard_first = True
    if inspect.isclass(obj):  # I'm being used as a decorator
        if discard_first:
            params = list(params)[1:]  # drop the first argument, that's self
        s = ["def __repr__(self):\n    return '%s(" + ", ".join(["%s=%r"]*(len(params)))]
        s.append(")' % (self.__class__.__name__, ")
        s.append(', '.join("'{0}', self.{0}".format(i) for i in params) + ')')
        scope = {}
        exec("".join(s), scope)
        setattr(obj, '__repr__', scope['__repr__'])
        return obj
    else:  # Being a normal function here :P
        return "%s(%s)" % (obj.__class__.__name__,
                           ", ".join("%s=%r" % (i, getattr(obj, i)) for i in params))
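# Illustration (not from this file): using the two decorators above on a
# slotted class.  autoinit (applied first) generates __init__(self, x, y)
# from __slots__, and autorepr generates the matching __repr__.
@autorepr
@autoinit
class Point:
    __slots__ = ('x', 'y')

p = Point(1, 2)
print(p)  # Point(x=1, y=2)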
40.42268
116
0.602907
f4537a07a1d5765ef6c894d899d3fcdd3ed64dab
10,051
py
Python
v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py
TTOFFLINE-LEAK/ttoffline
bb0e91704a755d34983e94288d50288e46b68380
[ "MIT" ]
4
2019-07-01T15:46:43.000Z
2021-07-23T16:26:48.000Z
v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py
TTOFFLINE-LEAK/ttoffline
bb0e91704a755d34983e94288d50288e46b68380
[ "MIT" ]
1
2019-06-29T03:40:05.000Z
2021-06-13T01:15:16.000Z
v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py
TTOFFLINE-LEAK/ttoffline
bb0e91704a755d34983e94288d50288e46b68380
[ "MIT" ]
4
2019-07-28T21:18:46.000Z
2021-02-25T06:37:25.000Z
from direct.directnotify import DirectNotifyGlobal
from toontown.estate import GardenGlobals
from toontown.estate.DistributedLawnDecorAI import DistributedLawnDecorAI

FLOWER_X_OFFSETS = (None, (0, ), (-1.5, 1.5), (-3.4, 0, 3.5))
49.029268
195
0.639936