Column schema (one table row per source file; ⌀ marks columns that also contain null values):
hexsha: stringlengths 40-40
size: int64 5-2.06M
ext: stringclasses 10 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3-248
max_stars_repo_name: stringlengths 5-125
max_stars_repo_head_hexsha: stringlengths 40-78
max_stars_repo_licenses: listlengths 1-10
max_stars_count: int64 1-191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24-24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24-24 ⌀
max_issues_repo_path: stringlengths 3-248
max_issues_repo_name: stringlengths 5-125
max_issues_repo_head_hexsha: stringlengths 40-78
max_issues_repo_licenses: listlengths 1-10
max_issues_count: int64 1-67k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24-24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24-24 ⌀
max_forks_repo_path: stringlengths 3-248
max_forks_repo_name: stringlengths 5-125
max_forks_repo_head_hexsha: stringlengths 40-78
max_forks_repo_licenses: listlengths 1-10
max_forks_count: int64 1-105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24-24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24-24 ⌀
content: stringlengths 5-2.06M
avg_line_length: float64 1-1.02M
max_line_length: int64 3-1.03M
alphanum_fraction: float64 0-1
count_classes: int64 0-1.6M
score_classes: float64 0-1
count_generators: int64 0-651k
score_generators: float64 0-1
count_decorators: int64 0-990k
score_decorators: float64 0-1
count_async_functions: int64 0-235k
score_async_functions: float64 0-1
count_documentation: int64 0-1.04M
score_documentation: float64 0-1
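A minimal, hypothetical sketch of how rows with this schema could be consumed. It assumes the table has been exported to a Parquet file named rows.parquet and that pandas is available; the file name and the filter thresholds are illustrative assumptions, not part of this dump:

# Hypothetical sketch: load rows following the schema above and filter them.
# "rows.parquet" is an assumed export of this table, not referenced anywhere here.
import pandas as pd

df = pd.read_parquet("rows.parquet")

# Keep Python files with a non-trivial documentation score and at least one star.
well_documented = df[
    (df["ext"] == "py")
    & (df["score_documentation"] > 0.5)
    & (df["max_stars_count"].fillna(0) >= 1)
]

# Print repo, path, and size for each selected file.
for _, row in well_documented.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])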
9ffe17de7805da9bfb7ad7d54bb9a08115c66b6e | 149 | py | Python | commonutils/__init__.py | lrbsunday/commonutils | 6a4f2106e877417eebc8b8c6a9c1610505bd21e3 | ["BSD-3-Clause"] | 1 | 2017-09-10T13:13:04.000Z | 2017-09-10T13:13:04.000Z | commonutils/__init__.py | lrbsunday/commonutils | 6a4f2106e877417eebc8b8c6a9c1610505bd21e3 | ["BSD-3-Clause"] | 2 | 2021-03-25T21:45:54.000Z | 2021-11-15T17:47:06.000Z | commonutils/__init__.py | lrbsunday/commonutils | 6a4f2106e877417eebc8b8c6a9c1610505bd21e3 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for commonutils."""
__author__ = """lrbsunday"""
__email__ = '272316131@qq.com'
__version__ = '0.1.0'
| 18.625 | 40 | 0.637584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.691275 |
9ffe6ea421da07a4d91197e1ea46c83dd156f66f | 826 | py | Python | app/components/admin.py | Uniquode/uniquode2 | 385f3e0b26383c042d8da64b52350e82414580ea | ["MIT"] | null | null | null | app/components/admin.py | Uniquode/uniquode2 | 385f3e0b26383c042d8da64b52350e82414580ea | ["MIT"] | null | null | null | app/components/admin.py | Uniquode/uniquode2 | 385f3e0b26383c042d8da64b52350e82414580ea | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from django.utils.timesince import timesince
class CreatedByAdminMixin:
def save_model(self, request, obj, form, change):
if getattr(obj, 'created_by', None) is None:
obj.created_by = request.user
# noinspection PyUnresolvedReferences
super().save_model(request, obj, form, change)
# noinspection PyMethodMayBeStatic
class TimestampAdminMixin:
def since_created(self, obj):
return timesince(obj.dt_created)
def since_modified(self, obj):
# likely copy-paste slip in the original, which returned timesince(obj.dt_created) here; dt_modified is the assumed intended field
return timesince(obj.dt_modified)
class TaggedAdminMixin:
# noinspection PyUnresolvedReferences
def get_queryset(self, request):
return super().get_queryset(request).prefetch_related('tags')
def _tags(self, obj):
return ", ".join(o.name for o in obj.tags.all())
| 25.8125 | 69 | 0.690073 | 713 | 0.863196 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.18523 |
9fff12642cb00ff3e2ce7ae890c3d2b10cbbe1d1 | 8,936 | py | Python | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | ["MIT"] | null | null | null | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | ["MIT"] | null | null | null | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | ["MIT"] | null | null | null |
import os
from os.path import join, isfile
from shutil import Error
from sys import exec_prefix
import numpy as np
import fit
import simple_read_data
from tabulate import tabulate
import logging
np.seterr(all='raise')
class DataFormatError(Exception):
pass
class WrongPathFormat(Exception):
pass
def check_data_format(data):
'''
check if the input data satisfies the following requirements:
1. it is a dictionary {x: [], y: [], yerr: []}.
2. The arrays must have the same size.
if the data format is wrong, raise a DataFormatError
'''
conf = {'x': [], 'y' : [], 'yerr' : [] }
if (check_structure(data,conf)==False):
raise DataFormatError("Wrong format for the input data")
else:
if (np.min(data['y']) < 0 or np.max(data['y'])>1.0):
raise DataFormatError("y is out of range (0,1)")
def print_debug():
debug_msg = 'debug'
return debug_msg
def check_structure(struct, conf):
if isinstance(struct, dict) and isinstance(conf, dict):
# struct is a dict of types or other dicts
return all(k in conf and check_structure(struct[k], conf[k]) for k in struct)
if isinstance(struct, list) and isinstance(conf, list):
# struct is list in the form [type or dict]
return all(check_structure(struct[0], c) for c in conf)
elif isinstance(struct, type):
# struct is the type of conf
return isinstance(conf, struct)
else:
# struct is neither a dict, nor a list, nor a type
return False
class WignerFunc_Measurement():
def __init__(self,fpath,debug=False) -> None:
self.sb_list={} #dictionary that stores sb measurement
self.set_path(fpath)
self.list_all_files()
self.logger = logging.getLogger('WFM')
self.debug = debug
if debug == True:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.ERROR)
#add stream handler and formatter
c_handler = logging.StreamHandler()
c_format = logging.Formatter('WFM: %(message)s')
c_handler.setFormatter(c_format)
self.logger.addHandler(c_handler)
def set_path(self,fpath) -> None:
try:
os.listdir(fpath)
self.fpath = fpath
except (NotADirectoryError, FileNotFoundError):
self.logger.error('The given path is not a directory')
raise WrongPathFormat
def list_all_files(self):
print('Scanning the directory')
self.files = [f for f in os.listdir(self.fpath) if isfile(join(self.fpath, f)) and os.path.splitext(join(self.fpath,f))[1] in ['','.dat'] ]
self.fullpath_files = sorted( [join(self.fpath,f) for f in os.listdir(self.fpath) if isfile(join(self.fpath, f)) and os.path.splitext(join(self.fpath,f))[1]=='' ] )
if self.files == []:
self.logger.warning('The directory is empty')
else:
print(f'Discovered {len(self.files)} files in the directory')
return self.files
def setup_sbs(self):
print(f'Validating files')
cnt=0
for fname in self.fullpath_files:
try:
sbs = SideBandMeasurement(fname,raw = False,debug= self.debug)
self.sb_list[str(cnt)] = sbs
cnt += 1
except Exception as err:
pass
else:
sbs.eval_parity()
print(f'Discovered {cnt} valid files with right data format\n')
def get_files(self):
return self.files
def print_report(self):
print('Report summary \n')
t=[[key,sb.folder, sb.short_fname, sb.parity, sb.err_log] for key,sb in self.sb_list.items()]
print(tabulate(t, headers=['id', 'folder','filename', 'parity','Errors']))
def refit(self,id,weights=[],omega=None,gamma=None):
'''
Refit a sideband measurement using new weights, omega and gamma.
'''
if (id >= len(self.sb_list.keys()) ):
self.logger.warning('id is out of range')
return
else:
sb_target = self.sb_list[str(id)]
sb_target.reset_log_err()
print(f'Refitting Sideband measurement {sb_target.fname}')
if omega!= None:
sb_target.set_Omega(omega)
if len(weights) != 0:
sb_target.set_weight(weights)
if gamma!= None:
sb_target.set_gamma(gamma)
sb_target.eval_parity()
def show_errors(self):
pass
class SideBandMeasurement():
def __init__(self,fname,raw = False, debug = False ) -> None:
self.fname = fname
self.xy = dict((el,[]) for el in ['x','y','yerr'])
self.plot = None
self.parity = None
self.raw = raw
self.weight = [1, 0, 0]
self.Omega_0 = 0.05
self.gamma = 7e-4
self.offset = 0.0
#internal logging
self.err_log=[]
#logging
self.logger= logging.getLogger(self.fname)
#add stream handler and formatter
c_handler = logging.StreamHandler()
c_format = logging.Formatter('SBM: %(message)s')
c_handler.setFormatter(c_format)
self.logger.addHandler(c_handler)
if debug == True:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.ERROR)
#extract folder name and fname only
(self.folder, self.short_fname) = self.fname.split("/")[-2:]
#verify if the data file is valid
try:
np.genfromtxt(self.fname)
except IOError as err:
#self.logger.exception('file \'%s\' is not found' %(self.fname) )
raise
try:
self.extract_xy()
except ValueError as err:
#self.logger.exception(err)
raise
def log_err(self,errors):
self.err_log.append(errors)
def reset_log_err(self):
self.err_log=[]
def set_Omega(self,omega):
try:
self.Omega_0 = float(omega)
except ValueError as error:
self.logger.error(f'Rabi freq must be a number {error}')
raise
def set_gamma(self,gamma):
try:
self.gamma = float(gamma)
except ValueError as error:
self.logger.error(f'gamma must be a number {error}')
raise
def set_weight(self,weight) -> None:
self.logger.debug(f'Set weight when fitting sb {self.fname}')
try:
self.weight = [float(i) for i in weight]
except (TypeError,ValueError) as err:
self.logger.error(f'Set weight error')
raise
def extract_xy(self):
'''
Extract xy data from the data files
'''
if self.raw== True:
try:
(self.xy['x'], self.xy['y'], self.xy['yerr'],_,_) = simple_read_data.get_x_y(self.fname)
except Exception as err:
raise
else:
try:
self.xy['x'], self.xy['y'], self.xy['yerr'] = tuple(np.genfromtxt(self.fname))
except ValueError as err:
raise ValueError(f'{self.short_fname} has wrong data format')
def extract_pop(self):
try:
self.fit_res = fit.fit_sum_multi_sine_offset(self.xy['x'], self.xy['y'], self.xy['yerr'], self.weight, self.Omega_0, self.gamma, offset = self.offset, rsb=False\
,gamma_fixed=False,customized_bound_population=None,debug=False)
except FloatingPointError as err:
#self.logger.warning('There is a measurement with zero uncertainty')
self.log_err('zero sigma')
except Exception as err:
self.log_err('unexpected error in fitting')
#raise RuntimeError('Could not fit')
else:
redchi = self.fit_res['reduced_chi square']
if (redchi>10 or redchi<0):
#self.logger.warning(f'Could not fit well')
self.log_err(f'Could not fit well, redchi = {round(redchi,2)}')
return self.fit_res
def eval_parity(self):
self.logger.debug(f'Evaluate parity of {self.fname}')
res = self.extract_pop()
if res!= None:
self.weight_fit = res['weight fit']
self.parity = 0
for i,j in enumerate(self.weight_fit):
if i%2 == 0:
self.parity += j*1
else:
self.parity += j*(-1)
return self.parity
#use map and filter to do it in a better way???
def plotxy(self):
self.plot = None
if __name__ == '__main__':
fpath ='../tests/test_data'
wfm1 = WignerFunc_Measurement(fpath)
wfm1.setup_sbs()
wfm1.print_report()
| 31.575972 | 173 | 0.57218 | 7,339 | 0.821285 | 0 | 0 | 0 | 0 | 0 | 0 | 1,906 | 0.213295 |
b000e8e09627008c8e1b4d9bdfd0f7e449d23a7e | 1,729 | py | Python | falmer/content/models/scheme.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | ["MIT"] | 2 | 2017-04-27T19:35:59.000Z | 2017-06-13T16:19:33.000Z | falmer/content/models/scheme.py | sussexstudent/falmer | ae735bd9d6177002c3d986e5c19a78102233308f | ["MIT"] | 975 | 2017-04-13T11:31:07.000Z | 2022-02-10T07:46:18.000Z | falmer/content/models/scheme.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | ["MIT"] | 3 | 2018-05-09T06:42:25.000Z | 2020-12-10T18:29:30.000Z |
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, MultiFieldPanel
from wagtail.core.blocks import StreamBlock
from wagtail.core.fields import StreamField
from wagtail.images.edit_handlers import ImageChooserPanel
from falmer.content import components
from falmer.content.components.structures import sidebar_card
from falmer.content.models.mixins import SocialMediaMixin
from falmer.matte.models import MatteImage
from .core import Page
class SchemePage(Page, SocialMediaMixin):
subpage_types = []
parent_page_types = ('content.SchemeIndexPage', )
main = StreamField(
StreamBlock([
components.text.to_pair(),
]), verbose_name='Main Content',
null=True, blank=True
)
hero_image = models.ForeignKey(MatteImage, null=False, blank=False, on_delete=models.PROTECT)
sidebar_cards = StreamField([
sidebar_card.to_pair()
], blank=True)
content_panels = Page.content_panels + [
StreamFieldPanel('main'),
ImageChooserPanel('hero_image'),
StreamFieldPanel('sidebar_cards'),
MultiFieldPanel((
FieldPanel('social_facebook_url'),
FieldPanel('social_twitter_handle'),
FieldPanel('social_snapchat_handle'),
FieldPanel('social_instagram_handle'),
FieldPanel('social_email_address'),
)),
]
api_fields = [
'hero_image',
]
class SchemeIndexPage(Page):
subpage_types = (SchemePage, )
preamble = StreamField([
components.text.to_pair(),
])
content_panels = Page.content_panels + [
StreamFieldPanel('preamble'),
]
api_fields = [
'preamble',
]
| 27.887097 | 97 | 0.685367 | 1,237 | 0.715442 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.126663 |
b0017ce65ff4bed42aaeae9f18c1a86d9bbd1f1d | 1,089 | py | Python | scripts/main_validation.py | platycristate/ptah | 15369382fc48860cc5bcd6a201a8b250ae8cb516 | ["MIT"] | null | null | null | scripts/main_validation.py | platycristate/ptah | 15369382fc48860cc5bcd6a201a8b250ae8cb516 | ["MIT"] | 1 | 2021-06-11T12:01:33.000Z | 2021-06-11T12:01:33.000Z | scripts/main_validation.py | platycristate/ptah | 15369382fc48860cc5bcd6a201a8b250ae8cb516 | ["MIT"] | 1 | 2021-06-11T11:57:06.000Z | 2021-06-11T11:57:06.000Z |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
import spacy
from time import time
import pickle
from collections import defaultdict
import pmi_tfidf_classifier as ptic
path = "../data/"
pd.set_option("display.max_rows", None, "display.max_columns", None)
np.random.seed(250)
spacy.prefer_gpu()
nlp = spacy.load("en_core_sci_sm", disable=['ner', 'parser'])
train_data = pd.read_csv(path + 'DILI_data_mixed.csv')
test_data = pd.read_csv(path + "Validation.tsv", sep="\t")
targets_train = train_data['Label'].values
tokenized_texts = ptic.tokenization(train_data)
tokenized_test_texts = ptic.tokenization(test_data)
N = len(tokenized_texts)
word2text_count = ptic.get_word_stat( tokenized_texts )
words_pmis = ptic.create_pmi_dict(tokenized_texts, targets_train, min_count=1)
t1 = time()
results = ptic.classify_pmi_based(words_pmis, word2text_count, tokenized_test_texts, N)
t2 = time()
test_data["Label"] = results
print("Classfication time: %s min" % (round(t2 - t1, 3)/60))
test_data.to_csv(path + "arsentii.ivasiuk@gmail.com_results.csv")
| 25.325581 | 87 | 0.769513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.184573 |
b00272462aa831ed8359bfb1b05ac3991b3aef99 | 956 | py | Python | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | ["MIT"] | 7 | 2021-04-06T20:33:31.000Z | 2021-09-30T23:29:24.000Z | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | ["MIT"] | 23 | 2020-09-09T15:01:50.000Z | 2022-01-03T08:58:36.000Z | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | ["MIT"] | 2 | 2020-12-14T10:07:07.000Z | 2021-06-29T00:20:43.000Z |
"""Tests for the marion application fields"""
from marion.defaults import DocumentIssuerChoices
from ..fields import IssuerLazyChoiceField, LazyChoiceField
def test_fields_lazy_choice_field():
"""
LazyChoiceField class.
Choices instance attribute should not be customizable.
"""
field = LazyChoiceField(
name="lazy_choice_field",
choices=[("option1", "Option 1"), ("option2", "Option 2")],
max_length=200,
)
errors = field.check()
assert len(errors) == 0
assert field.choices == []
def test_fields_issuer_lazy_choice_field(settings):
"""
IssuerLazyChoiceField class.
Choices attribute relies on DOCUMENT_ISSUER_CHOICES_CLASS setting.
"""
settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (
"marion.defaults.DocumentIssuerChoices"
)
field = IssuerLazyChoiceField(name="issuer_lazy_choice_field")
assert field.choices == DocumentIssuerChoices.choices
| 26.555556 | 70 | 0.712343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.396444 |
b00495771d6a310aa5e5d77c1c05c91690f9a756 | 2,331 | py | Python | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | ["MIT"] | 1 | 2022-03-12T00:47:24.000Z | 2022-03-12T00:47:24.000Z | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | ["MIT"] | null | null | null | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | ["MIT"] | 1 | 2022-03-14T23:42:57.000Z | 2022-03-14T23:42:57.000Z |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 3 12:15:40 2022
@author: udaya
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 27 18:06:29 2022
@author: udaya
"""
import cv2
import numpy as np
from djitellopy import Tello
frameWidth = 640
frameHeight = 480
###############################
# CONNECT TO TELLO
def initializeTello():
myDrone = Tello()
myDrone.connect()
myDrone.for_back_velocity = 0
myDrone.left_right_velocity = 0
myDrone.up_down_velocity = 0
myDrone.yaw_velocity = 0
myDrone.speed = 0
print(myDrone.get_battery())
myDrone.streamoff() #Turning off the streams, if any previous streams were on
myDrone.streamon()
return myDrone
# cap = cv2.VideoCapture(0)
# cap.set(3, frameWidth)
# cap.set(4, frameHeight)
def telloGetFrame(myDrone, w= 360,h=240):
myFrame = myDrone.get_frame_read()
myFrame = myFrame.frame
img = cv2.resize(myFrame,(w,h))
return img
def empty(a):
pass
myDrone = initializeTello()
cv2.namedWindow("HSV")
cv2.resizeWindow("HSV", 640, 240)
cv2.createTrackbar("HUE Min", "HSV", 0, 179, empty)
cv2.createTrackbar("HUE Max", "HSV", 179, 179, empty)
cv2.createTrackbar("SAT Min", "HSV", 0, 255, empty)
cv2.createTrackbar("SAT Max", "HSV", 255, 255, empty)
cv2.createTrackbar("VALUE Min", "HSV", 0, 255, empty)
cv2.createTrackbar("VALUE Max", "HSV", 255, 255, empty)
while True:
img = telloGetFrame(myDrone,frameWidth,frameHeight)  # telloGetFrame returns a single frame, not a (success, frame) pair
imgHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h_min = cv2.getTrackbarPos("HUE Min", "HSV")
h_max = cv2.getTrackbarPos("HUE Max", "HSV")
s_min = cv2.getTrackbarPos("SAT Min", "HSV")
s_max = cv2.getTrackbarPos("SAT Max", "HSV")
v_min = cv2.getTrackbarPos("VALUE Min", "HSV")
v_max = cv2.getTrackbarPos("VALUE Max", "HSV")
print(h_min)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHsv, lower, upper)
result = cv2.bitwise_and(img, img, mask=mask)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
hStack = np.hstack([img, mask, result])
cv2.imshow('Horizontal Stacking', hStack)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#cap.release()
cv2.destroyAllWindows()
| 26.793103 | 81 | 0.632347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.251394 |
b0050cae1ff0c2350a07478cbaf2f32a1d466c54 | 16,101 | py | Python | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | ["Apache-2.0"] | null | null | null | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | ["Apache-2.0"] | null | null | null | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | ["Apache-2.0"] | null | null | null |
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import configparser
import datetime
import logging
import os
import pathlib
from climetlab.scripts.tools import parse_args
from .str_utils import CamelCase, alphanum, camelCase, dashes, underscores
LOG = logging.getLogger(__name__)
# import climetlab.debug
APACHE_LICENCE = """This software is licensed under the terms of the Apache Licence Version 2.0
which can be obtained at http://www.apache.org/licenses/LICENSE-2.0."""
PREFIX_ECMWF_LICENCE = (
"""(C) Copyright {year} European Centre for Medium-Range Weather Forecasts."""
)
POSTFIX_ECMWF_LICENCE = """In applying this licence, ECMWF does not waive the privileges and immunities
granted to it by virtue of its status as an intergovernmental organisation
nor does it submit to any jurisdiction."""
class PluginContext:
def __init__(self, kind, **kwargs):
self.kind = kind
self._transformers = {}
self.kwargs = kwargs
def fill_form(self):
for t in TRANSFORMERS_CLASSES[self.kind]:
t(self)
@property
def template_dir(self):
here = os.path.dirname(__file__)
return os.path.realpath(os.path.join(here, "templates", self.kind))
@property
def output_dir(self):
return self("climetlab-plugin-name-climetlab-template")
def check_output_dir(self):
if os.path.exists(self.output_dir):
raise Exception(
f"Folder {self.output_dir} already exists. Not overwriting it."
)
def create_plugin(self):
self.check_output_dir()
for path in self.template_files_list():
template = os.path.join(self.template_dir, path)
output = os.path.join(self.output_dir, path)
output = self(output)
LOG.info(f"Creating {output}")
with open(template, "r") as f:
txt = f.read()
txt = self(txt)
os.makedirs(os.path.dirname(output), exist_ok=True)
with open(output, "w") as f:
f.write(txt)
print(f"Plugin built in {self.output_dir}")
print(self.final_help())
def template_files_list(self):
cwd = os.getcwd()
os.chdir(self.template_dir)
lst = [str(f) for f in pathlib.Path(".").glob("**/*") if os.path.isfile(str(f))]
# TODO: find a nicer way to avoid __path__ folders.
lst = [f for f in lst if "__pycache__" not in f]
os.chdir(cwd)
return lst
def __call__(self, txt):
if txt is None:
return None
assert isinstance(txt, str), txt
original = txt
for k, transformer in self._transformers.items():
txt = transformer(txt)
if txt != original:
txt = self(txt)
return txt
def get_default_email(self):
try:
return self._gitconfig("email")
except: # noqa:E722
return f'{self._transformers["full_name"].value.replace(" ", ".").lower()}@example.com'
def get_default_full_name(self):
try:
return self._gitconfig("name")
except: # noqa:E722
return "Joe Developer"
def _gitconfig(self, key):
if os.environ.get("CLIMETLAB_PLUGIN_TOOLS_NO_GUESS"):
raise Exception("CLIMETLAB_PLUGIN_TOOLS_NO_GUESS is set.")
config = configparser.ConfigParser()
gitconfig = os.path.expanduser("~/.gitconfig")
config.read(gitconfig)
value = config["user"][key]
LOG.info(f"Found {key} in gitconfig {value}")
return value
def final_help(self):
txt = """
--------------------------------------------------------------------
Climetlab plugin generated successfully. Next steps:
1. Create a repository on github at http://github.com/repo_url_climetlab_template.
2. Push to the repository as instructed by github:
cd climetlab-plugin-name-climetlab-template
git init
git add .
git commit -m'first commit'
git branch -M main
git remote add origin http://github.com/repo_url_climetlab_template
git push --set-upstream origin main
[Optional: See tests running http://github.com/repo_url_climetlab_template/actions]
3. Publish to PyPI (pip) manually:
python -m pip install --upgrade pip
pip install setuptools wheel twine
twine upload dist/*
# Need a PyPI login/password (create an account at https://pypi.org)
Others can now do `pip install climetlab-plugin-name-climetlab-template`.
4. Publish automatically from Github to pypi. [Optional]
Edit climetlab-plugin-name-climetlab-template/.github/workflows/check-and-publish to point to pypi instead of test.pypi.
Create a token from pypi at https://pypi.org/manage/account/token/
Add the token as a Github secret on the name PYPI_API_TOKEN at https://github.com/repo_url_climetlab_template/settings/secrets/actions/new
You are all set! Push the github repository and release from http://github.com/repo_url_climetlab_template/releases/new.
""" # noqa: E501
return self(txt)
class Transformer:
_help = ""
glob = None
def __init__(
self,
context,
key,
default=None,
pattern=None,
value=None,
force_prefix="",
):
LOG.debug(f"New Transformer({key})")
self._context = context
self.key = key
self.default = self._context(default)
self.force_prefix = self._context(force_prefix)
self.pattern = pattern
self.value = value
self.help = self._context(self._help)
self.fill()
LOG.debug(f"Transformer({key}) created")
def __repr__(self) -> str:
return f"Transformer({self.key}, pattern={self.pattern}, value={self.value})"
def fill(self):
if self.pattern is None:
self.pattern = self.key
if not self.glob:
self.adapts = [lambda x: x]
elif self.glob is True:
self.adapts = [underscores, dashes, CamelCase, camelCase]
else:
self.adapts = self.glob
self.read_value()
self.pattern = self.pattern + "_climetlab_template"
self._context._transformers[self.key] = self
def prompt(self):
return f"Please enter {self.desc} ('?' for help)"
def default_prompt(self):
if self.default:
return f"Hit 'return' to use the default value '{self.force_prefix}{self.default}'"
return ""
def try_reading_from_context(self):
if self._context.kwargs.get(self.key, None):
self.value = self._context.kwargs[self.key]
assert isinstance(self.value, str)
assert isinstance(self.force_prefix, str)
print(f"\n--> Using {self.force_prefix + self.value} (from command line)")
return True
def try_reading_from_user(self):
print()
value = input(">>>> " + self.force_prefix)
if value == "h" or value == "?":
print(f"?\n {self.help}")
if self.default is not None:
print(f" Default value: {self.force_prefix}{self.default}")
return self.try_reading_from_user()
if value:
self.value = value
print(f"\n--> Using {self.force_prefix + self.value}")
return True
def try_reading_from_default(self):
if self.default is not None:
print(f"\n--> Using {self.force_prefix + self.default} (default)")
self.value = self.default
return True
def read_value(self):
print()
print(self.prompt())
print(self.default_prompt())
if self.try_reading_from_context():
return
if self.try_reading_from_user():
return
if self.try_reading_from_default():
return
return self.read_value()
def __call__(self, txt):
for adapt in self.adapts:
p = adapt(self.pattern)
v = adapt(self.value)
if p in txt:
LOG.debug(f'Replacing "{p}" by "{v}"')
LOG.debug(f" k={self.key}")
LOG.debug(f" p: {self.pattern} -> {p}")
LOG.debug(f" v: {self.value} -> {v}")
txt = txt.replace(p, v)
return txt
class NoPromptTransformer(Transformer):
def read_value(self):
LOG.debug(f"{self.key}: not prompt using {self.value}.")
class GlobNoPromptTransformer(NoPromptTransformer):
glob = True
class SourceNameTransformer(GlobNoPromptTransformer):
def __init__(self, context):
name = context._transformers["plugin_name"].value
if name.endswith("-source"):
name = name[:-7]
super().__init__(context, "source_name", value=name)
class DatasetNameTransformer(Transformer):
desc = "the dataset name"
_help = """The dataset name is used as follow:
A climetlab dataset plugin package can provides one or more
datasets. This scripts creates a plugin with one dataset.
The dataset name will be used by the end users to access
the data through CliMetLab with:
cml.load_dataset("dataset-name", ...)
The convention is to make the dataset name start with
"plugin-name-climetlab-template".
The dataset name can easily be modified afterwards, without
regenerating a new plugin, simply by editing the setup.py."""
glob = True
def __init__(self, context):
super().__init__(
context,
"dataset_name",
default="",
force_prefix="plugin-name-climetlab-template",
)
def fill(self):
super().fill()
self.value = dashes(self.value).lower()
self.value = alphanum(self.value)
if self.value:
while self.value.startswith("-"):
self.value = self.value[1:]
name = "plugin-name-climetlab-template" + "-" + self.value
else:
self.value = "main"
name = "plugin-name-climetlab-template"
name = self._context(name)
GlobNoPromptTransformer(self._context, "dataset_full_name", value=name)
class PluginNameTransformer(Transformer):
desc = "the plugin name"
_help = """The plugin name is used to define:
- The python package name `import climetlab_{plugin_name} `
- The pip package name `pip install climetlab-{plugin-name}`.
It will also be used to suggest an appropriate URL on github.
The plugin_name can be the name of the project you are working on,
but notice that it should be specific enough as only one plugin with
a given name can be installed. Highly generic names (such as "meteo",
"domain", "copernicus", "country-name" are not recommended.
The plugin name cannot be easily modified afterwards.
You would need to regenerate a new one and copy existing code."""
glob = True
def __init__(self, context):
super().__init__(
context,
"plugin_name",
default="my_plugin",
)
context.check_output_dir()
class EmailTransformer(Transformer):
desc = "your email"
_help = """The email is used in setup.py to define the email maintainer of the pip package."""
def __init__(self, context):
super().__init__(
context,
"email",
default=context.get_default_email(),
)
class GithubUsernameTransformer(Transformer):
desc = "your Github user name"
_help = """The github username (or github space name) is used
to suggest a github repository url.
The username (ecmwf-lab) should be used if you wish to host your
repository on the github space "https://github.com/ecmwf-lab/").
Else, please provide your own github user name."""
def __init__(self, context):
super().__init__(
context,
"github_username",
default="ecmwf-lab",
)
class FullNameTransformer(Transformer):
desc = "your full name"
_help = """The full name is used in setup.py to define the maintainer of the pip package."""
def __init__(self, context):
super().__init__(
context,
"full_name",
default=context.get_default_full_name(),
)
class RepoUrlTransformer(Transformer):
desc = "the repository url"
_help = """The repository url name is used to define:
- The package url in the setup.py, i.e. the url published in Pypi for pip.
- The links in the README file.
If your do not want to host you repository on github,
please edit manually the generated setup.py afterwards."""
def __init__(self, context):
super().__init__(
context,
"repo_url",
default="github_username_climetlab_template/climetlab-plugin-name-climetlab-template",
force_prefix="https://github.com/",
)
class LicenceTransformer(Transformer):
_help = """The APACHE 2.0 licence is used for the plugin code.
Most users should answer "n" to use the standard APACHE 2.0 licence.
ECMWF users should answer "y" to add the appropriate addition to the licence.
The licence is added in the plugin code:
- In the header of each python file.
- In the LICENSE file.
- In the README.
If you choose another licence, please modify these files manually afterwards."""
desc = "Use the modified APACHE licence with ECMWF additions?"
def __init__(self, context):
super().__init__(context, "licence")
def prompt(self):
return f"{self.desc} ('y' or 'n', '?' for help)"
def fill(self):
self.read_value()
self.value = dict(y=True, n=False)[self.value.lower()]
self.year = str(datetime.datetime.now().year)
licence = APACHE_LICENCE
if self.value:
licence = "\n".join([PREFIX_ECMWF_LICENCE, licence, POSTFIX_ECMWF_LICENCE])
licence = licence.format(year=self.year)
print(f" Using this licence:\n{licence}\n")
NoPromptTransformer(self._context, "year_licence", value=str(self.year))
NoPromptTransformer(self._context, "licence_txt", value=licence)
NoPromptTransformer(self._context, "license_txt", value=licence)
licence_with_sharp = "\n".join(["# " + line for line in licence.split("\n")])
NoPromptTransformer(
self._context,
"licence_header",
pattern="# licence_header",
value=licence_with_sharp,
)
NoPromptTransformer(
self._context,
"license_header",
pattern="# license_header",
value=licence_with_sharp,
)
TRANSFORMERS_CLASSES = {
"dataset": [
PluginNameTransformer,
DatasetNameTransformer,
FullNameTransformer,
EmailTransformer,
GithubUsernameTransformer,
RepoUrlTransformer,
LicenceTransformer,
],
"source": [
PluginNameTransformer,
SourceNameTransformer,
FullNameTransformer,
EmailTransformer,
GithubUsernameTransformer,
RepoUrlTransformer,
LicenceTransformer,
],
}
class CreateDatasetPluginCmd:
@parse_args(
name=dict(help="Plugin name"),
dataset=dict(help="Dataset name"),
)
def do_plugin_create_dataset(self, args):
context = PluginContext(
"dataset", plugin_name=args.name, dataset_name=args.dataset
)
context.fill_form()
context.create_plugin()
class CreateSourcePluginCmd:
@parse_args(name=dict(help="Plugin name"))
def do_plugin_create_source(self, args):
context = PluginContext("source", plugin_name=args.name)
context.fill_form()
context.create_plugin()
| 33.266529 | 143 | 0.629464 | 14,422 | 0.895721 | 0 | 0 | 786 | 0.048817 | 0 | 0 | 6,537 | 0.406 |
b0074893c2e7005340588db291b50134738031f4 | 3,044 | py | Python | openclean/util/core.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | ["BSD-3-Clause"] | 4 | 2021-04-20T09:06:26.000Z | 2021-11-20T20:31:28.000Z | openclean/util/core.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | ["BSD-3-Clause"] | 14 | 2021-01-19T19:23:16.000Z | 2021-04-28T14:31:03.000Z | openclean/util/core.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | ["BSD-3-Clause"] | 5 | 2021-08-24T11:57:21.000Z | 2022-03-17T04:39:04.000Z |
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Collection of helper functions for various purpoeses."""
from typing import Optional
import uuid
def always_false(*args):
"""Predicate that always evaluates to False.
Parameters
----------
args: any
Variable list of arguments.
Returns
-------
bool
"""
return False
class eval_all(object):
"""Logic operator that evaluates a list of predicates and returns True only
if all predicates return a defined result value.
"""
def __init__(self, predicates, truth_value=True):
"""Initialize the list of predicates and the expected result value.
Parameters
----------
predicates: list(callable)
List of callables that are evaluated on a given value.
truth_value: scalar, default=True
Expected result value for predicate evaluation to be considered
satisfied.
"""
self.predicates = predicates
self.truth_value = truth_value
def __call__(self, value):
"""Evaluate all predicates on the given value. Returns True only if all
predicates evaluate to the defined result value.
Parameters
----------
value: scalar
Scalar value that is compared against the constant compare value.
Returns
-------
bool
"""
for f in self.predicates:
if f.eval(value) != self.truth_value:
return False
return True
def is_list_or_tuple(value):
"""Test if a given value is a list or tuple that can be converted into
multiple arguments.
Parameters
----------
value: any
Any object that is tested for being a list or tuple.
Returns
-------
bool
"""
return isinstance(value, list) or isinstance(value, tuple)
def scalar_pass_through(value):
"""Pass-through method for single scalar values.
Parameters
----------
value: scalar
Scalar cell value from a data frame row.
Returns
-------
scalar
"""
return value
def tenary_pass_through(*args):
"""Pass-through method for a list of argument values.
Parameters
----------
args: list of scalar
List of argument values.
Returns
-------
scalar
"""
return args
def unique_identifier(length: Optional[int] = None) -> str:
"""Get an identifier string of given length. Uses UUID to generate a unique
string and return the requested number of characters from that string.
Parameters
----------
length: int, default=None
Number of characters in the returned string.
Returns
-------
string
"""
identifier = str(uuid.uuid4()).replace('-', '')
if length is not None:
identifier = identifier[:length]
return identifier
| 23.415385 | 79 | 0.617608 | 1,163 | 0.382063 | 0 | 0 | 0 | 0 | 0 | 0 | 2,187 | 0.718463 |
b0091d1b6caace04c666bba350b86f62370a21bc | 78 | py | Python | desafio1.py | sergioboff/Desafios-Curso-em-Video | f876396635b12c00bdd9523758364bbebfd70ae0 | ["MIT"] | null | null | null | desafio1.py | sergioboff/Desafios-Curso-em-Video | f876396635b12c00bdd9523758364bbebfd70ae0 | ["MIT"] | null | null | null | desafio1.py | sergioboff/Desafios-Curso-em-Video | f876396635b12c00bdd9523758364bbebfd70ae0 | ["MIT"] | null | null | null |
nome= input('Qual seu nome ?: ')
print ('Olá {} Seja bem vindo'.format(nome))
| 26 | 44 | 0.641026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.544304 |
b00943e9be2f2f8a05e1b1e0bcce1f1c5bb49902 | 68 | py | Python | exquiro/parsers/openponk/__init__.py | xhusar2/conceptual_model_parser | 63eea4ab8b967a6d2ee612ffb4a06b93e97d0043 | ["MIT"] | null | null | null | exquiro/parsers/openponk/__init__.py | xhusar2/conceptual_model_parser | 63eea4ab8b967a6d2ee612ffb4a06b93e97d0043 | ["MIT"] | null | null | null | exquiro/parsers/openponk/__init__.py | xhusar2/conceptual_model_parser | 63eea4ab8b967a6d2ee612ffb4a06b93e97d0043 | ["MIT"] | null | null | null |
from .openpondk_class_diagram_parser import OpenponkClsDiagramParser
| 68 | 68 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b00a0ae9f8f71c5f857d2683a8d63e315db4a5e2 | 254 | py | Python | fastNLP/modules/encoder/__init__.py | awesome-archive/fastNLP | 767e7971e542783c0129ed88b7d871db775e653e | ["Apache-2.0"] | 4 | 2019-01-19T13:58:10.000Z | 2019-01-19T15:07:48.000Z | fastNLP/modules/encoder/__init__.py | TTTREE/fastNLP | ef82c1f10000752db32a5fa323668b94bcb940a1 | ["Apache-2.0"] | 1 | 2018-09-30T13:30:51.000Z | 2018-09-30T13:30:51.000Z | fastNLP/modules/encoder/__init__.py | TTTREE/fastNLP | ef82c1f10000752db32a5fa323668b94bcb940a1 | ["Apache-2.0"] | null | null | null |
from .conv import Conv
from .conv_maxpool import ConvMaxpool
from .embedding import Embedding
from .linear import Linear
from .lstm import LSTM
__all__ = ["LSTM",
"Embedding",
"Linear",
"Conv",
"ConvMaxpool"]
| 21.166667 | 37 | 0.629921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.173228 |
b00bb16d432ae4e7eebbd1a8f438f11ad4838ec1 | 1,141 | py | Python | openCVTutorials/openCVimgChangeColorspaceTutorial.py | nahutch/BasketballAI_P1 | 9a44f80787231df386910c28f17bab465fee013d | ["Apache-2.0"] | 1 | 2019-01-24T19:07:08.000Z | 2019-01-24T19:07:08.000Z | openCVTutorials/openCVimgChangeColorspaceTutorial.py | nahutch/BasketballAI_P1 | 9a44f80787231df386910c28f17bab465fee013d | ["Apache-2.0"] | null | null | null | openCVTutorials/openCVimgChangeColorspaceTutorial.py | nahutch/BasketballAI_P1 | 9a44f80787231df386910c28f17bab465fee013d | ["Apache-2.0"] | null | null | null |
#following tutorial: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
import numpy as np
import cv2
#there are more than 150 color-space conversions methods available in OpenCV
#why so many?
#gets all possible color space conversion flags
flags = [i for i in dir(cv2) if i.startswith("COLOR_")]
#print (flags)
#converts a bgr color to hsv
green = np.uint8([[[0,255,0]]])
hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
print (hsv_green)
#extracts any blue colored object using the built in video camera
#can detect my blue eyes if I get close and widen them
cap = cv2.VideoCapture(0)
while(1):
#take each frame
_, frame = cap.read()
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
mask = cv2.inRange(hsv,lower_blue,upper_blue)
res = cv2.bitwise_and(frame,frame,mask=mask)
cv2.imshow("frame",frame)
cv2.imshow("mask",mask)
cv2.imshow("result",res)
k = cv2.waitKey(5)& 0xFF
if k == 27:
break
cv2.destroyAllWindows()
| 26.534884 | 158 | 0.718668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.438212 |
b00c4cc641fafb1dc25683af3562c4fd4137c48c | 1,724 | py | Python | sdcflows/utils/misc.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | ["Apache-2.0"] | 16 | 2020-02-25T17:47:10.000Z | 2022-03-07T02:54:51.000Z | sdcflows/utils/misc.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | ["Apache-2.0"] | 175 | 2020-02-15T00:52:28.000Z | 2022-03-29T21:42:31.000Z | sdcflows/utils/misc.py | madisoth/sdcflows | c2f01e4f9b19dbd89ac1b54e3cfb0643fc3fd4f2 | ["Apache-2.0"] | 12 | 2019-05-28T23:34:37.000Z | 2020-01-22T21:32:22.000Z |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Basic miscellaneous utilities."""
def front(inlist):
"""
Pop from a list or tuple, otherwise return untouched.
Examples
--------
>>> front([1, 0])
1
>>> front("/path/somewhere")
'/path/somewhere'
"""
if isinstance(inlist, (list, tuple)):
return inlist[0]
return inlist
def last(inlist):
"""
Return the last element from a list or tuple, otherwise return untouched.
Examples
--------
>>> last([1, 0])
0
>>> last("/path/somewhere")
'/path/somewhere'
"""
if isinstance(inlist, (list, tuple)):
return inlist[-1]
return inlist
def get_free_mem():
"""Probe the free memory right now."""
try:
from psutil import virtual_memory
return round(virtual_memory().free, 1)
except Exception:
return None
| 24.628571 | 77 | 0.657773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,311 | 0.760441 |
b00d6bcbdc91daedbc8ff5cedd805b13268a4bca | 7,026 | py | Python | src/model1_predict.py | shubhampachori12110095/FashionAI-Clothing-Attribute-Labels-Classification | 04fb40948fcae55c379d8e878c41f281948155e8 | ["Apache-2.0"] | 2 | 2018-12-29T09:10:18.000Z | 2020-08-07T03:42:38.000Z | src/model1_predict.py | shubhampachori12110095/FashionAI-Clothing-Attribute-Labels-Classification | 04fb40948fcae55c379d8e878c41f281948155e8 | ["Apache-2.0"] | null | null | null | src/model1_predict.py | shubhampachori12110095/FashionAI-Clothing-Attribute-Labels-Classification | 04fb40948fcae55c379d8e878c41f281948155e8 | ["Apache-2.0"] | 3 | 2018-12-29T09:10:21.000Z | 2021-05-23T06:30:35.000Z |
# -*- coding: UTF-8 -*-
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
import cv2
from sklearn.model_selection import train_test_split
import matplotlib
from keras.utils import np_utils
from keras.optimizers import *
from keras.preprocessing.image import ImageDataGenerator
from fashionAI.config import config
from fashionAI.Utils.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from fashionAI.Utils.preprocessing.simplepreprocessor import SimplePreprocessor
from fashionAI.Utils.preprocessing.meanpreprocessor import MeanPreprocessor
from fashionAI.Utils.preprocessing.patchpreprocessor import PatchPreprocessor
from fashionAI.Utils.preprocessing.croppreprocessor import CropPreprocessor
from fashionAI.callbacks.trainingmonitor import TrainingMonitor
from fashionAI.Utils.io.datagenerator import DataGenerator
from fashionAI.nn.inceptionresnet_v2 import InceptionResnetV2
def predict1(testb_data_root_path = '/data/Attributes/Round2b/', output_csv_root_path = '../'):
df_test = pd.read_csv(testb_data_root_path+'Tests/question.csv', header=None)
df_test.columns = ['image_id', 'class', 'x']
del df_test['x']
##########attributes setting##
classes = ['collar_design_labels', 'lapel_design_labels', 'neck_design_labels', 'neckline_design_labels',
'coat_length_labels', 'pant_length_labels', 'skirt_length_labels','sleeve_length_labels']
design_classes = ['collar_design_labels', 'lapel_design_labels', 'neck_design_labels', 'neckline_design_labels']
design_label_count = {'collar': 5,'lapel': 5,'neck': 5,'neckline': 10}
length_classes = ['coat_length_labels', 'pant_length_labels', 'skirt_length_labels','sleeve_length_labels']
length_label_count = {'coat': 8,'pant': 6,'skirt': 6,'sleeve': 9}
##########model############
incepres1 = InceptionResnetV2(500, 500, design_label_count, weight_decay=0.001)
design_model = incepres1.build_net()
incepres2 = InceptionResnetV2(500, 500, length_label_count, weight_decay=0.001)
length_model = incepres2.build_net()
design_model.load_weights('../models/model1/multitask_design_final.h5')
length_model.load_weights('../models/model1/multitask_length_final.h5')
##########functions#########
pre_resize = SimplePreprocessor(530, 530) #use opencv to resize to 530*530
cp = CropPreprocessor(500, 500) #10-crop: 530*530 -> 500*500
iap = ImageToArrayPreprocessor() # transform data format
design_means = json.loads(open('./model1_mean/multitask_mean_design.json').read())
length_means = json.loads(open('./model1_mean/multitask_mean_length.json').read())
design_mp = MeanPreprocessor(design_means['R'], design_means['G'], design_means['B'])
length_mp = MeanPreprocessor(length_means['R'], length_means['G'], length_means['B'])
val_aug = ImageDataGenerator(rescale=1./255)
##########predict############
tmp_df = {}
print('model1\'s design_model:predict design')
cnt = 0
for idx in range(4):
print()
cur_class = design_classes[idx]
df_load = df_test[(df_test['class'] == cur_class)].copy()
df_load.reset_index(inplace=True)
del df_load['index']
X_test = [testb_data_root_path + test_img for test_img in df_load['image_id']]
print('design samples num-{0}:'.format(cur_class),len(X_test))
print('[INFO] predicting on test data (with crops)...')
print()
testGen = DataGenerator((X_test, None), 32, aug=val_aug, preprocessors=[pre_resize, design_mp])
predictions = []
for (i, images) in enumerate(testGen.generator(training=False, passes=1)):
if i % 10 == 0:
print('{}_test_batch_num/epochs:{}/{}'.format(cur_class, i, int(len(X_test) / 32)))
for image in images:
crops = cp.preprocess(image)
crops = np.array([iap.preprocess(c) for c in crops], dtype='float32')
pred = design_model.predict(crops)
predictions.append(pred[idx].mean(axis=0))
result = []
for i in range(len(X_test)):
tmp_list = predictions[i]
tmp_result = ''
for tmp_ret in tmp_list:
tmp_result += '{:.4f};'.format(tmp_ret)
print(X_test[i].split('/')[-1],' predicted: ', tmp_result[:-1])
result.append(tmp_result[:-1])
cnt = cnt +1
df_load['result'] = result
print(len(df_load))
tmp_df[cur_class] = df_load.copy()
print('Model1-design_model completes the prediction of design:on the {0}of samples'.format(cnt))
print('############################################')
print()
print('next is to predict length using Model1-length_model')
print('model1\'s length_model:predict length')
cnt = 0
for idx in range(4):
print()
cur_class = length_classes[idx]
df_load = df_test[(df_test['class'] == cur_class)].copy()
df_load.reset_index(inplace=True)
del df_load['index']
X_test = [testb_data_root_path + test_img for test_img in df_load['image_id']]
print('length samples num-{0}:'.format(cur_class), len(X_test))
print('[INFO] predicting on test data (with crops)...')
print()
testGen = DataGenerator((X_test, None), 32, aug=val_aug, preprocessors=[pre_resize, length_mp])
predictions = []
for (i, images) in enumerate(testGen.generator(training=False, passes=1)):
if i%10 ==0:
print('{}_test_batch_num/epochs:{}/{}'.format(cur_class, i, int(len(X_test) / 32)))
for image in images:
crops = cp.preprocess(image)
crops = np.array([iap.preprocess(c) for c in crops], dtype='float32')
pred = length_model.predict(crops)
predictions.append(pred[idx].mean(axis=0))
result = []
for i in range(len(X_test)):
tmp_list = predictions[i]
tmp_result = ''
for tmp_ret in tmp_list:
tmp_result += '{:.4f};'.format(tmp_ret)
print(X_test[i].split('/')[-1], ' predicted: ', tmp_result[:-1])
result.append(tmp_result[:-1])
cnt = cnt + 1
df_load['result'] = result
print(len(df_load))
tmp_df[cur_class] = df_load.copy()
print('Model1-length_model completes the prediction of length:on the {0}of samples'.format(cnt))
print()
print('Complete!')
###########output csv######
df_result = []
for cur in classes:
tmp = tmp_df[cur]
tmp.reset_index(inplace=True)
del tmp['index']
df_result.append(tmp)
for i in df_result:
i.columns = ['image_id', 'class', 'label']
result = pd.concat(df_result)
result.to_csv('../output/model1_result.csv', index=None, header=None)
print('model1 predicts the {} samples'.format(len(result)))
###result1###
return result
| 39.033333 | 117 | 0.646883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,722 | 0.24509 |
b00f67fa0503dd85f3c8d37c378d2f72c7f066bd | 700 | py | Python | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_QA.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | ["MIT"] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_QA.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | ["MIT"] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_QA.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | ["MIT"] | 1 | 2020-09-08T14:45:34.000Z | 2020-09-08T14:45:34.000Z |
"""Auto-generated file, do not edit by hand. QA metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_QA = PhoneMetadata(id='QA', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[129]\\d{2,4}', possible_length=(3, 4, 5)),
toll_free=PhoneNumberDesc(national_number_pattern='999', example_number='999', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='999', example_number='999', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='(?:1|20)\\d\\d|9(?:[27]\\d{3}|99)', example_number='100', possible_length=(3, 4, 5)),
short_data=True)
| 70 | 141 | 0.754286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.197143 |
b00f7bd4e39ef2e25f158e39f9604eb34518aa71 | 815 | py | Python | test_parametrized_tests.py | karianjahi/python_pytest_tutorial | d8cf7bc9d85e75cc3248a35d8abdfd24d76276cd | ["MIT"] | null | null | null | test_parametrized_tests.py | karianjahi/python_pytest_tutorial | d8cf7bc9d85e75cc3248a35d8abdfd24d76276cd | ["MIT"] | null | null | null | test_parametrized_tests.py | karianjahi/python_pytest_tutorial | d8cf7bc9d85e75cc3248a35d8abdfd24d76276cd | ["MIT"] | null | null | null |
"""
Organizing test and parametrizing
"""
# Parametrized tests: Run many tests in one
# pylint: disable=W0622
# pylint: disable=R0201
# pylint: disable=R0903
import pytest
from word_counter import count_words
class TestWordCounterParametrization:
"""
In this case we want to test many tests in one function
"""
Tests = [
("Today is Monday", 3),
("head", 1),
("He jumps", 2),
("He\nis\nnot\nfeeling\nwell", 5),
("Mein Hände", 2),
('<h1>This is a heading</h1>', 4),
('<h1 class="foo">this is a heading</h1>', 5),
("Joseph-Njeri", 2)
]
@pytest.mark.parametrize('input, output', Tests)
def test_all_in_one(self, input, output):
"""
Testing all in one
"""
assert count_words(input) == output
| 27.166667 | 59 | 0.586503 | 606 | 0.742647 | 0 | 0 | 189 | 0.231618 | 0 | 0 | 437 | 0.535539 |
b0110b071338ec4840e5427dcade83815657e854 | 1,685 | py | Python | src/dep_appearances/cli.py | jdlubrano/dep-appearances | bf752b469463ee8cb7351df37231d250be3bcf47 | ["MIT"] | null | null | null | src/dep_appearances/cli.py | jdlubrano/dep-appearances | bf752b469463ee8cb7351df37231d250be3bcf47 | ["MIT"] | null | null | null | src/dep_appearances/cli.py | jdlubrano/dep-appearances | bf752b469463ee8cb7351df37231d250be3bcf47 | ["MIT"] | null | null | null |
from argparse import ArgumentParser
import os
import pdb
import sys
from dep_appearances.appearances_report import AppearancesReport
def main():
parser = ArgumentParser(description='Find dependencies that are unused and underused in your codebase.')
parser.add_argument(
'project_root',
metavar='PATH',
type=str,
nargs='?',
default=os.getcwd(),
help="The path to your project's root (defaults to your current working directory)"
)
parser.add_argument(
'--underused_threshold',
type=int,
default=2,
help='The threshold to set for marking dependencies as underused (default: 2)'
)
args = parser.parse_args()
report = AppearancesReport(project_root=args.project_root).compile()
unused_dependencies = report.unused_dependencies()
underused_dependencies = report.underused_dependencies(usage_threshold=args.underused_threshold)
if len(unused_dependencies) == 0:
print("No unused dependencies found")
else:
print("Unused dependencies:")
for dep in unused_dependencies:
print(f"\t{dep.name}")
print("")
if len(underused_dependencies) == 0:
print("No underused dependencies found")
else:
print(f"Underused dependencies (usage threshold = {args.underused_threshold}):")
for dep in underused_dependencies:
print(f"\t{dep.name}\n\t\timported in:")
for import_statement in dep.import_statements:
print(f"\t\t{os.path.relpath(import_statement.source_file)}:{import_statement.line_number}")
print("")
if __name__ == "__main__":
main()
| 30.089286 | 108 | 0.668249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.337685 |
b01166da273e45dbd1d37d892c58fe4b13c2a3e7 | 250 | py | Python | kernel/filters.py | pycodi/django-kernel | 87829a0d47d04a3bb3d5c7cb79a03f0772dfdf46 | ["MIT"] | 1 | 2016-09-16T11:40:45.000Z | 2016-09-16T11:40:45.000Z | kernel/filters.py | pycodi/django-kernel | 87829a0d47d04a3bb3d5c7cb79a03f0772dfdf46 | ["MIT"] | null | null | null | kernel/filters.py | pycodi/django-kernel | 87829a0d47d04a3bb3d5c7cb79a03f0772dfdf46 | ["MIT"] | null | null | null |
from django_filters import Filter
from django_filters.fields import Lookup
class ListFilter(Filter):
def filter(self, qs, value):
value_list = value.split(u',')
return super(ListFilter, self).filter(qs, Lookup(value_list, 'in'))
| 31.25 | 75 | 0.716 | 173 | 0.692 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.032 |
b0120808c75c26295ac6097ea109b68947111348 | 323 | py | Python | tests/expr/expr08.py | ktok07b6/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | 83 | 2015-11-30T09:59:13.000Z | 2021-08-03T09:12:28.000Z | tests/expr/expr08.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | 4 | 2017-02-10T01:43:11.000Z | 2020-07-14T03:52:25.000Z | tests/expr/expr08.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | 11 | 2016-11-18T14:39:15.000Z | 2021-02-23T10:05:20.000Z |
from polyphony import testbench
def expr08(a, b, c):
return a < b < c
@testbench
def test():
assert False == expr08(0, 0, 0)
assert True == expr08(0, 1, 2)
assert False == expr08(1, 0, 2)
assert False == expr08(2, 1, 0)
assert True == expr08(-1, 0, 1)
assert True == expr08(-2, -1, 0)
test()
| 21.533333 | 36 | 0.575851 | 0 | 0 | 0 | 0 | 239 | 0.739938 | 0 | 0 | 0 | 0 |
b0134690af47b5e16baf709ce4dca459913ce34e | 1,175 | py | Python | pyfirmata_tmp36_MQ7_Mysql.py | amy861113/Arduino | 7592c2029242fca24245ee1c34b2b9f6043070d1 | ["MIT"] | null | null | null | pyfirmata_tmp36_MQ7_Mysql.py | amy861113/Arduino | 7592c2029242fca24245ee1c34b2b9f6043070d1 | ["MIT"] | null | null | null | pyfirmata_tmp36_MQ7_Mysql.py | amy861113/Arduino | 7592c2029242fca24245ee1c34b2b9f6043070d1 | ["MIT"] | null | null | null |
from pyfirmata import Arduino, util
from time import sleep
import pymysql
def arduino_map(x, in_min, in_max, out_min, out_max):
return(x-in_min) * (out_max-out_min) / (in_max-in_min) + out_min
PORT = "COM4"
uno = Arduino(PORT)
sleep(5)
it = util.Iterator(uno)
it.start()
a4 = uno.get_pin('a:4:i')
a5 = uno.get_pin('a:5:i')
db = pymysql.connect("120.110.114.14", "hanshin", "Hanshin519", "Student", port = 3306)
cursor = db.cursor()
print("Arduino start~")
try:
while True:
gas = a4.read()
tmp = a5.read()
try:
gasValue = round(gas * 1024)
Vout = arduino_map(tmp, 0, 1, 0, 5)
tmpValue = round((((Vout * 1000) - 500) / 10) , 2)
#tmpValue = ((round(tmp * 1024)) * (5.0/1024) -0.5) / 0.01
sleep(5)
except TypeError:
pass
print('{0} {1}'.format(gasValue, tmpValue))
sql = "update Student.articles_envdata set tmpValue = {1}, gasValue = {0} where data_id = 1".format(gasValue, tmpValue)
cursor.execute(sql)
db.commit()
print("Update Success~")
sleep(5)
except Exception as e:
db.rollback()
print("Error!:{0}".format(e))
except KeyboardInterrupt:
uno.exit()
| 23.5
| 124
| 0.612766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.224681
|
b01440159aa9a67d2eac6230f37afcedb41016ba
| 303
|
py
|
Python
|
app/views.py
|
kobrient/tinypilot
|
aa40f11a370e04b11e0f72d34647c0e01669bbe9
|
[
"MIT"
] | null | null | null |
app/views.py
|
kobrient/tinypilot
|
aa40f11a370e04b11e0f72d34647c0e01669bbe9
|
[
"MIT"
] | null | null | null |
app/views.py
|
kobrient/tinypilot
|
aa40f11a370e04b11e0f72d34647c0e01669bbe9
|
[
"MIT"
] | null | null | null |
import flask
from find_files import find as find_files
views_blueprint = flask.Blueprint('views', __name__, url_prefix='')
@views_blueprint.route('/', methods=['GET'])
def index_get():
return flask.render_template(
'index.html', custom_elements_files=find_files.custom_elements_files())
| 25.25
| 79
| 0.752475
| 0
| 0
| 0
| 0
| 175
| 0.577558
| 0
| 0
| 29
| 0.09571
|
b0144723fdb455462aff667b476dc0e86c2e8039
| 577
|
py
|
Python
|
example.py
|
LAIRLAB/libpyarr
|
9e973a4045519fa6aedae3aaabd8267f6f796a8c
|
[
"BSD-3-Clause"
] | 1
|
2016-04-09T02:37:03.000Z
|
2016-04-09T02:37:03.000Z
|
example.py
|
LAIRLAB/libpyarr
|
9e973a4045519fa6aedae3aaabd8267f6f796a8c
|
[
"BSD-3-Clause"
] | null | null | null |
example.py
|
LAIRLAB/libpyarr
|
9e973a4045519fa6aedae3aaabd8267f6f796a8c
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
import warnings, numpy
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from libpyarr_example import *
def main():
my_uninited_arr = bar()
my_uninited_arr[:, 0,0] = -50
print "some -50s, some uninitialized:",my_uninited_arr[:,:2,0]
foo(my_uninited_arr)
print "Definitely zeroed:",my_uninited_arr[:,:2,0]
print "by the way, int corresponds to numpy.int32:", my_uninited_arr.dtype
print "in general, numpy and C data types correspond the way you would expect."
if __name__=='__main__':
main()
| 28.85
| 83
| 0.694974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.363951
|
b0146dc56f96a9ee8522dfa5aeb06d9a9ea59827
| 1,167
|
py
|
Python
|
kitty_tiny/tools/annoGen/AnnoEventHandler.py
|
sixxchung/mymm
|
4e8cd43c2615c08a60bf21fe0c4604344b470602
|
[
"MIT"
] | null | null | null |
kitty_tiny/tools/annoGen/AnnoEventHandler.py
|
sixxchung/mymm
|
4e8cd43c2615c08a60bf21fe0c4604344b470602
|
[
"MIT"
] | null | null | null |
kitty_tiny/tools/annoGen/AnnoEventHandler.py
|
sixxchung/mymm
|
4e8cd43c2615c08a60bf21fe0c4604344b470602
|
[
"MIT"
] | null | null | null |
import logging
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
class AnnoEventHandler(FileSystemEventHandler):
"""Logs all the events captured."""
def __init__(self, logger=None):
super().__init__()
self.logger = logger or logging.root
def on_moved(self, event):
super().on_moved(event)
what = 'directory' if event.is_directory else 'file'
self.logger.info("Moved %s: from %s to %s", what, event.src_path,
event.dest_path)
def on_created(self, event):
super().on_created(event)
what = 'directory' if event.is_directory else 'file'
self.logger.info("Created %s: %s", what, event.src_path)
def on_deleted(self, event):
super().on_deleted(event)
what = 'directory' if event.is_directory else 'file'
self.logger.info("Deleted %s: %s", what, event.src_path)
def on_modified(self, event):
super().on_modified(event)
what = 'directory' if event.is_directory else 'file'
        if what == 'file':
self.logger.info("Annotator detected modified %s: %s", what, event.src_path)
| 31.540541
| 88
| 0.640103
| 1,077
| 0.922879
| 0
| 0
| 0
| 0
| 0
| 0
| 202
| 0.173093
|
b01504199a00f0b0ea4a2e7806f9a6775f0b35bb
| 11,037
|
py
|
Python
|
BCPNN/backend/_cpu_base_backend.py
|
KTH-HPC/StreamBrain
|
37b16e7c8e02e6d2800bcf89630a0f4419e90cd4
|
[
"BSD-2-Clause"
] | 4
|
2020-10-20T22:15:25.000Z
|
2022-02-10T10:25:24.000Z
|
BCPNN/backend/_cpu_base_backend.py
|
KTH-HPC/StreamBrain
|
37b16e7c8e02e6d2800bcf89630a0f4419e90cd4
|
[
"BSD-2-Clause"
] | 1
|
2020-12-16T10:46:50.000Z
|
2020-12-16T10:46:50.000Z
|
BCPNN/backend/_cpu_base_backend.py
|
KTH-HPC/StreamBrain
|
37b16e7c8e02e6d2800bcf89630a0f4419e90cd4
|
[
"BSD-2-Clause"
] | 1
|
2020-10-20T22:15:29.000Z
|
2020-10-20T22:15:29.000Z
|
import sys
import numpy as np
from tqdm import tqdm
from contextlib import nullcontext
class DenseLayer:
_update_state = None
_softmax_minicolumns = None
_update_counters = None
_update_weights = None
_update_bias = None
def __init__(
self,
in_features,
hypercolumns,
minicolumns,
taupdt,
initial_counters,
dtype=np.float64):
self.in_features = in_features
self.hypercolumns = hypercolumns
self.minicolumns = minicolumns
self.out_features = hypercolumns * minicolumns
self.taupdt = taupdt
self.dtype = dtype
self.weights = (
0.1 *
np.random.randn(
self.in_features,
self.out_features)).astype(dtype)
self.bias = (0.1 * np.random.rand(self.out_features)).astype(dtype)
self.Ci = initial_counters[0] * np.ones([in_features]).astype(dtype)
self.Cj = initial_counters[1] * \
np.ones([self.out_features]).astype(dtype)
self.Cij = initial_counters[2] * \
np.ones([self.in_features, self.out_features]).astype(dtype)
def compute_activation(self, inputs):
activations = np.zeros(
[inputs.shape[0], self.out_features], dtype=self.dtype)
activations = self._update_state(
activations, self.weights, self.bias, inputs)
activations = self._softmax_minicolumns(
activations, self.hypercolumns, self.minicolumns)
return activations
def convert(self, dtype):
self.dtype = dtype
self.weights = self.weights.astype(dtype)
self.bias = self.bias.astype(dtype)
self.Ci = self.Ci.astype(dtype)
self.Cj = self.Cj.astype(dtype)
self.Cij = self.Cij.astype(dtype)
def train_step(self, inputs, outputs):
self.Ci, self.Cj, self.Cij = self._update_counters(
self.Ci, self.Cj, self.Cij, inputs, outputs, self.taupdt)
def train_finalize(self):
self.weights = self._update_weights(
self.weights, self.Ci, self.Cj, self.Cij, self.taupdt / 2)
self.bias = self._update_bias(self.bias, self.Cj, self.taupdt / 2)
class StructuralPlasticityLayer:
_update_state = None
_softmax_minicolumns = None
_update_counters = None
_update_weights = None
_update_bias = None
_update_mask = None
_apply_mask = None
def __init__(
self,
in_features,
hypercolumns,
minicolumns,
taupdt,
khalf,
pmin,
taubdt,
density,
mask_iterations,
initial_counters,
dtype=np.float64):
self.in_features = in_features
self.hypercolumns = hypercolumns
self.minicolumns = minicolumns
self.out_features = hypercolumns * minicolumns
self.taupdt = taupdt
self.khalf = khalf
self.pmin = pmin
self.taubdt = taubdt
self.density = density
self.mask_iterations = mask_iterations
self.dtype = dtype
self.weights = (
0.1 *
np.random.randn(
self.in_features,
self.out_features)).astype(dtype)
self.bias = (0.1 * np.random.rand(self.out_features)).astype(dtype)
self.Ci = initial_counters[0] * np.ones([in_features]).astype(dtype)
self.Cj = initial_counters[1] * \
np.ones([self.out_features]).astype(dtype)
self.Cij = initial_counters[2] * \
np.ones([self.in_features, self.out_features]).astype(dtype)
self.kbi = np.ones([self.out_features]).astype(dtype)
self.wmask = (
np.random.rand(
self.in_features,
self.hypercolumns) < self.density).astype(
np.uint8)
def compute_activation(self, inputs):
activations = np.zeros(
[inputs.shape[0], self.out_features], dtype=self.dtype)
activations = self._update_state(
activations, self.weights, self.bias, inputs)
activations = self._softmax_minicolumns(
activations, self.hypercolumns, self.minicolumns)
return activations
def convert(self, dtype):
self.dtype = dtype
self.weights = self.weights.astype(dtype)
self.bias = self.bias.astype(dtype)
self.Ci = self.Ci.astype(dtype)
self.Cj = self.Cj.astype(dtype)
self.Cij = self.Cij.astype(dtype)
self.kbi = self.kbi.astype(dtype)
def train_step(self, inputs, outputs, hypercolumn=None):
self.Ci, self.Cj, self.Cij = self._update_counters(
self.Ci, self.Cj, self.Cij, inputs, outputs, self.taupdt)
self.weights = self._update_weights(
self.weights, self.Ci, self.Cj, self.Cij, self.taupdt / 2)
self.bias, self.kbi = self._update_bias(
self.bias, self.kbi, self.Cj, self.taupdt / 2, self.khalf, self.pmin, self.taubdt)
if hypercolumn is not None:
#print("Updating hypercolumn:", hypercolumn)
self.wmask = self._update_mask(
self.wmask,
self.weights,
self.Ci,
self.Cj,
self.Cij,
self.taupdt / 2,
self.hypercolumns,
self.minicolumns,
hypercolumn,
self.mask_iterations)
self.weights = self._apply_mask(
self.weights,
self.wmask,
self.hypercolumns,
self.minicolumns)
def train_finalize(self):
pass
class Network:
def __init__(self, dtype):
self.dtype = dtype
self._layers = []
self.world_rank = 0
self.world_size = 1
def add_layer(self, layer):
if layer.dtype != self.dtype:
layer.convert(self.dtype)
self._layers.append(layer)
def fit(
self,
training_data,
training_labels,
maximal_batch_size,
schedule):
training_data = training_data.astype(self.dtype)
training_labels = training_labels.astype(self.dtype)
for layer, epochs in schedule:
self._train_layer(
layer,
maximal_batch_size,
training_data,
training_labels,
epochs)
def evaluate(self, images, labels, maximal_batch_size):
images = images.astype(self.dtype)
labels = labels.astype(self.dtype)
correct = np.array([0])
total = np.array([0])
number_of_batches = (
images.shape[0] + maximal_batch_size - 1) // maximal_batch_size
if self.world_rank == 0:
cm = tqdm(total=number_of_batches)
else:
cm = nullcontext()
with cm as pbar:
if self.world_rank == 0:
pbar.set_description('Evaluation')
for i in range(number_of_batches):
global_start = i * maximal_batch_size
global_end = global_start + maximal_batch_size if global_start + \
maximal_batch_size <= images.shape[0] else images.shape[0]
local_batch_size = (
global_end - global_start) // self.world_size
start_sample = global_start + self.world_rank * local_batch_size
end_sample = start_sample + local_batch_size
batch_images = images[start_sample:end_sample, :]
batch_labels = labels[start_sample:end_sample, :]
activations = batch_images
for layer in self._layers:
activations = layer.compute_activation(activations)
correct += (np.argmax(activations, axis=1) ==
np.argmax(batch_labels, axis=1)).sum()
total += batch_images.shape[0]
if self.world_rank == 0:
pbar.update(1)
return correct, total
def _train_layer(
self,
layer,
maximal_batch_size,
images,
labels,
epochs):
for epoch in range(epochs):
if self.world_rank == 0:
print('Layer - %d/%d' %
(layer + 1, len(self._layers)), flush=True)
idx = np.random.permutation(range(images.shape[0]))
shuffled_images = images[idx, :]
shuffled_labels = labels[idx, :]
n_hypercolumns = self._layers[layer].hypercolumns
hypercolumns_shuffled = np.random.permutation(
range(n_hypercolumns))
number_of_batches = (
images.shape[0] + maximal_batch_size - 1) // maximal_batch_size
local_batch_size = maximal_batch_size // self.world_size
if self.world_rank == 0:
cm = tqdm(total=number_of_batches)
else:
cm = nullcontext()
with cm as pbar:
if self.world_rank == 0:
pbar.set_description('Epoch %d/%d' % (epoch + 1, epochs))
for i in range(number_of_batches):
global_start = i * maximal_batch_size
global_end = global_start + maximal_batch_size if global_start + \
maximal_batch_size <= images.shape[0] else images.shape[0]
local_batch_size = (
global_end - global_start) // self.world_size
start_sample = global_start + self.world_rank * local_batch_size
end_sample = start_sample + local_batch_size
batch_images = shuffled_images[start_sample:end_sample, :]
batch_labels = shuffled_labels[start_sample:end_sample, :]
prev_activation = None
activation = batch_images
for l in range(layer + 1):
prev_activation = activation
activation = self._layers[l].compute_activation(
prev_activation)
if epoch > 0 and i % (
number_of_batches // (n_hypercolumns + 1)) == 0:
h = i // (number_of_batches // (n_hypercolumns + 1))
h = hypercolumns_shuffled[h] if h < n_hypercolumns else None
else:
h = None
if layer + 1 == len(self._layers):
self._layers[layer].train_step(
prev_activation, batch_labels)
else:
self._layers[layer].train_step(
prev_activation, activation, h)
if self.world_rank == 0:
pbar.update(1)
self._layers[layer].train_finalize()
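# A minimal wiring sketch (not in the original file). The _update_* callables are
# bound by a concrete backend, so the base classes above are not directly usable;
# names like train_x/train_y and the layer sizes are placeholders:
#
#   net = Network(dtype=np.float32)
#   net.add_layer(DenseLayer(784, hypercolumns=30, minicolumns=100,
#                            taupdt=0.005, initial_counters=(1.0, 1.0, 1.0)))
#   net.fit(train_x, train_y, maximal_batch_size=128, schedule=[(0, 3)])
#   correct, total = net.evaluate(test_x, test_y, maximal_batch_size=128)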
| 35.038095
| 94
| 0.552596
| 10,941
| 0.991302
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.007611
|
b01639c2289f47ba698eea2092678bb22c032e75
| 6,879
|
py
|
Python
|
flux_sensors/flux_sensor.py
|
Flux-Coordinator/flux-sensors
|
44968c95e277023c3a6777d653e7b3cb4e333923
|
[
"MIT"
] | null | null | null |
flux_sensors/flux_sensor.py
|
Flux-Coordinator/flux-sensors
|
44968c95e277023c3a6777d653e7b3cb4e333923
|
[
"MIT"
] | 1
|
2018-06-14T18:21:33.000Z
|
2018-06-14T18:21:33.000Z
|
flux_sensors/flux_sensor.py
|
Flux-Coordinator/flux-sensors
|
44968c95e277023c3a6777d653e7b3cb4e333923
|
[
"MIT"
] | null | null | null |
from flux_sensors.localizer.localizer import Localizer, Coordinates, LocalizerError, PozyxDeviceError
from flux_sensors.light_sensor.light_sensor import LightSensor
from flux_sensors.config_loader import ConfigLoader
from flux_sensors.flux_server import FluxServer, FluxServerError
from flux_sensors.models import models
import time
import requests
import json
import logging
logger = logging.getLogger(__name__)
class FluxSensorError(Exception):
"""Base class for exceptions in this module."""
class InitializationError(FluxSensorError):
"""Exception raised when the initialization of the sensors failed."""
class FluxSensor:
"""Controlling class for the flux-sensors components"""
def __init__(self, localizer_instance: Localizer, light_sensor_instance: LightSensor, config_loader: ConfigLoader,
flux_server: FluxServer) -> None:
self._localizer = localizer_instance
self._light_sensor = light_sensor_instance
self._config_loader = config_loader
self._flux_server = flux_server
self._timeout = time.time()
def start_when_ready(self) -> None:
logger.info("Flux-sensors in standby. Start polling Flux-server")
while True:
if not self._flux_server.poll_server_urls(self._config_loader.get_server_urls(),
self._config_loader.get_timeout()):
logger.warning("All server URLs failed to respond. Retry started...")
continue
logger.info("Server responding. Start measurement when ready...")
if not self._flux_server.poll_active_measurement():
FluxSensor.handle_retry(3)
continue
logger.info("Success! A flux-server is available and a measurement is active.")
try:
self._flux_server.login_at_server()
response = self._flux_server.get_active_measurement()
self._flux_server.log_server_response(response)
except requests.exceptions.RequestException as err:
logger.error("Request error while loading active measurement from Flux-server")
logger.error(err)
FluxSensor.handle_retry(3)
continue
except FluxServerError as err:
logger.error("Server error while loading active measurement from Flux-server")
logger.error(err)
FluxSensor.handle_retry(3)
continue
try:
self.clear_sensors()
self.initialize_sensors(response.text)
except InitializationError as err:
logger.error(err)
logger.error("Error while initializing the sensors")
FluxSensor.handle_retry(3)
continue
logger.info("Flux-sensors initialized. Start measurement...")
self.start_measurement()
@staticmethod
def handle_retry(seconds: int) -> None:
logger.info("Retry starts in {} seconds...".format(seconds))
time.sleep(seconds)
def initialize_sensors(self, measurement: str) -> None:
self.initialize_localizer(measurement)
self.initialize_light_sensor()
def initialize_localizer(self, measurement: str) -> None:
try:
measurement_json = json.loads(measurement)
for anchorPosition in measurement_json["anchorPositions"]:
self._localizer.add_anchor_to_cache(int(anchorPosition["anchor"]["networkId"], 16),
Coordinates(int(anchorPosition["xposition"]),
int(anchorPosition["yposition"]),
int(anchorPosition["zposition"])))
except(ValueError, KeyError, TypeError):
raise InitializationError("Error while parsing the Pozyx Anchors.")
try:
self._localizer.initialize()
except LocalizerError as err:
logger.error(err)
raise InitializationError("Error while initializing Pozyx.")
def initialize_light_sensor(self) -> None:
self._light_sensor.initialize()
def clear_sensors(self) -> None:
self._localizer.clear()
def _reset_timeout(self) -> None:
self._timeout = time.time() + self._config_loader.get_timeout()
def _is_timeout_exceeded(self) -> bool:
return time.time() > self._timeout
def start_measurement(self) -> None:
readings = []
self._flux_server.initialize_last_response()
self._reset_timeout()
while not self._is_timeout_exceeded():
try:
position = self._localizer.do_positioning()
illuminance = self._light_sensor.do_measurement()
readings.append(models.Reading(illuminance, position))
try:
if self._flux_server.get_last_response() == 200:
if len(readings) >= self._flux_server.MIN_BATCH_SIZE:
self._flux_server.reset_last_response()
json_data = json.dumps(readings, default=lambda o: o.__dict__)
self._flux_server.send_data_to_server(json_data)
del readings[:]
self._reset_timeout()
elif self._flux_server.get_last_response() == 401:
logger.info("Auth token expired. Try new login...")
self._flux_server.login_at_server()
self._flux_server.initialize_last_response()
elif self._flux_server.get_last_response() == 404:
logger.info("The measurement has been stopped by the server.")
return
elif self._flux_server.get_last_response() != self._flux_server.RESPONSE_PENDING:
logger.info("The measurement has been stopped.")
return
except requests.exceptions.RequestException as err:
logger.error("Request error while sending new readings to Flux-server")
logger.error(err)
return
except FluxServerError as err:
logger.error("Server error while sending new readings to Flux-server")
logger.error(err)
return
except PozyxDeviceError as err:
logger.error("Pozyx error while creating new readings")
logger.error(err)
continue
logger.error("Timeout of {}s is exceeded while waiting for Flux-server response".format(
self._config_loader.get_timeout()))
| 44.668831
| 118
| 0.602704
| 6,456
| 0.938509
| 0
| 0
| 154
| 0.022387
| 0
| 0
| 1,125
| 0.163541
|
b019647d7984c42bcd98ff6521f630e19b83c858
| 11,288
|
py
|
Python
|
Network.py
|
Coldog2333/pytoflow
|
3cec913fa5a2ddb8133a075d4ff177cceb74f06a
|
[
"MIT"
] | 102
|
2018-12-29T16:19:18.000Z
|
2022-01-13T03:54:04.000Z
|
Network.py
|
mengxiangyudlut/pytoflow
|
3cec913fa5a2ddb8133a075d4ff177cceb74f06a
|
[
"MIT"
] | 19
|
2019-04-26T10:19:14.000Z
|
2021-11-14T07:36:23.000Z
|
Network.py
|
mengxiangyudlut/pytoflow
|
3cec913fa5a2ddb8133a075d4ff177cceb74f06a
|
[
"MIT"
] | 32
|
2019-03-04T00:10:06.000Z
|
2022-01-11T08:19:19.000Z
|
import math
import torch
# import torch.utils.serialization # it was removed in torch v1.0.0 or higher version.
arguments_strModel = 'sintel-final'
SpyNet_model_dir = './models' # The directory of SpyNet's weights
def normalize(tensorInput):
tensorRed = (tensorInput[:, 0:1, :, :] - 0.485) / 0.229
tensorGreen = (tensorInput[:, 1:2, :, :] - 0.456) / 0.224
tensorBlue = (tensorInput[:, 2:3, :, :] - 0.406) / 0.225
return torch.cat([tensorRed, tensorGreen, tensorBlue], 1)
def denormalize(tensorInput):
tensorRed = (tensorInput[:, 0:1, :, :] * 0.229) + 0.485
tensorGreen = (tensorInput[:, 1:2, :, :] * 0.224) + 0.456
tensorBlue = (tensorInput[:, 2:3, :, :] * 0.225) + 0.406
return torch.cat([tensorRed, tensorGreen, tensorBlue], 1)
Backward_tensorGrid = {}
def Backward(tensorInput, tensorFlow, cuda_flag):
if str(tensorFlow.size()) not in Backward_tensorGrid:
tensorHorizontal = torch.linspace(-1.0, 1.0, tensorFlow.size(3)).view(1, 1, 1, tensorFlow.size(3)).expand(tensorFlow.size(0), -1, tensorFlow.size(2), -1)
tensorVertical = torch.linspace(-1.0, 1.0, tensorFlow.size(2)).view(1, 1, tensorFlow.size(2), 1).expand(tensorFlow.size(0), -1, -1, tensorFlow.size(3))
if cuda_flag:
Backward_tensorGrid[str(tensorFlow.size())] = torch.cat([ tensorHorizontal, tensorVertical ], 1).cuda()
else:
Backward_tensorGrid[str(tensorFlow.size())] = torch.cat([tensorHorizontal, tensorVertical], 1)
# end
tensorFlow = torch.cat([ tensorFlow[:, 0:1, :, :] / ((tensorInput.size(3) - 1.0) / 2.0), tensorFlow[:, 1:2, :, :] / ((tensorInput.size(2) - 1.0) / 2.0) ], 1)
return torch.nn.functional.grid_sample(input=tensorInput, grid=(Backward_tensorGrid[str(tensorFlow.size())] + tensorFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='border')
# end
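# Usage sketch (not in the original file): warping a frame with an all-zero flow
# keeps the sampling grid at the identity, so the output approximates the input.
#
#   frame = torch.rand(1, 3, 64, 64)
#   flow = torch.zeros(1, 2, 64, 64)
#   warped = Backward(frame, flow, cuda_flag=False)
#   torch.allclose(warped, frame, atol=1e-4)  # holds away from the borders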
class SpyNet(torch.nn.Module):
def __init__(self, cuda_flag):
super(SpyNet, self).__init__()
self.cuda_flag = cuda_flag
class Basic(torch.nn.Module):
def __init__(self, intLevel):
super(Basic, self).__init__()
self.moduleBasic = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3)
)
# end
def forward(self, tensorInput):
return self.moduleBasic(tensorInput)
self.moduleBasic = torch.nn.ModuleList([Basic(intLevel) for intLevel in range(4)])
self.load_state_dict(torch.load(SpyNet_model_dir + '/network-' + arguments_strModel + '.pytorch'), strict=False)
def forward(self, tensorFirst, tensorSecond):
tensorFirst = [tensorFirst]
tensorSecond = [tensorSecond]
for intLevel in range(3):
if tensorFirst[0].size(2) > 32 or tensorFirst[0].size(3) > 32:
tensorFirst.insert(0, torch.nn.functional.avg_pool2d(input=tensorFirst[0], kernel_size=2, stride=2))
tensorSecond.insert(0, torch.nn.functional.avg_pool2d(input=tensorSecond[0], kernel_size=2, stride=2))
tensorFlow = tensorFirst[0].new_zeros(tensorFirst[0].size(0), 2,
int(math.floor(tensorFirst[0].size(2) / 2.0)),
int(math.floor(tensorFirst[0].size(3) / 2.0)))
for intLevel in range(len(tensorFirst)):
tensorUpsampled = torch.nn.functional.interpolate(input=tensorFlow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
# if the sizes of upsampling and downsampling are not the same, apply zero-padding.
if tensorUpsampled.size(2) != tensorFirst[intLevel].size(2):
tensorUpsampled = torch.nn.functional.pad(input=tensorUpsampled, pad=[0, 0, 0, 1], mode='replicate')
if tensorUpsampled.size(3) != tensorFirst[intLevel].size(3):
tensorUpsampled = torch.nn.functional.pad(input=tensorUpsampled, pad=[0, 1, 0, 0], mode='replicate')
# input :[first picture of corresponding level,
# the output of w with input second picture of corresponding level and upsampling flow,
# upsampling flow]
            #             then we obtain the final flow by adding the predicted residual to the upsampled flow at this level (intLevel)
tensorFlow = self.moduleBasic[intLevel](torch.cat([tensorFirst[intLevel],
Backward(tensorInput=tensorSecond[intLevel],
tensorFlow=tensorUpsampled,
cuda_flag=self.cuda_flag),
tensorUpsampled], 1)) + tensorUpsampled
return tensorFlow
class warp(torch.nn.Module):
def __init__(self, h, w, cuda_flag):
super(warp, self).__init__()
self.height = h
self.width = w
if cuda_flag:
self.addterm = self.init_addterm().cuda()
else:
self.addterm = self.init_addterm()
def init_addterm(self):
n = torch.FloatTensor(list(range(self.width)))
        horizontal_term = n.expand((1, 1, self.height, self.width))  # the leading 1 is the batch-size dimension
n = torch.FloatTensor(list(range(self.height)))
vertical_term = n.expand((1, 1, self.width, self.height)).permute(0, 1, 3, 2)
addterm = torch.cat((horizontal_term, vertical_term), dim=1)
return addterm
def forward(self, frame, flow):
"""
:param frame: frame.shape (batch_size=1, n_channels=3, width=256, height=448)
:param flow: flow.shape (batch_size=1, n_channels=2, width=256, height=448)
:return: reference_frame: warped frame
"""
        flow = flow + self.addterm
        horizontal_flow = flow[0, 0, :, :].expand(1, 1, self.height, self.width)  # the leading 0 indexes the single batch element
vertical_flow = flow[0, 1, :, :].expand(1, 1, self.height, self.width)
horizontal_flow = horizontal_flow * 2 / (self.width - 1) - 1
vertical_flow = vertical_flow * 2 / (self.height - 1) - 1
flow = torch.cat((horizontal_flow, vertical_flow), dim=1)
flow = flow.permute(0, 2, 3, 1)
reference_frame = torch.nn.functional.grid_sample(frame, flow)
return reference_frame
class ResNet(torch.nn.Module):
"""
Three-layers ResNet/ResBlock
reference: https://blog.csdn.net/chenyuping333/article/details/82344334
"""
def __init__(self, task):
super(ResNet, self).__init__()
self.task = task
self.conv_3x2_64_9x9 = torch.nn.Conv2d(in_channels=3 * 2, out_channels=64, kernel_size=9, padding=8 // 2)
self.conv_3x7_64_9x9 = torch.nn.Conv2d(in_channels=3 * 7, out_channels=64, kernel_size=9, padding=8 // 2)
self.conv_64_64_9x9 = torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=9, padding=8 // 2)
self.conv_64_64_1x1 = torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1)
self.conv_64_3_1x1 = torch.nn.Conv2d(in_channels=64, out_channels=3, kernel_size=1)
def ResBlock(self, x, aver):
if self.task == 'interp':
x = torch.nn.functional.relu(self.conv_3x2_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_1x1(x))
elif self.task in ['denoise', 'denoising']:
x = torch.nn.functional.relu(self.conv_3x7_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_1x1(x))
elif self.task in ['sr', 'super-resolution']:
x = torch.nn.functional.relu(self.conv_3x7_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_1x1(x))
else:
raise NameError('Only support: [interp, denoise/denoising, sr/super-resolution]')
x = self.conv_64_3_1x1(x) + aver
return x
def forward(self, frames):
aver = frames.mean(dim=1)
x = frames[:, 0, :, :, :]
for i in range(1, frames.size(1)):
x = torch.cat((x, frames[:, i, :, :, :]), dim=1)
result = self.ResBlock(x, aver)
return result
class TOFlow(torch.nn.Module):
def __init__(self, h, w, task, cuda_flag):
super(TOFlow, self).__init__()
self.height = h
self.width = w
self.task = task
self.cuda_flag = cuda_flag
        self.SpyNet = SpyNet(cuda_flag=self.cuda_flag)  # SpyNet layer
# for param in self.SpyNet.parameters(): # fix
# param.requires_grad = False
self.warp = warp(self.height, self.width, cuda_flag=self.cuda_flag)
self.ResNet = ResNet(task=self.task)
# frames should be TensorFloat
def forward(self, frames):
"""
:param frames: [batch_size=1, img_num, n_channels=3, h, w]
:return:
"""
for i in range(frames.size(1)):
frames[:, i, :, :, :] = normalize(frames[:, i, :, :, :])
if self.cuda_flag:
opticalflows = torch.zeros(frames.size(0), frames.size(1), 2, frames.size(3), frames.size(4)).cuda()
warpframes = torch.empty(frames.size(0), frames.size(1), 3, frames.size(3), frames.size(4)).cuda()
else:
opticalflows = torch.zeros(frames.size(0), frames.size(1), 2, frames.size(3), frames.size(4))
warpframes = torch.empty(frames.size(0), frames.size(1), 3, frames.size(3), frames.size(4))
if self.task == 'interp':
process_index = [0, 1]
opticalflows[:, 1, :, :, :] = self.SpyNet(frames[:, 0, :, :, :], frames[:, 1, :, :, :]) / 2
opticalflows[:, 0, :, :, :] = self.SpyNet(frames[:, 1, :, :, :], frames[:, 0, :, :, :]) / 2
elif self.task in ['denoise', 'denoising', 'sr', 'super-resolution']:
process_index = [0, 1, 2, 4, 5, 6]
for i in process_index:
opticalflows[:, i, :, :, :] = self.SpyNet(frames[:, 3, :, :, :], frames[:, i, :, :, :])
warpframes[:, 3, :, :, :] = frames[:, 3, :, :, :]
else:
raise NameError('Only support: [interp, denoise/denoising, sr/super-resolution]')
for i in process_index:
warpframes[:, i, :, :, :] = self.warp(frames[:, i, :, :, :], opticalflows[:, i, :, :, :])
# warpframes: [batch_size=1, img_num=7, n_channels=3, height=256, width=448]
Img = self.ResNet(warpframes)
# Img: [batch_size=1, n_channels=3, h, w]
Img = denormalize(Img)
return Img
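# Usage sketch (not in the original file; assumes the SpyNet weights exist under
# ./models as configured above). For the 'interp' task the model expects two
# input frames and returns the interpolated middle frame:
#
#   net = TOFlow(h=256, w=448, task='interp', cuda_flag=False)
#   frames = torch.rand(1, 2, 3, 256, 448)   # [batch, img_num, channels, h, w]
#   middle = net(frames)                      # -> [1, 3, 256, 448]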
| 47.230126
| 186
| 0.589033
| 9,462
| 0.835423
| 0
| 0
| 0
| 0
| 0
| 0
| 1,519
| 0.134116
|
b01bbd168b9b732e58f788ff84aca342f6b50515
| 2,668
|
py
|
Python
|
storagetest/pkgs/ltp/acl/acl_test.py
|
liufeng-elva/storage-test2
|
5364cc00dbe71b106f1bb740bf391e6124788bf4
|
[
"MIT"
] | null | null | null |
storagetest/pkgs/ltp/acl/acl_test.py
|
liufeng-elva/storage-test2
|
5364cc00dbe71b106f1bb740bf391e6124788bf4
|
[
"MIT"
] | null | null | null |
storagetest/pkgs/ltp/acl/acl_test.py
|
liufeng-elva/storage-test2
|
5364cc00dbe71b106f1bb740bf391e6124788bf4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : acl_test.py
@Time : 2020/11/9 9:25
@Author: Tao.Xu
@Email : tao.xu2008@outlook.com
"""
import os
import unittest
from storagetest.libs import utils
from storagetest.libs.log import log
from storagetest.libs.exceptions import PlatformError, NoSuchDir, NoSuchBinary
logger = log.get_logger()
cur_dir = os.path.dirname(os.path.realpath(__file__))
bin_path = os.path.join(cur_dir, 'bin')
class AclXattr(object):
"""Test ACL and Extend Attribute on Linux system"""
def __init__(self, top_path):
self.top_path = top_path
def verify(self):
if os.name != "posix":
raise PlatformError("Just support for linux machine!")
if not os.path.isdir(self.top_path):
raise NoSuchDir(self.top_path)
try:
utils.run_cmd("which attr", expected_rc=0)
except Exception as e:
logger.error(e)
raise NoSuchBinary("attr, try install it.(apt-get install -y attr)")
def run(self, test_path):
"""cd <test_path>; ./tacl_xattr.sh """
logger.info(self.run.__doc__)
utils.mkdir_path(test_path)
acl_bin = os.path.join(bin_path, 'tacl_xattr.sh')
test_log = os.path.join(self.top_path, 'tacl_xattr.log')
acl_cmd = "rm -rf {0}/*; cd {0}; {1} | tee {2}".format(test_path, acl_bin, test_log)
try:
os.system('chmod +x {0}/*'.format(bin_path))
rc, output = utils.run_cmd(acl_cmd, expected_rc="ignore")
logger.info(output)
if rc != 0:
raise Exception("tacl_xattr.sh exit with !0")
if "FAILED:" in output:
raise Exception("FAIL: test acl_xattr on {}".format(test_path))
logger.info("PASS: test acl_xattr on {}".format(test_path))
except Exception as e:
logger.info("FAIL: test acl_xattr on {}".format(test_path))
raise e
finally:
pass
return True
def sanity(self):
self.verify()
test_path = os.path.join(self.top_path, "acl_attribute")
assert self.run(test_path)
return True
def stress(self):
self.verify()
test_path = os.path.join(self.top_path, "acl_attribute")
assert self.run(test_path)
return True
class UnitTestCase(unittest.TestCase):
def setUp(self) -> None:
self.acl = AclXattr("/mnt/test")
def test_01(self):
self.acl.sanity()
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(UnitTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| 29.644444
| 92
| 0.613943
| 2,045
| 0.766492
| 0
| 0
| 0
| 0
| 0
| 0
| 615
| 0.23051
|
b01ead4c68269eedb233e679f59c48eb110ed041
| 1,518
|
py
|
Python
|
recipes-bsp/b205/files/spihost_write_ftdi.py
|
tszucs/meta-ublox-tk1
|
8cb7c83d9a8b387fae4a4108a48e697d3e94df8e
|
[
"MIT"
] | null | null | null |
recipes-bsp/b205/files/spihost_write_ftdi.py
|
tszucs/meta-ublox-tk1
|
8cb7c83d9a8b387fae4a4108a48e697d3e94df8e
|
[
"MIT"
] | null | null | null |
recipes-bsp/b205/files/spihost_write_ftdi.py
|
tszucs/meta-ublox-tk1
|
8cb7c83d9a8b387fae4a4108a48e697d3e94df8e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys, getopt, os, time, array
from pyftdi.spi import SpiController
def download ( filename, speed=5000000, chunksize=32 ):
try:
with open(filename, 'rb') as filein:
data = filein.read ()
data = array.array('B', data).tolist()
except IOError:
print "ERROR: Could not open file {0}".format(os.path.basename(filename))
exit (1)
try:
spi = SpiController(silent_clock=True)
spi.configure(vendor=0x0403, product=0x6010, interface=2)
spi2mars = spi.get_port(cs=0)
spi2mars.set_frequency(speed)
time.sleep(1)
startTime = time.time()
i = 0
length = len(data)
while length > chunksize:
spi2mars.exchange(data[i:i+chunksize])
i += chunksize
length -= chunksize
spi2mars.exchange(data[i:])
stopTime = time.time()
print "File {0} dumped on SPI @{1}MHz ({2}bytes in {3} seconds)".format(os.path.basename(filename), speed/1.0e6, len(data), stopTime - startTime)
return (len(data), stopTime-startTime)
except Exception, e :
print "ERROR: SPI Write -", e
exit (1)
def main (argv):
print "SAF5100 SPI downloader"
try:
opts, args = getopt.getopt(argv, "hf:",["--firmware="])
except:
print "spi_host_write.py -f <firmware>"
sys.exit(2)
fname="/lib/firmware/cohda/SDRMK5Dual.bin".format(os.path.dirname(sys.argv[0]))
for opt, arg in opts:
if opt=="-h":
print "spi_host_write.py -f <firmware>"
elif opt=="-f":
fname = arg
print "Downloading {0}".format(fname)
download (fname)
if __name__ == "__main__":
main (sys.argv[1:])
| 24.483871
| 147
| 0.675889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 313
| 0.206192
|
b01eb11332b52c82d114e9890278450ea72e51d6
| 3,845
|
py
|
Python
|
PedestrianSlayer/MechanicalControl/NeuralNetwork.py
|
Viriliter/PedestrianSlayer
|
4adbdc3d0ed60408e6422cdba01f017701d05069
|
[
"MIT"
] | 2
|
2018-05-23T14:14:23.000Z
|
2018-12-03T21:08:37.000Z
|
PedestrianSlayer/MechanicalControl/NeuralNetwork.py
|
Viriliter/PedestrianSlayer
|
4adbdc3d0ed60408e6422cdba01f017701d05069
|
[
"MIT"
] | null | null | null |
PedestrianSlayer/MechanicalControl/NeuralNetwork.py
|
Viriliter/PedestrianSlayer
|
4adbdc3d0ed60408e6422cdba01f017701d05069
|
[
"MIT"
] | null | null | null |
import numpy as np
class NeuralNetwork():
'''
This neural network is designed for Pedestrian Car Project.
It constructs an input layer that gets 4 road and car parameters.
Using neural network algorithm, it gives optimum steering angle of the car.
To use this class:
-Firstly, using trainingIn() and trainingOut() functions, set the input
and output training values.
-Then, run train() function to estimate each synaptic weight.
-Finally, using getSteeringAngle() function get optimum steering
angle for car based on training input and outputs.
It is possible to adjust the normalized input values using threshold functions.
'''
    def __init__(self):
        self.training_inputs = np.array([0, 0, 0, 0])
        self.training_outputs = np.array([0]).T
        # The original called the threshold setters as free functions with a single
        # value; call them as methods, using the same value for both the lower and
        # upper bound (an assumption, since only one value was given).
        self.setRadiusThreshold(0.100, 0.100)
        self.setDeviationThreshold(0.100, 0.100)
        self.setSpeedThreshold(0.100, 0.100)
        self.setOrientThreshold(0.100, 0.100)
        self.synaptic_weights = None
def setRadiusThreshold(self,radiusB,radiusU):
self.radiusThreshB = radiusB
self.radiusThreshU = radiusU
def setDeviationThreshold(self,deviationB,deviationU):
self.deviatThreshB = deviationB
self.deviatThreshU = deviationU
def setSpeedThreshold(self,speedB,speedU):
self.speedThreshB = speedB
self.speedThreshU = speedU
def setOrientThreshold(self,orientB,orientU):
self.orientThreshB = orientB
self.orientThreshU = orientU
def normalizeParameter(self,radius,deviation,speed,orient):
'''
Normalize input parameters
'''
        if(self.radiusThreshB<radius<self.radiusThreshU):
nor_rad = radius*0.01
else:
nor_rad = 1
if(self.deviatThreshB<deviation<self.deviatThreshU):
nor_dev = deviation*0.01
else:
nor_dev = 1
if(self.speedThreshB<speed<self.speedThreshU):
nor_speed = speed*0.01
else:
nor_speed = 1
if(self.orientThreshB<orient<self.orientThreshU):
nor_orient = orient*0.01
else:
nor_orient = 1
return np.array([nor_rad,nor_dev,nor_speed,nor_orient])
def normalizeOutput(self,output):
'''
Normalize output parameter
'''
nor_output = output/90
return nor_output
def revertOutput(self,normal_out):
'''
Convert the normalized output to angle
'''
return normal_out*90
def trainingIn(self,radius,deviation,speed,orient):
'''
Joints new training input to existing one
'''
        self.training_inputs = np.vstack([self.training_inputs, self.normalizeParameter(radius, deviation, speed, orient)])
def trainingOut(self,angle):
'''
Joints new training output to existing one
'''
        self.training_outputs = np.append(self.training_outputs, self.normalizeOutput(angle))
def train(self):
'''
Calculates synaptic weights of each neuron
'''
np.random.seed(1)
        self.synaptic_weights = 2*np.random.random((4, 1))-1  # 4 input features: radius, deviation, speed, orient
for iteration in range(10000):
            output = 1/(1+np.exp(-np.dot(self.training_inputs, self.synaptic_weights)))
self.synaptic_weights += np.dot(self.training_inputs.T,(self.training_outputs-output)*output*(1-output))
def getSteeringAngle(self,radius,deviation,speed,orient):
'''
Uses sigmoid function to compute desired steering angle
Returns the angle
'''
        nor_param = self.normalizeParameter(radius, deviation, speed, orient)
        normal_out = (1/(1+np.exp(-np.dot(nor_param, self.synaptic_weights))))
        return self.revertOutput(normal_out)
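# Usage sketch (not in the original file), following the workflow described in the
# class docstring; all numeric values below are placeholders:
#
#   nn = NeuralNetwork()
#   nn.trainingIn(radius=50, deviation=2, speed=30, orient=5)
#   nn.trainingOut(angle=15)
#   nn.train()
#   angle = nn.getSteeringAngle(radius=40, deviation=1, speed=25, orient=3)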
| 34.954545
| 116
| 0.635111
| 3,824
| 0.994538
| 0
| 0
| 0
| 0
| 0
| 0
| 1,116
| 0.290247
|
b01f92f5f3f6a4f80aa7644a0330cdac5e27b92c
| 1,405
|
py
|
Python
|
tests/test_paramviewer.py
|
lnielsen/pyhf
|
3d98dc445c384d2919a77b9af0a202e12343a707
|
[
"Apache-2.0"
] | null | null | null |
tests/test_paramviewer.py
|
lnielsen/pyhf
|
3d98dc445c384d2919a77b9af0a202e12343a707
|
[
"Apache-2.0"
] | null | null | null |
tests/test_paramviewer.py
|
lnielsen/pyhf
|
3d98dc445c384d2919a77b9af0a202e12343a707
|
[
"Apache-2.0"
] | null | null | null |
import pyhf
from pyhf.parameters import ParamViewer
def test_paramviewer_simple_nonbatched(backend):
pars = pyhf.tensorlib.astensor([1, 2, 3, 4, 5, 6, 7])
parshape = pyhf.tensorlib.shape(pars)
view = ParamViewer(
parshape,
{'hello': {'slice': slice(0, 2)}, 'world': {'slice': slice(5, 7)}},
['hello', 'world'],
)
par_slice = view.get(pars)
assert pyhf.tensorlib.tolist(par_slice[view.slices[0]]) == [1, 2]
assert pyhf.tensorlib.tolist(par_slice[view.slices[1]]) == [6, 7]
assert pyhf.tensorlib.tolist(par_slice) == [1, 2, 6, 7]
def test_paramviewer_simple_batched(backend):
pars = pyhf.tensorlib.astensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
parshape = pyhf.tensorlib.shape(pars)
view = ParamViewer(
parshape,
{'hello': {'slice': slice(0, 2)}, 'world': {'slice': slice(3, 4)}},
['hello', 'world'],
)
par_slice = view.get(pars)
assert isinstance(view.index_selection, list)
assert all(
[len(x) == 3 for x in view.index_selection]
) # first dimension is batch dim
assert pyhf.tensorlib.shape(par_slice) == (3, 3)
assert pyhf.tensorlib.tolist(par_slice[view.slices[0]]) == [[1, 5, 9], [2, 6, 10]]
assert pyhf.tensorlib.tolist(par_slice[view.slices[1]]) == [[4, 8, 12]]
assert pyhf.tensorlib.tolist(par_slice) == [[1, 5, 9], [2, 6, 10], [4, 8, 12]]
| 31.222222
| 86
| 0.605694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.081139
|
b0204523055a99ef60f353c69bef13df582957e8
| 15,276
|
py
|
Python
|
library/modules/encoder_decoders/sequence_to_sequence.py
|
dangitstam/le-traducteur
|
499005ac198029fd2a7e7469fb250b8b3af6a619
|
[
"Apache-2.0"
] | 6
|
2018-10-23T10:05:55.000Z
|
2020-08-30T13:04:51.000Z
|
library/modules/encoder_decoders/sequence_to_sequence.py
|
dangitstam/le-traducteur
|
499005ac198029fd2a7e7469fb250b8b3af6a619
|
[
"Apache-2.0"
] | 1
|
2018-08-20T21:58:33.000Z
|
2020-12-29T17:44:04.000Z
|
library/modules/encoder_decoders/sequence_to_sequence.py
|
dangitstam/le-traducteur
|
499005ac198029fd2a7e7469fb250b8b3af6a619
|
[
"Apache-2.0"
] | 1
|
2022-03-26T05:13:38.000Z
|
2022-03-26T05:13:38.000Z
|
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.attention import BilinearAttention
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import InitializerApplicator, RegularizerApplicator, util
from overrides import overrides
# This is largely based on AllenNLP's general Seq2Seq encoder-decoder:
# https://github.com/allenai/allennlp/blob/master/allennlp/models/encoder_decoders/simple_seq2seq.py
#
# but offers more flexibility. Maybe I'll subclass this module when they've addressed their TODOs.
# TODO: Add more asserts so people don't do dumb shit
# TODO: Better docstrings.
@Model.register("sequence_to_sequence")
class SequenceToSequence(Model):
"""
Base class for sequence-to-sequence models.
"""
DECODERS = {"rnn": torch.nn.RNN, "lstm": torch.nn.LSTM, "gru": torch.nn.GRU}
def __init__(self,
                 # Vocabulary.
vocab: Vocabulary,
# Embeddings.
source_field_embedder: TextFieldEmbedder,
target_embedding_size: int,
# Encoders and Decoders.
encoder: Seq2SeqEncoder,
decoder_type: str,
output_projection_layer: FeedForward,
source_namespace: str = "source",
target_namespace: str = "target",
                 # Hyperparameters and flags.
decoder_attention_function: BilinearAttention = None,
decoder_is_bidirectional: bool = False,
decoder_num_layers: int = 1,
apply_attention: Optional[bool] = False,
max_decoding_steps: int = 100,
scheduled_sampling_ratio: float = 0.4,
# Logistical.
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
if encoder.get_input_dim() != source_field_embedder.get_output_dim():
raise ConfigurationError("The input dimension of the encoder must match the embedding"
"size of the source_field_embedder. Found {} and {}, respectively."
.format(encoder.get_input_dim(),
source_field_embedder.get_output_dim()))
if output_projection_layer.get_output_dim() != vocab.get_vocab_size(target_namespace):
raise ConfigurationError("The output dimension of the output_projection_layer must match the "
"size of the French vocabulary. Found {} and {}, "
"respectively.".format(output_projection_layer.get_output_dim(),
vocab.get_vocab_size(target_namespace)))
if decoder_type not in SequenceToSequence.DECODERS:
raise ConfigurationError("Unrecognized decoder option '{}'".format(decoder_type))
# For dealing with input.
self.source_vocab_size = vocab.get_vocab_size(source_namespace)
self.target_vocab_size = vocab.get_vocab_size(target_namespace)
self.source_field_embedder = source_field_embedder or TextFieldEmbedder()
self.encoder = encoder
# For dealing with / producing output.
self.target_vocab_size = vocab.get_vocab_size(target_namespace)
self.target_embedder = Embedding(self.target_vocab_size, target_embedding_size)
# Input size will either be the target embedding size or the target embedding size plus the
# encoder hidden size to attend on the input.
#
# When making a custom attention function that uses neither of those input sizes, you will
# have to define the decoder yourself.
decoder_input_size = target_embedding_size
if apply_attention:
decoder_input_size += encoder.get_output_dim()
# Hidden size of the encoder and decoder should match.
decoder_hidden_size = encoder.get_output_dim()
self.decoder = SequenceToSequence.DECODERS[decoder_type](
decoder_input_size,
decoder_hidden_size,
num_layers=decoder_num_layers,
batch_first=True,
bias=True,
bidirectional=decoder_is_bidirectional
)
self.output_projection_layer = output_projection_layer
self.apply_attention = apply_attention
self.decoder_attention_function = decoder_attention_function or BilinearAttention(
matrix_dim=encoder.get_output_dim(),
vector_dim=encoder.get_output_dim()
)
# Hyperparameters.
self._max_decoding_steps = max_decoding_steps
self._scheduled_sampling_ratio = scheduled_sampling_ratio
# Used for prepping the translation primer (initialization of the target word-level
# encoder's hidden state).
#
# If the decoder is an LSTM, both hidden states and cell states must be initialized.
# Also, hidden states that prime translation via this encoder must be duplicated
        # across the number of layers the decoder has.
self._decoder_is_lstm = isinstance(self.decoder, torch.nn.LSTM)
self._decoder_num_layers = decoder_num_layers
self._start_index = vocab.get_token_index(START_SYMBOL, target_namespace)
self._end_index = vocab.get_token_index(END_SYMBOL, target_namespace)
self._source_namespace = source_namespace
self._target_namespace = target_namespace
self._batch_size = None
initializer(self)
@overrides
def forward(self,
source: Dict[str, torch.LongTensor],
target: Dict[str, torch.LongTensor]) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
output_dict: dict = {}
source = self.preprocess_input(source)
# Embed and encode the source sequence.
source_sequence_encoded = self.encode_input(source)
source_mask = util.get_text_field_mask(source)
source_lengths = source_mask.sum(dim=-1)
source_encoded = torch.zeros_like(source_sequence_encoded[:, 0])
for i, length in enumerate(source_lengths):
source_encoded[i] = source_sequence_encoded[i, length - 1]
batch_size = source_encoded.size(0)
# Determine number of decoding steps. If training or computing validation, we decode
# target_seq_len times and compute loss.
if target:
target_tokens = target['tokens']
target_seq_len = target['tokens'].size(1)
num_decoding_steps = target_seq_len - 1
else:
            num_decoding_steps = self._max_decoding_steps
# Begin decoding the encoded source, swapping in predictions for ground truth at the
# scheduled sampling rate.
last_predictions = None
step_logits, step_probabilities, step_predictions = [], [], []
decoder_hidden = self.init_decoder_hidden_state(source_encoded)
for timestep in range(num_decoding_steps):
if self.training and torch.rand(1).item() >= self._scheduled_sampling_ratio:
input_choices = target_tokens[:, timestep]
else:
if timestep == 0: # Initialize decoding with the start token.
input_choices = source_mask.new_full((batch_size,),
fill_value=self._start_index)
else:
input_choices = last_predictions
decoder_input = self.prepare_decode_step_input(input_choices, decoder_hidden,
source_sequence_encoded, source_mask)
if len(decoder_input.shape) < 3:
decoder_input = decoder_input.unsqueeze(1)
_, decoder_hidden = self.decoder(decoder_input, decoder_hidden)
# Probability distribution for what the next decoded class should be.
output_projection = self.output_projection_layer(decoder_hidden[0][-1]
if self._decoder_is_lstm
else decoder_hidden[-1])
step_logits.append(output_projection.unsqueeze(1))
# Collect predicted classes and their probabilities.
class_probabilities = F.softmax(output_projection, dim=-1)
_, predicted_classes = torch.max(class_probabilities, 1)
step_probabilities.append(class_probabilities.unsqueeze(1))
step_predictions.append(predicted_classes.unsqueeze(1))
last_predictions = predicted_classes
try:
logits = torch.cat(step_logits, 1)
except:
import pdb; pdb.set_trace()
class_probabilities = torch.cat(step_probabilities, 1)
all_predictions = torch.cat(step_predictions, 1)
output_dict = {"logits": logits,
"class_probabilities": class_probabilities,
"predictions": all_predictions}
if target:
target_mask = util.get_text_field_mask(target)
relevant_targets = target['tokens'][:, 1:].contiguous()
relevant_mask = target_mask[:, 1:].contiguous()
loss = util.sequence_cross_entropy_with_logits(logits, relevant_targets, relevant_mask)
output_dict["loss"] = loss
return output_dict
def preprocess_input(self, source: Dict[str, torch.LongTensor]) -> Dict[str, torch.Tensor]:
"""
Perform any preprocessing on the input text field you like; returns the source unchanged
by default.
"""
# pylint: disable=R0201
return source
def encode_input(self, source: Dict[str, torch.LongTensor]) -> Tuple[torch.FloatTensor,
torch.FloatTensor]:
"""
Encode the source utterance how you see fit, as long as you return a tuple of
tensors.
By default, embeds the source utterance and feeds it to the source encoder.
Note that when subclassing this module, the decoder_hidden_size should be the same as
the encoder's hidden size.
Required shapes: (batch_size, sequence_length, decoder_hidden_size)
"""
source_sequence_embedded = self.source_field_embedder(source)
source_sequence_mask = util.get_text_field_mask(source)
encoded_source_sequence = self.encoder(source_sequence_embedded, source_sequence_mask)
return encoded_source_sequence
def init_decoder_hidden_state(self, source_sequence_encoded: torch.FloatTensor) -> torch.FloatTensor:
"""
Prep the hidden state initialization of the word-level Target decoder any way
you like.
By default, uses only the final hidden state of the encoded source.
Required shape: (batch_size, num_decoder_layers, encoder_hidden_size)
"""
decoder_primer = source_sequence_encoded.unsqueeze(0)
decoder_primer = decoder_primer.expand(
self._decoder_num_layers, -1, self.encoder.get_output_dim()
).contiguous()
# If the decoder is an LSTM, we need to initialize a cell state.
if self._decoder_is_lstm:
decoder_primer = (decoder_primer, torch.zeros_like(decoder_primer))
return decoder_primer
def prepare_decode_step_input(self,
input_indices: torch.LongTensor,
decoder_hidden: torch.LongTensor,
encoder_outputs: torch.LongTensor,
encoder_outputs_mask: torch.LongTensor) -> torch.LongTensor:
"""
Prepares the current timestep input for the decoder.
By default, simply embeds and returns the input. If using attention, the default attention
(BiLinearAttention) is applied to attend on the step input given the encoded source
sequence and the previous hidden state.
Parameters:
-----------
input_indices : torch.LongTensor
Indices of either the gold inputs to the decoder or the predicted labels from the
previous timestep.
decoder_hidden : torch.LongTensor, optional (not needed if no attention)
Output from the decoder at the last time step. Needed only if using attention.
encoder_outputs : torch.LongTensor, optional (not needed if no attention)
Encoder outputs from all time steps. Needed only if using attention.
encoder_outputs_mask : torch.LongTensor, optional (not needed if no attention)
Masks on encoder outputs. Needed only if using attention.
"""
# input_indices : (batch_size,) since we are processing these one timestep at a time.
# (batch_size, target_embedding_dim)
embedded_input = self.target_embedder(input_indices)
if self.apply_attention:
if isinstance(decoder_hidden, tuple):
decoder_hidden = decoder_hidden[0]
# encoder_outputs : (batch_size, input_sequence_length, encoder_output_dim)
# Ensuring mask is also a FloatTensor. Or else the multiplication within attention will
# complain.
encoder_outputs_mask = encoder_outputs_mask.float()
# (batch_size, input_sequence_length)
input_weights = self.decoder_attention_function(decoder_hidden[-1], encoder_outputs,
encoder_outputs_mask)
# (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, input_weights)
# (batch_size, encoder_output_dim + target_embedding_dim)
return torch.cat((attended_input, embedded_input), -1)
else:
return embedded_input
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, np.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
indices = list(indices)
# Collect indices till the first END_SYMBOL.
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace)
for x in indices]
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
| 48.805112
| 106
| 0.645719
| 14,276
| 0.934538
| 0
| 0
| 14,316
| 0.937156
| 0
| 0
| 4,518
| 0.295758
|
b021c9112da0b09c0383564d4213787ef0cf3187
| 1,372
|
py
|
Python
|
hrv/filters.py
|
LegrandNico/hrv
|
35cdd1b7ddf8afdebf2db91f982b256c3b9dbf67
|
[
"BSD-3-Clause"
] | 1
|
2020-01-06T20:08:04.000Z
|
2020-01-06T20:08:04.000Z
|
hrv/filters.py
|
LegrandNico/hrv
|
35cdd1b7ddf8afdebf2db91f982b256c3b9dbf67
|
[
"BSD-3-Clause"
] | null | null | null |
hrv/filters.py
|
LegrandNico/hrv
|
35cdd1b7ddf8afdebf2db91f982b256c3b9dbf67
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from hrv.rri import RRi
from hrv.utils import _create_time_info
def quotient(rri):
# TODO: Receive option to replaced outliers with stats
# functions (i.e mean, median etc)
# TODO: Receive option to re-create time array with cumsum of filtered rri
if isinstance(rri, RRi):
rri_time = rri.time
rri = rri.values
else:
rri = np.array(rri)
rri_time = _create_time_info(rri)
L = len(rri) - 1
indices = np.where(
(rri[:L-1]/rri[1:L] < 0.8) | (rri[:L-1]/rri[1:L] > 1.2) |
(rri[1:L]/rri[:L-1] < 0.8) | (rri[1:L]/rri[:L-1] > 1.2)
)
rri_filt, time_filt = np.delete(rri, indices), np.delete(rri_time, indices)
return RRi(rri_filt, time_filt)
def moving_average(rri, order=3):
return _moving_function(rri, order, np.mean)
def moving_median(rri, order=3):
return _moving_function(rri, order, np.median)
def _moving_function(rri, order, func):
if isinstance(rri, RRi):
rri_time = rri.time
rri = rri.values
else:
rri_time = _create_time_info(rri)
offset = int(order / 2)
# TODO: Implemente copy method for RRi class
filt_rri = np.array(rri.copy(), dtype=np.float64)
for i in range(offset, len(rri) - offset, 1):
filt_rri[i] = func(rri[i-offset:i+offset+1])
return RRi(filt_rri, rri_time)
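# A minimal usage sketch (not part of the original module): filtering a short,
# artificial RR-interval series; the sample values are illustrative only.
if __name__ == "__main__":
    sample_rri = [810, 800, 795, 1400, 790, 805, 800, 798, 802, 799]
    print(quotient(sample_rri))               # drops beats breaking the 0.8-1.2 ratio rule
    print(moving_median(sample_rri, order=3))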
| 25.886792
| 79
| 0.626093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 206
| 0.150146
|
b02365bd68f389ec1ac4453e0ddfb053b1f457d4
| 20,428
|
py
|
Python
|
PVPlugins/PVGeo_UBC_Tools.py
|
jkulesza/PVGeo
|
c7bdbad5e5e5579033e1b00605d680b67252b3f4
|
[
"BSD-3-Clause"
] | 1
|
2020-06-09T16:49:28.000Z
|
2020-06-09T16:49:28.000Z
|
PVPlugins/PVGeo_UBC_Tools.py
|
jkulesza/PVGeo
|
c7bdbad5e5e5579033e1b00605d680b67252b3f4
|
[
"BSD-3-Clause"
] | null | null | null |
PVPlugins/PVGeo_UBC_Tools.py
|
jkulesza/PVGeo
|
c7bdbad5e5e5579033e1b00605d680b67252b3f4
|
[
"BSD-3-Clause"
] | null | null | null |
paraview_plugin_version = '1.1.39'
# This is module to import. It provides VTKPythonAlgorithmBase, the base class
# for all python-based vtkAlgorithm subclasses in VTK and decorators used to
# 'register' the algorithm with ParaView along with information about UI.
from paraview.util.vtkAlgorithm import *
# Helpers:
from PVGeo import _helpers
# Classes to Decorate
from PVGeo.ubc import *
#### GLOBAL VARIABLES ####
MENU_CAT = 'PVGeo: UBC Mesh Tools'
@smproxy.reader(name="PVGeoTensorMeshReader",
label='PVGeo: %s'%TensorMeshReader.__displayname__,
extensions=TensorMeshReader.extensions,
file_description=TensorMeshReader.description)
@smhint.xml('''<RepresentationType view="RenderView" type="Surface With Edges" />''')
class PVGeoTensorMeshReader(TensorMeshReader):
def __init__(self):
TensorMeshReader.__init__(self)
    #### Setters and Getters ####
@smproperty.xml('''
<StringVectorProperty
panel_visibility="advanced"
name="MeshFile"
label="File Name Mesh"
command="SetMeshFileName"
animateable="1"
clean_command="ClearMesh"
number_of_elements="1">
<FileListDomain name="meshfile"/>
<Documentation>This is the mesh file for a 2D or 3D UBC Mesh grid. This plugin only allows ONE mesh to be defined.</Documentation>
</StringVectorProperty>''')
def SetMeshFileName(self, fname):
TensorMeshReader.SetMeshFileName(self, fname)
@smproperty.xml('''
<StringVectorProperty
panel_visibility="default"
name="ModelFiles"
label="File Name(s) Model"
command="AddModelFileName"
animateable="1"
repeat_command="1"
clean_command="ClearModels"
number_of_elements="1">
<FileListDomain name="modelfiles"/>
            <Documentation>This is for a single set of model files to append to the mesh as time-varying data attributes. You can choose as many files as you would like for the given attribute.</Documentation>
</StringVectorProperty>''')
def AddModelFileName(self, fname):
"""Use to set the file names for the reader. Handles singlt string or list of strings."""
TensorMeshReader.AddModelFileName(self, fname)
@smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
def SetTimeDelta(self, dt):
TensorMeshReader.SetTimeDelta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def GetTimestepValues(self):
"""This is critical for registering the timesteps"""
return TensorMeshReader.GetTimestepValues(self)
    @smproperty.xml(_helpers.getPropertyXml(name='Use extension name', command='SetUseExtensionAsName', default_values=True, help='A boolean to override the DataName and use the model file extension as the data name.',
panel_visibility="advanced"))
def SetUseExtensionAsName(self, flag):
TensorMeshReader.SetUseExtensionAsName(self, flag)
@smproperty.stringvector(name='DataName', default_values='Data', panel_visibility="advanced")
def SetDataName(self, name):
TensorMeshReader.SetDataName(self, name)
@smproxy.filter(name="PVGeoTensorMeshAppender",
label=TensorMeshAppender.__displayname__)
@smhint.xml('''<ShowInMenu category="%s"/>
<RepresentationType view="RenderView" type="Surface" />''' % MENU_CAT)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkRectilinearGrid"], composite_data_supported=False)
class PVGeoTensorMeshAppender(TensorMeshAppender):
"""This assumes the input vtkRectilinearGrid has already handled the timesteps"""
def __init__(self):
TensorMeshAppender.__init__(self)
@smproperty.xml('''
<StringVectorProperty
panel_visibility="default"
name="ModelFiles"
label="File Name(s) Model"
command="AddModelFileName"
animateable="1"
repeat_command="1"
clean_command="ClearModels"
number_of_elements="1">
<FileListDomain name="modelfiles"/>
            <Documentation>This is for a single set of model files to append to the mesh as time-varying data attributes. You can choose as many files as you would like for the given attribute.</Documentation>
</StringVectorProperty>''')
def AddModelFileName(self, fname):
"""Use to set the file names for the reader. Handles single string or list of strings."""
TensorMeshAppender.AddModelFileName(self, fname)
    @smproperty.xml(_helpers.getPropertyXml(name='Use extension name', command='SetUseExtensionAsName', default_values=True, help='A boolean to override the DataName and use the model file extension as the data name.',
panel_visibility="advanced"))
def SetUseExtensionAsName(self, flag):
TensorMeshAppender.SetUseExtensionAsName(self, flag)
@smproperty.stringvector(name='DataName', default_values='Appended Data', panel_visibility="advanced")
def SetDataName(self, name):
TensorMeshAppender.SetDataName(self, name)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def GetTimestepValues(self):
"""This is critical for registering the timesteps"""
return TensorMeshAppender.GetTimestepValues(self)
@smproxy.filter(name="PVGeoTopoMeshAppender",
label=TopoMeshAppender.__displayname__)
@smhint.xml('''<ShowInMenu category="%s"/>
<RepresentationType view="RenderView" type="Surface" />''' % MENU_CAT)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkRectilinearGrid"], composite_data_supported=False)
class PVGeoTopoMeshAppender(TopoMeshAppender):
"""This assumes the input vtkRectilinearGrid has already handled the timesteps"""
def __init__(self):
TopoMeshAppender.__init__(self)
@smproperty.xml('''
<StringVectorProperty
panel_visibility="advanced"
name="TopoFile"
label="File Name Topo"
command="SetTopoFileName"
animateable="1"
clean_command="ClearTopoFile"
number_of_elements="1">
<FileListDomain name="topofile"/>
<Documentation>This plugin only allows ONE topo file to be defined.</Documentation>
</StringVectorProperty>''')
def SetTopoFileName(self, fname):
TopoMeshAppender.SetTopoFileName(self, fname)
#------------------------------------------------------------------------------
# Read OcTree Mesh
#------------------------------------------------------------------------------
@smproxy.reader(name="PVGeoUBCOcTreeMeshReader",
label='PVGeo: %s'%OcTreeReader.__displayname__,
extensions=OcTreeReader.extensions,
file_description=OcTreeReader.description)
@smhint.xml('''<RepresentationType view="RenderView" type="Surface With Edges" />''')
class PVGeoUBCOcTreeMeshReader(OcTreeReader):
def __init__(self):
OcTreeReader.__init__(self)
    #### Setters and Getters ####
@smproperty.xml('''
<StringVectorProperty
panel_visibility="advanced"
name="MeshFile"
label="File Name Mesh"
command="SetMeshFileName"
animateable="1"
clean_command="ClearMesh"
number_of_elements="1">
<FileListDomain name="meshfile"/>
          <Documentation>This is the mesh file for an OcTree Mesh grid. This plugin only allows ONE mesh to be defined.</Documentation>
</StringVectorProperty>''')
def SetMeshFileName(self, fname):
OcTreeReader.SetMeshFileName(self, fname)
@smproperty.xml('''
<StringVectorProperty
panel_visibility="default"
name="ModelFiles"
label="File Name(s) Model"
command="AddModelFileName"
animateable="1"
repeat_command="1"
clean_command="ClearModels"
number_of_elements="1">
<FileListDomain name="modelfiles"/>
          <Documentation>This is for a single set of model files to append to the mesh as time-varying data attributes. You can choose as many files as you would like for the given attribute.</Documentation>
</StringVectorProperty>''')
def AddModelFileName(self, fname):
"""Use to set the file names for the reader. Handles singlt string or list of strings."""
OcTreeReader.AddModelFileName(self, fname)
@smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
def SetTimeDelta(self, dt):
OcTreeReader.SetTimeDelta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def GetTimestepValues(self):
"""This is critical for registering the timesteps"""
return OcTreeReader.GetTimestepValues(self)
    @smproperty.xml(_helpers.getPropertyXml(name='Use extension name', command='SetUseExtensionAsName', default_values=True, help='A boolean to override the DataName and use the model file extension as the data name.',
panel_visibility="advanced"))
def SetUseExtensionAsName(self, flag):
OcTreeReader.SetUseExtensionAsName(self, flag)
@smproperty.stringvector(name='DataName', default_values='Data', panel_visibility="advanced")
def SetDataName(self, name):
OcTreeReader.SetDataName(self, name)
@smproxy.filter(name="PVGeoOcTreeAppender",
label=OcTreeAppender.__displayname__)
@smhint.xml('''<ShowInMenu category="%s"/>
<RepresentationType view="RenderView" type="Surface With Edges" />''' % MENU_CAT)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkUnstructuredGrid"], composite_data_supported=False)
class PVGeoOcTreeAppender(OcTreeAppender):
"""This assumes the input vtkUnstructuredGrid has already handled the timesteps"""
def __init__(self):
OcTreeAppender.__init__(self)
@smproperty.xml('''
<StringVectorProperty
panel_visibility="default"
name="ModelFiles"
label="File Name(s) Model"
command="AddModelFileName"
animateable="1"
repeat_command="1"
clean_command="ClearModels"
number_of_elements="1">
<FileListDomain name="modelfiles"/>
          <Documentation>This is for a single set of model files to append to the mesh as time-varying data attributes. You can choose as many files as you would like for the given attribute.</Documentation>
</StringVectorProperty>''')
def AddModelFileName(self, fname):
"""Use to set the file names for the reader. Handles single string or list of strings."""
OcTreeAppender.AddModelFileName(self, fname)
    @smproperty.xml(_helpers.getPropertyXml(name='Use extension name', command='SetUseExtensionAsName', default_values=True, help='A boolean to override the DataName and use the model file extension as the data name.',
panel_visibility="advanced"))
def SetUseExtensionAsName(self, flag):
OcTreeAppender.SetUseExtensionAsName(self, flag)
@smproperty.stringvector(name='DataName', default_values='Appended Data', panel_visibility="advanced")
def SetDataName(self, name):
OcTreeAppender.SetDataName(self, name)
#------------------------------------------------------------------------------
# Write Tensor Mesh
#------------------------------------------------------------------------------
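# Both writers below export to the UBC Tensor Mesh format: one takes vtkRectilinearGrid
# input, the other vtkImageData (uniformly spaced grids).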
@smproxy.writer(extensions="msh", file_description="UBC Tensor Mesh", support_reload=False)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkRectilinearGrid"], composite_data_supported=True)
class PVGeoWriteRectilinearGridToUBC(WriteRectilinearGridToUBC):
def __init__(self):
WriteRectilinearGridToUBC.__init__(self)
@smproperty.stringvector(name="FileName", panel_visibility="never")
@smdomain.filelist()
def SetFileName(self, fname):
"""Specify filename for the file to write."""
WriteRectilinearGridToUBC.SetFileName(self, fname)
@smproperty.stringvector(name="Format", default_values='%.9e')
def SetFormat(self, fmt):
"""Use to set the ASCII format for the writer default is ``'%.9e'``"""
WriteRectilinearGridToUBC.SetFormat(self, fmt)
@smproxy.writer(extensions="msh", file_description="UBC Tensor Mesh", support_reload=False)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkImageData"], composite_data_supported=True)
class PVGeoWriteImageDataToUBC(WriteImageDataToUBC):
def __init__(self):
WriteImageDataToUBC.__init__(self)
@smproperty.stringvector(name="FileName", panel_visibility="never")
@smdomain.filelist()
def SetFileName(self, fname):
"""Specify filename for the file to write."""
WriteImageDataToUBC.SetFileName(self, fname)
@smproperty.stringvector(name="Format", default_values='%.9e')
def SetFormat(self, fmt):
"""Use to set the ASCII format for the writer default is ``'%.9e'``"""
WriteImageDataToUBC.SetFormat(self, fmt)
###############################################################################
@smproxy.reader(name="PVGeoTopoReader",
label='PVGeo: %s'%TopoReader.__displayname__,
extensions=TopoReader.extensions,
file_description=TopoReader.description)
class PVGeoTopoReader(TopoReader):
def __init__(self):
TopoReader.__init__(self)
    #### Setters and Getters ####
@smproperty.xml(_helpers.getFileReaderXml(TopoReader.extensions, readerDescription=TopoReader.description))
def AddFileName(self, fname):
TopoReader.AddFileName(self, fname)
@smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
def SetTimeDelta(self, dt):
TopoReader.SetTimeDelta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def GetTimestepValues(self):
"""This is critical for registering the timesteps"""
return TopoReader.GetTimestepValues(self)
@smproperty.intvector(name="SkipRows", default_values=0, panel_visibility="advanced")
def SetSkipRows(self, skip):
TopoReader.SetSkipRows(self, skip)
@smproperty.stringvector(name="Comments", default_values="!", panel_visibility="advanced")
def SetComments(self, identifier):
TopoReader.SetComments(self, identifier)
###############################################################################
@smproxy.reader(name="PVGeoGravObsReader",
label='PVGeo: %s'%GravObsReader.__displayname__,
extensions=GravObsReader.extensions,
file_description=GravObsReader.description)
class PVGeoGravObsReader(GravObsReader):
def __init__(self):
GravObsReader.__init__(self)
    #### Setters and Getters ####
@smproperty.xml(_helpers.getFileReaderXml(GravObsReader.extensions, readerDescription=GravObsReader.description))
def AddFileName(self, fname):
GravObsReader.AddFileName(self, fname)
@smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
def SetTimeDelta(self, dt):
GravObsReader.SetTimeDelta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def GetTimestepValues(self):
"""This is critical for registering the timesteps"""
return GravObsReader.GetTimestepValues(self)
@smproperty.intvector(name="SkipRows", default_values=0, panel_visibility="advanced")
def SetSkipRows(self, skip):
GravObsReader.SetSkipRows(self, skip)
@smproperty.stringvector(name="Comments", default_values="!", panel_visibility="advanced")
def SetComments(self, identifier):
GravObsReader.SetComments(self, identifier)
###############################################################################
@smproxy.reader(name="PVGeoGravGradReader",
label='PVGeo: %s'%GravGradReader.__displayname__,
extensions=GravGradReader.extensions,
file_description=GravGradReader.description)
class PVGeoGravGradReader(GravGradReader):
def __init__(self):
GravGradReader.__init__(self)
    #### Setters and Getters ####
@smproperty.xml(_helpers.getFileReaderXml(GravGradReader.extensions, readerDescription=GravGradReader.description))
def AddFileName(self, fname):
GravGradReader.AddFileName(self, fname)
@smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
def SetTimeDelta(self, dt):
GravGradReader.SetTimeDelta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def GetTimestepValues(self):
"""This is critical for registering the timesteps"""
return GravGradReader.GetTimestepValues(self)
@smproperty.intvector(name="SkipRows", default_values=0, panel_visibility="advanced")
def SetSkipRows(self, skip):
GravGradReader.SetSkipRows(self, skip)
@smproperty.stringvector(name="Comments", default_values="!", panel_visibility="advanced")
def SetComments(self, identifier):
GravGradReader.SetComments(self, identifier)
###############################################################################
@smproxy.reader(name="PVGeoMagObsReader",
label='PVGeo: %s'%MagObsReader.__displayname__,
extensions=MagObsReader.extensions,
file_description=MagObsReader.description)
class PVGeoMagObsReader(MagObsReader):
def __init__(self):
MagObsReader.__init__(self)
    #### Setters and Getters ####
@smproperty.xml(_helpers.getFileReaderXml(MagObsReader.extensions, readerDescription=MagObsReader.description))
def AddFileName(self, fname):
MagObsReader.AddFileName(self, fname)
@smproperty.doublevector(name="TimeDelta", default_values=1.0, panel_visibility="advanced")
def SetTimeDelta(self, dt):
MagObsReader.SetTimeDelta(self, dt)
@smproperty.doublevector(name="TimestepValues", information_only="1", si_class="vtkSITimeStepsProperty")
def GetTimestepValues(self):
"""This is critical for registering the timesteps"""
return MagObsReader.GetTimestepValues(self)
@smproperty.intvector(name="SkipRows", default_values=0, panel_visibility="advanced")
def SetSkipRows(self, skip):
MagObsReader.SetSkipRows(self, skip)
@smproperty.stringvector(name="Comments", default_values="!", panel_visibility="advanced")
def SetComments(self, identifier):
MagObsReader.SetComments(self, identifier)
###############################################################################
@smproxy.filter(name='PVGeoGeologyMapper', label=GeologyMapper.__displayname__)
@smhint.xml('''<ShowInMenu category="%s"/>
<RepresentationType view="RenderView" type="Surface" />''' % MENU_CAT)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkDataObject"], composite_data_supported=False)
class PVGeoGeologyMapper(GeologyMapper):
def __init__(self):
GeologyMapper.__init__(self)
#### SETTERS AND GETTERS ####
@smproperty.xml(_helpers.getInputArrayXml(nInputPorts=1, numArrays=1))
def SetInputArrayToProcess(self, idx, port, connection, field, name):
return GeologyMapper.SetInputArrayToProcess(self, idx, port, connection, field, name)
@smproperty.xml('''
<StringVectorProperty
panel_visibility="default"
name="FileName"
label="File Name"
command="SetFileName"
animateable="1"
number_of_elements="1">
<FileListDomain name="filename"/>
          <Documentation>This is the file containing the mapping definitions.</Documentation>
</StringVectorProperty>''')
def SetFileName(self, fname):
GeologyMapper.SetFileName(self, fname)
@smproperty.stringvector(name="Delimiter", default_values=",", panel_visibility="advanced")
def SetDelimiter(self, identifier):
GeologyMapper.SetDelimiter(self, identifier)
###############################################################################
| 42.558333
| 221
| 0.684012
| 15,938
| 0.780204
| 0
| 0
| 19,077
| 0.933865
| 0
| 0
| 9,153
| 0.448061
|
b023ba4b1780ce639f98fb2247c460ffe792c1f6
| 20,333
|
py
|
Python
|
tests/rewards_tree/test_rewards_flow.py
|
shuklaayush/badger-system
|
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
|
[
"MIT"
] | 99
|
2020-12-02T08:40:48.000Z
|
2022-03-15T05:21:06.000Z
|
tests/rewards_tree/test_rewards_flow.py
|
shuklaayush/badger-system
|
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
|
[
"MIT"
] | 115
|
2020-12-15T07:15:39.000Z
|
2022-03-28T22:21:03.000Z
|
tests/rewards_tree/test_rewards_flow.py
|
shuklaayush/badger-system
|
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
|
[
"MIT"
] | 56
|
2020-12-11T06:50:04.000Z
|
2022-02-21T09:17:38.000Z
|
import json
import secrets
import brownie
from dotmap import DotMap
import pytest
import pprint
from brownie import *
from helpers.constants import *
from helpers.registry import registry
from rich.console import Console
FARM_ADDRESS = "0xa0246c9032bC3A600820415aE600c6388619A14D"
XSUSHI_ADDRESS = "0x8798249c2E607446EfB7Ad49eC89dD1865Ff4272"
SECS_PER_HOUR = 3600
SECS_PER_DAY = 86400
console = Console()
@pytest.fixture(scope="function", autouse="True")
def setup():
from assistant.rewards import rewards_assistant
return rewards_assistant
# @pytest.fixture(scope="function")
# def setup_badger(badger_tree_unit):
# return badger_tree_unit
def random_32_bytes():
return "0x" + secrets.token_hex(32)
# generates merkle root purely off dummy data
def internal_generate_rewards_in_range(
rewards_assistant, currentMerkleData, newRewards, startBlock, endBlock, pastRewards
):
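    # Merge the new dummy rewards into the cumulative past rewards, build a merkle tree
    # for the [startBlock, endBlock] window, and return its root hash and content file name.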
cumulativeRewards = rewards_assistant.process_cumulative_rewards(
pastRewards, newRewards
)
# Take metadata from geyserRewards
console.print("Processing to merkle tree")
merkleTree = rewards_assistant.rewards_to_merkle_tree(
cumulativeRewards, startBlock, endBlock, newRewards
)
# Publish data
rootHash = rewards_assistant.hash(merkleTree["merkleRoot"])
contentFileName = rewards_assistant.content_hash_to_filename(rootHash)
console.log(
{
"merkleRoot": merkleTree["merkleRoot"],
"rootHash": str(rootHash),
"contentFile": contentFileName,
"startBlock": startBlock,
"endBlock": endBlock,
"currentContentHash": currentMerkleData["contentHash"],
}
)
return {
"contentFileName": contentFileName,
"merkleTree": merkleTree,
"rootHash": rootHash,
}
# @pytest.mark.skip()
def test_rewards_flow(setup):
rewards_assistant = setup
badgerTree = rewards_assistant.BadgerTree
guardian = rewards_assistant.guardian
rootUpdater = rewards_assistant.rootUpdater
admin, proposer, validator, user = accounts[:4]
rewardsContract = admin.deploy(badgerTree)
rewardsContract.initialize(admin, proposer, validator)
# Propose root
root = random_32_bytes()
contentHash = random_32_bytes()
startBlock = rewardsContract.lastPublishEndBlock() + 1
# Test variations of invalid data upload and verify revert string
with brownie.reverts("Incorrect cycle"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": proposer},
)
with brownie.reverts("Incorrect cycle"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 2,
startBlock,
startBlock + 1,
{"from": proposer},
)
with brownie.reverts("Incorrect start block"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
rewardsContract.lastPublishEndBlock() + 2,
startBlock + 1,
{"from": proposer},
)
with brownie.reverts("Incorrect start block"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
rewardsContract.lastPublishEndBlock(),
startBlock + 1,
{"from": proposer},
)
# Ensure event
tx = rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
startBlock,
startBlock + 1,
{"from": proposer},
)
assert "RootProposed" in tx.events.keys()
# Approve root
# Test variations of invalid data upload and verify revert string
with brownie.reverts("Incorrect root"):
rewardsContract.approveRoot(
random_32_bytes(),
contentHash,
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect content hash"):
rewardsContract.approveRoot(
root,
random_32_bytes(),
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.currentCycle() + 2,
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle start block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock + 1,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle start block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock - 1,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle end block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock + 9,
{"from": validator},
)
with brownie.reverts("Incorrect cycle end block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock + 11,
{"from": validator},
)
with brownie.reverts("Incorrect cycle end block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock,
{"from": validator},
)
# Ensure event
tx = rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
assert "RootUpdated" in tx.events.keys()
with brownie.reverts("Incorrect start block"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
rewardsContract.lastPublishStartBlock() + 1,
startBlock + 1,
{"from": proposer},
)
# Claim as a user
rewardsContract = admin.deploy(badgerTree)
rewardsContract.initialize(admin, proposer, validator)
startBlock = rewardsContract.lastPublishEndBlock() + 1
endBlock = startBlock + 5
currCycle = rewardsContract.currentCycle()
nextCycle = currCycle + 1
currentRoot = rewardsContract.merkleRoot()
# Update to new root with xSushi and FARM
farmClaim = 100000000000
xSushiClaim = 5555555555
geyserRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {
user.address: {FARM_ADDRESS: farmClaim, XSUSHI_ADDRESS: xSushiClaim},
accounts[5].address: {FARM_ADDRESS: 100, XSUSHI_ADDRESS: 100},
accounts[6].address: {FARM_ADDRESS: 100, XSUSHI_ADDRESS: 100},
},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS],
"cycle": nextCycle,
}
)
pastRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS],
"cycle": currCycle,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": currentRoot},
geyserRewards,
startBlock,
endBlock,
pastRewards,
)
rewardsContract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewardsContract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
# Claim as user who has xSushi and FARM
# This revert message means the claim was valid and it tried to transfer rewards
# it can't actually transfer any with this setup
with brownie.reverts("ERC20: transfer amount exceeds balance"):
rewardsContract.claim(
[FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
[farmClaim, xSushiClaim],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[farmClaim, xSushiClaim],
{"from": user},
)
# Ensure tokens are as expected
# farmBalance = Contract.at("0xa0246c9032bC3A600820415aE600c6388619A14D").balanceOf(user)
# assert farmClaim == farmBalance
# Claim partial as a user
with brownie.reverts("ERC20: transfer amount exceeds balance"):
rewardsContract.claim(
[FARM_ADDRESS, XSUSHI_ADDRESS],
[farmClaim, xSushiClaim],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[farmClaim - 100, xSushiClaim - 100],
{"from": user},
)
# Claim with MockToken and confirm new balance
mockToken = rewards_assistant.MockToken
mockContract = admin.deploy(mockToken)
mockContract.initialize([rewardsContract], [100000000])
startBlock = rewardsContract.lastPublishEndBlock() + 1
endBlock = startBlock + 5
currCycle = rewardsContract.currentCycle()
nextCycle = currCycle + 1
currentRoot = rewardsContract.merkleRoot()
geyserRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {
user.address: {},
accounts[5].address: {},
accounts[6].address: {},
},
"tokens": [mockContract],
"cycle": nextCycle,
}
)
geyserRewards["claims"][user.address][str(mockContract)] = 100
geyserRewards["claims"][accounts[5].address][str(mockContract)] = 20
geyserRewards["claims"][accounts[6].address][str(mockContract)] = 0
pastRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {},
"tokens": [mockContract],
"cycle": currCycle,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": currentRoot},
geyserRewards,
startBlock,
endBlock,
pastRewards,
)
rewardsContract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewardsContract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
rewardsContract.claim(
[mockContract],
[100],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[100],
{"from": user},
)
assert mockContract.balanceOf(user) == 100
assert mockContract.balanceOf(str(rewardsContract)) == 100000000 - 100
# Try to claim with zero tokens all around, expect failure
rewardsContract = admin.deploy(badgerTree)
rewardsContract.initialize(admin, proposer, validator)
startBlock = rewardsContract.lastPublishEndBlock() + 1
endBlock = startBlock + 5
currCycle = rewardsContract.currentCycle()
nextCycle = currCycle + 1
currentRoot = rewardsContract.merkleRoot()
geyserRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {
user.address: {FARM_ADDRESS: 0, XSUSHI_ADDRESS: 0},
accounts[5].address: {FARM_ADDRESS: 0, XSUSHI_ADDRESS: 0},
accounts[6].address: {FARM_ADDRESS: 0, XSUSHI_ADDRESS: 0},
},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
"cycle": nextCycle,
}
)
pastRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
"cycle": currCycle,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": currentRoot},
geyserRewards,
startBlock,
endBlock,
pastRewards,
)
rewardsContract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewardsContract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
with brownie.reverts("No tokens to claim"):
rewardsContract.claim(
[FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
[0, 0],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[0, 0],
{"from": user},
)
def test_salary(setup):
rewards_assistant = setup
admin, proposer, validator = accounts[:3]
users = accounts[3:]
rewards_contract = admin.deploy(rewards_assistant.BadgerTree)
rewards_contract.initialize(admin, proposer, validator)
def make_salary_entry(recipient, token, total_amount, duration, start_time):
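        # Salary entry for `recipient`: total_amount of `token` paid out over `duration`
        # seconds, starting at `start_time`.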
return DotMap(
{
"recipient": recipient,
"token": token,
"totalAmount": total_amount,
"duration": duration,
"startTime": start_time,
"endTime": start_time + duration,
}
)
def update_root(rewards_data):
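        # Propose the new merkle root as the proposer, then approve it as the validator
        # so it becomes the active cycle on the BadgerTree contract.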
rewards_contract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewards_contract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
def calculate_payment(salary_entry, start_block_time, end_block_time):
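        # Amount owed to this salary entry for the block-time window
        # [start_block_time, end_block_time]; zero when the window does not overlap
        # the entry's vesting period.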
print(
f"salary_entry: {salary_entry}\nstart_block_time:\t{start_block_time}\nend_block_time: \t{end_block_time}"
)
if (
salary_entry.startTime <= end_block_time
and salary_entry.endTime > start_block_time
):
start_time = max(salary_entry.startTime, start_block_time)
end_time = min(salary_entry.endTime, end_block_time)
return (
salary_entry.totalAmount
                * (end_time - start_time)
                / salary_entry.duration
)
return 0
mock_token = rewards_assistant.MockToken
mock_contract = admin.deploy(mock_token)
mock_contract.initialize([rewards_contract], [10_000_000_000_000_000_000_000_000])
salaries = [
make_salary_entry(
users[0].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 360,
chain.time() - SECS_PER_DAY * 30,
),
make_salary_entry(
users[1].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 180,
chain.time() - SECS_PER_DAY * 200,
),
make_salary_entry(
users[2].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 180,
chain.time() + SECS_PER_DAY * 30,
),
make_salary_entry(
users[3].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 180,
chain.time() + SECS_PER_HOUR * 2,
),
]
void_state = DotMap(
{
"badger_tree": rewards_contract,
"claims": {},
"tokens": [mock_contract.address],
"cycle": rewards_contract.currentCycle(),
}
)
initial_state = DotMap(
{
"badger_tree": rewards_contract,
"claims": {users[20].address: {mock_contract.address: 456}},
"tokens": [mock_contract.address],
"cycle": rewards_contract.currentCycle() + 1,
}
)
update_root(
internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": rewards_contract.merkleRoot()},
initial_state,
rewards_contract.lastPublishEndBlock() + 1,
web3.eth.blockNumber,
void_state,
)
)
sleep_time = SECS_PER_HOUR * 4
chain.sleep(sleep_time)
chain.mine(50)
last_publish_time = rewards_contract.lastPublishTimestamp()
chain_time = chain.time()
claims = {
entry.recipient: {
mock_contract.address: calculate_payment(
entry, rewards_contract.lastPublishTimestamp(), chain.time()
)
}
for entry in salaries
}
assert claims[users[0]][mock_contract.address] > 0
assert claims[users[1]][mock_contract.address] == 0
assert claims[users[2]][mock_contract.address] == 0
assert claims[users[3]][mock_contract.address] > 0
update_state = DotMap(
{
"badger_tree": rewards_contract,
"claims": claims,
"tokens": [mock_contract],
"cycle": rewards_contract.currentCycle() + 1,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": rewards_contract.merkleRoot()},
update_state,
rewards_contract.lastPublishEndBlock() + 1,
web3.eth.blockNumber,
initial_state,
)
console.log(rewards_data)
update_root(rewards_data)
# TODO: Do something more than just verify that the above change was made
entry1 = salaries[0]
rewards_contract.claim(
[mock_contract],
[claims[entry1.recipient][mock_contract.address]],
rewards_data["merkleTree"]["claims"][entry1.recipient]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][entry1.recipient]["proof"],
[calculate_payment(entry1, last_publish_time, chain_time)],
{"from": entry1.recipient},
)
| 31.137825
| 119
| 0.59278
| 0
| 0
| 0
| 0
| 144
| 0.007082
| 0
| 0
| 3,688
| 0.18138
|
b0249f5db53b2ce54527df608f97d99c1010a240
| 23,869
|
py
|
Python
|
HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py
|
Borja-Perez-Diaz/HII-CHI-Mistry
|
d0dafc753c63246bf14b77807a885ddc7bd4bb99
|
[
"MIT"
] | null | null | null |
HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py
|
Borja-Perez-Diaz/HII-CHI-Mistry
|
d0dafc753c63246bf14b77807a885ddc7bd4bb99
|
[
"MIT"
] | null | null | null |
HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py
|
Borja-Perez-Diaz/HII-CHI-Mistry
|
d0dafc753c63246bf14b77807a885ddc7bd4bb99
|
[
"MIT"
] | null | null | null |
# Filename: HCm_UV_v4.11.py
import string
import numpy as np
import sys
#sys.stderr = open('errorlog.txt', 'w')
#Function for interpolation of grids
def interpolate(grid,z,zmin,zmax,n):
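    # Refine the model grid along column z (restricted to [zmin, zmax]) by linearly
    # interpolating n sub-steps between consecutive rows; returns the refined grid.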
ncol = 9
vec = []
for col in range(ncol):
inter = 0
no_inter = 0
for row in range(0,len(grid)):
if grid[row,z] < zmin or grid[row,z] > zmax: continue
if z == 2: x = 0; y = 1
if z == 1: x = 0; y = 2
if z == 0: x = 1; y = 2
if row == (len(grid)-1):
vec.append(grid[row,col])
no_inter = no_inter + 1
elif grid[row,x] < grid[row+1,x] or grid[row,y] < grid[row+1,y] :
vec.append(grid[row,col])
no_inter = no_inter + 1
else:
inter = inter + 1
for index in range(0,n):
i = grid[row,col]+(index)*(grid[row+1,col]-grid[row,col])/n
vec.append(i)
out = np.transpose(np.reshape(vec,(-1,n*inter+no_inter)))
return out
print (' ---------------------------------------------------------------------')
print (' This is HII-CHI-mistry for UV version 4.11')
print (' See Perez-Montero & Amorin (2017) for details')
print (' Insert the name of your input text file with some or all of the following columns:')
print (' Lya 1216, CIV 1549, HeII 1640, OIII 1665, CIII 1909, Hb 4861, OIII 5007')
print ('in arbitrary units and reddening corrected. Each column must be given')
print ('with labels and followed by its corresponding flux error.')
print ('---------------------------------------------------------------------')
# Input file reading
if len(sys.argv) == 1:
if int(sys.version[0]) < 3:
input00 = raw_input('Insert input file name:')
else:
input00 = input('Insert input file name:')
else:
input00 = str(sys.argv[1])
try:
input0 = np.genfromtxt(input00,dtype=None,names=True, encoding = 'ascii')
print ('The input file is:'+input00)
except:
print ('Input file error: It does not exist or has wrong format')
    sys.exit()
print ('')
if input0.size == 1:
input1 = np.stack((input0,input0))
else:
input1 = input0
# Iterations for Monte Carlo error derivation
if len(sys.argv) < 3:
n = 25
else:
n = int(sys.argv[2])
print ('The number of iterations for MonteCarlo simulation is: ',n)
print ('')
# Reading of model grids. These can be changed
print ('')
question = True
while question:
print('-------------------------------------------------')
print ('(1) POPSTAR with Chabrier IMF, age = 1 Myr')
print ('(2) BPASS v.2.1 a_IMF = 1.35, Mup = 300, age = 1Myr')
print('-------------------------------------------------')
if int(sys.version[0]) < 3:
sed = raw_input('Choose SED of the models:')
else:
sed = input('Choose SED of the models:')
if sed == '1' or sed == '2' : question = False
print ('')
question = True
while question:
if int(sys.version[0]) < 3:
inter = raw_input('Choose models [0] No interpolated [1] Interpolated: ')
else:
inter = input('Choose models [0] No interpolated [1] Interpolated: ')
if inter == '0' or inter == '1': question = False
print ('')
sed = int(sed)
inter = int(inter)
if sed==1 :
grid1 = np.loadtxt('C17_popstar_uv_v4.0.dat')
grid2 = np.loadtxt('C17_popstar_logU_adapted_emp_uv_v4.0.dat')
grid3 = np.loadtxt('C17_popstar_logU-CO_adapted_emp_uv_v4.0.dat')
if inter == 0:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. No interpolation'
print ('No interpolation for the POPSTAR models is going to be used.')
        print ('The grid has a resolution of 0.1 dex for O/H and 0.125 dex for C/O')
res_CO = 0.125
elif inter == 1:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF interpolated'
print ('Interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O')
res_CO = 0.125
elif sed==2:
grid1 = np.loadtxt('C17_bpass_uv_v4.1.dat')
grid2 = np.loadtxt('C17_bpass_logU_adapted_emp_uv_v4.1.dat')
grid3 = np.loadtxt('C17_bpass_logU-CO_adapted_emp_uv_v4.1.dat')
if inter == 0:
sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr. No interpolation'
        print ('No interpolation for the BPASS models is going to be used.')
        print ('The grid has a resolution of 0.1 dex for O/H and 0.125 dex for C/O')
res_CO = 0.125
elif inter == 1:
sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr interpolated'
        print ('Interpolation for the BPASS models is going to be used.')
        print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O')
res_CO = 0.125
grids = []
OHffs = []
eOHffs = []
COffs = []
eCOffs = []
logUffs = []
elogUffs = []
Label_ID = False
Label_Lya = False
Label_eLya = False
Label_CIV = False
Label_eCIV = False
Label_HeII = False
Label_eHeII = False
Label_OIII_1665 = False
Label_eOIII_1665 = False
Label_CIII = False
Label_eCIII = False
Label_OIII_5007 = False
Label_eOIII_5007 = False
Label_Hbeta = False
Label_eHbeta = False
for col in range(0,len(input1.dtype.names),1):
if input1.dtype.names[col] == 'ID':
Label_ID = True
if input1.dtype.names[col] == 'Lya_1216':
Label_Lya = True
if input1.dtype.names[col] == 'eLya_1216':
Label_eLya = True
if input1.dtype.names[col] == 'CIV_1549':
Label_CIV = True
if input1.dtype.names[col] == 'eCIV_1549':
Label_eCIV = True
if input1.dtype.names[col] == 'HeII_1640':
Label_HeII = True
if input1.dtype.names[col] == 'eHeII_1640':
Label_eHeII = True
if input1.dtype.names[col] == 'OIII_1665':
Label_OIII_1665 = True
if input1.dtype.names[col] == 'eOIII_1665':
Label_eOIII_1665 = True
if input1.dtype.names[col] == 'CIII_1909':
Label_CIII = True
if input1.dtype.names[col] == 'eCIII_1909':
Label_eCIII = True
if input1.dtype.names[col] == 'Hb_4861':
Label_Hbeta = True
if input1.dtype.names[col] == 'eHb_4861':
Label_eHbeta = True
if input1.dtype.names[col] == 'OIII_5007':
Label_OIII_5007 = True
if input1.dtype.names[col] == 'eOIII_5007':
Label_eOIII_5007 = True
if Label_ID == False:
Names = np.arange(1,input1.size+1,1)
else:
Names = input1['ID']
if Label_Lya == False:
Lya_1216 = np.zeros(input1.size)
else:
Lya_1216 = input1['Lya_1216']
if Label_eLya == False:
eLya_1216 = np.zeros(input1.size)
else:
eLya_1216 = input1['eLya_1216']
if Label_CIV == False:
CIV_1549 = np.zeros(input1.size)
else:
CIV_1549 = input1['CIV_1549']
if Label_eCIV == False:
eCIV_1549 = np.zeros(input1.size)
else:
eCIV_1549 = input1['eCIV_1549']
if Label_HeII == False:
HeII_1640 = np.zeros(input1.size)
else:
HeII_1640 = input1['HeII_1640']
if Label_eHeII == False:
eHeII_1640 = np.zeros(input1.size)
else:
eHeII_1640 = input1['eHeII_1640']
if Label_OIII_1665 == False:
OIII_1665 = np.zeros(input1.size)
else:
OIII_1665 = input1['OIII_1665']
if Label_eOIII_1665 == False:
eOIII_1665 = np.zeros(input1.size)
else:
eOIII_1665 = input1['eOIII_1665']
if Label_CIII == False:
CIII_1909 = np.zeros(input1.size)
else:
CIII_1909 = input1['CIII_1909']
if Label_eCIII == False:
eCIII_1909 = np.zeros(input1.size)
else:
eCIII_1909 = input1['eCIII_1909']
if Label_Hbeta == False:
Hb_4861 = np.zeros(len(input1))
else:
Hb_4861 = input1['Hb_4861']
if Label_eHbeta == False:
eHb_4861 = np.zeros(input1.size)
else:
eHb_4861 = input1['eHb_4861']
if Label_OIII_5007 == False:
OIII_5007 = np.zeros(input1.size)
else:
OIII_5007 = input1['OIII_5007']
if Label_eOIII_5007 == False:
eOIII_5007 = np.zeros(input1.size)
else:
eOIII_5007 = input1['eOIII_5007']
output = np.zeros(input1.size, dtype=[('ID', 'U12'), ('Lya_1216', float),('eLya_1216', float),('CIV_1549', float),('eCIV_1549', float),('HeII_1640', float),('eHeII_1640', float),('OIII_1665', float),('eOIII_1665', float),('CIII_1909', float),('eCIII_1909', float),('Hb_4861', float),('eHb_4861', float),('OIII_5007', float),('eOIII_5007', float),('grid', int),('OH', float),('eOH', float),('CO', float),('eCO', float),('logU', float),('elogU', float)] )
output['ID'] = Names
output['Lya_1216'] = Lya_1216
output['eLya_1216'] = eLya_1216
output['CIV_1549'] = CIV_1549
output['eCIV_1549'] = eCIV_1549
output['HeII_1640'] = HeII_1640
output['eHeII_1640'] = eHeII_1640
output['OIII_1665'] = OIII_1665
output['eOIII_1665'] = eOIII_1665
output['CIII_1909'] = CIII_1909
output['eCIII_1909'] = eCIII_1909
output['Hb_4861'] = Hb_4861
output['eHb_4861'] = eHb_4861
output['OIII_5007'] = OIII_5007
output['eOIII_5007'] = eOIII_5007
print ('Reading grids ....')
print ('')
print ('')
print ('----------------------------------------------------------------')
print ('(%) ID Grid 12+log(O/H) log(C/O) log(U)')
print ('-----------------------------------------------------------------')
# Beginning of loop of calculation
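# For each object, the observed fluxes are perturbed n times (Monte Carlo); each
# realisation yields chi-square-weighted averages of C/O, O/H and log U over the model
# grid, and the scatter across realisations contributes to the quoted uncertainties.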
count = 0
for tab in range(0,len(input1),1):
count = count + 1
OH_mc = []
CO_mc = []
logU_mc = []
OHe_mc = []
COe_mc = []
logUe_mc = []
for monte in range(0,n,1):
OH_p = 0
logU_p = 0
CO_p = 0
den_OH = 0
den_CO = 0
OH_e = 0
CO_e = 0
logU_e = 0
den_OH_e = 0
den_CO_e = 0
tol_max = 1e2
Lya_1216_obs = 0
if Lya_1216[tab] == 0:
Lya_1216_obs = 0
else:
while Lya_1216_obs <= 0:
Lya_1216_obs = np.random.normal(Lya_1216[tab],eLya_1216[tab]+1e-5)
CIV_1549_obs = 0
if CIV_1549[tab] == 0:
CIV_1549_obs = 0
else:
while CIV_1549_obs <= 0:
CIV_1549_obs = np.random.normal(CIV_1549[tab],eCIV_1549[tab]+1e-5)
HeII_1640_obs = 0
if HeII_1640[tab] == 0:
HeII_1640_obs = 0
else:
            while HeII_1640_obs <= 0:
HeII_1640_obs = np.random.normal(HeII_1640[tab],eHeII_1640[tab]+1e-5)
OIII_1665_obs = 0
if OIII_1665[tab] == 0:
OIII_1665_obs = 0
else:
while OIII_1665_obs <= 0:
OIII_1665_obs = np.random.normal(OIII_1665[tab],eOIII_1665[tab]+1e-5)
CIII_1909_obs = 0
if CIII_1909[tab] == 0:
CIII_1909_obs = 0
else:
while CIII_1909_obs <= 0:
CIII_1909_obs = np.random.normal(CIII_1909[tab],eCIII_1909[tab]+1e-5)
Hb_4861_obs = 0
if Hb_4861[tab] == 0:
Hb_4861_obs = 0
else:
while Hb_4861_obs <= 0:
Hb_4861_obs = np.random.normal(Hb_4861[tab],eHb_4861[tab]+1e-5)
OIII_5007_obs = 0
if OIII_5007[tab] == 0:
OIII_5007_obs = 0
else:
while OIII_5007_obs <= 0:
OIII_5007_obs = np.random.normal(OIII_5007[tab],eOIII_5007[tab]+1e-5)
if OIII_1665_obs == 0 or OIII_5007_obs == 0:
ROIII_obs = 0
else:
ROIII_obs = OIII_5007_obs/OIII_1665_obs
if Lya_1216_obs == 0 or CIII_1909_obs == 0:
C34_obs = 0
else:
C34_obs = (CIII_1909_obs + CIV_1549_obs) / (Lya_1216_obs)
if HeII_1640_obs == 0 or CIII_1909_obs == 0:
C34He2_obs = 0
else:
C34He2_obs = (CIII_1909_obs + CIV_1549_obs) / (HeII_1640_obs)
if CIII_1909_obs == 0 or OIII_1665_obs == 0:
C3O3_obs = -10
else:
C3O3_obs = np.log10((CIII_1909_obs) / (OIII_1665_obs))
if CIII_1909_obs == 0 or CIV_1549_obs == 0:
C3C4_obs = 0
else:
C3C4_obs = (CIII_1909_obs/CIV_1549_obs)
if CIII_1909_obs == 0 or Hb_4861_obs == 0:
C34Hb_obs = 0
else:
C34Hb_obs = (CIII_1909_obs + CIV_1549_obs) / Hb_4861_obs
# Selection of grid
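        # grid1: full model grid (both [OIII] lines measured); grid2: log U constrained
        # empirically; grid3: both log U and C/O constrained empirically (fewest lines).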
if OIII_1665[tab] > 0 and OIII_5007[tab] > 0:
grid = grid1
if monte == n-1: grids.append(1)
grid_type = 1
elif OIII_1665[tab] > 0 and CIII_1909[tab] > 0:
grid = grid2
if monte == n-1: grids.append(2)
grid_type = 2
else:
grid = grid3
if monte == n-1: grids.append(3)
grid_type = 3
# Calculation of C/O
if C3O3_obs == -10:
CO = -10
else:
CHI_ROIII = 0
CHI_C3O3 = 0
CHI_CO = 0
for index in grid:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C3O3_obs == -10:
CHI_C3O3 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3O3 = tol_max
else:
CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
if CHI_CO == 0:
CO_p = CO_p
den_CO = den_CO
else:
CO_p = index[1] /np.exp(CHI_CO) + CO_p
den_CO = 1 / np.exp(CHI_CO) + den_CO
CO = CO_p / den_CO
# Calculation of C/O error
if C3O3_obs == -10:
eCO = 0
else:
CHI_ROIII = 0
CHI_C3O3 = 0
CHI_CO = 0
for index in grid:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C3O3_obs == -10:
CHI_C3O3 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3O3 = tol_max
else:
CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
if CHI_CO == 0:
CO_e = CO_e
den_CO_e = den_CO_e
else:
CO_e = (index[1] - CO)**2 / np.exp(CHI_CO) + CO_e
den_CO_e = 1 /np.exp(CHI_CO) + den_CO_e
eCO = CO_e / den_CO_e
# Calculation of O/H and log U
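        # O/H and log U come from a chi-square-weighted average over the grid, combining
        # whichever of the ROIII, C34, C34/HeII, C34/Hbeta and CIII/CIV ratios are observed.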
if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and C34He2_obs == 0 :
OH = 0
logU = 0
else:
CHI_ROIII = 0
CHI_C3C4 = 0
CHI_C34He2 = 0
CHI_C34 = 0
CHI_C34Hb = 0
CHI_OH = 0
for index in grid:
if CO > -10 and np.abs(index[1] - CO) > np.abs(eCO+0.125):
continue
if CIV_1549_obs > 0 and index[4] == 0:
continue
if HeII_1640_obs > 0 and index[5] == 0:
continue
else:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C34_obs == 0:
CHI_C34 = 0
elif index[3] == 0 or index[7] == 0:
CHI_C34 = tol_max
else:
CHI_C34 = ((index[7]+index[4])/index[3] - C34_obs)**2/((index[7]+index[4])/index[3])
if C34He2_obs == 0:
CHI_C34He2 = 0
elif index[5] == 0 or index[7] == 0:
CHI_C34He2 = tol_max
else:
CHI_C34He2 = ((index[7]+index[4])/index[5] - C34He2_obs)**2/((index[7]+index[4])/index[5])
if C34Hb_obs == 0:
CHI_C34Hb = 0
elif index[7] == 0:
CHI_C34Hb = tol_max
else:
CHI_C34Hb = (index[7]+index[4] - C34Hb_obs)**2/(index[7]+index[4])
if C3C4_obs == 0:
CHI_C3C4 = 0
elif index[4] == 0 or index[7] == 0:
CHI_C3C4 = tol_max
else:
CHI_C3C4 = (index[7]/index[4] - C3C4_obs)**2/(index[7]/index[4])
if C34Hb_obs > 0:
CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
else:
CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
if CHI_OH == 0:
OH_p = OH_p
logU_p = logU_p
den_OH = den_OH
else:
OH_p = index[0] / np.exp(CHI_OH) + OH_p
logU_p = index[2] / np.exp(CHI_OH) + logU_p
den_OH = 1 /np.exp(CHI_OH) + den_OH
OH = OH_p / den_OH
logU = logU_p / den_OH
# Calculation of error of O/H and logU
if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and C34He2_obs == 0:
eOH = 0
elogU = 0
else:
CHI_ROIII = 0
CHI_C3C4 = 0
CHI_C34 = 0
CHI_C34He2 = 0
CHI_C34Hb = 0
CHI_OH = 0
for index in grid:
if CO > -10 and np.abs(index[1] - CO) > np.abs(eCO+res_CO):
continue
if CIV_1549_obs > 0 and index[4] == 0:
continue
if HeII_1640_obs > 0 and index[5] == 0:
continue
else:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C34_obs == 0:
CHI_C34 = 0
elif index[3] == 0 or index[7] == 0:
CHI_C34 = tol_max
else:
CHI_C34 = ((index[7]+index[4])/index[3] - C34_obs)**2/((index[7]+index[4])/index[3])
if C34He2_obs == 0:
CHI_C34He2 = 0
elif index[5] == 0 or index[7] == 0:
CHI_C34He2 = tol_max
else:
CHI_C34He2 = ((index[7]+index[4])/index[5] - C34He2_obs)**2/((index[7]+index[4])/index[5])
if C34Hb_obs == 0:
CHI_C34Hb = 0
elif index[7] == 0:
CHI_C34Hb = tol_max
else:
CHI_C34Hb = (index[7]+index[4] - C34Hb_obs)**2/(index[7]+index[4])
if C3C4_obs == 0:
CHI_C3C4 = 0
elif index[4] == 0 or index[7] == 0:
CHI_C3C4 = tol_max
else:
CHI_C3C4 = (index[7]/index[4] - C3C4_obs)**2/(index[7]/index[4])
if C34Hb_obs > 0:
CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
else:
CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
if CHI_OH == 0:
OH_e = OH_e
logU_e = logU_e
den_OH_e = den_OH_e
else:
OH_e = (index[0] - OH)**2 /np.exp(CHI_OH) + OH_e
logU_e = (index[2] - logU)**2 /np.exp(CHI_OH) + logU_e
den_OH_e = 1 /np.exp(CHI_OH) + den_OH_e
eOH = OH_e / den_OH_e
elogU = logU_e / den_OH_e
# Iterations for interpolated models
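        # When interpolated models are requested, the grid is refined around the
        # first-pass solution (log U, O/H, C/O) and the weighted averages are recomputed.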
if inter == 0 or (OH == 0 and CO == -10):
COf = CO
OHf = OH
logUf = logU
elif inter == 1:
if OH == 0:
igrid = grid
else:
igrid = interpolate(grid,2,logU-elogU-0.25,logU+elogU+0.25,10)
igrid = igrid[np.lexsort((igrid[:,1],igrid[:,2]))]
igrid = interpolate(igrid,0,OH-eOH-0.1,OH+eOH+0.1,10)
if CO == -10:
igrid = igrid
else:
igrid = igrid[np.lexsort((igrid[:,0],igrid[:,2]))]
igrid = interpolate(igrid,1,CO-eCO-0.125,CO+eCO+0.125,10)
CHI_ROIII = 0
CHI_C3O3 = 0
CHI_C3C4 = 0
CHI_C34He2 = 0
CHI_C34 = 0
CHI_C34Hb = 0
CHI_OH = 0
CHI_CO = 0
for index in igrid:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C3O3_obs == -10:
CHI_C3O3 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3O3 = tol_max
else:
CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
if C34_obs == 0:
CHI_C34 = 0
elif index[4] == 0:
CHI_C34 = tol_max
else:
CHI_C34 = ((index[6]+index[7])/index[3] - C34_obs)**2/((index[6]+index[7])/index[3])
if C34Hb_obs == 0:
CHI_C34Hb = 0
elif index[4] == 0:
CHI_C34Hb = tol_max
else:
CHI_C34Hb = (index[6]+index[7] - C34_obs)**2/(index[6]+index[7])
if C3C4_obs == 0:
CHI_C3C4 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3C4 = tol_max
else:
CHI_C3C4 = (index[6]/index[7] - C3C4_obs)**2/(index[6]/index[7])
if C34Hb_obs > 0:
CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
else:
CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
if CHI_OH == 0:
OH_p = OH_p
logU_p = logU_p
den_OH = den_OH
else:
OH_p = index[0] /np.exp(CHI_OH) + OH_p
logU_p = index[2] /np.exp(CHI_OH) + logU_p
den_OH = 1 /np.exp(CHI_OH) + den_OH
CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
if CHI_CO == 0:
CO_p = CO_p
den_CO = den_CO
else:
CO_p = index[1] /np.exp(CHI_CO)**2 + CO_p
den_CO = 1 /np.exp(CHI_CO)**2 + den_CO
if CO == -10:
COf = -10
else:
COf = CO_p / den_CO
if OH == 0:
OHf = 0
logUf = 0
else:
OHf = OH_p / den_OH
logUf = logU_p / den_OH
OH_mc.append(OHf)
CO_mc.append(COf)
logU_mc.append(logUf)
OHe_mc.append(eOH)
COe_mc.append(eCO)
logUe_mc.append(elogU)
OHff = np.mean(OH_mc)
eOHff = (np.std(OH_mc)**2+np.mean(OHe_mc)**2)**0.5
COff = np.mean(CO_mc)
eCOff = (np.std(CO_mc)**2+np.mean(COe_mc)**2)**0.5
logUff = np.mean(logU_mc)
elogUff = (np.std(logU_mc)**2+np.mean(logUe_mc)**2)**0.5
OHffs.append(OHff)
eOHffs.append(eOHff)
COffs.append(COff)
eCOffs.append(eCOff)
logUffs.append(logUff)
elogUffs.append(elogUff)
if input0.size == 1 and tab==0: continue
print (round(100*(count)/float(input1.size),1),'%',Names[tab],grid_type,'', round(OHff,2), round(eOHff,2),'',round(COff,2), round(eCOff,2), '',round(logUff,2), round(elogUff,2))
output['grid'] = grids
output['OH'] = OHffs
output['eOH'] = eOHffs
output['CO'] = COffs
output['eCO'] = eCOffs
output['logU'] = logUffs
output['elogU'] = elogUffs
if input0.size == 1: output = np.delete(output,obj=1,axis=0)
lineas_header = [' HII-CHI-mistry_UV v.4.11 output file', 'Input file:'+input00,'Iterations for MonteCarlo: '+str(n),'Used models: '+sed_type,'','ID. Lya eLya 1549 e1549 1640 e1640 1665 e1665 1909 e1909 Hbeta eHbeta 5007 e5007 i O/H eO/H C/O eC/O logU elogU']
header = '\n'.join(lineas_header)
np.savetxt(input00+'_hcm-uv-output.dat',output,fmt=' '.join(['%s']*1+['%.3f']*14+['%i']+['%.2f']*6),header=header)
print ('________________________________')
print ('Results are stored in '+input00+'_hcm-uv-output.dat')
| 30.759021
| 453
| 0.526541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,758
| 0.157443
|
b026bd7b3b263fb2129be4259af4f24d57934ce8
| 20,746
|
py
|
Python
|
Name Generator/names.py
|
Rakkerrak/Projects
|
0a9bc54b7d41e69b444165f60254262a163509a9
|
[
"MIT"
] | null | null | null |
Name Generator/names.py
|
Rakkerrak/Projects
|
0a9bc54b7d41e69b444165f60254262a163509a9
|
[
"MIT"
] | null | null | null |
Name Generator/names.py
|
Rakkerrak/Projects
|
0a9bc54b7d41e69b444165f60254262a163509a9
|
[
"MIT"
] | null | null | null |
feFirst = ['Emma', 'Olivia', 'Ava', 'Isabella', 'Sophia', 'Charlotte', 'Mia', 'Amelia', 'Harper', 'Evelyn', 'Abigail', 'Emily', 'Elizabeth', 'Mila', 'Ella', 'Avery', 'Sofia', 'Camila', 'Aria', 'Scarlett', 'Victoria', 'Madison', 'Luna', 'Grace', 'Chloe', 'Penelope', 'Layla', 'Riley', 'Zoey', 'Nora', 'Lily', 'Eleanor', 'Hannah', 'Lillian', 'Addison', 'Aubrey', 'Ellie', 'Stella', 'Natalie', 'Zoe', 'Leah', 'Hazel', 'Violet', 'Aurora', 'Savannah', 'Audrey', 'Brooklyn', 'Bella', 'Claire', 'Skylar', 'Lucy', 'Paisley', 'Everly', 'Anna', 'Caroline', 'Nova', 'Genesis', 'Emilia', 'Kennedy', 'Samantha', 'Maya', 'Willow', 'Kinsley', 'Naomi', 'Aaliyah', 'Elena', 'Sarah', 'Ariana', 'Allison', 'Gabriella', 'Alice', 'Madelyn', 'Cora', 'Ruby', 'Eva', 'Serenity', 'Autumn', 'Adeline', 'Hailey', 'Gianna', 'Valentina', 'Isla', 'Eliana', 'Quinn', 'Nevaeh', 'Ivy', 'Sadie', 'Piper', 'Lydia', 'Alexa', 'Josephine', 'Emery', 'Julia', 'Delilah', 'Arianna', 'Vivian', 'Kaylee', 'Sophie', 'Brielle', 'Madeline', 'Peyton', 'Rylee', 'Clara', 'Hadley', 'Melanie', 'Mackenzie', 'Reagan', 'Adalynn', 'Liliana', 'Aubree', 'Jade', 'Katherine', 'Isabelle', 'Natalia', 'Raelynn', 'Maria', 'Athena', 'Ximena', 'Arya', 'Leilani', 'Taylor', 'Faith', 'Rose', 'Kylie', 'Alexandra', 'Mary', 'Margaret', 'Lyla', 'Ashley', 'Amaya', 'Eliza', 'Brianna', 'Bailey', 'Andrea', 'Khloe', 'Jasmine', 'Melody', 'Iris', 'Isabel', 'Norah', 'Annabelle', 'Valeria', 'Emerson', 'Adalyn', 'Ryleigh', 'Eden', 'Emersyn', 'Anastasia', 'Kayla', 'Alyssa', 'Juliana', 'Charlie', 'Esther', 'Ariel', 'Cecilia', 'Valerie', 'Alina', 'Molly', 'Reese', 'Aliyah', 'Lilly', 'Parker', 'Finley', 'Morgan', 'Sydney', 'Jordyn', 'Eloise', 'Trinity', 'Daisy', 'Kimberly', 'Lauren', 'Genevieve', 'Sara', 'Arabella', 'Harmony', 'Elise', 'Remi', 'Teagan', 'Alexis', 'London', 'Sloane', 'Laila', 'Lucia', 'Diana', 'Juliette', 'Sienna', 'Elliana', 'Londyn', 'Ayla', 'Callie', 'Gracie', 'Josie', 'Amara', 'Jocelyn', 'Daniela', 'Everleigh', 'Mya', 'Rachel', 'Summer', 'Alana', 'Brooke', 'Alaina', 'Mckenzie', 'Catherine', 'Amy', 'Presley', 'Journee', 'Rosalie', 'Ember', 'Brynlee', 'Rowan', 'Joanna', 'Paige', 'Rebecca', 'Ana', 'Sawyer', 'Mariah', 'Nicole', 'Brooklynn', 'Payton', 'Marley', 'Fiona', 'Georgia', 'Lila', 'Harley', 'Adelyn', 'Alivia', 'Noelle', 'Gemma', 'Vanessa', 'Journey', 'Makayla', 'Angelina', 'Adaline', 'Catalina', 'Alayna', 'Julianna', 'Leila', 'Lola', 'Adriana', 'June', 'Juliet', 'Jayla', 'River', 'Tessa', 'Lia', 'Dakota', 'Delaney', 'Selena', 'Blakely', 'Ada', 'Camille', 'Zara', 'Malia', 'Hope', 'Samara', 'Vera', 'Mckenna', 'Briella', 'Izabella', 'Hayden', 'Raegan', 'Michelle', 'Angela', 'Ruth', 'Freya', 'Kamila', 'Vivienne', 'Aspen', 'Olive', 'Kendall', 'Elaina', 'Thea', 'Kali', 'Destiny', 'Amiyah', 'Evangeline', 'Cali', 'Blake', 'Elsie', 'Juniper', 'Alexandria', 'Myla', 'Ariella', 'Kate', 'Mariana', 'Lilah', 'Charlee', 'Daleyza', 'Nyla', 'Jane', 'Maggie', 'Zuri', 'Aniyah', 'Lucille', 'Leia', 'Melissa', 'Adelaide', 'Amina', 'Giselle', 'Lena', 'Camilla', 'Miriam', 'Millie', 'Brynn', 'Gabrielle', 'Sage', 'Annie', 'Logan', 'Lilliana', 'Haven', 'Jessica', 'Kaia', 'Magnolia', 'Amira', 'Adelynn', 'Makenzie', 'Stephanie', 'Nina', 'Phoebe', 'Arielle', 'Evie', 'Lyric', 'Alessandra', 'Gabriela', 'Paislee', 'Raelyn', 'Madilyn', 'Paris', 'Makenna', 'Kinley', 'Gracelyn', 'Talia', 'Maeve', 'Rylie', 'Kiara', 'Evelynn', 'Brinley', 'Jacqueline', 'Laura', 'Gracelynn', 'Lexi', 'Ariah', 'Fatima', 'Jennifer', 'Kehlani', 'Alani', 'Ariyah', 'Luciana', 'Allie', 'Heidi', 'Maci', 'Phoenix', 'Felicity', 
'Joy', 'Kenzie', 'Veronica', 'Margot', 'Addilyn', 'Lana', 'Cassidy', 'Remington', 'Saylor', 'Ryan', 'Keira', 'Harlow', 'Miranda', 'Angel', 'Amanda', 'Daniella', 'Royalty', 'Gwendolyn', 'Ophelia', 'Heaven', 'Jordan', 'Madeleine', 'Esmeralda', 'Kira', 'Miracle', 'Elle', 'Amari', 'Danielle', 'Daphne', 'Willa', 'Haley', 'Gia', 'Kaitlyn', 'Oakley', 'Kailani', 'Winter', 'Alicia', 'Serena', 'Nadia', 'Aviana', 'Demi', 'Jada', 'Braelynn', 'Dylan', 'Ainsley', 'Alison', 'Camryn', 'Avianna', 'Bianca', 'Skyler', 'Scarlet', 'Maddison', 'Nylah', 'Sarai', 'Regina', 'Dahlia', 'Nayeli', 'Raven', 'Helen', 'Adrianna', 'Averie', 'Skye', 'Kelsey', 'Tatum', 'Kensley', 'Maliyah', 'Erin', 'Viviana', 'Jenna', 'Anaya', 'Carolina', 'Shelby', 'Sabrina', 'Mikayla', 'Annalise', 'Octavia', 'Lennon', 'Blair', 'Carmen', 'Yaretzi', 'Kennedi', 'Mabel', 'Zariah', 'Kyla', 'Christina', 'Selah', 'Celeste', 'Eve', 'Mckinley', 'Milani', 'Frances', 'Jimena', 'Kylee', 'Leighton', 'Katie', 'Aitana', 'Kayleigh', 'Sierra', 'Kathryn', 'Rosemary', 'Jolene', 'Alondra', 'Elisa', 'Helena', 'Charleigh', 'Hallie', 'Lainey', 'Avah', 'Jazlyn', 'Kamryn', 'Mira', 'Cheyenne', 'Francesca', 'Antonella', 'Wren', 'Chelsea', 'Amber', 'Emory', 'Lorelei', 'Nia', 'Abby', 'April', 'Emelia', 'Carter', 'Aylin', 'Cataleya', 'Bethany', 'Marlee', 'Carly', 'Kaylani', 'Emely', 'Liana', 'Madelynn', 'Cadence', 'Matilda', 'Sylvia', 'Myra', 'Fernanda', 'Oaklyn', 'Elianna', 'Hattie', 'Dayana', 'Kendra', 'Maisie', 'Malaysia', 'Kara', 'Katelyn', 'Maia', 'Celine', 'Cameron', 'Renata', 'Jayleen', 'Charli', 'Emmalyn', 'Holly', 'Azalea', 'Leona', 'Alejandra', 'Bristol', 'Collins', 'Imani', 'Meadow', 'Alexia', 'Edith', 'Kaydence', 'Leslie', 'Lilith', 'Kora', 'Aisha', 'Meredith', 'Danna', 'Wynter', 'Emberly', 'Julieta', 'Michaela', 'Alayah', 'Jemma', 'Reign', 'Colette', 'Kaliyah', 'Elliott', 'Johanna', 'Remy', 'Sutton', 'Emmy', 'Virginia', 'Briana', 'Oaklynn', 'Adelina', 'Everlee', 'Megan', 'Angelica', 'Justice', 'Mariam', 'Khaleesi', 'Macie', 'Karsyn', 'Alanna', 'Aleah', 'Mae', 'Mallory', 'Esme', 'Skyla', 'Madilynn', 'Charley', 'Allyson', 'Hanna', 'Shiloh', 'Henley', 'Macy', 'Maryam', 'Ivanna', 'Ashlynn', 'Lorelai', 'Amora', 'Ashlyn', 'Sasha', 'Baylee', 'Beatrice', 'Itzel', 'Priscilla', 'Marie', 'Jayda', 'Liberty', 'Rory', 'Alessia', 'Alaia', 'Janelle', 'Kalani', 'Gloria', 'Sloan', 'Dorothy', 'Greta', 'Julie', 'Zahra', 'Savanna', 'Annabella', 'Poppy', 'Amalia', 'Zaylee', 'Cecelia', 'Coraline', 'Kimber', 'Emmie', 'Anne', 'Karina', 'Kassidy', 'Kynlee', 'Monroe', 'Anahi', 'Jaliyah', 'Jazmin', 'Maren', 'Monica', 'Siena', 'Marilyn', 'Reyna', 'Kyra', 'Lilian', 'Jamie', 'Melany', 'Alaya', 'Ariya', 'Kelly', 'Rosie', 'Adley', 'Dream', 'Jaylah', 'Laurel', 'Jazmine', 'Mina', 'Karla', 'Bailee', 'Aubrie', 'Katalina', 'Melina', 'Harlee', 'Elliot', 'Hayley', 'Elaine', 'Karen', 'Dallas', 'Irene', 'Lylah', 'Ivory', 'Chaya', 'Rosa', 'Aleena', 'Braelyn', 'Nola', 'Alma', 'Leyla', 'Pearl', 'Addyson', 'Roselyn', 'Lacey', 'Lennox', 'Reina', 'Aurelia', 'Noa', 'Janiyah', 'Jessie', 'Madisyn', 'Saige', 'Alia', 'Tiana', 'Astrid', 'Cassandra', 'Kyleigh', 'Romina', 'Stevie', 'Haylee', 'Zelda', 'Lillie', 'Aileen', 'Brylee', 'Eileen', 'Yara', 'Ensley', 'Lauryn', 'Giuliana', 'Livia', 'Anya', 'Mikaela', 'Palmer', 'Lyra', 'Mara', 'Marina', 'Kailey', 'Liv', 'Clementine', 'Kenna', 'Briar', 'Emerie', 'Galilea', 'Tiffany', 'Bonnie', 'Elyse', 'Cynthia', 'Frida', 'Kinslee', 'Tatiana', 'Joelle', 'Armani', 'Jolie', 'Nalani', 'Rayna', 'Yareli', 'Meghan', 'Rebekah', 'Addilynn', 'Faye', 'Zariyah', 'Lea', 'Aliza', 
'Julissa', 'Lilyana', 'Anika', 'Kairi', 'Aniya', 'Noemi', 'Angie', 'Crystal', 'Bridget', 'Ari', 'Davina', 'Amelie', 'Amirah', 'Annika', 'Elora', 'Xiomara', 'Linda', 'Hana', 'Laney', 'Mercy', 'Hadassah', 'Madalyn', 'Louisa', 'Simone', 'Kori', 'Jillian', 'Alena', 'Malaya', 'Miley', 'Milan', 'Sariyah', 'Malani', 'Clarissa', 'Nala', 'Princess', 'Amani', 'Analia', 'Estella', 'Milana', 'Aya', 'Chana', 'Jayde', 'Tenley', 'Zaria', 'Itzayana', 'Penny', 'Ailani', 'Lara', 'Aubriella', 'Clare', 'Lina', 'Rhea', 'Bria', 'Thalia', 'Keyla', 'Haisley', 'Ryann', 'Addisyn', 'Amaia', 'Chanel', 'Ellen', 'Harmoni', 'Aliana', 'Tinsley', 'Landry', 'Paisleigh', 'Lexie', 'Myah', 'Rylan', 'Deborah', 'Emilee', 'Laylah', 'Novalee', 'Ellis', 'Emmeline', 'Avalynn', 'Hadlee', 'Legacy', 'Braylee', 'Elisabeth', 'Kaylie', 'Ansley', 'Dior', 'Paula', 'Belen', 'Corinne', 'Maleah', 'Martha', 'Teresa', 'Salma', 'Louise', 'Averi', 'Lilianna', 'Amiya', 'Milena', 'Royal', 'Aubrielle', 'Calliope', 'Frankie', 'Natasha', 'Kamilah', 'Meilani', 'Raina', 'Amayah', 'Lailah', 'Rayne', 'Zaniyah', 'Isabela', 'Nathalie', 'Miah', 'Opal', 'Kenia', 'Azariah', 'Hunter', 'Tori', 'Andi', 'Keily', 'Leanna', 'Scarlette', 'Jaelyn', 'Saoirse', 'Selene', 'Dalary', 'Lindsey', 'Marianna', 'Ramona', 'Estelle', 'Giovanna', 'Holland', 'Nancy', 'Emmalynn', 'Mylah', 'Rosalee', 'Sariah', 'Zoie', 'Blaire', 'Lyanna', 'Maxine', 'Anais', 'Dana', 'Judith', 'Kiera', 'Jaelynn', 'Noor', 'Kai', 'Adalee', 'Oaklee', 'Amaris', 'Jaycee', 'Belle', 'Carolyn', 'Della', 'Karter', 'Sky', 'Treasure', 'Vienna', 'Jewel', 'Rivka', 'Rosalyn', 'Alannah', 'Ellianna', 'Sunny', 'Claudia', 'Cara', 'Hailee', 'Estrella', 'Harleigh', 'Zhavia', 'Alianna', 'Brittany', 'Jaylene', 'Journi', 'Marissa', 'Mavis', 'Iliana', 'Jurnee', 'Aislinn', 'Alyson', 'Elsa', 'Kamiyah', 'Kiana', 'Lisa', 'Arlette', 'Kadence', 'Kathleen', 'Halle', 'Erika', 'Sylvie', 'Adele', 'Erica', 'Veda', 'Whitney', 'Bexley', 'Emmaline', 'Guadalupe', 'August', 'Brynleigh', 'Gwen', 'Promise', 'Alisson', 'India', 'Madalynn', 'Paloma', 'Patricia', 'Samira', 'Aliya', 'Casey', 'Jazlynn', 'Paulina', 'Dulce', 'Kallie', 'Perla', 'Adrienne', 'Alora', 'Nataly', 'Ayleen', 'Christine', 'Kaiya', 'Ariadne', 'Karlee', 'Barbara', 'Lillianna', 'Raquel', 'Saniyah', 'Yamileth', 'Arely', 'Celia', 'Heavenly', 'Kaylin', 'Marisol', 'Marleigh', 'Avalyn', 'Berkley', 'Kataleya', 'Zainab', 'Dani', 'Egypt', 'Joyce', 'Kenley', 'Annabel', 'Kaelyn', 'Etta', 'Hadleigh', 'Joselyn', 'Luella', 'Jaylee', 'Zola', 'Alisha', 'Ezra', 'Queen', 'Amia', 'Annalee', 'Bellamy', 'Paola', 'Tinley', 'Violeta', 'Jenesis', 'Arden', 'Giana', 'Wendy', 'Ellison', 'Florence', 'Margo', 'Naya', 'Robin', 'Sandra', 'Scout', 'Waverly', 'Janessa', 'Jayden', 'Micah', 'Novah', 'Zora', 'Ann', 'Jana', 'Taliyah', 'Vada', 'Giavanna', 'Ingrid', 'Valery', 'Azaria', 'Emmarie', 'Esperanza', 'Kailyn', 'Aiyana', 'Keilani', 'Austyn', 'Whitley', 'Elina', 'Kimora', 'Maliah']
maFirst = ['Liam', 'Noah', 'William', 'James', 'Oliver', 'Benjamin', 'Elijah', 'Lucas', 'Mason', 'Logan', 'Alexander', 'Ethan', 'Jacob', 'Michael', 'Daniel', 'Henry', 'Jackson', 'Sebastian', 'Aiden', 'Matthew', 'Samuel', 'David', 'Joseph', 'Carter', 'Owen', 'Wyatt', 'John', 'Jack', 'Luke', 'Jayden', 'Dylan', 'Grayson', 'Levi', 'Isaac', 'Gabriel', 'Julian', 'Mateo', 'Anthony', 'Jaxon', 'Lincoln', 'Joshua', 'Christopher', 'Andrew', 'Theodore', 'Caleb', 'Ryan', 'Asher', 'Nathan', 'Thomas', 'Leo', 'Isaiah', 'Charles', 'Josiah', 'Hudson', 'Christian', 'Hunter', 'Connor', 'Eli', 'Ezra', 'Aaron', 'Landon', 'Adrian', 'Jonathan', 'Nolan', 'Jeremiah', 'Easton', 'Elias', 'Colton', 'Cameron', 'Carson', 'Robert', 'Angel', 'Maverick', 'Nicholas', 'Dominic', 'Jaxson', 'Greyson', 'Adam', 'Ian', 'Austin', 'Santiago', 'Jordan', 'Cooper', 'Brayden', 'Roman', 'Evan', 'Ezekiel', 'Xavier', 'Jose', 'Jace', 'Jameson', 'Leonardo', 'Bryson', 'Axel', 'Everett', 'Parker', 'Kayden', 'Miles', 'Sawyer', 'Jason', 'Declan', 'Weston', 'Micah', 'Ayden', 'Wesley', 'Luca', 'Vincent', 'Damian', 'Zachary', 'Silas', 'Gavin', 'Chase', 'Kai', 'Emmett', 'Harrison', 'Nathaniel', 'Kingston', 'Cole', 'Tyler', 'Bennett', 'Bentley', 'Ryker', 'Tristan', 'Brandon', 'Kevin', 'Luis', 'George', 'Ashton', 'Rowan', 'Braxton', 'Ryder', 'Gael', 'Ivan', 'Diego', 'Maxwell', 'Max', 'Carlos', 'Kaiden', 'Juan', 'Maddox', 'Justin', 'Waylon', 'Calvin', 'Giovanni', 'Jonah', 'Abel', 'Jayce', 'Jesus', 'Amir', 'King', 'Beau', 'Camden', 'Alex', 'Jasper', 'Malachi', 'Brody', 'Jude', 'Blake', 'Emmanuel', 'Eric', 'Brooks', 'Elliot', 'Antonio', 'Abraham', 'Timothy', 'Finn', 'Rhett', 'Elliott', 'Edward', 'August', 'Xander', 'Alan', 'Dean', 'Lorenzo', 'Bryce', 'Karter', 'Victor', 'Milo', 'Miguel', 'Hayden', 'Graham', 'Grant', 'Zion', 'Tucker', 'Jesse', 'Zayden', 'Joel', 'Richard', 'Patrick', 'Emiliano', 'Avery', 'Nicolas', 'Brantley', 'Dawson', 'Myles', 'Matteo', 'River', 'Steven', 'Thiago', 'Zane', 'Matias', 'Judah', 'Messiah', 'Jeremy', 'Preston', 'Oscar', 'Kaleb', 'Alejandro', 'Marcus', 'Mark', 'Peter', 'Maximus', 'Barrett', 'Jax', 'Andres', 'Holden', 'Legend', 'Charlie', 'Knox', 'Kaden', 'Paxton', 'Kyrie', 'Kyle', 'Griffin', 'Josue', 'Kenneth', 'Beckett', 'Enzo', 'Adriel', 'Arthur', 'Felix', 'Bryan', 'Lukas', 'Paul', 'Brian', 'Colt', 'Caden', 'Leon', 'Archer', 'Omar', 'Israel', 'Aidan', 'Theo', 'Javier', 'Remington', 'Jaden', 'Bradley', 'Emilio', 'Colin', 'Riley', 'Cayden', 'Phoenix', 'Clayton', 'Simon', 'Ace', 'Nash', 'Derek', 'Rafael', 'Zander', 'Brady', 'Jorge', 'Jake', 'Louis', 'Damien', 'Karson', 'Walker', 'Maximiliano', 'Amari', 'Sean', 'Chance', 'Walter', 'Martin', 'Finley', 'Andre', 'Tobias', 'Cash', 'Corbin', 'Arlo', 'Iker', 'Erick', 'Emerson', 'Gunner', 'Cody', 'Stephen', 'Francisco', 'Killian', 'Dallas', 'Reid', 'Manuel', 'Lane', 'Atlas', 'Rylan', 'Jensen', 'Ronan', 'Beckham', 'Daxton', 'Anderson', 'Kameron', 'Raymond', 'Orion', 'Cristian', 'Tanner', 'Kyler', 'Jett', 'Cohen', 'Ricardo', 'Spencer', 'Gideon', 'Ali', 'Fernando', 'Jaiden', 'Titus', 'Travis', 'Bodhi', 'Eduardo', 'Dante', 'Ellis', 'Prince', 'Kane', 'Luka', 'Kash', 'Hendrix', 'Desmond', 'Donovan', 'Mario', 'Atticus', 'Cruz', 'Garrett', 'Hector', 'Angelo', 'Jeffrey', 'Edwin', 'Cesar', 'Zayn', 'Devin', 'Conor', 'Warren', 'Odin', 'Jayceon', 'Romeo', 'Julius', 'Jaylen', 'Hayes', 'Kayson', 'Muhammad', 'Jaxton', 'Joaquin', 'Caiden', 'Dakota', 'Major', 'Keegan', 'Sergio', 'Marshall', 'Johnny', 'Kade', 'Edgar', 'Leonel', 'Ismael', 'Marco', 'Tyson', 'Wade', 'Collin', 'Troy', 'Nasir', 'Conner', 
'Adonis', 'Jared', 'Rory', 'Andy', 'Jase', 'Lennox', 'Shane', 'Malik', 'Ari', 'Reed', 'Seth', 'Clark', 'Erik', 'Lawson', 'Trevor', 'Gage', 'Nico', 'Malakai', 'Quinn', 'Cade', 'Johnathan', 'Sullivan', 'Solomon', 'Cyrus', 'Fabian', 'Pedro', 'Frank', 'Shawn', 'Malcolm', 'Khalil', 'Nehemiah', 'Dalton', 'Mathias', 'Jay', 'Ibrahim', 'Peyton', 'Winston', 'Kason', 'Zayne', 'Noel', 'Princeton', 'Matthias', 'Gregory', 'Sterling', 'Dominick', 'Elian', 'Grady', 'Russell', 'Finnegan', 'Ruben', 'Gianni', 'Porter', 'Kendrick', 'Leland', 'Pablo', 'Allen', 'Hugo', 'Raiden', 'Kolton', 'Remy', 'Ezequiel', 'Damon', 'Emanuel', 'Zaiden', 'Otto', 'Bowen', 'Marcos', 'Abram', 'Kasen', 'Franklin', 'Royce', 'Jonas', 'Sage', 'Philip', 'Esteban', 'Drake', 'Kashton', 'Roberto', 'Harvey', 'Alexis', 'Kian', 'Jamison', 'Maximilian', 'Adan', 'Milan', 'Phillip', 'Albert', 'Dax', 'Mohamed', 'Ronin', 'Kamden', 'Hank', 'Memphis', 'Oakley', 'Augustus', 'Drew', 'Moises', 'Armani', 'Rhys', 'Benson', 'Jayson', 'Kyson', 'Braylen', 'Corey', 'Gunnar', 'Omari', 'Alonzo', 'Landen', 'Armando', 'Derrick', 'Dexter', 'Enrique', 'Bruce', 'Nikolai', 'Francis', 'Rocco', 'Kairo', 'Royal', 'Zachariah', 'Arjun', 'Deacon', 'Skyler', 'Eden', 'Alijah', 'Rowen', 'Pierce', 'Uriel', 'Ronald', 'Luciano', 'Tate', 'Frederick', 'Kieran', 'Lawrence', 'Moses', 'Rodrigo', 'Brycen', 'Leonidas', 'Nixon', 'Keith', 'Chandler', 'Case', 'Davis', 'Asa', 'Darius', 'Isaias', 'Aden', 'Jaime', 'Landyn', 'Raul', 'Niko', 'Trenton', 'Apollo', 'Cairo', 'Izaiah', 'Scott', 'Dorian', 'Julio', 'Wilder', 'Santino', 'Dustin', 'Donald', 'Raphael', 'Saul', 'Taylor', 'Ayaan', 'Duke', 'Ryland', 'Tatum', 'Ahmed', 'Moshe', 'Edison', 'Emmitt', 'Cannon', 'Alec', 'Danny', 'Keaton', 'Roy', 'Conrad', 'Roland', 'Quentin', 'Lewis', 'Samson', 'Brock', 'Kylan', 'Cason', 'Ahmad', 'Jalen', 'Nikolas', 'Braylon', 'Kamari', 'Dennis', 'Callum', 'Justice', 'Soren', 'Rayan', 'Aarav', 'Gerardo', 'Ares', 'Brendan', 'Jamari', 'Kaison', 'Yusuf', 'Issac', 'Jasiah', 'Callen', 'Forrest', 'Makai', 'Crew', 'Kobe', 'Bo', 'Julien', 'Mathew', 'Braden', 'Johan', 'Marvin', 'Zaid', 'Stetson', 'Casey', 'Ty', 'Ariel', 'Tony', 'Zain', 'Callan', 'Cullen', 'Sincere', 'Uriah', 'Dillon', 'Kannon', 'Colby', 'Axton', 'Cassius', 'Quinton', 'Mekhi', 'Reece', 'Alessandro', 'Jerry', 'Mauricio', 'Sam', 'Trey', 'Mohammad', 'Alberto', 'Gustavo', 'Arturo', 'Fletcher', 'Marcelo', 'Abdiel', 'Hamza', 'Alfredo', 'Chris', 'Finnley', 'Curtis', 'Kellan', 'Quincy', 'Kase', 'Harry', 'Kyree', 'Wilson', 'Cayson', 'Hezekiah', 'Kohen', 'Neil', 'Mohammed', 'Raylan', 'Kaysen', 'Lucca', 'Sylas', 'Mack', 'Leonard', 'Lionel', 'Ford', 'Roger', 'Rex', 'Alden', 'Boston', 'Colson', 'Briggs', 'Zeke', 'Dariel', 'Kingsley', 'Valentino', 'Jamir', 'Salvador', 'Vihaan', 'Mitchell', 'Lance', 'Lucian', 'Darren', 'Jimmy', 'Alvin', 'Amos', 'Tripp', 'Zaire', 'Layton', 'Reese', 'Casen', 'Colten', 'Brennan', 'Korbin', 'Sonny', 'Bruno', 'Orlando', 'Devon', 'Huxley', 'Boone', 'Maurice', 'Nelson', 'Douglas', 'Randy', 'Gary', 'Lennon', 'Titan', 'Denver', 'Jaziel', 'Noe', 'Jefferson', 'Ricky', 'Lochlan', 'Rayden', 'Bryant', 'Langston', 'Lachlan', 'Clay', 'Abdullah', 'Lee', 'Baylor', 'Leandro', 'Ben', 'Kareem', 'Layne', 'Joe', 'Crosby', 'Deandre', 'Demetrius', 'Kellen', 'Carl', 'Jakob', 'Ridge', 'Bronson', 'Jedidiah', 'Rohan', 'Larry', 'Stanley', 'Tomas', 'Shiloh', 'Thaddeus', 'Watson', 'Baker', 'Vicente', 'Koda', 'Jagger', 'Nathanael', 'Carmelo', 'Shepherd', 'Graysen', 'Melvin', 'Ernesto', 'Jamie', 'Yosef', 'Clyde', 'Eddie', 'Tristen', 'Grey', 'Ray', 'Tommy', 'Samir', 
'Ramon', 'Santana', 'Kristian', 'Marcel', 'Wells', 'Zyaire', 'Brecken', 'Byron', 'Otis', 'Reyansh', 'Axl', 'Joey', 'Trace', 'Morgan', 'Musa', 'Harlan', 'Enoch', 'Henrik', 'Kristopher', 'Talon', 'Rey', 'Guillermo', 'Houston', 'Jon', 'Vincenzo', 'Dane', 'Terry', 'Azariah', 'Castiel', 'Kye', 'Augustine', 'Zechariah', 'Joziah', 'Kamryn', 'Hassan', 'Jamal', 'Chaim', 'Bodie', 'Emery', 'Branson', 'Jaxtyn', 'Kole', 'Wayne', 'Aryan', 'Alonso', 'Brixton', 'Madden', 'Allan', 'Flynn', 'Jaxen', 'Harley', 'Magnus', 'Sutton', 'Dash', 'Anders', 'Westley', 'Brett', 'Emory', 'Felipe', 'Yousef', 'Jadiel', 'Mordechai', 'Dominik', 'Junior', 'Eliseo', 'Fisher', 'Harold', 'Jaxxon', 'Kamdyn', 'Maximo', 'Caspian', 'Kelvin', 'Damari', 'Fox', 'Trent', 'Hugh', 'Briar', 'Franco', 'Keanu', 'Terrance', 'Yahir', 'Ameer', 'Kaiser', 'Thatcher', 'Ishaan', 'Koa', 'Merrick', 'Coen', 'Rodney', 'Brayan', 'London', 'Rudy', 'Gordon', 'Bobby', 'Aron', 'Marc', 'Van', 'Anakin', 'Canaan', 'Dario', 'Reginald', 'Westin', 'Darian', 'Ledger', 'Leighton', 'Maxton', 'Tadeo', 'Valentin', 'Aldo', 'Khalid', 'Nickolas', 'Toby', 'Dayton', 'Jacoby', 'Billy', 'Gatlin', 'Elisha', 'Jabari', 'Jermaine', 'Alvaro', 'Marlon', 'Mayson', 'Blaze', 'Jeffery', 'Kace', 'Braydon', 'Achilles', 'Brysen', 'Saint', 'Xzavier', 'Aydin', 'Eugene', 'Adrien', 'Cain', 'Kylo', 'Nova', 'Onyx', 'Arian', 'Bjorn', 'Jerome', 'Miller', 'Alfred', 'Kenzo', 'Kyng', 'Leroy', 'Maison', 'Jordy', 'Stefan', 'Wallace', 'Benicio', 'Kendall', 'Zayd', 'Blaine', 'Tristian', 'Anson', 'Gannon', 'Jeremias', 'Marley', 'Ronnie', 'Dangelo', 'Kody', 'Will', 'Bentlee', 'Gerald', 'Salvatore', 'Turner', 'Chad', 'Misael', 'Mustafa', 'Konnor', 'Maxim', 'Rogelio', 'Zakai', 'Cory', 'Judson', 'Brentley', 'Darwin', 'Louie', 'Ulises', 'Dakari', 'Rocky', 'Wesson', 'Alfonso', 'Payton', 'Dwayne', 'Juelz', 'Duncan', 'Keagan', 'Deshawn', 'Bode', 'Bridger', 'Skylar', 'Brodie', 'Landry', 'Avi', 'Keenan', 'Reuben', 'Jaxx', 'Rene', 'Yehuda', 'Imran', 'Yael', 'Alexzander', 'Willie', 'Cristiano', 'Heath', 'Lyric', 'Davion', 'Elon', 'Karsyn', 'Krew', 'Jairo', 'Maddux', 'Ephraim', 'Ignacio', 'Vivaan', 'Aries', 'Vance', 'Boden', 'Lyle', 'Ralph', 'Reign', 'Camilo', 'Draven', 'Terrence', 'Idris', 'Ira', 'Javion', 'Jericho', 'Khari', 'Marcellus', 'Creed', 'Shepard', 'Terrell', 'Ahmir', 'Camdyn', 'Cedric', 'Howard', 'Jad', 'Zahir', 'Harper', 'Justus', 'Forest', 'Gibson', 'Zev', 'Alaric', 'Decker', 'Ernest', 'Jesiah', 'Torin', 'Benedict', 'Bowie', 'Deangelo', 'Genesis', 'Harlem', 'Kalel', 'Kylen', 'Bishop', 'Immanuel', 'Lian', 'Zavier', 'Archie', 'Davian', 'Gus', 'Kabir', 'Korbyn', 'Randall', 'Benton', 'Coleman', 'Markus']
last = ['Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Garcia', 'Miller', 'Davis', 'Rodriguez', 'Martinez', 'Hernandez', 'Lopez', 'Gonzales', 'Wilson', 'Anderson', 'Thomas', 'Taylor', 'Moore', 'Jackson', 'Martin', 'Lee', 'Perez', 'Thompson', 'White', 'Harris', 'Sanchez', 'Clark', 'Ramirez', 'Lewis', 'Robinson', 'Walker', 'Young', 'Allen', 'King', 'Wright', 'Scott', 'Torres', 'Nguyen', 'Hill', 'Flores', 'Green', 'Adams', 'Nelson', 'Baker', 'Hall', 'Rivera', 'Campbell', 'Mitchell', 'Carter', 'Roberts', 'Gomez', 'Phillips', 'Evans', 'Turner', 'Diaz', 'Parker', 'Cruz', 'Edwards', 'Collins', 'Reyes', 'Stewart', 'Morris', 'Morales', 'Murphy', 'Cook', 'Rogers', 'Gutierrez', 'Ortiz', 'Morgan', 'Cooper', 'Peterson', 'Bailey', 'Reed', 'Kelly', 'Howard', 'Ramos', 'Kim', 'Cox', 'Ward', 'Richardson', 'Watson', 'Brooks', 'Chavez', 'Wood', 'James', 'Bennet', 'Gray', 'Mendoza', 'Ruiz', 'Hughes', 'Price', 'Alvarez', 'Castillo', 'Sanders', 'Patel', 'Myers', 'Long', 'Ross', 'Foster', 'Jimenez']
| 3,457.666667
| 10,010
| 0.594428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16,514
| 0.796009
|
b028018661b0929da5b6a926d65bb750a50efe57
| 444
|
py
|
Python
|
oldtoronto/test/toronto_archives_test.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 22
|
2018-04-25T22:03:53.000Z
|
2021-07-13T18:43:23.000Z
|
oldtoronto/test/toronto_archives_test.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 17
|
2018-04-30T14:04:08.000Z
|
2022-02-13T19:52:44.000Z
|
oldtoronto/test/toronto_archives_test.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 7
|
2018-05-08T23:32:44.000Z
|
2022-01-27T17:49:30.000Z
|
from nose.tools import eq_
from oldtoronto.toronto_archives import get_citation_hierarchy # noqa
def test_get_citation_hierarchy():
eq_([
'Fonds 200, Series 123',
'Fonds 200'
], get_citation_hierarchy('Fonds 200, Series 123, Item 456'))
eq_([
'Fonds 257, Series 12, File 1983',
'Fonds 257, Series 12',
'Fonds 257'
    ], get_citation_hierarchy('Fonds 257, Series 12, File 1983, 52'))
| 26.117647
| 73
| 0.646396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.405405
|
b02d1a840f2e9ca574098b991b8f37e1b954c866
| 979
|
py
|
Python
|
excel2.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
excel2.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
excel2.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
# excel2.py
import xlrd
def print_xls(path):
xlsFile = xlrd.open_workbook(path)
    try:
        mySheet = xlsFile.sheets()[0]  # access the first worksheet (index 0); xlsFile.sheet_by_name('sheetName') selects a sheet by name
    except Exception:
        print('no such sheet in file')
        return
    print('%d rows, %d cols' % (mySheet.nrows, mySheet.ncols))  # print how many rows and columns the sheet has
    for row in range(0, mySheet.nrows):
        temp = ''
        for col in range(0, mySheet.ncols):
            if mySheet.cell(row, col).value is not None:
                temp += str(mySheet.cell(row, col).value) + '\t'
        print(temp)
if __name__ == '__main__':
print_xls('D:\\python_path\\sample_ex.xls')
'''
Modules are objects, and every module has a built-in attribute __name__. The value of a module's
__name__ depends on how the module is used. If you import a module, __name__ is usually the module's
file name, without the path or the file extension. But you can also run the module directly like a
standard program, in which case __name__ takes the special default value "__main__".
Running a .py file directly from the command line sets __name__ to '__main__';
after importing that .py file, __name__ is no longer '__main__';
so `if __name__ == '__main__'` is used to check whether the .py file is being run directly.
'''
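# A minimal sketch of the behaviour described above, using a hypothetical module called demo_name.py:
#
#     # demo_name.py
#     print(__name__)
#
#     $ python demo_name.py        # prints "__main__"
#     >>> import demo_name         # prints "demo_name"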
| 27.194444
| 96
| 0.670072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 865
| 0.641216
|
b02f9eadae5afd900218c21f9e3251e4c4f3cf07
| 1,162
|
py
|
Python
|
reth_buffer/reth_buffer/__init__.py
|
sosp2021/Reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | null | null | null |
reth_buffer/reth_buffer/__init__.py
|
sosp2021/Reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | 1
|
2021-08-10T02:58:58.000Z
|
2021-08-10T02:58:58.000Z
|
reth_buffer/reth_buffer/__init__.py
|
sosp2021/reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | null | null | null |
import multiprocessing as mp
import portpicker
from .client import Client, NumpyLoader, TorchCudaLoader
from .sampler import PERSampler
from .server.main_loop import main_loop
from .utils import get_local_ip
def start_server(
capacity, batch_size, host=None, port=None, samplers=None, cache_policy=None
):
if host is None:
host = get_local_ip()
if port is None:
port = portpicker.pick_unused_port()
meta_addr = f"tcp://{host}:{port}"
ctx = mp.get_context("spawn")
proc = ctx.Process(
target=main_loop,
args=(capacity, batch_size, meta_addr, samplers, cache_policy),
)
proc.start()
return proc, meta_addr
def start_per(
capacity,
batch_size,
alpha=0.6,
beta=0.4,
sample_start=1000,
num_sampler_procs=1,
host=None,
port=None,
cache_policy=None,
):
samplers = [
{
"sampler_cls": PERSampler,
"num_procs": num_sampler_procs,
"sample_start": sample_start,
"kwargs": {"alpha": alpha, "beta": beta},
}
]
return start_server(capacity, batch_size, host, port, samplers, cache_policy)
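# A minimal usage sketch; the capacity and batch-size values below are illustrative, not package defaults.
#
#     proc, meta_addr = start_per(capacity=100_000, batch_size=64)
#     # ... point reth_buffer clients at `meta_addr`, then shut the server down:
#     proc.terminate()
#     proc.join()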
| 23.24
| 81
| 0.645439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.075731
|
b02fad481b4d3cb3263f98acf09c40e1f2669bfa
| 7,171
|
py
|
Python
|
agent.py
|
FlowerForAlgernon/rainbow
|
78492ba572e2f8b4b2228d2ca625af94a09ee696
|
[
"Apache-2.0"
] | 1
|
2022-03-23T02:02:10.000Z
|
2022-03-23T02:02:10.000Z
|
agent.py
|
FlowerForAlgernon/rainbow
|
78492ba572e2f8b4b2228d2ca625af94a09ee696
|
[
"Apache-2.0"
] | null | null | null |
agent.py
|
FlowerForAlgernon/rainbow
|
78492ba572e2f8b4b2228d2ca625af94a09ee696
|
[
"Apache-2.0"
] | null | null | null |
import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from memory import Transition, ReplayMemory, PrioritizedReplayMemory, NStepMemory
from DQN import DQN, DuelingDQN, NoisyDQN, DistributionalDQN
class Agent:
def __init__(self, config):
# Distributional DQN
self.support = torch.linspace(config.v_min, config.v_max, config.atom_size).to(config.device)
self.policy_net = DistributionalDQN(config.c, config.h, config.w, config.n_actions, config.atom_size, self.support).to(config.device)
self.target_net = DistributionalDQN(config.c, config.h, config.w, config.n_actions, config.atom_size, self.support).to(config.device)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval()
#self.memory = ReplayMemory(config.memory_size)
self.memory = PrioritizedReplayMemory(config.memory_size, config.alpha)
self.memory_n = NStepMemory(config.memory_size, config.alpha, config.gamma, config.n_step)
self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=config.learning_rate, eps=0.001, alpha=0.95)
@staticmethod
def get_state(obs, config):
state = np.array(obs)[14:77,:,:]
        state = np.ascontiguousarray(state.transpose((2, 0, 1)), dtype=float)  # builtin float keeps float64 semantics; np.float is removed in newer NumPy
state = torch.from_numpy(state / 255)
return state.unsqueeze(0).to(config.device)
def select_action(self, state, epsilon, config):
if random.random() > epsilon:
with torch.no_grad():
return self.policy_net(state).max(1)[1].view(1,1).to(config.device)
else:
return torch.tensor([[random.randrange(4)]], device=config.device, dtype=torch.long)
    def transition_to_tensor(self, transitions, config):
        # Move each (state, action, next_state, reward) entry onto the configured device.
        for i in range(len(transitions)):
            transitions[i][0] = torch.tensor(transitions[i][0]).to(config.device)
            transitions[i][1] = torch.tensor(transitions[i][1]).to(config.device)
            transitions[i][2] = torch.tensor(transitions[i][2]).to(config.device)
            transitions[i][3] = torch.tensor(transitions[i][3]).to(config.device)
        return transitions
def optimize_model(self, config):
#transitions = self.memory.sample(config.batch_size)
# PrioritizedReplayMemory
transitions, weights, indices = self.memory.sample(config.batch_size, config.beta)
        transitions = self.transition_to_tensor(transitions, config)
batch = Transition(*zip(*transitions))
loss, weights_loss = self.get_loss(batch, config, weights, config.gamma)
# N Step
transitions_n, _, _ = self.memory_n.sample_from_indices(config.batch_size, config.beta, indices)
        transitions_n = self.transition_to_tensor(transitions_n, config)
batch_n = Transition(*zip(*transitions_n))
gamma_n = config.gamma ** config.n_step
loss_n, weights_loss_n = self.get_loss(batch_n, config, weights, gamma_n)
weights_loss += weights_loss_n
self.optimizer.zero_grad()
#loss.backward()
# PrioritizedReplayMemory
weights_loss.backward()
for param in self.policy_net.parameters():
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
# PrioritizedReplayMemory
loss_for_prior = loss.detach().cpu().numpy()
new_priorities = loss_for_prior + config.prior_eps
self.memory.update_priorities(indices, new_priorities)
# N Step
self.memory_n.update_priorities(indices, new_priorities)
# Noisy Net
self.policy_net.reset_noise()
self.target_net.reset_noise()
def get_loss(self, batch, config, weights, gamma):
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=config.device, dtype=torch.bool)
non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward).unsqueeze(1)
state_action_values = self.policy_net(state_batch).gather(1, action_batch)
next_state_action_values = torch.zeros(config.batch_size, device=config.device).unsqueeze(1)
next_state_action_values[non_final_mask] = self.target_net(non_final_next_states).gather(
1, self.policy_net(non_final_next_states).detach().argmax(dim=1, keepdim=True)
).detach()
expected_state_action_values = reward_batch + gamma * next_state_action_values
#loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)
# PrioritizedReplayMemory
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values, reduction="none")
weights = torch.FloatTensor(np.array(weights).reshape(-1, 1)).to(config.device)
weights_loss = torch.mean(weights * loss)
return loss, weights_loss
def get_DistributionalDQN_loss(self, batch, config, weights, gamma):
state_batch = torch.cat(batch.state)
next_state_batch = torch.cat(batch.next_state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward).unsqueeze(1)
        done_batch = torch.tensor([0.0 if s is not None else 1.0 for s in batch.next_state], device=config.device).unsqueeze(1)  # done = 1 when the episode terminated (no next state)
delta_z = float(config.v_max - config.v_min) / (config.atom_size - 1)
with torch.no_grad():
next_action = self.policy_net(next_state_batch).argmax(1)
next_dist = self.target_net.dist(next_state_batch)
next_dist = next_dist[range(config.batch_size), next_action]
t_z = reward_batch + (1 - done_batch) * gamma * self.support
t_z = t_z.clamp(min=config.v_min, max=config.v_max)
b = (t_z - config.v_min) / delta_z
l = b.floor().long()
u = b.ceil().long()
offset = (
torch.linspace(
0, (config.batch_size - 1) * config.atom_size, config.batch_size
).long()
.unsqueeze(1)
.expand(config.batch_size, config.atom_size)
.to(config.device)
)
proj_dist = torch.zeros(next_dist.size(), device=config.device)
proj_dist.view(-1).index_add_(
0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1)
)
dist = self.policy_net.dist(state_batch)
log_p = torch.log(dist[range(config.batch_size), action_batch])
elementwise_loss = -(proj_dist * log_p).sum(1)
# PrioritizedReplayMemory
weights = torch.FloatTensor(np.array(weights).reshape(-1, 1)).to(config.device)
weights_loss = torch.mean(weights * elementwise_loss)
return elementwise_loss, weights_loss
| 48.452703
| 142
| 0.647748
| 6,875
| 0.958723
| 0
| 0
| 270
| 0.037652
| 0
| 0
| 381
| 0.053131
|
b0347f10c5746915500b0d6e172c2c32ab5316d0
| 121
|
py
|
Python
|
Deutsch-Jozsa-Algorithm/main.py
|
Gregory-Eales/QA-Reimplementations
|
bef0b3e67397a73c468e539c426c6629d398433b
|
[
"MIT"
] | 1
|
2019-05-03T21:48:29.000Z
|
2019-05-03T21:48:29.000Z
|
Deutsch-Jozsa-Algorithm/main.py
|
Gregory-Eales/QA-Reimplementations
|
bef0b3e67397a73c468e539c426c6629d398433b
|
[
"MIT"
] | null | null | null |
Deutsch-Jozsa-Algorithm/main.py
|
Gregory-Eales/QA-Reimplementations
|
bef0b3e67397a73c468e539c426c6629d398433b
|
[
"MIT"
] | null | null | null |
import qsharp
from DeutschJozsa import SayHello, RunDeutschJozsa
SayHello.simulate()
RunDeutschJozsa.simulate(N=10)
| 13.444444
| 50
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b0370f00352f25c209bf62c39330309ded5b5b35
| 413
|
py
|
Python
|
xslt/apply.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | 21
|
2015-02-06T21:55:59.000Z
|
2021-04-29T11:23:18.000Z
|
xslt/apply.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | 9
|
2015-02-03T10:41:35.000Z
|
2020-02-18T12:46:10.000Z
|
xslt/apply.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | 20
|
2015-02-06T00:09:19.000Z
|
2020-01-10T13:27:06.000Z
|
"""Apply a stylesheet to an XML file"""
import sys
from lxml import etree
if len(sys.argv) != 3:
    print("Usage: %s <stylesheet> <xml doc> ..." % sys.argv[0], file=sys.stderr)
sys.exit(1)
transform = etree.XSLT(etree.XML(open(sys.argv[1], "r").read()))
for xmlfile in sys.argv[2:]:
with open(xmlfile, "r") as fp:
doc = etree.parse(fp)
print(etree.tostring(transform(doc), pretty_print=True))
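# Example invocation (placeholder file names, not files shipped with the project):
#   python apply.py stylesheet.xsl document.xml > transformed.xml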
| 27.533333
| 76
| 0.639225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.200969
|
b037c4f526f6d6afd8598b5e5a8cb64d9cc7462a
| 7,122
|
py
|
Python
|
docs/conf.py
|
vlukes/io3d
|
34d048b7f737a5e56610879f6ab103128e8f0750
|
[
"MIT"
] | 8
|
2016-09-26T01:35:15.000Z
|
2022-02-23T04:05:23.000Z
|
docs/conf.py
|
vlukes/io3d
|
34d048b7f737a5e56610879f6ab103128e8f0750
|
[
"MIT"
] | 4
|
2016-05-18T11:04:56.000Z
|
2018-10-24T11:03:03.000Z
|
docs/conf.py
|
vlukes/io3d
|
34d048b7f737a5e56610879f6ab103128e8f0750
|
[
"MIT"
] | 6
|
2017-03-24T20:43:21.000Z
|
2021-08-23T06:05:34.000Z
|
# -*- coding: utf-8 -*-
#
# io3d documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 27 12:01:57 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
# mock
import mock
MOCK_MODULES = [
"numpy",
"scipy",
"matplotlib",
"matplotlib.pyplot",
"matplotlib.widgets",
"scipy.io",
"yaml",
"pydicom",
# 'scipy.interpolate', 'scipy.ndimage', 'pycut', 'io3d', 'sed3', 'pysegbase',
# 'pysegbase.pycut', 'sklearn', 'skimage', 'dicom', 'vtk', 'vtk.util',
# 'larcc', 'larcc.VIEW', 'larcc.MKPOL', 'larcc.AA', 'larcc.INTERVALS',
# 'larcc.MAP',
"PyQt5",
"PyQt5.QtCore",
"PyQt5.QtGui", #'web', 'lar2psm',
# 'scipy.ndimage.measurements', 'lar', 'extern.lar', 'splines',
# 'scipy.sparse', 'skimage.filter', 'mapper', 'skelet3d', 'numpy.core',
# 'skimage.filters', 'skimage.restoration','skimage.io',
# 'gzip', 'cPickle',
# 'lbpLibrary', 'skimage.exposure', 'PyQt4.QVTKRenderWindowInteractor',
# 'matplotlib.backends', 'matplotlib.backends.backend_qt4agg', 'numpy.linalg',
# 'PyQt4.Qt', 'matplotlib.figure', 'skimage.morphology', 'gtk',
# 'pysegbase.seed_editor_qt', 'vtk.qt4', 'vtk.qt4.QVTKRenderWindowInteractor',
# 'seg2fem', 'skimage.segmentation', 'skimage.transform', 'matplotlib.patches', 'skimage.feature',
# 'scipy.ndimage.morphology', 'mpl_toolkits', 'mpl_toolkits.mplot3d',
# 'scipy.ndimage.measurement', 'scipy.ndimage.interpolation',
# 'matplotlib.backends.backend_gtkagg', 'cv2', 'skimage.measure', 'dicom2fem',
# 'morphsnakes', 'scipy.ndimage.filters', 'scipy.signal', 'pandas',
# 'scipy.stats', 'io3d.misc', 'lisa.extern.lar', 'scipy.cluster',
# 'scipy.cluster.vq', 'scipy.cluster.vq',
# 'ipdb', 'multipolyfit', 'PIL', 'yaml',
"SimpleITK",
# 'six', 'nearpy', 'SimpleITK', 'lar', 'pandas'
"ruamel.yaml.YAML",
]
#
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# import sklearn
# sklearn.__version__ = '0.0'
# import scipy
# scipy.__version__ = '0.0'
# import pysegbase.pycut
# pysegbase.pycut.methods = ['graphcut']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"io3d"
copyright = u"2017, Miroslav Jirik"
author = u"Miroslav Jirik"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"1.2.3"
# The full version, including alpha/beta/rc tags.
release = u"1.2.3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "io3ddoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "io3d.tex", u"io3d Documentation", u"Miroslav Jirik", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "io3d", u"io3d Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"io3d",
u"io3d Documentation",
author,
"io3d",
"One line description of project.",
"Miscellaneous",
)
]
| 32.226244
| 102
| 0.664139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,004
| 0.843022
|
b038cebedd15245004a4a13444cb7f55e363f2e8
| 33,401
|
py
|
Python
|
EVB.py
|
yunzhe-zhou/CS285-Project
|
e6aca061e27d2794949d4419339120107a6cb8f7
|
[
"MIT"
] | null | null | null |
EVB.py
|
yunzhe-zhou/CS285-Project
|
e6aca061e27d2794949d4419339120107a6cb8f7
|
[
"MIT"
] | null | null | null |
EVB.py
|
yunzhe-zhou/CS285-Project
|
e6aca061e27d2794949d4419339120107a6cb8f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""RED_linear_run1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-WN1MY9YYluGcnigLgrndqsxcOYldbB6
"""
#@title mount your Google Drive
#@markdown Your work will be stored in a folder called `cs285_f2021` by default to prevent Colab instance timeouts from deleting your edits.
import os
from google.colab import drive
drive.mount('/content/gdrive')
# Commented out IPython magic to ensure Python compatibility.
#@title set up mount symlink
DRIVE_PATH = '/content/gdrive/My\ Drive/cs285_project'
DRIVE_PYTHON_PATH = DRIVE_PATH.replace('\\', '')
if not os.path.exists(DRIVE_PYTHON_PATH):
# %mkdir $DRIVE_PATH
## the space in `My Drive` causes some issues,
## make a symlink to avoid this
SYM_PATH = '/content/cs285_project'
if not os.path.exists(SYM_PATH):
!ln -s $DRIVE_PATH $SYM_PATH
!apt update
!apt install -y --no-install-recommends \
build-essential \
curl \
git \
gnupg2 \
make \
cmake \
ffmpeg \
swig \
libz-dev \
unzip \
zlib1g-dev \
libglfw3 \
libglfw3-dev \
libxrandr2 \
libxinerama-dev \
libxi6 \
libxcursor-dev \
libgl1-mesa-dev \
libgl1-mesa-glx \
libglew-dev \
libosmesa6-dev \
lsb-release \
ack-grep \
patchelf \
wget \
xpra \
xserver-xorg-dev \
xvfb \
python-opengl \
ffmpeg
# Commented out IPython magic to ensure Python compatibility.
#@title download mujoco
MJC_PATH = '{}/mujoco'.format(SYM_PATH)
# %mkdir $MJC_PATH
# %cd $MJC_PATH
!wget -q https://www.roboti.us/download/mujoco200_linux.zip
!unzip -q mujoco200_linux.zip
# %mv mujoco200_linux mujoco200
# %rm mujoco200_linux.zip
#@title update mujoco paths
import os
os.environ['LD_LIBRARY_PATH'] += ':{}/mujoco200/bin'.format(MJC_PATH)
os.environ['MUJOCO_PY_MUJOCO_PATH'] = '{}/mujoco200'.format(MJC_PATH)
os.environ['MUJOCO_PY_MJKEY_PATH'] = '{}/mjkey.txt'.format(MJC_PATH)
## installation on colab does not find *.so files
## in LD_LIBRARY_PATH, copy over manually instead
!cp $MJC_PATH/mujoco200/bin/*.so /usr/lib/x86_64-linux-gnu/
# Commented out IPython magic to ensure Python compatibility.
# %cd $MJC_PATH
!git clone https://github.com/openai/mujoco-py.git
# %cd mujoco-py
# %pip install -e .
## cythonize at the first import
import mujoco_py
# Commented out IPython magic to ensure Python compatibility.
# %cd $SYM_PATH
# %cd RED
# %tensorflow_version 1.x
! pip install mpi4py
'''
Disclaimer: this code is highly based on trpo_mpi at @openai/baselines and @openai/imitation
'''
import argparse
import os.path as osp
import logging
from mpi4py import MPI
from tqdm import tqdm
import numpy as np
import gym
from baselines.rnd_gail import mlp_policy
from baselines.common import set_global_seeds, tf_util as U
from baselines.common.misc_util import boolean_flag
from baselines import bench
from baselines import logger
from baselines.rnd_gail.merged_critic import make_critic
import pickle
def get_exp_data(expert_path):
with open(expert_path, 'rb') as f:
data = pickle.loads(f.read())
data["actions"] = np.squeeze(data["actions"])
data["observations"] = data["observations"]
# print(data["observations"].shape)
# print(data["actions"].shape)
return [data["observations"], data["actions"]]
Log_dir = osp.expanduser("~/workspace/log/mujoco")
Checkpoint_dir = osp.expanduser("~/workspace/checkpoint/mujoco")
def get_task_name(args):
task_name = args.env_id.split("-")[0]
if args.pretrained:
task_name += "pretrained."
task_name +="gamma_%f." % args.gamma
task_name += ".seed_" + str(args.seed)
task_name += ".reward_" + str(args.reward)
task_name += "kl_" + str(args.max_kl)
task_name += "g_"+str(args.g_step)
return task_name
def modify_args(args):
#task specific parameters
if args.reward<2:
rnd_iter = 200
dyn_norm = False
if args.env_id == "Reacher-v2":
rnd_iter = 300
args.gamma = 0.99
if args.env_id == "HalfCheetah-v2":
args.pretrained = True
if args.env_id == "Walker2d-v2":
args.fixed_var = False
if args.env_id == "Ant-v2":
args.pretrained = True
args.BC_max_iter = 10
args.fixed_var = False
return args, rnd_iter, dyn_norm
else:
if args.env_id == "Hopper-v2":
args.gamma = 0.99
dyn_norm = False
if args.env_id == "Reacher-v2":
dyn_norm = True
if args.env_id == "HalfCheetah-v2":
dyn_norm = True
if args.env_id == "Walker2d-v2":
args.gamma = 0.99
dyn_norm = True
if args.env_id == "Ant-v2":
args.gamma = 0.99
dyn_norm = False
return args, 0, dyn_norm
parser = argparse.ArgumentParser("Tensorflow Implementation of GAIL")
parser.add_argument('--env_id', help='environment ID', default="Hopper-v2")
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--checkpoint_dir', help='the directory to save model', default=Checkpoint_dir)
parser.add_argument('--log_dir', help='the directory to save log file', default=Log_dir)
parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)
# Task
parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample'], default='train')
# for evaluation
boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')
# Optimization Configuration
parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=3)
parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=1)
# Network Configuration (Using MLP Policy)
parser.add_argument('--policy_hidden_size', type=int, default=100)
parser.add_argument('--adversary_hidden_size', type=int, default=100)
# Algorithms Configuration
parser.add_argument('--max_kl', type=float, default=0.01)
parser.add_argument('--policy_entcoeff', help='entropy coefficient of policy', type=float, default=0)
parser.add_argument('--adversary_entcoeff', help='entropy coefficient of discriminator', type=float, default=1e-3)
# Training Configuration
parser.add_argument('--num_timesteps', help='number of timesteps per episode', type=int, default=5e6)
# Behavior Cloning
boolean_flag(parser, 'pretrained', default=False, help='Use BC to pretrain')
boolean_flag(parser, 'fixed_var', default=False, help='Fixed policy variance')
parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=20)
parser.add_argument('--gamma', help='Discount factor', type=float, default=0.97)
boolean_flag(parser, 'popart', default=True, help='Use popart on V function')
parser.add_argument('--reward', help='Reward Type', type=int, default=0)
args = parser.parse_args(args=[])
set_global_seeds(args.seed)
env = gym.make(args.env_id)
env.seed(args.seed)
# env = bench.Monitor(env, logger.get_dir() and
# osp.join(logger.get_dir(), "monitor.json"))
gym.logger.setLevel(logging.WARN)
if args.log_dir != Log_dir:
log_dir = osp.join(Log_dir, args.log_dir)
save_dir = osp.join(Checkpoint_dir, args.log_dir)
else:
log_dir = Log_dir
save_dir = Checkpoint_dir
args, rnd_iter, dyn_norm = modify_args(args)
def policy_fn(name, ob_space, ac_space,):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=args.policy_hidden_size, num_hid_layers=2, popart=args.popart, gaussian_fixed_var=args.fixed_var)
exp_data = get_exp_data("/content/gdrive/My Drive/cs285_project/RED/data/Hopper-v2.pkl")
task_name = get_task_name(args)
logger.configure(dir=log_dir, log_suffix=task_name, format_strs=["log", "stdout"])
class RND_Critic_Revise(object):
def __init__(self, W, sigma_hat, ob_size, ac_size, rnd_hid_size=128, rnd_hid_layer=4, hid_size=128, hid_layer=1,
out_size=128, scale=250000.0, offset=0., reward_scale=1.0, scope="rnd"):
self.scope = scope
self.scale = scale
self.offset = offset
self.out_size = out_size
self.rnd_hid_size = rnd_hid_size
self.rnd_hid_layer = rnd_hid_layer
self.hid_size = hid_size
self.hid_layer = hid_layer
self.reward_scale = reward_scale
self.W = W
self.sigma_hat = sigma_hat
print("RND Critic")
ob = tf.placeholder(tf.float32, [None, ob_size])
ac = tf.placeholder(tf.float32, [None, ac_size])
lr = tf.placeholder(tf.float32, None)
feat = self.build_graph(ob, ac, self.scope, hid_layer, hid_size, out_size)
rnd_feat = self.build_graph(ob, ac, self.scope+"_rnd", rnd_hid_layer, rnd_hid_size, out_size)
feat_loss = tf.reduce_mean(tf.square(feat-rnd_feat))
self.reward = reward_scale*tf.exp(offset- tf.reduce_mean(tf.square(feat - rnd_feat), axis=-1) * self.scale)
rnd_loss = tf.reduce_mean(tf.square(feat - rnd_feat), axis=-1) * self.scale
# self.reward = reward_scale * tf.exp(offset - rnd_loss)
# self.reward = reward_scale * (tf.math.softplus(rnd_loss) - rnd_loss)
self.reward_func = U.function([ob, ac], self.reward)
self.raw_reward = U.function([ob, ac], rnd_loss)
self.trainer = tf.train.AdamOptimizer(learning_rate=lr)
gvs = self.trainer.compute_gradients(feat_loss, self.get_trainable_variables())
self._train = U.function([ob, ac, lr], [], updates=[self.trainer.apply_gradients(gvs)])
def build_graph(self, ob, ac, scope, hid_layer, hid_size, size):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
layer = tf.concat([ob, ac], axis=1)
for _ in range(hid_layer):
layer = tf.layers.dense(layer, hid_size, activation=tf.nn.leaky_relu)
layer = tf.layers.dense(layer, size, activation=None)
return layer
def build_reward_op(self, ob, ac):
feat = self.build_graph(ob, ac, self.scope, self.hid_layer, self.hid_size, self.out_size)
rnd_feat = self.build_graph(ob, ac, self.scope + "_rnd", self.rnd_hid_layer, self.rnd_hid_size
, self.out_size)
reward = self.reward_scale* tf.exp(self.offset- tf.reduce_mean(tf.square(feat - rnd_feat), axis=-1) * self.scale)
return reward
def get_trainable_variables(self):
return tf.trainable_variables(self.scope)
    def get_reward(self, ob, ac):
        # return self.reward_func(ob, ac)
        x = np.concatenate([ob.reshape([-1, 1]), ac.reshape([-1, 1])], axis=0)
        # Predictive standard deviation of the fitted linear model, scaled by 5 for rescaling;
        # the multiplier can be changed to another constant for tuning.
        var = self.sigma_hat * np.sqrt(np.matmul(np.matmul(x.T, self.W), x)) * 5
        return np.exp(-var ** 2)
def get_raw_reward(self, ob, ac):
return self.raw_reward(ob, ac)
def train(self, ob, ac, batch_size=32, lr=0.001, iter=200):
logger.info("Training RND Critic")
# for _ in range(iter):
# for data in iterbatches([ob, ac], batch_size=batch_size, include_final_partial_batch=True):
# self._train(*data, lr)
import numpy as np
import tensorflow as tf
from baselines.common import tf_util as U
from baselines.common.dataset import iterbatches
from baselines import logger
hid_size=128
rnd_hid_size=128
reward_type=0
scale=250000
reward_type=args.reward
ac_size = env.action_space.sample().shape[0]
ob_size = env.observation_space.shape[0]
# linear model to estimate variance
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
np.random.seed(1)
# randomly create an oracle linear model to estimate
param = np.random.normal(0,1,14).reshape([-1,1])
# calculate response under this oracle model
Y = np.matmul(X,param).flatten() + np.random.normal(0,1,X.shape[0])
# estimate the linear model
beta_hat = np.matmul(np.linalg.inv(np.matmul(X.T,X)),np.matmul(X.T,Y))
# estimate variance
sigma_hat = np.sqrt(np.sum((Y-np.matmul(X,beta_hat))**2)/(X.shape[0]-14))
# calculate a matrix for later use
W = np.linalg.inv(np.matmul(X.T,X))
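# In equation form, the ordinary-least-squares quantities computed above are
#   beta_hat  = (X^T X)^{-1} X^T Y
#   sigma_hat = sqrt( sum_i (Y_i - x_i^T beta_hat)^2 / (n - 14) )
#   W         = (X^T X)^{-1}
# and RND_Critic_Revise.get_reward scores a concatenated (ob, ac) vector x by its predictive
# standard deviation: reward(x) = exp(-(5 * sigma_hat * sqrt(x^T W x))^2), so state-action
# pairs far from the expert data (high predictive variance) receive a reward near zero.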
critic = RND_Critic_Revise(W, sigma_hat, ob_size, ac_size, hid_size=hid_size, rnd_hid_size=rnd_hid_size, scale=scale)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.ndimage.filters import gaussian_filter
def generate_density_plot(x,y):
def myplot(x, y, s, bins=1000):
heatmap, xedges, yedges = np.histogram2d(x, y, bins=bins)
heatmap = gaussian_filter(heatmap, sigma=s)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
return heatmap.T, extent
fig, axs = plt.subplots(nrows=2,ncols=2,figsize=(10,10))
sigmas = [0, 16, 32, 64]
for ax, s in zip(axs.flatten(), sigmas):
if s == 0:
ax.plot(x, y, 'k.', markersize=3)
ax.set_title("Scatter plot")
else:
img, extent = myplot(x, y, s)
ax.imshow(img, extent=extent, origin='lower', cmap=cm.jet)
ax.set_title("Smoothing with $\sigma$ = %d" % s)
plt.show()
X1 = exp_data[0]
X2 = exp_data[1]
generate_density_plot(X2[:,0],X2[:,1])
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
N=100
side = np.linspace(-4, 6, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ac[0] = x[0,i]
ac[1] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
plt.pcolormesh(x, y, z, shading='auto')
plt.show()
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
N=100
side = np.linspace(-1, 3, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ob[0] = x[0,i]
ob[1] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
plt.pcolormesh(x, y, z, shading='auto')
plt.show()
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
N=100
side = np.linspace(-1, 3, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ob[2] = x[0,i]
ob[4] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
plt.pcolormesh(x, y, z, shading='auto')
plt.show()
np.max(X,0)
from matplotlib import pyplot as plt, cm, colors
import numpy as np
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
X1 = exp_data[0]
X2 = exp_data[1]
X = np.concatenate([X1,X2],axis=1)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
fig,ax=plt.subplots(nrows=5,ncols=3,figsize=(10,12))
axes = ax.flatten()
N=100
count = 0
for p in range(6):
for q in range(6):
if p<q:
max_value = np.max([np.max(X,0)[p],np.max(X,0)[q]])+1
min_value = np.max([np.min(X,0)[p],np.min(X,0)[q]])-1
side = np.linspace(min_value, max_value, N)
x, y = np.meshgrid(side, side)
z= np.zeros([N,N])
for i in range(N):
for j in range(N):
ob[p] = x[0,i]
ob[q] = y[j,0]
z[i,j] = critic.get_reward(ob,ac).flatten()[0]
## figure 1.1
axes[count].pcolormesh(x, y, z, shading='auto')
axes[count].set_title("dim "+str(p+1) + " vs " + "dim " + str(q+1), fontsize=14)
count = count +1
plt.show()
import numpy as np
import matplotlib.pyplot as plt
fig,ax=plt.subplots(nrows=2,ncols=2,figsize=(10,8))
axes = ax.flatten()
## figure 1.1
axes[0].pcolormesh(x, y, z, shading='auto')
plt.show()
plt.rcParams["figure.figsize"] = [4.50, 3.50]
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
critic.get_reward(ob,ac)
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
reward_ls = []
node = 0
for i in range(100):
ac[node] = i * 0.1 - 5
reward_ls.append(critic.get_reward(ob,ac).flatten()[0])
import numpy as np
import matplotlib.pyplot as plt
x = np.array(range(len(reward_ls)))/10 - 5
plt.plot(x, reward_ls,color="limegreen",linestyle='-', markersize=7)
plt.xlabel('Value of the First Dimension of Action', fontsize=12)
plt.ylabel('Reward', fontsize=12)
plt.tight_layout(pad=4)
# plt.title("Linear Model Variance Estimation Based Reward Function \n (Change the First Dimension of Action)")
plt.show()
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
reward_ls = []
node = 1
for i in range(100):
ac[node] = i * 0.1 - 5
reward_ls.append(critic.get_reward(ob,ac).flatten()[0])
import numpy as np
import matplotlib.pyplot as plt
x = np.array(range(len(reward_ls)))/10 - 5
plt.plot(x, reward_ls,color="limegreen",linestyle='-', markersize=7)
plt.xlabel('Value of the Second Dimension of Action', fontsize=12)
plt.ylabel('Reward', fontsize=12)
plt.tight_layout(pad=4)
# plt.title("Linear Model Variance Estimation Based Reward Function \n (Change the Second Dimension of Action)")
plt.show()
ob = np.mean(X[:,0:11],axis=0)
ac = np.mean(X[:,11:],axis=0)
reward_ls = []
node = 2
for i in range(100):
ac[node] = i * 0.1 - 5
reward_ls.append(critic.get_reward(ob,ac).flatten()[0])
import numpy as np
import matplotlib.pyplot as plt
x = np.array(range(len(reward_ls)))/10 - 5
plt.plot(x, reward_ls,color="limegreen",linestyle='-', markersize=7)
plt.xlabel('Value of the Third Dimension of Action', fontsize=12)
plt.ylabel('Reward', fontsize=12)
plt.tight_layout(pad=4)
plt.title("Linear Model Variance Estimation Based Reward Function \n (Change the Third Dimension of Action)")
plt.show()
# X1 = exp_data[0]
# X2 = exp_data[1]
# X = np.concatenate([X1,X2],axis=1)
# np.random.seed(1)
# param = np.random.normal(0,1,14).reshape([-1,1])
# Y = np.matmul(X,param).flatten() + np.random.normal(0,1,X.shape[0])
# beta_hat = np.matmul(np.linalg.inv(np.matmul(X.T,X)),np.matmul(X.T,Y))
# sigma_hat = np.sqrt(np.sum((Y-np.matmul(X,beta_hat))**2)/(X.shape[0]-14))
# W = np.linalg.inv(np.matmul(X.T,X))
# scale = 5
# x = np.ones(14).reshape([-1,1])
# var = sigma_hat*np.sqrt(np.matmul(np.matmul(x.T,W),x))*scale
# reward_return = np.exp(-var**2)
# print("var: ", var)
# print("reward: ", reward_return)
# x = X[1,:].reshape([-1,1])
# var = sigma_hat*np.sqrt(np.matmul(np.matmul(x.T,W),x))*scale
# reward_return = np.exp(-var**2)
# print("var: ", var)
# print("reward: ", reward_return)
seed = args.seed
reward_giver = critic
dataset = exp_data
g_step = args.g_step
d_step = args.d_step
policy_entcoeff = args.policy_entcoeff
num_timesteps = args.num_timesteps
checkpoint_dir = save_dir
pretrained = args.pretrained
BC_max_iter = args.BC_max_iter
gamma = args.gamma
pretrained_weight = None
from baselines.rnd_gail import trpo_mpi
# Set up for MPI seed
rank = MPI.COMM_WORLD.Get_rank()
if rank != 0:
logger.set_level(logger.DISABLED)
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
set_global_seeds(workerseed)
env.seed(workerseed)
import time
import os
from contextlib import contextmanager
from mpi4py import MPI
from collections import deque
import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from baselines.common import explained_variance, zipsame, dataset, fmt_row
from baselines import logger
from baselines.common.mpi_adam import MpiAdam
from baselines.common.cg import cg
from baselines.gail.statistics import stats
from baselines.common.dataset_plus import iterbatches
env = env
policy_func = policy_fn
reward_giver = reward_giver
expert_dataset = exp_data
rank =rank
pretrained = pretrained
pretrained_weight = pretrained_weight
g_step = g_step
d_step = d_step
entcoeff = policy_entcoeff
max_timesteps=num_timesteps
ckpt_dir=checkpoint_dir
timesteps_per_batch=1024
max_kl=args.max_kl
cg_iters=10
cg_damping=0.1
gamma=gamma
lam=0.97
vf_iters=5
vf_stepsize=1e-3
d_stepsize=3e-4
task_name=task_name
rnd_iter=rnd_iter
dyn_norm=dyn_norm
mmd=args.reward==2
max_iters=0
callback=None
max_episodes=0
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_func("pi", ob_space, ac_space)
oldpi = policy_func("oldpi", ob_space, ac_space)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = entcoeff * meanent
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = pi.get_trainable_variables()
var_list = [v for v in all_var_list if v.name.startswith("pi/pol") or v.name.startswith("pi/logstd")]
vf_var_list = [v for v in all_var_list if v.name.startswith("pi/vff")]
vfadam = MpiAdam(vf_var_list)
get_flat = U.GetFlat(var_list)
set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg], losses)
compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
compute_vflossandgrad = pi.vlossandgrad
def traj_segment_generator(pi, env, reward_giver, horizon, stochastic):
# Initialize state variables
t = 0
ac = env.action_space.sample()
new = True
rew = 0.0
true_rew = 0.0
ob = env.reset()
cur_ep_ret = 0
cur_ep_len = 0
cur_ep_true_ret = 0
ep_true_rets = []
ep_rets = []
ep_lens = []
# Initialize history arrays
obs = np.array([ob for _ in range(horizon)])
true_rews = np.zeros(horizon, 'float32')
rews = np.zeros(horizon, 'float32')
vpreds = np.zeros(horizon, 'float32')
news = np.zeros(horizon, 'int32')
acs = np.array([ac for _ in range(horizon)])
prevacs = acs.copy()
while True:
prevac = ac
ac, vpred = pi.act(stochastic, ob)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if t > 0 and t % horizon == 0:
yield {"ob": obs, "rew": rews, "vpred": vpreds, "new": news,
"ac": acs, "prevac": prevacs, "nextvpred": vpred * (1 - new),
"ep_rets": ep_rets, "ep_lens": ep_lens, "ep_true_rets": ep_true_rets}
_, vpred = pi.act(stochastic, ob)
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_true_rets = []
ep_lens = []
i = t % horizon
obs[i] = ob
vpreds[i] = vpred
news[i] = new
acs[i] = ac
prevacs[i] = prevac
rew = reward_giver.get_reward(ob, ac)
ob, true_rew, new, _ = env.step(ac)
rews[i] = rew
true_rews[i] = true_rew
cur_ep_ret += rew
cur_ep_true_ret += true_rew
cur_ep_len += 1
if new:
ep_rets.append(cur_ep_ret)
ep_true_rets.append(cur_ep_true_ret)
ep_lens.append(cur_ep_len)
cur_ep_ret = 0
cur_ep_true_ret = 0
cur_ep_len = 0
ob = env.reset()
t += 1
def add_vtarg_and_adv(seg, gamma, lam):
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
def allmean(x):
assert isinstance(x, np.ndarray)
out = np.empty_like(x)
MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
out /= nworkers
return out
U.initialize()
th_init = get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
vfadam.sync()
if rank == 0:
print("Init param sum", th_init.sum(), flush=True)
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, reward_giver, timesteps_per_batch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
true_rewbuffer = deque(maxlen=40)
assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1
ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
# if provide pretrained weight
if pretrained_weight is not None:
U.load_variables(pretrained_weight, variables=pi.get_variables())
else:
if not dyn_norm:
pi.ob_rms.update(expert_dataset[0])
if not mmd:
reward_giver.train(*expert_dataset, iter=rnd_iter)
best = -2000
save_ind = 0
max_save = 3
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
logger.log("********** Iteration %i ************" % iters_so_far)
def fisher_vector_product(p):
return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p
# ------------------ Update G ------------------
# logger.log("Optimizing Policy...")
for _ in range(g_step):
seg = seg_gen.__next__()
#mmd reward
if mmd:
reward_giver.set_b2(seg["ob"], seg["ac"])
seg["rew"] = reward_giver.get_reward(seg["ob"], seg["ac"])
#report stats and save policy if any good
lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
true_rewbuffer.extend(true_rets)
lenbuffer.extend(lens)
rewbuffer.extend(rews)
true_rew_avg = np.mean(true_rewbuffer)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpTrueRewMean", true_rew_avg)
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
logger.record_tabular("Best so far", best)
# Save model
if ckpt_dir is not None and true_rew_avg >= best:
best = true_rew_avg
fname = os.path.join(ckpt_dir, task_name)
os.makedirs(os.path.dirname(fname), exist_ok=True)
pi.save_policy(fname+"_"+str(save_ind))
save_ind = (save_ind+1) % max_save
#compute gradient towards next policy
add_vtarg_and_adv(seg, gamma, lam)
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
if hasattr(pi, "ob_rms") and dyn_norm: pi.ob_rms.update(ob) # update running mean/std for policy
args = seg["ob"], seg["ac"], atarg
fvpargs = [arr[::5] for arr in args]
assign_old_eq_new() # set old parameter values to new parameter values
*lossbefore, g = compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=False)
assert np.isfinite(stepdir).all()
shs = .5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
fullstep = stepdir / lm
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
set_from_flat(thbefore)
if nworkers > 1 and iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
if pi.use_popart:
pi.update_popart(tdlamret)
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]),
include_final_partial_batch=False, batch_size=128):
if hasattr(pi, "ob_rms") and dyn_norm:
pi.ob_rms.update(mbob) # update running mean/std for policy
vfadam.update(allmean(compute_vflossandgrad(mbob, mbret)), vf_stepsize)
g_losses = meanlosses
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
if rank == 0:
logger.dump_tabular()
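The add_vtarg_and_adv helper above is a direct implementation of GAE(lambda): delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t), accumulated backwards as A_t = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}. A minimal standalone sketch of the same recursion on plain NumPy arrays (the numbers are made up for illustration only):

import numpy as np

def gae(rew, vpred, new, nextvpred, gamma, lam):
    # Mirror of add_vtarg_and_adv on plain arrays.
    new = np.append(new, 0)                # "done" flags, one extra slot for t = T
    vpred = np.append(vpred, nextvpred)    # bootstrap value for the final state
    T = len(rew)
    adv = np.empty(T, 'float32')
    lastgaelam = 0.0
    for t in reversed(range(T)):
        nonterminal = 1 - new[t + 1]
        delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]
        adv[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    return adv, adv + vpred[:-1]           # advantages and TD(lambda) returns

adv, tdlam = gae(rew=np.array([1.0, 1.0, 1.0]),
                 vpred=np.array([0.5, 0.5, 0.5]),
                 new=np.array([0, 0, 0]),
                 nextvpred=0.5, gamma=0.995, lam=0.97)

The two outputs correspond to what the loop above feeds into the surrogate objective (atarg, after standardisation) and into the value-function regression (tdlamret).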
| 32.586341
| 143
| 0.632945
| 3,437
| 0.102901
| 2,106
| 0.063052
| 0
| 0
| 0
| 0
| 7,311
| 0.218886
|
b03a815221b3f33cdcf33d82406be159b843f64d
| 2,096
|
py
|
Python
|
School-Management-System/teachers/views.py
|
GisaKaze/Python-Quarantine-Projects
|
29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4
|
[
"MIT"
] | null | null | null |
School-Management-System/teachers/views.py
|
GisaKaze/Python-Quarantine-Projects
|
29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4
|
[
"MIT"
] | null | null | null |
School-Management-System/teachers/views.py
|
GisaKaze/Python-Quarantine-Projects
|
29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404, redirect
from .models import TeacherInfo
from .forms import CreateTeacher
from django.contrib import messages
from django.core.paginator import Paginator
# Create your views here.
def teacher_list(request):
teachers = TeacherInfo.objects.all()
paginator = Paginator(teachers, 1)
page = request.GET.get('page')
paged_teachers = paginator.get_page(page)
context = {
"teachers": paged_teachers
}
return render(request, "teachers/teacher_list.html", context)
def single_teacher(request, teacher_id):
single_teacher = get_object_or_404(TeacherInfo, pk=teacher_id)
context = {
"single_teacher": single_teacher
}
return render(request, "teachers/single_teacher.html", context)
def create_teacher(request):
if request.method == "POST":
forms = CreateTeacher(request.POST, request.FILES or None)
if forms.is_valid():
forms.save()
messages.success(request, "Teacher Registration Successfully!")
return redirect("teacher_list")
else:
forms = CreateTeacher()
context = {
"forms": forms
}
return render(request, "teachers/create_teacher.html", context)
def edit_teacher(request, pk):
teacher_edit = TeacherInfo.objects.get(id=pk)
edit_teacher_forms = CreateTeacher(instance=teacher_edit)
if request.method == "POST":
edit_teacher_forms = CreateTeacher(request.POST, request.FILES or None, instance=teacher_edit)
if edit_teacher_forms.is_valid():
edit_teacher_forms.save()
messages.success(request, "Edit Teacher Info Successfully!")
return redirect("teacher_list")
context = {
"edit_teacher_forms": edit_teacher_forms
}
return render(request, "teachers/edit_teacher.html", context)
def delete_teacher(request, teacher_id):
teacher_delete = TeacherInfo.objects.get(id=teacher_id)
teacher_delete.delete()
messages.success(request, "Delete Teacher Info Successfully")
return redirect("teacher_list")
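These views redirect to a route named "teacher_list", so the project must define a URLconf for them elsewhere. A hypothetical teachers/urls.py wiring them up could look like the sketch below; the URL paths and the other route names are assumptions, only "teacher_list" is implied by the redirects above.

from django.urls import path
from . import views

urlpatterns = [
    path('', views.teacher_list, name='teacher_list'),
    path('<int:teacher_id>/', views.single_teacher, name='single_teacher'),
    path('create/', views.create_teacher, name='create_teacher'),
    path('<int:pk>/edit/', views.edit_teacher, name='edit_teacher'),
    path('<int:teacher_id>/delete/', views.delete_teacher, name='delete_teacher'),
]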
| 30.376812
| 102
| 0.705153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 357
| 0.170324
|
b03af16df806f7a2f213bb90c1c62ae5588655f0
| 4,326
|
py
|
Python
|
runner_service/controllers/jobs.py
|
tonykhbo/ansible-runner-service
|
200bd9aa67fc0fd66a4425cfb38a2ac3aed4d4b2
|
[
"Apache-2.0"
] | 174
|
2018-11-21T07:44:50.000Z
|
2022-03-04T15:11:56.000Z
|
runner_service/controllers/jobs.py
|
tonykhbo/ansible-runner-service
|
200bd9aa67fc0fd66a4425cfb38a2ac3aed4d4b2
|
[
"Apache-2.0"
] | 76
|
2018-12-12T17:20:37.000Z
|
2021-12-06T11:15:47.000Z
|
runner_service/controllers/jobs.py
|
tonykhbo/ansible-runner-service
|
200bd9aa67fc0fd66a4425cfb38a2ac3aed4d4b2
|
[
"Apache-2.0"
] | 61
|
2018-12-27T15:17:38.000Z
|
2022-03-04T12:29:33.000Z
|
# from flask import request
from flask_restful import request
# import logging
from .utils import log_request
from .base import BaseResource
from ..services.jobs import get_events, get_event
from ..services.utils import APIResponse
import logging
logger = logging.getLogger(__name__)
class ListEvents(BaseResource):
"""Return a list of events within a given playbook run (job) """
@log_request(logger)
def get(self, play_uuid=None):
"""
GET {play_uuid}/events
Return a list of the event uuids for the given job (play_uuid). Filtering is also supported, using the
?varname=value&varname=value syntax
Example.
```
$ curl -k -i --key ./client.key --cert ./client.crt https://localhost:5001/api/v1/jobs/9c1714aa-b534-11e8-8c14-aced5c652dd1/events -X GET
HTTP/1.0 200 OK
Content-Type: application/json
Content-Length: 1142
Server: Werkzeug/0.14.1 Python/3.6.5
Date: Mon, 10 Sep 2018 20:04:53 GMT
{
"status": "OK",
"msg": "",
"data": {
"events": {
"2-0eaf70cd-0d86-4209-a3ca-73c0633afa27": {
"event": "playbook_on_start"
},
"3-aced5c65-2dd1-7634-7812-00000000000b": {
"event": "playbook_on_play_start"
},
"4-aced5c65-2dd1-7634-7812-00000000000d": {
"event": "playbook_on_task_start",
"task": "Step 1"
},
"5-3f6d4b83-df90-401c-9fd7-2b646f00ccfe": {
"event": "runner_on_ok",
"host": "localhost",
"task": "Step 1"
},
"6-aced5c65-2dd1-7634-7812-00000000000e": {
"event": "playbook_on_task_start",
"task": "Step 2"
},
"7-ca1c5d3a-218f-487e-97ec-be5751ac5b40": {
"event": "runner_on_ok",
"host": "localhost",
"task": "Step 2"
},
"8-7c68cc25-9ccc-4b5c-b4b3-fddaf297e7de": {
"event": "playbook_on_stats"
}
},
"total_events": 7
}
}
```
"""
# TODO could the to_dict throw an exception?
filter = request.args.to_dict()
_e = APIResponse()
if not play_uuid:
_e.status, _e.msg = "INVALID", "playbook uuid missing"
return _e.__dict__, self.state_to_http[_e.status]
response = get_events(play_uuid, filter)
return response.__dict__, self.state_to_http[response.status]
class GetEvent(BaseResource):
"""Return the output of a specific task within a playbook"""
@log_request(logger)
def get(self, play_uuid, event_uuid):
"""
GET {play_uuid, event_uuid}
Return the json job event data for a given event uuid within a job
Example.
```
$ curl -k -i --key ./client.key --cert ./client.crt https://localhost:5001/api/v1/jobs/9c1714aa-b534-11e8-8c14-aced5c652dd1/events/2-0eaf70cd-0d86-4209-a3ca-73c0633afa27 -X GET
HTTP/1.0 200 OK
Content-Type: application/json
Content-Length: 480
Server: Werkzeug/0.14.1 Python/3.6.5
Date: Mon, 10 Sep 2018 20:12:03 GMT
{
"status": "OK",
"msg": "",
"data": {
"uuid": "0eaf70cd-0d86-4209-a3ca-73c0633afa27",
"counter": 2,
"stdout": "",
"start_line": 1,
"end_line": 1,
"created": "2018-09-10T20:03:40.145870",
"pid": 27875,
"event_data": {
"pid": 27875,
"playbook_uuid": "0eaf70cd-0d86-4209-a3ca-73c0633afa27",
"playbook": "test.yml"
},
"event": "playbook_on_start"
}
}
```
"""
response = get_event(play_uuid, event_uuid)
return response.__dict__, self.state_to_http[response.status]
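Both resources subclass BaseResource and are mounted elsewhere in the service. A hypothetical registration with flask_restful, with URL prefixes inferred from the curl examples in the docstrings above, might look like this:

from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)

# Route parameter names match the get() signatures of the two resources.
api.add_resource(ListEvents, '/api/v1/jobs/<play_uuid>/events')
api.add_resource(GetEvent, '/api/v1/jobs/<play_uuid>/events/<event_uuid>')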
| 32.772727
| 184
| 0.502312
| 4,034
| 0.932501
| 0
| 0
| 3,828
| 0.884882
| 0
| 0
| 3,414
| 0.789182
|
b0433121aa8bbd1327d3221055a476dfcaf07db3
| 136
|
py
|
Python
|
case3/test_calc.py
|
emre/unit-test-workshop
|
6a323dd7ffac08e7aa56e09d307798d4ae984fa9
|
[
"MIT"
] | 1
|
2017-11-20T18:15:12.000Z
|
2017-11-20T18:15:12.000Z
|
case3/test_calc.py
|
emre/unit-test-workshop
|
6a323dd7ffac08e7aa56e09d307798d4ae984fa9
|
[
"MIT"
] | null | null | null |
case3/test_calc.py
|
emre/unit-test-workshop
|
6a323dd7ffac08e7aa56e09d307798d4ae984fa9
|
[
"MIT"
] | null | null | null |
import unittest
# https://docs.python.org/3/library/unittest.html
from calc import Calc
class TestCalc(unittest.TestCase):
pass
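TestCalc is left as an empty stub. A sketch of what a first test method might look like, assuming Calc exposes an add() method — the real interface lives in calc.py and may differ:

class TestCalcExample(unittest.TestCase):
    def setUp(self):
        self.calc = Calc()

    def test_add_returns_sum(self):
        # Calc.add(a, b) is an assumed method; adapt to the actual calc.py API.
        self.assertEqual(self.calc.add(2, 3), 5)


if __name__ == '__main__':
    unittest.main()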
| 13.6
| 49
| 0.757353
| 43
| 0.316176
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.360294
|
b043e0116441bcee9ae6a5419079e591b49e7c1e
| 3,267
|
py
|
Python
|
tests/service/test_integer_converter_service.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 25
|
2018-03-04T07:49:21.000Z
|
2022-03-28T05:20:50.000Z
|
tests/service/test_integer_converter_service.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 22
|
2018-03-25T13:19:45.000Z
|
2020-11-28T17:21:08.000Z
|
tests/service/test_integer_converter_service.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 31
|
2018-03-25T09:45:13.000Z
|
2022-03-24T05:32:18.000Z
|
import unittest
from unittest.mock import patch
from waves_gateway.model import Transaction, TransactionReceiver
from waves_gateway.service import IntegerConverterService
class IntegerConverterServiceSpec(unittest.TestCase):
@patch.multiple( # type: ignore
IntegerConverterService, __abstractmethods__=set())
def setUp(self):
self._integer_converter = IntegerConverterService()
def test_revert_amount_conversion(self):
res = self._integer_converter.revert_amount_conversion(40)
self.assertEqual(res, 40)
def test_convert_amount_to_int(self):
res = self._integer_converter.convert_amount_to_int(40.33)
self.assertEqual(res, 40.33)
def test_safely_convert_to_int_success(self):
with patch.object(self._integer_converter, 'convert_amount_to_int'):
self._integer_converter.convert_amount_to_int.return_value = 40
res = self._integer_converter.safely_convert_to_int(0.40)
self.assertEqual(res, 40)
def test_safely_convert_to_int_throws(self):
with patch.object(self._integer_converter, 'convert_amount_to_int'):
self._integer_converter.convert_amount_to_int.return_value = 0.40
with self.assertRaises(TypeError):
self._integer_converter.safely_convert_to_int(0.40)
def test_convert_transaction_to_int(self):
transaction = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=0.40),
TransactionReceiver(address="9782364", amount=0.30)
])
expected_result = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=40),
TransactionReceiver(address="9782364", amount=30)
])
with patch.object(self._integer_converter, 'safely_convert_to_int'):
def stub(amount: float):
return int(amount * 100)
self._integer_converter.safely_convert_to_int.side_effect = stub
actual_result = self._integer_converter.convert_transaction_to_int(transaction)
self.assertEqual(actual_result, expected_result)
self.assertEqual(self._integer_converter.safely_convert_to_int.call_count, 2)
def test_revert_transaction_conversion(self):
expected_result = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=0.40),
TransactionReceiver(address="9782364", amount=0.30)
])
transaction = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=40),
TransactionReceiver(address="9782364", amount=30)
])
with patch.object(self._integer_converter, 'revert_amount_conversion'):
def stub(amount: float):
return float(amount / 100)
self._integer_converter.revert_amount_conversion.side_effect = stub
actual_result = self._integer_converter.revert_transaction_conversion(transaction)
self.assertEqual(actual_result, expected_result)
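The setUp above clears __abstractmethods__ so the abstract IntegerConverterService can be instantiated directly in tests. In gateway code one would subclass it instead; a hedged sketch of a concrete converter that stores amounts as integer cents (the framework's exact abstract interface may require more than these two methods):

class CentConverter(IntegerConverterService):
    """Hypothetical converter: floats in currency units <-> integer cents."""

    def convert_amount_to_int(self, amount):
        return int(round(amount * 100))

    def revert_amount_conversion(self, amount):
        return amount / 100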
| 37.551724
| 94
| 0.67034
| 3,091
| 0.946128
| 0
| 0
| 173
| 0.052954
| 0
| 0
| 221
| 0.067646
|
b044475c3b8a25898a8527a87ed6dc1d9dadbb1d
| 6,670
|
py
|
Python
|
live_demo.py
|
GerryZhang7/ASL-Translator-
|
3963311d8dd1f010ee5a19b3760b451bc287ab1e
|
[
"MIT"
] | null | null | null |
live_demo.py
|
GerryZhang7/ASL-Translator-
|
3963311d8dd1f010ee5a19b3760b451bc287ab1e
|
[
"MIT"
] | null | null | null |
live_demo.py
|
GerryZhang7/ASL-Translator-
|
3963311d8dd1f010ee5a19b3760b451bc287ab1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LIVE DEMO
This script loads a pre-trained model (for best results use pre-trained weights for classification block)
and classifies American Sign Language finger spelling frame-by-frame in real-time
"""
import string
import cv2
import time
from processing import square_pad, preprocess_for_vgg
from model import create_model
import argparse
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", default=None,
help="path to the model weights")
required_ap = ap.add_argument_group('required arguments')
required_ap.add_argument("-m", "--model",
type=str, default="resnet", required=True,
help="name of pre-trained network to use")
args = vars(ap.parse_args())
# ====== Create model for real-time classification ======
# =======================================================
# Map model names to classes
MODELS = ["resnet", "vgg16", "inception", "xception", "mobilenet"]
if args["model"] not in MODELS:
raise AssertionError("The --model command line argument should be one of the entries in the `MODELS` list")
# Create pre-trained model + classification block, with or without pre-trained weights
my_model = create_model(model=args["model"],
model_weights_path=args["weights"])
# Dictionary to convert numerical classes to alphabet
label_dict = {pos: letter
for pos, letter in enumerate(string.ascii_uppercase)}
# ====================== Live loop ======================
# =======================================================
video_capture = cv2.VideoCapture(0)
#if not video_capture.isOpened():
# raise Exception("Could not open video device")
# Set properties. Each returns True on success (i.e. correct resolution)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 5000)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 5000)
path = "C:/Users/Desktop/splash.jpg"
img = cv2.imread(path)
imgWrite = np.zeros((512, 512, 3), np.uint8)
flag1 = 0
flag2 = 0
flag3 = 0
fps = 0
i = 0
timer = 0
start = time.time()
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
fps += 1
timer += 1
# Draw rectangle around face
x = 313
y = 82
w = 451
h = 568
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 3)
# Crop + process captured frame
hand = frame[83:650, 314:764]
#hand = frame[0:1000, 0:1000]
hand = square_pad(hand)
hand = preprocess_for_vgg(hand)
# Make prediction
my_predict = my_model.predict(hand,
batch_size=1,
verbose=0)
# Predict letter
top_prd = np.argmax(my_predict)
if (flag1 == 1):
cv2.putText(frame, text="hi ",
org=(50, (560 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
if (flag2 == 1):
cv2.putText(frame, text="im ",
org=(185, (560 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
if (flag3 == 1):
cv2.putText(frame, text="good",
org=(300, (560 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
timer = -50
# Only display predictions with probabilities greater than 0.5
#if np.max(my_predict) >= 0.50:
#if timer >= 15:
if np.max(my_predict) >= 0.9925 and timer >= 12:
timer = 0
prediction_result = "hi im good"
#prediction_result = label_dict[top_prd]
preds_list = np.argsort(my_predict)[0]
#pred_2 = label_dict[preds_list[-2]]
#pred_3 = label_dict[preds_list[-3]]
width = int(video_capture.get(3) + 0.5)
height = int(video_capture.get(4) + 0.5)
# Annotate image with most probable prediction
if i != 2 and i != 5 and i != 10:
cv2.putText(frame, text=prediction_result[i],
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=17, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
elif i == 2:
cv2.putText(frame, text="[space]",
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=5, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
flag1 = 1
#cv2.imshow("img", img)
#cv2.imwrite("splash.jpg", img)
#cv2.waitKey(0)
elif i == 5:
cv2.putText(frame, text="[space]",
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=5, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
flag2 = 1
cv2.imwrite(path, frame)
elif i == 10:
cv2.putText(frame, text="[space]",
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=5, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
flag3 = 1
i = (i+1) % (len(prediction_result)+1)
# Annotate image with second most probable prediction (displayed on bottom left)
'''cv2.putText(frame, text=pred_2,
org=(width // 2 + width // 5 + 40, (360 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
# Annotate image with third probable prediction (displayed on bottom right)
cv2.putText(frame, text=pred_3,
org=(width // 2 + width // 3 + 5, (360 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)'''
# Display the resulting frame
cv2.imshow('Video', frame)
# Press 'q' to exit live loop
if cv2.waitKey(10) & 0xFF == ord('q'):
break
# Calculate frames per second
end = time.time()
FPS = fps/(end-start)
print("[INFO] approx. FPS: {:.2f}".format(FPS))
# Release the capture
video_capture.release()
cv2.destroyAllWindows()
| 33.517588
| 105
| 0.553373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,480
| 0.371814
|
b044b434998843e21fedc472b72d6aa6d023641a
| 8,770
|
py
|
Python
|
prob2020/python/gene_sequence.py
|
KarchinLab/probabilistic2020
|
8e0b1b9578bd8189b1690dd2f17476c3305b98dc
|
[
"Apache-2.0"
] | 8
|
2016-04-30T03:26:40.000Z
|
2021-09-17T04:47:08.000Z
|
prob2020/python/gene_sequence.py
|
KarchinLab/probabilistic2020
|
8e0b1b9578bd8189b1690dd2f17476c3305b98dc
|
[
"Apache-2.0"
] | 9
|
2016-08-18T15:19:04.000Z
|
2019-07-17T18:16:52.000Z
|
prob2020/python/gene_sequence.py
|
KarchinLab/probabilistic2020
|
8e0b1b9578bd8189b1690dd2f17476c3305b98dc
|
[
"Apache-2.0"
] | 7
|
2016-10-19T03:43:42.000Z
|
2021-07-31T02:40:20.000Z
|
"""Fetches gene sequence from gene fasta created by extract_genes.py"""
import prob2020.python.utils as utils
class GeneSequence(object):
def __init__(self, fasta_obj,
nuc_context=1.5):
self.fasta = fasta_obj
self.nuc_context = nuc_context
def set_gene(self, bed_line):
"""Updates gene sequence for a new gene (bed line).
Parameters
----------
bed_line : BedLine
BedLine object representing a single gene in a BED file
"""
self.bed = bed_line # gene that was specified as BED
self._reset_seq() # fetch sequence for bed line
def _reset_seq(self):
"""Updates attributes for gene represented in the self.bed attribute.
Sequences are always upper case.
"""
exon_seq_list, five_ss_seq_list, three_ss_seq_list = self._fetch_seq()
self.exon_seq = ''.join(exon_seq_list)
self.three_prime_seq = three_ss_seq_list
self.five_prime_seq = five_ss_seq_list
self._to_upper() # make sure all sequences are in upper case
def add_germline_variants(self, germline_nucs, coding_pos):
"""Add potential germline variants into the nucleotide sequence.
Sequenced individuals may potentially have a SNP at a somatic mutation position.
Therefore they may differ from the reference genome. This method updates the
germline gene sequence to match the actual individual.
Parameters
----------
germline_nucs : list of str
list of DNA nucleotides containing the germline letter
coding_pos : int
0-based nucleotide position in coding sequence
NOTE: the self.exon_seq attribute is updated, no return value
"""
if len(germline_nucs) != len(coding_pos):
raise ValueError('Each germline nucleotide should have a coding position')
es = list(self.exon_seq)
for i in range(len(germline_nucs)):
gl_nuc, cpos = germline_nucs[i].upper(), coding_pos[i]
if not utils.is_valid_nuc(gl_nuc):
raise ValueError('{0} is not a valid nucleotide'.format(gl_nuc))
if cpos >= 0:
es[cpos] = gl_nuc
self.exon_seq = ''.join(es)
def _to_upper(self):
"""Convert sequences to upper case."""
self.exon_seq = self.exon_seq.upper()
self.three_prime_seq = [s.upper() for s in self.three_prime_seq]
self.five_prime_seq = [s.upper() for s in self.five_prime_seq]
def _fetch_seq(self):
"""Fetches gene sequence from PySAM fasta object.
Returns
-------
exons : list of str
list of exon nucleotide sequences
five_prime_ss : list of str
list of 5' splice site sequences
three_prime_ss : list of str
list of 3' splice site sequences
"""
exons = []
three_prime_ss = []
five_prime_ss = []
num_exons = self.bed.get_num_exons()
for i in range(num_exons):
# add exon sequence
tmp_id = '{0};exon{1}'.format(self.bed.gene_name, i)
tmp_exon = self.fasta.fetch(reference=tmp_id)
exons.append(tmp_exon)
# add splice site sequence
tmp_id_3ss = '{0};3SS'.format(tmp_id)
tmp_id_5ss = '{0};5SS'.format(tmp_id)
if num_exons == 1:
pass
elif i == 0:
tmp_5ss = self.fasta.fetch(tmp_id_5ss)
five_prime_ss.append(tmp_5ss)
elif i == (num_exons - 1):
tmp_3ss = self.fasta.fetch(tmp_id_3ss)
three_prime_ss.append(tmp_3ss)
else:
tmp_3ss = self.fasta.fetch(tmp_id_3ss)
tmp_5ss = self.fasta.fetch(tmp_id_5ss)
three_prime_ss.append(tmp_3ss)
five_prime_ss.append(tmp_5ss)
return exons, five_prime_ss, three_prime_ss
def _fetch_5ss_fasta(fasta, gene_name, exon_num,
chrom, strand, start, end):
"""Retreives the 5' SS sequence flanking the specified exon.
Returns a string in fasta format with the first line containing
a ">" and the second line contains the two base pairs of 5' SS.
Parameters
----------
fasta : pysam.Fastafile
fasta object from pysam
gene_name : str
gene name used for fasta seq id
exon_num : int
the `exon_num` exon, used for seq id
chrom : str
chromosome
strand : str
strand, {'+', '-'}
start : int
0-based start position
end : int
0-based end position
Returns
-------
ss_fasta : str
string in fasta format with first line being seq id
"""
if strand == '+':
ss_seq = fasta.fetch(reference=chrom,
start=end-1,
end=end+3)
elif strand == '-':
ss_seq = fasta.fetch(reference=chrom,
start=start-3,
end=start+1)
ss_seq = utils.rev_comp(ss_seq)
ss_fasta = '>{0};exon{1};5SS\n{2}\n'.format(gene_name,
exon_num,
ss_seq.upper())
return ss_fasta
def _fetch_3ss_fasta(fasta, gene_name, exon_num,
chrom, strand, start, end):
"""Retreives the 3' SS sequence flanking the specified exon.
Returns a string in fasta format with the first line containing
a ">" and the second line contains the two base pairs of 3' SS.
Parameters
----------
fasta : pysam.Fastafile
fasta object from pysam
gene_name : str
gene name used for fasta seq id
exon_num : int
the `exon_num` exon, used for seq id
chrom : str
chromosome
strand : str
strand, {'+', '-'}
start : int
0-based start position
end : int
0-based end position
Returns
-------
ss_fasta : str
string in fasta format with first line being seq id
"""
if strand == '-':
ss_seq = fasta.fetch(reference=chrom,
start=end-1,
end=end+3)
ss_seq = utils.rev_comp(ss_seq)
elif strand == '+':
ss_seq = fasta.fetch(reference=chrom,
start=start-3,
end=start+1)
ss_fasta = '>{0};exon{1};3SS\n{2}\n'.format(gene_name,
exon_num,
ss_seq.upper())
return ss_fasta
def fetch_gene_fasta(gene_bed, fasta_obj):
"""Retreive gene sequences in FASTA format.
Parameters
----------
gene_bed : BedLine
BedLine object representing a single gene
fasta_obj : pysam.Fastafile
fasta object for indexed retrieval of sequence
Returns
-------
gene_fasta : str
sequence of gene in FASTA format
"""
gene_fasta = ''
strand = gene_bed.strand
exons = gene_bed.get_exons()
if strand == '-':
exons.reverse() # order exons 5' to 3', so reverse if '-' strand
# iterate over exons
for i, exon in enumerate(exons):
exon_seq = fasta_obj.fetch(reference=gene_bed.chrom,
start=exon[0],
end=exon[1]).upper()
if strand == '-':
exon_seq = utils.rev_comp(exon_seq)
exon_fasta = '>{0};exon{1}\n{2}\n'.format(gene_bed.gene_name,
i, exon_seq)
# get splice site sequence
if len(exons) == 1:
# splice sites don't matter if there is no splicing
ss_fasta = ''
elif i == 0:
# first exon only, get 3' SS
ss_fasta = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
elif i == (len(exons) - 1):
# last exon only, get 5' SS
ss_fasta = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
else:
# middle exon, get both 5' and 3' SS
fasta_3ss = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
fasta_5ss = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
ss_fasta = fasta_5ss + fasta_3ss
gene_fasta += exon_fasta + ss_fasta
return gene_fasta
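The two splice-site helpers above fetch a four-base window spanning each exon boundary (one exonic base plus three intronic bases) and reverse-complement it on the '-' strand. A self-contained sketch of the same coordinate arithmetic against an in-memory string standing in for pysam's fetch (the sequence below is made up):

def rev_comp(seq):
    comp = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return ''.join(comp[b] for b in reversed(seq))

def five_prime_ss(genome, strand, start, end):
    # Mirrors _fetch_5ss_fasta: genome[end-1:end+3] on '+', reverse complement
    # of genome[start-3:start+1] on '-'.
    if strand == '+':
        return genome[end - 1:end + 3]
    return rev_comp(genome[start - 3:start + 1])

genome = 'ACGTACGTAGGTAAGTACGT'              # made-up reference sequence
print(five_prime_ss(genome, '+', 4, 10))     # 4 bp window at the exon/intron junction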
| 34.801587
| 89
| 0.55382
| 3,881
| 0.442531
| 0
| 0
| 0
| 0
| 0
| 0
| 3,686
| 0.420296
|
b04538316ec8e7dec6961b4c00010c7027a8e97d
| 1,118
|
py
|
Python
|
src/main/python/request/http_request.py
|
photowey/pytest-dynamic-framework
|
4e7b6d74594191006b50831d42e7aae21e154d56
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/request/http_request.py
|
photowey/pytest-dynamic-framework
|
4e7b6d74594191006b50831d42e7aae21e154d56
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/request/http_request.py
|
photowey/pytest-dynamic-framework
|
4e7b6d74594191006b50831d42e7aae21e154d56
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# ---------------------------------------------
# @file http_request
# @description http_request
# @author WcJun
# @date 2021/07/19
# ---------------------------------------------
from src.main.python.request.options import RequestOptions
class HttpRequest:
"""
Http Request
"""
def __init__(self, options: RequestOptions):
self.url = options.url
self.method = options.method
self.body = options.body
self.headers = options.headers
self.parameters = options.parameters
self.ssl = options.ssl
self.mock_enabled = options.mock_enabled
self.mock_response = options.mock_response
def populateUrlParameters(self) -> str:
param_chain: list = ['?']
if type(self.parameters) == dict and len(self.parameters) > 0:
for parameter_key in self.parameters.keys():
single_param: list = [parameter_key, '=', self.parameters[parameter_key], '&']
param_chain.append(''.join(single_param))
chain_str: str = ''.join(param_chain)
return chain_str[:-1]
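A hedged usage sketch of populateUrlParameters. The real RequestOptions constructor is defined in options.py, so a SimpleNamespace with the same attribute names stands in for it here; note that parameter values must already be strings, because the method concatenates them with ''.join:

from types import SimpleNamespace

opts = SimpleNamespace(url='https://example.com/api', method='GET', body=None,
                       headers={}, parameters={'page': '1', 'size': '20'},
                       ssl=True, mock_enabled=False, mock_response=None)
req = HttpRequest(opts)
print(req.populateUrlParameters())   # -> ?page=1&size=20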
| 29.421053
| 92
| 0.573345
| 851
| 0.761181
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.211986
|
b04682256b68f1be1d146f950d4cf5cacbc05399
| 5,728
|
py
|
Python
|
bot/helper/mirror_utils/download_utils/aria2_download.py
|
vincreator/Eunha
|
85a702a5b5f30ccea1798122c261d4ff07fe0c0c
|
[
"Apache-2.0"
] | null | null | null |
bot/helper/mirror_utils/download_utils/aria2_download.py
|
vincreator/Eunha
|
85a702a5b5f30ccea1798122c261d4ff07fe0c0c
|
[
"Apache-2.0"
] | null | null | null |
bot/helper/mirror_utils/download_utils/aria2_download.py
|
vincreator/Eunha
|
85a702a5b5f30ccea1798122c261d4ff07fe0c0c
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep
from threading import Thread
from bot import aria2, download_dict_lock, download_dict, STOP_DUPLICATE, TORRENT_DIRECT_LIMIT, ZIP_UNZIP_LIMIT, LOGGER, STORAGE_THRESHOLD
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.bot_utils import is_magnet, getDownloadByGid, new_thread, get_readable_file_size
from bot.helper.mirror_utils.status_utils.aria_download_status import AriaDownloadStatus
from bot.helper.telegram_helper.message_utils import sendMarkup, sendStatusMessage, sendMessage
from bot.helper.ext_utils.fs_utils import get_base_name, check_storage_threshold
@new_thread
def __onDownloadStarted(api, gid):
try:
if any([STOP_DUPLICATE, TORRENT_DIRECT_LIMIT, ZIP_UNZIP_LIMIT, STORAGE_THRESHOLD]):
sleep(1.5)
dl = getDownloadByGid(gid)
if not dl:
return
download = api.get_download(gid)
if STOP_DUPLICATE and not dl.getListener().isLeech:
LOGGER.info('Checking File/Folder if already in Drive...')
sname = download.name
if dl.getListener().isZip:
sname = sname + ".zip"
elif dl.getListener().extract:
try:
sname = get_base_name(sname)
except:
sname = None
if sname is not None:
smsg, button = GoogleDriveHelper().drive_list(sname, True)
if smsg:
dl.getListener().onDownloadError('File/Folder already available in Drive.\n\n')
api.remove([download], force=True, files=True)
return sendMarkup("Here are the search results:", dl.getListener().bot, dl.getListener().message, button)
if any([ZIP_UNZIP_LIMIT, TORRENT_DIRECT_LIMIT, STORAGE_THRESHOLD]):
sleep(1)
limit = None
size = api.get_download(gid).total_length
arch = any([dl.getListener().isZip, dl.getListener().extract])
if STORAGE_THRESHOLD is not None:
acpt = check_storage_threshold(size, arch, True)
# True if files allocated, if allocation disabled remove True arg
if not acpt:
msg = f'You must leave {STORAGE_THRESHOLD}GB free storage.'
msg += f'\nYour File/Folder size is {get_readable_file_size(size)}'
dl.getListener().onDownloadError(msg)
return api.remove([download], force=True, files=True)
if ZIP_UNZIP_LIMIT is not None and arch:
mssg = f'Zip/Unzip limit is {ZIP_UNZIP_LIMIT}GB'
limit = ZIP_UNZIP_LIMIT
elif TORRENT_DIRECT_LIMIT is not None:
mssg = f'Torrent/Direct limit is {TORRENT_DIRECT_LIMIT}GB'
limit = TORRENT_DIRECT_LIMIT
if limit is not None:
LOGGER.info('Checking File/Folder Size...')
if size > limit * 1024**3:
dl.getListener().onDownloadError(f'{mssg}.\nYour File/Folder size is {get_readable_file_size(size)}')
return api.remove([download], force=True, files=True)
except Exception as e:
LOGGER.error(f"{e} onDownloadStart: {gid} stop duplicate and size check didn't pass")
@new_thread
def __onDownloadComplete(api, gid):
LOGGER.info(f"onDownloadComplete: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
if download.followed_by_ids:
new_gid = download.followed_by_ids[0]
new_download = api.get_download(new_gid)
if not dl:
dl = getDownloadByGid(new_gid)
with download_dict_lock:
download_dict[dl.uid()] = AriaDownloadStatus(new_gid, dl.getListener())
LOGGER.info(f'Changed gid from {gid} to {new_gid}')
elif dl:
Thread(target=dl.getListener().onDownloadComplete).start()
@new_thread
def __onDownloadStopped(api, gid):
sleep(4)
dl = getDownloadByGid(gid)
if dl:
dl.getListener().onDownloadError('Dead torrent!')
@new_thread
def __onDownloadError(api, gid):
LOGGER.info(f"onDownloadError: {gid}")
sleep(0.5)
dl = getDownloadByGid(gid)
try:
download = api.get_download(gid)
error = download.error_message
LOGGER.info(f"Download Error: {error}")
except:
# Fall back to a generic message so the onDownloadError call below never hits an undefined name
error = "Failed to fetch error details from aria2"
if dl:
dl.getListener().onDownloadError(error)
def start_listener():
aria2.listen_to_notifications(threaded=True, on_download_start=__onDownloadStarted,
on_download_error=__onDownloadError,
on_download_stop=__onDownloadStopped,
on_download_complete=__onDownloadComplete,
timeout=20)
def add_aria2c_download(link: str, path, listener, filename):
if is_magnet(link):
download = aria2.add_magnet(link, {'dir': path, 'out': filename})
else:
download = aria2.add_uris([link], {'dir': path, 'out': filename})
if download.error_message:
error = str(download.error_message).replace('<', ' ').replace('>', ' ')
LOGGER.info(f"Download Error: {error}")
return sendMessage(error, listener.bot, listener.message)
with download_dict_lock:
download_dict[listener.uid] = AriaDownloadStatus(download.gid, listener)
LOGGER.info(f"Started: {download.gid} DIR: {download.dir} ")
sendStatusMessage(listener.message, listener.bot)
start_listener()
| 46.569106
| 138
| 0.618191
| 0
| 0
| 0
| 0
| 3,977
| 0.694309
| 0
| 0
| 801
| 0.139839
|
b047b2781fee7bef3205107d3cc7277c6707a880
| 3,407
|
py
|
Python
|
gol.py
|
AjayMT/game-of-life
|
681bb92e1d7c0644645af7b77f0106ba2d4c9c20
|
[
"MIT"
] | null | null | null |
gol.py
|
AjayMT/game-of-life
|
681bb92e1d7c0644645af7b77f0106ba2d4c9c20
|
[
"MIT"
] | null | null | null |
gol.py
|
AjayMT/game-of-life
|
681bb92e1d7c0644645af7b77f0106ba2d4c9c20
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from pygamehelper import *
from vec2d import *
from random import randrange
class Matrix:
def __init__(self, w, h):
self.w, self.h = w, h
self._data = []
for i in range(self.w * self.h):
self._data.append(None)
def __getitem__(self, i):
return self._data[i]
def _index(self, x, y):
return x + (y * self.w)
def get(self, x, y):
return self._data[self._index(x, y)]
def set(self, x, y, v):
self._data[self._index(x, y)] = v
class Cell:
def __init__(self, x, y, w):
self.x, self.y, self.w = x, y, w
self.alive = False
def draw(self, screen):
color = (255, 255, 0) if self.alive else (100, 100, 100)
xywh = (self.x * self.w, self.y * self.w, self.w, self.w)
pygame.draw.rect(screen, color, xywh, 0)
pygame.draw.rect(screen, (100, 0, 0), xywh, 1)
class GameOfLife(PygameHelper):
def __init__(self):
self.w, self.h = 800, 600
PygameHelper.__init__(self, size=(self.w, self.h))
self.begin = raw_input('Begin: ') or [3]
self.begin = [int(x) for x in self.begin]
self.stay = raw_input('Stay: ') or [3, 2]
self.stay = [int(x) for x in self.stay]
self.paused = True
self.cellw = input('Cell width: ')
self.cells = Matrix(self.w / self.cellw, self.h / self.cellw)
random = (raw_input
('Random arrangement of live cells? (y/n) ') == 'y')
for i in range(self.cells.w):
for j in range(self.cells.h):
c = Cell(i, j, self.cellw)
if random: c.alive = (randrange(2) == 1)
self.cells.set(i, j, c)
def neighbours(self, c):
n = []
x, y = c.x, c.y
for i in [1, -1, 0]:
for j in [1, -1, 0]:
if i == 0 and j == 0: continue
if (x + i) < 0: i += self.cells.w
if (x + i) >= self.cells.w: i -= self.cells.w
if (y + j) < 0: j += self.cells.h
if (y + j) >= self.cells.h: j -= self.cells.h
n.append(self.cells.get(x + i, y + j))
return n
def mouseUp(self, pos):
if not self.paused: return
x = (pos[0] - (pos[0] % self.cellw)) / self.cellw
y = (pos[1] - (pos[1] % self.cellw)) / self.cellw
c = self.cells.get(x, y)
c.alive = not c.alive
def keyDown(self, key):
if key == 275 and self.paused:
self.paused = False
self.update()
self.draw()
self.paused = True
else:
self.paused = not self.paused
def update(self):
if self.paused: return
changed = []
for c in self.cells:
neighbours = self.neighbours(c)
liveneighbours = [n for n in neighbours if n.alive]
if c.alive:
if len(liveneighbours) not in self.stay:
changed.append(c)
if not c.alive:
if len(liveneighbours) in self.begin:
changed.append(c)
for c in changed:
c.alive = not c.alive
def draw(self):
self.screen.fill((0, 0, 0))
for c in self.cells:
c.draw(self.screen)
pygame.display.update()
g = GameOfLife()
g.mainLoop(60)
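The update rule above is the familiar B/S-parameterised Game of Life (begin defaults to [3], stay to [3, 2], i.e. B3/S23). A pygame-free sketch of one generation on a sparse grid, using the same neighbour-count logic with the standard rules assumed (unlike the Matrix-based version above, this sketch does not wrap around the edges):

from collections import Counter

def life_step(live, begin=(3,), stay=(2, 3)):
    """One generation; `live` is a set of (x, y) coordinates of live cells."""
    counts = Counter((x + dx, y + dy)
                     for (x, y) in live
                     for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                     if (dx, dy) != (0, 0))
    return {cell for cell, n in counts.items()
            if (cell in live and n in stay) or (cell not in live and n in begin)}

# A "blinker" flips between a vertical and a horizontal bar of three cells:
print(sorted(life_step({(1, 0), (1, 1), (1, 2)})))   # [(0, 1), (1, 1), (2, 1)]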
| 27.039683
| 70
| 0.502495
| 3,246
| 0.952744
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.022307
|
b048467d0a750345394b6d343d01156aad3e1cef
| 109
|
py
|
Python
|
pylib/gna/graph/__init__.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | 5
|
2019-10-14T01:06:57.000Z
|
2021-02-02T16:33:06.000Z
|
pylib/gna/graph/__init__.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | null | null | null |
pylib/gna/graph/__init__.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | null | null | null |
from gna.graph.walk import GraphWalker
from gna.graph.timeit import *
from gna.graph.walk_functions import *
| 27.25
| 38
| 0.816514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b048ccf5383075a3e3ddc09cd04494ee80c2a300
| 434
|
py
|
Python
|
Recursion/Aditya_Verma/Hypothesis_Method/Print_N_to_1.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
Recursion/Aditya_Verma/Hypothesis_Method/Print_N_to_1.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
Recursion/Aditya_Verma/Hypothesis_Method/Print_N_to_1.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
# Forward Implementation
def print_to_n_reverse(n):
if n == 1: # Base Condition
print(1, end=" ")
return
print(n, end=" ") # Induction
print_to_n_reverse(n - 1) # Hypothesis
# Backward implementation
# - Here the backward implementation would be a bit tricky to do,
# - Forward implementation makes more sense, if you think in terms of the input n
if __name__ == "__main__":
print_to_n_reverse(7)
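For contrast with the forward approach above: if the recursive call is made before the print, the numbers come out on the way back up, i.e. 1 to N rather than N to 1 — which is exactly why the induction step (the print) is placed before the hypothesis call when printing N to 1. A minimal sketch:

def print_1_to_n(n):
    if n == 1:               # Base Condition
        print(1, end=" ")
        return
    print_1_to_n(n - 1)      # Hypothesis: prints 1 .. n-1 first
    print(n, end=" ")        # Induction: n is printed on the way back up


print_1_to_n(7)  # 1 2 3 4 5 6 7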
| 25.529412
| 81
| 0.675115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.571429
|
b04a94197db758a9aeced9b7588eec2e7e3ada18
| 7,835
|
py
|
Python
|
certbot_azure/azure_agw.py
|
loufa-io/certbot-azure
|
f081da34fa74c3d2fded08af2da0ca2b5380fa14
|
[
"MIT"
] | null | null | null |
certbot_azure/azure_agw.py
|
loufa-io/certbot-azure
|
f081da34fa74c3d2fded08af2da0ca2b5380fa14
|
[
"MIT"
] | null | null | null |
certbot_azure/azure_agw.py
|
loufa-io/certbot-azure
|
f081da34fa74c3d2fded08af2da0ca2b5380fa14
|
[
"MIT"
] | null | null | null |
"""Azure App Gateway Certbot installer plugin."""
from __future__ import print_function
import os
import sys
import logging
import time
import OpenSSL
import base64
try:
from secrets import token_urlsafe
except ImportError:
from os import urandom
def token_urlsafe(nbytes=None):
return urandom(nbytes)
import zope.component
import zope.interface
from certbot import interfaces
from certbot import errors
from certbot.plugins import common
from azure.common.client_factory import get_client_from_auth_file
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from msrestazure.azure_exceptions import CloudError
from azure.identity import CredentialUnavailableError
from .cred_wrapper import CredentialWrapper
MSDOCS = 'https://docs.microsoft.com/'
ACCT_URL = MSDOCS + 'python/azure/python-sdk-azure-authenticate?view=azure-python#mgmt-auth-file'
AZURE_CLI_URL = MSDOCS + 'cli/azure/install-azure-cli?view=azure-cli-latest'
AZURE_CLI_COMMAND = ("az ad sp create-for-rbac"
" --name Certbot --sdk-auth"
" --scope /subscriptions/<SUBSCRIPTION_ID>/resourceGroups/<RESOURCE_GROUP_ID>"
" > mycredentials.json")
logger = logging.getLogger(__name__)
class Installer(common.Plugin, interfaces.Installer):
description = "Certbot Azure Installer"
@classmethod
def add_parser_arguments(cls, add):
add('resource-group',
help=('Resource Group in which the DNS zone is located'),
default=None)
add('app-gateway-name',
help=('Name of the application gateway'),
default=None)
add('subscription-id',
help=('ID of the subscription containing the DNS Zone or Application Gateway'),
default=None)
def __init__(self, *args, **kwargs):
super(Installer, self).__init__(*args, **kwargs)
self._setup_credentials()
self.azure_client = _AzureClient(self.conf('resource-group'), self.conf('subscription-id'))
def _setup_credentials(self):
if self.conf('resource-group') is None:
raise errors.PluginError('Please specify a resource group using '
'--azure-agw-resource-group <RESOURCEGROUP>')
if self.conf('app-gateway-name') is None:
raise errors.PluginError('Please specify the app gateway name using '
'--azure-agw-app-gateway-name <APPGATEWAYNAME>')
if self.conf('subscription-id') is None:
raise errors.PluginError(
'Please specify your subscription id with --azure-agw-subscription-id')
def prepare(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return ("")
def get_all_names(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def deploy_cert(self, domain, cert_path, key_path, chain_path, fullchain_path):
"""
Upload Certificate to the app gateway
"""
self.azure_client.update_agw(self.conf('app-gateway-name'),domain, key_path, fullchain_path)
def enhance(self, domain, enhancement, options=None): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def supported_enhancements(self): # pylint: disable=missing-docstring,no-self-use
return [] # pragma: no cover
def get_all_certs_keys(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def save(self, title=None, temporary=False): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def rollback_checkpoints(self, rollback=1): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def recovery_routine(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def view_config_changes(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def config_test(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def restart(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def renew_deploy(self, lineage, *args, **kwargs): # pylint: disable=missing-docstring,no-self-use
"""
Renew certificates when calling `certbot renew`
"""
# Run deploy_cert with the lineage params
self.deploy_cert(
lineage.names()[0], lineage.cert_path, lineage.key_path, lineage.chain_path, lineage.fullchain_path)
return
class _AzureClient(object):
"""
Encapsulates all communication with the Azure Cloud DNS API.
"""
def __init__(self, resource_group, subscription_id):
self.resource_group = resource_group
try:
self.credential = CredentialWrapper()
except CredentialUnavailableError as e:
raise errors.PluginError('Unable to acquire identity')
self.resource_client = ResourceManagementClient(self.credential, subscription_id)
self.network_client = NetworkManagementClient(self.credential, subscription_id)
def update_agw(self, agw_name, domain, key_path, fullchain_path):
from azure.mgmt.network.models import ApplicationGatewaySslCertificate
# Generate random password for pfx
password = token_urlsafe(16)
# Get app gateway from client
agw = self.network_client.application_gateways.get(self.resource_group, agw_name)
if "Updating" in [ssl.provisioning_state for ssl in agw.ssl_certificates]:
raise errors.PluginError('There is a certificate in Updating state. Cowardly refusing to add a new one.')
ssl = ApplicationGatewaySslCertificate()
ssl.name = domain + str(int(time.time()))
ssl.data = self._generate_pfx_from_pems(key_path, fullchain_path, password)
ssl.password = password
agw.ssl_certificates.append(ssl)
try:
self.network_client.application_gateways.create_or_update(self.resource_group,
agw_name,
agw)
except CloudError as e:
logger.warning('Encountered error updating app gateway: %s', e)
raise errors.PluginError('Error communicating with the Azure API: {0}'.format(e))
def _generate_pfx_from_pems(self, key_path, fullchain_path, password):
"""Generate PFX file out of PEMs in order to meet App Gateway requirements"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography import x509
p12 = OpenSSL.crypto.PKCS12()
# Load Key into PKCS12 object
with open(key_path, "rb") as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend=default_backend()
)
key = OpenSSL.crypto.PKey.from_cryptography_key(private_key)
p12.set_privatekey(key)
# Load Cert into PKCS12 object
with open(fullchain_path, "rb") as cert_file:
crypto_cert = x509.load_pem_x509_certificate(
cert_file.read(),
default_backend())
cert = OpenSSL.crypto.X509.from_cryptography(crypto_cert)
p12.set_certificate(cert)
# Export object
data = p12.export(passphrase=password)
# Return base64 encoded string
return str(base64.b64encode(data), "utf-8")
| 36.957547
| 117
| 0.664199
| 6,539
| 0.834588
| 0
| 0
| 439
| 0.056031
| 0
| 0
| 2,458
| 0.31372
|
b04b28603590e6dad8f700f43ec0e40f0f4392cb
| 1,999
|
py
|
Python
|
image/apps/Ignitions.py
|
AnthonyRawlinsUoM/MetricsDashboard
|
37594e46b0cec340e10d3123bbaf94b277a3ce22
|
[
"MIT"
] | null | null | null |
image/apps/Ignitions.py
|
AnthonyRawlinsUoM/MetricsDashboard
|
37594e46b0cec340e10d3123bbaf94b277a3ce22
|
[
"MIT"
] | null | null | null |
image/apps/Ignitions.py
|
AnthonyRawlinsUoM/MetricsDashboard
|
37594e46b0cec340e10d3123bbaf94b277a3ce22
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from glob import glob as glob
from extractor.Ignition import Ignition
import logging
logger = logging.getLogger(__name__)
class Ignitions:
"""
"""
def __init__(self, path, scenario_name, redis):
self.redis = redis
self.active = False
self.name = "Ignitions"
self.path = path
self.scenario_name = scenario_name
def load(self, pb, saving=False):
if self.active:
logger.debug('>>> Will extract %s' % self.name)
if saving:
logger.debug('>>> Will save %s' % self.name)
"""
TODO -> take path components from instance parameters and
dynamically generate paths and globs using OS-agnostic calls
to pathlib. Ensure Windows compatibility.
"""
scn = str(Path(self.path).joinpath(
self.scenario_name)) + "_{}*".format(pb)
batch_list = sorted(
glob(scn + "/centralhigh_*/regsim_ignitions.txt"))
for bat in batch_list:
replicate = Util.bid(bat)  # NOTE: Util is not imported above; it is assumed to come from the project's helper utilities
with open(bat, 'r') as ig:
line_count = 0
fdata = ig.read()
for c in fdata.split('\n'):
line_count += 1
if c != '':
try:
line_place = '[line: %10d]' % (line_count)
i = Ignition(c, bat + line_place)
if i.store(pb, replicate, self.redis):
logger.debug('%s [OK] %s', line_place, i)
else:
logger.debug('%s [XX] %s', line_place, c)
except KeyError as e:
logger.error('Ignition Parsing Error')
def question(self):
return "Would you like to extract {} Metrics?".format(self.name)
| 36.345455
| 76
| 0.477239
| 1,848
| 0.924462
| 0
| 0
| 0
| 0
| 0
| 0
| 421
| 0.210605
|
b04cbd151462272c28fb0ccf978f4c3ccbb776cd
| 11,913
|
py
|
Python
|
frontend/alexa/alexa.py
|
jjanetzki/HackHPI-2017
|
5345a4b385b92dff8b665818127e85eb1e14b31f
|
[
"MIT"
] | 1
|
2017-06-17T18:18:55.000Z
|
2017-06-17T18:18:55.000Z
|
frontend/alexa/alexa.py
|
janetzki/Productivity-Bot
|
5345a4b385b92dff8b665818127e85eb1e14b31f
|
[
"MIT"
] | null | null | null |
frontend/alexa/alexa.py
|
janetzki/Productivity-Bot
|
5345a4b385b92dff8b665818127e85eb1e14b31f
|
[
"MIT"
] | null | null | null |
"""
This code sample is a part of a simple demo to show beginners how to create a skill (app) for the Amazon Echo using AWS Lambda and the Alexa Skills Kit.
For the full code sample visit https://github.com/pmckinney8/Alexa_Dojo_Skill.git
"""
from __future__ import print_function
import requests
import json
alcohol_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/add"
caffeine_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/add"
profile_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/setprofile"
caffeine_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/recommendation"
alcohol_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/recommendation"
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "DrinkIntend":
return get_drink_response(intent_request)
elif intent_name == "DrinkFinishedIntend":
return get_finished_drink(intent_request)
elif intent_name == "CaffeineIntend":
return get_caffeine(intent_request)
elif intent_name == "AlcoholIntend":
return get_alcohol(intent_request)
elif intent_name == "CaffeineRecommendationIntend":
return get_caffeine_recommendation()
elif intent_name == "AlcoholRecommendationIntend":
return get_alcohol_recommendation()
elif intent_name == "CaffeineLevelIntend":
return get_caffeine_level()
elif intent_name == "AlcoholLevelIntend":
return get_alcohol_level()
elif intent_name == "SexIntend":
return set_sex(intent_request)
elif intent_name == "BodyweightIntend":
return set_bodyweight(intent_request)
elif intent_name == "AgeIntend":
return set_age(intent_request)
elif intent_name == "AMAZON.HelpIntent":
return get_help_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
session_attributes = {}
card_title = "Welcome"
speech_output = "Welcome to the Productivity Bot. I will help you stay in your Ballmer Peak."
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with the same text.
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def get_help_response():
session_attributes = {}
card_title = "Help"
speech_output = "Welcome to the help section for the Productivity Bot. A couple of examples of phrases that I can except are... What shall I drink... or, how much alcohol does a drink contain. Lets get started now by trying one of these."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_drink_response(intent_request):
session_attributes = {}
card_title = "Drink response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
requests.post(caffeine_url, json={"drink": drink}) # todo: specify serving (ml)
requests.post(alcohol_url, json={"drink": drink}) # todo: specify serving (ml)
speech_output = f"Enjoy your {drink}."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_finished_drink(intent_request):
session_attributes = {}
card_title = "Finished drink response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
# requests.post("https://hpi.de/naumann/sites/ingestion/hackhpi/", json={"drink finished": drink})
speech_output = f"I hope your {drink} was tasty."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine_recommendation():
session_attributes = {}
card_title = "Caffeine recommendation response"
json_answer = requests.get(caffeine_recommendation_url).text
speech_output = json.loads(json_answer)["results"]
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol_recommendation():
session_attributes = {}
card_title = "Alcohol recommendation response"
json_answer = requests.get(alcohol_recommendation_url).text
speech_output = json.loads(json_answer)["results"]
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine(intent_request):
session_attributes = {}
card_title = "Caffeine response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
speech_output = f"{drink} contains a lot of caffeine."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol(intent_request):
session_attributes = {}
card_title = "Alcohol response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
speech_output = f"{drink} contains a lot of alcohol."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine_level():
session_attributes = {}
card_title = "Caffeine level response"
speech_output = "Your caffeine level is over 9000."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol_level():
session_attributes = {}
card_title = "Alcohol level response"
speech_output = "Your alcohol level is over 9000."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_sex(intent_request):
session_attributes = {}
card_title = "Sex response"
sex = intent_request["intent"]["slots"]["Sex"]["value"]
requests.post(profile_url, json={"sex": sex})
speech_output = f"Yes, you are so {sex}."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_bodyweight(intent_request):
session_attributes = {}
card_title = "Bodyweight response"
weight = intent_request["intent"]["slots"]["Number"]["value"]
requests.post(profile_url, json={"bodyweight": weight})
speech_output = f"A bodyweight of {weight} is just perfect!"
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_age(intent_request):
session_attributes = {}
card_title = "Age response"
age = intent_request["intent"]["slots"]["Number"]["value"]
requests.post(profile_url, json={"age": age})
speech_output = f"I am less than {age} years old."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for using the Productivity bot! I hope you were productive."
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
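# A fully assembled response from these two helpers has this JSON shape (values illustrative):
# {'version': '1.0',
#  'sessionAttributes': {},
#  'response': {'outputSpeech': {'type': 'PlainText', 'text': '...'},
#               'card': {'type': 'Simple', 'title': 'SessionSpeechlet - ...', 'content': 'SessionSpeechlet - ...'},
#               'reprompt': {'outputSpeech': {'type': 'PlainText', 'text': '...'}},
#               'shouldEndSession': False}}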
| 39.44702
| 242
| 0.697138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,205
| 0.352976
|
b04d338c3d1c16a12edd8387b7d2185efd9aed7b
| 474
|
py
|
Python
|
day1.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | 2
|
2021-12-04T21:15:14.000Z
|
2021-12-12T09:28:28.000Z
|
day1.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | null | null | null |
day1.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def ints(itr):
return [int(i) for i in itr]
with open(sys.argv[1], "r") as f:
lines = [l for l in f.read().split("\n") if l]
ilist = []
imap = {}
total = 0
result = 0
other = 0
last = -1
while True:
for l in lines:
val = int(l.split()[0])
if last != -1 and val > last:
total += 1
last = val
break
print(f"Total: {total}")
print(f"Result: {result}")
print(f"Other: {other}")
| 12.810811
| 50
| 0.529536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.172996
|
b04e83f0c6c5bd946cc75a63519557d702719e38
| 2,142
|
py
|
Python
|
pythingspeak/test_pythingspeak.py
|
mdauphin/pythingspeak
|
d5971e9347b17a14221564a368fe032ca6acaa03
|
[
"MIT"
] | null | null | null |
pythingspeak/test_pythingspeak.py
|
mdauphin/pythingspeak
|
d5971e9347b17a14221564a368fe032ca6acaa03
|
[
"MIT"
] | null | null | null |
pythingspeak/test_pythingspeak.py
|
mdauphin/pythingspeak
|
d5971e9347b17a14221564a368fe032ca6acaa03
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
import pythingspeak
import unittest
class TestPyThingSpeak(unittest.TestCase):
def test_update(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.update( [ 1, 2 ] )
self.assertTrue(results)
def test_feeds(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.feeds()
self.assertTrue(results)
def test_last(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.last()
self.assertTrue(results)
def test_entry(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.entry(1)
self.assertTrue(results)
def test_fields(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.fields(1)
self.assertTrue(results)
def test_last_field(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.last_field(1)
self.assertTrue(results)
def test_status_update(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.status_update()
self.assertTrue(results)
def test_list_public_channels(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.list_public_channels()
self.assertTrue(results)
def test_list_my_channels(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.list_my_channels()
self.assertTrue(results)
def test_view_channel(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.view_channel()
self.assertTrue(results)
def test_create_channel(self):
ts = pythingspeak.ThingSpeak( channel_id=59596, api_key='ISXCEH1JHRQR85O4' )
results = ts.create_channel()
self.assertTrue(results)
if __name__ == '__main__':
unittest.main()
| 31.5
| 78
| 0.745565
| 2,028
| 0.946779
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.11578
|
b04f12eb656c69facb8b7d0c196d013597b90eb0
| 11,920
|
py
|
Python
|
esst/utils/historygraph.py
|
etcher-be/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | 4
|
2018-06-24T14:03:44.000Z
|
2019-01-21T01:20:02.000Z
|
esst/utils/historygraph.py
|
etcher-be/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | 106
|
2018-06-24T13:59:52.000Z
|
2019-11-26T09:05:14.000Z
|
esst/utils/historygraph.py
|
theendsofinvention/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Creates graphic of perfs
"""
import datetime
import typing
from collections import namedtuple
from tempfile import mktemp
import humanize
from esst.core import CTX
PLT = GRID_SPEC = TICKER = None
# https://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server/4935945#4935945
# noinspection SpellCheckingInspection
def _init_mpl():
"""
This is a very stupid hack to go around Matplotlib being stupid about Tkinter.
My linters don't like import statements mixed within the code, so this will do.
"""
global PLT, GRID_SPEC, TICKER # pylint: disable=global-statement
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt_
from matplotlib import gridspec as grd_, ticker as tick_
PLT = plt_
GRID_SPEC = grd_
TICKER = tick_
_init_mpl()
GraphValues = namedtuple('GraphValues', ['server_cpu_history',
'server_mem_history',
'server_bytes_sent_history',
'server_bytes_recv_history',
'dcs_cpu_history',
'dcs_mem_history',
'players_history', ])
PlotLine = namedtuple('PlotValue',
[
'values',
'label',
'style',
])
def process_values(values_to_process: GraphValues, time_delta: float) -> GraphValues:
"""
Converts raw values for plotting
Args:
values_to_process: values in set from CTX
        time_delta: earliest timestamp to include; samples older than this are dropped
Returns: processed values
"""
def _process(values):
return [data for data in values if data[0] >= time_delta] or [(time_delta, 0)]
server_cpu_history = _process(values_to_process.server_cpu_history)
server_mem_history = _process(values_to_process.server_mem_history)
server_bytes_sent_history = _process(values_to_process.server_bytes_sent_history)
server_bytes_recv_history = _process(values_to_process.server_bytes_recv_history)
dcs_cpu_history = _process(values_to_process.dcs_cpu_history)
dcs_mem_history = _process(values_to_process.dcs_mem_history)
players_history = _process(values_to_process.players_history)
return GraphValues(
server_cpu_history=zip(*server_cpu_history),
server_mem_history=zip(*server_mem_history),
server_bytes_sent_history=zip(*server_bytes_sent_history),
server_bytes_recv_history=zip(*server_bytes_recv_history),
dcs_cpu_history=zip(*dcs_cpu_history),
dcs_mem_history=zip(*dcs_mem_history),
players_history=tuple(zip(*players_history)),
)
def _make_delta(now, days, hours, minutes):
delta = datetime.timedelta(
days=days, hours=hours, minutes=minutes).total_seconds()
if delta == 0:
delta = datetime.timedelta(hours=2).total_seconds()
return now - delta
def _x_format_func(val, _):
val = datetime.datetime.fromtimestamp(val)
return str(val).replace(' ', '\n')
def _y_format_func_percent(val, _):
return str(int(val)) + '%'
def _y_format_func_bytes(val, _):
return humanize.naturalsize(val)
def _plot_axis(grid_spec, grid_pos, # pylint: disable=too-many-arguments
values_to_plot: typing.Set[PlotLine],
title,
y_label_text,
values,
now,
y_format_func,
visible_x_labels=False,
share_x=None):
axis = PLT.subplot(grid_spec[grid_pos], sharex=share_x) # type: ignore
axis.set_title(title)
PLT.setp(axis.get_xticklabels(), visible=visible_x_labels) # type: ignore
axis.set_ylabel(y_label_text)
for line in values_to_plot:
line_, = axis.plot(*line.values, line.style)
PLT.setp(line_, label=line.label) # type: ignore
_add_players_count_to_axis(axis, values.players_history)
axis.xaxis.set_major_formatter(TICKER.FuncFormatter(_x_format_func)) # type: ignore
axis.yaxis.set_major_formatter(TICKER.FuncFormatter(y_format_func)) # type: ignore
axis.grid(True)
axis.set_xlim(right=now)
return axis
# pylint: disable=too-many-arguments,too-many-locals
def _get_axis(
grid_spec,
now,
values,
grid_pos,
values_list: typing.List[typing.Any],
labels_list: typing.List[str],
title: str,
y_label: str,
visible_x: bool,
y_format_func: typing.Callable,
share_x=None,
):
lines_to_plot = set()
styles = ['r', 'b']
for _values, _label in zip(values_list, labels_list):
lines_to_plot.add(
PlotLine(
values=_values,
style=styles.pop(),
label=_label
)
)
axis = _plot_axis(grid_spec,
now=now,
values_to_plot=lines_to_plot,
grid_pos=grid_pos,
title=title,
y_label_text=y_label,
values=values,
visible_x_labels=visible_x,
share_x=share_x,
y_format_func=y_format_func)
return axis
def _plot_server(grid_spec, values, now):
axis = _get_axis(
grid_spec=grid_spec,
now=now,
values=values,
grid_pos=0,
values_list=[values.server_cpu_history, values.server_mem_history],
labels_list=['CPU', 'Memory'],
title='Server stats',
y_label='Percentage used',
visible_x=False,
y_format_func=_y_format_func_percent,
)
axis.set_ylim([0, 100])
return axis
def _plot_dcs(grid_spec, values, now, share_x=None):
axis = _get_axis(
grid_spec=grid_spec,
now=now,
values=values,
grid_pos=1,
values_list=[values.dcs_cpu_history, values.dcs_mem_history],
labels_list=['CPU', 'Memory'],
title='DCS stats',
y_label='Percentage used',
visible_x=False,
y_format_func=_y_format_func_percent,
share_x=share_x
)
axis.set_ylim([0, 100])
return axis
def _plot_bandwidth(grid_spec, values, now, share_x=None):
axis = _get_axis(
grid_spec=grid_spec,
now=now,
values=values,
grid_pos=2,
values_list=[values.server_bytes_sent_history, values.server_bytes_recv_history],
labels_list=['Bytes sent', 'Bytes received'],
        title='Network traffic',
        y_label='Bytes',
visible_x=True,
y_format_func=_y_format_func_bytes,
share_x=share_x
)
return axis
def _add_players_count_to_axis(axis, players_history):
ax_players = axis.twinx()
max_player_count = max(
max((players_count for players_count in players_history[1])), 10)
ax_players.set_ylim([0, max_player_count + (max_player_count / 4)])
ax_players.yaxis.set_major_locator(TICKER.MaxNLocator(integer=True))
ax_players.set_ylabel('Connected players')
players_history, = ax_players.plot(*players_history, 'k.', )
PLT.setp(players_history, label='Players count')
lines, labels = axis.get_legend_handles_labels()
lines2, labels2 = ax_players.get_legend_handles_labels()
axis.legend(lines + lines2, labels + labels2)
def _make_history_graph( # pylint: disable=too-many-arguments
values_to_process,
days=0,
hours=0,
minutes=0,
show: bool = False,
save_path=None):
"""
Creates a graph of perfs
Args:
show: show and exit
save_path: specify path to save to (default to temp path)
"""
# noinspection PyTypeChecker
now = datetime.datetime.now().timestamp()
time_delta = _make_delta(now, days, hours, minutes)
values = process_values(values_to_process, time_delta)
figure = PLT.figure(figsize=(18, 12)) # type: ignore
grid_spec = GRID_SPEC.GridSpec(3, 1, height_ratios=[1, 1, 1]) # type: ignore
ax_server = _plot_server(grid_spec, values, now)
_plot_dcs(grid_spec, values, now, share_x=ax_server)
_plot_bandwidth(grid_spec, values, now, share_x=ax_server)
PLT.tight_layout() # type: ignore
figure.tight_layout()
if show:
PLT.show() # type: ignore
PLT.close() # type: ignore
return None
if not save_path:
save_path = mktemp('.png') # nosec
PLT.savefig(save_path) # type: ignore
PLT.close() # type: ignore
return save_path
# pylint: disable=too-many-arguments
def make_history_graph(callback=None, days=0, hours=0, minutes=0, show: bool = False, save_path=None):
"""
Creates a graph of perfs
Args:
minutes: number of minutes to go back
hours: number of hours to go back
days: number of days to go back
        callback: optional callback invoked with the path of the generated graph
show: show and exit
save_path: specify path to save to (default to temp path)
"""
values_to_process = GraphValues(
dcs_cpu_history=CTX.dcs_cpu_history,
dcs_mem_history=CTX.dcs_mem_history,
server_cpu_history=CTX.server_cpu_history,
server_mem_history=CTX.server_mem_history,
server_bytes_recv_history=CTX.server_bytes_recv_history,
server_bytes_sent_history=CTX.server_bytes_sent_history,
players_history=CTX.players_history,
)
graph = _make_history_graph(values_to_process, days, hours, minutes, show, save_path)
if callback:
callback(graph)
# process_pool = futures.ProcessPoolExecutor(max_workers=1)
# values_to_process = GraphValues(
# dcs_cpu_history=CTX.dcs_cpu_history,
# dcs_mem_history=CTX.dcs_mem_history,
# server_cpu_history=CTX.server_cpu_history,
# server_mem_history=CTX.server_mem_history,
# server_bytes_recv_history=CTX.server_bytes_recv_history,
# server_bytes_sent_history=CTX.server_bytes_sent_history,
# players_history=CTX.players_history,
# )
# future = process_pool.submit(
# _make_history_graph, values_to_process, days, hours, minutes, show, save_path
# )
# if callback:
# future.add_done_callback(callback)
if __name__ == '__main__':
# Debug code
import random
TIME_DELTA = datetime.timedelta(hours=5)
TOTAL_SECONDS = int(TIME_DELTA.total_seconds())
NOW = datetime.datetime.now().timestamp()
PLAYER_COUNT = 0
CTX.players_history.append((NOW - TOTAL_SECONDS, 0))
SKIP = 0
for time_stamp in range(TOTAL_SECONDS, 0, -10):
CTX.server_mem_history.append(
(NOW - time_stamp, random.randint(60, 70))) # nosec
CTX.dcs_cpu_history.append((NOW - time_stamp, random.randint(20, 30))) # nosec
CTX.dcs_mem_history.append((NOW - time_stamp, random.randint(60, 70))) # nosec
SKIP += 1
if SKIP > 20:
SKIP = 0
CTX.server_bytes_recv_history.append(
(NOW - time_stamp, random.randint(0, 50000000))) # nosec
CTX.server_bytes_sent_history.append(
(NOW - time_stamp, random.randint(0, 50000000))) # nosec
if time_stamp <= int(TOTAL_SECONDS / 2):
CTX.server_cpu_history.append(
(NOW - time_stamp, random.randint(20, 30))) # nosec
if random.randint(0, 100) > 99: # nosec
PLAYER_COUNT += random.choice([-1, 1]) # nosec
if PLAYER_COUNT < 0:
PLAYER_COUNT = 0
continue
CTX.players_history.append((NOW - time_stamp, PLAYER_COUNT))
TIME_DELTA = datetime.datetime.now() - TIME_DELTA # type: ignore
TIME_DELTA = TIME_DELTA.timestamp() # type: ignore
make_history_graph(hours=5, save_path='./test.png')
| 32.747253
| 117
| 0.640017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,552
| 0.214094
|
b04f60f28cbb6155e0266d15a62d61ce814d26c3
| 1,267
|
py
|
Python
|
20.valid-parentheses.py
|
Qianli-Ma/LeetCode
|
ebda421c3d652adffca5e547a22937bf1726a532
|
[
"MIT"
] | null | null | null |
20.valid-parentheses.py
|
Qianli-Ma/LeetCode
|
ebda421c3d652adffca5e547a22937bf1726a532
|
[
"MIT"
] | null | null | null |
20.valid-parentheses.py
|
Qianli-Ma/LeetCode
|
ebda421c3d652adffca5e547a22937bf1726a532
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=20 lang=python3
#
# [20] Valid Parentheses
#
# https://leetcode.com/problems/valid-parentheses/description/
#
# algorithms
# Easy (36.20%)
# Total Accepted: 554.4K
# Total Submissions: 1.5M
# Testcase Example: '"()"'
#
# Given a string containing just the characters '(', ')', '{', '}', '[' and
# ']', determine if the input string is valid.
#
# An input string is valid if:
#
#
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
#
#
# Note that an empty string is also considered valid.
#
# Example 1:
#
#
# Input: "()"
# Output: true
#
#
# Example 2:
#
#
# Input: "()[]{}"
# Output: true
#
#
# Example 3:
#
#
# Input: "(]"
# Output: false
#
#
# Example 4:
#
#
# Input: "([)]"
# Output: false
#
#
# Example 5:
#
#
# Input: "{[]}"
# Output: true
#
#
#
class Solution:
def isValid(self, s: str) -> bool:
stack = []
dict = {"]": "[", "}": "{", ")": "("}
for char in s:
if char in dict.values():
stack.append(char)
elif char in dict.keys():
if stack == [] or dict[char] != stack.pop():
return False
else:
return False
return stack == []
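if __name__ == "__main__":
    # Quick sanity check against the documented examples (illustrative addition, not part of the original submission)
    s = Solution()
    assert s.isValid("()") and s.isValid("()[]{}") and s.isValid("{[]}")
    assert not s.isValid("(]") and not s.isValid("([)]")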
| 16.454545
| 75
| 0.534333
| 421
| 0.332019
| 0
| 0
| 0
| 0
| 0
| 0
| 801
| 0.631703
|
b0578e2fd0b0bbd54ee3add80281e9bcba12bdeb
| 428
|
py
|
Python
|
airypi/redis_queue.py
|
airypi/airypi
|
c7e3e781eaf2e6b3e2e87b576d5202e381544d0c
|
[
"Apache-2.0"
] | 3
|
2015-11-04T19:45:48.000Z
|
2017-10-26T19:40:18.000Z
|
airypi/redis_queue.py
|
airypi/airypi
|
c7e3e781eaf2e6b3e2e87b576d5202e381544d0c
|
[
"Apache-2.0"
] | null | null | null |
airypi/redis_queue.py
|
airypi/airypi
|
c7e3e781eaf2e6b3e2e87b576d5202e381544d0c
|
[
"Apache-2.0"
] | null | null | null |
import redis
from flask import g, session
import device
import message_queue
import os
class RedisMQ(message_queue.MessageQueue):
redis = None
def push(self, data):
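        # LPUSH puts the newest item at the head of the Redis list; LTRIM then keeps only
        # the entries at indices 0..max_size so the queue stays bounded.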
RedisMQ.redis.lpush(self.queue_key, data)
RedisMQ.redis.ltrim(self.queue_key, 0, self.max_size)
def pop(self):
return RedisMQ.redis.rpop(self.queue_key)
def clear(self):
RedisMQ.redis.delete(self.queue_key)
| 22.526316
| 61
| 0.698598
| 339
| 0.792056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b05916119eca4a721a156d9e476326122efd26e2
| 4,956
|
py
|
Python
|
rnnApp.py
|
RiboswitchClassifier/RiboswitchClassification
|
4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7
|
[
"MIT"
] | 2
|
2019-12-16T13:08:28.000Z
|
2021-02-23T03:03:18.000Z
|
rnnApp.py
|
RiboswitchClassifier/RiboswitchClassification
|
4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7
|
[
"MIT"
] | null | null | null |
rnnApp.py
|
RiboswitchClassifier/RiboswitchClassification
|
4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7
|
[
"MIT"
] | 3
|
2019-01-01T06:00:20.000Z
|
2020-01-28T13:57:49.000Z
|
import tensorflow as tf
import theano
import pandas as pd
import numpy as np
import matplotlib
# matplotlib.use('pdf')
import matplotlib.pyplot as plt
from keras.layers import Dense, Dropout, LSTM, Embedding, Activation, Lambda, Bidirectional
from sklearn.preprocessing import OneHotEncoder
from keras.engine import Input, Model, InputSpec
from keras.preprocessing.sequence import pad_sequences
from keras.utils import plot_model
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from sklearn.utils import class_weight
from keras import backend as K
from keras.preprocessing import sequence
from keras.models import model_from_json
from keras.utils import to_categorical
from sklearn.utils import shuffle
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.preprocessing import label_binarize
import os
import pydot
from keras.models import load_model
import multiclassROC
import graphviz
import functools
import preprocess
# Hyperparameters and Parameters
EPOCHS = 25 # an arbitrary cutoff, generally defined as "one pass over the entire dataset", used to separate training into distinct phases, which is useful for logging and periodic evaluation.
BATCH_SIZE = 128 # a set of N samples. The samples in a batch are processed independently, in parallel. If training, a batch results in only one update to the model.
ALLOWED_ALPHABETS = 'ATGCN' # Allowed Charecters
INPUT_DIM = len(ALLOWED_ALPHABETS) # a vocabulary of 5 words in case of genome sequence 'ATGCN'
CLASSES = 32 # Number of Classes to Classify -> Change this to 16 when needed
OUTPUT_DIM = 50 # Embedding output of Layer 1
RNN_HIDDEN_DIM = 62 # Hidden Layers
DROPOUT_RATIO = 0.2 # proportion of neurones not used for training
MAXLEN = 250 # cuts text after number of these characters in pad_sequences
VALIDATION_SPLIT = 0.1
# Create Directory for Checkpoints
checkpoint_dir = 'epoch_tuning/RNN/32_checkpoints'
os.makedirs(checkpoint_dir, exist_ok=True)
# Path to save and load Model
model_file_h5 = "models/rnn_32_model.h5"
# Path to Dataset
input_file_train = 'processed_datasets/final_32train.csv'
input_file_test = 'processed_datasets/final_32test.csv'
# Create the RNN
def create_lstm(input_length, rnn_hidden_dim = RNN_HIDDEN_DIM, output_dim = OUTPUT_DIM, input_dim = INPUT_DIM, dropout = DROPOUT_RATIO):
model = Sequential()
model.add(Embedding(input_dim = INPUT_DIM, output_dim = output_dim, input_length = input_length, name='embedding_layer'))
model.add(Bidirectional(LSTM(rnn_hidden_dim, return_sequences=True)))
model.add(Dropout(dropout))
model.add(Bidirectional(LSTM(rnn_hidden_dim)))
model.add(Dropout(dropout))
model.add(Dense(CLASSES, activation='softmax'))
model.compile('adam', 'sparse_categorical_crossentropy', metrics=['accuracy']) # binary_crossentropy # categorical_crossentropy
return model
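# The network assembled above (shapes follow from the constants defined earlier):
# Embedding(vocab=5 -> 50) -> Bidirectional LSTM(62, return_sequences) -> Dropout(0.2)
# -> Bidirectional LSTM(62) -> Dropout(0.2) -> Dense(CLASSES, softmax),
# compiled with Adam and sparse categorical cross-entropy.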
# Train RNN
def train_model_and_save(X_train, y_train, model):
# Save Checkpoint
filepath= checkpoint_dir + "/weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)
history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, class_weight=class_weights,
epochs=EPOCHS, callbacks=callbacks_list, validation_split = VALIDATION_SPLIT, verbose = 1, shuffle=True)
model.save(model_file_h5)
print("Saved model to disk")
return model
# Classification Report
def generate_classification_report(model_loaded, X_test, y_test):
print (classification_report(y_test,model_loaded.predict_classes(X_test)))
# Predict Classes, Probabilities, Call AucRoc Function
def generate_auc_roc(X_test, y_test):
model_loaded = load_model(model_file_h5)
generate_classification_report(model_loaded, X_test, y_test)
predicted_classes = model_loaded.predict_classes(X_test)
print ("Predicted Classes")
print (predicted_classes)
score, acc = model_loaded.evaluate(X_test, y_test,batch_size=BATCH_SIZE)
print (score)
print (acc)
y_score = model_loaded.predict_proba(X_test)
print ("Predicted Probabilities")
print (y_score)
bin_output = preprocess.binarize(y_test)
multiclassROC.calculate_roc(bin_output, y_score, "RnnClassifierModel", CLASSES)
if __name__ == '__main__':
# Load Training Datasets
X_train, y_train = preprocess.load_data(input_file_train,True)
# Create Model Structure
model = create_lstm(len(X_train[0]))
model.summary()
# Load Test Datasets
X_test, y_test = preprocess.load_data(input_file_test, False)
# Train Model and Save it
model = train_model_and_save(X_train, y_train, model)
# Generate Auc and Roc Curve
generate_auc_roc(X_test, y_test)
| 43.858407
| 193
| 0.7841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,443
| 0.291162
|
b05b358493a6597bac995a34db28dd63e04524e6
| 72
|
py
|
Python
|
geetiles/config/prod.py
|
icpac-igad/gee-tiles
|
713a58e00b4377dd54aeaa77416ad7fe7b2c9206
|
[
"MIT"
] | 1
|
2020-09-28T12:23:25.000Z
|
2020-09-28T12:23:25.000Z
|
geetiles/config/prod.py
|
icpac-igad/gee-tiles
|
713a58e00b4377dd54aeaa77416ad7fe7b2c9206
|
[
"MIT"
] | 6
|
2019-08-28T17:17:25.000Z
|
2021-10-13T07:19:14.000Z
|
geetiles/config/prod.py
|
icpac-igad/gee-tiles
|
713a58e00b4377dd54aeaa77416ad7fe7b2c9206
|
[
"MIT"
] | 5
|
2019-11-15T10:37:56.000Z
|
2021-07-15T08:07:27.000Z
|
"""-"""
SETTINGS = {
'logging': {
'level': 'DEBUG'
}
}
| 9
| 24
| 0.347222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.416667
|
b05bf40e3728937480f8f42cb9c975d60036475f
| 6,911
|
py
|
Python
|
neptune-python-utils/neptune_python_utils/glue_gremlin_client.py
|
Alfian878787/amazon-neptune-tools
|
a447da238e99612a290babc66878fe772727a19e
|
[
"Apache-2.0"
] | null | null | null |
neptune-python-utils/neptune_python_utils/glue_gremlin_client.py
|
Alfian878787/amazon-neptune-tools
|
a447da238e99612a290babc66878fe772727a19e
|
[
"Apache-2.0"
] | null | null | null |
neptune-python-utils/neptune_python_utils/glue_gremlin_client.py
|
Alfian878787/amazon-neptune-tools
|
a447da238e99612a290babc66878fe772727a19e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Amazon.com, Inc. or its affiliates.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys
from pyspark.sql.functions import lit
from pyspark.sql.functions import format_string
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.process.traversal import *
from neptune_python_utils.gremlin_utils import GremlinUtils
from neptune_python_utils.endpoints import Endpoints
class GlueGremlinClient:
def __init__(self, endpoints):
self.gremlin_utils = GremlinUtils(endpoints)
GremlinUtils.init_statics(globals())
def add_vertices(self, label):
"""Adds a vertex with the supplied label for each row in a DataFrame partition.
If the DataFrame contains an '~id' column, the values in this column will be treated as user-supplied IDs for the new vertices.
If the DataFrame does not have an '~id' column, Neptune will autogenerate a UUID for each vertex.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.add_vertices('Product'))
"""
def add_vertices_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
traversal = g.addV(label)
for key, value in entries.items():
key = key.split(':')[0]
if key == '~id':
traversal.property(id, value)
elif key == '~label':
pass
else:
traversal.property(key, value)
traversal.next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except:
print("Unexpected error:", sys.exc_info()[0])
return add_vertices_for_label
def upsert_vertices(self, label):
"""Conditionally adds vertices for the rows in a DataFrame partition using the Gremlin coalesce() idiom.
The DataFrame must contain an '~id' column.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.upsert_vertices('Product'))
"""
def upsert_vertices_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
create_traversal = __.addV(label)
for key, value in entries.items():
key = key.split(':')[0]
if key == '~id':
create_traversal.property(id, value)
elif key == '~label':
pass
else:
create_traversal.property(key, value)
g.V(entries['~id']).fold().coalesce(__.unfold(), create_traversal).next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except:
print("Unexpected error:", sys.exc_info()[0])
return upsert_vertices_for_label
def add_edges(self, label):
"""Adds an edge with the supplied label for each row in a DataFrame partition.
If the DataFrame contains an '~id' column, the values in this column will be treated as user-supplied IDs for the new edges.
If the DataFrame does not have an '~id' column, Neptune will autogenerate a UUID for each edge.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.add_edges('ORDER_DETAIL'))
"""
def add_edges_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
traversal = g.V(row['~from']).addE(label).to(V(row['~to'])).property(id, row['~id'])
for key, value in entries.items():
key = key.split(':')[0]
if key not in ['~id', '~from', '~to', '~label']:
traversal.property(key, value)
traversal.next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except:
print("Unexpected error:", sys.exc_info()[0])
return add_edges_for_label
def upsert_edges(self, label):
"""Conditionally adds edges for the rows in a DataFrame partition using the Gremlin coalesce() idiom.
The DataFrame must contain '~id', '~from', '~to' and '~label' columns.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.upsert_edges('ORDER_DETAIL'))
"""
def add_edges_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
create_traversal = __.V(row['~from']).addE(label).to(V(row['~to'])).property(id, row['~id'])
for key, value in entries.items():
key = key.split(':')[0]
if key not in ['~id', '~from', '~to', '~label']:
create_traversal.property(key, value)
g.E(entries['~id']).fold().coalesce(__.unfold(), create_traversal).next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except:
print("Unexpected error:", sys.exc_info()[0])
return add_edges_for_label
| 46.073333
| 135
| 0.570395
| 5,751
| 0.832152
| 0
| 0
| 0
| 0
| 0
| 0
| 2,307
| 0.333816
|
b05fe1389ad39d5ec1240e047aa523f2264c0d97
| 343
|
py
|
Python
|
floyd_warshall/messages/rate_request.py
|
hrs231/sample-code
|
91c2972d1a414397d3505d3b4df9ee80b67bcac0
|
[
"MIT"
] | null | null | null |
floyd_warshall/messages/rate_request.py
|
hrs231/sample-code
|
91c2972d1a414397d3505d3b4df9ee80b67bcac0
|
[
"MIT"
] | null | null | null |
floyd_warshall/messages/rate_request.py
|
hrs231/sample-code
|
91c2972d1a414397d3505d3b4df9ee80b67bcac0
|
[
"MIT"
] | null | null | null |
class RateRequest(object):
"""" Used by Price Engine Clients to query the Price Engine """
def __init__(self, exch_1, curr_1, exch_2, curr_2):
self.exch_1 = exch_1
self.curr_1 = curr_1
self.exch_2 = exch_2
self.curr_2 = curr_2
self.rate = 0
self.path = []
self.error_msg = None
| 28.583333
| 67
| 0.594752
| 341
| 0.994169
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.183673
|
b0618e2deaae21564649c946c7681a44ee75680f
| 2,613
|
py
|
Python
|
backend/app/api/api_v1/router/file/excel_tool.py
|
PY-GZKY/fastapi-crawl-admin
|
6535054994d11e3c31b4caeae65e8fa0f495d2b7
|
[
"MIT"
] | 13
|
2021-07-25T15:26:04.000Z
|
2022-03-02T12:12:02.000Z
|
backend/app/api/api_v1/router/file/excel_tool.py
|
PY-GZKY/fastapi-crawl-admin
|
6535054994d11e3c31b4caeae65e8fa0f495d2b7
|
[
"MIT"
] | 1
|
2021-07-26T03:26:09.000Z
|
2021-07-26T09:05:38.000Z
|
backend/app/api/api_v1/router/file/excel_tool.py
|
PY-GZKY/fastapi-crawl-admin
|
6535054994d11e3c31b4caeae65e8fa0f495d2b7
|
[
"MIT"
] | 3
|
2021-07-26T01:44:24.000Z
|
2021-07-31T14:31:49.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/22 15:58
from fastapi import Depends
from motor.motor_asyncio import AsyncIOMotorClient
from app.api.db.mongoDB import get_database
import pandas as pd
import numpy as np
from io import BytesIO
class ExcelTools:
def __init__(self, columns_map=None, order=None):
        '''
        :param columns_map: column-name mapping => {"name":"姓名","score":"成绩","sex":"性别"}
        :param order: column ordering list => ["name","sex","score"]
        '''
self.columns_map = columns_map
self.order = order
def get_excel_header(self,excel):
df = pd.read_excel(excel, skiprows=None)
print(df.columns)
return df.columns
    # Excel to dict
def excel_to_dict(self,excel,skiprows=1):
'''
        Convert an Excel file into a list of Python dicts
:param excel: bytes
:return:
'''
if not excel:
return []
df = pd.read_excel(excel, skiprows=None)
df = df.replace(np.nan, '', regex=True)
        # Strip whitespace from every string cell
stripstr = lambda x: x.strip() if isinstance(x, np.unicode) else x
df = df.applymap(stripstr)
        # Map the supplied column names back to field names
if self.columns_map:
columns_map = dict(zip(self.columns_map.values(), self.columns_map.keys()))
df = df.rename(columns=columns_map)
result = df.to_dict(orient='records')
return result
    # Dict to Excel
def dict_to_excel(self, datas):
"""
        :param datas: data set => [{"name":"张三","score":90,"sex":"男"}]
:return:
"""
        # Initialize the in-memory output buffer
output = BytesIO()
        # Convert the list of dicts into a DataFrame
pf = pd.DataFrame(datas)
        # Order the columns by the given field list
if self.order:
pf = pf[self.order]
        # Rename the columns using the provided mapping
if self.columns_map:
pf.rename(columns=self.columns_map, inplace=True)
        # Create the Excel writer for the output workbook
writer = pd.ExcelWriter(output, engine='xlsxwriter')
        # Replace empty cells
pf.fillna(' ', inplace=True)
        # Write the DataFrame to the workbook
pf.to_excel(writer, encoding='utf-8', sheet_name='sheet1', index=False)
        # Format the Excel workbook
workbook = writer.book
worksheet = writer.sheets['sheet1']
format = workbook.add_format({'text_wrap': True})
        # Set the column widths
for i, col in enumerate(pf.columns):
# find and set length of column
column_len = pf[col].astype(str).str.len().max()
column_len = max(column_len, len(col)) + 2
# set column length
worksheet.set_column(i, i, column_len)
        # Save the workbook to the buffer
writer.close()
output.seek(0)
return output
if __name__ == '__main__':
pass
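    # Illustrative round trip; the column names and the sample record below are assumptions, not part of the original module.
    tools = ExcelTools(columns_map={"name": "姓名", "score": "成绩"}, order=["name", "score"])
    workbook = tools.dict_to_excel([{"name": "张三", "score": 90}])
    print(tools.excel_to_dict(workbook))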
| 25.871287
| 87
| 0.564485
| 2,534
| 0.900818
| 0
| 0
| 0
| 0
| 0
| 0
| 828
| 0.294348
|
b0619b37fbd880320070eeeb51552bb149486090
| 1,164
|
py
|
Python
|
Lab8/1 + 2 (Simple socket server)/simple_client.py
|
marianfx/python-labs
|
7066db410ad19cababb7b66745641e65a28ccd98
|
[
"MIT"
] | null | null | null |
Lab8/1 + 2 (Simple socket server)/simple_client.py
|
marianfx/python-labs
|
7066db410ad19cababb7b66745641e65a28ccd98
|
[
"MIT"
] | null | null | null |
Lab8/1 + 2 (Simple socket server)/simple_client.py
|
marianfx/python-labs
|
7066db410ad19cababb7b66745641e65a28ccd98
|
[
"MIT"
] | null | null | null |
"""Simple socket client for the simple socket client."""
import sys
import socket
import time
SOCKET_ADDRESS = "127.0.0.1"
SOCKET_PORT = 6996
def build_client_tcp(address: str, port: int):
"""Builds the TCP client."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((address, port))
time.sleep(1)
sock.close()
except:
print("Cannot connect to the target server.")
def build_client_udp(address: str, port: int, message: str):
"""Builds the UDP client."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message.encode(), (address, port))
if __name__ == "__main__":
if len(sys.argv) < 5:
print("You must give as args the mode, server address, the port and the message to send.")
exit()
MODE = sys.argv[1]
SOCKET_ADDRESS = sys.argv[2]
SOCKET_PORT = int(sys.argv[3])
MESSAGE = sys.argv[4]
if MODE == "TCP":
build_client_tcp(SOCKET_ADDRESS, SOCKET_PORT)
elif MODE == "UDP":
build_client_udp(SOCKET_ADDRESS, SOCKET_PORT, MESSAGE)
else:
print("Unable to determine what you want.")
| 28.390244
| 98
| 0.649485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 300
| 0.257732
|
b062b0f29115369104d664570dbb03f1de934fe3
| 2,689
|
py
|
Python
|
009/app.py
|
ilos-vigil/random-script
|
bf8d45196d4faa6912dc0469a86b8370f43ce7ac
|
[
"MIT"
] | null | null | null |
009/app.py
|
ilos-vigil/random-script
|
bf8d45196d4faa6912dc0469a86b8370f43ce7ac
|
[
"MIT"
] | null | null | null |
009/app.py
|
ilos-vigil/random-script
|
bf8d45196d4faa6912dc0469a86b8370f43ce7ac
|
[
"MIT"
] | null | null | null |
import bs4
import nltk
import json
import re
import requests
with open('./acronym_abbreviation_id.json', 'r') as f:
data = f.read()
list_acronym_abbreviation = json.loads(data)
from_wikipedia = False
if from_wikipedia:
    # Fetch a random Indonesian-language article from Wikipedia
    text = ''
html = requests.get('https://id.wikipedia.org/wiki/Istimewa:Halaman_sembarang').text
soup = bs4.BeautifulSoup(html, 'html.parser')
for p in soup.find('div', class_='mw-parser-output').find_all('p'):
text = f'{text}{p.get_text()}'
text = re.sub(r'\n', '', text)
text = re.sub(r'\[\d*\]', '', text)
else:
text = '''
Linux (atau GNU/Linux, lihat kontroversi penamaannya) adalah nama yang diberikan kepada kumpulan sistem operasi Mirip Unix yang menggunakan Kernel Linux sebagai kernelnya. Linux merupakan proyek perangkat lunak bebas dan sumber terbuka terbesar di dunia. Seperti perangkat lunak bebas dan sumber terbuka lainnya pada umumnya, kode sumber Linux dapat dimodifikasi, digunakan dan didistribusikan kembali secara bebas oleh siapa saja
'''
text = re.sub(r'\n', '', text)
print(f'Input : {text}')
# split into sentences
# step 1
boundary = '•'
rule = {
r'\.': f'.•',
r'\?': f'?•',
'!': f'!•',
';': f';•',
':': f':•'
}
for old, new in rule.items():
text = re.sub(old, new, text)
# step 2
for word in re.finditer(r'"(.+)"', text):
start_position, end_position = word.regs[0][0], word.regs[0][1]
quoted_sentence = text[start_position:end_position]
quoted_sentence = re.sub('•', '', quoted_sentence) # remove boundary
if text[end_position] == '.': # move boundary if character after " is .
text = text[:start_position] + quoted_sentence + text[end_position:]
else:
text = text[:start_position] + quoted_sentence + '•' + text[end_position:]
# step 3
for word in re.finditer(r'([\w]*)(\.|\?|!|;|:)•', text): # [word][sign]•
    # regs[0] -> span of the whole match, regs[1] -> span of [word], regs[2] -> span of [sign]
    # each span is a (start, end + 1) tuple
word_start_position, word_end_position, boundary_position = word.regs[1][0], word.regs[2][1], word.regs[0][1]
if text[word_start_position:word_end_position] in list_acronym_abbreviation:
text = text[:word_end_position] + text[boundary_position:] # remove boundary
# step 4
for word in re.finditer(r'([\w]+) ?(!|\?)(•) ?[a-z]', text): #[word](optional space)[sign][•](optional space)[lowercase char]
boundary_position = word.regs[2][1]
text = text[:boundary_position] + text[boundary_position:]
# step 5
sentences = text.split('•')
print('Output:')
[print(s.lstrip(' ').rstrip(' ')) for s in sentences]
| 38.414286
| 430
| 0.661584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,205
| 0.443667
|
b062c54e4119bba9afb9e6fce3e62bb1a445400e
| 2,295
|
py
|
Python
|
graphs/page_rank.py
|
tg12/Python
|
398d1dbf4b780d1725aeae9a91b4c79f4410e2f0
|
[
"MIT"
] | null | null | null |
graphs/page_rank.py
|
tg12/Python
|
398d1dbf4b780d1725aeae9a91b4c79f4410e2f0
|
[
"MIT"
] | null | null | null |
graphs/page_rank.py
|
tg12/Python
|
398d1dbf4b780d1725aeae9a91b4c79f4410e2f0
|
[
"MIT"
] | 1
|
2020-06-26T09:46:17.000Z
|
2020-06-26T09:46:17.000Z
|
'''THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,
WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk
# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB
# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu
# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd
# contact :- github@jamessawyer.co.uk
"""
Author: https://github.com/bhushan-borole
"""
"""
The input graph for the algorithm is:
A B C
A 0 1 1
B 0 0 1
C 1 0 0
"""
graph = [[0, 1, 1], [0, 0, 1], [1, 0, 0]]
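# graph[i][j] == 1 means there is a directed edge from node i to node j, so with the matrix
# above A links to B and C, B links to C, and C links back to A. page_rank() below applies
# the usual simplified update PR(n) = (1 - d) + d * sum(PR(m) / outdegree(m)) over the
# inbound neighbours m of n.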
class Node:
def __init__(self, name):
self.name = name
self.inbound = []
self.outbound = []
def add_inbound(self, node):
self.inbound.append(node)
def add_outbound(self, node):
self.outbound.append(node)
def __repr__(self):
return "Node {}: Inbound: {} ; Outbound: {}".format(
self.name, self.inbound, self.outbound
)
def page_rank(nodes, limit=3, d=0.85):
ranks = {}
for node in nodes:
ranks[node.name] = 1
outbounds = {}
for node in nodes:
outbounds[node.name] = len(node.outbound)
for i in range(limit):
print("======= Iteration {} =======".format(i + 1))
for j, node in enumerate(nodes):
ranks[node.name] = (1 - d) + d * sum(
[ranks[ib] / outbounds[ib] for ib in node.inbound]
)
print(ranks)
def main():
names = list(input("Enter Names of the Nodes: ").split())
nodes = [Node(name) for name in names]
for ri, row in enumerate(graph):
for ci, col in enumerate(row):
if col == 1:
nodes[ci].add_inbound(names[ri])
nodes[ri].add_outbound(names[ci])
print("======= Nodes =======")
for node in nodes:
print(node)
page_rank(nodes)
if __name__ == "__main__":
main()
| 25.21978
| 74
| 0.616122
| 404
| 0.176035
| 0
| 0
| 0
| 0
| 0
| 0
| 1,016
| 0.442702
|
b064a795cdfc5cdd50a92817a383a97f8144e544
| 4,330
|
py
|
Python
|
DeepRTS/python/game.py
|
cair/deep-rts
|
7aa5dde0c5df10ae3a3d057e7b89641aec58e115
|
[
"MIT"
] | 144
|
2018-07-13T07:47:50.000Z
|
2022-03-31T06:29:50.000Z
|
DeepRTS/python/game.py
|
cair/DeepRTS
|
2ea4de0993ea0ca2677fdb36a172779db4ce7868
|
[
"MIT"
] | 18
|
2019-03-29T10:37:01.000Z
|
2022-03-02T12:47:34.000Z
|
DeepRTS/python/game.py
|
cair/DeepRTS
|
2ea4de0993ea0ca2677fdb36a172779db4ce7868
|
[
"MIT"
] | 23
|
2018-11-02T18:12:51.000Z
|
2022-02-15T20:32:18.000Z
|
from DeepRTS import Engine, Constants
from DeepRTS.python import GUI
from DeepRTS.python import Config
from DeepRTS.python import DeepRTSPlayer
import numpy as np
import random
import os
import argparse
import gym
dir_path = os.path.dirname(os.path.realpath(__file__))
class Game(Engine.Game):
def __init__(self,
map_name,
n_players=2,
engine_config=Engine.Config.defaults(),
gui_config=None,
tile_size=32,
terminal_signal=False
):
# This sets working directory, so that the C++ can load files correctly (dir_path not directly accessible in
# c++)
os.chdir(dir_path)
# Override map
try:
            # This sometimes fails under ray
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--map', action="store", dest="map", type=str)
args = parser.parse_args()
if args.map is not None:
map_name = args.map
except:
pass
# TODO
if engine_config:
engine_config.set_terminal_signal(terminal_signal)
# Disable terminal signal
engine_config.set_terminal_signal(terminal_signal)
# Call C++ constructor
super(Game, self).__init__(map_name, engine_config)
# Event listeners
self._listeners = {
"on_tile_change": []
}
self._render_every = 1
self._view_every = 1
self._capture_every = 1
self.gui = GUI(self, tile_size=tile_size, config=gui_config if isinstance(gui_config, Config) else Config())
self._py_players = [] # Keep reference of the py object
for i in range(n_players):
player = DeepRTSPlayer(self)
self.insert_player(player)
self._py_players.append(player)
# Select first player as default
self.set_player(self.players[0])
# Define the action space
self.action_space = LimitedDiscrete(Constants.action_min, Constants.action_max)
# Define the observation space, here we assume that max is 255 (image) # TODO
self.observation_space = gym.spaces.Box(
0,
255,
shape=self.get_state().shape, dtype=np.float32)
self.start()
@property
def players(self):
return self._py_players
    @staticmethod
    def sample_action():
        return int(Engine.Constants.action_max * random.random()) + Engine.Constants.action_min
def update(self):
self.tick()
if self.gui.config.input:
self.event()
super().update()
self.caption()
if self.gui.config.render:
self.render()
if self.gui.config.view:
self.view()
def _render(self):
if self.get_ticks() % self._render_every == 0:
self.gui.render()
def view(self):
if self.get_ticks() % self._view_every == 0:
self.gui.view()
def event(self):
self.gui.event()
def capture(self):
if self.get_ticks() % self._capture_every == 0:
return self.gui.capture()
return None
def get_state(self, image=False, copy=True):
if image:
return self.gui.capture(copy=copy)
else:
return np.array(self.state, copy=copy)
def _caption(self):
pass
def _on_unit_create(self, unit):
pass
def _on_unit_destroy(self, unit):
pass
def _on_episode_start(self):
pass
# for tile in self.tilemap.tiles:
# self.gui.gui_tiles.set_tile(tile.x, tile.y, tile.get_type_id())
def _on_episode_end(self):
pass
def _on_tile_deplete(self, tile):
# TODO
pass
# self.gui.gui_tiles.set_tile(tile.x, tile.y, tile.get_type_id())
def _on_tile_change(self, tile):
self.gui.on_tile_change(tile)
def set_render_frequency(self, interval):
self._render_every = interval
def set_player(self, player: DeepRTSPlayer):
self.set_selected_player(player)
def set_view_every(self, n):
self._view_every = n
def set_capture_every(self, n):
self._capture_every = n
| 26.564417
| 116
| 0.599769
| 4,055
| 0.93649
| 0
| 0
| 202
| 0.046651
| 0
| 0
| 629
| 0.145266
|
b064ac81a6a14605eca93bb63e07f0834ed4309a
| 1,147
|
py
|
Python
|
lairgpt/utils/assets.py
|
lightonai/lairgpt
|
7580e1339a39662b2ff636d158c36195eb7fe3fb
|
[
"MIT"
] | 19
|
2021-05-04T13:54:45.000Z
|
2022-01-05T15:45:12.000Z
|
lairgpt/utils/assets.py
|
lightonai/lairgpt
|
7580e1339a39662b2ff636d158c36195eb7fe3fb
|
[
"MIT"
] | null | null | null |
lairgpt/utils/assets.py
|
lightonai/lairgpt
|
7580e1339a39662b2ff636d158c36195eb7fe3fb
|
[
"MIT"
] | 1
|
2021-05-28T15:25:12.000Z
|
2021-05-28T15:25:12.000Z
|
from enum import Enum
from os.path import expanduser
from lairgpt.utils.remote import local_dir
class Config(Enum):
"""Settings for preconfigured models instances
"""
SMALL = {
"d_model": 768,
"n_heads": 12,
"n_layers": 12,
"vocab_size": 50262,
"max_seq_len": 1024
}
MEDIUM = {
"d_model": 1024,
"n_heads": 16,
"n_layers": 24,
"vocab_size": 50262,
"max_seq_len": 1024
}
LARGE = {
"d_model": 1280,
"n_heads": 20,
"n_layers": 36,
"vocab_size": 50262,
"max_seq_len": 1024
}
XLARGE = {
"d_model": 1280,
"n_heads": 20,
"n_layers": 36,
"vocab_size": 50262,
"max_seq_len": 1024
}
class Snapshot(Enum):
"""Snapshots for preconfigured models state dictionaries
"""
SMALL = local_dir + "small.pt"
MEDIUM = local_dir + "medium.pt"
LARGE = local_dir + "large.pt"
XLARGE = local_dir + "xlarge.pt"
class Tokenizer(Enum):
"""Tokenizers for preconfigured models inference
"""
CCNET = local_dir + "tokenizer_ccnet.json"
| 23.408163
| 60
| 0.558849
| 1,045
| 0.911072
| 0
| 0
| 0
| 0
| 0
| 0
| 450
| 0.392328
|
b0651029340e768b51b715881e03f9826ce6837f
| 1,546
|
py
|
Python
|
smart_open/__init__.py
|
DataTron-io/smart_open
|
3565eff8f0ffe19d7fd31063753384e0084fb1e0
|
[
"MIT"
] | 1
|
2020-09-28T06:47:58.000Z
|
2020-09-28T06:47:58.000Z
|
smart_open/__init__.py
|
DataTron-io/smart_open
|
3565eff8f0ffe19d7fd31063753384e0084fb1e0
|
[
"MIT"
] | null | null | null |
smart_open/__init__.py
|
DataTron-io/smart_open
|
3565eff8f0ffe19d7fd31063753384e0084fb1e0
|
[
"MIT"
] | null | null | null |
import logging
import os
import shutil

from .smart_open_lib import *
DEFAULT_CHUNKSIZE = 16*1024*1024 # 16mb
def copy_file(src, dest, close_src=True, close_dest=True, make_path=False):
"""
Copies file from src to dest. Supports s3 and webhdfs (does not include kerberos support)
If src does not exist, a FileNotFoundError is raised.
:param src: file-like object or path
:param dest: file-like object or path
:param close_src: boolean (optional). if True, src file is closed after use.
:param close_dest: boolean (optional). if True, dest file is closed after use.
    :param make_path: boolean (optional, default False). if True, destination parent directories are created if missing. Only if path is local
"""
logging.info("Copy file from {} to {}".format(src, dest))
if make_path:
dir_path, _ = os.path.split(dest)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
in_file = smart_open(src, 'rb')
out_file = smart_open(dest, 'wb')
try:
shutil.copyfileobj(in_file, out_file, DEFAULT_CHUNKSIZE)
except NotImplementedError as e:
logging.info("Error encountered copying file. Falling back to looping over input file. {}".format(e))
for line in in_file:
out_file.write(line)
try:
out_file.flush()
except Exception as e:
logging.info("Unable to flush out_file")
if in_file and not in_file.closed and close_src:
in_file.close()
if out_file and not out_file.closed and close_dest:
out_file.close()
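# Example calls (bucket and paths are illustrative):
#   copy_file('/tmp/report.csv', 's3://my-bucket/reports/report.csv')
#   copy_file('s3://my-bucket/reports/report.csv', '/data/reports/report.csv', make_path=True)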
| 34.355556
| 138
| 0.679172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 694
| 0.4489
|
b068470f8ca662453890dee9ded5d2a25fb6fcdd
| 4,706
|
py
|
Python
|
guacozy_server/backend/api/utils.py
|
yinm8315/guacozy-django-react
|
99a8270cb660052d3b4868b7959a5750968d9cc3
|
[
"MIT"
] | 121
|
2019-10-28T09:23:05.000Z
|
2022-03-19T00:30:36.000Z
|
guacozy_server/backend/api/utils.py
|
peppelinux/guacozy
|
ff4ca3fae8b9a5cb379a7a73d39f0d0ea8b6521c
|
[
"MIT"
] | 43
|
2019-10-28T09:22:59.000Z
|
2022-03-18T23:01:25.000Z
|
guacozy_server/backend/api/utils.py
|
peppelinux/guacozy
|
ff4ca3fae8b9a5cb379a7a73d39f0d0ea8b6521c
|
[
"MIT"
] | 44
|
2019-11-05T01:58:05.000Z
|
2022-03-30T08:05:18.000Z
|
import rules
from backend.models import Folder
def add_folder_to_tree_dictionary(folder, resulting_set, include_ancestors=False):
"""
Adds folder, folder's ancestors and folder's descendants
Ancestors are needed to build the traverse path in tree view
Descendants are needed because user has permission to see them
:type folder: Folder
:type resulting_set: set
    :type include_ancestors: bool
"""
# Include all ancestors, which we get from django-mptt's get_ancestors()
# it's a "cheap" query
if include_ancestors and folder.parent is not None:
for ancestor in folder.parent.get_ancestors(ascending=False, include_self=True):
resulting_set.add(ancestor)
# add this folder
resulting_set.add(folder)
    # add all of the folder's children
for child in folder.children.all():
add_folder_to_tree_dictionary(child, resulting_set, include_ancestors=False)
def check_folder_permissions(folder, resulting_set, user, require_view_permission=False):
"""
Recursively check folders and adds it to resulting_set if user has direct permission on folder
If require_view_permission is set to True, it returns only folders with direct permission and all child folders
    If require_view_permission is set to False, it also returns all ancestor folders
:type folder: backend.Folder
:type user: users.User
:type resulting_set: set
:type require_view_permission: bool
"""
if rules.test_rule('has_direct_permission', user, folder):
add_folder_to_tree_dictionary(folder, resulting_set, include_ancestors=not require_view_permission)
else:
for child in folder.children.all():
check_folder_permissions(child, resulting_set, user, require_view_permission)
def folder_to_object(folder, user, allowed_to_list=None, allowed_to_view=None, include_objects=True):
"""
Given folder converts it and it's children and objects to a tree format, which is used in API
:type folder: Folder
:type user: users.User
:type allowed_to_list: set
:type allowed_to_view: set
:type include_objects: bool
"""
if allowed_to_list is None:
allowed_to_list = user_allowed_folders_ids(user, require_view_permission=False)
if allowed_to_view is None:
allowed_to_view = user_allowed_folders_ids(user, require_view_permission=True)
result = {'id': folder.id, 'text': folder.name, 'isFolder': True}
result_children = []
# For every child check if it is included in allowed folders
# (precalculated list of folders allowed and
    # their ancestors, which are needed to get to this folder)
for child in folder.children.all():
if child in allowed_to_list:
result_children += [folder_to_object(
folder=child,
user=user,
allowed_to_list=allowed_to_list,
allowed_to_view=allowed_to_view,
include_objects=include_objects
)
]
# If we are asked (include_objects) and folder is in allowed_to_view list
# include all objects (currently only connections)
if include_objects and folder.id in allowed_to_view:
for connection in folder.connections.all():
connection_object = {'id': connection.id,
'text': connection.name,
'isFolder': False,
'protocol': connection.protocol,
}
result_children += [connection_object]
result['children'] = result_children
return result
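# Shape of the structure produced by folder_to_object (ids and names are illustrative):
# {'id': 1, 'text': 'Root', 'isFolder': True,
#  'children': [{'id': 2, 'text': 'Linux servers', 'isFolder': True, 'children': [...]},
#               {'id': 7, 'text': 'web01', 'isFolder': False, 'protocol': 'ssh'}]}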
def user_allowed_folders(user, require_view_permission=False):
"""
If require_view_permission is False, return list of folders user is allowed to list
If require_view_permission is True, return list of folders user is allowed to view
:type require_view_permission: bool
:type user: users.User
"""
resulting_folder = set()
# iterate over root folders
for folder in Folder.objects.all().filter(parent=None):
check_folder_permissions(folder, resulting_folder, user, require_view_permission)
return resulting_folder
def user_allowed_folders_ids(user, require_view_permission=False):
"""
If require_view_permission is False, return list of ids of folders user is allowed to list
If require_view_permission is True, return list of ids of folders user is allowed to view
:type require_view_permission: bool
:type user: users.User
"""
resulting_set = set()
for folder in user_allowed_folders(user, require_view_permission):
resulting_set.add(folder.id)
return resulting_set
| 36.765625
| 115
| 0.698683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,043
| 0.434127
|
b06a64034b02fc50eab6da81b27b39ddfc4affcc
| 348
|
py
|
Python
|
web/services/device-service/src/app.py
|
fhgrings/match-io
|
0acb0b006ae8d8073f1d148e80275a568c2517ae
|
[
"MIT"
] | null | null | null |
web/services/device-service/src/app.py
|
fhgrings/match-io
|
0acb0b006ae8d8073f1d148e80275a568c2517ae
|
[
"MIT"
] | null | null | null |
web/services/device-service/src/app.py
|
fhgrings/match-io
|
0acb0b006ae8d8073f1d148e80275a568c2517ae
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_cors import CORS
from src.ext import configuration
def minimal_app(**config):
app = Flask(__name__)
configuration.init_app(app, **config)
CORS(app)
return app
def create_app(**config):
app = minimal_app(**config)
configuration.load_extensions(app)
return app
| 19.333333
| 42
| 0.672414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b06a839b9e9c3f3cd1914d16be145f347a1d20cd
| 11,314
|
py
|
Python
|
nyc/nyc-new-cases.py
|
btrr/covid19-epicenters
|
4134967f6dbbdeb5ad91a435dc72d905e9886fd6
|
[
"MIT"
] | 1
|
2020-04-02T15:48:28.000Z
|
2020-04-02T15:48:28.000Z
|
nyc/nyc-new-cases.py
|
btrr/covid19-epicenters
|
4134967f6dbbdeb5ad91a435dc72d905e9886fd6
|
[
"MIT"
] | null | null | null |
nyc/nyc-new-cases.py
|
btrr/covid19-epicenters
|
4134967f6dbbdeb5ad91a435dc72d905e9886fd6
|
[
"MIT"
] | null | null | null |
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
dates = ['2/29/2020', '3/1/2020', '3/2/2020', '3/3/2020', '3/4/2020', '3/5/2020', '3/6/2020', '3/7/2020', '3/8/2020', '3/9/2020', '3/10/2020', '3/11/2020', '3/12/2020', '3/13/2020', '3/14/2020', '3/15/2020', '3/16/2020', '3/17/2020', '3/18/2020', '3/19/2020', '3/20/2020', '3/21/2020', '3/22/2020', '3/23/2020', '3/24/2020', '3/25/2020', '3/26/2020', '3/27/2020', '3/28/2020', '3/29/2020', '3/30/2020', '3/31/2020', '4/1/2020', '4/2/2020', '4/3/2020', '4/4/2020', '4/5/2020', '4/6/2020', '4/7/2020', '4/8/2020', '4/9/2020', '4/10/2020', '4/11/2020', '4/12/2020', '4/13/2020', '4/14/2020', '4/15/2020', '4/16/2020', '4/17/2020', '4/18/2020', '4/19/2020', '4/20/2020', '4/21/2020', '4/22/2020', '4/23/2020', '4/24/2020', '4/25/2020', '4/26/2020', '4/27/2020', '4/28/2020', '4/29/2020', '4/30/2020', '5/1/2020', '5/2/2020', '5/3/2020', '5/4/2020', '5/5/2020', '5/6/2020', '5/7/2020', '5/8/2020', '5/9/2020', '5/10/2020', '5/11/2020', '5/12/2020', '5/13/2020', '5/14/2020', '5/15/2020', '5/16/2020', '5/17/2020', '5/18/2020', '5/19/2020', '5/20/2020', '5/21/2020', '5/22/2020', '5/23/2020', '5/24/2020', '5/25/2020', '5/26/2020', '5/27/2020', '5/28/2020', '5/29/2020', '5/30/2020', '5/31/2020', '6/1/2020', '6/2/2020', '6/3/2020', '6/4/2020', '6/5/2020', '6/6/2020', '6/7/2020', '6/8/2020', '6/9/2020', '6/10/2020', '6/11/2020', '6/12/2020', '6/13/2020', '6/14/2020', '6/15/2020', '6/16/2020', '6/17/2020', '6/18/2020', '6/19/2020', '6/20/2020', '6/21/2020', '6/22/2020', '6/23/2020', '6/24/2020', '6/25/2020', '6/26/2020', '6/27/2020', '6/28/2020', '6/30/2020', '7/01/2020', '7/02/2020',
'7/03/2020', '7/04/2020', '7/05/2020', '7/06/2020', '7/07/2020', '7/08/2020', '7/09/2020', '7/10/2020', '7/11/2020', '7/12/2020', '7/13/2020', '7/14/2020', '7/15/2020', '7/16/2020', '7/17/2020', '7/18/2020', '7/19/2020', '7/20/2020', '7/21/2020', '7/22/2020', '7/23/2020', '7/24/2020', '7/25/2020', '7/26/2020', '7/27/2020', '7/28/2020', '7/29/2020', '7/30/2020', '7/31/2020', '8/01/2020', '8/02/2020', '8/03/2020', '8/04/2020', '8/05/2020', '8/06/2020', '8/07/2020', '8/08/2020', '8/09/2020', '8/10/2020', '8/11/2020', '8/12/2020', '8/13/2020', '8/14/2020', '8/15/2020', '8/16/2020', '8/17/2020', '8/18/2020', '8/19/2020', '8/20/2020', '8/21/2020', '8/22/2020', '8/23/2020', '8/24/2020', '8/25/2020', '8/26/2020', '8/27/2020', '8/28/2020', '8/29/2020', '8/30/2020', '8/31/2020', '9/01/2020', '9/02/2020', '9/3/2020', '9/4/2020', '9/5/2020', '9/7/2020', '9/08/2020', '9/09/2020', '9/10/2020', '9/11/2020', '9/12/2020', '9/14/2020', '9/15/2020', '9/16/2020', '9/17/2020', '9/18/2020', '9/19/2020', '9/20/2020', '9/21/2020', '9/22/2020', '9/23/2020', '9/24/2020', '9/25/2020', '9/26/2020', '9/27/2020', '9/28/2020', '9/29/2020', '9/30/2020', '10/01/2020', '10/02/2020', '10/03/2020', '10/04/2020', '10/05/2020', '10/06/2020', '10/07/2020', '10/08/2020', '10/09/2020', '10/10/2020', '10/11/2020', '10/12/2020', '10/13/2020', '10/14/2020', '10/15/2020', '10/16/2020', '10/17/2020', '10/18/2020', '10/19/2020', '10/20/2020', '10/21/2020', '10/22/2020', '10/23/2020', '10/24/2020', '10/25/2020', '10/26/2020', '10/27/2020', '10/28/2020', '10/29/2020', '10/30/2020', '10/31/2020', '11/01/2020', '11/02/2020', '11/03/2020', '11/04/2020', '11/05/2020', '11/06/2020', '11/07/2020', '11/08/2020', '11/09/2020', '11/10/2020', '11/11/2020', '11/12/2020', '11/13/2020', '11/14/2020', '11/15/2020', '11/16/2020', '11/17/2020', '11/18/2020', '11/19/2020', '11/20/2020', '11/21/2020', '11/22/2020', '11/23/2020', '11/24/2020', '11/25/2020', '11/26/2020', '11/27/2020', '11/28/2020', '11/29/2020', '11/30/2020', '12/01/2020', '12/02/2020', '12/03/2020', '12/04/2020', '12/05/2020', '12/06/2020', '12/07/2020', '12/08/2020', '12/09/2020', '12/10/2020', '12/11/2020', '12/12/2020', '12/13/2020', '12/14/2020', '12/15/2020', '12/16/2020', '12/17/2020', '12/18/2020', '12/19/2020', '12/20/2020', '12/21/2020', '12/22/2020', '12/23/2020', '12/24/2020', '12/25/2020', '12/26/2020', '12/27/2020', '12/28/2020', '12/29/2020', '12/30/2020', '12/31/2020', '01/01/2021', '01/02/2021', '01/03/2021', '01/04/2021', '01/05/2021', '01/06/2021', '01/07/2021', '01/08/2021', '01/09/2021', '01/10/2021', '01/11/2021', '01/12/2021', '01/13/2021', '01/14/2021', '01/15/2021', '01/16/2021', '01/17/2021', '01/18/2021', '01/19/2021', '01/20/2021', '01/21/2021', '01/22/2021', '01/23/2021', '01/24/2021', '01/25/2021', '01/26/2021', '01/27/2021', '01/28/2021', '01/29/2021', '01/30/2021', '01/31/2021', '02/01/2021', '02/02/2021', '02/03/2021', '02/04/2021', '02/05/2021', '02/06/2021', '02/07/2021', '02/08/2021', '02/09/2021', '02/10/2021', '02/11/2021', '02/12/2021', '02/13/2021', '02/14/2021', '02/15/2021', '02/16/2021', '02/17/2021', '02/18/2021', '02/19/2021', '02/20/2021', '02/21/2021', '02/22/2021', '02/23/2021', '02/24/2021', '02/25/2021', '02/26/2021', '02/27/2021', '02/28/2021', '03/01/2021', '03/02/2021', '03/03/2021', '03/04/2021', '03/05/2021', '03/06/2021', '03/07/2021', '03/08/2021', '03/09/2021', '03/10/2021', '03/11/2021', '03/12/2021', '03/13/2021', '03/14/2021', '03/15/2021', '03/16/2021', '03/17/2021', '03/18/2021', '03/19/2021', '03/20/2021', '03/24/2021', 
'03/25/2021', '03/26/2021', '03/27/2021', '03/28/2021', '03/29/2021', '03/30/2021', '03/31/2021', '04/01/2021', '04/02/2021', '04/03/2021', '04/04/2021', '04/05/2021', '04/06/2021', '04/07/2021', '04/08/2021', '04/09/2021', '04/10/2021', '04/11/2021', '04/12/2021', '04/13/2021', '04/14/2021', '04/15/2021', '04/16/2021', '04/17/2021', '04/18/2021', '04/19/2021', '04/20/2021', '04/21/2021', '04/22/2021', '04/23/2021', '04/24/2021', '04/25/2021', '04/26/2021', '04/27/2021', '04/28/2021', '04/29/2021', '04/30/2021', '05/01/2021', '05/02/2021', '05/03/2021', '05/04/2021', '05/05/2021', '05/06/2021', '05/07/2021', '05/08/2021', '05/09/2021', '05/10/2021', '05/11/2021', '05/12/2021', '05/13/2021', '05/14/2021', '05/15/2021', '05/16/2021', '05/17/2021', '05/18/2021', '05/19/2021', '05/20/2021', '05/21/2021', '05/22/2021', '05/23/2021', '05/24/2021', '05/25/2021', '05/26/2021', '05/27/2021', '05/28/2021', '05/29/2021', '05/30/2021', '05/31/2021', '06/01/2021', '06/02/2021', '06/03/2021', '06/04/2021', '06/05/2021', '06/06/2021', '06/07/2021', '06/08/2021', '06/09/2021', '06/10/2021', '06/11/2021', '06/12/2021', '06/13/2021', '06/14/2021', '06/15/2021', '06/16/2021', '06/17/2021', '06/18/2021', '06/19/2021', '06/20/2021', '06/21/2021', '06/22/2021', '06/23/2021', '06/24/2021', '06/25/2021', '06/26/2021', '06/27/2021', '06/28/2021', '06/29/2021', '06/30/2021', '07/01/2021', '07/02/2021', '07/03/2021', '07/04/2021', '07/05/2021']
# format dates
x_values = [dt.datetime.strptime(d, "%m/%d/%Y").date() for d in dates]
ax = plt.gca()
formatter = mdates.DateFormatter("%m/%d")
ax.xaxis.set_major_formatter(formatter)
# create x-axis
ax.xaxis.set_major_locator(mdates.WeekdayLocator(
byweekday=(MO, TU, WE, TH, FR, SA, SU), interval=21))
# minor tick = daily
ax.xaxis.set_minor_locator(mdates.WeekdayLocator(
byweekday=(MO, TU, WE, TH, FR, SA, SU)))
# format y-axis
ax.get_yaxis().set_major_formatter(
ticker.FuncFormatter(lambda x, pos: format(int(x/1000), ',')))
# schools closed
plt.axvline(dt.datetime(2020, 3, 18), linestyle='--',
color='orange', linewidth=2, label='schools')
# non-essential businesses closed
plt.axvline(dt.datetime(2020, 3, 20), linestyle='--',
color='red', linewidth=2, label='nonessential')
# stay-at-home
plt.axvline(dt.datetime(2020, 3, 22), color='black',
linewidth=2, label='stay at home')
# massive funeral in brooklyn
plt.axvline(dt.datetime(2020, 4, 29), color='black',
linestyle='--', linewidth=2, label='funeral')
# reopening, phase 1
plt.axvline(dt.datetime(2020, 6, 8), color='green',
linewidth=2, label='stay at home')
# schools reopen
plt.axvline(dt.datetime(2020, 9, 21), color='red',
linewidth=2, label='schools reopen')
# schools close again
plt.axvline(dt.datetime(2020, 11, 19), color='blue',
linewidth=2, label='schools close')
# new cases by day
new_cases = [0, 1, 0, 0, 0, 3, 0, 7, 9, 7, 5, 23, 47, 59, 115, 60, 485, 109, 1086, 1606, 2068, 2432, 2649, 2355, 2478, 4414, 3101, 3585, 4033, 2744, 4613, 5052, 4210, 2358, 6582, 4561, 4105, 3821, 5825, 5603, 7521, 6684, 4306, 5695, 2403, 450, 4161, 6141, 4583, 4220, 3420, 2679, 2407, 3561, 3319, 4385, 4437, 2628, 2896, 1613, 2152, 2347, 2293, 2378, 1962, 1689, 1189, 1565, 1421, 1377, 1395, 1285, 4896, 657, 887, 1087, 1555, 1183, 1377, 665, 577, 724, 466, 1111, 716, 785, 646, 525, 728, 904, 783, 855, 654, 283, 293, 683, 513, 510, 601, 389, 434, 323, 435, 394, 441, 476, 284, 443, 324, 448, 276, 358, 308,
550, 249, 331, 292, 338, 385, 321, 340, 503, 340, 362, 438, 349, 291, 209, 310, 199, 382, 313, 333, 326, 275, 269, 366, 396, 332, 333, 264, 319, 552, 98, 361, 152, 531, 94, 217, 424, 313, 314, 288, 309, 199, 192, 318, 346, 287, 403, 241, 321, 210, 272, 364, 429, 330, 224, 489, 221, 181, 261, 305, 203, 217, 284, 189, 171, 183, 236, 311, 233, 229, 225, 291, 248, 324, 222, 304, 230, 212, 196, 478, 216, 290, 420, 336, 253, 275, 327, 634, 188, 320, 284, 209, 379, 386, 401, 343, 367, 395, 486, 466, 579, 609, 530, 439, 473, 587, 421, 652, 680, 473, 352, 416, 502, 438, 555, 545, 486, 523, 390, 718, 436, 481, 968, 343, 367, 568, 561, 314, 1117, 641, 585, 447, 732, 592, 800, 1087, 1151, 646, 52, 1389, 963, 1127, 1154, 1228, 1489, 973, 1552, 1264, 1486, 1420, 1572, 1398, 1642, 1350, 1312, 1959, 1889, 1905, 1282, 2100, 2218, 2384, 2512, 2855, 2498, 2406, 2298, 2715, 2561, 2582, 2643, 3168, 2630, 2367, 3265, 2531, 2539, 3874, 2633, 2256, 2761, 2693, 3199, 3766, 3452, 3222, 2653, 2512, 4029, 3366, 3851, 4800, 5041, 2937, 2892, 3956, 3969, 5077, 5241, 4770, 5045, 4306, 5168, 4508, 4746, 6222, 5018, 4988, 4520, 4509, 3571, 4283, 5127, 4844, 5130, 4086, 3982, 3013, 4964, 4774, 5229, 4533, 3375, 3069, 2570, 2084, 2463, 4394, 3973, 4160, 3811, 2144, 3870, 3407, 3590, 3398, 2945, 2904, 1914, 5296, 3819, 3515, 2306, 2554, 2974, 2558, 3459, 3149, 3265, 3318, 2806, 2979, 2369, 3084, 3389, 3245, 3087, 1994, 3443, 2058, 2680, 3003, 2124, 2397, 3400, 2833, 2229, 1127, 820, 2142, 17319, 3147, 2857, 3078, 3387, 4477, 1518, 2734, 3319, 3241, 2543, 3017, 2563, 2778, 2229, 3166, 3027, 2774, 2213, 2431, 1749, 1522, 2120, 2648, 1750, 2220, 2190, 2012, 1452, 1973, 1625, 1412, 1358, 1249, 1256, 850, 1169, 1323, 1360, 982, 855, 684, 809, 827, 777, 878, 865, 650, 419, 564, 667, 650, 347, 772, 468, 662, 312, 502, 456, 402, 406, 316, 260, 206, 376, 298, 265, 289, 210, 137, 143, 124, 258, 274, 148, 181, 191, 172, 208, 329, 154, 139, 179, 130, 76, 173, 162, 160, 151, 137, 120, 129, 132, 136, 174, 152, 158, 110, 92, 177, 200, 71, 153, 195]
# text labels
plt.title('Covid-19 in NYC: New Cases')
plt.xlabel('Date')
plt.ylabel('Number of New Cases (in thousands)')
plt.legend(['Schools Closure', 'Non-Essential Businesses Closure', 'Statewide Stay-at-Home Order',
'Massive Funeral Crowd in Brooklyn', 'Reopening, Phase 1', 'Schools Reopen', 'Schools Close'], loc='best')
# create the graph
plt.plot(x_values, new_cases, color='#730af2', linewidth=2)
plt.show()
| 176.78125
| 4,998
| 0.595457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,274
| 0.554534
|
b06d15947556e9e4b04c29a89022d993e3d2bccf
| 4,357
|
py
|
Python
|
src/face_utils/save_figure.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | null | null | null |
src/face_utils/save_figure.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | null | null | null |
src/face_utils/save_figure.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import albumentations as A
from pathlib import Path
import torch
from torch import nn
from src_backup.cdan import get_model
from src.backbone.iresnet import get_arcface_backbone
class MyModel(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.layers = [backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4]
def forward(self, x):
activations = []
x = self.backbone.prelu(self.backbone.bn1(self.backbone.conv1(x)))
for layer in self.layers:
x = layer(x)
activations.append(x)
return activations
def get_best_model(mode='arcface', base_path='log/best_weight/{}.pth'):
model_path_dict = {'BSP': 'FACE_CDAN_BSP_BOTH', 'DAN': 'FACE_DAN_BOTH',
'BOTH': 'FACE_BOTH', 'FACE': 'FACE'}
backbone = get_arcface_backbone('cpu')
if mode != 'arcface':
backbone = get_model(backbone, fc_dim=512, embed_dim=512, nclass=460, hidden_dim=1024,
pretrained_path=base_path.format(model_path_dict[mode])).backbone
backbone.eval()
return MyModel(backbone)
def img_preprocessing(img):
transforms = A.Compose([
A.SmallestMaxSize(112),
A.CenterCrop(112, 112, p=1),
])
img = ((np.transpose(transforms(image=np.array(img))['image'], (2, 0, 1)) / 255) - 0.5) / 0.5
return img
def activation_based_map_f(activations):
attention_map = []
for activation in activations:
img = activation.pow(2).mean(1).detach().numpy()[0, :, :, np.newaxis]
resized_img = A.Resize(112, 112, 4)(image=img)['image']
attention_map.append((resized_img, img))
return attention_map
def show_example(img_path='iu_mask.jpg', mode='arcface', show=True):
img = Image.open(img_path)
img_resized = A.Resize(112, 112)(image=np.array(img))['image']
    img_np = img_preprocessing(img)  # scaled to [-1, 1], channels-first (see img_preprocessing above)
input_img = torch.from_numpy(img_np).float().unsqueeze(0)
model = get_best_model(mode)
activations = model(input_img)
attention_maps = activation_based_map_f(activations)
if show:
plt.imshow(img)
plt.show()
for attention_map in attention_maps:
plt.figure(figsize=(16, 10))
plt.subplot(1, 2, 1)
plt.imshow(img_resized, interpolation='bicubic')
plt.imshow(attention_map[0], alpha=0.8, interpolation='bicubic')
plt.subplot(1, 2, 2)
plt.imshow(attention_map[1], interpolation='bicubic')
plt.show()
return [maps[0] for maps in attention_maps]
def compare_example(img_path, mode1='arcface', mode2='BSP', alpha=0.7, show=False):
transforms = A.Compose([
A.SmallestMaxSize(112),
A.CenterCrop(112, 112, p=1),
])
img = transforms(image=np.array(Image.open(img_path)))['image']
attn1 = show_example(img_path=img_path, mode=mode1, show=False)
attn2 = show_example(img_path=img_path, mode=mode2, show=False)
plt.figure(figsize=(16, 6))
plt.subplot(2, 5, 1)
plt.imshow(img)
plt.xticks([])
plt.yticks([])
for i, attention_map in enumerate(zip(attn1, attn2)):
plt.subplot(2, 5, 2 + i)
plt.imshow(img, alpha=0.8)
plt.imshow(attention_map[0], alpha=alpha, interpolation='bicubic')
plt.xticks([])
plt.yticks([])
plt.subplot(2, 5, 7 + i)
plt.imshow(img, alpha=0.8)
plt.imshow(attention_map[1], alpha=alpha, interpolation='bicubic')
plt.xticks([])
plt.yticks([])
if show:
plt.show()
else:
Path('result/attention_fig/').mkdir(exist_ok=True, parents=True)
plt.savefig('result/attention_fig/{}_{}_{}_{}.jpg'.format(
os.path.basename(img_path).split('.')[0], mode1, mode2, int(alpha*10)))
plt.close('all')
def run(args):
for mode in ['FACE', 'BSP', 'BOTH', 'DAN']:
for image_path in ['iu.jpg', 'iu_mask1.jpg', 'iu_mask2.jpg', 'iu_mask3.jpg', 'iu_mask4.jpg']:
for alpha in [0.8, 0.9]:
print('mode: {}'.format(mode))
print('alpha: {}'.format(alpha))
compare_example(img_path='examples/{}'.format(image_path), mode1='arcface', mode2=mode, alpha=alpha)
| 36.008264
| 116
| 0.627037
| 457
| 0.104889
| 0
| 0
| 0
| 0
| 0
| 0
| 428
| 0.098233
|
b070934d7222c882ff718596c5213477b01b49fc
| 2,481
|
py
|
Python
|
tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py
|
monishshah18/pytest-splunk-addon
|
1600f2c7d30ec304e9855642e63511780556b406
|
[
"Apache-2.0"
] | 39
|
2020-06-09T17:37:21.000Z
|
2022-02-08T01:57:35.000Z
|
tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py
|
monishshah18/pytest-splunk-addon
|
1600f2c7d30ec304e9855642e63511780556b406
|
[
"Apache-2.0"
] | 372
|
2020-04-15T13:55:09.000Z
|
2022-03-31T17:14:56.000Z
|
tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py
|
isabella232/pytest-splunk-addon
|
5e6ae2b47df7a1feb6f358bbbd1f02197b5024f6
|
[
"Apache-2.0"
] | 22
|
2020-05-06T10:43:45.000Z
|
2022-03-16T15:50:08.000Z
|
import pytest
from datetime import datetime
from freezegun import freeze_time
from pytest_splunk_addon.standard_lib.sample_generation.time_parser import (
time_parse,
)
@pytest.fixture(scope="session")
def tp():
return time_parse()
def generate_parameters():
result = []
for s in ("s", "sec", "secs", "second", "seconds"):
result.append(("-", "60", s, datetime(2020, 9, 1, 8, 15, 13)))
for m in ("m", "min", "minute", "minutes"):
result.append(("-", "60", m, datetime(2020, 9, 1, 7, 16, 13)))
for h in ("h", "hr", "hrs", "hour", "hours"):
result.append(("-", "60", h, datetime(2020, 8, 29, 20, 16, 13)))
for d in ("d", "day", "days"):
result.append(("-", "1", d, datetime(2020, 8, 31, 8, 16, 13)))
for w in ("w", "week", "weeks"):
result.append(("-", "2", w, datetime(2020, 8, 18, 8, 16, 13)))
for m in ("mon", "month", "months"):
result.append(("-", "2", m, datetime(2020, 7, 1, 8, 16, 13)))
for q in ("q", "qtr", "qtrs", "quarter", "quarters"):
result.append(("-", "2", q, datetime(2020, 3, 1, 8, 16, 13)))
for y in ("y", "yr", "yrs", "year", "years"):
result.append(("-", "2", y, datetime(2018, 9, 1, 8, 16, 13)))
result.extend(
[
("+", "5", "months", datetime(2021, 2, 1, 8, 16, 13)),
("+", "3", "months", datetime(2020, 12, 1, 8, 16, 13)),
("-", "11", "months", datetime(2019, 10, 1, 8, 16, 13)),
("smth", "15", "minutes", datetime(2020, 9, 1, 8, 31, 13)),
]
)
return result
class Testtime_parse:
@freeze_time("2020-09-01T04:16:13-04:00")
@pytest.mark.parametrize("sign, num, unit, expected", generate_parameters())
def test_convert_to_time(self, tp, sign, num, unit, expected):
assert tp.convert_to_time(sign, num, unit) == expected
@pytest.mark.parametrize(
"timezone_time, expected",
[
("+1122", datetime(2020, 9, 1, 19, 37, 13)),
("+0022", datetime(2020, 9, 1, 8, 15, 13)),
("+2322", datetime(2020, 9, 1, 8, 15, 13)),
("+1200", datetime(2020, 9, 1, 8, 15, 13)),
("+0559", datetime(2020, 9, 1, 8, 15, 13)),
("-1122", datetime(2020, 8, 31, 20, 53, 13)),
],
)
def test_get_timezone_time(self, tp, timezone_time, expected):
assert (
tp.get_timezone_time(datetime(2020, 9, 1, 8, 15, 13), timezone_time)
== expected
)
| 37.590909
| 80
| 0.523176
| 909
| 0.366385
| 0
| 0
| 943
| 0.380089
| 0
| 0
| 432
| 0.174123
|
c65e7d463bac4685e30ec3b3b04bcf2f66cd3d98
| 2,756
|
py
|
Python
|
igcollect/artfiles.py
|
brainexe/igcollect
|
12a2fa81331f305f8852b5a30c8d90d2a8895738
|
[
"MIT"
] | 15
|
2016-04-13T11:13:41.000Z
|
2020-12-04T17:25:43.000Z
|
igcollect/artfiles.py
|
brainexe/igcollect
|
12a2fa81331f305f8852b5a30c8d90d2a8895738
|
[
"MIT"
] | 10
|
2016-12-01T15:15:31.000Z
|
2020-05-07T13:54:57.000Z
|
igcollect/artfiles.py
|
brainexe/igcollect
|
12a2fa81331f305f8852b5a30c8d90d2a8895738
|
[
"MIT"
] | 18
|
2016-03-16T11:06:10.000Z
|
2022-03-14T14:56:05.000Z
|
#!/usr/bin/env python
"""igcollect - Artfiles Hosting Metrics
Copyright (c) 2019 InnoGames GmbH
"""
import base64
from argparse import ArgumentParser
from time import time
try:
# Try importing the Python3 packages
from urllib.request import Request, urlopen
from urllib.parse import urlencode
except ImportError:
    # On failure, fall back to the Python 2 packages
from urllib2 import Request, urlopen
from urllib import urlencode
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='artfiles')
parser.add_argument(
"-u",
"--user",
dest="http_user",
type=str,
required=True,
help="the http user name to authenticate")
parser.add_argument(
"-p",
"--pass",
dest="http_pass",
type=str,
required=True,
help="the http password to authenticate")
return parser.parse_args()
def main():
args = parse_args()
request = Request('https://dcp.c.artfiles.de/api/stats/get_estats.html')
credentials = '{}:{}'.format(args.http_user, args.http_pass)
base64credentials = base64.b64encode(credentials.encode())
request.add_header('Authorization',
'Basic {}'.format(base64credentials.decode()))
data = urlopen(request)
template = args.prefix + '.{dc}.{rack}.{pdu_nr}.{unit} {value} ' + str(
int(time()))
for csv in data.readlines():
csv = csv.decode()
if csv.startswith('"level3"') or csv.startswith('"w408'):
parse_and_print(template, csv)
def parse_and_print(template, csv):
values = [v.strip('\n "') for v in csv.split(',')]
dc = values[0].replace('/', '_')
rack = values[1].replace('/', '')
pdu_nr = values[2]
maxcur = values[3]
measurement_type = values[4]
maxval_watt = values[5]
curval = values[6]
if maxcur == '10A':
print(template.format(dc=dc, rack=rack, pdu_nr=pdu_nr, unit='max',
value='10.00'))
elif maxcur == '16A':
print(template.format(dc=dc, rack=rack, pdu_nr=pdu_nr, unit='max',
value='16.00'))
elif maxcur == 'redundant':
print(template.format(dc=dc, rack=rack, pdu_nr=pdu_nr, unit='max',
value='20.00'))
if measurement_type == 'ampere':
ampere = curval.replace(' A', '')
print(template.format(dc=dc, rack=rack, pdu_nr=pdu_nr, unit='ampere',
value=ampere))
if measurement_type == 'kwh':
kwh = maxval_watt.replace(' kWh', '')
print(template.format(dc=dc, rack=rack, pdu_nr=pdu_nr, unit='kwh',
value=kwh))
if __name__ == '__main__':
main()
| 30.966292
| 77
| 0.589623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 572
| 0.207547
|
c65ec057f48af79a642c8637764b523b537f83f6
| 5,459
|
py
|
Python
|
sem/storage/corpus.py
|
YoannDupont/SEM
|
ff21c5dc9a8e99eda81dc266e67cfa97dec7c243
|
[
"MIT"
] | 22
|
2016-11-13T21:08:58.000Z
|
2021-04-26T07:04:54.000Z
|
sem/storage/corpus.py
|
Raphencoder/SEM
|
ff21c5dc9a8e99eda81dc266e67cfa97dec7c243
|
[
"MIT"
] | 15
|
2016-11-15T10:21:07.000Z
|
2021-11-08T10:08:05.000Z
|
sem/storage/corpus.py
|
Raphencoder/SEM
|
ff21c5dc9a8e99eda81dc266e67cfa97dec7c243
|
[
"MIT"
] | 8
|
2016-11-15T10:21:41.000Z
|
2022-03-04T21:28:05.000Z
|
# -*- coding: utf-8 -*-
"""
file: corpus.py
Description: defines the Corpus object. It is an object representation
of a CoNLL-formatted corpus.
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sem.IO import KeyReader, KeyWriter
_train_set = set([u"train", u"eval", u"evaluate", u"evaluation"])
_train = u"train"
_label_set = set([u"label", u"annotate", u"annotation"])
_label = "label"
_modes = _train_set | _label_set
_equivalence = dict([[mode, _train] for mode in _train_set] + [[mode, _label] for mode in _label_set])
class Entry(object):
"""
The Entry object. It represents a field's identifier in a CoNLL corpus.
An Entry may be used only in certain circumstances: for example, the
output tag may only appear in train mode.
"""
def __init__(self, name, mode=u"label"):
if mode not in _modes:
raise ValueError("Unallowed mode for entry: {0}".format(mode))
self._name = name
self._mode = _equivalence[mode]
def __eq__(self, other):
return self.name == other.name
@property
def name(self):
return self._name
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, mode):
self._mode = _equivalence[mode]
@property
def is_train(self):
return self._mode == _train
@property
def is_label(self):
return self._mode == _label
@staticmethod
def fromXML(xml_element):
return Entry(**xml_element.attrib)
def has_mode(self, mode):
return self.mode == _equivalence[mode]
class Corpus(object):
def __init__(self, fields=None, sentences=None):
if fields:
self.fields = fields[:]
else:
self.fields = []
if sentences:
self.sentences = sentences[:]
else:
self.sentences = []
def __contains__(self, item):
return item in self.fields
def __len__(self):
return len(self.sentences)
def __iter__(self):
for element in self.iterate_on_sentences():
yield element
def __unicode__(self):
return self.unicode(self.fields)
@classmethod
def from_conll(cls, filename, fields, encoding="utf-8"):
def tab_split(s):
return s.split(u"\t")
corpus = Corpus(fields)
for sentence in KeyReader(filename, encoding, fields, splitter=tab_split):
corpus.append_sentence([token.copy() for token in sentence])
return corpus
def unicode(self, fields, separator=u"\t"):
fmt = u"\t".join([u"{{{0}}}".format(field) for field in fields])
sentences = []
for sentence in self:
sentences.append([])
for token in sentence:
sentences[-1].append((fmt.format(**token)) + u"\n")
return u"\n".join([u"".join(sentence) for sentence in sentences])
def to_matrix(self, sentence):
sent = []
for token in sentence:
sent.append([token[field] for field in self.fields])
return sent
def iterate_on_sentences(self):
for element in self.sentences:
yield element
def is_empty(self):
return 0 == len(self.sentences)
def has_key(self, key):
return key in self.fields
def append_sentence(self, sentence):
self.sentences.append(sentence)
def from_sentences(self, sentences, field_name=u"word"):
del self.fields[:]
del self.sentences[:]
self.fields = [field_name]
for sentence in sentences:
self.sentences.append([])
for token in sentence:
self.sentences[-1].append({field_name : token})
def from_segmentation(self, content, tokens, sentences, field_name=u"word"):
self.fields = [field_name]
for sentence in sentences.spans:
sentence_tokens = []
self.append_sentence([{field_name:content[token.lb : token.ub]} for token in tokens.spans[sentence.lb : sentence.ub]])
def write(self, fd, fields=None):
fmt = u"\t".join(["{{{0}}}".format(field) for field in (fields or self.fields)]) + u"\n"
for sentence in self:
for token in sentence:
fd.write(fmt.format(**token))
fd.write(u"\n")
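# The module docstring above describes Corpus as an object representation of a
# CoNLL-formatted corpus. The snippet below is a minimal, hypothetical usage sketch
# (not part of the original module): it builds a two-column corpus by hand and renders
# it back to tab-separated text using only the methods defined above.
if __name__ == "__main__":
    corpus = Corpus(fields=[u"word", u"pos"])
    corpus.append_sentence([
        {u"word": u"Time", u"pos": u"NN"},
        {u"word": u"flies", u"pos": u"VBZ"},
    ])
    # Prints one token per line, columns separated by tabs, sentences separated by blank lines.
    print(corpus.unicode([u"word", u"pos"]))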
| 32.301775
| 130
| 0.634365
| 3,877
| 0.710203
| 193
| 0.035354
| 755
| 0.138304
| 0
| 0
| 1,653
| 0.302803
|
c660dc00601aa00fc2df39ad1285ba2cbf2bab57
| 3,426
|
py
|
Python
|
recbole/utils/inferred_lm.py
|
ghazalehnt/RecBole
|
f1219847005e2c8d72b8c3cd5c49a138fe83276d
|
[
"MIT"
] | null | null | null |
recbole/utils/inferred_lm.py
|
ghazalehnt/RecBole
|
f1219847005e2c8d72b8c3cd5c49a138fe83276d
|
[
"MIT"
] | null | null | null |
recbole/utils/inferred_lm.py
|
ghazalehnt/RecBole
|
f1219847005e2c8d72b8c3cd5c49a138fe83276d
|
[
"MIT"
] | null | null | null |
import time
import torch
from recbole.config import Config
from recbole.utils import get_model, init_seed
import gensim
import gensim.downloader as api
from recbole.data import create_dataset, data_preparation
import numpy as np
URL_FIELD = "item_url"
class ItemLM:
def __init__(self, checkpoint_file, model_name, dataset_name, k=20, step=5000, load_docs=None, config_dict=None, config_file_list=None):
checkpoint = torch.load(checkpoint_file, map_location=torch.device('cpu'))
config = Config(model=model_name, dataset=dataset_name, config_file_list=config_file_list, config_dict=config_dict)
init_seed(config['seed'], config['reproducibility'])
dataset = create_dataset(config)
train_data, valid_data, test_data = data_preparation(config, dataset)
model = get_model(config['model'])(config, train_data).to(config['device'])
model.load_state_dict(checkpoint['state_dict'])
item_ids = dataset.get_item_feature()['item_id']
items = model.item_embedding(item_ids)
item_identifiers = dataset.get_item_feature()[URL_FIELD]
item_identifiers = dataset.id2token(URL_FIELD, item_identifiers)
url_id = {}
for i in range(1, len(item_identifiers)):
url_id[item_identifiers[i]] = i
url_id_temp = {}
if load_docs is not None:
item_ids = set()
for url in load_docs:
if url in url_id:
item_ids.add(url_id[url])
url_id_temp[url] = url_id[url]
else:
# print(f"{url} does not exist in model")
pass
item_ids = [0] + list(item_ids)
url_id = url_id_temp
id_url = {}
for url, id in url_id.items():
id_url[id] = url
print("loading glove")
s = time.time()
pretrained_embedding_name = "glove-wiki-gigaword-50"
model_path = api.load(pretrained_embedding_name, return_path=True)
w2v_model = gensim.models.KeyedVectors.load_word2vec_format(model_path)
w2v_id_terms = np.array(w2v_model.index_to_key)
print(f"done: {time.time() - s}")
self.item_lms = {}
print("making item lm...")
ts = time.time()
s = 1
e = step
if e > len(item_ids) > s:
e = len(item_ids)
while e <= len(item_ids):
print(f"{s}:{e}")
batch_ids = item_ids[s:e]
batch_items = items[batch_ids].detach().clone()
batch_lms = torch.matmul(batch_items, model.word_embedding.weight.T)
batch_lms = torch.softmax(batch_lms, 1)
batch_lms_top = batch_lms.topk(k, dim=1)
probs_normalized_topk = (batch_lms_top.values.T / batch_lms_top.values.sum(1)).T
min_ps = probs_normalized_topk.min(dim=1)
estimated_length = torch.ones(e - s) / min_ps.values
item_lm_probs = (probs_normalized_topk.T * estimated_length).T
item_lm_keys = w2v_id_terms[batch_lms_top.indices]
for i in range(len(batch_ids)):
self.item_lms[id_url[int(batch_ids[i])]] = (item_lm_keys[i], item_lm_probs[i].numpy())
s = e
e += step
if e > len(item_ids) > s:
e = len(item_ids)
print(f"done: {time.time()-ts}")
def get_lm(self):
return self.item_lms
| 40.305882
| 140
| 0.613543
| 3,170
| 0.925277
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.068301
|
c660f9f806690fc5f7e2f8042a3e47405144af39
| 2,842
|
py
|
Python
|
alchemist_py/parsetab.py
|
Kenta11/alchemist_py
|
49d013dde4688f663eb2d35519347047739ecace
|
[
"MIT"
] | null | null | null |
alchemist_py/parsetab.py
|
Kenta11/alchemist_py
|
49d013dde4688f663eb2d35519347047739ecace
|
[
"MIT"
] | 1
|
2021-08-04T14:14:09.000Z
|
2021-08-04T14:14:09.000Z
|
alchemist_py/parsetab.py
|
Kenta11/alchemist_py
|
49d013dde4688f663eb2d35519347047739ecace
|
[
"MIT"
] | 1
|
2021-07-15T07:05:42.000Z
|
2021-07-15T07:05:42.000Z
|
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'INTEGER L_BRACE L_BRACKET RESERVED R_BRACE R_BRACKET SEMICOLON STRUCT TYPE_BOOL TYPE_CSTDINT TYPE_PRIMITIVE_FLOAT TYPE_PRIMITIVE_INT TYPE_STRING TYPE_UNSIGNED VAR_NAMEMESSAGE : PARAMS\n PARAMS : PARAM\n | PARAMS PARAM\n \n PARAM : TYPE VAR_NAME SEMICOLON\n | TYPE VAR_NAME ARRAY SEMICOLON\n \n TYPE : TYPE_PRIMITIVE_INT\n | TYPE_PRIMITIVE_FLOAT\n | TYPE_CSTDINT\n | TYPE_BOOL\n | TYPE_STRING\n | TYPE_UNSIGNED TYPE_PRIMITIVE_INT\n \n ARRAY : L_BRACKET INTEGER R_BRACKET\n | ARRAY L_BRACKET INTEGER R_BRACKET\n '
_lr_action_items = {'TYPE_PRIMITIVE_INT':([0,2,3,10,11,14,17,],[5,5,-2,13,-3,-4,-5,]),'TYPE_PRIMITIVE_FLOAT':([0,2,3,11,14,17,],[6,6,-2,-3,-4,-5,]),'TYPE_CSTDINT':([0,2,3,11,14,17,],[7,7,-2,-3,-4,-5,]),'TYPE_BOOL':([0,2,3,11,14,17,],[8,8,-2,-3,-4,-5,]),'TYPE_STRING':([0,2,3,11,14,17,],[9,9,-2,-3,-4,-5,]),'TYPE_UNSIGNED':([0,2,3,11,14,17,],[10,10,-2,-3,-4,-5,]),'$end':([1,2,3,11,14,17,],[0,-1,-2,-3,-4,-5,]),'VAR_NAME':([4,5,6,7,8,9,13,],[12,-6,-7,-8,-9,-10,-11,]),'SEMICOLON':([12,15,21,22,],[14,17,-12,-13,]),'L_BRACKET':([12,15,21,22,],[16,18,-12,-13,]),'INTEGER':([16,18,],[19,20,]),'R_BRACKET':([19,20,],[21,22,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'MESSAGE':([0,],[1,]),'PARAMS':([0,],[2,]),'PARAM':([0,2,],[3,11,]),'TYPE':([0,2,],[4,4,]),'ARRAY':([12,],[15,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> MESSAGE","S'",1,None,None,None),
('MESSAGE -> PARAMS','MESSAGE',1,'p_MESSAGE','yacc.py',6),
('PARAMS -> PARAM','PARAMS',1,'p_PARAMS','yacc.py',11),
('PARAMS -> PARAMS PARAM','PARAMS',2,'p_PARAMS','yacc.py',12),
('PARAM -> TYPE VAR_NAME SEMICOLON','PARAM',3,'p_PARAM','yacc.py',21),
('PARAM -> TYPE VAR_NAME ARRAY SEMICOLON','PARAM',4,'p_PARAM','yacc.py',22),
('TYPE -> TYPE_PRIMITIVE_INT','TYPE',1,'p_TYPE','yacc.py',34),
('TYPE -> TYPE_PRIMITIVE_FLOAT','TYPE',1,'p_TYPE','yacc.py',35),
('TYPE -> TYPE_CSTDINT','TYPE',1,'p_TYPE','yacc.py',36),
('TYPE -> TYPE_BOOL','TYPE',1,'p_TYPE','yacc.py',37),
('TYPE -> TYPE_STRING','TYPE',1,'p_TYPE','yacc.py',38),
('TYPE -> TYPE_UNSIGNED TYPE_PRIMITIVE_INT','TYPE',2,'p_TYPE','yacc.py',39),
('ARRAY -> L_BRACKET INTEGER R_BRACKET','ARRAY',3,'p_ARRAY','yacc.py',67),
('ARRAY -> ARRAY L_BRACKET INTEGER R_BRACKET','ARRAY',4,'p_ARRAY','yacc.py',68),
]
| 64.590909
| 622
| 0.611189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,612
| 0.567206
|
c665a58b2ec63745fb6a56eded667c424d56d832
| 548
|
py
|
Python
|
fisica.py
|
Kenedw/RSSF
|
b9e7f2f0c6f2304af4de645039e70800d22d2b0c
|
[
"MIT"
] | 1
|
2019-09-01T20:28:35.000Z
|
2019-09-01T20:28:35.000Z
|
fisica.py
|
Kenedw/RSSF
|
b9e7f2f0c6f2304af4de645039e70800d22d2b0c
|
[
"MIT"
] | null | null | null |
fisica.py
|
Kenedw/RSSF
|
b9e7f2f0c6f2304af4de645039e70800d22d2b0c
|
[
"MIT"
] | 1
|
2019-05-18T00:09:26.000Z
|
2019-05-18T00:09:26.000Z
|
from packet import packet
# Physical layer
class fisica():
'''
@param link list(hosts)
@param ativo boolean (true|false)
'''
def __init__(self):
self.__link = []
# self.__link = link
self.__is_ativo = False
self.__dado = packet
def __SetAtivo(self,ativo):
self.__is_ativo = ativo
def __GetAtivo(self):
return self.__is_ativo
def __SetDado(self,dado):
self.__dado = dado
def __GetDado(self):
return self.__dado
def __SendDado(self,hostDest):
hostDest.send(hostDest.ID,self.__GetDado())
| 18.896552
| 47
| 0.669708
| 504
| 0.919708
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.195255
|
c666e9dcacd68dd1abb51bc4ffb6d2640c170719
| 11,792
|
py
|
Python
|
programs/pyeos/tests/python/cryptokitties/kittyownership.py
|
learnforpractice/pyeos
|
4f04eb982c86c1fdb413084af77c713a6fda3070
|
[
"MIT"
] | 144
|
2017-10-18T16:38:51.000Z
|
2022-01-09T12:43:57.000Z
|
programs/pyeos/tests/python/cryptokitties/kittyownership.py
|
openchatproject/safeos
|
2c8dbf57d186696ef6cfcbb671da9705b8f3d9f7
|
[
"MIT"
] | 60
|
2017-10-11T13:07:43.000Z
|
2019-03-26T04:33:27.000Z
|
programs/pyeos/tests/python/cryptokitties/kittyownership.py
|
learnforpractice/pyeos
|
4f04eb982c86c1fdb413084af77c713a6fda3070
|
[
"MIT"
] | 38
|
2017-12-05T01:13:56.000Z
|
2022-01-07T07:06:53.000Z
|
from backend import *
from basement import *
from pausable import *
from kittyaccesscontrol import *
from kittybase import KittyBase
from erc721 import ERC721
from erc721metadata import ERC721Metadata
# @title The facet of the CryptoKitties core contract that manages ownership, ERC-721 (draft) compliant.
# @author Axiom Zen (https://www.axiomzen.co)
# @dev Ref: https://github.com/ethereum/EIPs/issues/721
# See the KittyCore contract documentation to understand how the various contract facets are arranged.
class KittyOwnership(KittyBase, ERC721):
def __init__(self):
KittyBase.__init__(self)
ERC721.__init__(self)
# @notice Name and symbol of the non fungible token, as defined in ERC721.
#string public constant name = "CryptoKitties";
#string public constant symbol = "CK";
self.name = "CryptoKitties"
self.symbol = "CK"
# The contract that will return kitty metadata
#ERC721Metadata public erc721Metadata;
self.erc721Metadata = ERC721Metadata()
self.InterfaceSignature_ERC165 = bytes4(keccak256('supportsInterface(bytes4)'));
self.InterfaceSignature_ERC721 = bytes4(keccak256('InterfaceSignature_ERC721(bytes4)'));
'''FIXME
bytes4 constant InterfaceSignature_ERC721 =
bytes4(keccak256('name()')) ^
bytes4(keccak256('symbol()')) ^
bytes4(keccak256('totalSupply()')) ^
bytes4(keccak256('balanceOf(address)')) ^
bytes4(keccak256('ownerOf(uint256)')) ^
bytes4(keccak256('approve(address,uint256)')) ^
bytes4(keccak256('transfer(address,uint256)')) ^
bytes4(keccak256('transferFrom(address,address,uint256)')) ^
bytes4(keccak256('tokensOfOwner(address)')) ^
bytes4(keccak256('tokenMetadata(uint256,string)'));
'''
# @notice Introspection interface as per ERC-165 (https://github.com/ethereum/EIPs/issues/165).
# Returns true for any standardized interfaces implemented by this contract. We implement
# ERC-165 (obviously!) and ERC-721.
def supportsInterface(self, _interfaceID: bytes) -> bool:
# DEBUG ONLY
#require((InterfaceSignature_ERC165 == 0x01ffc9a7) && (InterfaceSignature_ERC721 == 0x9a20483d));
return (_interfaceID == self.InterfaceSignature_ERC165) or (_interfaceID == self.InterfaceSignature_ERC721)
# @dev Set the address of the sibling contract that tracks metadata.
# CEO only.
@onlyCEO
def setMetadataAddress(self, _contractAddress: address):
self.erc721Metadata = ERC721Metadata(_contractAddress)
# Internal utility functions: These functions all assume that their input arguments
# are valid. We leave it to public methods to sanitize their inputs and follow
# the required logic.
# @dev Checks if a given address is the current owner of a particular Kitty.
# @param _claimant the address we are validating against.
# @param _tokenId kitten id, only valid when > 0
def _owns(self, _claimant: address, _tokenId: uint256) -> bool:
return self.kittyIndexToOwner[_tokenId] == _claimant
# @dev Checks if a given address currently has transferApproval for a particular Kitty.
# @param _claimant the address we are confirming kitten is approved for.
# @param _tokenId kitten id, only valid when > 0
def _approvedFor(self, _claimant: address, _tokenId: uint256) -> bool:
return self.kittyIndexToApproved[_tokenId] == _claimant
# @dev Marks an address as being approved for transferFrom(), overwriting any previous
# approval. Setting _approved to address(0) clears all transfer approval.
# NOTE: _approve() does NOT send the Approval event. This is intentional because
# _approve() and transferFrom() are used together for putting Kitties on auction, and
# there is no value in spamming the log with Approval events in that case.
def _approve(self, _tokenId: uint256, _approved: address):
self.kittyIndexToApproved[_tokenId] = _approved
# @notice Returns the number of Kitties owned by a specific address.
# @param _owner The owner address to check.
# @dev Required for ERC-721 compliance
def balanceOf(self, _owner: address) -> uint256:
return self.ownershipTokenCount[_owner]
# @notice Transfers a Kitty to another address. If transferring to a smart
# contract be VERY CAREFUL to ensure that it is aware of ERC-721 (or
# CryptoKitties specifically) or your Kitty may be lost forever. Seriously.
# @param _to The address of the recipient, can be a user or contract.
# @param _tokenId The ID of the Kitty to transfer.
# @dev Required for ERC-721 compliance.
def transfer(self, _to: address, _tokenId: uint256):
self.whenNotPaused()
# Safety check to prevent against an unexpected 0x0 default.
require(_to != address(0))
# Disallow transfers to this contract to prevent accidental misuse.
# The contract should never own any kitties (except very briefly
# after a gen0 cat is created and before it goes on auction).
require(_to != address(this))
# Disallow transfers to the auction contracts to prevent accidental
# misuse. Auction contracts should only take ownership of kitties
# through the allow + transferFrom flow.
require(_to != address(self.saleAuction))
require(_to != address(self.siringAuction))
# You can only send your own cat.
require(self._owns(msg.sender, _tokenId))
# Reassign ownership, clear pending approvals, emit Transfer event.
self._transfer(msg.sender, _to, _tokenId)
# @notice Grant another address the right to transfer a specific Kitty via
    # transferFrom(). This is the preferred flow for transferring NFTs to contracts.
# @param _to The address to be granted transfer approval. Pass address(0) to
# clear all approvals.
# @param _tokenId The ID of the Kitty that can be transferred if this call succeeds.
# @dev Required for ERC-721 compliance.
@whenNotPaused
def approve(self, _to: address, _tokenId: uint256):
# Only an owner can grant transfer approval.
require(self._owns(msg.sender, _tokenId))
# Register the approval (replacing any previous approval).
self._approve(_tokenId, _to)
# Emit approval event.
self.Approval(msg.sender, _to, _tokenId)
# @notice Transfer a Kitty owned by another address, for which the calling address
# has previously been granted transfer approval by the owner.
    # @param _from The address that owns the Kitty to be transferred.
# @param _to The address that should take ownership of the Kitty. Can be any address,
# including the caller.
# @param _tokenId The ID of the Kitty to be transferred.
# @dev Required for ERC-721 compliance.
@whenNotPaused
def transferFrom(self, _from: address, _to: address, _tokenId: uint256):
# Safety check to prevent against an unexpected 0x0 default.
require(_to != address(0))
# Disallow transfers to this contract to prevent accidental misuse.
# The contract should never own any kitties (except very briefly
# after a gen0 cat is created and before it goes on auction).
require(_to != address(this))
# Check for approval and valid ownership
require(self._approvedFor(msg.sender, _tokenId))
require(self._owns(_from, _tokenId))
# Reassign ownership (also clears pending approvals and emits Transfer event).
self._transfer(_from, _to, _tokenId)
# @notice Returns the total number of Kitties currently in existence.
# @dev Required for ERC-721 compliance.
def totalSupply(self) -> uint:
return self.kitties.length - 1
# @notice Returns the address currently assigned ownership of a given Kitty.
# @dev Required for ERC-721 compliance.
def ownerOf(self, _tokenId: uint256) -> address:
owner = self.kittyIndexToOwner[_tokenId]
require(owner != address(0))
return owner
# @notice Returns a list of all Kitty IDs assigned to an address.
# @param _owner The owner whose Kitties we are interested in.
# @dev This method MUST NEVER be called by smart contract code. First, it's fairly
# expensive (it walks the entire Kitty array looking for cats belonging to owner),
# but it also returns a dynamic array, which is only supported for web3 calls, and
# not contract-to-contract calls.
def tokensOfOwner(self, _owner: address) -> List:
tokenCount = self.balanceOf(_owner)
result = List([],uint256)
if tokenCount == 0:
# Return an empty array
return result
else:
#FIXME memory type
# uint256[] memory result = new uint256[](tokenCount);
# uint256 totalCats = self.totalSupply();
# uint256 resultIndex = 0;
result = List(size = tokenCount, value_type=uint256)
totalCats = self.totalSupply()
resultIndex = 0
# We count on the fact that all cats have IDs starting at 1 and increasing
# sequentially up to the totalCat count.
# uint256 catId;
for catId in range(1, totalCats+1):
if self.kittyIndexToOwner[catId] == _owner:
result[resultIndex] = catId
resultIndex+=1
return result
# @dev Adapted from memcpy() by @arachnid (Nick Johnson <arachnid@notdot.net>)
# This method is licenced under the Apache License.
# Ref: https://github.com/Arachnid/solidity-stringutils/blob/2f6ca9accb48ae14c66f1437ec50ed19a0616f78/strings.sol
def _memcpy(self, _dest: uint, _src: uint, _len: uint):
pass
'''
def _memcpy(uint _dest, uint _src, uint _len) private view {
# Copy word-length chunks while possible
for(; _len >= 32; _len -= 32) {
assembly {
mstore(_dest, mload(_src))
}
_dest += 32;
_src += 32;
}
# Copy remaining bytes
uint256 mask = 256 ** (32 - _len) - 1;
assembly {
let srcpart := and(mload(_src), not(mask))
let destpart := and(mload(_dest), mask)
mstore(_dest, or(destpart, srcpart))
}
}
'''
# @dev Adapted from toString(slice) by @arachnid (Nick Johnson <arachnid@notdot.net>)
# This method is licenced under the Apache License.
# Ref: https://github.com/Arachnid/solidity-stringutils/blob/2f6ca9accb48ae14c66f1437ec50ed19a0616f78/strings.sol
#FIXME
def _toString(self, _rawBytes, _stringLength) -> str:
assert False
'''
def _toString(bytes32[4] _rawBytes, uint256 _stringLength) private view returns (string) {
var outputString = new string(_stringLength);
uint256 outputPtr;
uint256 bytesPtr;
assembly {
outputPtr := add(outputString, 32)
bytesPtr := _rawBytes
}
_memcpy(outputPtr, bytesPtr, _stringLength);
return outputString;
'''
# @notice Returns a URI pointing to a metadata package for this token conforming to
# ERC-721 (https://github.com/ethereum/EIPs/issues/721)
# @param _tokenId The ID number of the Kitty whose metadata should be returned.
def tokenMetadata(self, _tokenId: uint256, _preferredTransport: str) -> str:
require(self.erc721Metadata != address(0))
# bytes32[4] memory buffer;
# uint256 count;
buffer, count = self.erc721Metadata.getMetadata(_tokenId, _preferredTransport)
return self._toString(buffer, count)
| 45.180077
| 118
| 0.672914
| 11,277
| 0.956326
| 0
| 0
| 1,227
| 0.104054
| 0
| 0
| 7,699
| 0.6529
|
c6690d881a99354cf92a13a7b705df947e112eb1
| 5,009
|
py
|
Python
|
menu.py
|
kokohi28/stock-prediction
|
82d18cbb6366d522a01252e0cdc6eafa9fffea6d
|
[
"MIT"
] | 11
|
2020-06-15T12:38:57.000Z
|
2021-12-08T13:34:28.000Z
|
menu.py
|
kokohi28/stock-prediction
|
82d18cbb6366d522a01252e0cdc6eafa9fffea6d
|
[
"MIT"
] | null | null | null |
menu.py
|
kokohi28/stock-prediction
|
82d18cbb6366d522a01252e0cdc6eafa9fffea6d
|
[
"MIT"
] | 5
|
2020-12-17T16:58:36.000Z
|
2022-02-08T09:29:28.000Z
|
import os
import const as CONST
from datetime import datetime
# Const
MENU_ROOT = 0
MENU_SPECIFY_DATE = 1
MENU_SPECIFY_PERCENT_TRAINED = 2
currMenu = MENU_ROOT
stockList = ['AAPL', '^DJI', '^HSI', '^GSPC']
def welcomeMessage():
print('##############################################################################')
print('#### ####')
print('#### Stock Prediction using Long short-term memory (LSTM) ####')
print('#### ####')
print('#### BY : - Malik Dwi Yoni Fordana (17051204024) ####')
print('#### - Roy Belmiro Virgiant (17051204016) ####')
print('#### - Koko Himawan Permadi (19051204111) ####')
print('#### ####')
print('##############################################################################')
return
def validateDate(date_text):
try:
datetime.strptime(date_text, '%Y/%m/%d')
return True
except ValueError:
return False
def menuSpecifyPercentTrained():
print('\nEnter trained data percentage (%) :')
print('')
print('Press [B] for Back')
return
def menuSpecifyDate():
print('\nEnter period of stock, start date - end date :')
print('example : 2010/01/05-2015/01/05')
print('')
print('Press [B] for Back')
return
def menuRoot():
print('\nSelect Stock:')
print('1. Apple (AAPL)')
print('2. Dow Jones Industrial Average (^DJI)')
print('3. Hang Seng Index (^HSI)')
print('4. S&P 500 (^GSPC)')
print('')
print('Press [Q] for Exit')
return
def handleInputDate(inputVal):
if inputVal == 'B' or \
inputVal == 'b':
return (-1, [])
else:
dateSplit = inputVal.split('-')
if len(dateSplit) < 2:
print('\nRange INVALID... (Press any key to continue)')
input('')
return (0, [])
else:
if validateDate(dateSplit[0]) == False:
print('\nDate start INVALID... (Press any key to continue)')
input('')
return (0, [])
if validateDate(dateSplit[1]) == False:
print('\nDate end INVALID... (Press any key to continue)')
input('')
return (0, [])
return (1, dateSplit)
def handleInputPercentTrained(inputVal):
if inputVal == 'B' or \
inputVal == 'b':
return -1
else:
if inputVal.isnumeric():
num = int(inputVal)
if num == 0 or \
num > 100:
print('\nPercentage INVALID... (Press any key to continue)')
input('')
return 0
else:
return num
else:
print('\nPercentage INVALID... (Press any key to continue)')
input('')
return 0
def clearScreen():
os.system('cls' if os.name == 'nt' else 'clear')
return
def menuLoop():
loopMenu = True
global currMenu
stock = ''
dateRange = []
percentTrained = 0
while loopMenu:
try:
# Clear screen
clearScreen()
# Display Welcome
welcomeMessage()
# Display Input
inputMsg = ''
if currMenu == MENU_ROOT:
menuRoot()
inputMsg = 'Select : '
elif currMenu == MENU_SPECIFY_DATE:
menuSpecifyDate()
inputMsg = 'Specify : '
elif currMenu == MENU_SPECIFY_PERCENT_TRAINED:
menuSpecifyPercentTrained()
inputMsg = 'Percentage : '
# Get Input
inputVal = input(inputMsg)
# Listen Quit Input
if inputVal == 'Q' or \
inputVal == 'q':
stock = ''
dateRange = []
percentTrained = 0
loopMenu = False
else:
# Root
if currMenu == MENU_ROOT:
if inputVal.isnumeric():
num = int(inputVal)
if num == 0 or \
num > len(stockList):
print('\nSelection INVALID... (Press any key to continue)')
input('')
else:
stock = stockList[num - 1]
currMenu = currMenu + 1
else:
print('\nSelection INVALID... (Press any key to continue)')
input('')
# Date
elif currMenu == MENU_SPECIFY_DATE:
res, dateRange = handleInputDate(inputVal)
if res < 0:
currMenu = currMenu - 1
elif res == 0:
pass
elif res > 0:
currMenu = currMenu + 1
# Percent trained
elif currMenu == MENU_SPECIFY_PERCENT_TRAINED:
percentTrained = handleInputPercentTrained(inputVal)
if percentTrained < 0:
currMenu = currMenu - 1
elif percentTrained == 0:
pass
elif percentTrained > 0:
# EXIT MENU LOOP
loopMenu = False
except KeyboardInterrupt:
stock = ''
dateRange = []
percentTrained = 0
loopMenu = False
return (stock, dateRange, percentTrained)
| 27.075676
| 89
| 0.502296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,652
| 0.329806
|
c6692746527064fc0f46c5e36e6e97f09870ae4f
| 3,410
|
py
|
Python
|
demo/infinity/triton_client.py
|
dumpmemory/transformer-deploy
|
36993d8dd53c7440e49dce36c332fa4cc08cf9fb
|
[
"Apache-2.0"
] | 698
|
2021-11-22T17:42:40.000Z
|
2022-03-31T11:16:08.000Z
|
demo/infinity/triton_client.py
|
dumpmemory/transformer-deploy
|
36993d8dd53c7440e49dce36c332fa4cc08cf9fb
|
[
"Apache-2.0"
] | 38
|
2021-11-23T13:45:04.000Z
|
2022-03-31T10:36:45.000Z
|
demo/infinity/triton_client.py
|
dumpmemory/transformer-deploy
|
36993d8dd53c7440e49dce36c332fa4cc08cf9fb
|
[
"Apache-2.0"
] | 58
|
2021-11-24T11:46:21.000Z
|
2022-03-29T08:45:16.000Z
|
# Copyright 2022, Lefebvre Dalloz Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tritonclient.http
from transformer_deploy.benchmarks.utils import print_timings, setup_logging, track_infer_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="require inference", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--length", required=True, help="sequence length", choices=(16, 128), type=int)
parser.add_argument("--model", required=True, help="model type", choices=("onnx", "tensorrt"))
args, _ = parser.parse_known_args()
setup_logging()
model_name = f"transformer_{args.model}_inference"
url = "127.0.0.1:8000"
model_version = "1"
batch_size = 1
if args.length == 128:
# from https://venturebeat.com/2021/08/25/how-hugging-face-is-tackling-bias-in-nlp/, text used in the HF demo
text = """Today, Hugging Face has expanded to become a robust NLP startup,
known primarily for making open-source software such as Transformers and Datasets,
used for building NLP systems. “The software Hugging Face develops can be used for
classification, question answering, translation, and many other NLP tasks,” Rush said.
Hugging Face also hosts a range of pretrained NLP models, on GitHub, that practitioners can download
and apply for their problems, Rush added.""" # noqa: W291
else:
text = "This live event is great. I will sign-up for Infinity."
triton_client = tritonclient.http.InferenceServerClient(url=url, verbose=False)
assert triton_client.is_model_ready(
model_name=model_name, model_version=model_version
), f"model {model_name} not yet ready"
model_metadata = triton_client.get_model_metadata(model_name=model_name, model_version=model_version)
model_config = triton_client.get_model_config(model_name=model_name, model_version=model_version)
query = tritonclient.http.InferInput(name="TEXT", shape=(batch_size,), datatype="BYTES")
model_score = tritonclient.http.InferRequestedOutput(name="output", binary_data=False)
time_buffer = list()
for _ in range(10000):
query.set_data_from_numpy(np.asarray([text] * batch_size, dtype=object))
_ = triton_client.infer(
model_name=model_name, model_version=model_version, inputs=[query], outputs=[model_score]
)
for _ in range(1000):
with track_infer_time(time_buffer):
query.set_data_from_numpy(np.asarray([text] * batch_size, dtype=object))
response = triton_client.infer(
model_name=model_name, model_version=model_version, inputs=[query], outputs=[model_score]
)
print_timings(name="triton transformers", timings=time_buffer)
print(response.as_numpy("output"))
| 46.712329
| 117
| 0.72346
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,512
| 0.442882
|
c66969c34948d04bc70f6e069bd8dabc5e27f5b6
| 2,361
|
py
|
Python
|
mf/knnbased.py
|
waashk/extended-pipeline
|
1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7
|
[
"MIT"
] | null | null | null |
mf/knnbased.py
|
waashk/extended-pipeline
|
1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7
|
[
"MIT"
] | null | null | null |
mf/knnbased.py
|
waashk/extended-pipeline
|
1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7
|
[
"MIT"
] | null | null | null |
import numpy as np
from tqdm import tqdm
from scipy.sparse import csr_matrix, hstack, vstack
from sklearn.neighbors import NearestNeighbors
class MFKnn(object):
"""
Implementation of
"""
def __init__(self, metric, k):
self.k = k
self.metric = metric
def fit(self, X, y):
#
self.X_train = X
self.y_train = y
#
self.classes = sorted(map(int, list(set(self.y_train))))
self.n_classes = len(self.classes)
#
self.docs_by_class = [len(np.where(self.y_train == i)[0]) for i in self.classes]
#
self.X_by_class = []
self.knn_by_class = []
#self.scores = {}
#
njobs=-1
if self.metric == 'l1':
njobs=1
for i in self.classes:
X_tmp = self.X_train[np.where(self.y_train == i)]
#print ("xtmp"+str(X_tmp.shape[0])+" class: "+str(i))
data=[]
data.append(0)
ind=[]
ind.append(0)
            auxf=csr_matrix((data, (ind,ind)), shape=(1,self.X_train.shape[1]),dtype=np.float64) # zero row used for padding
if X_tmp.shape[0]<self.k+1:
newxtmp=[]
for iww in list(range(X_tmp.shape[0])):
newxtmp.append(X_tmp[iww])
for iww in list(range(self.k+1-X_tmp.shape[0])):
newxtmp.append(auxf)
X_tmp=vstack(newxtmp)
knn = NearestNeighbors(n_neighbors=self.k+1, algorithm="brute", metric=self.metric, n_jobs=njobs)
knn.fit(X_tmp)
self.knn_by_class.append(knn)
return self
def csr_matrix_equal2(self, a1, a2):
return all((np.array_equal(a1.indptr, a2.indptr),
np.array_equal(a1.indices, a2.indices),
np.array_equal(a1.data, a2.data)))
def transform(self, X):
#
istrain = True if self.csr_matrix_equal2(self.X_train, X) else False
#print(istrain)
n_neighbors = self.k+1 if istrain else self.k
metafeatures = []
scores = {}
for j in self.classes:
if self.metric == "l1" or self.metric == "l2":
scores[j] = 0.0 + self.knn_by_class[j].kneighbors(X, n_neighbors, return_distance=True)[0]
if self.metric == "cosine":
scores[j] = 1.0 - self.knn_by_class[j].kneighbors(X, n_neighbors, return_distance=True)[0]
#
for i, doc in enumerate(X):
for j in self.classes:
if istrain:
if self.y_train[i] == j:
metafeatures += list(scores[j][i][1:])
else:
metafeatures += list(scores[j][i][:-1])
else:
metafeatures += list(scores[j][i])
return np.array(metafeatures).reshape((X.shape[0],self.k*self.n_classes))
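# Hypothetical usage sketch (assumed names and synthetic data, not from the original file):
# MFKnn turns each document into meta-features made of its distances to the k nearest
# neighbours inside every class, so the output has k * n_classes columns per document.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = csr_matrix(rng.rand(20, 50))
    y_demo = np.array([0] * 10 + [1] * 10)
    mf = MFKnn(metric="cosine", k=3)
    mf.fit(X_demo, y_demo)
    meta = mf.transform(X_demo)
    print(meta.shape)  # (20, 6) -> 3 neighbours per class, 2 classes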
| 23.147059
| 105
| 0.647183
| 2,216
| 0.938585
| 0
| 0
| 0
| 0
| 0
| 0
| 160
| 0.067768
|
c66bd961fbf8bcb3556ef3c4fc46854f04ab9b95
| 581
|
py
|
Python
|
general-practice/Exercises solved/codingbat/Warmup2/string_match.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/Warmup2/string_match.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/Warmup2/string_match.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
#Given 2 strings, a and b, return the number of the positions where they contain the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the "xx", "aa", and "az" substrings appear in the same place in both strings.
#string_match('xxcaazz', 'xxbaaz') → 3
#string_match('abc', 'abc') → 2
#string_match('abc', 'axc') → 0
def string_match(a, b):
    # Iterate over the longer string; the explicit length check below keeps short tail slices from matching.
    higher = a if len(a) > len(b) else b
    count = 0
    for i in range(len(higher)):
        # Count position i when both strings carry a full length-2 substring there and they match.
        if a[i:i+2] == b[i:i+2] and len(a[i:i+2]) == len(b[i:i+2]) == 2:
            count += 1
    return count
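# Quick, hypothetical sanity checks mirroring the examples given in the comment above.
assert string_match('xxcaazz', 'xxbaaz') == 3
assert string_match('abc', 'abc') == 2
assert string_match('abc', 'axc') == 0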
| 34.176471
| 229
| 0.593804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 335
| 0.570698
|
c66c55f72d0c193656a8d7f34187cd20b4099a6f
| 2,458
|
py
|
Python
|
pycondor/tools.py
|
kant/pycondor
|
ee87854504e8f4023feda860d8a9ddbecc7a70da
|
[
"BSD-3-Clause"
] | 6
|
2015-04-17T08:25:30.000Z
|
2020-04-11T23:58:16.000Z
|
pycondor/tools.py
|
kant/pycondor
|
ee87854504e8f4023feda860d8a9ddbecc7a70da
|
[
"BSD-3-Clause"
] | 3
|
2015-12-22T07:40:02.000Z
|
2019-01-21T15:07:00.000Z
|
pycondor/tools.py
|
kant/pycondor
|
ee87854504e8f4023feda860d8a9ddbecc7a70da
|
[
"BSD-3-Clause"
] | 6
|
2015-11-13T18:55:22.000Z
|
2020-03-12T19:32:56.000Z
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Tools
http://en.wikipedia.org/wiki/Haversine_formula
ToDo: ToFix / ToTest
"""
import math
def waypoint_bearing(lat1, lon1, lat2, lon2):
"""
Calculates the bearing between 2 locations.
Method calculates the bearing between 2 locations.
@param lon1 First point longitude.
@param lat1 First point latitude.
@param lon2 Second point longitude.
@param lat2 Second point latitude.
@return The bearing between 2 locations.
"""
longitude1 = math.radians(lon1)
latitude1 = math.radians(lat1)
longitude2 = math.radians(lon2)
latitude2 = math.radians(lat2)
clat1 = math.cos(latitude1)
clat2 = math.cos(latitude2)
dlon = longitude2 - longitude1
y = math.sin(dlon) * clat2
x = clat1 * math.sin(latitude2) - math.sin(latitude1) * clat2 * math.cos(dlon)
if x==0 and y==0:
return(0.0)
else:
return((360 + math.degrees(math.atan2(y, x)) + 0.5) % 360.0)
def haversine_bearing(lat1, lon1, lat2, lon2):
"""
Calculate the bearing from 1 point to 1 other
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
dlon = lon2 - lon1
b = math.atan2(math.sin(dlon) * math.cos(lat2),
math.cos(lat1) * math.sin(lat2)
- math.sin(lat1) * math.cos(lat2) * math.cos(dlon)) # bearing calc
bd = math.degrees(b)
br, bn = divmod(bd + 360, 360) # the bearing remainder and final bearing
return bn
def haversine_distance(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
r = 6371.0 # Radius of earth in kilometers. Use 3956 for miles
return(c * r)
def main():
# Just some tests (should be removed)
    (lon1, lat1, lon2, lat2) = (45.0, 1.0, 45.5, 2.0)
    bearing = waypoint_bearing(lat1, lon1, lat2, lon2)
    print(bearing)
    bearing = haversine_bearing(lat1, lon1, lat2, lon2)
    print(bearing)
if __name__ == '__main__':
main()
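# Sanity-check sketch (added; not part of the original module): one degree of
# longitude along the equator spans 2*pi*R/360 km with R = 6371 km, roughly
# 111.19 km, and haversine_distance reproduces that value.
def _check_haversine():
    d = haversine_distance(0.0, 0.0, 0.0, 1.0)
    assert abs(d - 2 * math.pi * 6371.0 / 360.0) < 1e-9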
| 25.340206
| 83
| 0.633849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 920
| 0.374288
|
c66f914aa66ae752fa396361357e16cd39293db5
| 10,951
|
py
|
Python
|
courses/views.py
|
mdavoodi/konkourse-python
|
50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee
|
[
"MIT"
] | 4
|
2015-06-23T22:17:50.000Z
|
2019-01-17T21:32:02.000Z
|
courses/views.py
|
mdavoodi/konkourse-python
|
50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee
|
[
"MIT"
] | null | null | null |
courses/views.py
|
mdavoodi/konkourse-python
|
50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, render_to_response
from django.template.context import RequestContext
from account.views import login
from models import Course
from website.views import index
from forms import CourseForm, CourseInitialForm
from account.util import createImage
from django.core.context_processors import csrf
from events.forms import EventForm
from events.models import Event
import datetime
from documents.forms import DocumentForm
from datetime import timedelta
from conversation.models import ConvoWall, ConversationPost
from documents.views import __upload_core
from documents.models import Document
from endless_pagination.decorators import page_template
from notification.views import notifyCreateEvent, notifyDocShareCourse
from page.models import Page
def _course_context(request, course_id):
course = Course.objects.get(id=course_id)
institution = Page.objects.get(title=course.institution)
variables_for_template = {
'name': request.user.first_name + ' ' + request.user.last_name,
'image': course.image,
'course_name': course.name,
'course_number': course.number,
'institution': institution,
'description': course.about,
'professor': course.professor,
'course_id': course_id,
'course_form': CourseInitialForm(),
'courses': Course.objects.filter(course_users__username=request.user.username),
'current_course': course,
'section_number': course.course_id,
'is_logged_in': True,
}
return variables_for_template
def _course_exists(request):
form = CourseInitialForm(request.POST, request.FILES)
if form.is_valid():
section_number = ""
if request.user.get_profile().school.title == "James Madison University":
section_number = request.POST['course_id'].zfill(4)
else:
section_number = request.POST['course_id']
try:
course = Course.objects.get(
number__iexact=request.POST['course_number'],
course_id__iexact=section_number,
institution__iexact=request.user.get_profile().school.title)
return (True, course)
except Course.DoesNotExist:
wall = ConvoWall(wall_type=3)
wall.save()
course = Course(
wall=wall,
number=request.POST['course_number'],
course_id=section_number,
institution=request.user.get_profile().school.title)
course.save()
return (False, course)
return (False, None)
@page_template('website/course/course_page.html')
def course(request, course_id,
error_message='', template='website/course/course.html', extra_context=None):
if not request.user.is_authenticated() or not request.user.is_active:
return redirect(index)
else:
variables = _course_context(request, course_id)
variables['event_form'] = EventForm()
variables['doc_form'] = DocumentForm()
c = Course.objects.get(id=course_id)
posts = ConversationPost.objects.filter(wall=c.wall, deleted=False).order_by('created')
variables['wall'] = c.wall
variables['posts'] = posts.reverse()
if error_message != '':
variables['error_message'] = error_message
variables.update(csrf(request))
if extra_context is not None:
variables.update(extra_context)
return render_to_response(
template, variables, context_instance=RequestContext(request))
def create(request):
if not request.user.is_authenticated():
return login(request)
if request.method == 'POST':
exists, c = get_or_add_course(request)
if(c is None):
return index(request)
if(not exists):
return course_info_edit(request, c.id)
else:
return course(request, c.id)
return redirect(index)
def get_or_add_course(request):
exists, c = _course_exists(request=request)
if c is None:
return (exists, c)
c = Course.objects.get(id=c.id)
c.add_student(request.user)
return (exists, c)
def course_leave(request, course_id):
c = Course.objects.get(id=course_id)
if(c.in_course(request.user)):
c.remove_student(request.user)
return redirect(index)
return redirect(404)
def course_info(request, course_id):
if not request.user.is_authenticated():
return login(request)
course = Course.objects.get(id=course_id)
variables = _course_context(request, course_id)
variables['timeValue'] = course.time
variables['semester'] = course.get_semester
variables['credits'] = course.credits
return render(request, 'website/course/course_info.html',
variables,
context_instance=RequestContext(request),
)
def course_info_edit(request, course_id):
if not request.user.is_authenticated():
return login(request)
variables = _course_context(request, course_id)
variables['form'] = CourseForm(instance=Course.objects.get(id=course_id))
variables.update(csrf(request))
return render(request, 'website/course/course_info_edit.html',
variables,
context_instance=RequestContext(request),
)
def course_update(request, course_id):
if request.method == 'POST':
c = Course.objects.get(id=course_id)
form = CourseForm(request.POST, request.FILES, instance=c)
if form.is_valid():
u = request.user
c = form.save()
            dimensions = (150, 150)
            if len(request.FILES) == 1:
                image = request.FILES['image']
                c.image.save(image.name, createImage(c.image, dimensions))
name = u.first_name + ' ' + u.last_name
username = request.user.username
variables_for_template = {
'name': name,
'username': username,
}
return redirect('/course/' + str(course_id) + '/', variables_for_template)
else:
variables = _course_context(request, course_id)
variables['form'] = form
variables.update(csrf(request))
return render(request, 'website/course/course_info_edit.html',
variables,
context_instance=RequestContext(request))
def create_event(request, course_id):
if request.method == 'POST':
variables = _course_context(request, course_id)
c = Course.objects.get(id=course_id)
e = Event()
e.creator = request.user
form = EventForm(request.POST, request.FILES, instance=e)
if form.is_valid():
e = form.save()
c.add_event(e)
e.join_event(request.user.id)
c.save()
wallPost = ConversationPost(creator=request.user, wall=c.wall, message="", post_type='E', event=e)
wallPost.save()
notifyCreateEvent(course=c, event=e)
return redirect(course_events, course_id)
else:
variables['form'] = form
variables.update(csrf(request))
return course(request, course_id, "Invalid event creation fields!!")
return redirect(index)
def course_documents(request, course_id):
if not request.user.is_authenticated():
return login(request)
variables = _course_context(request, course_id)
variables['documents'] = Document.objects.filter(course__id=course_id).order_by('modified')
return render(request, 'website/course/course_documents.html',
variables,
context_instance=RequestContext(request),
)
def course_upload(request, course_id):
doc = __upload_core(request)
if isinstance(doc, Exception):
return course(request, course_id, "Invalid document!")
else:
c = Course.objects.get(id=course_id)
message = request.POST['message_post']
wallPost = ConversationPost(creator=request.user, wall=c.wall, message=message, post_type='D', document=doc)
wallPost.save()
doc.course.add(c)
doc.save()
notifyDocShareCourse(document=doc, course=c)
return redirect(course_documents, course_id)
def course_events(request, course_id):
if not request.user.is_authenticated():
return login(request)
variables = _course_context(request, course_id)
today = datetime.date.today()
week1_end = today + timedelta(days=6 - today.weekday())
week2_end = week1_end + timedelta(days=7)
c = Course.objects.get(id=course_id)
variables['thisWeek'] = c.events.filter(start_date__range=[today, week1_end], deleted=False)
variables['nextWeek'] = c.events.filter(start_date__gt=week1_end, start_date__lte=week2_end, deleted=False)
variables['future'] = c.events.filter(start_date__gt=week2_end, deleted=False)
return render(request, 'website/course/course_events.html',
variables,
context_instance=RequestContext(request),
)
def course_members(request, course_id):
if not request.user.is_authenticated():
return login(request)
template_variables = _course_context(request, course_id)
c = Course.objects.get(id=course_id)
_list = c.course_users.filter(courseuser__role='S')
new_list = (chunks(_list, 3))
template_variables['course_members'] = new_list
template_variables['user'] = request.user
return render(request, 'website/course/course_members.html',
template_variables,
context_instance=RequestContext(request),
)
def course_gradebook(request, course_id):
if not request.user.is_authenticated():
return login(request)
return render(request, 'website/course/course_gradebook.html',
_course_context(request, course_id),
context_instance=RequestContext(request),
)
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i + n]
from django.http import HttpResponse
from django.utils import simplejson
def add_course(request):
results = {'success': False}
if request.user.is_authenticated() and request.user.is_active:
if request.method == 'POST':
courses = Course.objects.filter(course_users__username=request.user.username)
if courses.count() >= 10:
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
exists, c = get_or_add_course(request)
if c is not None:
results = {'success': True}
else:
results = {'success': False}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
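# Hypothetical URLconf sketch (added; the URL patterns and layout are
# assumptions -- only the view callables come from this module) showing how a
# urls.py of the same Django era could wire these views up:
from django.conf.urls import url
urlpatterns = [
    url(r'^course/(?P<course_id>\d+)/$', course),
    url(r'^course/(?P<course_id>\d+)/info/$', course_info),
    url(r'^course/(?P<course_id>\d+)/events/$', course_events),
    url(r'^course/(?P<course_id>\d+)/documents/$', course_documents),
]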
| 37.892734
| 116
| 0.649621
| 0
| 0
| 77
| 0.007031
| 973
| 0.08885
| 0
| 0
| 942
| 0.08602
|
c67157381752f709d6b39cd4632427d8936411ad
| 2,701
|
py
|
Python
|
rx/operators/observable/delaywithselector.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-16T09:07:13.000Z
|
2018-11-16T09:07:13.000Z
|
rx/operators/observable/delaywithselector.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
rx/operators/observable/delaywithselector.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-08T08:23:08.000Z
|
2020-05-08T08:23:08.000Z
|
from rx.core import ObservableBase, AnonymousObservable, typing
from rx.disposables import CompositeDisposable, \
SingleAssignmentDisposable, SerialDisposable
def delay_with_selector(self, subscription_delay=None,
delay_duration_mapper=None) -> ObservableBase:
"""Time shifts the observable sequence based on a subscription delay
and a delay mapper function for each element.
# with mapper only
1 - res = source.delay_with_selector(lambda x: Scheduler.timer(5000))
# with delay and mapper
2 - res = source.delay_with_selector(Observable.timer(2000),
lambda x: Observable.timer(x))
subscription_delay -- [Optional] Sequence indicating the delay for the
subscription to the source.
delay_duration_mapper [Optional] Selector function to retrieve a
sequence indicating the delay for each given element.
Returns time-shifted sequence.
"""
source = self
sub_delay, mapper = None, None
if isinstance(subscription_delay, typing.Observable):
mapper = delay_duration_mapper
sub_delay = subscription_delay
else:
mapper = subscription_delay
def subscribe(observer, scheduler=None):
delays = CompositeDisposable()
at_end = [False]
def done():
if (at_end[0] and delays.length == 0):
observer.on_completed()
subscription = SerialDisposable()
def start():
def on_next(x):
try:
delay = mapper(x)
except Exception as error:
observer.on_error(error)
return
d = SingleAssignmentDisposable()
delays.add(d)
def on_next(_):
observer.on_next(x)
delays.remove(d)
done()
def on_completed():
observer.on_next(x)
delays.remove(d)
done()
d.disposable = delay.subscribe_(on_next, observer.on_error, on_completed, scheduler)
def on_completed():
at_end[0] = True
subscription.dispose()
done()
subscription.disposable = source.subscribe_(on_next, observer.on_error, on_completed, scheduler)
if not sub_delay:
start()
else:
            # `disposable` is a property, so the delay subscription must be
            # assigned to it; start() runs once sub_delay emits or completes.
            subscription.disposable = sub_delay.subscribe_(
                lambda _: start(),
                observer.on_error,
                start)
return CompositeDisposable(subscription, delays)
return AnonymousObservable(subscribe)
| 32.154762
| 108
| 0.585339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 668
| 0.247316
|
c6715e41c59947802aabe44b258270730dfcbb52
| 719
|
py
|
Python
|
w2/palindrome.py
|
connorw72/connorapcsptri3
|
2e885644ed2a8d478e5ce193f94b02ad03c6e6b3
|
[
"MIT"
] | null | null | null |
w2/palindrome.py
|
connorw72/connorapcsptri3
|
2e885644ed2a8d478e5ce193f94b02ad03c6e6b3
|
[
"MIT"
] | 3
|
2022-03-14T21:10:05.000Z
|
2022-03-28T21:11:17.000Z
|
w2/palindrome.py
|
connorw72/connorapcsptri3
|
2e885644ed2a8d478e5ce193f94b02ad03c6e6b3
|
[
"MIT"
] | 2
|
2022-03-10T06:11:11.000Z
|
2022-03-11T06:11:11.000Z
|
class Palindrome:
def __init__(self, test):
self.test = test
def __call__(self):
test_strip = list([n for n in self.test if n.isalpha() or n.isnumeric()])
self.test = "".join(test_strip)
self.test = self.test.lower()
#Test to see if the phrase/word is a palindrome
if self.test == self.test[::-1]:
return "is a palindrome"
else:
return "is not a palindrome"
# Testing these to see if they are palindromes
test_cases = ["A man, a plan, a canal -- Panama", "racecar", "broncos"]
def main():
    try:
        for v in test_cases:
            palindrome = Palindrome(test=v)
            print(v, palindrome())
    except Exception as e:
        print("ERROR!", e)
if __name__ == "__main__":
    main()
| 31.26087
| 81
| 0.585535
| 445
| 0.618915
| 0
| 0
| 0
| 0
| 0
| 0
| 193
| 0.268428
|
c672a5daf5acf1852874d76a788a6d4edc536ca3
| 3,890
|
py
|
Python
|
sat-competition-2018/xof-state/sha3-xof.py
|
cipherboy/sat
|
65cbcebf03ffdfd64d49359ebb1d654c73e2c720
|
[
"MIT"
] | 1
|
2019-01-19T23:04:50.000Z
|
2019-01-19T23:04:50.000Z
|
sat-competition-2018/xof-state/sha3-xof.py
|
cipherboy/sat
|
65cbcebf03ffdfd64d49359ebb1d654c73e2c720
|
[
"MIT"
] | null | null | null |
sat-competition-2018/xof-state/sha3-xof.py
|
cipherboy/sat
|
65cbcebf03ffdfd64d49359ebb1d654c73e2c720
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import hash_framework as hf
hf.config.model_dir = "/home/cipherboy/GitHub/sat/sat-competition-2018/models"
import time, sys, os, random
run = False
release = False
if '--run' in sys.argv:
run = True
if '--release' in sys.argv:
release = True
if '-h' in sys.argv or '--help' in sys.argv:
print(sys.argv[0] + " [--run] [--release] [--args file] [w r e s]")
print('---')
print("Generates models for benchmarking. Runs if specified, otherwise only creates models.")
print("--run - runs the resulting CNF file")
print("--release - deletes the intermediate stages after creation")
print("--args file - specify a file to load arguments from")
print("w - sha3 w")
print("r - sha3 rounds format (str only)")
print("e - effective margin (128/256/.../512: as if w=1600)")
print("s - steps to apply to base state (extract s*e*w/64 bits from the XOF)")
sys.exit(0)
def sha3_xof_recreate_args():
r_args = sys.argv[1:]
if run:
r_args = r_args[1:]
if release:
r_args = r_args[1:]
w = int(r_args[0])
r = int(r_args[1])
e = int(r_args[2])
s = int(r_args[3])
sha3_xof_recreate(w, r, e, s)
def sha3_xof_recreate_file():
fname = sys.argv[-1]
args = open(fname, 'r').read().split('\n')
for s_arg in args:
if len(s_arg) == 0:
continue
arg = s_arg.split(" ")
w = int(arg[0])
r = int(arg[1])
e = int(arg[2])
s = int(arg[3])
sha3_xof_recreate(w, r, e, s)
def sha3_perform(w, r, in_state):
eval_table = hf.algorithms._sha3.perform_sha3({}, in_state, None, rounds=r, w=w)
out_state = []
for j in range(0, 25*w):
out_state.append(eval_table['out' + str(j)])
return ''.join(out_state)
def sha3_xof_recreate(w, r, e, s):
margin = e*w//64
algo = hf.algorithms.sha3(w=w, rounds=r)
base_seed = []
for j in range(0, 25*w):
if random.randint(0, 1) == 0:
base_seed.append('F')
else:
base_seed.append('T')
base_seed = ''.join(base_seed)
states = []
cstate = sha3_perform(w, r, base_seed)
for i in range(0, s):
states.append(cstate)
cstate = sha3_perform(w, r, cstate)
tag = "sha3-xof_recreate-w" + str(w) + "-r" + str(r) + '-e' + str(e) + "-s" + str(s)
prefixes = []
for i in range(0, s):
prefixes.append('h' + str(i))
m = hf.models()
m.start(tag, recreate=True)
print(w, r, e, s, margin)
print("base_seed: " + base_seed)
for i in range(0, s):
print("state " + str(i) + ": " + states[i])
hf.models.vars.write_header()
hf.models.generate(algo, prefixes, rounds=r, bypass=True)
hf.models.vars.write_assign(['cchain', 'cknown'])
if s > 1:
cchain = ['and']
for i in range(0, s-1):
for j in range(0, 25*w):
cchain.append(('equal', 'h' + str(i) + 'out' + str(j), 'h' + str(i+1) + 'in' + str(j)))
cchain = tuple(cchain)
hf.models.vars.write_clause('cchain', cchain, '10-chain.txt')
cknown = ['and']
for i in range(0, s):
for j in range(0, margin):
cknown.append(('equal', 'h' + str(i) + 'out' + str(j), states[i][j]))
cknown = tuple(cknown)
hf.models.vars.write_clause('cknown', cknown, '20-known.txt')
m.collapse()
m.build()
if run:
t1 = time.time()
res = m.run(count=1)
t2 = (time.time() - t1)
print("Run time: " + str(t2))
for result in m.load_results():
o_s = ""
for j in range(0, 25*w):
o_s += result['h0in' + str(j)]
print("predicted_seed: " + str(o_s))
if release:
os.system("rm -rf *.txt *.bc *.concat *.out")
print("")
if '--args' in sys.argv:
sha3_xof_recreate_file()
else:
sha3_xof_recreate_args()
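# Usage sketch (added for illustration; 'xof-args.txt' and the particular
# numbers are hypothetical): the --args option reads a text file with one
# "w r e s" quadruple per line, which could be generated like this.
def write_example_args(fname='xof-args.txt'):
    with open(fname, 'w') as f:
        for steps in (1, 2, 4):
            f.write('4 2 128 %d\n' % steps)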
| 28.814815
| 103
| 0.554756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 842
| 0.216452
|
c6739210f1e8d51ce9d34502997456a48bfc0ddd
| 3,357
|
py
|
Python
|
methinks/db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
methinks/db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
methinks/db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import datetime
import xxhash
import json
from flask_sqlalchemy import SQLAlchemy
from methinks.utils import str_to_date
from methinks.config import get_default_conf
db = SQLAlchemy()
class Entry(db.Model):
__tablename__ = 'entry'
id = db.Column(db.Integer, primary_key=True)
hexid = db.Column(db.String(16), unique=True, nullable=False, index=True)
text = db.Column(db.Text(), nullable=False)
date = db.Column(db.Date(), index=True, nullable=False)
last_edited = db.Column(db.DateTime(timezone=True), default=datetime.datetime.utcnow)
misc = db.Column(db.JSON, nullable=True)
def __init__(self, **data):
if 'id' in data:
raise AttributeError('id cannot be set')
if 'hexid' in data:
raise AttributeError('hexid cannot be set')
self.text = data.pop('text')
self.date = data.pop('date')
if 'last_edited' in data:
self.last_edited = data.pop('last_edited')
assert type(self.date) is datetime.date
self.misc = data
self.hexid = self.hash
def __repr__(self):
return 'Entry: %r:\n%s' % (self.date, self.text)
@property
def hash(self):
content = '%s%s%s' % (self.text, self.date, json.dumps(self.misc))
hs = xxhash.xxh64(content).hexdigest()
return hs
@classmethod
def string_to_date(cl, text):
return datetime.datetime.strptime(text,
get_default_conf()['dateformat']).date()
@classmethod
def date_to_string(cl, date):
return date.strftime(get_default_conf()['dateformat'])
@property
def filename(self):
return '%s.md' % Entry.date_to_string(self.date)
def as_dict(self):
d = dict(id=self.id,
hexid=self.hexid,
text=self.text,
date=self.date,
last_edited=self.last_edited,
**self.misc)
return d
@classmethod
def from_dict(cl, data):
return Entry(text=data['text'],
date=str_to_date(data['date']).date(),
last_edited=str_to_date(data['last_edited']),
**data.get('misc', {}))
def to_file(self, folderpath):
path = os.path.join(folderpath, self.filename)
with open(path, 'w') as f:
f.write(self.text)
@classmethod
def from_file(cl, filepath):
with open(filepath, 'r') as f:
contents = f.read()
filename = os.path.basename(filepath).replace('.md', '')
if filename == 'template':
date = datetime.date.today()
last_edited = datetime.datetime.min
else:
date = cl.string_to_date(filename)
mtime = os.path.getmtime(filepath)
last_edited = datetime.datetime.fromtimestamp(mtime)
return Entry(text=contents, date=date, last_edited=last_edited)
@classmethod
def from_config(cl, config):
sections = []
        for title, trigger_cls in config.triggers.items():
            line = trigger_cls.default_text(title)
sections.append(line)
contents = '%s\n' % '\n'.join(sections)
date = datetime.date.today()
last_edited = datetime.datetime.min
return Entry(text=contents, date=date, last_edited=last_edited)
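# Minimal usage sketch (added; 'mood' is a hypothetical extra field and ends up
# in the JSON `misc` column): Entry instances can be built without committing
# them, and hexid is derived from the text, date and misc payload.
def _example_entry():
    e = Entry(text='# notes\n', date=datetime.date.today(), mood='ok')
    assert e.hexid == e.hash
    return e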
| 31.373832
| 89
| 0.593983
| 3,157
| 0.940423
| 0
| 0
| 1,744
| 0.519511
| 0
| 0
| 214
| 0.063747
|
c673cb49bd602adacaeaaa1c827fbb7abab3bbf6
| 580
|
py
|
Python
|
functional/decorator.py
|
LaurenceYang/learn-python
|
819994039abd3af298f73b1a73976eaa95071096
|
[
"Apache-2.0"
] | 2
|
2018-01-20T03:38:58.000Z
|
2019-07-21T11:33:24.000Z
|
functional/decorator.py
|
LaurenceYang/learn-python
|
819994039abd3af298f73b1a73976eaa95071096
|
[
"Apache-2.0"
] | null | null | null |
functional/decorator.py
|
LaurenceYang/learn-python
|
819994039abd3af298f73b1a73976eaa95071096
|
[
"Apache-2.0"
] | null | null | null |
import functools
def log(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now():
print('2018-01-29')
now()
def logger(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
@logger('DEBUG')
def today():
print('2018-01-29 12:00:00')
today()
print(today.__name__)
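# Illustration (added): the functools.wraps call above is what preserves the
# decorated function's metadata; without it, the wrapper's own name leaks out.
def log_without_wraps(func):
    def wrapper(*args, **kw):
        print('call %s():' % func.__name__)
        return func(*args, **kw)
    return wrapper
@log_without_wraps
def tomorrow():
    print('2018-01-30')
print(tomorrow.__name__)  # prints 'wrapper', not 'tomorrow'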
| 17.575758
| 58
| 0.568966
| 0
| 0
| 0
| 0
| 382
| 0.658621
| 0
| 0
| 67
| 0.115517
|
c6742b09c8b11bbe5babccf11451efdfb75310ee
| 2,797
|
py
|
Python
|
dense_estimation/points_estimation.py
|
zouzhenhong98/kitti-tools
|
30b7d5c799ca2a44fe88522f6d46ad2a53c61d53
|
[
"MIT"
] | 7
|
2020-01-03T13:05:36.000Z
|
2021-08-03T07:51:43.000Z
|
dense_estimation/points_estimation.py
|
zouzhenhong98/kitti-tools
|
30b7d5c799ca2a44fe88522f6d46ad2a53c61d53
|
[
"MIT"
] | null | null | null |
dense_estimation/points_estimation.py
|
zouzhenhong98/kitti-tools
|
30b7d5c799ca2a44fe88522f6d46ad2a53c61d53
|
[
"MIT"
] | 3
|
2020-07-07T03:35:06.000Z
|
2021-07-21T11:40:38.000Z
|
'''
Point cloud estimation: transfer a sparse map to a dense map;
works for both depth and reflectance.
'''
import sys
sys.path.append("..")
from utils import data_provider
from utils import velo_2_cam
import numpy as np
# fetch image and point clouds: coordinates and reflectance
def rawData(pc_path_, img_path_):
    # load the filtered point cloud
lidar = data_provider.read_pc2array(pc_path_, height=None, font=True)
lidar = np.array(lidar)
print('\nfiltered pointcloud size: ', (np.size(lidar,1), np.size(lidar,0)))
# load image
img = data_provider.read_img(img_path_)
return img, lidar
# project points on the image plane
def lidarPreprocess(point_cloud_, calib_path_, type_):
# type_: r:reflectance, 2d:2d depth, 3d:3d_depth
assert type_ in {"r", "2d", "3d"}, \
"type_ should be r:reflectance or 2d:2d_depth or 3d:3d_depth"
param = data_provider.read_calib(calib_path_, [2,4,5])
# projection: pixels = cam2img * cam2cam * vel2cam * pointcloud
# matrix type: np.array
cam2img = param[0].reshape([3,4]) # from camera-view to pixels
cam2cam = param[1].reshape([3,3]) # rectify camera-view
vel2cam = param[2].reshape([3,4]) # from lidar-view to camera-view
# get camera-view coordinates & pixel coordinates(after cam2img)
__, pixel = velo_2_cam.lidar_to_camera_project(trans_mat=vel2cam,
rec_mat=cam2cam,
cam_mat=cam2img,
data=point_cloud_,
pixel_range=(1242,375)
)
    if type_=="r":
        pixel = np.row_stack((pixel[:2,:],pixel[3,:]))
        print("return 2d coordinates with reflectance")
    elif type_=="2d":
        pixel = np.row_stack((pixel[:2,:],pixel[4,:]))
        print("return 2d coordinates with 2d depth")
    elif type_=="3d":
        pixel = np.row_stack((pixel[:2,:],pixel[5,:]))
        print("return 2d coordinates with 3d depth")
return pixel
def completion(point_cloud_):
"""codes wait for completion"""
pass
if __name__ == "__main__":
filename = "um_000000"
pc_path = "../data/bin/"+filename+".bin"
calib_path = "../data/calib/"+filename+".txt"
image_path = "../data/img/"+filename+".png"
print('using data ',filename,' for test')
img, lidar = rawData(pc_path_=pc_path, img_path_=image_path)
pixel = lidarPreprocess(point_cloud_=lidar,
calib_path_=calib_path, type_="r")
# add pixels to image
# velo_2_cam.add_pc_to_img(img_path=image_path, coor=pixel, saveto='./result/'+filename+'_composition.png')
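# Hedged sketch (added; matrix names follow the comments in lidarPreprocess,
# and the exact behaviour of velo_2_cam may differ): the projection chain pads
# the rectification and extrinsic matrices to homogeneous 4x4 form, multiplies
# through, and applies the perspective divide.
def project_points(cam2img, cam2cam, vel2cam, points_xyz):
    n = points_xyz.shape[1]
    rect = np.eye(4)
    rect[:3, :3] = cam2cam                        # rectification, padded to 4x4
    extr = np.vstack([vel2cam, [0, 0, 0, 1]])     # lidar -> camera, padded to 4x4
    pts_h = np.vstack([points_xyz, np.ones((1, n))])
    proj = cam2img @ rect @ extr @ pts_h          # 3xN homogeneous pixel coords
    return proj[:2] / proj[2]                     # (u, v) after dividing by depth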
| 35.405063
| 111
| 0.598498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,011
| 0.361459
|
c6770cd7813960cae894c7947e2f76b45e5169f4
| 1,014
|
py
|
Python
|
tests/run_compiler.py
|
yshrdbrn/ogle
|
529337203b1bd3ec66c08f4ed153dba5fc8349a1
|
[
"MIT"
] | null | null | null |
tests/run_compiler.py
|
yshrdbrn/ogle
|
529337203b1bd3ec66c08f4ed153dba5fc8349a1
|
[
"MIT"
] | null | null | null |
tests/run_compiler.py
|
yshrdbrn/ogle
|
529337203b1bd3ec66c08f4ed153dba5fc8349a1
|
[
"MIT"
] | null | null | null |
from ogle.code_generator.code_generator import CodeGenerator
from ogle.lexer.lexer import Lexer
from ogle.parser.parser import Parser
from ogle.semantic_analyzer.semantic_analyzer import SemanticAnalyzer
def _get_errors_warnings(all_errors):
errors = [e for e in all_errors if 'Error' in e[1]]
warnings = [e for e in all_errors if 'Warning' in e[1]]
return errors, warnings
def get_semantic_errors(input_file):
lexer = Lexer(input_file)
parser = Parser(lexer)
parser.parse()
semantic_analyzer = SemanticAnalyzer(parser.ast)
semantic_analyzer.analyze()
return _get_errors_warnings(semantic_analyzer.errors)
def run(input_file, output_filename):
lexer = Lexer(input_file)
parser = Parser(lexer)
parser.parse()
semantic_analyzer = SemanticAnalyzer(parser.ast)
semantic_analyzer.analyze()
with open(output_filename, 'w') as output:
code_generator = CodeGenerator(parser.ast, semantic_analyzer.symbol_table)
code_generator.generate(output)
| 36.214286
| 82
| 0.759369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.018738
|
c678c38909ca5f9f3348fe7d0e9471e1720d3bee
| 817
|
py
|
Python
|
graph/dfs_dict_attempt2.py
|
automoto/python-code-golf
|
1a4e0b5984e64620637de9d80e82c6e89997f4af
|
[
"MIT"
] | null | null | null |
graph/dfs_dict_attempt2.py
|
automoto/python-code-golf
|
1a4e0b5984e64620637de9d80e82c6e89997f4af
|
[
"MIT"
] | null | null | null |
graph/dfs_dict_attempt2.py
|
automoto/python-code-golf
|
1a4e0b5984e64620637de9d80e82c6e89997f4af
|
[
"MIT"
] | null | null | null |
# !depth first search !dfs !graph
# dict of nodes as the key and sets for the edges(children)
graph = {'A': set(['B', 'C', 'D']),
'B': set(['E', 'F']),
'C': set([]),
'D': set(['G', 'H']),
'E': set([]),
'F': set(['I', 'J']),
'G': set(['K']),
'H': set([]),
'I': set([]),
'J': set([]),
'K': set([])}
def dfs(graph, start):
visited = set()
stack = [start]
    while stack:
        current_node = stack.pop()
        if current_node in visited:
            # a node can be pushed more than once before it is visited
            continue
        print('visiting node ', current_node)
        visited.add(current_node)
        stack.extend(graph[current_node] - visited)
dfs(graph, 'A')
# PSEUDOCODE
# create set of visited nodes
# create a searching stack with the starting node
# while the stack has nodes
# pop the current_node off of the stack
# add current node to visited
# add the connected nodes minus visited to the stack to search
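# Recursive variant (added for comparison; not in the original file): the same
# traversal expressed through the call stack instead of an explicit list.
def dfs_recursive(graph, node, visited=None):
    if visited is None:
        visited = set()
    visited.add(node)
    print('visiting node ', node)
    for neighbour in graph[node] - visited:
        if neighbour not in visited:
            dfs_recursive(graph, neighbour, visited)
    return visited
dfs_recursive(graph, 'A')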
| 24.029412
| 63
| 0.597307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.517748
|