hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18e8661bfba7a01963831fc9dac3f2b59f8ea633 | 2,074 | py | Python | examples/set_holidaydates.py | ultratolido/ekmmetters | e15325023262e228b4dc037021c28a8d2b9b9b03 | [
"MIT"
] | null | null | null | examples/set_holidaydates.py | ultratolido/ekmmetters | e15325023262e228b4dc037021c28a8d2b9b9b03 | [
"MIT"
] | null | null | null | examples/set_holidaydates.py | ultratolido/ekmmetters | e15325023262e228b4dc037021c28a8d2b9b9b03 | [
"MIT"
] | null | null | null | """ Simple example set holiday dates
(c) 2016 EKM Metering.
"""
# EKM example script: program a V4 meter's 20-slot holiday-date table over a
# serial port, first with random dates, then from an explicit parameter buffer.
# Python 2 syntax (print statements); requires the `ekmmeters` package and a
# physical meter on the configured port.
import random
from ekmmeters import *

# port setup -- adjust to the local serial device and meter address
my_port_name = "COM3"
my_meter_address = "300001162"

# log to console
ekm_set_log(ekm_print_log)

# init port and meter
port = SerialPort(my_port_name)
if (port.initPort() == True):
    my_meter = V4Meter(my_meter_address)
    my_meter.attachPort(port)
else:
    print "Cannot open port"
    exit()

# Method 1: assign every holiday slot a random date, then push the table.
# input over range(Extents.Holidays)
for holiday in range(Extents.Holidays):
    day = random.randint(1,28)  # capped at 28 so the date is valid in any month
    mon = random.randint(1,12)
    my_meter.assignHolidayDate(holiday, mon, day)
my_meter.setHolidayDates()

# Method 2: build the raw request buffer directly; month/day of 0 clears a slot.
# input directly
# NOTE(review): OrderedDict is not imported here -- presumably re-exported by
# ekmmeters' star import; confirm.
param_buf = OrderedDict()
param_buf["Holiday_1_Month"] = 1
param_buf["Holiday_1_Day"] = 1
param_buf["Holiday_2_Month"] = 2
param_buf["Holiday_2_Day"] = 3
param_buf["Holiday_3_Month"] = 4
param_buf["Holiday_3_Day"] = 4
param_buf["Holiday_4_Month"] = 4
param_buf["Holiday_4_Day"] = 5
param_buf["Holiday_5_Month"] = 5
param_buf["Holiday_5_Day"] = 4
param_buf["Holiday_6_Month"] = 0
param_buf["Holiday_6_Day"] = 0
param_buf["Holiday_7_Month"] = 0
param_buf["Holiday_7_Day"] = 0
param_buf["Holiday_8_Month"] = 0
param_buf["Holiday_8_Day"] = 0
param_buf["Holiday_9_Month"] = 0
param_buf["Holiday_9_Day"] = 0
param_buf["Holiday_10_Month"] = 0
param_buf["Holiday_10_Day"] = 0
param_buf["Holiday_11_Month"] = 0
param_buf["Holiday_11_Day"] = 0
param_buf["Holiday_12_Month"] = 0
param_buf["Holiday_12_Day"] = 0
param_buf["Holiday_13_Month"] = 0
param_buf["Holiday_13_Day"] = 0
param_buf["Holiday_14_Month"] = 0
param_buf["Holiday_14_Day"] = 0
param_buf["Holiday_15_Month"] = 0
param_buf["Holiday_15_Day"] = 0
param_buf["Holiday_16_Month"] = 0
param_buf["Holiday_16_Day"] = 0
param_buf["Holiday_17_Month"] = 0
param_buf["Holiday_17_Day"] = 0
param_buf["Holiday_18_Month"] = 0
param_buf["Holiday_18_Day"] = 0
param_buf["Holiday_19_Month"] = 0
param_buf["Holiday_19_Day"] = 0
param_buf["Holiday_20_Month"] = 1
param_buf["Holiday_20_Day"] = 9

if my_meter.setHolidayDates(param_buf):
    print "Set holiday dates success."
port.closePort() | 27.289474 | 49 | 0.747348 |
18e9b27e387d5cd010bbb4d876619abf03cb83f9 | 4,242 | py | Python | FCN.py | alexandrefelipemuller/timeseries_shapelet_transferlearning | be19c05ae88c5bf733fedcfed24a7140168f9727 | [
"Apache-2.0"
] | null | null | null | FCN.py | alexandrefelipemuller/timeseries_shapelet_transferlearning | be19c05ae88c5bf733fedcfed24a7140168f9727 | [
"Apache-2.0"
] | null | null | null | FCN.py | alexandrefelipemuller/timeseries_shapelet_transferlearning | be19c05ae88c5bf733fedcfed24a7140168f9727 | [
"Apache-2.0"
] | 1 | 2021-03-31T07:46:37.000Z | 2021-03-31T07:46:37.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 20:11:19 2016
@author: stephen
"""
# Train a Fully Convolutional Network (FCN) time-series classifier on the UCR
# dataset named on the command line.  Written against the Keras 1.x API
# (`border_mode=`, `Model(input=..., output=...)` are pre-Keras-2 spellings).
from __future__ import print_function
from keras.models import Model
from keras.utils import np_utils
import numpy as np
import os
from keras.callbacks import ModelCheckpoint
import pandas as pd
import sys
import keras
from keras.callbacks import ReduceLROnPlateau

nb_epochs = 300

#flist = ['Adiac', 'Beef', 'CBF', 'ChlorineConcentration', 'CinC_ECG_torso', 'Coffee', 'Cricket_X', 'Cricket_Y', 'Cricket_Z',
#'DiatomSizeReduction', 'ECGFiveDays', 'FaceAll', 'FaceFour', 'FacesUCR', '50words', 'FISH', 'Gun_Point', 'Haptics',
#'InlineSkate', 'ItalyPowerDemand', 'Lighting2', 'Lighting7', 'MALLAT', 'MedicalImages', 'MoteStrain', 'NonInvasiveFatalECG_Thorax1',
#'NonInvasiveFatalECG_Thorax2', 'OliveOil', 'OSULeaf', 'SonyAIBORobotSurface', 'SonyAIBORobotSurfaceII', 'StarLightCurves', 'SwedishLeaf', 'Symbols',
#'synthetic_control', 'Trace', 'TwoLeadECG', 'Two_Patterns', 'uWaveGestureLibrary_X', 'uWaveGestureLibrary_Y', 'uWaveGestureLibrary_Z', 'wafer', 'WordsSynonyms', 'yoga']
# A single dataset name is taken from argv; the commented list above covers the
# full UCR archive for batch runs.
flist = [ sys.argv[1] ]
for each in flist:
    fname = each
    # NOTE(review): readucr() is not defined in this excerpt -- presumably a
    # helper that loads the "<name>_TRAIN"/"<name>_TEST" UCR files; confirm upstream.
    x_train, y_train = readucr(fname+'/'+fname+'_TRAIN')
    x_test, y_test = readucr(fname+'/'+fname+'_TEST')
    nb_classes = len(np.unique(y_test))
    batch_size = int(min(x_train.shape[0]/10, 16))
    # Remap arbitrary class labels onto 0..nb_classes-1 for one-hot encoding.
    y_train = (y_train - y_train.min())/(y_train.max()-y_train.min())*(nb_classes-1)
    y_test = (y_test - y_test.min())/(y_test.max()-y_test.min())*(nb_classes-1)
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    # Standardize with TRAIN statistics only (applied to both splits).
    x_train_mean = x_train.mean()
    x_train_std = x_train.std()
    x_train = (x_train - x_train_mean)/(x_train_std)
    x_test = (x_test - x_train_mean)/(x_train_std)
    # Add a trailing channel axis for Conv1D: (samples, length) -> (samples, length, 1).
    x_train = x_train.reshape(x_train.shape + (1,))
    x_test = x_test.reshape(x_test.shape + (1,))
    print ("class:"+each+", number of classes: "+str(nb_classes))
    # FCN architecture: three Conv1D+BN+ReLU stages, then global average pooling.
    x = keras.layers.Input(x_train.shape[1:])
#    drop_out = Dropout(0.2)(x)
    # NOTE(review): input_shape=(32,1) is ignored under the functional API
    # (the Input layer fixes the shape) -- harmless leftover.
    conv1 = keras.layers.Conv1D(filters=32, kernel_size=8, strides=1, activation='relu', input_shape=(32,1))(x)
    conv1 = keras.layers.normalization.BatchNormalization()(conv1)
    conv1 = keras.layers.Activation('relu')(conv1)
#    drop_out = Dropout(0.2)(conv1)
    conv2 = keras.layers.Conv1D(filters=64, kernel_size=5, border_mode='same')(conv1)
    conv2 = keras.layers.normalization.BatchNormalization()(conv2)
    conv2 = keras.layers.Activation('relu')(conv2)
#    drop_out = Dropout(0.2)(conv2)
    conv3 = keras.layers.Conv1D(filters=32, kernel_size=3, border_mode='same')(conv2)
    conv3 = keras.layers.normalization.BatchNormalization()(conv3)
    conv3 = keras.layers.Activation('relu')(conv3)
    full = keras.layers.pooling.GlobalAveragePooling1D()(conv3)
    out = keras.layers.Dense(nb_classes, activation='softmax')(full)
    model = Model(input=x, output=out)
    optimizer = keras.optimizers.Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # Halve the LR when training loss plateaus for 50 epochs.
    reduce_lr = ReduceLROnPlateau(monitor = 'loss', factor=0.5,
                                  patience=50, min_lr=0.0001)
#    if os.path.isfile(fname+"_best.hdf5"):
#        model.load_weights(fname+'_best.hdf5')
#    model.load_weights(fname+'_shapelet_best.hdf5')
    # Keep only the weights with the best validation accuracy.
    checkpointer = ModelCheckpoint(filepath=fname+"_best.hdf5",
                                   monitor = 'val_accuracy',
                                   verbose=2,
                                   save_best_only=True)
#    hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
#                     verbose=1, callbacks=[reduce_lr], validation_data=(x_test, Y_test))
    # NOTE(review): the test split is used as validation_data here -- standard
    # for UCR benchmarking scripts, but it is not a held-out validation set.
    hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
                     verbose=1, callbacks=[checkpointer,reduce_lr], validation_data=(x_test, Y_test))
    #Print the testing results which has the lowest training loss.
    log = pd.DataFrame(hist.history)
    print (log.loc[log['loss'].idxmin]['loss'], log.loc[log['loss'].idxmin])
| 40.018868 | 169 | 0.677982 |
18e9e49334b24d6e872726b2848571c7d6855286 | 624 | py | Python | localpackage/calcs.py | chapmanwilliam/Ogden8 | e17b26609fc3cdd5650bfeba387bd7253513e00e | [
"Apache-2.0"
] | null | null | null | localpackage/calcs.py | chapmanwilliam/Ogden8 | e17b26609fc3cdd5650bfeba387bd7253513e00e | [
"Apache-2.0"
] | null | null | null | localpackage/calcs.py | chapmanwilliam/Ogden8 | e17b26609fc3cdd5650bfeba387bd7253513e00e | [
"Apache-2.0"
] | null | null | null | import os
indentSize=1 #size of the indent | 20.8 | 50 | 0.56891 |
18ea5f7f2758aa0649c55416dd1e9152a5f44a15 | 7,146 | py | Python | src/cops_and_robots/fusion/probability.py | COHRINT/cops_and_robots | 1df99caa1e38bde1b5ce2d04389bc232a68938d6 | [
"Apache-2.0"
] | 3 | 2016-01-19T17:54:51.000Z | 2019-10-21T12:09:03.000Z | src/cops_and_robots/fusion/probability.py | COHRINT/cops_and_robots | 1df99caa1e38bde1b5ce2d04389bc232a68938d6 | [
"Apache-2.0"
] | null | null | null | src/cops_and_robots/fusion/probability.py | COHRINT/cops_and_robots | 1df99caa1e38bde1b5ce2d04389bc232a68938d6 | [
"Apache-2.0"
] | 5 | 2015-02-19T02:53:24.000Z | 2019-03-05T20:29:12.000Z | #!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "nick.sweet@colorado.edu"
__status__ = "Development"
import logging
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable | 34.191388 | 102 | 0.502379 |
18ea77727f1cb2220f22073ef4e4393ab431d65a | 7,952 | py | Python | vulnman/tests/mixins.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | null | null | null | vulnman/tests/mixins.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | 23 | 2021-12-01T10:00:38.000Z | 2021-12-11T11:43:13.000Z | vulnman/tests/mixins.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User, Group
from django.utils import timezone
from django.conf import settings
from django.urls import reverse_lazy
from apps.projects.models import Project, Client, ProjectContributor
from ddf import G
from guardian.shortcuts import assign_perm
| 47.616766 | 111 | 0.692027 |
18ea8109933fbbfe2b0922e33bce91ae934e86e1 | 2,010 | py | Python | StateTracing/tester_helper.py | junchenfeng/diagnosis_tracing | 4e26e2ad0c7abc547f22774b6c9c299999a152c3 | [
"MIT"
] | null | null | null | StateTracing/tester_helper.py | junchenfeng/diagnosis_tracing | 4e26e2ad0c7abc547f22774b6c9c299999a152c3 | [
"MIT"
] | null | null | null | StateTracing/tester_helper.py | junchenfeng/diagnosis_tracing | 4e26e2ad0c7abc547f22774b6c9c299999a152c3 | [
"MIT"
] | 1 | 2020-09-08T13:42:16.000Z | 2020-09-08T13:42:16.000Z | # -*- coding: utf-8 -*-
# Ad-hoc driver: load a trained CDKT (knowledge-tracing) model and run its
# predictions over a hard-coded sequence of 36-character board/diagnosis states.
import numpy as np
from torch import load as Tload
from torch import tensor
from dataloader import read_data,DataLoader,load_init
from cdkt import CDKT

# Load the model once per interactive session ('model' survives re-runs in the
# same namespace, e.g. under an IDE/IPython "run file" workflow).
if 'model' not in dir():
    model = CDKT()
    model.load_state_dict(Tload('model.pkl'))
#
inits = load_init()

# Each line: "<problem id> <36-digit state string>".
# NOTE(review): the exact meaning of each digit position is not visible in
# this excerpt -- presumably one cell of the exercise state; confirm upstream.
data = """0 506123310064654031030450460312100605
0 506123310064654031230450460312100605
0 506123310064654031231450460312100605
0 506123310064654031231456460312100605
0 506123310064654031231456460312100645
0 506123310564654031231456460312100645
0 506123310564654231231456460312100645
0 506123310564654231231456460312100605
0 506123310564654231231456460312100645
0 506123312564654231231456460312100645
0 546123312564654231231456460312100645
0 546123312564654231231456465312100645
0 546123312564654231231456465312120645
0 546123312564654231231456465312123645
1 002163163050030425245001316542000000
1 002163163054030425245001316542000000
1 002163163054030425245001316542000006"""
# 1 002163163054030425245001316542030006
# 1 002163163054030425245001316542000006
# 1 002163163054031425245001316542000006
# 1 002163163054631425245001316542000006
# 1 002163163254631425245001316542000006
# 1 002163163254631425245601316542000006
# 1 002163163254631425245631316542000006
# 1 052163163254631425245631316542000006
# 1 452163163254631425245631316542000006
# 1 452163163254631425245631316542000016
# 1 452163163254631425245631316542000316
# 1 452163163254631425245631316542003316
# 1 452163163254631425245631316542000316
# 1 452163163254631425245631316542500316
# 1 452163163254631425245631316542520316
# 1 452163163254631425245631316542524316"""
# Parse "<id> <digits>" rows, keep only the digit strings as int lists,
# and batch them into a single (1, steps, 36) tensor.
data = [d.strip().split() for d in data.split('\n')]
states = [list(map(int,s)) for i,s in data]
states = tensor([states])
out = model.predicts(states)
prds = np.argmax(out[0],axis=2).flatten()*np.array(inits[2]) | 35.892857 | 60 | 0.783085 |
18eaed4c6444d0552d8dc7a9cc73624816ce21fa | 3,958 | py | Python | grpc-errors/stub/hello_pb2.py | twotwo/tools-python | b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4 | [
"MIT"
] | null | null | null | grpc-errors/stub/hello_pb2.py | twotwo/tools-python | b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4 | [
"MIT"
] | null | null | null | grpc-errors/stub/hello_pb2.py | twotwo/tools-python | b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4 | [
"MIT"
] | 1 | 2016-10-21T07:51:24.000Z | 2016-10-21T07:51:24.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hello.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='hello.proto',
package='hello',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0bhello.proto\x12\x05hello\"\x18\n\x08HelloReq\x12\x0c\n\x04Name\x18\x01 \x01(\t\"\x1b\n\tHelloResp\x12\x0e\n\x06Result\x18\x01 \x01(\t2v\n\x0cHelloService\x12/\n\x08SayHello\x12\x0f.hello.HelloReq\x1a\x10.hello.HelloResp\"\x00\x12\x35\n\x0eSayHelloStrict\x12\x0f.hello.HelloReq\x1a\x10.hello.HelloResp\"\x00\x62\x06proto3')
)
_HELLOREQ = _descriptor.Descriptor(
name='HelloReq',
full_name='hello.HelloReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Name', full_name='hello.HelloReq.Name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=46,
)
_HELLORESP = _descriptor.Descriptor(
name='HelloResp',
full_name='hello.HelloResp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Result', full_name='hello.HelloResp.Result', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=75,
)
DESCRIPTOR.message_types_by_name['HelloReq'] = _HELLOREQ
DESCRIPTOR.message_types_by_name['HelloResp'] = _HELLORESP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloReq = _reflection.GeneratedProtocolMessageType('HelloReq', (_message.Message,), {
'DESCRIPTOR' : _HELLOREQ,
'__module__' : 'hello_pb2'
# @@protoc_insertion_point(class_scope:hello.HelloReq)
})
_sym_db.RegisterMessage(HelloReq)
HelloResp = _reflection.GeneratedProtocolMessageType('HelloResp', (_message.Message,), {
'DESCRIPTOR' : _HELLORESP,
'__module__' : 'hello_pb2'
# @@protoc_insertion_point(class_scope:hello.HelloResp)
})
_sym_db.RegisterMessage(HelloResp)
_HELLOSERVICE = _descriptor.ServiceDescriptor(
name='HelloService',
full_name='hello.HelloService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=77,
serialized_end=195,
methods=[
_descriptor.MethodDescriptor(
name='SayHello',
full_name='hello.HelloService.SayHello',
index=0,
containing_service=None,
input_type=_HELLOREQ,
output_type=_HELLORESP,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='SayHelloStrict',
full_name='hello.HelloService.SayHelloStrict',
index=1,
containing_service=None,
input_type=_HELLOREQ,
output_type=_HELLORESP,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_HELLOSERVICE)
DESCRIPTOR.services_by_name['HelloService'] = _HELLOSERVICE
# @@protoc_insertion_point(module_scope)
| 27.678322 | 348 | 0.741031 |
18eb73361ec3feb33d8a12b5b8881d917685a4cc | 504 | py | Python | ckanext-sitemap/ckanext/sitemap/plugin.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | 1 | 2020-03-07T02:47:15.000Z | 2020-03-07T02:47:15.000Z | ckanext-sitemap/ckanext/sitemap/plugin.py | datopian/hdx-ckan | 2d8871c035a18e48b53859fec522b997b500afe9 | [
"Apache-2.0"
] | null | null | null | ckanext-sitemap/ckanext/sitemap/plugin.py | datopian/hdx-ckan | 2d8871c035a18e48b53859fec522b997b500afe9 | [
"Apache-2.0"
] | null | null | null | '''
Sitemap plugin for CKAN
'''
from ckan.plugins import implements, SingletonPlugin
from ckan.plugins import IRoutes
| 29.647059 | 96 | 0.712302 |
18ebf74aba4efdef03b71cc4501701981953cbd1 | 3,049 | py | Python | experiment_wrapper/__init__.py | stonkens/experiment_wrapper | 78b02a09d412097834bc81bba4452db1738b99da | [
"MIT"
] | 2 | 2022-03-24T22:31:20.000Z | 2022-03-25T03:26:01.000Z | experiment_wrapper/__init__.py | stonkens/experiment_wrapper | 78b02a09d412097834bc81bba4452db1738b99da | [
"MIT"
] | null | null | null | experiment_wrapper/__init__.py | stonkens/experiment_wrapper | 78b02a09d412097834bc81bba4452db1738b99da | [
"MIT"
] | null | null | null | from typing import Any, Dict, List
class Controller:
    """Template for the controller interface required by the experiment wrapper.

    A controller class must implement the following methods:

    - ``__call__``: takes in the current state and time and returns the control.

    A plain function object can be used instead of a class, e.g.::

        def nominal_policy(x, t):
            return L @ x

    with ``L`` the LQR controller matrix.
    """
from experiment_wrapper.experiment import Experiment, ScenarioList, Controllers
from experiment_wrapper.rollout_trajectory import (
RolloutTrajectory,
TimeSeriesExperiment,
StateSpaceExperiment,
)
from experiment_wrapper.experiment_suite import ExperimentSuite
__version__ = "1.0.1"
| 32.094737 | 119 | 0.700886 |
18ecd7bb8ba5638e693807de98d542a164bfce66 | 2,870 | py | Python | Figure_2/panel_a_Count_mC_bin.py | Wustl-Zhanglab/Placenta_Epigenome | 227f2a42e5c0af821d372b42c9bcf9e561e4627c | [
"MIT"
] | 2 | 2021-06-28T09:16:17.000Z | 2021-07-15T02:39:35.000Z | Figure_2/panel_a_Count_mC_bin.py | Wustl-Zhanglab/Placenta_Epigenome | 227f2a42e5c0af821d372b42c9bcf9e561e4627c | [
"MIT"
] | null | null | null | Figure_2/panel_a_Count_mC_bin.py | Wustl-Zhanglab/Placenta_Epigenome | 227f2a42e5c0af821d372b42c9bcf9e561e4627c | [
"MIT"
] | 2 | 2020-05-29T01:06:19.000Z | 2021-07-02T01:04:50.000Z | #!/usr/bin/python
# programmer : Bo
# usage: Count_Reads_bin.py file_list
# Aggregate methylation (mC) read counts and mean scores into 50 kb genomic
# bins, skipping blacklisted regions, and write one "M50K_" output per input.
# Python 2 script (print statements, the builtin file() constructor).
import sys
import re
import random
import string
import time

if __name__=="__main__":
    tS = time.time()
    # NOTE(review): `bin` shadows the Python builtin and appears unused below
    # (the 50000 bin width is hard-coded again at the binning step).
    bin = 50000
    # NOTE(review): Read_blacklist(), Read_data() and main() are not defined in
    # this excerpt -- presumably stripped by the dataset dump; confirm upstream.
    BL = Read_blacklist()
    #(B_site,B_name,C_reads,tt) = Read_data(sys.argv[1])
    OP = main(sys.argv[1])
    for each in OP:
        # Fresh per-file bin tables: names, read counts, score accumulators.
        (B_site,B_name,B_reads,B_score,tt) = Read_data()
        data = main(each[:-1])
        n = 0
        m = 0
        out = file('M50K_'+'_'+each[:-1],'w')
        #out.write(tt)
        for each in data:
            # Progress counter: report every million processed records.
            n += 1
            if n == 1000000:
                m += 1
                n = 0
                print m,'million reads'
            # Tab-separated record: te[0]=chrom, te[1]=start, te[2]=end, te[-1]=score.
            te = each.split('\t')
            start = int(te[1])
            end = int(te[2])
            if te[0] not in B_site.keys():
                continue
            if te[0] in BL.keys():
                for ebi in range(len(BL[te[0]])):
                    if start < BL[te[0]][ebi][1] and end > BL[te[0]][ebi][0]:
                        # NOTE(review): this `continue` only advances the inner
                        # blacklist loop -- the read is still counted below;
                        # probably intended to skip the read entirely. Verify.
                        continue
            # Round the start to the nearest 50 kb boundary; `tag` is the first
            # half of the digits, used as a two-level index into B_site.
            ss = int(0.5+(start/50000))*50000
            s = str(ss)
            w =int( len(s)/2)
            tag = s[:w+1]
            try :
                y = B_site[te[0]][tag][s]
            # NOTE(review): bare except silently drops reads whose bin is
            # missing from the index; consider catching KeyError explicitly.
            except:
                continue
            B_reads[y] += 1
            B_score[y] += float(te[-1])
        # Emit one line per bin: name, read count, mean score (0/0 when empty).
        for i in range(len(B_name)):
            if B_reads[i] == 0:
                out.write(B_name[i]+'\t0\t0\n')
            else:
                out.write(B_name[i]+'\t'+str(B_reads[i])+'\t'+str(B_score[i]/B_reads[i])+'\n')
        out.close()
    tE = time.time()
    print 'Cost ',(tE-tS),' sec'
| 27.075472 | 94 | 0.444599 |
18ed346e6be46b5b4a74b44f23d751e2dd5b808b | 6,648 | py | Python | slm_lab/agent/memory/replay.py | jmribeiro/SLM-Lab | 7cf7a10e56c9558764544e7683023945c72a42a7 | [
"MIT"
] | 1,074 | 2017-11-10T02:20:09.000Z | 2022-03-31T18:14:02.000Z | slm_lab/agent/memory/replay.py | jmribeiro/SLM-Lab | 7cf7a10e56c9558764544e7683023945c72a42a7 | [
"MIT"
] | 98 | 2017-11-04T22:00:01.000Z | 2022-03-31T14:13:45.000Z | slm_lab/agent/memory/replay.py | jmribeiro/SLM-Lab | 7cf7a10e56c9558764544e7683023945c72a42a7 | [
"MIT"
] | 229 | 2018-01-07T22:39:09.000Z | 2022-03-20T12:04:31.000Z | from collections import deque
from copy import deepcopy
from slm_lab.agent.memory.base import Memory
from slm_lab.lib import logger, math_util, util
from slm_lab.lib.decorator import lab_api
import numpy as np
import pydash as ps
logger = logger.get_logger(__name__)
def sample_next_states(head, max_size, ns_idx_offset, batch_idxs, states, ns_buffer):
    '''Method to sample next_states from states, with proper guard for next_state idx being out of bound

    The replay memory is a ring buffer of size `max_size` with write head at
    `head`; the `ns_idx_offset` most recent next-states are not yet in `states`
    and live in `ns_buffer` instead, so indices falling in
    (head, head + ns_idx_offset] are served from that buffer.
    '''
    # idxs for next state is state idxs with offset, modded
    ns_batch_idxs = (batch_idxs + ns_idx_offset) % max_size
    # if head < ns_idx <= head + ns_idx_offset, ns is stored in ns_buffer
    # NOTE(review): this second modulo is redundant -- the line above already
    # reduced ns_batch_idxs into [0, max_size).
    ns_batch_idxs = ns_batch_idxs % max_size
    buffer_ns_locs = np.argwhere(
        (head < ns_batch_idxs) & (ns_batch_idxs <= head + ns_idx_offset)).flatten()
    # find if there is any idxs to get from buffer
    to_replace = buffer_ns_locs.size != 0
    if to_replace:
        # extract the buffer_idxs first for replacement later
        # given head < ns_idx <= head + offset, and valid buffer idx is [0, offset)
        # get 0 < ns_idx - head <= offset, or equiv.
        # get -1 < ns_idx - head - 1 <= offset - 1, i.e.
        # get 0 <= ns_idx - head - 1 < offset, hence:
        buffer_idxs = ns_batch_idxs[buffer_ns_locs] - head - 1
        # set them to 0 first to allow sampling, then replace later with buffer
        ns_batch_idxs[buffer_ns_locs] = 0
    # guard all against overrun idxs from offset
    ns_batch_idxs = ns_batch_idxs % max_size
    next_states = util.batch_get(states, ns_batch_idxs)
    if to_replace:
        # now replace using buffer_idxs and ns_buffer
        buffer_ns = util.batch_get(ns_buffer, buffer_idxs)
        next_states[buffer_ns_locs] = buffer_ns
    return next_states
| 43.168831 | 170 | 0.646811 |
18ee4afcda48045a6b4b58a5f641a2905cb15b51 | 1,958 | py | Python | misc/docker/GenDockerfile.py | Wheest/atJIT | 7e29862db7b5eb9cee470edeb165380f881903c9 | [
"BSD-3-Clause"
] | 47 | 2018-08-03T09:15:08.000Z | 2022-02-14T07:06:12.000Z | misc/docker/GenDockerfile.py | Wheest/atJIT | 7e29862db7b5eb9cee470edeb165380f881903c9 | [
"BSD-3-Clause"
] | 15 | 2018-06-18T19:50:50.000Z | 2019-08-29T16:52:11.000Z | misc/docker/GenDockerfile.py | Wheest/atJIT | 7e29862db7b5eb9cee470edeb165380f881903c9 | [
"BSD-3-Clause"
] | 5 | 2018-08-28T02:35:44.000Z | 2021-11-01T06:54:51.000Z | import yaml
import sys

# Static header fields of the generated Dockerfile.
Head = "# Dockerfile derived from easy::jit's .travis.yml"
From = "ubuntu:latest"
Manteiner = "Juan Manuel Martinez Caamao jmartinezcaamao@gmail.com"

# Packages installed in every image, regardless of what .travis.yml asks for.
base_packages = ['build-essential', 'python', 'python-pip', 'git', 'wget', 'unzip', 'cmake']

# Parse the .travis.yml named on the command line.
# Fix: yaml.load() without an explicit Loader is unsafe on untrusted input, is
# deprecated since PyYAML 5.1 and raises TypeError on PyYAML >= 6; safe_load is
# sufficient for a travis file.  The `with` block also closes the handle that
# the previous `yaml.load(open(...))` leaked.
with open(sys.argv[1]) as travis_file:
    travis = yaml.safe_load(travis_file)

travis_sources = travis['addons']['apt']['sources']
travis_packages = travis['addons']['apt']['packages']
before_install = travis['before_install']
script = travis['script']

# Map travis apt-source names to the shell commands that register them.
# I could not get a better way to do this
AddSourceCmd = {
    "llvm-toolchain-trusty-6.0" : "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-6.0 main | tee -a /etc/apt/sources.list > /dev/null",
    "ubuntu-toolchain-r-test" : "apt-add-repository -y \"ppa:ubuntu-toolchain-r/test\""
}
Sources = ["RUN {cmd} \n".format(cmd=AddSourceCmd[source]) for source in travis_sources]

# Apt section: register sources, then install base and travis packages.
Apt = """# add sources
RUN apt-get update
RUN apt-get install -y software-properties-common
{AddSources}
# install apt packages, base first, then travis
RUN apt-get update
RUN apt-get upgrade -y
RUN apt-get install -y {base_packages} && \\
apt-get install -y {travis_packages}
""".format(AddSources = "".join(Sources), base_packages = " ".join(base_packages), travis_packages=" ".join(travis_packages))

# ${branch} is resolved by Docker's ARG at build time, not by Python.
Checkout = "RUN git clone --depth=50 --branch=${branch} https://github.com/jmmartinez/easy-just-in-time.git easy-just-in-time && cd easy-just-in-time\n"

BeforeInstall = "".join(["RUN cd /easy-just-in-time && {0} \n".format(cmd) for cmd in before_install])

# NOTE(review): "&& \\ \n" leaves a space after the backslash in the emitted
# Dockerfile; modern Docker tolerates trailing whitespace on continuations but
# warns about it -- confirm against the Docker version in use.
Run = "RUN cd easy-just-in-time && \\\n" + "".join([" {cmd} && \\ \n".format(cmd=cmd) for cmd in script]) + " echo ok!"

Template = """{Head}
FROM {From}
LABEL manteiner {Manteiner}
ARG branch=master
{Apt}
# checkout
{Checkout}
# install other deps
{BeforeInstall}
# compile and test!
{Run}"""

# Emit the finished Dockerfile on stdout.
print(Template.format(Head=Head, From=From, Manteiner=Manteiner, Apt=Apt, BeforeInstall=BeforeInstall, Checkout=Checkout, Run=Run))
| 35.6 | 152 | 0.704801 |
18eebda43ebee826c1945694815a04fc15eb96ef | 278 | py | Python | howareyoutwitter/api/tasks.py | tyheise/how-are-you-twitter | 1e4b938381e7d552486e981b0f696f330635ba82 | [
"MIT"
] | 1 | 2019-10-24T20:47:24.000Z | 2019-10-24T20:47:24.000Z | howareyoutwitter/api/tasks.py | tyheise/how-are-you-twitter | 1e4b938381e7d552486e981b0f696f330635ba82 | [
"MIT"
] | 12 | 2019-10-22T22:32:40.000Z | 2021-01-07T05:13:25.000Z | howareyoutwitter/api/tasks.py | tyheise/how-are-you-twitter | 1e4b938381e7d552486e981b0f696f330635ba82 | [
"MIT"
] | 1 | 2020-01-02T22:28:52.000Z | 2020-01-02T22:28:52.000Z | from api import models
from api.twitter_tools.tweet_seeker import TweetSeeker
| 19.857143 | 54 | 0.672662 |
18ef5021800d056c99fea4a85de29d3c6771923f | 390 | py | Python | examples/example1.py | wallrj/twisted-names-talk | d3098ab6745abd0d14bb0b6eef41727e5a89de1f | [
"MIT"
] | 2 | 2017-12-01T00:14:25.000Z | 2020-07-01T00:27:44.000Z | examples/example1.py | wallrj/twisted-names-talk | d3098ab6745abd0d14bb0b6eef41727e5a89de1f | [
"MIT"
] | null | null | null | examples/example1.py | wallrj/twisted-names-talk | d3098ab6745abd0d14bb0b6eef41727e5a89de1f | [
"MIT"
] | null | null | null | from twisted.internet import task
from twisted.names import dns
# Run `main` under Twisted's reactor lifecycle: task.react starts the reactor,
# calls main(reactor), and stops when the returned Deferred fires.
# NOTE(review): `main` is not defined in this excerpt -- presumably stripped
# by the dataset dump; confirm upstream.
task.react(main)
| 24.375 | 78 | 0.697436 |
18f0e1c869c59304bc5b9379e901a05831726491 | 5,975 | py | Python | utility.py | ying-wen/pmln | 76d82dd620504ac00035d9d0dc9d752cd53518d4 | [
"MIT"
] | 1 | 2019-09-10T16:42:34.000Z | 2019-09-10T16:42:34.000Z | utility.py | ying-wen/pmln | 76d82dd620504ac00035d9d0dc9d752cd53518d4 | [
"MIT"
] | null | null | null | utility.py | ying-wen/pmln | 76d82dd620504ac00035d9d0dc9d752cd53518d4 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn import metrics
def get_substitute_cate(sample, target_index, opts):
    """Return a substitute category id for ``sample[target_index]``.

    The replacement is drawn from the same field as the original category so
    the corrupted sample stays schema-consistent.  If the category has no
    reverse-index entry, a uniformly random field is used instead, and if the
    chosen field contains a single category (no different value possible) the
    substitute is drawn from the whole vocabulary.
    """
    field_i = opts.fields_index_inverse.get(sample[target_index])
    if field_i is None:
        # Fix: wrap dict.keys() in list() -- on Python 3 it is a view that
        # np.random.choice rejects ("a must be 1-dimensional").
        field_i = np.random.choice(list(opts.fields_index.keys()), 1)[0]
    field_cates = opts.fields_index[field_i]
    rst = np.random.choice(field_cates, 1)[0]
    if len(field_cates) == 1:
        # A one-category field cannot yield a different value; sample globally.
        rst = np.random.randint(opts.vocabulary_size)
    return rst


def generate_fake_sample(temp, opts):
    """Build a negative (corrupted) sample from the category sequence *temp*.

    The sequence is truncated or zero-padded to ``opts.sequence_length`` and
    ``opts.substitute_num`` distinct positions within the original (non-padded)
    part are replaced with categories not already present in the sequence
    (best-effort: each search gives up after a few tries).

    Fix: always returns a ``numpy.ndarray`` -- previously only the padded
    branch converted, so short inputs returned an array but long inputs
    returned a plain list.

    NOTE(review): assumes ``opts.substitute_num`` does not exceed the number of
    real (non-padded) positions, otherwise the index search cannot terminate.
    """
    temp_sequence_length = len(temp)
    temp = temp[0:opts.sequence_length]
    if len(temp) < opts.sequence_length:
        gap = opts.sequence_length - len(temp)
        temp = np.array(temp + [0] * gap)
    else:
        temp_sequence_length = opts.sequence_length
        temp = np.array(temp)
    assert len(temp) == opts.sequence_length
    targets_to_avoid = set(temp)
    indices_to_avoid = set()
    substitute_index = np.random.randint(temp_sequence_length)
    substitute_target = get_substitute_cate(temp, substitute_index, opts)
    for _ in range(opts.substitute_num):
        # Pick a position not substituted yet (only within the real tokens).
        while substitute_index in indices_to_avoid:
            substitute_index = np.random.randint(temp_sequence_length)
        indices_to_avoid.add(substitute_index)
        # Best-effort search for a value not already in the sequence.
        count = 0
        while substitute_target in targets_to_avoid:
            if count > 5:
                break
            substitute_target = get_substitute_cate(temp, substitute_index, opts)
            count += 1
        targets_to_avoid.add(substitute_target)
        temp[substitute_index] = substitute_target
    return temp
| 36.882716 | 89 | 0.60887 |
18f0f41a4a703e23e45d0e7b9b74208ed5cbd775 | 1,294 | py | Python | setup.py | jeremycline/crochet | ecfc22cefa90f3dfbafa71883c1470e7294f2b6d | [
"MIT"
] | null | null | null | setup.py | jeremycline/crochet | ecfc22cefa90f3dfbafa71883c1470e7294f2b6d | [
"MIT"
] | null | null | null | setup.py | jeremycline/crochet | ecfc22cefa90f3dfbafa71883c1470e7294f2b6d | [
"MIT"
] | 1 | 2020-01-25T18:00:31.000Z | 2020-01-25T18:00:31.000Z | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
def read(path):
    """Return the entire text content of the file at *path*."""
    with open(path) as source:
        return source.read()
# Distribution metadata for the `crochet` package.  The version and the
# sdist/bdist command classes are delegated to versioneer, so releases track
# git tags rather than a hand-maintained version string.
setup(
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    name='crochet',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Use Twisted anywhere!",
    install_requires=[
        "Twisted>=15.0",
        "wrapt",
    ],
    keywords="twisted threading",
    license="MIT",
    packages=["crochet", "crochet.tests"],
    url="https://github.com/itamarst/crochet",
    maintainer='Itamar Turner-Trauring',
    maintainer_email='itamar@itamarst.org',
    # PyPI long description is assembled from the README plus the changelog.
    long_description=read('README.rst') + '\n' + read('docs/news.rst'),
)
| 28.130435 | 71 | 0.616692 |
18f12f8a5d648308d20dd8053de45efc7d50fb10 | 1,071 | py | Python | polling_test.py | ngocdh236/pypusu | 2453ca4236e4467d4fc0b7dea062ae195183b293 | [
"MIT"
] | null | null | null | polling_test.py | ngocdh236/pypusu | 2453ca4236e4467d4fc0b7dea062ae195183b293 | [
"MIT"
] | null | null | null | polling_test.py | ngocdh236/pypusu | 2453ca4236e4467d4fc0b7dea062ae195183b293 | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
from pypusu.polling import PuSuClient
from time import sleep, time
# Smoke/throughput test for the PuSu polling client: publish `target` messages
# to a channel we are subscribed to, report the send rate, then poll for
# incoming messages for up to a minute.
if __name__ == "__main__":
    print("Connecting")
    c = PuSuClient("ws://127.0.0.1:55000")
    count = 0
    print("Authorizing")
    c.authorize("foo")
    print("Subscribing")
    # NOTE(review): `listener` (the subscription callback, presumably the
    # function that increments `count`) is not defined in this excerpt --
    # stripped by the dataset dump; confirm upstream.
    c.subscribe("channel.1", listener)
    print("Waiting")
    target = 500
    start = time()
    for i in range(1, target + 1):
        c.publish("channel.1", {"foo": "bar"})
    end = time()
    elapsed = end - start
    # old_div keeps Python-2 integer-division semantics via the `past` shim.
    print("Sent {} messages in {:.3f}s, {:.2f}msg/s".format(
        target,
        elapsed,
        old_div(target, elapsed)
    ))
    sleep(1)
    print("So far got {} messages, polling...".format(count))
    c.poll()
    print("After poll got {} messages, waiting for more...".format(count))
    # Keep polling once per second for a minute to drain remaining messages.
    for i in range(0, 60):
        sleep(1)
        c.poll()
        print("Got {} messages".format(count))
| 22.3125 | 74 | 0.601307 |
18f2ad5a7c870598e6dec3394ee47ca770ec9558 | 3,289 | py | Python | tests/test_nacl.py | intangere/NewHope_X25519_XSalsa20_Poly1305 | 459914e520bcb5aa207a11533ae217d50719307d | [
"MIT"
] | null | null | null | tests/test_nacl.py | intangere/NewHope_X25519_XSalsa20_Poly1305 | 459914e520bcb5aa207a11533ae217d50719307d | [
"MIT"
] | 1 | 2021-06-21T03:07:13.000Z | 2021-06-21T03:07:13.000Z | tests/test_nacl.py | intangere/NewHope_X25519_XSalsa20_Poly1305 | 459914e520bcb5aa207a11533ae217d50719307d | [
"MIT"
] | null | null | null | # Import libnacl libs
import libnacl
import libnacl.utils
# Import python libs
import unittest
t = TestPublic()
t.test_box_seal() | 38.244186 | 72 | 0.663728 |
18f2c7ccc01f817c8542ea8ba418a16fde40bf5a | 2,815 | py | Python | gui.py | flifloo/PyTchat | 89e0305557cfedba7637f061184d020ac7f71eeb | [
"MIT"
] | 1 | 2019-07-27T08:43:05.000Z | 2019-07-27T08:43:05.000Z | gui.py | flifloo/PyTchat | 89e0305557cfedba7637f061184d020ac7f71eeb | [
"MIT"
] | 5 | 2019-07-19T15:11:16.000Z | 2019-07-24T15:11:00.000Z | gui.py | flifloo/PyTchat | 89e0305557cfedba7637f061184d020ac7f71eeb | [
"MIT"
] | null | null | null | from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError
from tkinter.messagebox import showerror, showwarning
from client import Client
from threading import Thread
from socket import error as socket_error
destroy = False
login = Tk()
login.title("Login")
host = StringVar()
port = IntVar()
Label(login, text="Host & port:").pack()
login_f = Frame(login)
login_f.pack()
Entry(login_f, textvariable=host, width=14).grid(row=0, column=0)
Entry(login_f, textvariable=port, width=4).grid(row=0, column=1)
Button(login, text="Submit", command=start).pack()
login.mainloop()
tchat = Tk()
tchat.title("PyTchat")
tchat.protocol("WM_DELETE_WINDOW", on_closing)
chat = Frame(tchat)
chat.pack()
scrollbar = Scrollbar(chat)
scrollbar.pack(side="right", fill="y")
chat_message = Text(chat, height=15, width=50, yscrollcommand=scrollbar.set, state="disable")
chat_message.pack(side="left", fill="both")
receive_thread = Thread(target=receive)
receive_thread.start()
entry = Frame(tchat)
entry.pack()
message = StringVar()
field = Entry(entry, textvariable=message)
field.bind("<Return>", send)
field.grid(row=0, column=0)
Button(entry, text="Send", command=send).grid(row=0, column=1)
tchat.mainloop()
| 27.067308 | 97 | 0.628064 |
18f342f2a9acba64d1ea5575f081da8b2ad4064d | 281 | py | Python | nautobot_secrets_providers/urls.py | jifox/nautobot-plugin-secrets-providers | 4d6ca51d0c78b4785f78909b04cf7c7b33c02e5d | [
"Apache-2.0"
] | 6 | 2021-12-22T21:26:12.000Z | 2022-02-16T10:00:04.000Z | nautobot_secrets_providers/urls.py | jifox/nautobot-plugin-secrets-providers | 4d6ca51d0c78b4785f78909b04cf7c7b33c02e5d | [
"Apache-2.0"
] | 9 | 2021-12-14T13:43:13.000Z | 2022-03-29T18:49:55.000Z | nautobot_secrets_providers/urls.py | jifox/nautobot-plugin-secrets-providers | 4d6ca51d0c78b4785f78909b04cf7c7b33c02e5d | [
"Apache-2.0"
] | 2 | 2022-02-04T19:11:09.000Z | 2022-03-22T16:23:31.000Z | """Django urlpatterns declaration for nautobot_secrets_providers plugin."""
from django.urls import path
from nautobot_secrets_providers import views
app_name = "nautobot_secrets_providers"
urlpatterns = [
path("", views.SecretsProvidersHomeView.as_view(), name="home"),
]
| 23.416667 | 75 | 0.786477 |
18f380451d6001349051a85381a7ca31b31818f6 | 1,920 | py | Python | nadlogar/quizzes/views.py | LenartBucar/nadlogar | 2aba693254d56896419d09e066f91551492f8980 | [
"MIT"
] | null | null | null | nadlogar/quizzes/views.py | LenartBucar/nadlogar | 2aba693254d56896419d09e066f91551492f8980 | [
"MIT"
] | null | null | null | nadlogar/quizzes/views.py | LenartBucar/nadlogar | 2aba693254d56896419d09e066f91551492f8980 | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect, render
from .forms import QuizForm
from .models import Quiz
| 28.656716 | 73 | 0.690625 |
18f4895ff656c51b070791d34f8e28cf58f2c463 | 6,757 | py | Python | cogs/vote.py | FFrost/CBot | aee077ee36462cfef14a3fb2fa5e3c1ffe741064 | [
"MIT"
] | 4 | 2018-06-26T08:15:04.000Z | 2019-10-09T22:49:38.000Z | cogs/vote.py | FFrost/CBot | aee077ee36462cfef14a3fb2fa5e3c1ffe741064 | [
"MIT"
] | null | null | null | cogs/vote.py | FFrost/CBot | aee077ee36462cfef14a3fb2fa5e3c1ffe741064 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import asyncio
import time
from enum import Enum
def setup(bot):
bot.add_cog(Vote(bot))
| 35.563158 | 168 | 0.557052 |
18f4a88074003325bea709addb8e527765d91168 | 5,227 | py | Python | async_limits/storage/memcached.py | anomit/limits | a02d3234664d2b4da9968fd5ad25899ce106517a | [
"MIT"
] | 1 | 2021-06-21T13:51:56.000Z | 2021-06-21T13:51:56.000Z | async_limits/storage/memcached.py | anomit/limits | a02d3234664d2b4da9968fd5ad25899ce106517a | [
"MIT"
] | null | null | null | async_limits/storage/memcached.py | anomit/limits | a02d3234664d2b4da9968fd5ad25899ce106517a | [
"MIT"
] | null | null | null | import inspect
import threading
import time
from six.moves import urllib
from ..errors import ConfigurationError
from ..util import get_dependency
from .base import Storage
| 32.465839 | 79 | 0.543524 |
18f6a37e4dfb35bf57b4cd1ecadb7071de8cbf6b | 4,617 | py | Python | floreal/views/view_purchases.py | caracole-io/circuitscourts | 4e9279226373ae41eb4d0e0f37f84f12197f34ff | [
"MIT"
] | null | null | null | floreal/views/view_purchases.py | caracole-io/circuitscourts | 4e9279226373ae41eb4d0e0f37f84f12197f34ff | [
"MIT"
] | null | null | null | floreal/views/view_purchases.py | caracole-io/circuitscourts | 4e9279226373ae41eb4d0e0f37f84f12197f34ff | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from caracole import settings
from .decorators import nw_admin_required
from .getters import get_delivery, get_subgroup
from . import latex
from .spreadsheet import spreadsheet
from .delivery_description import delivery_description
MIME_TYPE = {
'pdf': "application/pdf",
'xlsx': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
def non_html_response(name_bits, name_extension, content):
"""Common helper to serve PDF and Excel content."""
filename = ("_".join(name_bits) + "." + name_extension).replace(" ", "_")
mime_type = MIME_TYPE[name_extension]
response = HttpResponse(content_type=mime_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
response.write(content)
return response
| 41.972727 | 118 | 0.706953 |
18f75103fffe006c35337768f20ad10b43a5b636 | 411 | py | Python | hack_today_2017/web/web_time_solver.py | runsel/CTF_Writeups | df3d8469b981265d4d43bfc90e75075a95acb1dd | [
"MIT"
] | 4 | 2019-01-07T03:15:45.000Z | 2021-01-10T04:58:15.000Z | hack_today_2017/web/web_time_solver.py | runsel/CTF_Writeups | df3d8469b981265d4d43bfc90e75075a95acb1dd | [
"MIT"
] | null | null | null | hack_today_2017/web/web_time_solver.py | runsel/CTF_Writeups | df3d8469b981265d4d43bfc90e75075a95acb1dd | [
"MIT"
] | 3 | 2018-10-21T19:17:34.000Z | 2020-07-07T08:58:25.000Z | import requests
charset = "abcdefghijklmnopqrstuvwxyz0123456789_{}"
password = "HackToday{"
url = "http://sawah.ittoday.web.id:40137/"
while(password[-1]!="}"):
for i in charset:
r = requests.get(url)
payload = {'password': password+i, 'submit': 'Submit+Query'}
r = requests.post(url, data=payload)
if r.status_code==302:
password+=i
print password
| 27.4 | 68 | 0.615572 |
18f9f056fd0c54a5b1e0f0f03ecf846e53698354 | 484 | py | Python | mayan/__init__.py | sneha-rk/drawings-version-control | 4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497 | [
"Apache-2.0"
] | 1 | 2021-05-14T18:40:37.000Z | 2021-05-14T18:40:37.000Z | mayan/__init__.py | sneha-rk/drawings-version-control | 4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497 | [
"Apache-2.0"
] | null | null | null | mayan/__init__.py | sneha-rk/drawings-version-control | 4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
<<<<<<< HEAD
__title__ = 'Mayan EDMS'
__version__ = '2.7.3'
__build__ = 0x020703
=======
__title__ = 'IITH DVC'
__version__ = '2.7.2'
__build__ = 0x020702
>>>>>>> 4cedd41ab6b9750abaebc35d1970556408d83cf5
__author__ = 'Roberto Rosario'
__author_email__ = 'roberto.rosario@mayan-edms.com'
__description__ = 'Free Open Source Electronic Document Management System'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2011-2016 Roberto Rosario'
| 28.470588 | 74 | 0.760331 |
18fa914340e673af7a09db0d4d032b0e04e6bdee | 5,728 | py | Python | ldt/utils/usaf/bcsd_preproc/lib_bcsd_metrics/BCSD_function.py | rkim3/LISF | afaf6a228d2b29a1d26111acc951204f0b436387 | [
"Apache-2.0"
] | 67 | 2018-11-13T21:40:54.000Z | 2022-02-23T08:11:56.000Z | ldt/utils/usaf/bcsd_preproc/lib_bcsd_metrics/BCSD_function.py | dmocko/LISF | 08d024d6d5fe66db311e43e78740842d653749f4 | [
"Apache-2.0"
] | 679 | 2018-11-13T20:10:29.000Z | 2022-03-30T19:55:25.000Z | ldt/utils/usaf/bcsd_preproc/lib_bcsd_metrics/BCSD_function.py | dmocko/LISF | 08d024d6d5fe66db311e43e78740842d653749f4 | [
"Apache-2.0"
] | 119 | 2018-11-08T15:53:35.000Z | 2022-03-28T10:16:01.000Z | from __future__ import division
import pandas as pd
import numpy as np
import calendar
import os.path as op
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
from scipy.stats import percentileofscore
from scipy.stats import scoreatpercentile, pearsonr
from math import *
import time
from BCSD_stats_functions import *
import xarray as xr
import os, errno
| 61.591398 | 191 | 0.662884 |
18fd4c8c14d7b745e7af13adc4fd4221571ac4a2 | 1,212 | py | Python | charybde/parsers/dump_parser.py | m09/charybde | 3f8d7d17ed7b9df4bc42743bbd953f61bc807b81 | [
"Apache-2.0"
] | 1 | 2020-03-12T12:58:30.000Z | 2020-03-12T12:58:30.000Z | charybde/parsers/dump_parser.py | m09/charybde | 3f8d7d17ed7b9df4bc42743bbd953f61bc807b81 | [
"Apache-2.0"
] | 24 | 2019-10-28T07:21:19.000Z | 2020-04-13T22:38:37.000Z | charybde/parsers/dump_parser.py | m09/charybde | 3f8d7d17ed7b9df4bc42743bbd953f61bc807b81 | [
"Apache-2.0"
] | null | null | null | from bz2 import BZ2File
from pathlib import Path
from queue import Queue
from threading import Thread
from typing import Any, Callable, Dict, Iterator, List, Tuple
from xmltodict import parse as xmltodict_parse
| 28.857143 | 88 | 0.615512 |
18fdbb6a59afbc92dbdea6d53c5bce95efda434c | 5,321 | py | Python | server/py/camera.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | server/py/camera.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | server/py/camera.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | import cv2
import sys,json,numpy as np
import glob,os
import face_recognition
import datetime
from pathlib import Path
from pymongo import MongoClient
from flask_mongoengine import MongoEngine
from bson.objectid import ObjectId
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
client = MongoClient(port=27017)
db=client.GetMeThrough;
home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
known_encodings_file_path = home + "/data/known_encodings_file.csv"
people_file_path = home + "/data/people_file.csv"
known_encodings_file = Path(known_encodings_file_path)
if known_encodings_file.is_file():
known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
else:
known_encodings = []
people_file = Path(people_file_path)
if people_file.is_file():
people = np.genfromtxt(people_file, dtype='U',delimiter=',')
else:
people = []
| 41.248062 | 100 | 0.595001 |
18fe1679223211eeb9c906c7f88442b62f5fd7cf | 929 | py | Python | scgrn/src/utils.py | Fassial/nibs-intern | 493a340f431c11712723db89476cae4056c6ef5b | [
"MIT"
] | null | null | null | scgrn/src/utils.py | Fassial/nibs-intern | 493a340f431c11712723db89476cae4056c6ef5b | [
"MIT"
] | null | null | null | scgrn/src/utils.py | Fassial/nibs-intern | 493a340f431c11712723db89476cae4056c6ef5b | [
"MIT"
] | null | null | null | ###################################
# Created on 22:20, Nov. 16th, 2020
# Author: fassial
# Filename: utils.py
###################################
# dep
import os
import pandas as pd
import scanpy as sp
from collections import defaultdict
# local dep
# macro
# def get_data_lm func
# def get_data_csv func
# def UTILS_GET_DATA_FUNC dict
UTILS_GET_DATA_FUNC = defaultdict(lambda : get_data_csv, {
".loom": get_data_lm,
".csv": get_data_csv
})
# def get_data func
| 19.765957 | 62 | 0.603875 |
18feec8ad8d14751af185b1bf50263837f32d416 | 1,376 | py | Python | PQencryption/pub_key/pk_signature/quantum_vulnerable/signing_Curve25519_PyNaCl.py | OleMussmann/PQencryption | e9a550e285c4b5145210425fbaa2cac338f3d266 | [
"Apache-2.0"
] | null | null | null | PQencryption/pub_key/pk_signature/quantum_vulnerable/signing_Curve25519_PyNaCl.py | OleMussmann/PQencryption | e9a550e285c4b5145210425fbaa2cac338f3d266 | [
"Apache-2.0"
] | null | null | null | PQencryption/pub_key/pk_signature/quantum_vulnerable/signing_Curve25519_PyNaCl.py | OleMussmann/PQencryption | e9a550e285c4b5145210425fbaa2cac338f3d266 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 16:26:41 CEST 2017
@author: BMMN
"""
import gc # garbage collector
import nacl.signing
import nacl.encoding
if __name__ == "__main__":
# This in an example. In production, you would want to read the key from an
# external file or the command line. The key must be 32 bytes long.
# DON'T DO THIS IN PRODUCTION!
signing_key, verify_key = key_gen()
message = 'This is my message.'
print("message : " + message)
# signing
signed = sign(signing_key, message)
verify_key_hex = verify_key.encode(encoder=nacl.encoding.HexEncoder)
print("signed: " + signed)
print("verify_key_hex: " + verify_key_hex)
# verification
verify_key = nacl.signing.VerifyKey(verify_key_hex,
encoder=nacl.encoding.HexEncoder)
print()
print("verification positive:")
print(verify_key.verify(signed))
print()
print("verification negative:")
print(verify_key.verify("0"*len(signed)))
# make sure all memory is flushed after operations
del signing_key
del signed
del message
del verify_key
del verify_key_hex
gc.collect()
| 25.018182 | 75 | 0.699855 |
18ff8d36aadc1e7329aa5016280d4db4c68e6086 | 17,187 | py | Python | app.py | otsaloma/bort-proxy | 28ac4ab2c249d4a47f71a4e39cf21c44d2fdf991 | [
"MIT"
] | 2 | 2016-10-02T01:33:24.000Z | 2016-12-12T09:20:06.000Z | app.py | otsaloma/bort-proxy | 28ac4ab2c249d4a47f71a4e39cf21c44d2fdf991 | [
"MIT"
] | 2 | 2019-12-15T20:17:09.000Z | 2020-12-28T01:10:26.000Z | app.py | otsaloma/bort-proxy | 28ac4ab2c249d4a47f71a4e39cf21c44d2fdf991 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Osmo Salomaa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import base64
import bs4
import cairosvg
import contextlib
import dotenv
import flask
import functools
import imghdr
import io
import json
import os
import pickle
import PIL.Image
import random
import re
import requests
import traceback
import tweepy
import unicodedata
import urllib.parse
import xml.etree.ElementTree as ET
dotenv.load_dotenv()
FALLBACK_PNG = open("letter-icons/x.png", "rb").read()
LINK_REL_PATTERNS = [
re.compile("^apple-touch-icon$"),
re.compile("^apple-touch-icon-precomposed$"),
re.compile("^icon$"),
re.compile("^shortcut icon$"),
]
app = flask.Flask(__name__)
blacklist = set()
if app.config["ENV"] == "production":
import redis
cache = redis.from_url(os.environ["REDISCLOUD_URL"])
else:
import redislite
cache = redislite.Redis()
# Cache HTTP connections for better performance.
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#customizing-pool-behavior
adapter = requests.adapters.HTTPAdapter(pool_connections=10,
pool_maxsize=100,
max_retries=0,
pool_block=False)
rs = requests.Session()
rs.headers = {"User-Agent": "Mozilla/5.0"}
rs.mount("http://", adapter)
rs.mount("https://", adapter)
def find_icons(url):
"""Yield icon entries specified in the HTML HEAD of `url`."""
url, page = get_page(url)
soup = bs4.BeautifulSoup(page, "html.parser")
for pattern in LINK_REL_PATTERNS:
for tag in soup.find_all("link", dict(rel=pattern)):
href = urllib.parse.urljoin(url, tag.attrs["href"])
size = tag.attrs.get("sizes", "0x0")
if size == "any":
size = "1000x1000"
yield dict(url=href, size=int(size.split("x")[0]))
# Fall back on looking for icons at the server root.
join = lambda x: urllib.parse.urljoin(url, x)
yield dict(url=join("/apple-touch-icon.png"), fallback=True)
yield dict(url=join("/apple-touch-icon-precomposed.png"), fallback=True)
def get_cache_control(max_age):
"""Return a Cache-Control header for `max_age`."""
return "public, max-age={:d}".format(max_age)
def get_from_cache(key):
"""Return value, ttl for `key` from cache."""
return cache.get(key), cache.ttl(key)
def get_letter(url):
"""Return letter to represent `url`."""
if "://" not in url:
url = "http://{}".format(url)
url = urllib.parse.urlparse(url).netloc
url = url.split(".")
url = url[-2] if len(url) > 1 else url[0]
return url[0].lower() if url else "x"
def get_page(url, timeout=15):
"""Return evaluated `url`, HTML page as text."""
if "://" in url:
response = rs.get(url, timeout=timeout)
response.raise_for_status()
return response.url, response.text
for scheme in ("https", "http"):
with silent(Exception):
return get_page("{}://{}".format(scheme, url))
raise Exception("Failed to get page")
def is_svg(image):
return (isinstance(image, str) and
image.lstrip().startswith("<svg"))
def make_response(data, format, max_age=None):
"""Return response 200 for `data` as `format`."""
if format == "base64":
text = base64.b64encode(data)
max_age = max_age or random.randint(1, 3) * 86400
return flask.Response(text, 200, {
"Access-Control-Allow-Origin": "*",
"Content-Type": "text/plain",
"Content-Encoding": "UTF-8",
"Content-Length": str(len(text)),
"Cache-Control": get_cache_control(max_age),
})
if format == "json":
text = json.dumps(data, ensure_ascii=False)
max_age = max_age or 3600
return flask.Response(text, 200, {
"Access-Control-Allow-Origin": "*",
"Content-Type": "application/json",
"Content-Encoding": "UTF-8",
"Content-Length": str(len(text)),
"Cache-Control": get_cache_control(max_age),
})
if format == "png":
max_age = max_age or random.randint(1, 3) * 86400
return flask.Response(data, 200, {
"Access-Control-Allow-Origin": "*",
"Content-Type": "image/png",
"Content-Length": str(len(data)),
"Cache-Control": get_cache_control(max_age),
})
def request_image(url, max_size=1, timeout=15):
"""Request and return image at `url` at most `max_size` MB."""
# Avoid getting caught reading insanely large files.
# http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow
if url in blacklist:
raise ValueError("URL blacklisted")
max_size = max_size * 1024 * 1024
with contextlib.closing(rs.get(
url, timeout=timeout, stream=True)) as response:
response.raise_for_status()
if ("content-length" in response.headers and
response.headers["content-length"].isdigit() and
int(response.headers["content-length"]) > max_size):
raise ValueError("Too large")
content_type = response.headers.get("content-type", "").lower()
if url.endswith(".svg") or content_type == "image/svg+xml":
# SVG, return as string.
image = response.text
if len(image) > max_size:
blacklist.add(url)
raise ValueError("Too large")
return image
# Raster, return as bytes.
image = response.raw.read(max_size+1, decode_content=True)
if len(image) > max_size:
blacklist.add(url)
raise ValueError("Too large")
return image
def resize_image(image, size):
"""Resize `image` to `size` and return PNG bytes."""
if is_svg(image):
image = cairosvg.svg2png(bytestring=image.encode("utf-8"),
output_width=size,
output_height=size)
with PIL.Image.open(io.BytesIO(image)) as pi:
if pi.mode not in ("RGB", "RGBA"):
pi = pi.convert("RGBA")
pi.thumbnail((size, size), PIL.Image.BICUBIC)
if pi.width != pi.height:
# Add transparent margins to make a square image.
bg = PIL.Image.new("RGBA", (size, size), (255, 255, 255, 0))
bg.paste(pi, ((size - pi.width) // 2, (size - pi.height) // 2))
pi = bg
out = io.BytesIO()
pi.save(out, "PNG")
return out.getvalue()
def rex(a, b):
"""Return a random amount of seconds between a and b days."""
return random.randint(int(a*86400), int(b*86400))
| 38.535874 | 98 | 0.615872 |
18ffb685c2a877f7f518f970f9a6eafbcd304771 | 2,099 | py | Python | apps/comments/migrations/0001_initial.py | puertoricanDev/horas | 28597af13409edd088a71143d2f4c94cd7fd83f5 | [
"MIT"
] | 10 | 2015-01-18T02:39:35.000Z | 2021-11-09T22:53:10.000Z | apps/comments/migrations/0001_initial.py | puertoricanDev/horas | 28597af13409edd088a71143d2f4c94cd7fd83f5 | [
"MIT"
] | 52 | 2015-03-02T17:46:23.000Z | 2022-02-10T13:23:11.000Z | apps/comments/migrations/0001_initial.py | puertoricanDev/horas | 28597af13409edd088a71143d2f4c94cd7fd83f5 | [
"MIT"
] | 7 | 2015-03-02T01:23:35.000Z | 2021-11-09T22:58:39.000Z | # Generated by Django 1.10.6 on 2017-03-13 04:46
# Modified by Ral Negrn on 2019-06-22 16:48
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import apps.core.models
| 31.328358 | 85 | 0.45212 |
18ffb7e91b90c1915102493dee2fe7ea4b7d621d | 9,607 | py | Python | IRIS_data_download/IRIS_download_support/obspy/io/nied/knet.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/io/nied/knet.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/io/nied/knet.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | # -*- coding: utf-8 -*-
"""
obspy.io.nied.knet - K-NET/KiK-net read support for ObsPy
=========================================================
Reading of the K-NET and KiK-net ASCII format as defined on
http://www.kyoshin.bosai.go.jp.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import re
import numpy as np
from obspy import UTCDateTime, Stream, Trace
from obspy.core.trace import Stats
def _buffer_proxy(filename_or_buf, function, reset_fp=True,
file_mode="rb", *args, **kwargs):
"""
Calls a function with an open file or file-like object as the first
argument. If the file originally was a filename, the file will be
opened, otherwise it will just be passed to the underlying function.
:param filename_or_buf: File to pass.
:type filename_or_buf: str, open file, or file-like object.
:param function: The function to call.
:param reset_fp: If True, the file pointer will be set to the initial
position after the function has been called.
:type reset_fp: bool
:param file_mode: Mode to open file in if necessary.
"""
try:
position = filename_or_buf.tell()
is_buffer = True
except AttributeError:
is_buffer = False
if is_buffer is True:
ret_val = function(filename_or_buf, *args, **kwargs)
if reset_fp:
filename_or_buf.seek(position, 0)
return ret_val
else:
with open(filename_or_buf, file_mode) as fh:
return function(fh, *args, **kwargs)
def _is_knet_ascii(filename_or_buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param filename_or_buf: File to test.
:type filename_or_buf: str or file-like object.
"""
try:
return _buffer_proxy(filename_or_buf, _internal_is_knet_ascii,
reset_fp=True)
# Happens for example when passing the data as a string which would be
# interpreted as a filename.
except (OSError, UnicodeDecodeError):
return False
def _internal_is_knet_ascii(buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
first_string = buf.read(11).decode()
# File has less than 11 characters
if len(first_string) != 11:
return False
if first_string == 'Origin Time':
return True
return False
def _prep_hdr_line(name, line):
"""
Helper function to check the contents of a header line and split it.
:param name: String that the line should start with.
:type name: str
:param line: Line to check and split.
:type line: str
"""
if not line.startswith(name):
raise KNETException("Expected line to start with %s but got %s "
% (name, line))
else:
return line.split()
def _read_knet_hdr(hdrlines, convert_stnm=False, **kwargs):
"""
Read the header values into a dictionary.
:param hdrlines: List of the header lines of a a K-NET/KiK-net ASCII file
:type hdrlines: list
:param convert_stnm: For station names with 6 letters write the last two
letters of the station code to the 'location' field
:type convert_stnm: bool
"""
hdrdict = {'knet': {}}
hdrnames = ['Origin Time', 'Lat.', 'Long.', 'Depth. (km)', 'Mag.',
'Station Code', 'Station Lat.', 'Station Long.',
'Station Height(m)', 'Record Time', 'Sampling Freq(Hz)',
'Duration Time(s)', 'Dir.', 'Scale Factor', 'Max. Acc. (gal)',
'Last Correction', 'Memo.']
_i = 0
# Event information
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['evot'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lat = float(flds[1])
hdrdict['knet']['evla'] = lat
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lon = float(flds[1])
hdrdict['knet']['evlo'] = lon
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dp = float(flds[2])
hdrdict['knet']['evdp'] = dp
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
mag = float(flds[1])
hdrdict['knet']['mag'] = mag
# Station information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
# K-NET and KiK-Net station names can be more than 5 characters long
# which will cause the station name to be truncated when writing the
# the trace as miniSEED; if convert_stnm is enabled, the last two
# letters of the station code are written to the 'location' field
stnm = flds[2]
location = ''
if convert_stnm and len(stnm) > 5:
location = stnm[-2:]
stnm = stnm[:-2]
if len(stnm) > 7:
raise KNETException(
"Station name can't be more than 7 characters long!")
hdrdict['station'] = stnm
hdrdict['location'] = location
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stla'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stlo'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stel'] = float(flds[2])
# Data information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
# A 15 s delay is added to the record time by the
# the K-NET and KiK-Net data logger
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S') - 15.0
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['starttime'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
freqstr = flds[2]
m = re.search('[0-9]*', freqstr)
freq = int(m.group())
hdrdict['sampling_rate'] = freq
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['duration'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
channel = flds[1].replace('-', '')
kiknetcomps = {'1': 'NS1', '2': 'EW1', '3': 'UD1',
'4': 'NS2', '5': 'EW2', '6': 'UD2'}
if channel.strip() in kiknetcomps.keys(): # kiknet directions are 1-6
channel = kiknetcomps[channel.strip()]
hdrdict['channel'] = channel
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
eqn = flds[2]
num, denom = eqn.split('/')
num = float(re.search('[0-9]*', num).group())
denom = float(denom)
# convert the calibration from gal to m/s^2
hdrdict['calib'] = 0.01 * num / denom
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
acc = float(flds[3])
hdrdict['knet']['accmax'] = acc
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['last correction'] = dt
# The comment ('Memo') field is optional
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
if len(flds) > 1:
hdrdict['knet']['comment'] = ' '.join(flds[1:])
if len(hdrlines) != _i + 1:
raise KNETException("Expected %d header lines but got %d"
% (_i + 1, len(hdrlines)))
return hdrdict
def _read_knet_ascii(filename_or_buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param filename: K-NET/KiK-net ASCII file to be read.
:type filename: str or file-like object.
"""
return _buffer_proxy(filename_or_buf, _internal_read_knet_ascii, **kwargs)
def _internal_read_knet_ascii(buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
data = []
hdrdict = {}
cur_pos = buf.tell()
buf.seek(0, 2)
size = buf.tell()
buf.seek(cur_pos, 0)
# First read the headerlines
headerlines = []
while buf.tell() < size:
line = buf.readline().decode()
headerlines.append(line)
if line.startswith('Memo'):
hdrdict = _read_knet_hdr(headerlines, **kwargs)
break
while buf.tell() < size:
line = buf.readline()
parts = line.strip().split()
data += [float(p) for p in parts]
hdrdict['npts'] = len(data)
# The FDSN network code for the National Research Institute for Earth
# Science and Disaster Prevention (NEID JAPAN) is BO (Bosai-Ken Network)
hdrdict['network'] = 'BO'
data = np.array(data)
stats = Stats(hdrdict)
trace = Trace(data, header=stats)
return Stream([trace])
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| 31.498361 | 78 | 0.613407 |
7a0036f8904ef04950506fa3bb65a2bb9ab285ce | 159 | py | Python | great_expectations/dataset/__init__.py | avanderm/great_expectations | e4619a890700a492441a7ed3cbb9e5abb0953268 | [
"Apache-2.0"
] | 1 | 2021-01-10T18:00:06.000Z | 2021-01-10T18:00:06.000Z | great_expectations/dataset/__init__.py | avanderm/great_expectations | e4619a890700a492441a7ed3cbb9e5abb0953268 | [
"Apache-2.0"
] | null | null | null | great_expectations/dataset/__init__.py | avanderm/great_expectations | e4619a890700a492441a7ed3cbb9e5abb0953268 | [
"Apache-2.0"
] | null | null | null | from .base import Dataset
from .pandas_dataset import MetaPandasDataset, PandasDataset
from .sqlalchemy_dataset import MetaSqlAlchemyDataset, SqlAlchemyDataset | 53 | 72 | 0.886792 |
7a00d530de18db23fd30cafb2ab4bd712d82beb0 | 379 | py | Python | app/main/routes.py | theambidextrous/digitalemployeeapp | 2c8b593a590621a34c1fa033a720f1e412c76b96 | [
"MIT"
] | null | null | null | app/main/routes.py | theambidextrous/digitalemployeeapp | 2c8b593a590621a34c1fa033a720f1e412c76b96 | [
"MIT"
] | null | null | null | app/main/routes.py | theambidextrous/digitalemployeeapp | 2c8b593a590621a34c1fa033a720f1e412c76b96 | [
"MIT"
] | null | null | null | from flask import Blueprint, jsonify, request, redirect, abort, url_for, render_template
main = Blueprint('main', __name__)
# routes
| 29.153846 | 88 | 0.672823 |
7a00ecf5169810e7505addc750380ef02512919a | 5,377 | py | Python | python/jittor/test/test_grad.py | llehtahw/jittor | d83389117fd026a0881dd713e658ce5ae2a75bcb | [
"Apache-2.0"
] | 1 | 2020-11-13T10:08:00.000Z | 2020-11-13T10:08:00.000Z | python/jittor/test/test_grad.py | llehtahw/jittor | d83389117fd026a0881dd713e658ce5ae2a75bcb | [
"Apache-2.0"
] | null | null | null | python/jittor/test/test_grad.py | llehtahw/jittor | d83389117fd026a0881dd713e658ce5ae2a75bcb | [
"Apache-2.0"
] | null | null | null | # ***************************************************************
# Copyright (c) 2020 Jittor. Authors: Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from .test_core import expect_error
if __name__ == "__main__":
    # Run this module's unittest suite when executed as a script.
    unittest.main()
| 32.197605 | 92 | 0.455644 |
7a012bf9cfedafb87b6096b3721323abb9371444 | 846 | py | Python | mre/helper/Range.py | alvarofpp/mre | 025a5f10b92a0a4bf32d673509958b660871b2f6 | [
"MIT"
] | 7 | 2019-04-21T18:25:49.000Z | 2020-12-22T19:13:25.000Z | mre/helper/Range.py | alvarofpp/mre | 025a5f10b92a0a4bf32d673509958b660871b2f6 | [
"MIT"
] | 12 | 2019-08-10T02:09:43.000Z | 2021-10-02T15:29:48.000Z | mre/helper/Range.py | alvarofpp/mre | 025a5f10b92a0a4bf32d673509958b660871b2f6 | [
"MIT"
] | 22 | 2019-04-21T18:25:54.000Z | 2020-10-04T21:43:12.000Z | from typing import Union
from mre.Regex import Regex
| 26.4375 | 83 | 0.568558 |
7a014283816fd43c5b99389dd4a3fcc4eb6396ff | 3,463 | py | Python | tests/python/unittest/test_tir_transform_remove_weight_layout_rewrite_block.py | driazati/tvm | b76c817986040dc070d215cf32523d9b2adc8e8b | [
"Apache-2.0"
] | 1 | 2021-12-13T22:07:00.000Z | 2021-12-13T22:07:00.000Z | tests/python/unittest/test_tir_transform_remove_weight_layout_rewrite_block.py | driazati/tvm | b76c817986040dc070d215cf32523d9b2adc8e8b | [
"Apache-2.0"
] | 7 | 2022-02-17T23:04:46.000Z | 2022-03-31T22:22:55.000Z | tests/python/unittest/test_tir_transform_remove_weight_layout_rewrite_block.py | driazati/tvm | b76c817986040dc070d215cf32523d9b2adc8e8b | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import tvm
from tvm.ir.module import IRModule
from tvm.script import tir as T
from tvm.tir.function import PrimFunc
if __name__ == "__main__":
    # Invoke the test directly when this file is run as a script.
    test_matmul()
| 37.641304 | 76 | 0.547791 |
7a0383028d6c513dd8786b4e28fcf20c534cff1a | 341 | py | Python | CS1/Ch11/Artwork.py | DoctorOac/SwosuCsPythonExamples | 07476b9b4ef9a6f8bd68921aef19e8f00183b1e7 | [
"Apache-2.0"
] | 1 | 2022-03-28T18:27:10.000Z | 2022-03-28T18:27:10.000Z | CS1/Ch11/Artwork.py | DoctorOac/SwosuCsPythonExamples | 07476b9b4ef9a6f8bd68921aef19e8f00183b1e7 | [
"Apache-2.0"
] | 1 | 2022-01-11T16:27:40.000Z | 2022-01-11T16:27:40.000Z | CS1/Ch11/Artwork.py | DoctorOac/SwosuCsPythonExamples | 07476b9b4ef9a6f8bd68921aef19e8f00183b1e7 | [
"Apache-2.0"
] | 7 | 2022-03-25T21:01:42.000Z | 2022-03-28T18:51:24.000Z | from Artist import Artist
| 26.230769 | 64 | 0.630499 |
7a03cb031046f0f5a4ab04de791c5d2ae9f6699d | 2,249 | py | Python | nearproteins/__init__.py | audy/nearproteins | ed426a98004c7608894a63c6b445ff60ae251d05 | [
"MIT"
] | null | null | null | nearproteins/__init__.py | audy/nearproteins | ed426a98004c7608894a63c6b445ff60ae251d05 | [
"MIT"
] | 1 | 2019-07-10T05:47:01.000Z | 2019-07-10T17:23:52.000Z | nearproteins/__init__.py | audy/nearproteins | ed426a98004c7608894a63c6b445ff60ae251d05 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from collections import defaultdict
from itertools import product
import json
import random
import sys
from annoy import AnnoyIndex
from Bio import SeqIO
import numpy as np
| 22.267327 | 85 | 0.578924 |
7a042b77715a588fe196553691f390b7d45b469f | 314 | py | Python | arm_control/src/orientation.py | ALxander19/zobov_arm | 8b5b322b53a7a0d9c91fcbc720473a2a6e6f5826 | [
"BSD-2-Clause"
] | null | null | null | arm_control/src/orientation.py | ALxander19/zobov_arm | 8b5b322b53a7a0d9c91fcbc720473a2a6e6f5826 | [
"BSD-2-Clause"
] | null | null | null | arm_control/src/orientation.py | ALxander19/zobov_arm | 8b5b322b53a7a0d9c91fcbc720473a2a6e6f5826 | [
"BSD-2-Clause"
] | null | null | null | # tf.transformations alternative is not yet available in tf2
from tf.transformations import quaternion_from_euler
if __name__ == '__main__':
    # RPY to convert: 90deg, 0, -90deg -- given in radians (1.5707 ~ pi/2)
    q = quaternion_from_euler(1.5707, 0, -1.5707)
    # NOTE(review): indexing assumes the tf component order (x, y, z, w) --
    # confirm against the tf.transformations documentation.
    print "The quaternion representation is %s %s %s %s." % (q[0], q[1], q[2], q[3])
| 31.4 | 82 | 0.694268 |
7a05099cb4069ff152e86f9e7700bcfd829e2375 | 2,997 | py | Python | django_server/fvh_courier/rest/tests/base.py | ForumViriumHelsinki/CityLogistics | df4efef49bdc740a1dc47d0bda49ce2b3833e9c1 | [
"MIT"
] | 1 | 2021-11-02T03:21:48.000Z | 2021-11-02T03:21:48.000Z | django_server/fvh_courier/rest/tests/base.py | ForumViriumHelsinki/CityLogistics | df4efef49bdc740a1dc47d0bda49ce2b3833e9c1 | [
"MIT"
] | 136 | 2019-12-03T14:52:17.000Z | 2022-02-26T21:18:15.000Z | django_server/fvh_courier/rest/tests/base.py | ForumViriumHelsinki/CityLogistics | df4efef49bdc740a1dc47d0bda49ce2b3833e9c1 | [
"MIT"
] | 2 | 2020-06-23T23:58:08.000Z | 2020-12-08T13:19:28.000Z | import datetime
from django.contrib.auth.models import User, Group
from django.utils import timezone
from rest_framework.test import APITestCase
import fvh_courier.models.base
from fvh_courier import models
| 36.54878 | 112 | 0.593594 |
7a0915dbc8c3508d29e923526b1c9bacf3a1ca69 | 12,039 | py | Python | pynoorm/binder.py | jpeyret/pynoorm | d6f7e0e102bb0eb4865beff75cf671b560ebc8b2 | [
"MIT"
] | 2 | 2016-04-14T23:11:06.000Z | 2016-06-04T22:39:10.000Z | pynoorm/binder.py | jpeyret/pynoorm | d6f7e0e102bb0eb4865beff75cf671b560ebc8b2 | [
"MIT"
] | null | null | null | pynoorm/binder.py | jpeyret/pynoorm | d6f7e0e102bb0eb4865beff75cf671b560ebc8b2 | [
"MIT"
] | 1 | 2022-01-16T15:19:16.000Z | 2022-01-16T15:19:16.000Z | """
Binder classes perform two functions through their format method
- given a query template with %(somevar)s python substition
class MyClass(object):
pass
arg1 = MyClass()
arg1.customer = 101
default = MyClass()
default.customer = 201
arg2.country = "CAN"
qry, sub = format("
select *
from customer
where country = %(country)s
and custid = %(customer)s", arg1, default)
means that we will be fetching for country=CAN, custid=101
- the query template itself is transformed to a format
that fits the underlying database's bind variable
scheme which protects against sql injection attacks.
For example, assuming an Oracle database (paramstyle="named")
qry:
"select * from customer where country = :country and custid = :customer"
sub:
{"country":"CAN", "customer" : 101}
Postgres (paramstyle=""):
qry:
"select * from customer where country = :country and custid = :customer"
sub:
{"country":"CAN", "customer" : 101}
a positional database (paramstyle="numeric") (NotImplementedError)
would instead return
qry:
"select * from customer where country = :1 and custid = :2"
sub:
["CAN", 101]
"""
import re
# Several DB-API paramstyle aliases share the qmark placeholder convention.
PARAMSTYLE_QMARK = PARAMSTYLE_SQLITE = PARAMSTYLE_SQLSERVER = "qmark"
# Alias: the "experimental" named binder currently points at BinderNamed.
ExperimentalBinderNamed = BinderNamed
# This registry decides which Binder subclass handles each DB-API
# `paramstyle` value when incoming template substitutions are processed.
Binder._di_paramstyle["pyformat"] = Binder_pyformat
Binder._di_paramstyle["named"] = BinderNamed
Binder._di_paramstyle[PARAMSTYLE_QMARK] = BinderQmark
Binder._di_paramstyle["format"] = BinderFormat
Binder._di_paramstyle["experimentalnamed"] = ExperimentalBinderNamed
# and these are not done yet: unsupported paramstyles map to a class whose
# name indicates it presumably raises NotImplementedError.
Binder._di_paramstyle["numeric"] = Binder_NotImplementedError
7a0b66937d09d19c265c09560989c32e86648150 | 4,313 | py | Python | parselglossy/documentation.py | dev-cafe/parseltongue | 834e78724bb90dfa19748d7f65f6af02d525e3f2 | [
"MIT"
] | 5 | 2019-03-11T18:42:26.000Z | 2021-08-24T18:24:05.000Z | parselglossy/documentation.py | dev-cafe/parseltongue | 834e78724bb90dfa19748d7f65f6af02d525e3f2 | [
"MIT"
] | 105 | 2018-12-04T03:07:22.000Z | 2022-03-24T13:04:48.000Z | parselglossy/documentation.py | dev-cafe/parseltongue | 834e78724bb90dfa19748d7f65f6af02d525e3f2 | [
"MIT"
] | 1 | 2019-02-08T09:54:49.000Z | 2019-02-08T09:54:49.000Z | # -*- coding: utf-8 -*-
#
# parselglossy -- Generic input parsing library, speaking in tongues
# Copyright (C) 2020 Roberto Di Remigio, Radovan Bast, and contributors.
#
# This file is part of parselglossy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# For information on the complete list of contributors to the
# parselglossy library, see: <http://parselglossy.readthedocs.io/>
#
"""Documentation generation."""
from typing import List # noqa: F401
from .utils import JSONDict
def documentation_generator(
    template: JSONDict, *, header: str = "Input parameters"
) -> str:
    """Render reStructuredText documentation for a validated template.

    Parameters
    ----------
    template : JSONDict
        The template to generate documentation from; assumed to be valid.
    header : str
        Title placed at the top of the generated page.

    Returns
    -------
    documentation : str
        The full reST document: a fixed preamble, the underlined title and
        usage notes, then the recursively generated keyword/section docs.
    """
    preamble = (
        ".. raw:: html\n\n"
        "   <style> .red {color:#aa0060; font-weight:bold; font-size:18px} </style>\n\n"
        ".. role:: red\n\n"
        ".. This documentation was autogenerated using parselglossy."
        " Editing by hand is not recommended.\n"
    )
    underline = "=" * len(header)
    page_header = (
        f"{preamble:s}\n{underline:s}\n{header:s}\n{underline:s}\n\n"
        "- Keywords without a default value are **required**.\n"
        "- Default values are either explicit or computed from the value of other keywords in the input.\n"
        "- Sections where all keywords have a default value can be omitted.\n"
        "- Predicates, if present, are the functions run to validate user input.\n"
    )
    return page_header + _rec_documentation_generator(template=template)
def _rec_documentation_generator(template, *, level: int = 0) -> str:
    """Recursively generate documentation for keywords and sections.

    Parameters
    ----------
    template : JSONDict
        The (sub)template to document; assumed to be valid.
    level : int
        Current nesting depth; passed to ``_indent`` so nested sections
        are indented further.

    Returns
    -------
    docs : str
        reST fragment documenting this level and everything below it.
    """
    docs = []  # type: List[str]
    keywords = template.get("keywords", [])
    if keywords:
        docs.append(_indent("\n:red:`Keywords`", level))
    for k in keywords:
        doc = _document_keyword(k)
        # `_indent` returns a single string (see the `append` call above):
        # append it whole. The previous `docs.extend(...)` added it one
        # character at a time and only produced correct output thanks to
        # the final `"".join`.
        docs.append(_indent(doc, level))
    sections = template.get("sections", [])
    if sections:
        docs.append(_indent("\n:red:`Sections`", level))
    for s in sections:
        # Collapse multi-line docstrings onto one line for the field list.
        docstring = s["docstring"].replace("\n", " ")
        doc = f"\n :{s['name']:s}: {docstring:s}\n"
        doc += _rec_documentation_generator(s, level=level + 1)
        docs.append(_indent(doc, level))
    return "".join(docs)
| 31.713235 | 121 | 0.644563 |
7a0b7b8522bbe2e3900e18756663a43a8ac174f7 | 2,765 | py | Python | functions/print_initial_values.py | CINPLA/edNEGmodel_analysis | be8854c563376a14ee7d15e51d98d0d82be96a35 | [
"MIT"
] | null | null | null | functions/print_initial_values.py | CINPLA/edNEGmodel_analysis | be8854c563376a14ee7d15e51d98d0d82be96a35 | [
"MIT"
] | null | null | null | functions/print_initial_values.py | CINPLA/edNEGmodel_analysis | be8854c563376a14ee7d15e51d98d0d82be96a35 | [
"MIT"
] | null | null | null | import numpy as np
| 56.428571 | 158 | 0.637975 |
7a0c0a5f5ecb615e0a6336ce27fac2621034f8ff | 21,021 | py | Python | anyway/parsers/cbs.py | edermon/anyway | 3523b7871b7eebeca225e088af653ba074e5bee3 | [
"BSD-3-Clause"
] | null | null | null | anyway/parsers/cbs.py | edermon/anyway | 3523b7871b7eebeca225e088af653ba074e5bee3 | [
"BSD-3-Clause"
] | null | null | null | anyway/parsers/cbs.py | edermon/anyway | 3523b7871b7eebeca225e088af653ba074e5bee3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import glob
import os
import json
from collections import OrderedDict
import itertools
import re
from datetime import datetime
import six
from six import iteritems
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import or_
from .. import field_names, localization
from ..models import AccidentMarker, Involved, Vehicle
from .. import models
from ..utilities import ItmToWGS84, init_flask, CsvReader, time_delta, decode_hebrew,ImporterUI,truncate_tables
from functools import partial
import logging
failed_dirs = OrderedDict()
CONTENT_ENCODING = 'cp1255'
ACCIDENT_TYPE_REGEX = re.compile(r"Accidents Type (?P<type>\d)")
ACCIDENTS = 'accidents'
CITIES = 'cities'
STREETS = 'streets'
ROADS = "roads"
URBAN_INTERSECTION = 'urban_intersection'
NON_URBAN_INTERSECTION = 'non_urban_intersection'
DICTIONARY = "dictionary"
INVOLVED = "involved"
VEHICLES = "vehicles"
# Mapping from logical table name to the CSV file holding it inside a
# CBS data directory.
cbs_files = {
    ACCIDENTS: "AccData.csv",
    URBAN_INTERSECTION: "IntersectUrban.csv",
    NON_URBAN_INTERSECTION: "IntersectNonUrban.csv",
    STREETS: "DicStreets.csv",
    DICTIONARY: "Dictionary.csv",
    INVOLVED: "InvData.csv",
    VEHICLES: "VehData.csv"
}
# Converter from Israeli Transverse Mercator coordinates to WGS84 lat/lon.
coordinates_converter = ItmToWGS84()
app = init_flask()
db = SQLAlchemy(app)
# json.dumps only accepts an `encoding` keyword on Python 2, hence the split.
json_dumps = partial(json.dumps, encoding=models.db_encoding) if six.PY2 else json.dumps
def get_street(settlement_sign, street_sign, streets):
    """Resolve a street name from (settlement id, street id).

    Returns an empty string when the settlement is unknown or when the
    street id does not match exactly one entry.
    """
    if settlement_sign not in streets:
        # Blank string rather than None so callers can format it directly.
        return u""
    matches = [decode_hebrew(entry[field_names.street_name])
               for entry in streets[settlement_sign]
               if entry[field_names.street_sign] == street_sign]
    # Exactly one hit is expected; zero or several are treated as "not found".
    return matches[0] if len(matches) == 1 else u""
def get_address(accident, streets):
    """Build the address of the accident's main street.

    Aims for the full form ``<street> <number>, <settlement>`` but degrades
    gracefully to whatever parts are available, or an empty string when the
    street itself cannot be resolved.
    """
    street = get_street(accident[field_names.settlement_sign],
                        accident[field_names.street1], streets)
    if not street:
        return u""
    # 9999 is the CBS sentinel for "no house number".
    raw_home = accident[field_names.home]
    home = raw_home if raw_home != 9999 else None
    settlement = localization.get_city_name(accident[field_names.settlement_sign])
    if home and settlement:
        return u"{} {}, {}".format(street, home, settlement)
    if home:
        return u"{} {}".format(street, home)
    if settlement:
        return u"{}, {}".format(street, settlement)
    return street
def get_streets(accident, streets):
    """Return the pair (main street address, secondary street name).

    Every accident record carries a main street and a secondary street;
    either element may be an empty string when it cannot be resolved.
    """
    return (get_address(accident, streets),
            get_street(accident[field_names.settlement_sign],
                       accident[field_names.street2], streets))
def get_junction(accident, roads):
    """
    extracts the junction from an accident
    omerxx: added "km" parameter to the calculation to only show the right junction,
    every non-urban accident shows nearest junction with distance and direction
    :return: returns the junction or None if it wasn't found
    """
    # Case 1: we have a km marker but no explicit intersection -- find the
    # nearest junction on the same road and describe distance + direction.
    if accident["KM"] is not None and accident[field_names.non_urban_intersection] is None:
        min_dist = 100000
        key = (), ()
        junc_km = 0
        # Linear scan for the road option on road1 closest to the accident's
        # km marker; `roads` keys are (road1, road2, km) tuples.
        for option in roads:
            if accident[field_names.road1] == option[0] and abs(accident["KM"]-option[2]) < min_dist:
                min_dist = abs(accident["KM"]-option[2])
                key = accident[field_names.road1], option[1], option[2]
                junc_km = option[2]
        junction = roads.get(key, None)
        if junction:
            # Direction depends on road parity and on which side of the
            # junction the accident lies.
            # NOTE(review): both branches assign u"" -- the Hebrew direction
            # glyphs appear to have been lost in transcoding; confirm the
            # intended strings against version control.
            if accident["KM"] - junc_km > 0:
                direction = u"" if accident[field_names.road1] % 2 == 0 else u""
            else:
                direction = u"" if accident[field_names.road1] % 2 == 0 else u""
            # KM is stored in tenths; >= 1 km is shown in km, below that in
            # meters, and a zero distance shows just the junction name.
            if abs(float(accident["KM"] - junc_km)/10) >= 1:
                string = str(abs(float(accident["KM"])-junc_km)/10) + u" " + direction + u" " + \
                         decode_hebrew(junction)
            elif 0 < abs(float(accident["KM"] - junc_km)/10) < 1:
                string = str(int((abs(float(accident["KM"])-junc_km)/10)*1000)) + u" " + direction + u" " + \
                         decode_hebrew(junction)
            else:
                string = decode_hebrew(junction)
            return string
        else:
            return u""
    # Case 2: the record names a non-urban intersection -- direct lookup.
    elif accident[field_names.non_urban_intersection] is not None:
        key = accident[field_names.road1], accident[field_names.road2], accident["KM"]
        junction = roads.get(key, None)
        return decode_hebrew(junction) if junction else u""
    # Case 3: no usable location information.
    else:
        return u""
def parse_date(accident):
    """Build a ``datetime`` for an accident record.

    The CBS hour field encodes the time of day as a 15-minute slot index in
    the range 1..96: 1 -> 00:00, 2 -> 00:15, 3 -> 00:30, and so on.
    """
    # Slot index -> minutes since midnight.
    total_minutes = accident[field_names.accident_hour] * 15 - 15
    return datetime(
        accident[field_names.accident_year],
        accident[field_names.accident_month],
        accident[field_names.accident_day],
        int(total_minutes // 60),
        total_minutes % 60,
        0,
    )
def load_extra_data(accident, streets, roads):
    """Collect optional, human-readable attributes for an accident.

    :return: mapping of extra field name -> value; only fields that could
        actually be resolved are included.
    :rtype: dict
    """
    extras = {}
    # Urban accidents carry street information.
    if accident[field_names.urban_intersection]:
        main_street, secondary_street = get_streets(accident, streets)
        if main_street:
            extras[field_names.street1] = main_street
        if secondary_street:
            extras[field_names.street2] = secondary_street
    # Non-urban accidents (highways, etc.) carry a junction instead.
    if accident[field_names.non_urban_intersection]:
        junction = get_junction(accident, roads)
        if junction:
            extras[field_names.junction_name] = junction
    # Keep the raw value for any field that has a localized display string;
    # the localized text itself is fetched when the record is deserialized.
    for field in localization.get_supported_tables():
        if accident[field] and localization.get_field(field, accident[field]):
            extras[field] = accident[field]
    return extras
def get_data_value(value):
    """Coerce an optional CBS field to ``int``.

    :returns: ``int(value)`` for a truthy value, otherwise ``-1``
        (``None``, ``0`` and ``""`` all map to ``-1``).
    """
    if not value:
        return -1
    return int(value)
def import_to_datastore(directory, provider_code, batch_size):
    """
    goes through all the files in a given directory, parses and commits them

    :param directory: path of a CBS data directory to import
    :param provider_code: forwarded to the row importers for stamping records
    :param batch_size: rows per bulk-insert chunk; must be > 0
    :return: number of new rows queued for insertion, or 0 on failure/empty dir
    """
    # Python 2/3 compatibility: fall back to range() where xrange is absent.
    try: xrange
    except NameError:
        xrange = range
    try:
        assert batch_size > 0
        files_from_cbs = dict(get_files(directory))
        if len(files_from_cbs) == 0:
            return 0
        logging.info("Importing '{}'".format(directory))
        started = datetime.now()
        new_items = 0
        # Deduplicate against markers already in the DB, then bulk-insert in
        # chunks of `batch_size`.
        all_existing_accidents_ids = set(map(lambda x: x[0], db.session.query(AccidentMarker.id).all()))
        accidents = import_accidents(provider_code=provider_code, **files_from_cbs)
        accidents = [accident for accident in accidents if accident['id'] not in all_existing_accidents_ids]
        new_items += len(accidents)
        for accidents_chunk in chunks(accidents, batch_size, xrange):
            db.session.bulk_insert_mappings(AccidentMarker, accidents_chunk)
        # Same dedup-and-bulk-insert pattern for involved persons ...
        all_involved_accident_ids = set(map(lambda x: x[0], db.session.query(Involved.accident_id).all()))
        involved = import_involved(provider_code=provider_code, **files_from_cbs)
        involved = [x for x in involved if x['accident_id'] not in all_involved_accident_ids]
        for involved_chunk in chunks(involved, batch_size, xrange):
            db.session.bulk_insert_mappings(Involved, involved_chunk)
        new_items += len(involved)
        # ... and for vehicles.
        all_vehicles_accident_ids = set(map(lambda x: x[0], db.session.query(Vehicle.accident_id).all()))
        vehicles = import_vehicles(provider_code=provider_code, **files_from_cbs)
        vehicles = [x for x in vehicles if x['accident_id'] not in all_vehicles_accident_ids]
        for vehicles_chunk in chunks(vehicles, batch_size, xrange):
            db.session.bulk_insert_mappings(Vehicle, vehicles_chunk)
        new_items += len(vehicles)
        logging.info("\t{0} items in {1}".format(new_items, time_delta(started)))
        # NOTE(review): no session.commit() here -- presumably the caller
        # commits the transaction; confirm before reusing this standalone.
        return new_items
    except ValueError as e:
        # A parse failure is recorded and the whole directory is skipped.
        failed_dirs[directory] = str(e)
        return 0
def delete_invalid_entries():
    """Purge markers whose latitude or longitude is NULL.

    Child rows (Involved, Vehicle) are removed before their parent
    AccidentMarker rows, then everything is committed at once.
    """
    # Collect the ids of all markers missing a coordinate.
    bad_ids = [row[0] for row in
               db.session.query(AccidentMarker.id)
                         .filter(or_((AccidentMarker.longitude == None),
                                     (AccidentMarker.latitude == None))).all()]
    # Delete order matters: children first, then the markers themselves.
    targets = [
        (Involved, Involved.accident_id, 'Involved'),
        (Vehicle, Vehicle.accident_id, 'Vehicle'),
        (AccidentMarker, AccidentMarker.id, 'AccidentMarker'),
    ]
    for model, column, label in targets:
        query = db.session.query(model).filter(column.in_(bad_ids))
        if query.all():
            print('deleting invalid entries from ' + label)
            query.delete(synchronize_session='fetch')
    db.session.commit()
| 43.521739 | 120 | 0.667999 |
7a0d3a18b6c3bcab1db31cd7020fbecfa8d1cc2b | 7,709 | py | Python | src/tests/test_pagure_flask_api_project_delete_project.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_pagure_flask_api_project_delete_project.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_pagure_flask_api_project_delete_project.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
(c) 2020 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import json
import unittest
import shutil
import sys
import tempfile
import os
import pygit2
from celery.result import EagerResult
from mock import patch, Mock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.api
import pagure.flask_app
import pagure.lib.query
import tests
from pagure.lib.repo import PagureRepo
| 37.42233 | 188 | 0.572578 |
7a0ed4f58fe297f5e920c7a02179f8ba85d4d8b4 | 3,827 | py | Python | 06_reproducibility/workflow_pipeline/my_pipeline/pipeline/configs.py | fanchi/ml-design-patterns | 6f686601d2385a11a517f8394324062ec6094e14 | [
"Apache-2.0"
] | 1,149 | 2020-04-09T21:20:56.000Z | 2022-03-31T02:41:53.000Z | 06_reproducibility/workflow_pipeline/my_pipeline/pipeline/configs.py | dfinke/ml-design-patterns | 6f686601d2385a11a517f8394324062ec6094e14 | [
"Apache-2.0"
] | 28 | 2020-06-14T15:17:59.000Z | 2022-02-17T10:13:08.000Z | 06_reproducibility/workflow_pipeline/my_pipeline/pipeline/configs.py | dfinke/ml-design-patterns | 6f686601d2385a11a517f8394324062ec6094e14 | [
"Apache-2.0"
] | 296 | 2020-04-28T06:26:41.000Z | 2022-03-31T06:52:33.000Z | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this is adapted from the official TFX taxi pipeline sample
# You can find it here: https://github.com/tensorflow/tfx/tree/master/tfx/examples/chicago_taxi_pipeline
import os # pylint: disable=unused-import
# Pipeline name will be used to identify this pipeline
PIPELINE_NAME = 'my_pipeline'
# TODO: replace with your Google Cloud project
GOOGLE_CLOUD_PROJECT='your-cloud-project'
# TODO: replace with the GCS bucket where you'd like to store model artifacts
# Only include the bucket name here, without the 'gs://'
GCS_BUCKET_NAME = 'your-gcs-bucket'
# TODO: set your Google Cloud region below (or use us-central1)
GOOGLE_CLOUD_REGION = 'us-central1'
RUN_FN = 'pipeline.model.run_fn'
TRAIN_NUM_STEPS = 100
EVAL_NUM_STEPS = 100
BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS = [
'--project=' + GOOGLE_CLOUD_PROJECT,
'--temp_location=' + os.path.join('gs://', GCS_BUCKET_NAME, 'tmp'),
]
# Sampling rate for rows drawn from BigQuery.
# NOTE(review): this constant (and the Chicago Taxi wording that came with
# it) is left over from the TFX taxi template this file was adapted from;
# it is not referenced by BIG_QUERY_QUERY below, which reads the NOAA
# hurricanes public dataset instead. Confirm whether it can be removed.
_query_sample_rate = 0.0001 # Generate a 0.01% random sample.
# The query that extracts the examples from BigQuery. This sample uses
# a BigQuery public dataset from NOAA
BIG_QUERY_QUERY = """
SELECT
usa_wind,
usa_sshs
FROM
`bigquery-public-data.noaa_hurricanes.hurricanes`
WHERE
latitude > 19.5
AND latitude < 64.85
AND longitude > -161.755
AND longitude < -68.01
AND usa_wind IS NOT NULL
AND longitude IS NOT NULL
AND latitude IS NOT NULL
AND usa_sshs IS NOT NULL
AND usa_sshs > 0
"""
# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
GCP_AI_PLATFORM_TRAINING_ARGS = {
'project': GOOGLE_CLOUD_PROJECT,
'region': 'us-central1',
# Starting from TFX 0.14, training on AI Platform uses custom containers:
# https://cloud.google.com/ml-engine/docs/containers-overview
# You can specify a custom container here. If not specified, TFX will use
# a public container image matching the installed version of TFX.
# Set your container name below.
'masterConfig': {
'imageUri': 'gcr.io/' + GOOGLE_CLOUD_PROJECT + '/tfx-pipeline'
},
# Note that if you do specify a custom container, ensure the entrypoint
# calls into TFX's run_executor script (tfx/scripts/run_executor.py)
}
# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
GCP_AI_PLATFORM_SERVING_ARGS = {
'model_name': PIPELINE_NAME,
'project_id': GOOGLE_CLOUD_PROJECT,
# The region to use when serving the model. See available regions here:
# https://cloud.google.com/ml-engine/docs/regions
'regions': [GOOGLE_CLOUD_REGION],
}
| 37.519608 | 104 | 0.736608 |
7a0ee0d44c1b61902945942d2ba7e385c1519999 | 4,707 | py | Python | tests/test_vcf_info_annotator.py | apaul7/VAtools | 9e969cfdb605ec5e65a6aa60a416d7d74a8ff4fd | [
"MIT"
] | 15 | 2019-03-20T06:55:04.000Z | 2022-02-22T06:16:56.000Z | tests/test_vcf_info_annotator.py | apaul7/VAtools | 9e969cfdb605ec5e65a6aa60a416d7d74a8ff4fd | [
"MIT"
] | 27 | 2019-03-05T18:20:19.000Z | 2022-03-04T14:58:36.000Z | tests/test_vcf_info_annotator.py | apaul7/VAtools | 9e969cfdb605ec5e65a6aa60a416d7d74a8ff4fd | [
"MIT"
] | 4 | 2019-03-19T10:33:38.000Z | 2022-02-23T13:40:33.000Z | import unittest
import sys
import os
import py_compile
from vatools import vcf_info_annotator
import tempfile
from filecmp import cmp
| 40.930435 | 183 | 0.599745 |
7a0f470f2ade1699e468a55aa0458f89b6b1d2f2 | 17,965 | py | Python | bddtests/peer/admin_pb2.py | hacera-jonathan/fabric | 3ba291e8fbb0246aa440e02cba54d16924649479 | [
"Apache-2.0"
] | null | null | null | bddtests/peer/admin_pb2.py | hacera-jonathan/fabric | 3ba291e8fbb0246aa440e02cba54d16924649479 | [
"Apache-2.0"
] | 1 | 2021-03-20T05:34:24.000Z | 2021-03-20T05:34:24.000Z | bddtests/peer/admin_pb2.py | hacera-jonathan/fabric | 3ba291e8fbb0246aa440e02cba54d16924649479 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peer/admin.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='peer/admin.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x10peer/admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\"8\n\x0fLogLevelRequest\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t\"9\n\x10LogLevelResponse\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t2\xd5\x02\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12H\n\x11GetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x12H\n\x11SetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x42+Z)github.com/hyperledger/fabric/protos/peerb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SERVERSTATUS_STATUSCODE = _descriptor.EnumDescriptor(
name='StatusCode',
full_name='protos.ServerStatus.StatusCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STARTED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOPPED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PAUSED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=123,
serialized_end=212,
)
_sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE)
_SERVERSTATUS = _descriptor.Descriptor(
name='ServerStatus',
full_name='protos.ServerStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='protos.ServerStatus.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SERVERSTATUS_STATUSCODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=212,
)
_LOGLEVELREQUEST = _descriptor.Descriptor(
name='LogLevelRequest',
full_name='protos.LogLevelRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelRequest.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelRequest.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=214,
serialized_end=270,
)
_LOGLEVELRESPONSE = _descriptor.Descriptor(
name='LogLevelResponse',
full_name='protos.LogLevelResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelResponse.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelResponse.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=272,
serialized_end=329,
)
_SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE
_SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['LogLevelRequest'] = _LOGLEVELREQUEST
DESCRIPTOR.message_types_by_name['LogLevelResponse'] = _LOGLEVELRESPONSE
ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict(
DESCRIPTOR = _SERVERSTATUS,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.ServerStatus)
))
_sym_db.RegisterMessage(ServerStatus)
LogLevelRequest = _reflection.GeneratedProtocolMessageType('LogLevelRequest', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELREQUEST,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelRequest)
))
_sym_db.RegisterMessage(LogLevelRequest)
LogLevelResponse = _reflection.GeneratedProtocolMessageType('LogLevelResponse', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELRESPONSE,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelResponse)
))
_sym_db.RegisterMessage(LogLevelResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z)github.com/hyperledger/fabric/protos/peer'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
def add_AdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'StartServer': grpc.unary_unary_rpc_method_handler(
servicer.StartServer,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'StopServer': grpc.unary_unary_rpc_method_handler(
servicer.StopServer,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=ServerStatus.SerializeToString,
),
'GetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
servicer.GetModuleLogLevel,
request_deserializer=LogLevelRequest.FromString,
response_serializer=LogLevelResponse.SerializeToString,
),
'SetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
servicer.SetModuleLogLevel,
request_deserializer=LogLevelRequest.FromString,
response_serializer=LogLevelResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'protos.Admin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.FromString,
('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.FromString,
('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
}
response_serializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.SerializeToString,
('protos.Admin', 'GetStatus'): ServerStatus.SerializeToString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.SerializeToString,
('protos.Admin', 'StartServer'): ServerStatus.SerializeToString,
('protos.Admin', 'StopServer'): ServerStatus.SerializeToString,
}
method_implementations = {
('protos.Admin', 'GetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.GetModuleLogLevel),
('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus),
('protos.Admin', 'SetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.SetModuleLogLevel),
('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer),
('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.SerializeToString,
('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.SerializeToString,
('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
}
response_deserializers = {
('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.FromString,
('protos.Admin', 'GetStatus'): ServerStatus.FromString,
('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.FromString,
('protos.Admin', 'StartServer'): ServerStatus.FromString,
('protos.Admin', 'StopServer'): ServerStatus.FromString,
}
cardinalities = {
'GetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
'GetStatus': cardinality.Cardinality.UNARY_UNARY,
'SetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
'StartServer': cardinality.Cardinality.UNARY_UNARY,
'StopServer': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| 41.77907 | 1,109 | 0.74044 |
7a11c84dcc647f7a847b687bafc676e5c125037d | 4,002 | py | Python | tests/basic_test.py | c0fec0de/anycache | 1848d9b85cd11c16c271284e0911ba5628391835 | [
"Apache-2.0"
] | 13 | 2018-02-07T15:52:07.000Z | 2022-02-18T12:37:40.000Z | tests/basic_test.py | c0fec0de/anycache | 1848d9b85cd11c16c271284e0911ba5628391835 | [
"Apache-2.0"
] | 2 | 2018-09-23T15:43:32.000Z | 2021-09-21T00:34:55.000Z | tests/basic_test.py | c0fec0de/anycache | 1848d9b85cd11c16c271284e0911ba5628391835 | [
"Apache-2.0"
] | 1 | 2020-01-20T23:58:54.000Z | 2020-01-20T23:58:54.000Z | from pathlib import Path
from tempfile import mkdtemp
from nose.tools import eq_
from anycache import AnyCache
from anycache import get_defaultcache
from anycache import anycache
def test_basic():
"""Basic functionality."""
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 2), 6)
eq_(myfunc.callcount, 2)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 2)
assert get_defaultcache().size > 0
def test_cleanup():
"""Cleanup."""
ac = AnyCache()
cachedir = ac.cachedir
myfunc.callcount = 0
# first use
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 2), 6)
eq_(myfunc.callcount, 2)
eq_(myfunc(4, 2), 6)
eq_(myfunc.callcount, 2)
assert ac.size > 0
# clear
ac.clear()
eq_(ac.size, 0)
eq_(tuple(cachedir.glob("*")), tuple())
# second use
eq_(myfunc(4, 4), 8)
eq_(myfunc.callcount, 3)
assert ac.size > 0
# clear twice
ac.clear()
eq_(ac.size, 0)
ac.clear()
eq_(ac.size, 0)
def test_size():
"""Size."""
ac = AnyCache()
eq_(ac.size, 0)
eq_(len(tuple(ac.cachedir.glob("*.cache"))), 0)
eq_(myfunc(4, 5), 9)
eq_(len(tuple(ac.cachedir.glob("*.cache"))), 1)
size1 = ac.size
eq_(myfunc(4, 2), 6)
eq_(ac.size, 2 * size1)
eq_(len(tuple(ac.cachedir.glob("*.cache"))), 2)
def test_corrupt_cache():
"""Corrupted Cache."""
cachedir = Path(mkdtemp())
ac = AnyCache(cachedir=cachedir)
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
# corrupt cache
cachefilepath = list(cachedir.glob("*.cache"))[0]
with open(str(cachefilepath), "w") as cachefile:
cachefile.write("foo")
# repair
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 2)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 2)
# corrupt dep
depfilepath = list(cachedir.glob("*.dep"))[0]
with open(str(depfilepath), "w") as depfile:
depfile.write("foo")
# repair
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 3)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 3)
ac.clear()
def test_cachedir():
"""Corrupted Cache."""
cachedir = Path(mkdtemp())
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 1)
myfunc.callcount = 0
eq_(myfunc(4, 5), 9)
eq_(myfunc.callcount, 0)
| 21.063158 | 53 | 0.581709 |
7a11f415ef1c8a456c66c6b816eed5e347dea42d | 2,173 | py | Python | self-paced-labs/vertex-ai/vertex-pipelines/tfx/tfx_taxifare_tips/model_training/model_runner.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | 2 | 2022-01-06T11:52:57.000Z | 2022-01-09T01:53:56.000Z | self-paced-labs/vertex-ai/vertex-pipelines/tfx/tfx_taxifare_tips/model_training/model_runner.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | self-paced-labs/vertex-ai/vertex-pipelines/tfx/tfx_taxifare_tips/model_training/model_runner.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | """A run_fn method called by the TFX Trainer component."""
import os
import logging
from tfx import v1 as tfx
from tfx_taxifare_tips.model_training import defaults
from tfx_taxifare_tips.model_training import model_trainer
from tfx_taxifare_tips.model_training import model_exporter
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs. See
https://www.tensorflow.org/tfx/api_docs/python/tfx/v1/components/FnArgs.
"""
logging.info("Model Runner started...")
logging.info("fn_args: %s", fn_args)
logging.info("")
try:
log_dir = fn_args.model_run_dir
except KeyError:
log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), "logs")
hyperparameters = fn_args.hyperparameters
if not hyperparameters:
hyperparameters = {}
hyperparameters = defaults.update_hyperparameters(hyperparameters)
logging.info("Hyperparameter:")
logging.info(hyperparameters)
logging.info("")
logging.info("Model Runner executing model trainer...")
classifier = model_trainer.train(
data_accessor=fn_args.data_accessor,
train_data_dir=fn_args.train_files,
eval_data_dir=fn_args.eval_files,
tft_output_dir=fn_args.transform_output,
log_dir=log_dir,
hyperparameters=hyperparameters,
)
logging.info("Model Runner executing model evaluation...")
classifier = model_trainer.evaluate(
classifier=classifier,
data_accessor=fn_args.data_accessor,
eval_data_dir=fn_args.eval_files,
tft_output_dir=fn_args.transform_output,
hyperparameters=hyperparameters,
)
logging.info("Model Runner executing exporter...")
model_exporter.export_serving_model(
classifier=classifier,
serving_model_dir=fn_args.serving_model_dir,
raw_schema_location=fn_args.schema_path,
tft_output_dir=fn_args.transform_output,
)
logging.info("Model Runner completed.")
| 34.492063 | 83 | 0.703175 |
7a15cfeb891a079af5b1c667c60e264effefd0f3 | 4,602 | py | Python | main.py | Lorn-Hukka/academy-record-sender | 137ef9d1dff373662a046bc2a50d7dd5f4fad0ee | [
"MIT"
] | null | null | null | main.py | Lorn-Hukka/academy-record-sender | 137ef9d1dff373662a046bc2a50d7dd5f4fad0ee | [
"MIT"
] | null | null | null | main.py | Lorn-Hukka/academy-record-sender | 137ef9d1dff373662a046bc2a50d7dd5f4fad0ee | [
"MIT"
] | null | null | null | import random, os, string, subprocess, shutil, requests
from discord import Webhook, RequestsWebhookAdapter, Embed
from dotenv import dotenv_values
import argparse, colorama
from colorama import Fore
if __name__ == "__main__":
colorama.init(autoreset=True)
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="Display errors in console.", action="store_true", default=False)
args = parser.parse_args()
CONFIG = Settings()
app = App(CONFIG)
try:
app.run()
except Exception as e:
if args.verbose:
print(e)
exit(f'{Fore.RED}An Error occured program will exit.')
| 40.368421 | 140 | 0.526945 |
7a1607febbd34072033d2922ea13752164e46320 | 357 | py | Python | src/__init__.py | w9PcJLyb/GFootball | b271238bd0dc922787a0a9b984a8ae598cea2b2b | [
"Apache-2.0"
] | null | null | null | src/__init__.py | w9PcJLyb/GFootball | b271238bd0dc922787a0a9b984a8ae598cea2b2b | [
"Apache-2.0"
] | null | null | null | src/__init__.py | w9PcJLyb/GFootball | b271238bd0dc922787a0a9b984a8ae598cea2b2b | [
"Apache-2.0"
] | null | null | null | from .board import Board
from .slide import slide_action
from .corner import corner_action
from .control import control_action
from .penalty import penalty_action
from .throwin import throwin_action
from .kickoff import kickoff_action
from .goalkick import goalkick_action
from .freekick import freekick_action
from .without_ball import without_ball_action
| 32.454545 | 45 | 0.859944 |
7a1ab771a442031e1729dd19987c53780afb2187 | 3,447 | py | Python | tests/bin/test_tcex_list.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tests/bin/test_tcex_list.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tests/bin/test_tcex_list.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | """Bin Testing"""
# standard library
from importlib.machinery import SourceFileLoader
from importlib.util import module_from_spec, spec_from_loader
from typing import List
# third-party
from typer.testing import CliRunner
# dynamically load bin/tcex file
spec = spec_from_loader('app', SourceFileLoader('app', 'bin/tcex'))
tcex_cli = module_from_spec(spec)
spec.loader.exec_module(tcex_cli)
# get app from bin/tcex CLI script
app = tcex_cli.app
# get instance of typer CliRunner for test case
runner = CliRunner()
| 33.794118 | 83 | 0.642878 |
7a1abf4048e07e8bc9343e0dfe167284107c6c27 | 16,752 | py | Python | sdk/python/pulumi_aws/ec2/managed_prefix_list.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/managed_prefix_list.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/managed_prefix_list.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ManagedPrefixListArgs', 'ManagedPrefixList']
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_family: Optional[pulumi.Input[str]] = None,
entries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]]] = None,
max_entries: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if address_family is None and not opts.urn:
raise TypeError("Missing required property 'address_family'")
__props__['address_family'] = address_family
__props__['entries'] = entries
if max_entries is None and not opts.urn:
raise TypeError("Missing required property 'max_entries'")
__props__['max_entries'] = max_entries
__props__['name'] = name
__props__['tags'] = tags
__props__['arn'] = None
__props__['owner_id'] = None
__props__['version'] = None
super(ManagedPrefixList, __self__).__init__(
'aws:ec2/managedPrefixList:ManagedPrefixList',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.465347 | 168 | 0.621299 |
7a1b3ef788466c80c3a4e53bf1538ad6b91df51a | 1,847 | py | Python | scripts/ann_architectures/mnist/lenet5.py | qian-liu/snn_toolbox | 9693647f9b2421a4f1ab789a97cc19fd17781e87 | [
"MIT"
] | null | null | null | scripts/ann_architectures/mnist/lenet5.py | qian-liu/snn_toolbox | 9693647f9b2421a4f1ab789a97cc19fd17781e87 | [
"MIT"
] | null | null | null | scripts/ann_architectures/mnist/lenet5.py | qian-liu/snn_toolbox | 9693647f9b2421a4f1ab789a97cc19fd17781e87 | [
"MIT"
] | null | null | null | # coding=utf-8
"""LeNet for MNIST"""
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint, TensorBoard
from snntoolbox.parsing.utils import \
get_quantized_activation_function_from_string
from snntoolbox.utils.utils import ClampedReLU
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32') / 255.
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32') / 255.
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# nonlinearity = get_quantized_activation_function_from_string('relu_Q1.4')
# nonlinearity = ClampedReLU
nonlinearity = 'relu'
model = Sequential()
model.add(Conv2D(6, (5, 5), input_shape=(1, 28, 28), activation=nonlinearity))
model.add(MaxPooling2D())
model.add(Conv2D(16, (5, 5), activation=nonlinearity))
model.add(MaxPooling2D())
model.add(Dropout(0.5))
model.add(Conv2D(120, (5, 5), padding='same', activation=nonlinearity))
model.add(Flatten())
model.add(Dense(84, activation=nonlinearity))
model.add(Dense(10, activation='softmax'))
model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
path = '/home/rbodo/.snntoolbox/data/mnist/cnn/lenet5/keras/gradients'
checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_acc:.2f}.h5', 'val_acc')
gradients = TensorBoard(os.path.join(path, 'logs'), 2, write_grads=True)
model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
callbacks=[checkpoint, gradients])
score = model.evaluate(X_test, Y_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
model.save(os.path.join(path, '{:2.2f}.h5'.format(score[1]*100)))
| 33.581818 | 79 | 0.750947 |
7a1eab82419109b15e6baf92f1df08cd9c6fa14b | 856 | py | Python | class_exercises/using_numpy.py | Eddz7/astr-19 | 380c6b45762e0207cd6c237fa28a4d796b1aef94 | [
"MIT"
] | null | null | null | class_exercises/using_numpy.py | Eddz7/astr-19 | 380c6b45762e0207cd6c237fa28a4d796b1aef94 | [
"MIT"
] | 1 | 2022-03-31T17:57:17.000Z | 2022-03-31T17:57:17.000Z | class_exercises/using_numpy.py | Eddz7/astr-19 | 380c6b45762e0207cd6c237fa28a4d796b1aef94 | [
"MIT"
] | null | null | null | import numpy as np
x = 1.0 #define a float
y = 2.0 #define another float
#trigonometry
print(f"np.sin({x}) = {np.sin(x)}") #sin(x)
print(f"np.cos({x}) = {np.cos(x)}") #cos(x)
print(f"np.tan({x}) = {np.tan(x)}") #tan(x)
print(f"np.arcsin({x}) = {np.arcsin(x)}") #arcsin(x)
print(f"np.arccos({x}) = {np.arccos(x)}") #arccos(x)
print(f"np.arctan({x}) = {np.arctan(x)}") #arctan(x)
print(f"np.arctan2({x}) = {np.arctan2(x,y)}") #arctan(x/y)
print(f"np.rad2deg({x}) = {np.rad2deg(x)}") #convert rad to degree
#hyperbolic functions
print(f"np.sinh({x}) = {np.sinh(x)}") #sinh(x)
print(f"np.cosh({x}) = {np.cosh(x)}") #cosh(x)
print(f"np.tanh({x}) = {np.tanh(x)}") #tanh(x)
print(f"np.arcsinh({x}) = {np.arcsinh(x)}") #arcsinh(x)
print(f"np.arccosh({x}) = {np.arccosh(x)}") #arccosh(x)
print(f"np.arctanh({x}) = {np.arctanh(x)}") #arctanh(x) | 40.761905 | 67 | 0.580607 |
7a1ed1421848b1354b08c81026945785b3714d10 | 10,544 | py | Python | amy/workshops/migrations/0158_curriculum_workshoprequest.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 53 | 2015-01-10T17:39:19.000Z | 2019-06-12T17:36:34.000Z | amy/workshops/migrations/0158_curriculum_workshoprequest.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 1,176 | 2015-01-02T06:32:47.000Z | 2019-06-18T11:57:47.000Z | amy/workshops/migrations/0158_curriculum_workshoprequest.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 44 | 2015-01-03T15:08:56.000Z | 2019-06-09T05:33:08.000Z | # Generated by Django 2.1.2 on 2018-10-27 15:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
| 142.486486 | 860 | 0.71434 |
7a1ef72332e8f8f0f2089763d5744f430bdbbf1f | 2,365 | py | Python | log_parser/single_hand_efficiency_training_data.py | xinranhe/mahjong | 8cfc6234f9c80fd11267adf06b420b63f4c8d87d | [
"MIT"
] | null | null | null | log_parser/single_hand_efficiency_training_data.py | xinranhe/mahjong | 8cfc6234f9c80fd11267adf06b420b63f4c8d87d | [
"MIT"
] | null | null | null | log_parser/single_hand_efficiency_training_data.py | xinranhe/mahjong | 8cfc6234f9c80fd11267adf06b420b63f4c8d87d | [
"MIT"
] | null | null | null | import argparse
from mahjong.shanten import Shanten
from multiprocessing import Pool
import os
import sys
from log_parser.discard_prediction_parser import parse_discard_prediction
SHANTEN = Shanten()
INPUT_DATA_FOLDER = "data/raw"
OUTPUT_DATA_DIR = "data/single_hand_efficiency"
if __name__ == '__main__':
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('--start_date', default='')
parser.add_argument('--end_date', default='')
known_args, _ = parser.parse_known_args(sys.argv)
date_to_process = []
for date in os.listdir(INPUT_DATA_FOLDER):
if date >= known_args.start_date and date <= known_args.end_date:
date_to_process.append(date)
print date_to_process
generate_data(date_to_process[0])
# multithread generate training data
#p = Pool(NUM_THREADS)
#p.map(generate_data, date_to_process)
| 35.833333 | 124 | 0.60296 |
e12e6ff3f71515946f2d758523bf5e5b716bfa6b | 1,942 | py | Python | apps/portalbase/system/system__alerts/methodclass/system_alerts.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
] | null | null | null | apps/portalbase/system/system__alerts/methodclass/system_alerts.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
] | 74 | 2015-12-28T16:17:20.000Z | 2021-09-08T12:28:59.000Z | apps/portalbase/system/system__alerts/methodclass/system_alerts.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
] | null | null | null | from JumpScale import j
| 30.825397 | 89 | 0.581874 |
e12ea6090b7a3fc25058fb7f99f94d6f336e2f07 | 17,628 | py | Python | docs/pyqbdi.py | pbrunet/QBDI | 39a936b2efd000f0c5def0a8ea27538d7d5fab47 | [
"Apache-2.0"
] | 1 | 2019-10-01T08:32:41.000Z | 2019-10-01T08:32:41.000Z | docs/pyqbdi.py | pbrunet/QBDI | 39a936b2efd000f0c5def0a8ea27538d7d5fab47 | [
"Apache-2.0"
] | null | null | null | docs/pyqbdi.py | pbrunet/QBDI | 39a936b2efd000f0c5def0a8ea27538d7d5fab47 | [
"Apache-2.0"
] | null | null | null | # This file is only used to generate documentation
# VM class
# PyQBDI module functions
def alignedAlloc(size, align):
"""Allocate a block of memory of a specified sized with an aligned base address.
:param size: Allocation size in bytes.
:param align: Base address alignement in bytes.
:returns: Pointer to the allocated memory (as a long) or NULL in case an error was encountered.
"""
pass
def alignedFree():
"""
"""
pass
def allocateVirtualStack(ctx, stackSize):
"""Allocate a new stack and setup the GPRState accordingly.
The allocated stack needs to be freed with alignedFree().
:param ctx: GPRState which will be setup to use the new stack.
:param stackSize: Size of the stack to be allocated.
:returns: A tuple (bool, stack) where 'bool' is true if stack allocation was successfull. And 'stack' the newly allocated stack pointer.
"""
pass
def simulateCall(ctx, returnAddress, args):
"""Simulate a call by modifying the stack and registers accordingly.
:param ctx: GPRState where the simulated call will be setup. The state needs to point to a valid stack for example setup with allocateVirtualStack().
:param returnAddress: Return address of the call to simulate.
:param args: A list of arguments.
"""
pass
def getModuleNames():
""" Get a list of all the module names loaded in the process memory.
:returns: A list of strings, each one containing the name of a loaded module.
"""
pass
def getCurrentProcessMaps():
""" Get a list of all the memory maps (regions) of the current process.
:returns: A list of :py:class:`MemoryMap` object.
"""
pass
def readMemory(address, size):
"""Read a memory content from a base address.
:param address: Base address
:param size: Read size
:returns: Bytes of content.
.. warning::
This API is hazardous as the whole process memory can be read.
"""
pass
def writeMemory(address, bytes):
"""Write a memory content to a base address.
:param address: Base address
:param bytes: Memory content
.. warning::
This API is hazardous as the whole process memory can be written.
"""
pass
def decodeFloat(val):
    """Decode a float stored as a long.

    :param val: Long value.
    """
    pass
def encodeFloat(val):
    """Encode a float as a long.

    :param val: Float value.
    """
    pass
# Various objects
GPRState = None
""" GPRState object, a binding to :cpp:type:`QBDI::GPRState`
"""
FPRState = None
""" FPRState object, a binding to :cpp:type:`QBDI::FPRState`
"""
| 35.90224 | 271 | 0.645677 |
e13042781e2e380894da0aab1c6ec72861b3ce01 | 227 | py | Python | krkbipscraper/settings.py | pawmar/krkbipscraper | f2629bede33930cf91378caa7f2ee5d683cf1616 | [
"BSD-3-Clause"
] | null | null | null | krkbipscraper/settings.py | pawmar/krkbipscraper | f2629bede33930cf91378caa7f2ee5d683cf1616 | [
"BSD-3-Clause"
] | null | null | null | krkbipscraper/settings.py | pawmar/krkbipscraper | f2629bede33930cf91378caa7f2ee5d683cf1616 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Scrapy settings."""
BOT_NAME = 'krkbipscraper'
SPIDER_MODULES = ['krkbipscraper.spiders']
NEWSPIDER_MODULE = 'krkbipscraper.spiders'
ITEM_PIPELINES = ['krkbipscraper.pipelines.JsonWriterPipeline']
| 22.7 | 63 | 0.744493 |
e1311759e08a6c90f2dd14452c29543ae793ad35 | 1,797 | py | Python | sap hana/connections and query execution with python/script.py | Phelipe-Sempreboni/databases | 3be823db9029994d7b50d23d1830209276e5f40a | [
"MIT"
] | 1 | 2020-10-27T21:50:28.000Z | 2020-10-27T21:50:28.000Z | sap hana/connections and query execution with python/script.py | Phelipe-Sempreboni/databases | 3be823db9029994d7b50d23d1830209276e5f40a | [
"MIT"
] | null | null | null | sap hana/connections and query execution with python/script.py | Phelipe-Sempreboni/databases | 3be823db9029994d7b50d23d1830209276e5f40a | [
"MIT"
] | null | null | null | # Importao da biblioteca.
# Certifique-se de ter a biblioteca instalada.
import pyhdb
# Essa funo traz/chama outro arquivo que contm a senha, visando no deixar exposta na aplicao.
# Caso no queira utilizar esse mtodo e inserir diretamente a senha na conexo, exclua esse bloco e insira a senha diretamente no bloco (def connect) em (passoword).
# Realiza a conexo com o Sap Hana.
# Executa a query no Sap Hana.
if __name__ == '__main__':
connect() # Execuo a funo de conexo.
resultado = query_exec() # Executa a funo de execuo da query.
print (resultado) # Imprimi o resultado no terminal. | 41.790698 | 171 | 0.670006 |
e131340a4484b6722bf5a16704072d57bfdba8fe | 2,418 | py | Python | tests/mvae/distributions/test_von_mises_fisher.py | macio232/mvae | df3d5158ce29744e54b378ad663361e8b785632a | [
"Apache-2.0"
] | 53 | 2019-11-20T05:39:54.000Z | 2022-02-05T06:36:43.000Z | tests/mvae/distributions/test_von_mises_fisher.py | macio232/mvae | df3d5158ce29744e54b378ad663361e8b785632a | [
"Apache-2.0"
] | 8 | 2020-03-14T20:25:08.000Z | 2021-06-10T08:06:15.000Z | tests/mvae/distributions/test_von_mises_fisher.py | macio232/mvae | df3d5158ce29744e54b378ad663361e8b785632a | [
"Apache-2.0"
] | 10 | 2020-03-14T20:17:47.000Z | 2021-12-01T14:08:06.000Z | # Copyright 2019 Ondrej Skopek.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import torch
from mt.mvae import utils
from mt.mvae.distributions.von_mises_fisher import VonMisesFisher
dims = [2, 3, 4]
scales = [1e9, 1e5, 1e1, 1e0, 1e-5, 1e-15]
# This does not depend on the mean (loc), just it's dimensionality.
# This does not depend on the mean (loc), just it's dimensionality.
| 37.2 | 110 | 0.669975 |
e13147c692ddf6997325ddaffddf29246eba0b66 | 1,033 | py | Python | cello/download_resources.py | Ann-Holmes/CellO | bc2192a2d27e0859f6df885a6fc246e26e54a7b0 | [
"MIT"
] | 42 | 2019-05-14T19:04:38.000Z | 2022-03-06T12:57:00.000Z | cello/download_resources.py | Ann-Holmes/CellO | bc2192a2d27e0859f6df885a6fc246e26e54a7b0 | [
"MIT"
] | 16 | 2020-08-04T12:34:08.000Z | 2022-03-31T22:30:48.000Z | cello/download_resources.py | Ann-Holmes/CellO | bc2192a2d27e0859f6df885a6fc246e26e54a7b0 | [
"MIT"
] | 6 | 2019-05-13T15:57:03.000Z | 2022-03-18T02:17:05.000Z | """
Download CellO's resources files. These files include CellO's pre-trained
models, gene ID-to-symbol mappings, and training sets for training CellO's
models on new gene sets.
Authors: Matthew Bernstein <mbernstein@morgridge.org>
"""
import subprocess
from os.path import join
from shutil import which
| 31.30303 | 109 | 0.621491 |
e133fe625681b1837857d1c7c1998eeec6f05e88 | 7,755 | py | Python | mmtrack/models/mot/trackers/base_tracker.py | sht47/mmtracking | 5a25e418e9c598d1b576bce8702f5e156cbbefe7 | [
"Apache-2.0"
] | 12 | 2021-09-05T20:47:16.000Z | 2022-03-23T07:00:35.000Z | mmtrack/models/mot/trackers/base_tracker.py | hellock/mmtracking | a22a36b2055d80cf4a7a5ef3913849abb56defcb | [
"Apache-2.0"
] | 2 | 2021-09-06T13:20:09.000Z | 2022-01-13T05:36:14.000Z | mmtrack/models/mot/trackers/base_tracker.py | hellock/mmtracking | a22a36b2055d80cf4a7a5ef3913849abb56defcb | [
"Apache-2.0"
] | 1 | 2022-02-28T19:33:49.000Z | 2022-02-28T19:33:49.000Z | from abc import ABCMeta, abstractmethod
import torch
import torch.nn.functional as F
from addict import Dict
from mmtrack.models import TRACKERS
| 34.466667 | 79 | 0.52392 |
e134a13671522e1fa873cc9f15fcf37d47bcca9a | 3,675 | py | Python | test/conftest.py | pauldg/ro-crate-py | 695004f18175ca70b439534adece9e2242dca778 | [
"Apache-2.0"
] | null | null | null | test/conftest.py | pauldg/ro-crate-py | 695004f18175ca70b439534adece9e2242dca778 | [
"Apache-2.0"
] | null | null | null | test/conftest.py | pauldg/ro-crate-py | 695004f18175ca70b439534adece9e2242dca778 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2022 The University of Manchester, UK
# Copyright 2020-2022 Vlaams Instituut voor Biotechnologie (VIB), BE
# Copyright 2020-2022 Barcelona Supercomputing Center (BSC), ES
# Copyright 2020-2022 Center for Advanced Studies, Research and Development in Sardinia (CRS4), IT
# Copyright 2022 cole Polytechnique Fdrale de Lausanne, CH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
import shutil
import pytest
from rocrate.utils import get_norm_value
THIS_DIR = pathlib.Path(__file__).absolute().parent
TEST_DATA_NAME = 'test-data'
BASE_URL = 'https://w3id.org/ro/crate'
VERSION = '1.1'
LEGACY_VERSION = '1.0'
# pytest's default tmpdir returns a py.path object
| 37.5 | 98 | 0.71619 |
e134f405b60309ac638075a35a6b8ff83d2c5ab6 | 3,791 | py | Python | tests/unit/test_marathon.py | seomoz/roger-mesos-tools | 88b4cb3550a4b49d0187cfb5e6a22246ff6b9765 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_marathon.py | seomoz/roger-mesos-tools | 88b4cb3550a4b49d0187cfb5e6a22246ff6b9765 | [
"Apache-2.0"
] | 47 | 2016-05-26T22:09:56.000Z | 2018-08-08T20:33:39.000Z | tests/unit/test_marathon.py | seomoz/roger-mesos-tools | 88b4cb3550a4b49d0187cfb5e6a22246ff6b9765 | [
"Apache-2.0"
] | 3 | 2017-09-20T22:39:03.000Z | 2017-11-07T22:29:29.000Z | #!/usr/bin/python
from __future__ import print_function
import unittest
import json
import os
import sys
import requests
sys.path.insert(0, os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, "cli")))
from cli.marathon import Marathon
from cli.appconfig import AppConfig
from mockito import mock, when
# Test basic functionalities of MarathonValidator class
if __name__ == '__main__':
unittest.main()
| 36.104762 | 80 | 0.613031 |
e136e8225ad172a851846dc46f34389a3f760935 | 65 | py | Python | 1/0/10821/10821.py | chr0m3/boj-codes | d71d0a22d0a3ae62c225f382442461275f56fe8f | [
"MIT"
] | 3 | 2017-07-08T16:29:06.000Z | 2020-07-20T00:17:45.000Z | 1/0/10821/10821.py | chr0m3/boj-codes | d71d0a22d0a3ae62c225f382442461275f56fe8f | [
"MIT"
] | null | null | null | 1/0/10821/10821.py | chr0m3/boj-codes | d71d0a22d0a3ae62c225f382442461275f56fe8f | [
"MIT"
] | 2 | 2017-11-20T14:06:06.000Z | 2020-07-20T00:17:47.000Z | numbers = list(map(int, input().split(',')))
print(len(numbers))
| 21.666667 | 44 | 0.646154 |
e137881799720563759aa64b3e6bb8a63eb7afae | 496 | py | Python | Chapter13/server.py | Joustie/Mastering-GitLab-12 | 5ac4700791e4274ef3de825bc789c46142af403e | [
"MIT"
] | 40 | 2019-07-06T04:40:27.000Z | 2022-03-31T09:25:07.000Z | Chapter13/server.py | Joustie/Mastering-GitLab-12 | 5ac4700791e4274ef3de825bc789c46142af403e | [
"MIT"
] | 1 | 2019-08-03T17:52:08.000Z | 2020-12-16T06:31:53.000Z | Chapter13/server.py | Joustie/Mastering-GitLab-12 | 5ac4700791e4274ef3de825bc789c46142af403e | [
"MIT"
] | 50 | 2019-07-26T08:49:49.000Z | 2022-03-17T21:01:03.000Z | from flask import Flask, request
import json
app = Flask(__name__)
if __name__ == '__main__':
app.run() | 24.8 | 74 | 0.635081 |
e1380bef90ab2ac303d6b8ab31b603e3157ac287 | 4,349 | py | Python | tests/test_nlp4e.py | EDTAKE/IA | 2731e8ccb9d1b72f564c8c7a1c46a855760edfac | [
"MIT"
] | null | null | null | tests/test_nlp4e.py | EDTAKE/IA | 2731e8ccb9d1b72f564c8c7a1c46a855760edfac | [
"MIT"
] | null | null | null | tests/test_nlp4e.py | EDTAKE/IA | 2731e8ccb9d1b72f564c8c7a1c46a855760edfac | [
"MIT"
] | 1 | 2019-10-26T22:33:40.000Z | 2019-10-26T22:33:40.000Z | import pytest
import nlp
from nlp4e import Rules, Lexicon, Grammar, ProbRules, ProbLexicon, ProbGrammar, E0
from nlp4e import Chart, CYK_parse, subspan, astar_search_parsing, beam_search_parsing
# Clumsy imports because we want to access certain nlp.py globals explicitly, because
# they are accessed by functions within nlp.py
if __name__ == '__main__':
pytest.main()
| 31.977941 | 87 | 0.539894 |
e13a783a47008677ccb95568f58fe7dd6ad2e4f3 | 1,598 | py | Python | integration_test/ESI/cosim/loopback.py | Patosga/circt | ebf06c9aa5a4e8ae2485b52fd3c564eec7df5754 | [
"Apache-2.0"
] | null | null | null | integration_test/ESI/cosim/loopback.py | Patosga/circt | ebf06c9aa5a4e8ae2485b52fd3c564eec7df5754 | [
"Apache-2.0"
] | null | null | null | integration_test/ESI/cosim/loopback.py | Patosga/circt | ebf06c9aa5a4e8ae2485b52fd3c564eec7df5754 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import binascii
import random
import cosim
| 27.551724 | 72 | 0.6602 |
e13cf9268aca0f5ca5922030192f194f32c26039 | 48,282 | py | Python | pcdsdevices/targets.py | christina-pino/pcdsdevices | c696093b33b252a5fe6ca020063216b0d062aa61 | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-06-17T20:08:54.000Z | 2022-01-11T17:55:21.000Z | pcdsdevices/targets.py | christina-pino/pcdsdevices | c696093b33b252a5fe6ca020063216b0d062aa61 | [
"BSD-3-Clause-LBNL"
] | 757 | 2017-12-21T23:16:41.000Z | 2022-03-31T22:56:06.000Z | pcdsdevices/targets.py | christina-pino/pcdsdevices | c696093b33b252a5fe6ca020063216b0d062aa61 | [
"BSD-3-Clause-LBNL"
] | 38 | 2018-01-26T00:01:35.000Z | 2022-02-17T00:48:55.000Z | """
Module for common target stage stack configurations.
"""
import logging
import numpy as np
from datetime import datetime
import os
from ophyd.device import Device
import json
import jsonschema
import yaml
from itertools import chain
from pcdsdevices.epics_motor import _GetMotorClass
from .interface import tweak_base
logger = logging.getLogger(__name__)
def StageStack(mdict, name):
    """
    Convenience function for generating a stage stack device.

    Intended for bundling various motors into a single object. The function
    takes a dictionary of PVs and/or previously instantiated motor objects
    and bundles them together. If given a PV, the factory function attempts
    to determine the appropriate motor class from the given base PV; if this
    fails then it will attempt to create an EpicsMotor. Axes are given the
    same name as they are assigned in the provided dictionary. See examples
    below.

    Parameters
    ----------
    mdict : dictionary
        Dictionary of motor objects and or base PVs.

    name : str
        Name for the stack. Used to make a class name. No whitespace.

    Examples
    --------
    # Make a classic XYZ stack with two PVs and one motor object
    d = {'x': 'TST:MMS:01', 'y': 'TST:MMS:02', 'z': z_motor}
    xyz = StageStack(d, 'my_xyz')
    """
    cpts = {}
    for mname, mitem in mdict.items():
        # Check if this is a motor object or a PV string.
        # isinstance() is the idiomatic (and subclass-aware) replacement for
        # the original issubclass(type(mitem), Device).
        if isinstance(mitem, Device):  # Motor object
            cpts[mname] = mitem
        elif isinstance(mitem, str):  # PV
            mcls = _GetMotorClass(mitem)
            cpt = mcls(prefix=mitem, name=mname)
            cpts[mname] = cpt
        else:  # Something is wrong
            logger.warning("Unrecognized input {}. "
                           "Skipping axis {}.".format(mitem, mname))
    # Build a throwaway class holding the axes as attributes and return an
    # instance of it.
    cls_name = name + '_StageStack'
    cls = type(cls_name, (object,), cpts)
    dev = cls()
    return dev
# Internal class
def set_presets(self):
    """
    Interactively record the four corner positions of the sample grid.

    The user is walked through the four corners of the grid (top left,
    top right, bottom right, bottom left) using the `tweak` method, and
    the resulting (x, y) positions are stored as motor presets named
    ``x_<corner>`` / ``y_<corner>``. The corner coordinates should be
    taken from the middle of the four targets that enclose the grid.

    Raises
    ------
    AttributeError
        If no preset save location was configured with
        ``setup_preset_paths`` from ``pcdsdevices.interface``.

    Examples
    --------
    # Press q when ready to save the coordinates
    >>> xy.set_presets()
    """
    # Presets can only be written if a save location was configured.
    if not hasattr(self.x.presets, 'add_hutch'):
        raise AttributeError('No folder setup for motor presets. '
                             'Please add a location to save the positions '
                             'to, using setup_preset_paths from '
                             'pcdsdevices.interface to save the position.')
    corners = (('(0, 0) top left', 'top_left'),
               ('(0, M) top right', 'top_right'),
               ('(N, M) bottom right', 'bottom_right'),
               ('(N, 0) bottom left', 'bottom_left'))
    # First walk the user through all four corners, recording where the
    # motors end up after each tweak session.
    recorded = []
    for label, _ in corners:
        print('\nSetting coordinates for {} corner: \n'.format(label))
        self.tweak()
        recorded.append((self.x.position, self.y.position))
    # Then persist one x preset and one y preset per corner.
    for (_, suffix), (x_pos, y_pos) in zip(corners, recorded):
        self.x.presets.add_hutch(value=x_pos, name='x_' + suffix)
        self.y.presets.add_hutch(value=y_pos, name='y_' + suffix)
def get_presets(self):
    """
    Return the four saved corner presets, if any.

    Returns
    -------
    coord : tuple or None
        Four coordinate positions
        (top_left, top_right, bottom_right, bottom_left),
        or None (with a warning logged) if the presets are missing.

    Examples
    --------
    >>> xy.get_presets()
    ((0, 0),
     (9.99999999999998, 0),
     (9.99999999999998, -9.99999999999998),
     (-6.38378239159465e-16, -9.99999999999998))
    """
    try:
        x_presets = self.x.presets.positions
        y_presets = self.y.presets.positions
        return (
            # corner (0, 0)
            (x_presets.x_top_left.pos, y_presets.y_top_left.pos),
            # corner (0, M)
            (x_presets.x_top_right.pos, y_presets.y_top_right.pos),
            # corner (M, N)
            (x_presets.x_bottom_right.pos, y_presets.y_bottom_right.pos),
            # corner (N, 0)
            (x_presets.x_bottom_left.pos, y_presets.y_bottom_left.pos),
        )
    except Exception:
        logger.warning('Could not get presets, try to set_presets.')
def get_samples(self, path=None):
    """
    List the names of all sample grids currently saved on disk.

    Each regular file ``<name>.yml`` in the sample directory counts as
    one grid; the '.yml' suffix is stripped from the returned names.

    Parameters
    ----------
    path : str, optional
        Directory to scan. Defaults to the path configured on this
        object.

    Returns
    -------
    samples : list
        List of strings of all the sample names available.
    """
    search_dir = path or self._path
    with os.scandir(search_dir) as entries:
        # Directories are skipped; only plain files are samples.
        return [entry.name.split('.yml')[0]
                for entry in entries if entry.is_file()]
def load(self, sample_name, path=None):
    """
    Load a saved sample's parameters onto this object.

    Reads the sample's grid size and mapping coefficients from its yaml
    file and populates ``m_n_points``, ``coefficients`` and
    ``current_sample`` so subsequent computations use this sample.

    Parameters
    ----------
    sample_name : str
        Name of the sample to load.
    path : str, optional
        Directory where the sample yaml files live. Defaults to the
        path configured on this object.
    """
    base_dir = path or self._path
    sample_file = os.path.join(base_dir, sample_name + '.yml')
    rows, columns, coeffs = self.get_sample_map_info(
        str(sample_name), path=sample_file)
    self.m_n_points = rows, columns
    self.coefficients = coeffs
    # Make this sample the current one.
    self.current_sample = str(sample_name)
def get_sample_data(self, sample_name, path=None):
    """
    Return the stored information for a saved sample.

    Parameters
    ----------
    sample_name : str
        The sample name that we want the grid for. To see currently
        available samples call `mapped_grids`.
    path : str, optional
        Path to the `.yml` file. Defaults to the path defined when
        creating this object.

    Returns
    -------
    data : dictionary
        Dictionary of all the information for a saved sample, or an
        empty dictionary if the file is empty or the sample is missing.

    Raises
    ------
    yaml.YAMLError
        If the yaml file cannot be parsed.

    Examples
    --------
    >>> get_sample('sample1')
    {'time_created': '2021-01-06 11:43:40.701095',
     'top_left': [0, 0],
     ...}
    """
    sample_path = path or os.path.join(self._path, sample_name + '.yml')
    with open(sample_path) as sample_file:
        try:
            contents = yaml.safe_load(sample_file)
        except yaml.YAMLError as err:
            logger.error('Error when loading the samples yaml file: %s',
                         err)
            raise err
    if contents is None:
        # An empty yaml file parses to None.
        logger.warning('The file is empty, no sample grid yet. '
                       'Please use `save_presets` to insert grids '
                       'in the file.')
        return {}
    try:
        return contents[str(sample_name)]
    except Exception:
        logger.error('The sample %s might not exist in the file.',
                     sample_name)
        return {}
def get_sample_map_info(self, sample_name, path=None):
    """
    Return the grid size and mapping coefficients for a saved sample.

    Parameters
    ----------
    sample_name : str
        The name of the sample to get the mapped points from. To see the
        available mapped samples call the `mapped_samples()` method.
    path : str, optional
        Path to the sample's yaml file.

    Returns
    -------
    m_points, n_points, coeffs : tuple
        The number of rows, number of columns, and the projective
        transformation coefficients stored for this sample.

    Raises
    ------
    Exception
        If the sample cannot be found or its entry lacks the expected
        keys.
    """
    sample_path = path or os.path.join(self._path, sample_name + '.yml')
    sample = self.get_sample_data(str(sample_name), path=sample_path)
    if not sample:
        err_msg = ('This sample probably does not exist. Please call'
                   ' mapped_samples() to see which ones are available.')
        logger.error(err_msg)
        raise Exception(err_msg)
    try:
        return sample['M'], sample['N'], sample['coefficients']
    except Exception as ex:
        logger.error('Something went wrong when getting the '
                     'information for sample %s. %s', sample_name, ex)
        raise ex
def save_grid(self, sample_name, path=None):
    """
    Save a grid file of mapped points for a sample.

    This will save the date it was created, along with the sample name,
    the m and n points, the coordinates for the four corners, and the
    coefficients that will help get the x and y position on the grid.

    If an existing name for a sample is saved again, it will override
    the information for that sample file keeping the status of the targets.
    When overriding a sample, this is assuming that a re-calibration was
    needed for that sample, so in case we have already shot targets from
    that sample - we want to keep track of that.

    Parameters
    ----------
    sample_name : str
        A name to identify the sample grid, should be snake_case style.
    path : str, optional
        Path to the sample folder where this sample will be saved.
        Defaults to the path defined when creating this object.

    Raises
    ------
    jsonschema.exceptions.ValidationError
        If the assembled entry does not match ``self.sample_schema``.

    Examples
    --------
    >>> save_grid('sample_1')
    """
    path = path or self._path
    entry = os.path.join(path, sample_name + '.yml')
    now = str(datetime.now())
    # Default to empty corner lists when no presets have been saved yet.
    top_left, top_right, bottom_right, bottom_left = [], [], [], []
    if self.get_presets():
        top_left, top_right, bottom_right, bottom_left = self.get_presets()
    xx, yy = self.positions_x, self.positions_y
    flat_xx, flat_yy = [], []
    if xx and yy:
        flat_xx = [float(x) for x in xx]
        flat_yy = [float(y) for y in yy]
        # add False to each target to indicate they
        # have not been shot yet
        flat_xx = [{"pos": x, "status": False} for x in flat_xx]
        flat_yy = [{"pos": y, "status": False} for y in flat_yy]
    m_points, n_points = self.m_n_points
    coefficients = self.coefficients
    data = {sample_name: {"time_created": now,
                          "top_left": list(top_left),
                          "top_right": list(top_right),
                          "bottom_right": list(bottom_right),
                          "bottom_left": list(bottom_left),
                          "M": m_points,  # number of rows
                          "N": n_points,  # number of columns
                          "coefficients": coefficients,
                          "xx": flat_xx,
                          "yy": flat_yy}}
    # Validate the new entry against the sample schema before touching disk.
    try:
        jsonschema.validate(data[sample_name], self.sample_schema)
    except jsonschema.exceptions.ValidationError as err:
        logger.warning('Invalid input: %s', err)
        raise err
    # entry = os.path.join(path, sample_name + '.yml')
    # if this is an existing file, overrite the info but keep the statuses
    if os.path.isfile(entry):
        with open(entry) as sample_file:
            yaml_dict = yaml.safe_load(sample_file)
            sample = yaml_dict[sample_name]
            # when overriding the same sample, this is assuming that a
            # re-calibration was done - so keep the previous statuses.
            temp_xx = sample['xx']
            temp_yy = sample['yy']
            temp_x_status = [i['status'] for i in temp_xx]
            temp_y_status = [i['status'] for i in temp_yy]
            # update the current data statuses with previous ones
            for xd, status in zip(data[sample_name]['xx'], temp_x_status):
                xd.update((k, status)
                          for k, v in xd.items() if k == 'status')
            for yd, status in zip(data[sample_name]['yy'], temp_y_status):
                yd.update((k, status)
                          for k, v in yd.items() if k == 'status')
            # NOTE(review): yaml_dict is updated here but only `data` is
            # dumped below. Since each sample lives in its own file the
            # end result is the same, but the update looks redundant —
            # confirm whether yaml_dict was meant to be dumped instead.
            yaml_dict.update(data)
        with open(entry, 'w') as sample_file:
            yaml.safe_dump(data, sample_file,
                           sort_keys=False, default_flow_style=False)
    else:
        # create a new file
        with open(entry, 'w') as sample_file:
            yaml.safe_dump(data, sample_file,
                           sort_keys=False, default_flow_style=False)
def reset_statuses(self, sample_name, path=None):
    """
    Reset all target statuses for a sample back to `False` (not shot).

    Parameters
    ----------
    sample_name : str
        A name to identify the sample grid, should be snake_case style.
    path : str, optional
        Path to the `.yml` file. Defaults to the path defined when
        creating this object.

    Raises
    ------
    ValueError
        If the sample name cannot be found in the file.
    """
    sample_path = path or os.path.join(self._path, sample_name + '.yml')
    with open(sample_path) as sample_file:
        yaml_dict = yaml.safe_load(sample_file) or {}
    sample = yaml_dict.get(sample_name)
    if not sample:
        raise ValueError('Could not find this sample name in the file:'
                         f' {sample}')
    # Flip every existing 'status' flag back to False on both axes.
    for axis_key in ('xx', 'yy'):
        for target in sample.get(axis_key):
            if 'status' in target:
                target['status'] = False
    yaml_dict[sample_name].update(sample)
    with open(sample_path, 'w') as sample_file:
        yaml.safe_dump(yaml_dict, sample_file,
                       sort_keys=False, default_flow_style=False)
def map_points(self, snake_like=True, top_left=None, top_right=None,
               bottom_right=None, bottom_left=None, m_rows=None,
               n_columns=None):
    """
    Map all sample positions of a quadrilateral grid in 2-d coordinates.

    Given the 4 corner coordinates of the grid (defaulting to the saved
    presets) and the numbers of rows and columns, compute the physical
    (x, y) position of every target on the grid.

    Parameters
    ----------
    snake_like : bool
        Indicates if the points should be saved in a snake-like pattern.
    top_left : tuple, optional
        (x, y) coordinates of the top left corner
    top_right : tuple, optional
        (x, y) coordinates of the top right corner
    bottom_right : tuple, optional
        (x, y) coordinates of the bottom right corner
    bottom_left : tuple, optional
        (x, y) coordinates of the bottom left corner
    m_rows : int, optional
        Number of rows the grid has.
    n_columns : int, optional
        Number of columns the grid has.

    Returns
    -------
    xx, yy : tuple
        Tuple of two lists with all mapped points for x and y positions
        in the grid.
    """
    # Fall back to the stored presets for any corner not given
    # (get_presets is only consulted when needed).
    top_left = top_left or self.get_presets()[0]
    top_right = top_right or self.get_presets()[1]
    bottom_right = bottom_right or self.get_presets()[2]
    bottom_left = bottom_left or self.get_presets()[3]
    if any(v is None for v in [top_left, top_right, bottom_right,
                               bottom_left]):
        raise ValueError('Could not get presets, make sure you set presets'
                         ' first using the `set_presets` method.')
    rows = m_rows or self.m_n_points[0]
    columns = n_columns or self.m_n_points[1]
    # Projective-transform coefficients mapping the unit square onto the
    # physical quadrilateral.
    a_coeffs, b_coeffs = mesh_interpolation(top_left, top_right,
                                            bottom_right, bottom_left)
    self.coefficients = a_coeffs.tolist() + b_coeffs.tolist()
    xx, yy = get_unit_meshgrid(m_rows=rows, n_columns=columns)
    # Convert every logical grid point to its physical position.
    pairs = [convert_to_physical(a_coeffs=a_coeffs, b_coeffs=b_coeffs,
                                 logic_x=lx, logic_y=ly)
             for row_x, row_y in zip(xx, yy)
             for lx, ly in zip(row_x, row_y)]
    x_points = [p[0] for p in pairs]
    y_points = [p[1] for p in pairs]
    if snake_like:
        x_points = snake_grid_list(
            np.array(x_points).reshape(rows, columns))
        y_points = snake_grid_list(
            np.array(y_points).reshape(rows, columns))
    self.positions_x = x_points
    self.positions_y = y_points
    return x_points, y_points
def is_target_shot(self, m, n, sample=None, path=None):
    """
    Check whether the target at row m, column n has been shot.

    Parameters
    ----------
    m : int
        Row value of the target (1-based).
    n : int
        Column value of the target (1-based).
    sample : str, optional
        Name of the sample to check. Defaults to the currently loaded
        sample.
    path : str, optional
        Sample path. Defaults to the current sample's path.

    Returns
    -------
    is_shot : bool or None
        The stored status flag of the target, or None if it could not
        be found.
    """
    sample_name = sample or self.current_sample
    sample_path = path or self.current_sample_path
    x, _ = self.compute_mapped_point(m_row=m, n_column=n,
                                     sample_name=sample_name,
                                     path=sample_path)
    xx = self.get_sample_data(sample_name).get('xx')
    if xx is None:
        return None
    # Match on the x position of the target.
    # NOTE(review): this assumes x positions are unique across the grid;
    # duplicated x values would return the first match.
    for target in xx:
        if target['pos'] == x:
            return target['status']
    return None
def compute_mapped_point(self, m_row, n_column, sample_name=None,
                         path=None, compute_all=False):
    """
    For a given sample, compute the x, y position(s) on the grid.

    Parameters
    ----------
    m_row : int
        Row value of the point we want the position for (1-based).
    n_column : int
        Column value of the point we want the position for (1-based).
    sample_name : str, optional
        The name of the sample to get the mapped points from. Defaults
        to the currently loaded sample. To see the available mapped
        samples call the `mapped_samples()` method.
    path : str, optional
        Path to the samples yaml file.
    compute_all : bool, optional
        If `True` all the point positions will be computed for this
        sample.

    Returns
    -------
    x, y : tuple
        The x, y position for the (m, n) location, or two lists with
        all positions when `compute_all` is True.

    Raises
    ------
    ValueError
        If no sample is given/loaded or required values are missing.
    IndexError
        If the indices are out of range or not 1-based.
    """
    path = path or self._path
    sample_name = sample_name or self.current_sample
    if sample_name is None or sample_name == '':
        raise ValueError(
            'Please make sure you provide a sample name or use load()')
    # Prefer the already-loaded sample parameters; otherwise read them
    # from the sample's yaml file.
    if self.current_sample != '':
        m_points, n_points = self.m_n_points
        coeffs = self.coefficients
    else:
        entry = os.path.join(path, sample_name + '.yml')
        m_points, n_points, coeffs = self.get_sample_map_info(
            str(sample_name), path=entry)
    if any(v is None for v in [m_points, n_points, coeffs]):
        raise ValueError('Some values are empty, please check the sample '
                         f'{sample_name} in the has the M and N values as '
                         'well as coefficients saved')
    if (m_row > m_points) or (n_column > n_points):
        raise IndexError('Index out of range, make sure the m and n values'
                         f' are between ({m_points, n_points})')
    # BUG FIX: the original used `(m_row or n_column) == 0`, which only
    # triggers when both are 0 (or m_row is 0 and n_column is 0/falsy).
    # e.g. m_row=0, n_column=5 slipped through and silently wrapped to
    # the last row via negative indexing. Check each index explicitly.
    if m_row < 1 or n_column < 1:
        raise IndexError('Please start at 1, 1, as the initial points.')
    xx_origin, yy_origin = get_unit_meshgrid(m_rows=m_points,
                                             n_columns=n_points)
    # The first 4 coefficients are the alpha terms, the last 4 the betas.
    a_coeffs = coeffs[:4]
    b_coeffs = coeffs[4:]
    if not compute_all:
        logic_x = xx_origin[m_row - 1][n_column - 1]
        logic_y = yy_origin[m_row - 1][n_column - 1]
        x, y = convert_to_physical(a_coeffs, b_coeffs, logic_x, logic_y)
        return x, y
    # compute all points
    x_points, y_points = [], []
    for rowx, rowy in zip(xx_origin, yy_origin):
        for lx, ly in zip(rowx, rowy):
            px, py = convert_to_physical(a_coeffs=a_coeffs,
                                         b_coeffs=b_coeffs,
                                         logic_x=lx, logic_y=ly)
            x_points.append(px)
            y_points.append(py)
    return x_points, y_points
def move_to_sample(self, m, n):
    """
    Move the x, y motors to the (m, n) target of the current sample.

    Given m (row) and n (column), compute the x and y positions based
    on the current sample's parameters (see `current_sample`) and move
    the x and y motors there. Does nothing if no sample is loaded.

    Parameters
    ----------
    m : int
        Indicates the row on the grid.
    n : int
        Indicates the column on the grid.
    """
    if not self.current_sample:
        return
    x_pos, y_pos = self.compute_mapped_point(m_row=m, n_column=n)
    self.x.mv(x_pos)
    self.y.mv(y_pos)
def move_to(self, sample, m, n):
    """
    Move the x, y motors to the (m, n) target of a given sample.

    Given m (row) and n (column), compute the x and y positions based
    on the named sample's parameters and move the x and y motors there.

    Parameters
    ----------
    sample : str
        Name of the sample grid to use.
    m : int
        Indicates the row on the grid.
    n : int
        Indicates the column on the grid.
    """
    sample_file = os.path.join(self._path, sample + '.yml')
    x_pos, y_pos = self.compute_mapped_point(m_row=m, n_column=n,
                                             sample_name=sample,
                                             path=sample_file)
    self.x.mv(x_pos)
    self.y.mv(y_pos)
def set_status(self, m, n, status=False, sample_name=None, path=None):
    """
    Set the shot status flag for the target at row m, column n.

    Parameters
    ----------
    m : int
        Indicates the row number starting at 1.
    n : int
        Indicates the column number starting at 1.
    status : bool, optional
        New status flag for the target.
        NOTE(review): the original docstring said `False` means shot and
        `True` means available, which contradicts `is_target_shot`
        (where the stored flag is returned as "is shot") — confirm the
        intended polarity.
    sample_name : str, optional
        Sample to update. Defaults to the currently loaded sample.
    path : str, optional
        Path to the sample's yaml file.

    Raises
    ------
    IndexError
        If the indices are out of range or not 1-based.
    ValueError
        If the sample name cannot be found in the file.
    """
    assert isinstance(status, bool)
    sample_name = sample_name or self.current_sample
    path = path or os.path.join(self._path, sample_name + '.yml')
    m_points, n_points = self.m_n_points
    if (m > m_points) or (n > n_points):
        raise IndexError('Index out of range, make sure the m and n values'
                         f' are between ({m_points, n_points})')
    # BUG FIX: the original used `(m or n) == 0`, which lets e.g. m=0,
    # n=5 through and silently wraps to the last entry via negative
    # indexing. Check each index explicitly.
    if m < 1 or n < 1:
        raise IndexError('Please start at 1, 1, as the initial points.')
    with open(path) as sample_file:
        yaml_dict = yaml.safe_load(sample_file) or {}
    sample = yaml_dict.get(sample_name)
    if not sample:
        raise ValueError('Could not find this sample name in the file:'
                         f' {sample}')
    xx = sample['xx']
    yy = sample['yy']
    # Positions are stored flattened; pick the reference positions for
    # this column (x) and row (y).
    n_pos = xx[n - 1]['pos']
    m_pos = yy[m - 1]['pos']
    # Update every entry sharing that position.
    # BUG FIX: the original yy loop iterated `xd.items()` (leaked from
    # the xx loop) instead of the current y entry; it only worked by
    # accident because all entries share the same keys.
    for xd in xx:
        if xd.get('pos') == n_pos and 'status' in xd:
            xd['status'] = status
    for yd in yy:
        if yd.get('pos') == m_pos and 'status' in yd:
            yd['status'] = status
    yaml_dict[sample_name].update(sample)
    with open(path, 'w') as sample_file:
        yaml.safe_dump(yaml_dict, sample_file,
                       sort_keys=False, default_flow_style=False)
def mesh_interpolation(top_left, top_right, bottom_right, bottom_left):
    """
    Compute bilinear mapping coefficients for an arbitrary quadrilateral.

    Reference: https://www.particleincell.com/2012/quad-interpolation/

    We look for a map (x, y) = f(l, m), with l and m in [0, 1], that
    sends the logical unit square onto the physical quad so that
    f(0, 0), f(1, 0), f(1, 1) and f(0, 1) land on the four supplied
    corners.  In the logical coordinates the polygon morphs into a
    square regardless of its physical form.  The map is assumed
    bilinear:

        x = alpha_1 + alpha_2*l + alpha_3*m + alpha_4*l*m
        y = beta_1  + beta_2*l  + beta_3*m  + beta_4*l*m

    Evaluating these expressions at the four corners yields a 4x4
    linear system (one per axis) whose solutions are the alpha and
    beta coefficients.

    Parameters
    ----------
    top_left : tuple
        (x, y) coordinates of the top left corner
    top_right : tuple
        (x, y) coordinates of the top right corner
    bottom_right : tuple
        (x, y) coordinates of the bottom right corner
    bottom_left : tuple
        (x, y) coordinates of the bottom left corner

    Returns
    -------
    a_coeffs, b_coeffs : tuple
        Alpha and beta coefficients of the projective transformation,
        used afterwards to recover the physical x and y values.
    """
    corners = (top_left, top_right, bottom_right, bottom_left)
    # Bilinear basis (1, l, m, l*m) evaluated at the unit-square corners
    # (0, 0), (1, 0), (1, 1), (0, 1) -- the entire logical point space.
    basis = np.array([[1, 0, 0, 0],
                      [1, 1, 0, 0],
                      [1, 1, 1, 1],
                      [1, 0, 1, 0]])
    # Physical x and y values of the current grid's four corners.
    px = np.array([corner[0] for corner in corners])
    py = np.array([corner[1] for corner in corners])
    a_coeffs = np.linalg.solve(basis, px)
    b_coeffs = np.linalg.solve(basis, py)
    return a_coeffs, b_coeffs
def get_unit_meshgrid(m_rows, n_columns):
    """
    Build the logical (unit-square) meshgrid for an m x n sampling grid.

    Regardless of the physical form of the polygon, interpolation is
    performed on a unit square: n_columns evenly spaced values in
    [0, 1] along x and m_rows along y.

    Parameters
    ----------
    m_rows : int
        Number of rows our grid has.
    n_columns : int
        Number of columns our grid has.

    Returns
    -------
    list of numpy.ndarray
        ``np.meshgrid(xx, yy)``: two coordinate arrays, each of shape
        (m_rows, n_columns).
    """
    # np.linspace replaces the hand-rolled spacing arithmetic and,
    # unlike the original ``1 / (n - 1)`` step computation, does not
    # divide by zero when a dimension has a single point.
    xx = np.linspace(0.0, 1.0, n_columns)
    yy = np.linspace(0.0, 1.0, m_rows)
    return np.meshgrid(xx, yy)
def convert_to_physical(a_coeffs, b_coeffs, logic_x, logic_y):
"""
Convert to physical coordinates from logical coordinates.
Parameters
----------
a_coeffs : array
Perspective transformation coefficients for alpha.
b_coeffs : array
Perspective transformation coefficients for beta.
logic_x : float
Logical point in the x direction.
logic_y : float
Logical point in the y direction.
Returns
-------
x, y : tuple
The x and y physical values on the specified grid.
"""
# x = a(1) + a(2)*l + a(3)*m + a(4)*l*m
x = (a_coeffs[0] + a_coeffs[1] * logic_x + a_coeffs[2]
* logic_y + a_coeffs[3] * logic_x * logic_y)
# y = b(1) + b(2)*l + b(3)*m + b(4)*l*m
y = (b_coeffs[0] + b_coeffs[1] * logic_x +
b_coeffs[2] * logic_y + b_coeffs[3] * logic_x * logic_y)
return x, y
def snake_grid_list(points):
    """
    Flatten a grid into a snake-pattern (boustrophedon) coordinate list.

    Even-indexed rows are kept left-to-right and odd-indexed rows are
    reversed, e.g. ``[[1, 2], [3, 4]] -> [1.0, 2.0, 4.0, 3.0]``.

    Parameters
    ----------
    points : array
        Array containing the grid points for an axis with shape MxN.

    Returns
    -------
    flat_points : list
        List of all the grid points following a snake-like pattern.
        Values are plain ``float`` (not ``numpy.float64``) so they can
        easily be saved in the YAML sample files.
    """
    flat_points = []
    for i, row in enumerate(points):
        # Reverse every other row to obtain the serpentine ordering.
        ordered = row if i % 2 == 0 else row[::-1]
        flat_points.extend(float(v) for v in ordered)
    return flat_points
| 35.579956 | 79 | 0.558096 |
e13d3df96caed4ad7bea9f68e21a31547457cf49 | 1,564 | py | Python | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2019-01-13T09:16:31.000Z | 2019-02-15T03:30:28.000Z | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | null | null | null | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2020-03-08T01:58:25.000Z | 2020-12-20T10:34:54.000Z | #!/usr/bin/env python
#
# time
#
# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import common
from samba.net import Net
from samba.netcmd import (
Command,
)
| 32.583333 | 85 | 0.710997 |
e13edb5a04062ed656b823c80283871afa60af92 | 900 | py | Python | tests/job/test_redis.py | ulule/bokchoy | 58afaf325ce275edf5c4a955379afb1cc5eb5de3 | [
"MIT"
] | null | null | null | tests/job/test_redis.py | ulule/bokchoy | 58afaf325ce275edf5c4a955379afb1cc5eb5de3 | [
"MIT"
] | null | null | null | tests/job/test_redis.py | ulule/bokchoy | 58afaf325ce275edf5c4a955379afb1cc5eb5de3 | [
"MIT"
] | null | null | null | import unittest
import redis
import socket
import pytest
from bokchoy.conductors.dummy import DummyConductor
from bokchoy.results.redis import RedisResult
from bokchoy.serializers.json import JSONSerializer
from exam import fixture
from .base import JobTests
requires_redis = pytest.mark.skipif(
not redis_is_available(),
reason="requires redis search server running")
| 21.95122 | 77 | 0.728889 |
e13fba4b45b4ccda568c26a9f752c38c0cf1cb17 | 97 | py | Python | venv/lib/python3.8/site-packages/pip/_internal/network/__init__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/pip/_internal/network/__init__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/pip/_internal/network/__init__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020
# Author: xiaoweixiang
"""Contains purely network-related utilities.
"""
| 16.166667 | 45 | 0.71134 |
e13fcadccf45c68be598d453263bc3fd7d573b02 | 3,004 | py | Python | Constants.py | micv-dev/DeepKubeGPUCluster | b1f674ea3c251a5287ee83d582b193248e04f9d6 | [
"Apache-2.0"
] | 2 | 2021-01-22T05:56:40.000Z | 2021-07-03T17:50:49.000Z | Constants.py | micv-dev/DeepKubeGPUCluster | b1f674ea3c251a5287ee83d582b193248e04f9d6 | [
"Apache-2.0"
] | null | null | null | Constants.py | micv-dev/DeepKubeGPUCluster | b1f674ea3c251a5287ee83d582b193248e04f9d6 | [
"Apache-2.0"
] | null | null | null | DEFAULT_KUBE_VERSION=1.14
KUBE_VERSION="kubeVersion"
USER_ID="userId"
DEFAULT_USER_ID=1
CLUSTER_NAME="clusterName"
CLUSTER_MASTER_IP="masterHostIP"
CLUSTER_WORKER_IP_LIST="workerIPList"
FRAMEWORK_TYPE= "frameworkType"
FRAMEWORK_VERSION="frameworkVersion"
FRAMEWORK_RESOURCES="frameworkResources"
FRAMEWORK_VOLUME_SIZE= "storageVolumeSizegb"
FRAMEWORK_ASSIGN_DPU_TYPE= "dpuType"
FRAMEWORK_ASSIGN_DPU_COUNT= "count"
FRAMEWORK_INSTANCE_COUNT="instanceCount"
FRAMEWORK_SPEC="spec"
FRAMEWORK_IMAGE_NAME="imageName"
FRAMEWORK_DPU_ID="dpuId"
FRAMEWORK_DPU_COUNT="count"
CLUSTER_ID="clusterId"
FRAMEWORK_DEFAULT_PVC="/home/user/"
DEFAULT_FRAMEWORK_TYPE="POLYAXON"
DEFAULT_FRAMEWORK_VERSION="0.4.4"
POLYAXON_TEMPLATE="templates/polyaxon_config"
POLYAXON_CONFIG_FILE="/home/user/polyaxonConfig.yaml"
POLYAXON_DEFAULT_NAMESPACE="polyaxon"
TENSORFLOW_TEMPLATE="templates/tensorflow-gpu"
DEFAULT_PATH="/home/user/"
##########Cluster Info####################
POD_IP="podIp"
POD_STATUS="podStatus"
POD_HOST_IP="hostIp"
##########End Of Cluster Info####################
PVC_MAX_ITERATIONS=50
SLEEP_TIME=5
GLUSTER_DEFAULT_MOUNT_PATH="/volume"
CONTAINER_VOLUME_PREFIX="volume"
MAX_RETRY_FOR_CLUSTER_FORM=10
##############Cluster Related ####################33
CLUSTER_NODE_READY_COUNT=60
CLUSTER_NODE_READY_SLEEP=6
CLUSTER_NODE_NAME_PREFIX="worker"
NO_OF_GPUS_IN_GK210_K80=2
POLYAXON_NODE_PORT_RANGE_START=30000
POLYAXON_NODE_PORT_RANGE_END=32767
DEFAULT_CIDR="10.244.0.0/16"
GFS_STORAGE_CLASS="glusterfs"
GFS_STORAGE_REPLICATION="replicate:2"
HEKETI_REST_URL="http://10.138.0.2:8080"
DEFAULT_VOLUME_MOUNT_PATH="/volume"
GLUSTER_DEFAULT_REP_FACTOR=2
POLYAXON_DEFAULT_HTTP_PORT=80
POLYAXON_DEFAULT_WS_PORT=1337
SUCCESS_MESSAGE_STATUS="SUCCESS"
ERROR_MESSAGE_STATUS="SUCCESS"
ROLE="role"
IP_ADDRESS="ipAddress"
INTERNAL_IP_ADDRESS="internalIpAddress"
ADD_NODE_USER_ID="hostUserId"
ADD_NODE_PASSWORD="password"
####Polyaxon GetClusterInfo###
QUOTA_NAME="quotaName"
QUOTA_USED="used"
QUOTA_LIMIT="limit"
DEFAULT_QUOTA="default"
VOLUME_NAME="volumeName"
MOUNT_PATH_IN_POD="volumePodMountPath"
VOLUME_TOTAL_SIZE="totalSize"
VOLUME_FREE="free"
NVIDIA_GPU_RESOURCE_NAME="requests.nvidia.com/gpu"
EXECUTOR="executor"
MASTER_IP="masterIP"
GPU_COUNT="gpuCount"
NAME="name"
KUBE_CLUSTER_INFO="kubeClusterInfo"
ML_CLUSTER_INFO="mlClusterInfo"
POLYAXON_DEFAULT_USER_ID="root"
POLYAXON_DEFAULT_PASSWORD="rootpassword"
POLYAXON_USER_ID="polyaxonUserId"
POLYAXON_PASSWORD="polyaxonPassword"
DEFAULT_DATASET_VOLUME_NAME="vol_f37253d9f0f35868f8e3a1d63e5b1915"
DEFAULT_DATASET_MOUNT_PATH="/home/user/dataset"
DEFAULT_CLUSTER_VOLUME_MOUNT_PATH="/home/user/volume"
DEFAULT_GLUSTER_SERVER="10.138.0.2"
DEFAULT_DATASET_VOLUME_SIZE="10Gi"
CLUSTER_VOLUME_MOUNT_PATH="volumeHostMountPath"
DATASET_VOLUME_MOUNT_POINT="dataSetVolumemountPointOnHost"
DATASET_VOLUME_MOUNT_PATH_IN_POD_REST= "volumeDataSetPodMountPoint"
DATASET_VOLUME_MOUNT_PATH_IN_POD="/dataset"
DYNAMIC_GLUSTERFS_ENDPOINT_STARTS_WITH="glusterfs-dynamic-" | 25.243697 | 67 | 0.831891 |
e13feb6e08fa5f3de107d84f4998b9cc0fdd3b93 | 1,582 | py | Python | mpcontribs-portal/mpcontribs/portal/urls.py | fraricci/MPContribs | 800e8fded594dce57807e7ef0ec8d3192ce54825 | [
"MIT"
] | null | null | null | mpcontribs-portal/mpcontribs/portal/urls.py | fraricci/MPContribs | 800e8fded594dce57807e7ef0ec8d3192ce54825 | [
"MIT"
] | null | null | null | mpcontribs-portal/mpcontribs/portal/urls.py | fraricci/MPContribs | 800e8fded594dce57807e7ef0ec8d3192ce54825 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.generic.base import RedirectView
from mpcontribs.portal import views
app_name = "mpcontribs_portal"
urlpatterns = [
url(r"^$", views.index, name="index"),
url(r"^healthcheck/?$", views.healthcheck, name="healthcheck"),
url(
r"^notebooks/(?P<nb>[A-Za-z0-9_\/]{3,}).html$",
views.notebooks,
name="notebooks",
),
url(r"^(?P<cid>[a-f\d]{24})/?$", views.contribution, name="contribution"),
# downloads
url(
r"^component/(?P<oid>[a-f\d]{24})$",
views.download_component,
name="download_component",
),
url(
r"^(?P<cid>[a-f\d]{24}).json.gz$",
views.download_contribution,
name="download_contribution",
),
# TODO .(?P<fmt>[a-z]{3})
url(
r"^(?P<project>[a-zA-Z0-9_]{3,}).json.gz$",
views.download_project,
name="download_project",
),
# redirects
url(r"^fe-co-v/?$", RedirectView.as_view(url="/swf/", permanent=False)),
url(r"^fe-co-v/dataset-01/?$", RedirectView.as_view(url="/swf/", permanent=False)),
url(
r"^boltztrap/?$",
RedirectView.as_view(url="/carrier_transport/", permanent=True),
),
url(
r"^Screeninginorganicpv/?$",
RedirectView.as_view(url="/screening_inorganic_pv/", permanent=False),
),
url(
r"^ScreeningInorganicPV/?$",
RedirectView.as_view(url="/screening_inorganic_pv/", permanent=False),
),
# default view
url(r"^[a-zA-Z0-9_]{3,}/?$", views.landingpage),
]
| 31.019608 | 87 | 0.584071 |
e1404018df8652fa89529ce0d2a499530d166df6 | 3,363 | py | Python | src/mp_api/dielectric/client.py | jmmshn/api | 5254a453f6ec749793639e4ec08bea14628c7dc3 | [
"BSD-3-Clause-LBNL"
] | null | null | null | src/mp_api/dielectric/client.py | jmmshn/api | 5254a453f6ec749793639e4ec08bea14628c7dc3 | [
"BSD-3-Clause-LBNL"
] | 159 | 2020-11-16T16:02:31.000Z | 2022-03-28T15:03:38.000Z | src/mp_api/dielectric/client.py | jmmshn/api | 5254a453f6ec749793639e4ec08bea14628c7dc3 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from typing import List, Optional, Tuple
from collections import defaultdict
from mp_api.core.client import BaseRester, MPRestError
import warnings
| 33.969697 | 106 | 0.600059 |
e1404a753371b136c19314c274ee0f8405dd2c32 | 1,598 | py | Python | docs/example/advanced/view.py | Kozea/Pynuts | f2eb1839f59d2e8a4ec96175726186e67f85c4b0 | [
"BSD-3-Clause"
] | 1 | 2016-06-16T15:31:30.000Z | 2016-06-16T15:31:30.000Z | docs/example/advanced/view.py | Kozea/Pynuts | f2eb1839f59d2e8a4ec96175726186e67f85c4b0 | [
"BSD-3-Clause"
] | null | null | null | docs/example/advanced/view.py | Kozea/Pynuts | f2eb1839f59d2e8a4ec96175726186e67f85c4b0 | [
"BSD-3-Clause"
] | null | null | null | from wtforms import TextField, IntegerField, PasswordField
from wtforms.ext.sqlalchemy.fields import (
QuerySelectField, QuerySelectMultipleField)
from wtforms.validators import Required
from pynuts.view import BaseForm
import database
from application import nuts
| 35.511111 | 76 | 0.682728 |
e1412f411269485acbe2ebcad67a9f18d2b335f9 | 330 | py | Python | scripts/extract_hit_upstreams.py | waglecn/helD_search | 2b77e81419b9929d5cf5ecc519f27cb381178b2c | [
"MIT"
] | null | null | null | scripts/extract_hit_upstreams.py | waglecn/helD_search | 2b77e81419b9929d5cf5ecc519f27cb381178b2c | [
"MIT"
] | null | null | null | scripts/extract_hit_upstreams.py | waglecn/helD_search | 2b77e81419b9929d5cf5ecc519f27cb381178b2c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from Bio import SeqIO
import os
genome = sys.argv[1]
in_aa = f'hits/{genome}.hits'
in_up = f'fa/{genome}.upstream'
hits = SeqIO.to_dict(SeqIO.parse(in_aa, 'fasta'))
raes = SeqIO.to_dict(SeqIO.parse(in_up, 'fasta'))
for k in hits.keys():
i = k.split('|')[1]
print(raes[i].format('fasta'))
| 17.368421 | 49 | 0.672727 |
e14130d3b319054f84f8b96b0e660e7e60ab2e53 | 11,674 | py | Python | homeassistant/components/airtouch4/climate.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 4 | 2021-07-11T09:11:00.000Z | 2022-02-27T14:43:50.000Z | homeassistant/components/airtouch4/climate.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/airtouch4/climate.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 3 | 2022-01-02T18:49:54.000Z | 2022-01-25T02:03:54.000Z | """AirTouch 4 component to control of AirTouch 4 Climate Devices."""
from __future__ import annotations
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_DIFFUSE,
FAN_FOCUS,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
AT_TO_HA_STATE = {
"Heat": HVAC_MODE_HEAT,
"Cool": HVAC_MODE_COOL,
"AutoHeat": HVAC_MODE_AUTO, # airtouch reports either autoheat or autocool
"AutoCool": HVAC_MODE_AUTO,
"Auto": HVAC_MODE_AUTO,
"Dry": HVAC_MODE_DRY,
"Fan": HVAC_MODE_FAN_ONLY,
}
HA_STATE_TO_AT = {
HVAC_MODE_HEAT: "Heat",
HVAC_MODE_COOL: "Cool",
HVAC_MODE_AUTO: "Auto",
HVAC_MODE_DRY: "Dry",
HVAC_MODE_FAN_ONLY: "Fan",
HVAC_MODE_OFF: "Off",
}
AT_TO_HA_FAN_SPEED = {
"Quiet": FAN_DIFFUSE,
"Low": FAN_LOW,
"Medium": FAN_MEDIUM,
"High": FAN_HIGH,
"Powerful": FAN_FOCUS,
"Auto": FAN_AUTO,
"Turbo": "turbo",
}
AT_GROUP_MODES = [HVAC_MODE_OFF, HVAC_MODE_FAN_ONLY]
HA_FAN_SPEED_TO_AT = {value: key for key, value in AT_TO_HA_FAN_SPEED.items()}
_LOGGER = logging.getLogger(__name__)
| 33.642651 | 91 | 0.668837 |
e1414f639d12d9584079f8b303441fd98b73dfdd | 772 | py | Python | giosgappsdk/giosg_api.py | mentholi/giosgapp-python-sdk | 2a5ea25e223dc4a88a32e917dd393cc9a07f9999 | [
"MIT"
] | null | null | null | giosgappsdk/giosg_api.py | mentholi/giosgapp-python-sdk | 2a5ea25e223dc4a88a32e917dd393cc9a07f9999 | [
"MIT"
] | null | null | null | giosgappsdk/giosg_api.py | mentholi/giosgapp-python-sdk | 2a5ea25e223dc4a88a32e917dd393cc9a07f9999 | [
"MIT"
] | null | null | null | import json
import requests
| 38.6 | 120 | 0.700777 |
e1416e342916d61944b1391ba364f72736a6b340 | 1,415 | py | Python | Pixelfonts/Delete duplicate components.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | 1 | 2022-01-09T04:28:36.000Z | 2022-01-09T04:28:36.000Z | Pixelfonts/Delete duplicate components.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | Pixelfonts/Delete duplicate components.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | #MenuTitle: Delete Duplicate Components
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Looks for duplicate components (same component, same x/y values) and keeps only one of them.
"""
Font = Glyphs.font
selectedLayers = Font.selectedLayers
Font.disableUpdateInterface()
for thisLayer in selectedLayers:
print "Components deleted in %s:" % thisLayer.parent.name,
process( thisLayer )
Font.enableUpdateInterface()
| 27.745098 | 128 | 0.743463 |
e141938b24307f066ff503fed7f111fa1bbefd00 | 3,317 | py | Python | src/structures/Errors.py | Xiddoc/ComPy | 7d26f95209d0615d7eb188fa02470ddae5311fca | [
"MIT"
] | null | null | null | src/structures/Errors.py | Xiddoc/ComPy | 7d26f95209d0615d7eb188fa02470ddae5311fca | [
"MIT"
] | 9 | 2022-02-23T10:32:44.000Z | 2022-03-27T17:55:43.000Z | src/structures/Errors.py | Xiddoc/ComPy | 7d26f95209d0615d7eb188fa02470ddae5311fca | [
"MIT"
] | null | null | null | """
Error classes, when needed for exceptions.
"""
from _ast import AST
from dataclasses import dataclass, field
from typing import Optional, Union
from src.compiler.Util import Util
| 31.894231 | 118 | 0.67561 |
e1419fb66f46497cc9f96ff1980d0c0ddc909d97 | 4,314 | py | Python | github/recorders/github/github_user_info_recorder.py | zvtvz/play-github | 30ad38ca88c1a57b2cec48b19ca31ffa28fa0154 | [
"MIT"
] | 2 | 2019-09-21T04:31:01.000Z | 2020-01-21T03:45:51.000Z | github/recorders/github/github_user_info_recorder.py | zvtvz/play-github | 30ad38ca88c1a57b2cec48b19ca31ffa28fa0154 | [
"MIT"
] | null | null | null | github/recorders/github/github_user_info_recorder.py | zvtvz/play-github | 30ad38ca88c1a57b2cec48b19ca31ffa28fa0154 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
from github.accounts.github_account import GithubAccount
from github.domain.github import GithubUser
from github.recorders.github.common import get_result
from zvdata.api import get_entities
from zvdata.domain import get_db_session
from zvdata.recorder import TimeSeriesDataRecorder
from zvdata.utils.time_utils import day_offset_today, now_pd_timestamp
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start', help='start_timestamp', default='2015-01-01')
parser.add_argument('--end', help='end_timestamp', default='2015-12-31')
args = parser.parse_args()
start = args.start
end = args.end
recorder = GithubUserInfoRecorder(start_timestamp=start, end_timestamp=end)
recorder.run()
| 38.176991 | 119 | 0.592721 |
e141a2ac84bf3c71baee17e1baf51d264eb93a13 | 94 | py | Python | pyEDAA/OutputFilter/__init__.py | edaa-org/pyEDAA.OutputFilter | ca602c9992b40df7bd117968c0dc333a4f16d255 | [
"Apache-2.0"
] | 1 | 2021-12-30T02:49:43.000Z | 2021-12-30T02:49:43.000Z | pyEDAA/OutputFilter/__init__.py | edaa-org/pyEDAA.OutputFilter | ca602c9992b40df7bd117968c0dc333a4f16d255 | [
"Apache-2.0"
] | null | null | null | pyEDAA/OutputFilter/__init__.py | edaa-org/pyEDAA.OutputFilter | ca602c9992b40df7bd117968c0dc333a4f16d255 | [
"Apache-2.0"
] | null | null | null |
from pyTooling.Decorators import export
__version__ = "0.1.0"
| 9.4 | 39 | 0.744681 |
e141a89f1384646896cf35e7b57e68052818e1a7 | 1,766 | py | Python | tut/app.py | Tyler9937/titanic-test | 6a5200558caf203ed1dc3de71a6c9b5d488f847a | [
"MIT"
] | null | null | null | tut/app.py | Tyler9937/titanic-test | 6a5200558caf203ed1dc3de71a6c9b5d488f847a | [
"MIT"
] | null | null | null | tut/app.py | Tyler9937/titanic-test | 6a5200558caf203ed1dc3de71a6c9b5d488f847a | [
"MIT"
] | null | null | null | # Importing needed libraries
import uuid
from decouple import config
from dotenv import load_dotenv
from flask import Flask, render_template, request, jsonify
from sklearn.externals import joblib
import traceback
import pandas as pd
import numpy as np
from flask_sqlalchemy import SQLAlchemy
# Saving DB var
DB = SQLAlchemy()
# Reads key value pair from .env
load_dotenv()
# Running function to create the app
def create_app():
'''
Used to initiate the app
'''
# saving flask(__name__) to var app
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
if __name__ == '__main__':
try:
port = int(sys.argv[1]) # This is for a command-line input
except:
port = 12345 # If you don't provide any port the port will be set to 12345
lr = joblib.load("model.pkl") # Load "model.pkl"
print ('Model loaded')
model_columns = joblib.load("model_columns.pkl") # Load "model_columns.pkl"
print ('Model columns loaded')
app.run(port=port, debug=True)
| 28.95082 | 86 | 0.623443 |
e143b369aa9fc5500990d0521c4867296c4568dc | 1,237 | py | Python | trainer.py | thedesertm/leapmotion_training_svm | 659a439be4209450b98d638e655ee025e5bd562b | [
"MIT"
] | null | null | null | trainer.py | thedesertm/leapmotion_training_svm | 659a439be4209450b98d638e655ee025e5bd562b | [
"MIT"
] | null | null | null | trainer.py | thedesertm/leapmotion_training_svm | 659a439be4209450b98d638e655ee025e5bd562b | [
"MIT"
] | null | null | null | import pandas as pd
import os
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
import pickle
BASE_PATH = os.path.join(os.getcwd() , "dataset")
df = None
i = 0
for file_name in os.listdir(BASE_PATH):
file_path = os.path.join(BASE_PATH , file_name)
print(file_path)
data_frame = pd.read_csv(file_path , header=None)
data_frame.pop(178)
data_frame.pop(0)
dat = pd.DataFrame({'result': [i for k in range(data_frame.shape[1])]})
data_frame = data_frame.join(dat)
if not df is None :
df = df.append(data_frame , ignore_index=True)
else:
df = data_frame
i += 1
scaler = StandardScaler()
y = df.pop("result")
scalled_data = scaler.fit_transform(df)
X_train, X_test, y_train, y_test = train_test_split(scalled_data , y, test_size = 0.20)
svclassifier = SVC(kernel='linear')
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
pickle.dump(svclassifier , open("classifier.pkl" , 'wb'))
pickle.dump(scaler , open("scaler.pkl" , 'wb')) | 31.717949 | 87 | 0.735651 |
e145c5c7a800878dc251c5025a3fb2b44ba71b0b | 6,266 | py | Python | 1 - Data Analysis/2_Analysis - Data Exploration.py | dkim319/NFL_Predictive_Model_v2 | 5884e10a681e2e34f54a2280c94d2f42fc442d17 | [
"CNRI-Python"
] | 1 | 2019-09-14T04:04:51.000Z | 2019-09-14T04:04:51.000Z | 1 - Data Analysis/2_Analysis - Data Exploration.py | dkim319/NFL_Predictive_Model_v2 | 5884e10a681e2e34f54a2280c94d2f42fc442d17 | [
"CNRI-Python"
] | null | null | null | 1 - Data Analysis/2_Analysis - Data Exploration.py | dkim319/NFL_Predictive_Model_v2 | 5884e10a681e2e34f54a2280c94d2f42fc442d17 | [
"CNRI-Python"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 20:21:23 2017
@author: DKIM
"""
import pandas as pd
import numpy as np
# required libraries loaded
import pandas as pd
import numpy as np
import matplotlib
matplotlib.style.use('ggplot')
import matplotlib.pyplot as plt
seednumber = 319
data = pd.read_csv('Data.csv')
# Initial dataset
print('Initial dataset dimensions')
print(data.shape)
target_year = 2017
print('Filter to only the training data')
orig_data = data[data['season'] <= target_year]
# Data Preprocessing
# replace any null values with 0
data = data.fillna(0)
# use one-hot coding to replace the favorite and underdog categorical variables
fav_team = pd.get_dummies(data['favorite'])
und_team = pd.get_dummies(data['underdog'])
# use a prefix to distinguish the two categorical variables
fav_team = fav_team.add_prefix('fav_')
und_team = und_team.add_prefix('und_')
# remove the original fields
data = data.drop('favorite', axis = 1)
data = data.drop('underdog', axis = 1)
# add the one-hot coded fields
data = pd.concat([data, fav_team], axis = 1)
data = pd.concat([data, und_team], axis = 1)
#print data.head(5)
#print(data.describe())
# split the dataset into training and testing datasets
data = data[data['season'] <= target_year]
data.reset_index()
print('Final dataset dimensions')
print(data.shape)
#statistics = data.describe()
#statistics.to_csv('stats.csv')
print('Review the distribution of the target variable')
print('Target variable is evenly distributed and is not skewed')
spread_by_year = data.groupby(['season'])['spreadflag'].mean()
print(spread_by_year)
corr_data = data.corr(method = 'pearson')
print('Review the correlation between the variables and the target variable')
print('Top 10 correlated variables')
print(corr_data['spreadflag'].sort_values(ascending=False).head(11))
print('Top 10 negatively correlated variables')
print(corr_data['spreadflag'].sort_values(ascending=True).head(10))
years = [2010,2011,2012,2013,2014,2015,2016,2017]
for x in years:
year_data = data[data['season'] == x]
year_data_corr = year_data.corr(method = 'pearson')
print('Top 10 correlated variables for the target variable, spreadflag, for the year ' + str(x))
print(year_data_corr['spreadflag'].sort_values(ascending=False).head(11))
print('')
print('Top 10 negatively correlated variables for the target variable, spreadflag, for the year ' + str(x))
print(year_data_corr['spreadflag'].sort_values(ascending=True).head(10))
print('')
# Plot favorite win % over spread
spread_agg = data.groupby(['spread'])['spreadflag'].mean()
spread_count = data.groupby(['spread'])['spreadflag'].count() / data.shape[0]
fig, axes = plt.subplots(2,1)
spread_agg_ax = spread_agg.plot(ax = axes[0])
spread_agg_ax.set_ylabel('favorite win %')
spread_agg_ax.set_title('Figure 1 - Spread')
spread_agg_figure = spread_agg_ax.get_figure()
spread_count_ax = spread_count.plot(kind = 'line',ax = axes[1])
spread_count_ax.set_ylabel('spread %')
spread_count_figure = spread_count_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 1 - spread_vis.png')
# Plot the favorite win % over total
total_agg = data.groupby(['total'])['spreadflag'].mean()
total_count = data.groupby(['total'])['spreadflag'].count() / data.shape[0]
fig, axes = plt.subplots(2,1)
total_agg_ax = total_agg.plot(ax = axes[0])
total_agg_ax.set_ylabel('favorite win %')
total_agg_ax.set_title('Figure 2 - Total')
total_agg_figure = total_agg_ax.get_figure()
total_count_ax = total_count.plot(kind = 'line',ax = axes[1])
total_count_ax.set_ylabel('total %')
total_count_figure = total_count_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 2 - total_vis.png')
# Check the Team over winning %
favorite_win_percent = orig_data.groupby(['favorite'])['spreadflag'].mean()
underdog_win_percent = 1 - orig_data.groupby(['underdog'])['spreadflag'].mean()
print('Top 10 Favorites by ATS percent')
print(favorite_win_percent.sort_values(ascending=False).head(10))
print('')
print('Top 10 Underdogs by ATS percent')
print(underdog_win_percent.sort_values(ascending=False).head(10))
print('')
# Plot the favorite win % over favorite's win record over last 5 and 10 games
fav_last_5_percent_vis_agg = data.groupby(['fav_last_5_percent'])['spreadflag'].mean()
fav_last_10_percent_vis_agg = data.groupby(['fav_last_10_percent'])['spreadflag'].mean()
fig, axes = plt.subplots(2,1)
fav_last_5_percent_vis_agg_ax = fav_last_5_percent_vis_agg.plot(ax = axes[0])
fav_last_5_percent_vis_agg_ax.set_ylabel('favorite win %')
fav_last_5_percent_vis_agg_ax.set_title('Figure 3a - Favorite Win % Last 5 Games')
fav_last_5_percent_vis_agg_figure = fav_last_5_percent_vis_agg_ax.get_figure()
fav_last_5_percent_vis_agg_figure.subplots_adjust(hspace=0.75)
fav_last_10_percent_vis_agg_ax = fav_last_10_percent_vis_agg.plot(kind = 'line',ax = axes[1])
fav_last_10_percent_vis_agg_ax.set_ylabel('favorite win %')
fav_last_10_percent_vis_agg_ax.set_title('Figure 3b - Favorite Win % Last 10 Games')
fav_last_10_percent_vis_count_figure = fav_last_10_percent_vis_agg_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 3 - fav_last_5_percent.png')
# Plot the favorite win % over underdog's win record over last 5 and 10 games
undlast_5_percent_vis_agg = data.groupby(['und_last_5_percent'])['spreadflag'].mean()#.sum()/ data.groupby(['spread'])['spreadflag'].count()
und_last_10_percent_vis_agg = data.groupby(['und_last_10_percent'])['spreadflag'].mean()
fig, axes = plt.subplots(2,1)
und_last_5_percent_vis_agg_ax = undlast_5_percent_vis_agg.plot(ax = axes[0])
und_last_5_percent_vis_agg_ax.set_ylabel('underdog win %')
und_last_5_percent_vis_agg_ax.set_title('Figure 4a - Underdog Win % Last 5 Games')
und_last_5_percent_vis_agg_figure = und_last_5_percent_vis_agg_ax.get_figure()
und_last_5_percent_vis_agg_figure.subplots_adjust(hspace=0.75)
und_last_10_percent_vis_agg_ax = und_last_10_percent_vis_agg.plot(kind = 'line',ax = axes[1])
und_last_10_percent_vis_agg_ax.set_ylabel('underdog win %')
und_last_10_percent_vis_agg_ax.set_title('Figure 4b - Underdog Win % Last 10 Games')
und_last_10_percent_vis_agg_figure = und_last_10_percent_vis_agg_ax.get_figure()
plt.show()
#plt.savefig('2b - fig 4 - und_last_5_percent.png')
| 34.811111 | 141 | 0.76157 |
e1473bb4e004b0d3642a2fee0b5a8667fbdf36d4 | 597 | py | Python | tests/functional/testplan/test_plan_timeout.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | null | null | null | tests/functional/testplan/test_plan_timeout.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | null | null | null | tests/functional/testplan/test_plan_timeout.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Testplan that is expected to time out."""
import sys
import threading
import testplan
from testplan.testing import multitest
if __name__ == '__main__':
sys.exit(main().exit_code)
| 20.586207 | 58 | 0.643216 |
e14841f80a1f905b5006c26969f6f10bf64c27b5 | 107 | py | Python | Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codefights/arcade/intro/level-2/6.Make-Array-Consecutive-2/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python3
| 21.4 | 59 | 0.700935 |
e14a89ff9896dc6d76ffe641bcbb01393e6b478d | 1,127 | py | Python | tests/classification_test.py | mjirik/lisa | 06c5cb8f375f51302341e768512f02236774c8a3 | [
"BSD-3-Clause"
] | 22 | 2015-01-26T12:58:54.000Z | 2021-04-15T17:48:13.000Z | tests/classification_test.py | mjirik/lisa | 06c5cb8f375f51302341e768512f02236774c8a3 | [
"BSD-3-Clause"
] | 31 | 2015-01-23T14:46:13.000Z | 2018-05-18T14:47:18.000Z | tests/classification_test.py | mjirik/lisa | 06c5cb8f375f51302341e768512f02236774c8a3 | [
"BSD-3-Clause"
] | 13 | 2015-06-30T08:54:27.000Z | 2020-09-11T16:08:19.000Z | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# import funkc z jinho adrese
# import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src/"))
# sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
# sys.path.append(os.path.join(path_to_script, "../src/"))
import unittest
import numpy as np
import lisa.classification
if __name__ == "__main__":
unittest.main()
| 29.657895 | 76 | 0.615794 |
e14b23b0342f7644f668cb1aa04ae3158b4e1e5b | 751 | py | Python | application.py | milindvb/python-docs-hello-world | 6d3c8b1936c10ee245cc7c4ffb448e94c8b4b9de | [
"MIT"
] | null | null | null | application.py | milindvb/python-docs-hello-world | 6d3c8b1936c10ee245cc7c4ffb448e94c8b4b9de | [
"MIT"
] | null | null | null | application.py | milindvb/python-docs-hello-world | 6d3c8b1936c10ee245cc7c4ffb448e94c8b4b9de | [
"MIT"
] | null | null | null | from flask import Flask
# import pyodbc
app = Flask(__name__)
| 28.884615 | 141 | 0.600533 |
e14ca387e55877393570685f057c5e66f54b5ec5 | 3,906 | py | Python | basefiles/sweeps/SMTBFsweep.py | hpec-2021-ccu-lanl/simulator | 21a7cc0dd12feef5ad26668a3cc216854cc2dd40 | [
"BSD-3-Clause"
] | null | null | null | basefiles/sweeps/SMTBFsweep.py | hpec-2021-ccu-lanl/simulator | 21a7cc0dd12feef5ad26668a3cc216854cc2dd40 | [
"BSD-3-Clause"
] | null | null | null | basefiles/sweeps/SMTBFsweep.py | hpec-2021-ccu-lanl/simulator | 21a7cc0dd12feef5ad26668a3cc216854cc2dd40 | [
"BSD-3-Clause"
] | null | null | null | from sweeps.sweepFunctions import *
import numpy as np
| 48.222222 | 164 | 0.575269 |
e14ce3e30f3e8ef1bb113abf4b81672a5245be55 | 1,708 | py | Python | tests/functional_pyecore/regressions/test_issue_34_resolving_pyecore.py | aranega/textX | abb04d272a1b74f937d43400be130cf7a3be3516 | [
"MIT"
] | 4 | 2017-12-04T11:07:11.000Z | 2021-06-21T20:54:09.000Z | tests/functional_pyecore/regressions/test_issue_34_resolving_pyecore.py | aranega/textX | abb04d272a1b74f937d43400be130cf7a3be3516 | [
"MIT"
] | null | null | null | tests/functional_pyecore/regressions/test_issue_34_resolving_pyecore.py | aranega/textX | abb04d272a1b74f937d43400be130cf7a3be3516 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import pytest # noqa
import sys
pytestmark = pytest.mark.skipif(sys.version_info[0] < 3,
reason="pyecore is not Python 2 compatible") # noqa
pyecore = pytest.importorskip("pyecore") # noqa
import textx
from textx.metamodel import metamodel_from_str
pytestmark = pytest.mark.usefixtures("enable_pyecore_support")
def test_issue_34_resolving():
"""An issue in resolving a list of objects of different types.
In the grammar below, attribute `values` in `FormulaExp` collect STRING
instances which leads textX to deduce the type of this attribute to be list
of STRING objects. Thus, object reference resolving does not consider the
`values` list.
In the new version textX will deduce type OBJECT if different types are
used in multiple assignments.
"""
grammar = """
Expression:
atts+=Attribute[','] 'formula' form=Formula
;
Formula:
value=FormulaExp
;
FormulaExp:
values=Cond
| ( values='(' values=Formula values=')' )
;
Cond:
attribute = [Attribute|attr_id] '<' values=STRING
;
attr_id:
/attr_[a-f0-9]+/
;
Attribute:
name = attr_id
;
"""
meta_model = metamodel_from_str(grammar)
model = meta_model.model_from_str(
"attr_123, attr_444 formula attr_123 < 'aa'")
assert type(model.form.value.values[0].attribute).__name__ == 'Attribute'
assert model.form.value.values[0].attribute.name == 'attr_123'
| 25.878788 | 84 | 0.67096 |
e14d0acbede38071c9f51e6e3d4fd2359e4f607b | 863 | py | Python | pylbd/s3_object.py | MacHu-GWU/pylbd-project | d9be28d1f9f7679237e4d3c86f63ea06f43249dd | [
"MIT"
] | null | null | null | pylbd/s3_object.py | MacHu-GWU/pylbd-project | d9be28d1f9f7679237e4d3c86f63ea06f43249dd | [
"MIT"
] | null | null | null | pylbd/s3_object.py | MacHu-GWU/pylbd-project | d9be28d1f9f7679237e4d3c86f63ea06f43249dd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import boto3
from botocore.exceptions import ClientError
import attr
from attrs_mate import AttrsClass
import weakref
| 26.151515 | 86 | 0.659328 |
e14d1130b819743aa4189ff145d7b0695bac00b3 | 543 | py | Python | android_toast/toast.py | ShareASmile/car-locator | 765d26ad414ab86e4d93bc5338868769e8b3e90f | [
"MIT"
] | 21 | 2020-09-08T21:03:25.000Z | 2022-02-15T07:08:04.000Z | android_toast/toast.py | ShareASmile/car-locator | 765d26ad414ab86e4d93bc5338868769e8b3e90f | [
"MIT"
] | 3 | 2021-04-13T09:40:20.000Z | 2021-05-28T20:53:07.000Z | android_toast/toast.py | ShareASmile/car-locator | 765d26ad414ab86e4d93bc5338868769e8b3e90f | [
"MIT"
] | 9 | 2020-12-11T09:01:42.000Z | 2022-03-28T00:55:59.000Z |
from android.runnable import run_on_ui_thread
from jnius import autoclass, cast
mActivity = autoclass("org.kivy.android.PythonActivity").mActivity
Toast = autoclass("android.widget.Toast")
CharSequence = autoclass("java.lang.CharSequence")
String = autoclass("java.lang.String")
| 28.578947 | 66 | 0.756906 |