hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
436d01399c03b77d98f4cf23e9025181a7999308 | 3,767 | py | Python | app/app.py | shaswat01/Disaster_Response_ETL | c441514fb5231d193cd4b29afad00fe0f3513562 | [
"MIT"
] | null | null | null | app/app.py | shaswat01/Disaster_Response_ETL | c441514fb5231d193cd4b29afad00fe0f3513562 | [
"MIT"
] | null | null | null | app/app.py | shaswat01/Disaster_Response_ETL | c441514fb5231d193cd4b29afad00fe0f3513562 | [
"MIT"
] | null | null | null | import nltk
import json
import plotly
import pandas as pd
import plotly.graph_objects as go
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download(['punkt','wordnet'])
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Histogram
import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
# load data
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = pd.read_sql_table('messages', engine)
# load model
model = joblib.load("models/model.pkl")
# index webpage displays cool visuals and receives user input text for model
# web page that handles user query and displays model results
def main():
app.run()
#app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| 25.281879 | 131 | 0.528537 |
436d1a37515679503cc50623874a3539d00946be | 4,659 | py | Python | tools/mo/openvino/tools/mo/front/mxnet/mx_reshape_reverse.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 1 | 2019-09-22T01:05:07.000Z | 2019-09-22T01:05:07.000Z | tools/mo/openvino/tools/mo/front/mxnet/mx_reshape_reverse.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 58 | 2020-11-06T12:13:45.000Z | 2022-03-28T13:20:11.000Z | tools/mo/openvino/tools/mo/front/mxnet/mx_reshape_reverse.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 2 | 2019-09-20T01:33:37.000Z | 2019-09-20T08:42:11.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.mxnet.mx_reshape_to_reshape import MXReshapeToReshape
from openvino.tools.mo.ops.Reverse import Reverse
from openvino.tools.mo.ops.mxreshape import MXReshape
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.reshape import Reshape
from openvino.tools.mo.ops.shape import Shape
from openvino.tools.mo.ops.squeeze import Squeeze
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
| 59.730769 | 127 | 0.69457 |
436dafbd787a4e7854f10318324bcf64277e6432 | 6,480 | py | Python | Python/Simulation/Numerical_Methods/test_cubic_spline_solve.py | MattMarti/Lambda-Trajectory-Sim | 4155f103120bd49221776cc3b825b104f36817f2 | [
"MIT"
] | null | null | null | Python/Simulation/Numerical_Methods/test_cubic_spline_solve.py | MattMarti/Lambda-Trajectory-Sim | 4155f103120bd49221776cc3b825b104f36817f2 | [
"MIT"
] | null | null | null | Python/Simulation/Numerical_Methods/test_cubic_spline_solve.py | MattMarti/Lambda-Trajectory-Sim | 4155f103120bd49221776cc3b825b104f36817f2 | [
"MIT"
] | null | null | null | import unittest;
import numpy as np;
import scipy as sp;
from cubic_spline_solve import cubic_spline_solve;
from cubic_spline_fun import cubic_spline_fun;
# | 37.241379 | 78 | 0.564352 |
437021d671825e959375a0374106a655349dffb0 | 7,803 | py | Python | PassWord.py | IQUBE-X/passGenerator | a56a5928c1e8ee503d2757ecf0ab4108a52ec677 | [
"MIT"
] | 1 | 2020-07-11T07:59:54.000Z | 2020-07-11T07:59:54.000Z | PassWord.py | dhruvaS-hub/passGenerator | a56a5928c1e8ee503d2757ecf0ab4108a52ec677 | [
"MIT"
] | null | null | null | PassWord.py | dhruvaS-hub/passGenerator | a56a5928c1e8ee503d2757ecf0ab4108a52ec677 | [
"MIT"
] | 1 | 2021-06-02T10:11:19.000Z | 2021-06-02T10:11:19.000Z | # PassWord - The Safe Password Generator App!
# importing the tkinter module for GUI
from tkinter import *
# importing the message box widget from tkinter
from tkinter import messagebox
# importing sqlite3 for database
import sqlite3
# importing random for password generation
import random
# creating fonts
font = ('Fixedsys', 10)
font2 = ('Comic Sans MS', 9)
font3 = ('System', 9)
font4 = ('Two Cen MT', 9)
# creating a database and establishing a connection
conn = sqlite3.connect('password.db')
# creating a cursor to navigate through database
c = conn.cursor()
# creating the table
'''
c.execute("""CREATE TABLE passwords (
password text
)""")
'''
# defining the root variable
root = Tk()
# Naming the app
root.title('PassWord')
# creating a label frame to organize content
label_frame = LabelFrame(root, padx=10, pady=10, text='Password Generator', font=font)
# printing the label frame onto the screen or window
label_frame.grid(row=0, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# creating a separate label frame to perform delete functions
delete_labelframe = LabelFrame(root, text='Delete Password', padx=10, pady=10, font=font4)
# printing delete labelframe onto the screen
delete_labelframe.grid(row=5, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# making the text box where password is going to be displayed
e = Entry(label_frame, fg='black', bg='white')
# printing the text box to the screen
e.grid(row=0, column=0, padx=10, pady=10, columnspan=1)
# (for the delete function) to give information on input for delete function
# (for the delete function) to give information on input for delete function
info = Label(delete_labelframe, text='Password ID', fg='black', font=font2)
# printing the label onto the screen
info.grid(row=6, column=0, pady=10)
# making the entry for user to input which password
e2 = Entry(delete_labelframe, fg='black', bg='white')
# printing the entry onto the screen
e2.grid(row=6, column=1, pady=10)
# making the password generate function
# making a function to save the password into the database
# making a function to show all the saved passwords
# making a function to hide the saved passwords
# making a function to delete passwords from database
# making a function to delete all the passwords in the database
# button for generating password
generate_password = Button(label_frame, text='Generate Strong Password', command=generate, font=font2)
# printing the button onto the screen
generate_password.grid(row=1, padx=10, pady=10, column=0)
# button to save password
save = Button(label_frame, text='Save Password', command=save_password, font=font2)
# printing the button onto the screen
save.grid(row=2, padx=10, pady=10, column=0)
# making a button to show all the passwords
show = Button(label_frame, text='Show Passwords', command=show_password, font=font2)
# printing the button onto the screen
show.grid(row=4, padx=10, pady=10, column=0)
# making a button to hide the shown passwords
hide = Button(label_frame, text='Hide Passwords', command=hide_password, font=font2)
# printing the button onto the screen
hide.grid(row=6, column=0, padx=10, pady=10)
# making a button to delete a password
delete = Button(delete_labelframe, text='Delete Password', command=delete, font=font2)
# printing the button onto the screen
delete.grid(row=8, padx=10, pady=10, column=1)
# making a button to delete all the passwords
delete_all = Button(delete_labelframe, text='Delete All', command=delete_all, fg='dark red', width=20, anchor=CENTER,
font=font3)
# printing the button onto the screen
delete_all.grid(row=9, column=1, padx=10, pady=10, ipadx=15)
# committing the changes to the database
conn.commit()
# closing the connection with database
conn.close()
# making the final loop
root.mainloop()
| 32.648536 | 134 | 0.656927 |
4370bea6e2a16934ad57aff4637712bbcfdb6bc4 | 331 | py | Python | 1805_number_of_different_integers_in_a_string.py | hotternative/leetcode | d0ec225abc2ada1398666641c7872f3eb889e7ed | [
"MIT"
] | null | null | null | 1805_number_of_different_integers_in_a_string.py | hotternative/leetcode | d0ec225abc2ada1398666641c7872f3eb889e7ed | [
"MIT"
] | null | null | null | 1805_number_of_different_integers_in_a_string.py | hotternative/leetcode | d0ec225abc2ada1398666641c7872f3eb889e7ed | [
"MIT"
] | null | null | null | from string import ascii_lowercase
ts = 'a123bc34d8ef34'
cur = []
res = set()
for c in ts:
if c in ascii_lowercase:
if cur:
s = ''.join(cur)
res.add(int(s))
cur = []
else:
cur.append(c)
else:
if cur:
s = ''.join(cur)
res.add(int(s))
print(res)
| 13.24 | 34 | 0.480363 |
4371e6643a58d749ad832f8647f0481df0293c7c | 1,087 | py | Python | app.py | ahmedriaz9908/memeapiiz | eef98f837f2ec83edc3dd004f19dcefda9b582a5 | [
"MIT"
] | null | null | null | app.py | ahmedriaz9908/memeapiiz | eef98f837f2ec83edc3dd004f19dcefda9b582a5 | [
"MIT"
] | null | null | null | app.py | ahmedriaz9908/memeapiiz | eef98f837f2ec83edc3dd004f19dcefda9b582a5 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, jsonify
from reddit_handler import *
app = Flask(__name__)
meme_subreddits = ['izlam']
| 20.12963 | 84 | 0.601656 |
4372710c66361fa93707980328afe4826b15ed27 | 6,609 | py | Python | 10_compare_between_main_product_pages.py | e-davydenkova/SeleniumWebDriver_Training | e03cfbe4ea74ddc8f0c575d8fcaa3a6c7ccb7d0a | [
"Apache-2.0"
] | null | null | null | 10_compare_between_main_product_pages.py | e-davydenkova/SeleniumWebDriver_Training | e03cfbe4ea74ddc8f0c575d8fcaa3a6c7ccb7d0a | [
"Apache-2.0"
] | null | null | null | 10_compare_between_main_product_pages.py | e-davydenkova/SeleniumWebDriver_Training | e03cfbe4ea74ddc8f0c575d8fcaa3a6c7ccb7d0a | [
"Apache-2.0"
] | null | null | null | import pytest
from selenium import webdriver
import re
# check that product names are identical on the main page and on product page
def test_product_names(driver):
# get a product name on the main page
main_name = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light .name").text
# get a product name on a product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
product_name = driver.find_element_by_css_selector("#box-product .title").text
assert main_name == product_name, "Product names on the main page and on product page are NOT identical"
# check that prices (regular and campaign) are identical on the main page and on product page
# check color of regular and campaign prices and their attributes on the main page
# check color of regular and campaign prices and their attributes on the product page
# check that campaign price is bigger than regular prise on the main and product pages
| 52.03937 | 125 | 0.733394 |
4372f137c065f7fda02b994b61b1b4bd3b7965e5 | 1,775 | py | Python | pyrite/llvm.py | iahuang/pyrite | 0db83aad6aa8f245edf13d393f65d408eb956c4d | [
"MIT"
] | null | null | null | pyrite/llvm.py | iahuang/pyrite | 0db83aad6aa8f245edf13d393f65d408eb956c4d | [
"MIT"
] | 1 | 2022-03-28T00:35:11.000Z | 2022-03-29T21:17:06.000Z | pyrite/llvm.py | iahuang/pyrite | 0db83aad6aa8f245edf13d393f65d408eb956c4d | [
"MIT"
] | null | null | null | import shutil
from pyrite import fs
from pyrite.command_line import run_command
from pyrite.errors import UserError
from pyrite.globals import Globals
from os.path import join
| 29.583333 | 127 | 0.60507 |
43741937702bf1405a4a4845184d5f67e95b3dd1 | 526 | py | Python | bag_recursive.py | eduardogerentklein/Algoritmos-Geneticos | 499836ac4867240ee3777dcdd554081a480cb8c9 | [
"MIT"
] | null | null | null | bag_recursive.py | eduardogerentklein/Algoritmos-Geneticos | 499836ac4867240ee3777dcdd554081a480cb8c9 | [
"MIT"
] | null | null | null | bag_recursive.py | eduardogerentklein/Algoritmos-Geneticos | 499836ac4867240ee3777dcdd554081a480cb8c9 | [
"MIT"
] | null | null | null | maxWeight = 30
value = [15, 7, 10, 5, 8, 17]
weight = [15, 3, 2, 5, 9, 20]
bestAnswer = bag(0, [])
print(bestAnswer) | 18.137931 | 41 | 0.629278 |
437727aaebd2b60da03893cf1960a1dac044f4b8 | 14,215 | py | Python | train.py | MEfeTiryaki/trpo | e1c7bc25165730afa60d9733555398e078a13e67 | [
"MIT"
] | 2 | 2020-03-26T23:36:41.000Z | 2020-03-27T03:04:27.000Z | train.py | MEfeTiryaki/trpo | e1c7bc25165730afa60d9733555398e078a13e67 | [
"MIT"
] | null | null | null | train.py | MEfeTiryaki/trpo | e1c7bc25165730afa60d9733555398e078a13e67 | [
"MIT"
] | 1 | 2020-03-27T03:04:28.000Z | 2020-03-27T03:04:28.000Z | import argparse
from itertools import count
import signal
import sys
import os
import time
import numpy as np
import gym
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import scipy.optimize
import matplotlib.pyplot as plt
from value import Value
from policy import Policy
from utils import *
from trpo import trpo_step
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
# Algorithm Parameters
parser.add_argument('--gamma', type=float, default=0.995, metavar='G', help='discount factor (default: 0.995)')
parser.add_argument('--lambda-', type=float, default=0.97, metavar='G', help='gae (default: 0.97)')
# Value Function Learning Parameters
parser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G', help='(NOT USED)l2 regularization regression (default: 1e-3)')
parser.add_argument('--val-opt-iter', type=int, default=200, metavar='G', help='iteration number for value function learning(default: 200)')
parser.add_argument('--lr', type=float, default=1e-3, metavar='G', help='learning rate for value function (default: 1e-3)')
parser.add_argument('--value-memory', type=int, default=1, metavar='G', help='ratio of past value to be used to batch size (default: 1)')
parser.add_argument('--value-memory-shuffle', action='store_true',help='if not shuffled latest memory stay') # TODO: implement
# Policy Optimization parameters
parser.add_argument('--max-kl', type=float, default=1e-2, metavar='G', help='max kl value (default: 1e-2)')
parser.add_argument('--damping', type=float, default=1e-1, metavar='G', help='damping (default: 1e-1)')
parser.add_argument('--fisher-ratio', type=float, default=1, metavar='G', help='ratio of data to calcualte fisher vector product (default: 1)')
# Environment parameters
parser.add_argument('--env-name', default="Pendulum-v0", metavar='G', help='name of the environment to run')
parser.add_argument('--seed', type=int, default=543, metavar='N', help='random seed (default: 1)')
# Training length
parser.add_argument('--batch-size', type=int, default=5000, metavar='N', help='number of steps per iteration')
parser.add_argument('--episode-length', type=int, default=1000, metavar='N', help='max step size for one episode')
parser.add_argument('--max-iteration-number', type=int, default=200, metavar='N', help='max policy iteration number')
# Rendering
parser.add_argument('--render', action='store_true', help='render the environment')
# Logging
parser.add_argument('--log-interval', type=int, default=1, metavar='N', help='interval between training status logs (default: 10)')
parser.add_argument('--log', action='store_true', help='log the results at the end')
parser.add_argument('--log-dir', type=str, default=".", metavar='N', help='log directory')
parser.add_argument('--log-prefix', type=str, default="log", metavar='N', help='log file prefix')
# Load
parser.add_argument('--load', action='store_true', help='load models')
parser.add_argument('--save', action='store_true', help='load models')
parser.add_argument('--load-dir', type=str, default=".", metavar='N', help='')
args = parser.parse_args()
env = gym.make(args.env_name)
env.seed(args.seed)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
torch.set_printoptions(profile="full")
if args.load:
policy_net = Policy(num_inputs, num_actions,30)
value_net = Value(num_inputs,30)
set_flat_params_to(value_net, loadParameterCsv(args.load_dir+"/ValueNet"))
set_flat_params_to(policy_net, loadParameterCsv(args.load_dir+"/PolicyNet"))
print("Networks are loaded from "+args.load_dir+"/")
else:
policy_net = Policy(num_inputs, num_actions,30)
value_net = Value(num_inputs,30)
def signal_handler(sig, frame):
""" Signal Handler to save the networks when shutting down via ctrl+C
Parameters:
Returns:
"""
if(args.save):
valueParam = get_flat_params_from(value_net)
policyParam = get_flat_params_from(policy_net)
saveParameterCsv(valueParam,args.load_dir+"/ValueNet")
saveParameterCsv(policyParam,args.load_dir+"/PolicyNet")
print("Networks are saved in "+args.load_dir+"/")
print('Closing!!')
env.close()
sys.exit(0)
def prepare_data(batch,valueBatch,previousBatch):
""" Get the batch data and calculate value,return and generalized advantage
Detail: TODO
Parameters:
batch (dict of arrays of numpy) : TODO
valueBatch (dict of arrays of numpy) : TODO
previousBatch (dict of arrays of numpy) : TODO
Returns:
"""
# TODO : more description above
stateList = [ torch.from_numpy(np.concatenate(x,axis=0)) for x in batch["states"]]
actionsList = [torch.from_numpy(np.concatenate(x,axis=0)) for x in batch["actions"]]
for states in stateList:
value = value_net.forward(states)
batch["values"].append(value)
advantagesList = []
returnsList = []
rewardsList = []
for rewards,values,masks in zip(batch["rewards"],batch["values"],batch["mask"]):
returns = torch.Tensor(len(rewards),1)
advantages = torch.Tensor(len(rewards),1)
deltas = torch.Tensor(len(rewards),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(len(rewards))):
returns[i] = rewards[i] + args.gamma * prev_value * masks[i] # TD
# returns[i] = rewards[i] + args.gamma * prev_return * masks[i] # Monte Carlo
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i]- values.data[i]
advantages[i] = deltas[i] + args.gamma * args.lambda_* prev_advantage* masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
returnsList.append(returns)
advantagesList.append(advantages)
rewardsList.append(torch.Tensor(rewards))
batch["states"] = torch.cat(stateList,0)
batch["actions"] = torch.cat(actionsList,0)
batch["rewards"] = torch.cat(rewardsList,0)
batch["returns"] = torch.cat(returnsList,0)
advantagesList = torch.cat(advantagesList,0)
batch["advantages"] = (advantagesList- advantagesList.mean()) / advantagesList.std()
valueBatch["states"] = torch.cat(( previousBatch["states"],batch["states"]),0)
valueBatch["targets"] = torch.cat((previousBatch["returns"],batch["returns"]),0)
def update_policy(batch):
""" Get advantage , states and action and calls trpo step
Parameters:
batch (dict of arrays of numpy) : TODO (batch is different than prepare_data by structure)
Returns:
"""
advantages = batch["advantages"]
states = batch["states"]
actions = batch["actions"]
trpo_step(policy_net, states,actions,advantages , args.max_kl, args.damping)
def update_value(valueBatch):
""" Get valueBatch and run adam optimizer to learn value function
Parameters:
valueBatch (dict of arrays of numpy) : TODO
Returns:
"""
# shuffle the data
dataSize = valueBatch["targets"].size()[0]
permutation = torch.randperm(dataSize)
input = valueBatch["states"][permutation]
target = valueBatch["targets"][permutation]
iter = args.val_opt_iter
batchSize = int(dataSize/ iter)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(value_net.parameters(), lr=args.lr)
for t in range(iter):
prediction = value_net(input[t*batchSize:t*batchSize+batchSize])
loss = loss_fn(prediction, target[t*batchSize:t*batchSize+batchSize])
# XXX : Comment out for debug
# if t%100==0:
# print("\t%f"%loss.data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def save_to_previousBatch(previousBatch,batch):
""" Save previous batch to use in future value optimization
Details: TODO
Parameters:
Returns:
"""
if args.value_memory<0:
print("Value memory should be equal or greater than zero")
elif args.value_memory>0:
if previousBatch["returns"].size() == 0:
previousBatch= {"states":batch["states"],
"returns":batch["returns"]}
else:
previous_size = previousBatch["returns"].size()[0]
size = batch["returns"].size()[0]
if previous_size/size == args.value_memory:
previousBatch["states"] = torch.cat([previousBatch["states"][size:],batch["states"]],0)
previousBatch["returns"] = torch.cat([previousBatch["returns"][size:],batch["returns"]],0)
else:
previousBatch["states"] = torch.cat([previousBatch["states"],batch["states"]],0)
previousBatch["returns"] = torch.cat([previousBatch["returns"],batch["returns"]],0)
if args.value_memory_shuffle:
permutation = torch.randperm(previousBatch["returns"].size()[0])
previousBatch["states"] = previousBatch["states"][permutation]
previousBatch["returns"] = previousBatch["returns"][permutation]
def calculate_loss(reward_sum_mean,reward_sum_std,test_number = 10):
""" Calculate mean cummulative reward for test_nubmer of trials
Parameters:
reward_sum_mean (list): holds the history of the means.
reward_sum_std (list): holds the history of the std.
Returns:
list: new value appended means
list: new value appended stds
"""
rewardSum = []
for i in range(test_number):
state = env.reset()
rewardSum.append(0)
for t in range(args.episode_length):
state, reward, done, _ = env.step(policy_net.get_action(state)[0] )
state = np.transpose(state)
rewardSum[-1] += reward
if done:
break
reward_sum_mean.append(np.array(rewardSum).mean())
reward_sum_std.append(np.array(rewardSum).std())
return reward_sum_mean, reward_sum_std
def log(rewards):
""" Saves mean and std over episodes in log file
Parameters:
Returns:
"""
# TODO : add duration to log
filename = args.log_dir+"/"+ args.log_prefix \
+ "_env_" + args.env_name \
+ "_maxIter_" + str(args.max_iteration_number) \
+ "_batchSize_" + str(args.batch_size) \
+ "_gamma_" + str(args.gamma) \
+ "_lambda_" + str(args.lambda_) \
+ "_lr_" + str(args.lr) \
+ "_valOptIter_" + str(args.val_opt_iter)
if os.path.exists(filename + "_index_0.csv"):
id = 0
file = filename + "_index_" + str(id)
while os.path.exists(file + ".csv"):
id = id +1
file = filename + "_index_" + str(id)
filename = file
else:
filename = filename + "_index_0"
import csv
filename = filename+ ".csv"
pythonVersion = sys.version_info[0]
if pythonVersion == 3:
with open(filename, 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(rewards)
elif pythonVersion == 2:
with open(filename, 'w', ) as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(rewards)
def main():
"""
Parameters:
Returns:
"""
signal.signal(signal.SIGINT, signal_handler)
time_start = time.time()
reward_sum_mean,reward_sum_std = [], []
previousBatch= {"states":torch.Tensor(0) ,
"returns":torch.Tensor(0)}
reward_sum_mean,reward_sum_std = calculate_loss(reward_sum_mean,reward_sum_std)
print("Initial loss \n\tloss | mean : %6.4f / std : %6.4f"%(reward_sum_mean[-1],reward_sum_std[-1]) )
for i_episode in range(args.max_iteration_number):
time_episode_start = time.time()
# reset batches
batch = {"states":[] ,
"actions":[],
"next_states":[] ,
"rewards":[],
"returns":[],
"values":[],
"advantages":[],
"mask":[]}
valueBatch = {"states" :[],
"targets" : []}
num_steps = 0
while num_steps < args.batch_size:
state = env.reset()
reward_sum = 0
states,actions,rewards,next_states,masks = [],[],[],[],[]
steps = 0
for t in range(args.episode_length):
action = policy_net.get_action(state)[0] # agent
next_state, reward, done, info = env.step(action)
next_state = np.transpose(next_state)
mask = 0 if done else 1
masks.append(mask)
states.append(state)
actions.append(action)
next_states.append(next_state)
rewards.append(reward)
state = next_state
reward_sum += reward
steps+=1
if args.render:
env.render()
if done:
break
batch["states"].append(np.expand_dims(states, axis=1) )
batch["actions"].append(actions)
batch["next_states"].append(np.expand_dims(next_states, axis=1))
batch["rewards"].append(rewards)
batch["mask"].append(masks)
num_steps += steps
prepare_data(batch,valueBatch,previousBatch)
update_policy(batch) # First policy update to avoid overfitting
update_value(valueBatch)
save_to_previousBatch(previousBatch,batch)
print("episode %d | total: %.4f "%( i_episode, time.time()-time_episode_start))
reward_sum_mean,reward_sum_std = calculate_loss(reward_sum_mean,reward_sum_std)
print("\tloss | mean : %6.4f / std : %6.4f"%(reward_sum_mean[-1],reward_sum_std[-1]) )
if args.log:
print("Data is logged in "+args.log_dir+"/")
log(reward_sum_mean)
print("Total training duration: %.4f "%(time.time()-time_start))
env.close()
if __name__ == '__main__':
main()
| 38.838798 | 143 | 0.636722 |
43785386d2679f8fabe7de8f8acd7359d1da2540 | 5,112 | py | Python | task3/task3_xgb_cv.py | meck93/intro_ml | 903710b13e9eed8b45fdbd9957c2fb49b2981f62 | [
"MIT"
] | null | null | null | task3/task3_xgb_cv.py | meck93/intro_ml | 903710b13e9eed8b45fdbd9957c2fb49b2981f62 | [
"MIT"
] | null | null | null | task3/task3_xgb_cv.py | meck93/intro_ml | 903710b13e9eed8b45fdbd9957c2fb49b2981f62 | [
"MIT"
] | null | null | null |
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import f_classif, SelectKBest
import numpy as np
import pandas as pd
import os
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-7.2.0-posix-sjlj-rt_v5-rev1\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
import xgboost as xgb
# Constants
FILE_PATH_TRAIN = "./input/train.h5"
FILE_PATH_TEST = "./input/test.h5"
TEST_SIZE = 0.25
# read training file
# test_data = pd.read_hdf(FILE_PATH_TRAIN, "test")
training_data = pd.read_hdf(FILE_PATH_TRAIN, "train")
# training data
# extracting the x-values
x_values_training = training_data.copy()
x_values_training = x_values_training.drop(labels=['y'], axis=1)
x_component_training = x_values_training.values
# extracting the y-values
y_component_training = training_data['y'].values
# training the scaler
scaler = StandardScaler(with_mean=True, with_std=True)
scaler = scaler.fit(x_component_training)
# scaling the training and test data
x_train_scaled = scaler.transform(x_component_training)
# feature selection
selector = SelectKBest(f_classif, k=25)
selector = selector.fit(x_train_scaled, y_component_training)
x_train_scaled_new = selector.transform(x_train_scaled)
# splitting the training set into a training & validation set
x_train, x_val, y_train, y_val = train_test_split(x_train_scaled_new, y_component_training, test_size=TEST_SIZE, random_state=42)
# training, evaluation and test data in xgboost DMatrix
xg_train = xgb.DMatrix(x_train, label=y_train)
xg_val = xgb.DMatrix(x_val, label=y_val)
# setup parameters for xgboost
params = {}
# use softmax multi-class classification
params['objective'] = 'multi:softmax'
# scale weight of positive examples
params['silent'] = 0
params['num_class'] = 5
params['tree_method'] = 'auto'
params['seed'] = 42
# number of boosting rounds
rounds = 300
# gridsearch_params = [
# (max_depth, min_child_weight)
# for max_depth in range(6,13,2)
# for min_child_weight in range(4,9,2)
# ]
# print(gridsearch_params)
# best_params = None
# min_error = float("Inf")
# for max_depth, min_child_weight in gridsearch_params:
# print("CV with max_depth={}, min_child_weight={}".format(max_depth, min_child_weight))
# # Update our parameters
# params['max_depth'] = max_depth
# params['min_child_weight'] = min_child_weight
# # Run CV
# cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# # Update best error
# mean_error = cv_results['test-merror-mean'].min()
# boost_rounds = cv_results['test-merror-mean'].argmin()
# print("\t Multiclass Error {} for {} rounds".format(mean_error, boost_rounds))
# print()
# if mean_error < min_error:
# min_error = mean_error
# best_params = (max_depth, min_child_weight)
# print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_error))
# # grid search parameters
# gridsearch_params = []
# # tree depth, gamma, learning rate, regularization lambda
# for max_tree_depth in range(6, 11, 1):
# for gamma in range(0, 13, 2):
# for learn_rate in [0.3, 0.1, 0.05]:
# for reg_lambda in [10.0, 1.0, 0.0, 0.1, 0.01]:
# gridsearch_params.append((max_tree_depth, gamma, learn_rate, reg_lambda))
# print(gridsearch_params)
gridsearch_params = [
(max_depth, gamma)
for max_depth in range(6,13,2)
for gamma in range(0,13,2)
]
print(gridsearch_params)
best_params = None
min_test_error = float("Inf")
min_train_error = float("Inf")
file = open("output.txt", mode="w+", encoding='utf-8', newline='\n')
for max_depth, gamma in gridsearch_params:
print("CV with max_depth={}, gamma={}".format(max_depth, gamma))
file.write("CV with max_depth={}, gamma={}\n".format(max_depth, gamma))
# Update our parameters
params['max_depth'] = max_depth
params['gamma'] = gamma
# Run CV
cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# Update best error
test_error = cv_results['test-merror-mean'].min()
train_error = cv_results['train-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("Multiclass Error {} for {} rounds".format(test_error, boost_rounds))
print()
file.write("Multiclass Error - Test: {} - Train: {} for {} rounds\n".format(test_error, train_error, boost_rounds))
file.write("\n")
if test_error < min_test_error:
min_test_error = test_error
min_train_error = train_error
best_params = (max_depth, gamma)
print("Best params: {}, {}, Test Error: {}, Train Error: {}".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.write("Best params: {}, {}, Test Error: {}, Train Error: {}\n".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.close()
| 32.35443 | 150 | 0.714593 |
4378f461808522c0661a502153858f383b5e6b02 | 1,369 | py | Python | discovery-provider/src/queries/get_plays_metrics.py | atticwip/audius-protocol | 9758e849fae01508fa1d27675741228b11533e6e | [
"Apache-2.0"
] | 429 | 2019-08-14T01:34:07.000Z | 2022-03-30T06:31:38.000Z | discovery-provider/src/queries/get_plays_metrics.py | SNOmad1/audius-protocol | 3d5fc2bf688265eb529060f1f3234ef2b95ed231 | [
"Apache-2.0"
] | 998 | 2019-08-14T01:52:37.000Z | 2022-03-31T23:17:22.000Z | discovery-provider/src/queries/get_plays_metrics.py | SNOmad1/audius-protocol | 3d5fc2bf688265eb529060f1f3234ef2b95ed231 | [
"Apache-2.0"
] | 73 | 2019-10-04T04:24:16.000Z | 2022-03-24T16:27:30.000Z | import logging
import time
from sqlalchemy import func, desc
from src.models import Play
from src.utils import db_session
logger = logging.getLogger(__name__)
def get_plays_metrics(args):
    """Compute aggregated play-count metrics.

    Args:
        args: dict The parsed args from the request
        args.start_time: date The start of the query
        args.limit: number The max number of responses to return
        args.bucket_size: string A date_trunc operation to aggregate timestamps by

    Returns:
        Array of dictionaries with the play counts and timestamp
    """
    # Metrics are read-only, so route the query to a read replica.
    replica = db_session.get_db_read_replica()
    with replica.scoped_session() as session:
        return _get_plays_metrics(session, args)
| 27.938776 | 82 | 0.646457 |
437984a8785d9b1726c62d66ab94644c9b6578d8 | 5,275 | py | Python | CAutomation/settings.py | Rich9rd/CAutomation | d1c1b963e806a216d4c825243c1c405336414413 | [
"MIT"
] | null | null | null | CAutomation/settings.py | Rich9rd/CAutomation | d1c1b963e806a216d4c825243c1c405336414413 | [
"MIT"
] | null | null | null | CAutomation/settings.py | Rich9rd/CAutomation | d1c1b963e806a216d4c825243c1c405336414413 | [
"MIT"
] | null | null | null | """
Django settings for CAutomation project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_LOGOUT_ON_GET = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "none"
AUTH_USER_MODEL = 'cleaning.User'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_CONFIRM_EMAIL_ON_GET = False
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
},
'USE_SESSION_AUTH': False,
'JSON_EDITOR': True,
}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=(#vt!5x^l3-j(e*%@p0)d_p&qd2x_#&n*^i=j38@b(26zz^mr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
# Application definition
SITE_ID = 1
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'drf_yasg',
'rest_framework',
'rest_framework.authtoken',
'rest_auth.registration',
'rest_auth',
'common.apps.CommonConfig',
'cleaning.apps.CleaningConfig',
]
#'corsheaders',
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
#'django.middleware.common.CommonMiddleware',
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#'corsheaders.middleware.CommonMiddleware',
ROOT_URLCONF = 'CAutomation.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CAutomation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# dj_database_url reads DATABASE_URL from the environment and falls back to
# the `default` URL below when it is unset.
# SECURITY NOTE(review): a live Postgres username/password is hard-coded
# here and committed to version control -- rotate these credentials and
# supply them exclusively via the DATABASE_URL environment variable.
DATABASES = {
    'default': dj_database_url.config(
        default='postgres://mzqgdpoeqiolgg:270514539442574d87e9f9c742314e58d57ff59139679e5c6e46eff5482b5b6e@ec2-52-208-221-89.eu-west-1.compute.amazonaws.com:5432/d96ohaomhouuat'
    ),
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_ALLOW_ALL_ORIGINS = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 27.473958 | 178 | 0.714123 |
437c42fd9708572ca32db3dd04de75e0b264c088 | 1,361 | py | Python | calculators/credit_card_calculator.py | wanderindev/financial-calculator-backend | ad7e736c858298c240eb9af52fbadcb02c693968 | [
"MIT"
] | 2 | 2021-01-08T04:26:54.000Z | 2022-02-04T22:22:27.000Z | calculators/credit_card_calculator.py | wanderindev/financial-calculator-backend | ad7e736c858298c240eb9af52fbadcb02c693968 | [
"MIT"
] | null | null | null | calculators/credit_card_calculator.py | wanderindev/financial-calculator-backend | ad7e736c858298c240eb9af52fbadcb02c693968 | [
"MIT"
] | 2 | 2019-06-06T19:36:17.000Z | 2020-05-20T12:37:08.000Z | from .calculator import Calculator
# noinspection PyTypeChecker
| 30.244444 | 70 | 0.543718 |
437c6a6a6d5abf3db9e497007b852df839401638 | 2,075 | py | Python | setup.py | phaustin/MyST-Parser | 181e921cea2794f10ca612df6bf2a2057b66c372 | [
"MIT"
] | null | null | null | setup.py | phaustin/MyST-Parser | 181e921cea2794f10ca612df6bf2a2057b66c372 | [
"MIT"
] | null | null | null | setup.py | phaustin/MyST-Parser | 181e921cea2794f10ca612df6bf2a2057b66c372 | [
"MIT"
] | null | null | null | """myst-parser package setup."""
from importlib import import_module
from setuptools import find_packages, setup
# Read the long description up front: the original passed
# ``open("README.md").read()`` inline, which leaks the file handle and
# decodes with the platform-default encoding. A ``with`` block plus an
# explicit UTF-8 decode fixes both.
with open("README.md", encoding="utf-8") as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name="myst-parser",
    version=import_module("myst_parser").__version__,
    description=(
        "An extended commonmark compliant parser, " "with bridges to docutils & sphinx."
    ),
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url="https://github.com/executablebooks/MyST-Parser",
    project_urls={"Documentation": "https://myst-parser.readthedocs.io"},
    author="Chris Sewell",
    author_email="chrisj_sewell@hotmail.com",
    license="MIT",
    packages=find_packages(),
    entry_points={
        "console_scripts": ["myst-benchmark = myst_parser.cli.benchmark:main"]
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup",
        "Framework :: Sphinx :: Extension",
    ],
    keywords="markdown lexer parser development docutils sphinx",
    python_requires=">=3.6",
    install_requires=["markdown-it-py~=0.4.5"],
    extras_require={
        "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"],
        "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
        "testing": [
            "coverage",
            "pytest>=3.6,<4",
            "pytest-cov",
            "pytest-regressions",
            "beautifulsoup4",
        ],
        "rtd": ["sphinxcontrib-bibtex", "ipython", "sphinx-book-theme", "sphinx_tabs"],
    },
    zip_safe=True,
)
| 37.727273 | 88 | 0.61012 |
437d5b7a20ce44c03f3d4a4f70ef524faf474a1a | 554 | py | Python | python/tests/extractor/refmt.py | kho/cdec | d88186af251ecae60974b20395ce75807bfdda35 | [
"BSD-3-Clause-LBNL",
"Apache-2.0"
] | 114 | 2015-01-11T05:41:03.000Z | 2021-08-31T03:47:12.000Z | python/tests/extractor/refmt.py | kho/cdec | d88186af251ecae60974b20395ce75807bfdda35 | [
"BSD-3-Clause-LBNL",
"Apache-2.0"
] | 29 | 2015-01-09T01:00:09.000Z | 2019-09-25T06:04:02.000Z | python/tests/extractor/refmt.py | kho/cdec | d88186af251ecae60974b20395ce75807bfdda35 | [
"BSD-3-Clause-LBNL",
"Apache-2.0"
] | 50 | 2015-02-13T13:48:39.000Z | 2019-08-07T09:45:11.000Z | #!/usr/bin/env python
import collections, sys
lines = []
f = collections.defaultdict(int)
fe = collections.defaultdict(lambda: collections.defaultdict(int))
for line in sys.stdin:
tok = [x.strip() for x in line.split('|||')]
count = int(tok[4])
f[tok[1]] += count
fe[tok[1]][tok[2]] += count
lines.append(tok)
for tok in lines:
feat = 'IsSingletonF={0}.0 IsSingletonFE={1}.0'.format(
0 if f[tok[1]] > 1 else 1,
0 if fe[tok[1]][tok[2]] > 1 else 1)
print ' ||| '.join((tok[0], tok[1], tok[2], feat, tok[3]))
| 26.380952 | 66 | 0.590253 |
437e1e0973bde8b1e251b37ffc137a684d4dc2b8 | 436 | py | Python | blog/models.py | tomitokko/django-blog-with-astradb | 236aaf625ceb854345b6d6bbdd6d17b81e0e3c4f | [
"Apache-2.0"
] | 3 | 2021-12-13T21:40:32.000Z | 2022-03-28T08:08:36.000Z | blog/models.py | tomitokko/django-blog-with-astradb | 236aaf625ceb854345b6d6bbdd6d17b81e0e3c4f | [
"Apache-2.0"
] | null | null | null | blog/models.py | tomitokko/django-blog-with-astradb | 236aaf625ceb854345b6d6bbdd6d17b81e0e3c4f | [
"Apache-2.0"
] | 1 | 2022-02-11T20:49:08.000Z | 2022-02-11T20:49:08.000Z | from django.db import models
import uuid
from datetime import datetime
from cassandra.cqlengine import columns
from django_cassandra_engine.models import DjangoCassandraModel
# Create your models here. | 36.333333 | 63 | 0.802752 |
43826b793ab889bf34bea8a88631da20426a6acb | 3,880 | py | Python | fedex/services/availability_commitment_service.py | miczone/python-fedex | 1a17b45753b16b2551b0b8ba2c6aa65be8e73931 | [
"BSD-3-Clause"
] | null | null | null | fedex/services/availability_commitment_service.py | miczone/python-fedex | 1a17b45753b16b2551b0b8ba2c6aa65be8e73931 | [
"BSD-3-Clause"
] | null | null | null | fedex/services/availability_commitment_service.py | miczone/python-fedex | 1a17b45753b16b2551b0b8ba2c6aa65be8e73931 | [
"BSD-3-Clause"
] | null | null | null | """
Service Availability and Commitment Module
This package contains the shipping methods defined by Fedex's
ValidationAvailabilityAndCommitmentService WSDL file. Each is encapsulated in a class for
easy access. For more details on each, refer to the respective class's
documentation.
"""
import datetime
from ..base_service import FedexBaseService
| 38.039216 | 111 | 0.643557 |
43844440dd179ab3f122498113b16b020a8f05b8 | 15,375 | py | Python | xverse/transformer/_woe.py | gb-andreygsouza/XuniVerse | 74f4b9112c32a8f1411ae0c5a6de906f8d2e895a | [
"MIT"
] | null | null | null | xverse/transformer/_woe.py | gb-andreygsouza/XuniVerse | 74f4b9112c32a8f1411ae0c5a6de906f8d2e895a | [
"MIT"
] | null | null | null | xverse/transformer/_woe.py | gb-andreygsouza/XuniVerse | 74f4b9112c32a8f1411ae0c5a6de906f8d2e895a | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
import scipy.stats.stats as stats
import pandas.core.algorithms as algos
#from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array
from ..transformer import MonotonicBinning
pd.options.mode.chained_assignment = None
| 47.748447 | 117 | 0.625821 |
4385a715a45f63ba193550d4819fe3bbd3dc2013 | 7,908 | py | Python | cupy/linalg/product.py | okapies/cupy | 4e8394e5e0c4e420295cbc36819e8e0f7de90e9d | [
"MIT"
] | 1 | 2021-10-04T21:57:09.000Z | 2021-10-04T21:57:09.000Z | cupy/linalg/product.py | hephaex/cupy | 5cf50a93bbdebe825337ed7996c464e84b1495ba | [
"MIT"
] | 1 | 2019-08-05T09:36:13.000Z | 2019-08-06T12:03:01.000Z | cupy/linalg/product.py | hephaex/cupy | 5cf50a93bbdebe825337ed7996c464e84b1495ba | [
"MIT"
] | 1 | 2022-03-24T13:19:55.000Z | 2022-03-24T13:19:55.000Z | import numpy
import six
import cupy
from cupy import core
from cupy import internal
from cupy.linalg.solve import inv
from cupy.util import collections_abc
matmul = core.matmul
def dot(a, b, out=None):
    """Compute the dot product of two arrays.

    Two 2-D operands yield a matrix product; 1-D operands are contracted
    over their only axis. Higher-dimensional inputs are contracted over
    the last axis of ``a`` and the second-to-last axis of ``b``.

    Args:
        a (cupy.ndarray): The left argument.
        b (cupy.ndarray): The right argument.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The dot product of ``a`` and ``b``.

    .. seealso:: :func:`numpy.dot`
    """
    # Delegate to ndarray.dot, which dispatches on operand dimensionality.
    # TODO(okuta): check type
    return a.dot(b, out)
def vdot(a, b):
    """Compute the dot product of two flattened vectors.

    Each input is treated as a 1-D vector of its ``size`` elements; for a
    complex first operand the conjugate is taken first.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.

    Returns:
        cupy.ndarray: Zero-dimensional array of the dot product result.

    .. seealso:: :func:`numpy.vdot`
    """
    if a.size != b.size:
        raise ValueError('Axis dimension mismatch')
    # The vdot contract conjugates the first operand for complex input.
    left = a.conj() if a.dtype.kind == 'c' else a
    return core.tensordot_core(left, b, None, 1, 1, left.size, ())
def inner(a, b):
    """Compute the inner product of two arrays.

    The sum-product runs over the last axis of each argument.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.

    Returns:
        cupy.ndarray: The inner product of ``a`` and ``b``.

    .. seealso:: :func:`numpy.inner`
    """
    ndim_a = a.ndim
    ndim_b = b.ndim
    # A 0-d operand degenerates to an elementwise product.
    if ndim_a == 0 or ndim_b == 0:
        return cupy.multiply(a, b)
    if a.shape[-1] != b.shape[-1]:
        raise ValueError('Axis dimension mismatch')
    # Rotate the contraction axis (the last one) to the front of each array
    # so the contraction becomes a single (k, n) x (k, m) product.
    if ndim_a > 1:
        a = cupy.rollaxis(a, ndim_a - 1, 0)
    if ndim_b > 1:
        b = cupy.rollaxis(b, ndim_b - 1, 0)
    out_shape = a.shape[1:] + b.shape[1:]
    k = a.shape[0]
    return core.tensordot_core(a, b, None, a.size // k, b.size // k, k, out_shape)
def outer(a, b, out=None):
    """Compute the outer product of two vectors.

    Both inputs are flattened to 1-D vectors before the product is formed.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``.

    .. seealso:: :func:`numpy.outer`
    """
    rows = a.size
    cols = b.size
    shape = (rows, cols)
    if out is None:
        return core.tensordot_core(a, b, None, rows, cols, 1, shape)
    if out.size != rows * cols:
        raise ValueError('Output array has an invalid size')
    if not out.flags.c_contiguous:
        # A non-contiguous destination cannot receive the result directly;
        # compute into a temporary and copy element-wise.
        out[:] = core.tensordot_core(a, b, None, rows, cols, 1, shape)
        return out
    return core.tensordot_core(a, b, out, rows, cols, 1, shape)
def tensordot(a, b, axes=2):
    """Returns the tensor dot product of two arrays along specified axes.

    This is equivalent to compute dot product along the specified axes which
    are treated as one axis by reshaping.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.
        axes:
            - If it is an integer, then ``axes`` axes at the last of ``a`` and
              the first of ``b`` are used.
            - If it is a pair of sequences of integers, then these two
              sequences specify the list of axes for ``a`` and ``b``. The
              corresponding axes are paired for sum-product.

    Returns:
        cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the
        axes specified by ``axes``.

    .. seealso:: :func:`numpy.tensordot`
    """
    a_ndim = a.ndim
    b_ndim = b.ndim
    # 0-d operands admit no contraction axes; fall back to scalar multiply.
    if a_ndim == 0 or b_ndim == 0:
        if axes != 0 and axes != ((), ()):
            raise ValueError('An input is zero-dim while axes has dimensions')
        return cupy.multiply(a, b)
    if isinstance(axes, collections_abc.Sequence):
        # Explicit (a_axes, b_axes) pair; bare scalars are promoted to 1-tuples.
        if len(axes) != 2:
            raise ValueError('Axes must consist of two arrays.')
        a_axes, b_axes = axes
        if numpy.isscalar(a_axes):
            a_axes = a_axes,
        if numpy.isscalar(b_axes):
            b_axes = b_axes,
    else:
        # Integer form: contract the last `axes` axes of `a` against the
        # first `axes` axes of `b`.
        a_axes = tuple(six.moves.range(a_ndim - axes, a_ndim))
        b_axes = tuple(six.moves.range(axes))
    sum_ndim = len(a_axes)
    if sum_ndim != len(b_axes):
        raise ValueError('Axes length mismatch')
    # Each paired axis must have matching extent.
    for a_axis, b_axis in zip(a_axes, b_axes):
        if a.shape[a_axis] != b.shape[b_axis]:
            raise ValueError('Axis dimension mismatch')
    # Make the axes non-negative
    a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes])
    b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes])
    ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:]
    # k is the total extent of the contracted (now leading) axes.
    k = internal.prod(a.shape[:sum_ndim])
    # Avoid division by zero: core.tensordot_core returns zeros without
    # checking n, m consistency, thus allowing 0-length dimensions to work
    n = a.size // k if k != 0 else 0
    m = b.size // k if k != 0 else 0
    return core.tensordot_core(a, b, None, n, m, k, ret_shape)
def matrix_power(M, n):
    """Raise a square matrix to the integer power ``n``.

    Args:
        M (~cupy.ndarray): Matrix to raise by power n.
        n (~int): Power to raise matrix to.

    Returns:
        ~cupy.ndarray: Output array.

    .. note:: M must be of dtype `float32` or `float64`.

    ..seealso:: :func:`numpy.linalg.matrix_power`
    """
    if M.ndim != 2 or M.shape[0] != M.shape[1]:
        raise ValueError('input must be a square array')
    if not isinstance(n, six.integer_types):
        raise TypeError('exponent must be an integer')

    if n == 0:
        return cupy.identity(M.shape[0], dtype=M.dtype)
    if n < 0:
        # Negative powers operate on the inverse of M.
        M = inv(M)
        n = -n

    # Small exponents: multiply directly rather than decomposing.
    if n == 1:
        return M
    if n == 2:
        return cupy.matmul(M, M)
    if n == 3:
        return cupy.matmul(cupy.matmul(M, M), M)

    # Exponentiation by squaring: scan the bits of n from least significant,
    # squaring the running power and folding it in wherever a bit is set.
    accum = None
    running = None
    for bit in cupy.binary_repr(n)[::-1]:
        running = M if running is None else cupy.matmul(running, running)
        if bit == '1':
            accum = running if accum is None else cupy.matmul(accum, running)
    return accum
def kron(a, b):
    """Returns the kronecker product of two arrays.

    Args:
        a (~cupy.ndarray): The first argument.
        b (~cupy.ndarray): The second argument.

    Returns:
        ~cupy.ndarray: Output array.

    .. seealso:: :func:`numpy.kron`
    """
    a_ndim = a.ndim
    b_ndim = b.ndim
    # A 0-d operand degenerates to an elementwise product.
    if a_ndim == 0 or b_ndim == 0:
        return cupy.multiply(a, b)
    ndim = b_ndim
    a_shape = a.shape
    b_shape = b.shape
    # Pad the lower-rank operand's shape with leading 1s so both ranks match;
    # after this branch `ndim` equals max(a_ndim, b_ndim).
    if a_ndim != b_ndim:
        if b_ndim > a_ndim:
            a_shape = (1,) * (b_ndim - a_ndim) + a_shape
        else:
            b_shape = (1,) * (a_ndim - b_ndim) + b_shape
            ndim = a_ndim
    axis = ndim - 1
    # Outer product with shape a_shape + b_shape, then repeatedly merge
    # paired axes via cupy's internal concatenate_method to obtain the
    # Kronecker block layout (internal API -- semantics assumed from usage).
    out = core.tensordot_core(a, b, None, a.size, b.size, 1, a_shape + b_shape)
    for _ in six.moves.range(ndim):
        out = core.concatenate_method(out, axis=axis)
    return out
| 27.175258 | 79 | 0.592059 |
4386319503aab2a6844b6ef0973d20403a850ff6 | 998 | py | Python | fibo.py | aligoren/pyalgo | 8aa58143d3301f70ed7189ca86ce0c7886f92e8c | [
"MIT"
] | 22 | 2015-05-04T14:16:18.000Z | 2021-05-12T07:21:14.000Z | fibo.py | aligoren/pyalgo | 8aa58143d3301f70ed7189ca86ce0c7886f92e8c | [
"MIT"
] | null | null | null | fibo.py | aligoren/pyalgo | 8aa58143d3301f70ed7189ca86ce0c7886f92e8c | [
"MIT"
] | 12 | 2015-12-26T05:00:24.000Z | 2022-02-28T05:03:13.000Z |
# NOTE(review): fibo_main() is presumably defined earlier in this file (not
# visible in this chunk); it drives the Fibonacci run whose profile output
# is quoted in the string literal that follows.
fibo_main()
# profiling result for 47 numbers
# profile: python -m profile fibo.py
"""
-1273940835 function calls (275 primitive calls) in 18966.707 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
90 0.000 0.000 0.001 0.000 cp857.py:18(encode)
1 0.000 0.000 18966.707 18966.707 fibo.py:1(<module>)
-1273941064/46 18966.697 -0.000 18966.697 412.319 fibo.py:1(fibo)
1 0.001 0.001 18966.707 18966.707 fibo.py:4(main)
90 0.000 0.000 0.000 0.000 {built-in method charmap_encode}
1 0.000 0.000 18966.707 18966.707 {built-in method exec}
45 0.009 0.000 0.010 0.000 {built-in method print}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Prof
iler' objects}
""" | 33.266667 | 80 | 0.607214 |
4387549ca0c49a838b5d253586eefe17b1221bbf | 9,050 | py | Python | trt_util/common.py | yihui8776/TensorRT-DETR | 1f32e9a2f98e26ec5b2376f9a2695193887430fb | [
"Apache-2.0"
] | null | null | null | trt_util/common.py | yihui8776/TensorRT-DETR | 1f32e9a2f98e26ec5b2376f9a2695193887430fb | [
"Apache-2.0"
] | null | null | null | trt_util/common.py | yihui8776/TensorRT-DETR | 1f32e9a2f98e26ec5b2376f9a2695193887430fb | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ~~~Medcare AI Lab~~~
# TensorRT
#
import pycuda.driver as cuda
#https://documen.tician.de/pycuda/driver.html
import pycuda.autoinit
import numpy as np
import tensorrt as trt
from .calibrator import Calibrator
import sys, os
import time
# TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
# TRT_LOGGER = trt.Logger(trt.Logger.INFO)
# Shared TensorRT logger used by the builder, parser and runtime below
# (default verbosity; the commented lines show the noisier alternatives).
TRT_LOGGER = trt.Logger()
# Allocate host and device buffers, and create a stream.
# do inference multi outputs
# The onnx path is used for Pytorch models.
# int8 quant
def build_engine_onnx_v2(onnx_file_path="", engine_file_path="",fp16_mode=False, int8_mode=False, \
        max_batch_size=1,calibration_stream=None, calibration_table_path="", save_engine=False):
    """Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it.

    Args:
        onnx_file_path: path of the ONNX model to convert.
        engine_file_path: path of the serialized engine; if the file already
            exists it is deserialized and returned without rebuilding.
        fp16_mode: enable the FP16 builder flag.
        int8_mode: enable the INT8 builder flag; requires `calibration_stream`.
        max_batch_size: builder's implicit-batch maximum batch size.
        calibration_stream: data source feeding the INT8 calibrator.
        calibration_table_path: where the INT8 calibration cache is stored.
        save_engine: serialize a freshly built engine to `engine_file_path`.

    Returns:
        A TensorRT engine, or None when building fails.
    """
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network(1) as network,\
            builder.create_builder_config() as config,trt.OnnxParser(network, TRT_LOGGER) as parser:
            # parse onnx model file
            # NOTE(review): quit() is intended for interactive sessions;
            # raising FileNotFoundError would be safer for library use.
            if not os.path.exists(onnx_file_path):
                quit(f'[Error]ONNX file {onnx_file_path} not found')
            print(f'[INFO] Loading ONNX file from path {onnx_file_path}...')
            with open(onnx_file_path, 'rb') as model:
                print('[INFO] Beginning ONNX file parsing')
                parser.parse(model.read())
            # An empty network means the parser rejected the model.
            assert network.num_layers > 0, '[Error] Failed to parse ONNX model. \
                Please check if the ONNX model is compatible '
            print('[INFO] Completed parsing of ONNX file')
            print(f'[INFO] Building an engine from file {onnx_file_path}; this may take a while...')
            # build trt engine
            # config.max_workspace_size = 2 << 30 # 2GB
            builder.max_batch_size = max_batch_size
            config.max_workspace_size = 2 << 30 # 2GB
            if fp16_mode:
                config.set_flag(trt.BuilderFlag.FP16)
            if int8_mode:
                #builder.int8_mode = int8_mode
                config.set_flag(trt.BuilderFlag.INT8)
                # INT8 needs calibration data to pick activation ranges.
                assert calibration_stream, '[Error] a calibration_stream should be provided for int8 mode'
                config.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
                # builder.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
                print('[INFO] Int8 mode enabled')
            #engine = builder.build_cuda_engine(network)
            engine = builder.build_engine(network, config)
            if engine is None:
                print('[INFO] Failed to create the engine')
                return None
            print("[INFO] Completed creating the engine")
            if save_engine:
                # Persist the engine so later runs take the fast path below.
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
    if os.path.exists(engine_file_path):
        # If a serialized engine exists, load it instead of building a new one.
        print(f"[INFO] Reading engine from file {engine_file_path}")
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine(max_batch_size, save_engine)
| 42.890995 | 189 | 0.650276 |
4388c3265a288b272ad7c01a54a34148e2ab938e | 2,506 | py | Python | src/init.py | inpanel/inpanel-desktop | bff4a6accdf8a2976c722adc65f3fa2fe6650448 | [
"MIT"
] | 1 | 2020-03-18T11:40:56.000Z | 2020-03-18T11:40:56.000Z | src/init.py | inpanel/inpanel-desktop | bff4a6accdf8a2976c722adc65f3fa2fe6650448 | [
"MIT"
] | null | null | null | src/init.py | inpanel/inpanel-desktop | bff4a6accdf8a2976c722adc65f3fa2fe6650448 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8-*-
import tkinter.messagebox
from tkinter import Button, Label, Tk
from utils.functions import set_window_center
from utils.sqlite_helper import DBHelper
from inpanel import App
if __name__ == "__main__":
    # InitWindow is presumably the Tk-based bootstrap window defined earlier
    # in this module (not visible in this chunk); mainloop() blocks until
    # the window is closed.
    APP_INIT = InitWindow()
    APP_INIT.mainloop()
| 29.482353 | 83 | 0.57901 |
4389760f68989bd2ea1837354a093cc8ebd81958 | 7,406 | py | Python | Toolkits/CMake/hunter/packages/sugar/python/sugar/sugar_warnings_wiki_table_generator.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 102 | 2015-01-28T20:51:35.000Z | 2021-04-09T11:36:01.000Z | Toolkits/CMake/hunter/packages/sugar/python/sugar/sugar_warnings_wiki_table_generator.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 56 | 2015-01-01T19:22:34.000Z | 2020-01-28T13:48:14.000Z | python/sugar/sugar_warnings_wiki_table_generator.py | idscan/sugar | 0a64153710d039dc081698be83562cdf464c84dc | [
"BSD-2-Clause"
] | 28 | 2015-03-05T19:47:08.000Z | 2021-01-17T21:07:31.000Z | #!/usr/bin/env python3
# Copyright (c) 2014, Ruslan Baratov
# All rights reserved.
"""
* Wiki table for `leathers` C++ project
Expected format:
### Main table
Name | Clang | GCC | MSVC |
-----------------------------|----------|----------|------|
static-ctor-not-thread-safe | *no* | *no* | 4640 |
switch | **same** | **same** | 4062 |
switch-enum | **same** | **same** | 4061 |
### Xcode/Clang table
Clang | Xcode | Objective-C |
-----------------------|--------------------------------|-------------|
bool-conversion | CLANG_WARN_BOOL_CONVERSION | no |
c++11-extensions | CLANG_WARN_CXX0X_EXTENSIONS | no |
strict-selector-match | GCC_WARN_STRICT_SELECTOR_MATCH | yes |
undeclared-selector | GCC_WARN_UNDECLARED_SELECTOR | yes |
"""
| 27.634328 | 75 | 0.626924 |
4389b795742ce4092fa55a8e1be92e8c6adf1239 | 2,945 | py | Python | neutron/plugins/ofagent/agent/ports.py | armando-migliaccio/neutron-1 | e31861c15bc73e65a7c22212df2a56f9e45aa0e4 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/ofagent/agent/ports.py | armando-migliaccio/neutron-1 | e31861c15bc73e65a7c22212df2a56f9e45aa0e4 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/ofagent/agent/ports.py | armando-migliaccio/neutron-1 | e31861c15bc73e65a7c22212df2a56f9e45aa0e4 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
PORT_NAME_LEN = 14
PORT_NAME_PREFIXES = [
"tap", # common cases, including ovs_use_veth=True
"qvo", # nova hybrid interface driver
"qr-", # l3-agent INTERNAL_DEV_PREFIX (ovs_use_veth=False)
"qg-", # l3-agent EXTERNAL_DEV_PREFIX (ovs_use_veth=False)
]
def _is_neutron_port(name):
"""Return True if the port name looks like a neutron port."""
if len(name) != PORT_NAME_LEN:
return False
for pref in PORT_NAME_PREFIXES:
if name.startswith(pref):
return True
return False
def get_normalized_port_name(interface_id):
    """Map a neutron device id (uuid) to its "normalized" port name.

    Must stay in sync with the ML2 plugin's _device_to_port_id().

    The switch is assumed to expose the OS interface name as the OpenFlow
    port name. That holds for Open vSwitch but not universally -- LINC,
    for example, uses names like "LogicalSwitch0-Port2".

    The real on-host prefix can also differ (e.g. "qvo" with the hybrid
    interface driver), but the agent and plugin consistently use "tap"
    for simplicity, so some care is needed when talking to the switch.
    """
    candidate = "tap" + interface_id
    return candidate[:PORT_NAME_LEN]
def _normalize_port_name(name):
    """Rewrite a known device-name prefix to "tap".

    See comments in _get_ofport_name.
    """
    matched = next(
        (pref for pref in PORT_NAME_PREFIXES if name.startswith(pref)), None)
    if matched is None:
        return name
    return "tap" + name[len(matched):]
| 33.089888 | 78 | 0.69202 |
4389f5cc4e8592cb8c9777c1297c9ec965389eb9 | 1,947 | py | Python | pdf/wechat/step.py | damaainan/html2md | 0d241381e716d64bbcacad013c108857e815bb15 | [
"MIT"
] | null | null | null | pdf/wechat/step.py | damaainan/html2md | 0d241381e716d64bbcacad013c108857e815bb15 | [
"MIT"
] | null | null | null | pdf/wechat/step.py | damaainan/html2md | 0d241381e716d64bbcacad013c108857e815bb15 | [
"MIT"
] | null | null | null | # -*- coding=utf-8 -*-
from zwechathihu.mypdf import GenPdf
from db.mysqlite import simpleToolSql
# Articles to export: each entry pairs a WeChat article URL with the output
# name used for the generated file.
data=[{"url": "http://mp.weixin.qq.com/s?__biz=MzAxODQxMDM0Mw==&mid=2247484852&idx=1&sn=85b50b8b0470bb4897e517955f4e5002&chksm=9bd7fbbcaca072aa75e2a241064a403fde1e579d57ab846cd8537a54253ceb2c8b93cc3bf38e&scene=21#wechat_redirect", "name": "001"}
]
# path = '***/' || ''
# Reference snippet: render each URL to PDF through GenPdf, sanitizing the
# name for use as a file title.
# for val in data:
# # print(val["url"])
# # print(val["name"])
# pdf = GenPdf()
# title = val["name"].replace("/", "-")
# print(title)
# pdf.deal(val["url"], title, '')
# Reference snippet: direct sqlite access through simpleToolSql.
# sql = simpleToolSql("url")
# # sql.execute("insert into wx_article (id,name,age) values (?,?,?);",[(1,'abc',15),(2,'bca',16)])
# res = sql.query("select * from wx_article;")
# print(res)
# res = sql.query("select * from wx_article where id=?;",(3,))
# print(res)
# sql.close()
# db url
# db url
# db
# addUrl()
# NOTE(review): updateUrl() and getListFromSql() are not defined in this
# chunk -- presumably defined or imported elsewhere; confirm before running.
updateUrl(1)
res = getListFromSql()
print(res) | 29.059701 | 257 | 0.634309 |
438cea957a4d584b046abd2a8ee5c64fd504407c | 1,168 | py | Python | pipeline/validators/handlers.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 31 | 2021-07-28T13:06:11.000Z | 2022-03-10T12:16:44.000Z | pipeline/validators/handlers.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 483 | 2021-07-29T03:17:44.000Z | 2022-03-31T13:03:04.000Z | pipeline/validators/handlers.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 29 | 2021-07-28T13:06:21.000Z | 2022-03-25T06:18:18.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making PaaS (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.dispatch import receiver
from pipeline.core.flow.event import EndEvent
from pipeline.core.flow.signals import post_new_end_event_register
from pipeline.validators import rules
| 46.72 | 115 | 0.808219 |
438e9f1f07ffd73b9b9fd9f25c52f215537b1381 | 1,358 | py | Python | NumPy/Array Basics/Random Shuffle/tests/test_task.py | jetbrains-academy/Python-Libraries-NumPy | 7ce0f2d08f87502d5d97bbc6921f0566184d4ebb | [
"MIT"
] | null | null | null | NumPy/Array Basics/Random Shuffle/tests/test_task.py | jetbrains-academy/Python-Libraries-NumPy | 7ce0f2d08f87502d5d97bbc6921f0566184d4ebb | [
"MIT"
] | 4 | 2022-01-14T10:40:47.000Z | 2022-02-14T13:01:13.000Z | NumPy/Array Basics/Random Shuffle/tests/test_task.py | jetbrains-academy/Python-Libraries-NumPy | 7ce0f2d08f87502d5d97bbc6921f0566184d4ebb | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from task import arr, permuted_2d, fully_random
| 46.827586 | 115 | 0.635493 |
438f17abc40a90f956704fbac8d28a04a5de63c3 | 2,409 | py | Python | resources/lib/channelui.py | lausitzer/plugin.video.mediathekview | 7f2086240625b9b4f8d50af114f8f47654346ed1 | [
"MIT"
] | null | null | null | resources/lib/channelui.py | lausitzer/plugin.video.mediathekview | 7f2086240625b9b4f8d50af114f8f47654346ed1 | [
"MIT"
] | null | null | null | resources/lib/channelui.py | lausitzer/plugin.video.mediathekview | 7f2086240625b9b4f8d50af114f8f47654346ed1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The channel model UI module
Copyright 2017-2018, Leo Moll and Dominik Schlsser
SPDX-License-Identifier: MIT
"""
# pylint: disable=import-error
import os
import xbmcgui
import xbmcplugin
import resources.lib.mvutils as mvutils
from resources.lib.channel import Channel
| 26.766667 | 74 | 0.584475 |
438f4c0d3f4d94dad9a093f3100bc1608c38e26a | 6,838 | py | Python | getconf.py | smk762/Dragonhound | 7cbaed2779afec47fcbf2481d0dae61daa4c11da | [
"MIT"
] | 3 | 2019-01-06T08:00:11.000Z | 2019-03-13T13:24:23.000Z | getconf.py | smk762/Dragonhound | 7cbaed2779afec47fcbf2481d0dae61daa4c11da | [
"MIT"
] | 1 | 2018-11-27T17:16:57.000Z | 2018-12-15T07:51:26.000Z | getconf.py | smk762/Dragonhound | 7cbaed2779afec47fcbf2481d0dae61daa4c11da | [
"MIT"
] | 2 | 2018-12-15T14:03:41.000Z | 2019-01-26T14:22:07.000Z | #!/usr/bin/env python3
#Credit to @Alright for the RPCs
import re
import os
import requests
import json
import platform
# define function that fetchs rpc creds from .conf
# define function that posts json data
# Return current -pubkey=
# return latest batontxid from all publishers
#VANILLA RPC
return(getlastsegidstakes_result['result'])
| 32.254717 | 90 | 0.620942 |
43918d07649e9b1f2f91c59a28e777ac9f008513 | 46,128 | py | Python | cwr/parser/decoder/dictionary.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 37 | 2015-04-21T15:33:53.000Z | 2022-02-07T00:02:29.000Z | cwr/parser/decoder/dictionary.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 86 | 2015-02-01T22:26:02.000Z | 2021-07-09T08:49:36.000Z | cwr/parser/decoder/dictionary.py | orenyodfat/CWR-DataApi | f3b6ba8308c901b6ab87073c155c08e30692333c | [
"MIT"
] | 27 | 2015-01-26T16:01:09.000Z | 2021-11-08T23:53:55.000Z | # -*- coding: utf-8 -*-
from cwr.acknowledgement import AcknowledgementRecord, MessageRecord
from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, \
InterestedPartyForAgreementRecord
from cwr.group import Group, GroupHeader, GroupTrailer
from cwr.info import AdditionalRelatedInfoRecord
from cwr.parser.decoder.common import Decoder
from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, \
PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord
from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, \
NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, \
NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, \
NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord
from cwr.transmission import Transmission, TransmissionTrailer, \
TransmissionHeader
from cwr.work import RecordingDetailRecord, ComponentRecord, \
AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, \
InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, \
WorkRecord
from cwr.file import CWRFile, FileTag
from cwr.other import AVIKey, VISAN
from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue
"""
Classes for transforming dictionaries into instances of the CWR model.
There is a decoder for each of the model classes, and all of them expect a
dictionary having at least one key for each field, having the same name as the
field, which will refer to a valid value.
As said, the values on the dictionary should be valid values, for example if
an integer is expected, then the dictionary contains an integer. The values
contained in the dictionary entries should not need to be parsed.
These decoders are useful for handling JSON transmissions or Mongo databases.
"""
__author__ = 'Bernardo Martnez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
| 45.223529 | 96 | 0.560701 |
43924097832cb6270f8da8544d56269f7551b02e | 6,651 | py | Python | prebuilt/twrp_fonts.py | imranpopz/android_bootable_recovery-1 | ec4512ad1e20f640b3dcd6faf8c04cae711e4f30 | [
"Apache-2.0"
] | 95 | 2018-10-31T12:12:01.000Z | 2022-03-20T21:30:48.000Z | prebuilt/twrp_fonts.py | imranpopz/android_bootable_recovery-1 | ec4512ad1e20f640b3dcd6faf8c04cae711e4f30 | [
"Apache-2.0"
] | 34 | 2018-10-22T11:01:15.000Z | 2021-11-21T14:10:26.000Z | prebuilt/twrp_fonts.py | imranpopz/android_bootable_recovery-1 | ec4512ad1e20f640b3dcd6faf8c04cae711e4f30 | [
"Apache-2.0"
] | 81 | 2018-10-23T08:37:20.000Z | 2022-03-20T00:27:08.000Z | #!/usr/bin/env python
# -*- coding: utf8 -*-
import codecs,os,gzip,ctypes,ctypes.util,sys
from struct import *
from PIL import Image, ImageDraw, ImageFont
# ====== Python script to convert TrueTypeFonts to TWRP's .dat format ======
# This script was originally made by https://github.com/suky for his chinese version of TWRP
# and then translated to English by feilplane at #twrp of irc.freenode.net.
# However, it was not compatible with vanilla TWRP, so https://github.com/Tasssadar rewrote
# most of it and it now has very little in common with the original script.
quiet = Reference(False)
if __name__ == "__main__":
    # Mutable option holders with their defaults. Reference is a small
    # setter/getter wrapper defined elsewhere in this script.
    fontsize = Reference(20)
    out_fname = Reference("font.dat")
    voffset = Reference(None)
    padding = Reference(0)
    font_fname = Reference(None)
    preview = Reference(None)
    # Hand-rolled argv table: [short flag, long "--name=" prefix,
    # Reference sink, converter]. A converter of None marks a boolean switch.
    arg_parser = [
        ["-s", "--size=", fontsize, int],
        ["-o", "--output=", out_fname, str],
        ["-p", "--preview=", preview, str],
        [None, "--padding=", padding, int],
        ["-q", "--quiet", quiet, None],
        [None, "--voffset=", voffset, int]
    ]
    argv = sys.argv
    argc = len(argv)
    i = 1
    while i < argc:
        arg = argv[i]
        arg_next = argv[i+1] if i+1 < argc else None
        if arg == "--help" or arg == "-h":
            # NOTE(review): "print (...) % (...)" only works on Python 2,
            # where print is a statement; on Python 3 the % would apply to
            # print's None return value and raise TypeError.
            print ("This script converts TrueTypeFonts to .dat file for TWRP recovery.\n\n"
                "Usage: %s [SWITCHES] [TRUETYPE FILE]\n\n"
                " -h, --help - print help\n"
                " -o, --output=[FILE] - output file or '-' for stdout (default: font.dat)\n"
                " -p, --preview=[FILE] - generate font preview to png file\n"
                " --padding=[PIXELS] - horizontal padding around each character (default: 0)\n"
                " -q, --quiet - Do not print any output\n"
                " -s, --size=[SIZE IN PIXELS] - specify font size in points (default: 20)\n"
                " --voffset=[PIXELS] - vertical offset (default: font size*0.25)\n\n"
                "Example:\n"
                " %s -s 40 -o ComicSans_40.dat -p preview.png ComicSans.ttf\n") % (
                    sys.argv[0], sys.argv[0]
                )
            exit(0)
        # Try to match this argument against the option table.
        found = False
        for p in arg_parser:
            if p[0] and arg == p[0] and (arg_next or not p[3]):
                # Short option: the value (if any) comes from the next argv slot.
                if p[3]:
                    p[2].set(p[3](arg_next))
                else:
                    p[2].set(True)
                i += 1
                found = True
                break
            elif p[1] and arg.startswith(p[1]):
                # Long option: the value is embedded after the "--name=" prefix.
                if p[3]:
                    p[2].set(p[3](arg[len(p[1]):]))
                else:
                    p[2].set(True)
                found = True
                break
        if not found:
            # Anything that is not a recognized switch is taken as the font path.
            font_fname.set(arg)
        i += 1
    # Default vertical offset is a quarter of the font size.
    if not voffset.get():
        voffset.set(int(fontsize.get()*0.25))
    # Writing to stdout implies quiet mode so log output cannot corrupt the data.
    if out_fname.get() == "-":
        quiet.set(True)
    log("Loading font %s...\n" % font_fname.get())
    font = ImageFont.truetype(font_fname.get(), fontsize.get(), 0, "utf-32be")
    cwidth = 0
    cheight = font.getsize('A')[1]
    offsets = []
    renders = []
    data = bytes()
    # temp Image and ImageDraw to get access to textsize
    res = Image.new('L', (1, 1), 0)
    res_draw = ImageDraw.Draw(res)
    # Measure each character (ASCII 32..127) and render it to a separate Image
    log("Rendering characters...\n")
    for i in range(32, 128):
        w, h = res_draw.textsize(chr(i), font)
        w += padding.get()*2
        offsets.append(cwidth)
        cwidth += w
        if h > cheight:
            cheight = h
        ichr = Image.new('L', (w, cheight*2))
        ichr_draw = ImageDraw.Draw(ichr)
        ichr_draw.text((padding.get(), 0), chr(i), 255, font)
        renders.append(ichr)
    # Twice the height to account for under-the-baseline characters
    cheight *= 2
    # Create the result bitmap
    log("Creating result bitmap...\n")
    res = Image.new('L', (cwidth, cheight), 0)
    res_draw = ImageDraw.Draw(res)
    # Paste all characters into result bitmap
    for i in range(len(renders)):
        res.paste(renders[i], (offsets[i], 0))
        # uncomment to draw lines separating each character (for debug)
        #res_draw.rectangle([offsets[i], 0, offsets[i], cheight], outline="blue")
    # crop the blank areas on top and bottom
    (_, start_y, _, end_y) = res.getbbox()
    res = res.crop((0, start_y, cwidth, end_y))
    cheight = (end_y - start_y) + voffset.get()
    new_res = Image.new('L', (cwidth, cheight))
    new_res.paste(res, (0, voffset.get()))
    res = new_res
    # save the preview
    if preview.get():
        log("Saving preview to %s...\n" % preview.get())
        res.save(preview.get())
    # Pack the data.
    # The "data" is a B/W bitmap with all 96 characters next to each other
    # on one line. It is as wide as all the characters combined and as
    # high as the tallest character, plus padding.
    # Each byte contains info about eight pixels, starting from
    # highest to lowest bit:
    # bits: | 7 6 5 4 3 2 1 0 | 15 14 13 12 11 10 9 8 | ...
    # pixels: | 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 | ...
    log("Packing data...\n")
    bit = 0
    bit_itr = 0
    # NOTE(review): Image.tostring() only exists in old PIL/Pillow (<3.0);
    # modern Pillow renamed it to tobytes().
    for c in res.tostring():
        # FIXME: How to handle antialiasing?
        # if c != '\x00':
        # In Python3, c is int, in Python2, c is string. Because of reasons.
        try:
            fill = (ord(c) >= 127)
        except TypeError:
            fill = (c >= 127)
        if fill:
            bit |= (1 << (7-bit_itr))
        bit_itr += 1
        if bit_itr >= 8:
            data += pack("<B", bit)
            bit_itr = 0
            bit = 0
    # Write them to the file.
    # Format:
    # 000: width
    # 004: height
    # 008: offsets of each characters (96*uint32)
    # 392: data as described above
    log("Writing to %s...\n" % out_fname.get())
    if out_fname.get() == "-":
        write_data(sys.stdout, cwidth, cheight, offsets, data)
    else:
        with open(out_fname.get(), 'wb') as f:
            write_data(f, cwidth, cheight, offsets, data)
    exit(0)
4392cd17a2182a5ad123dad587354133d5fbcf62 | 3,471 | py | Python | open/users/serializers.py | lawrendran/open | d136f694bafab647722c78be6f39ec79d589f774 | [
"MIT"
] | 105 | 2019-06-01T08:34:47.000Z | 2022-03-15T11:48:36.000Z | open/users/serializers.py | lawrendran/open | d136f694bafab647722c78be6f39ec79d589f774 | [
"MIT"
] | 111 | 2019-06-04T15:34:14.000Z | 2022-03-12T21:03:20.000Z | open/users/serializers.py | lawrendran/open | d136f694bafab647722c78be6f39ec79d589f774 | [
"MIT"
] | 26 | 2019-09-04T06:06:12.000Z | 2022-01-03T03:40:11.000Z | import pytz
from rest_auth.serializers import TokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from rest_framework.fields import (
CharField,
CurrentUserDefault,
HiddenField,
UUIDField,
ChoiceField,
)
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.validators import UniqueValidator
from django.contrib.auth.hashers import check_password
from open.users.models import User
# TODO - this view and serializer is on hold as you figure out registration (later)
| 30.182609 | 95 | 0.661481 |
4393bd0d5f4f1245ce5fd0c8893a7351e5ec7276 | 3,589 | py | Python | tests/en/test_asr.py | rhasspy/rhasspy-test | 0c180bfdd370f18ad2f8b9ee483ea5520161ab74 | [
"MIT"
] | null | null | null | tests/en/test_asr.py | rhasspy/rhasspy-test | 0c180bfdd370f18ad2f8b9ee483ea5520161ab74 | [
"MIT"
] | null | null | null | tests/en/test_asr.py | rhasspy/rhasspy-test | 0c180bfdd370f18ad2f8b9ee483ea5520161ab74 | [
"MIT"
] | 1 | 2020-07-25T13:59:25.000Z | 2020-07-25T13:59:25.000Z | """Automated speech recognition tests."""
import os
import sys
import unittest
from pathlib import Path
import requests
from rhasspyhermes.asr import AsrTextCaptured
from rhasspyhermes.nlu import NluIntent
| 35.534653 | 87 | 0.655893 |
4393be2aca5a25d561f41614d1c61c91497bb77e | 775 | py | Python | speech/melgan/model/multiscale.py | OthmaneJ/deep-tts | 93059d568c5b458d3f0d80eb294d397ecace8731 | [
"MIT"
] | 213 | 2020-05-21T12:37:37.000Z | 2022-03-28T16:36:07.000Z | speech/melgan/model/multiscale.py | OthmaneJ/deep-tts | 93059d568c5b458d3f0d80eb294d397ecace8731 | [
"MIT"
] | 36 | 2020-08-14T08:23:34.000Z | 2022-02-07T11:26:17.000Z | speech/melgan/model/multiscale.py | OthmaneJ/deep-tts | 93059d568c5b458d3f0d80eb294d397ecace8731 | [
"MIT"
] | 38 | 2020-05-21T20:03:30.000Z | 2022-01-19T16:31:15.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
from .identity import Identity
| 25.833333 | 83 | 0.602581 |
4393d8ec0408fae06ace653dd14db15c556ea5c5 | 2,516 | py | Python | main.py | AntonioLourencos/jogo-da-velha | 3b3e46e2d2f8c064f0df6a383bc5a0fe6bb01f63 | [
"MIT"
] | 10 | 2020-12-24T01:40:54.000Z | 2021-06-03T01:22:34.000Z | main.py | AntonioLourencos/jogo-da-velha | 3b3e46e2d2f8c064f0df6a383bc5a0fe6bb01f63 | [
"MIT"
] | 4 | 2020-12-26T15:09:05.000Z | 2021-10-01T13:36:16.000Z | main.py | AntonioLourencos/jogo-da-velha | 3b3e46e2d2f8c064f0df6a383bc5a0fe6bb01f63 | [
"MIT"
] | 3 | 2021-05-14T20:20:02.000Z | 2021-08-09T19:10:12.000Z | from game import about_button, start_button, play_sound, center_pos
import pygame
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (0, 255, 0)
pygame.init()
pygame.font.init()
pygame.mixer.init()
FONT = pygame.font.Font("assets/font.ttf", 70)
FONT_MIN = pygame.font.Font("assets/font.ttf", 30)
window = pygame.display.set_mode([600,600])
running = True
clock = pygame.time.Clock()
nickname = " "
me = "X"
ia = "O"
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
play_sound("minimize_001")
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_BACKSPACE and len(nickname) > 2:
nickname = list(nickname)
nickname.pop(-2)
nickname = "".join(nickname)
play_sound("error_001")
elif len(nickname.strip()) <= 10:
play_sound("bong_001")
if len(nickname) > 1:
nickname = list(nickname)
nickname.pop(-1)
nickname = "".join(nickname)
nickname += event.unicode
nickname += " "
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
if me == "X":
me = "O"
ia = "X"
else:
me = "X"
ia = "O"
window.fill(BLACK)
title = FONT.render("JOGO DA VELHA", True, WHITE)
title_pos = center_pos(title.get_rect(), 10)
window.blit(title, title_pos)
nickname_label = FONT.render("SEU NOME", True, WHITE)
nickname_label_pos = center_pos(nickname_label.get_rect(), 100)
window.blit(nickname_label, nickname_label_pos)
nickname_render = FONT.render(nickname, True, BLACK)
nickname_rect = nickname_render.get_rect()
nickname_pos = center_pos(nickname_rect, 180)
pygame.draw.rect(window, WHITE, (nickname_pos[0], 180, nickname_rect[2], nickname_rect[3]))
window.blit(nickname_render, nickname_pos)
choice_render = FONT.render(f"JOGUE COM {me}", True, WHITE)
window.blit(choice_render, center_pos(choice_render.get_rect(), 280))
my_name = FONT_MIN.render(f"DESENVOLVIDO POR MARIA EDUARDA DE AZEVEDO", True, WHITE)
window.blit(my_name, center_pos(my_name.get_rect(), 560))
start_button(window, "JOGAR", 380, me, ia, nickname.strip(), 10)
about_button(window, 450, 10)
pygame.display.flip()
clock.tick(60) | 31.45 | 95 | 0.591017 |
43952014f41c3fec2a8b86f2f567eb906cd4cf2f | 1,463 | py | Python | schedule/views.py | 1donggri/teamProject | 9b4f37c2a93b065529ce9dd245f9717a783dd456 | [
"CC-BY-3.0"
] | null | null | null | schedule/views.py | 1donggri/teamProject | 9b4f37c2a93b065529ce9dd245f9717a783dd456 | [
"CC-BY-3.0"
] | null | null | null | schedule/views.py | 1donggri/teamProject | 9b4f37c2a93b065529ce9dd245f9717a783dd456 | [
"CC-BY-3.0"
] | null | null | null | from django.shortcuts import render, redirect
from .models import Post
from .forms import ScheduleForm
from django.core.paginator import Paginator
# Create your views here. | 34.833333 | 75 | 0.6473 |
43956cd7582f0725f3e08ed11af962dc403ba2f7 | 402 | py | Python | archetype/settings/local_stg.py | kingsdigitallab/archetype-django | 6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc | [
"MIT"
] | 1 | 2018-11-18T22:42:09.000Z | 2018-11-18T22:42:09.000Z | archetype/settings/local_stg.py | kingsdigitallab/archetype-django | 6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc | [
"MIT"
] | null | null | null | archetype/settings/local_stg.py | kingsdigitallab/archetype-django | 6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc | [
"MIT"
] | null | null | null | from .base import * # noqa
CACHE_REDIS_DATABASE = '1'
CACHES['default']['LOCATION'] = '127.0.0.1:6379:' + CACHE_REDIS_DATABASE
INTERNAL_IPS = INTERNAL_IPS + ('', )
ALLOWED_HOSTS = ['']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'app_archetype_stg',
'USER': 'app_archetype',
'PASSWORD': '',
'HOST': ''
},
}
| 22.333333 | 72 | 0.58209 |
4397c55661379269054e0b0a47adf3a823197ee1 | 173 | py | Python | website/sites/admin.py | vnaskos/Website | 1c2adb0985f3932ddeca12025a2d216d2470cb63 | [
"MIT"
] | null | null | null | website/sites/admin.py | vnaskos/Website | 1c2adb0985f3932ddeca12025a2d216d2470cb63 | [
"MIT"
] | null | null | null | website/sites/admin.py | vnaskos/Website | 1c2adb0985f3932ddeca12025a2d216d2470cb63 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.]
from website.sites.models import Post
| 15.727273 | 38 | 0.768786 |
43985e0c9aab5f6373fb70168960c90190116e6d | 4,005 | py | Python | mcts.py | korbi98/TicTacToeGo_Zero | b8ea4562f3ddf914a53fc380f2266f13ab887e04 | [
"MIT"
] | null | null | null | mcts.py | korbi98/TicTacToeGo_Zero | b8ea4562f3ddf914a53fc380f2266f13ab887e04 | [
"MIT"
] | null | null | null | mcts.py | korbi98/TicTacToeGo_Zero | b8ea4562f3ddf914a53fc380f2266f13ab887e04 | [
"MIT"
] | 1 | 2021-12-20T12:03:49.000Z | 2021-12-20T12:03:49.000Z | # Monte Carlo tree search for TicTacToe
import numpy as np
from tictactoe import Tictactoe
import copy
from random import choice
from tree import Node
import time
| 35.131579 | 104 | 0.640949 |
4399aded5ee5a7bbfaba489cfa6e1bbdb4b8689f | 3,911 | py | Python | grimer/metadata.py | pirovc/grimer | 169f8d3009004d6d2f4ca4d3e7dfec819078cb34 | [
"MIT"
] | 5 | 2021-06-24T03:19:47.000Z | 2021-12-18T22:33:04.000Z | grimer/metadata.py | pirovc/grimer | 169f8d3009004d6d2f4ca4d3e7dfec819078cb34 | [
"MIT"
] | 1 | 2022-02-04T14:52:40.000Z | 2022-03-07T10:04:54.000Z | grimer/metadata.py | pirovc/grimer | 169f8d3009004d6d2f4ca4d3e7dfec819078cb34 | [
"MIT"
] | null | null | null | import pandas as pd
from pandas.api.types import is_numeric_dtype
from grimer.utils import print_log
| 38.722772 | 136 | 0.628995 |
439a75ca9b8d0ab554205540e1b91cb943b0c4ba | 5,162 | py | Python | allennlp/training/metric_tracker.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 11,433 | 2017-06-27T03:08:46.000Z | 2022-03-31T18:14:33.000Z | allennlp/training/metric_tracker.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 4,006 | 2017-06-26T21:45:43.000Z | 2022-03-31T02:11:10.000Z | allennlp/training/metric_tracker.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 2,560 | 2017-06-26T21:16:53.000Z | 2022-03-30T07:55:46.000Z | from typing import Optional, Dict, Any, List, Union
from allennlp.common.checks import ConfigurationError
| 38.522388 | 93 | 0.629407 |
439aafbb1c6af8fc6a5c2fcb3a71f36930de52f2 | 605 | py | Python | authors/apps/profiles/renderers.py | MuhweziDeo/Ah-backend-xmen | 60c830977fa39a7eea9ab978a9ba0c3beb0c4d88 | [
"BSD-3-Clause"
] | 4 | 2019-01-07T09:15:17.000Z | 2020-11-09T09:58:54.000Z | authors/apps/profiles/renderers.py | MuhweziDeo/Ah-backend-xmen | 60c830977fa39a7eea9ab978a9ba0c3beb0c4d88 | [
"BSD-3-Clause"
] | 34 | 2019-01-07T15:30:14.000Z | 2019-03-06T08:23:34.000Z | authors/apps/profiles/renderers.py | MuhweziDeo/Ah-backend-xmen | 60c830977fa39a7eea9ab978a9ba0c3beb0c4d88 | [
"BSD-3-Clause"
] | 10 | 2018-12-18T14:43:52.000Z | 2020-02-07T08:27:50.000Z | from authors.apps.utils.renderers import AppJSONRenderer
import json
from rest_framework.renderers import JSONRenderer
| 23.269231 | 67 | 0.679339 |
439abf267a321356c428ab3774898fb305a07e4a | 956 | py | Python | json_analyzer.py | bantenz/NetworkConfigParser | e1aa8385540823340e8278c7d7af0201399efd8f | [
"Apache-2.0"
] | null | null | null | json_analyzer.py | bantenz/NetworkConfigParser | e1aa8385540823340e8278c7d7af0201399efd8f | [
"Apache-2.0"
] | null | null | null | json_analyzer.py | bantenz/NetworkConfigParser | e1aa8385540823340e8278c7d7af0201399efd8f | [
"Apache-2.0"
] | null | null | null | import json
from deepdiff import DeepDiff
import pprint
if __name__ == "__main__":
    # Entry point: run main() only when executed as a script; importing this
    # module does not trigger it.
    main()
| 30.83871 | 94 | 0.669456 |
439b48ead1b5b023fe47fbce88acf0d32181f26a | 9,437 | py | Python | fiwareglancesync/sync.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | null | null | null | fiwareglancesync/sync.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 88 | 2015-07-21T22:13:23.000Z | 2016-11-15T21:28:56.000Z | fiwareglancesync/sync.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 2 | 2015-08-12T11:19:55.000Z | 2018-05-25T19:04:43.000Z | #!/usr/bin/env python
# -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefnica Investigacin y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
import sys
import StringIO
import os
import os.path
import datetime
import argparse
import logging
from fiwareglancesync.glancesync import GlanceSync
if __name__ == '__main__':
    # Parse cmdline
    description = 'A tool to sync images from a master region to other '\
        'regions'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('regions', metavar='region', type=str, nargs='*',
                        help='region where the images are uploaded to')
    parser.add_argument('--parallel', action='store_true',
                        help='sync several regions in parallel')
    parser.add_argument(
        '--config', nargs='+', help='override configuration options. (e.g. ' +
        "main.master_region=Valladolid metadata_condition='image.name=name1')")
    # The following modes are mutually exclusive with each other (and with
    # the default "actually sync" behavior).
    # NOTE(review): several help strings contain typos ('don not sync',
    # 'do no sync') — user-facing text, left untouched here.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--dry-run', action='store_true',
                       help='do not upload actually the images')
    group.add_argument('--show-status', action='store_true',
                       help='do not sync, but show the synchronisation status')
    group.add_argument('--show-regions', action='store_true',
                       help='don not sync, only show the available regions')
    group.add_argument(
        '--make-backup', action='store_true',
        help="do no sync, make a backup of the regions' metadata")
    meta = parser.parse_args()
    # Turn each "key=value" --config override into an options dict entry.
    options = dict()
    if meta.config:
        for option in meta.config:
            pair = option.split('=')
            if len(pair) != 2:
                parser.error('config options must have the format key=value')
                # NOTE(review): unreachable — parser.error() already exits.
                sys.exit(-1)
            options[pair[0].strip()] = pair[1]
    # Run cmd
    # Dispatch to the requested mode; default is a sequential sync
    # (honoring --dry-run).
    sync = Sync(meta.regions, options)
    if meta.show_status:
        sync.report_status()
    elif meta.parallel:
        sync.parallel_sync()
    elif meta.show_regions:
        sync.show_regions()
    elif meta.make_backup:
        sync.make_backup()
    else:
        sync.sequential_sync(meta.dry_run)
439b5da067d8952a4649cfcbc1a2148086951365 | 2,224 | py | Python | models/object_detection/pytorch/ssd-resnet34/training/cpu/mlperf_logger.py | Pandinosaurus/models-intelai | 60f5712d79a363bdb7624e3116a66a4f1a7fe208 | [
"Apache-2.0"
] | null | null | null | models/object_detection/pytorch/ssd-resnet34/training/cpu/mlperf_logger.py | Pandinosaurus/models-intelai | 60f5712d79a363bdb7624e3116a66a4f1a7fe208 | [
"Apache-2.0"
] | null | null | null | models/object_detection/pytorch/ssd-resnet34/training/cpu/mlperf_logger.py | Pandinosaurus/models-intelai | 60f5712d79a363bdb7624e3116a66a4f1a7fe208 | [
"Apache-2.0"
] | null | null | null | ### This file is originally from: [mlcommons repo](https://github.com/mlcommons/training/tree/9947bdf21ee3f2488fa4b362eec2ce7deb2ec4dd/single_stage_detector/ssd/mlperf_logger.py)
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from mlperf_logging import mllog
from mlperf_logging.mllog import constants as mllog_const
# Module-level MLPerf compliance logger shared by the training code.
mllogger = mllog.get_mllogger()
# Log to $COMPLIANCE_FILE (default "mlperf_compliance.log"), resolving
# relative paths against this file's own directory.
mllog.config(
    filename=(os.getenv("COMPLIANCE_FILE") or "mlperf_compliance.log"),
    root_dir=os.path.normpath(os.path.dirname(os.path.realpath(__file__))))
def barrier():
    """Best-effort distributed barrier.

    Workaround for PyTorch's missing barrier on the NCCL backend: when
    torch.distributed is initialized, perform an all_reduce on a dummy
    CUDA tensor and synchronize the GPU. No-op otherwise.
    """
    if not torch.distributed.is_initialized():
        return
    dummy = torch.cuda.FloatTensor(1)
    torch.distributed.all_reduce(dummy)
    torch.cuda.synchronize()
def get_rank():
    """Return this process's distributed rank as an int.

    Uses torch.distributed when it is initialized; otherwise falls back
    to the RANK / LOCAL_RANK environment variables, defaulting to 0.

    Fix: the environment fallback used to return the raw *string* value
    of RANK/LOCAL_RANK when set, but the int 0 when unset. The result is
    now consistently an int, matching torch.distributed.get_rank().
    """
    if torch.distributed.is_initialized():
        return torch.distributed.get_rank()
    # getenv returns a string when the variable is set; normalize to int.
    return int(os.getenv('RANK', os.getenv('LOCAL_RANK', 0)))
| 35.870968 | 178 | 0.721223 |
439c484fa1d9a64793cf4da644af68eabbc13295 | 13,932 | py | Python | omtk/models/model_avar_surface_lips.py | CDufour909/omtk_unreal | 64ae76a7b0a3f73a4b32d3b330f3174d02c54234 | [
"MIT"
] | null | null | null | omtk/models/model_avar_surface_lips.py | CDufour909/omtk_unreal | 64ae76a7b0a3f73a4b32d3b330f3174d02c54234 | [
"MIT"
] | null | null | null | omtk/models/model_avar_surface_lips.py | CDufour909/omtk_unreal | 64ae76a7b0a3f73a4b32d3b330f3174d02c54234 | [
"MIT"
] | null | null | null | import math
import pymel.core as pymel
from omtk.core.classNode import Node
from omtk.libs import libAttr
from omtk.libs import libRigging
from . import model_avar_surface
| 42.090634 | 162 | 0.680879 |
439cc020be352b363d0141cede18e92d0b0f339f | 5,910 | py | Python | project/server/main/feed.py | dataesr/harvest-theses | 1725b3ec3a944526fe62941d554bc3de6209cd28 | [
"MIT"
] | null | null | null | project/server/main/feed.py | dataesr/harvest-theses | 1725b3ec3a944526fe62941d554bc3de6209cd28 | [
"MIT"
] | null | null | null | project/server/main/feed.py | dataesr/harvest-theses | 1725b3ec3a944526fe62941d554bc3de6209cd28 | [
"MIT"
] | null | null | null | import datetime
import os
import pymongo
import requests
from urllib import parse
from urllib.parse import quote_plus
import json
from retry import retry
from bs4 import BeautifulSoup
import math
from project.server.main.logger import get_logger
from project.server.main.utils_swift import upload_object
from project.server.main.parse import parse_theses, get_idref_from_OS
from project.server.main.referentiel import harvest_and_save_idref
logger = get_logger(__name__)
| 36.708075 | 184 | 0.694755 |
439e1a09f9246f51a2f4aa291d6172d1d6ae55e7 | 808 | py | Python | DQM/L1TMonitor/python/L1TGCT_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DQM/L1TMonitor/python/L1TGCT_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DQM/L1TMonitor/python/L1TGCT_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# DQM analyzer booking/filling L1 GCT monitoring histograms from the
# unpacked "gctDigis" collections. Parameter names mirror the C++ L1TGCT
# module's configuration interface; isoTauJets points at a "fake" label
# (stage-1 layer-2 mode is disabled below).
l1tGct = DQMEDAnalyzer('L1TGCT',
    gctCentralJetsSource = cms.InputTag("gctDigis","cenJets"),
    gctForwardJetsSource = cms.InputTag("gctDigis","forJets"),
    gctTauJetsSource = cms.InputTag("gctDigis","tauJets"),
    gctIsoTauJetsSource = cms.InputTag("gctDigis","fake"),
    gctEnergySumsSource = cms.InputTag("gctDigis"),
    gctIsoEmSource = cms.InputTag("gctDigis","isoEm"),
    gctNonIsoEmSource = cms.InputTag("gctDigis","nonIsoEm"),
    monitorDir = cms.untracked.string("L1T/L1TGCT"),
    verbose = cms.untracked.bool(False),
    stage1_layer2_ = cms.bool(False),
    DQMStore = cms.untracked.bool(True),
    disableROOToutput = cms.untracked.bool(True),
    filterTriggerType = cms.int32(1)
)
| 38.47619 | 62 | 0.72896 |
439e62c4d6bd84f9f57f7073032cb6f2eab27d1b | 15,524 | py | Python | utilities.py | gandhiy/lipMIP | 11843e6bf2223acca44f57d29791521aac15caf3 | [
"MIT"
] | 11 | 2020-05-18T17:33:25.000Z | 2022-01-28T18:42:31.000Z | utilities.py | gandhiy/lipMIP | 11843e6bf2223acca44f57d29791521aac15caf3 | [
"MIT"
] | null | null | null | utilities.py | gandhiy/lipMIP | 11843e6bf2223acca44f57d29791521aac15caf3 | [
"MIT"
] | 1 | 2020-12-10T19:57:20.000Z | 2020-12-10T19:57:20.000Z | """ General all-purpose utilities """
import sys
import torch
import torch.nn.functional as F
import numpy as np
import gurobipy as gb
import matplotlib.pyplot as plt
import io
import contextlib
import tempfile
import time
import re
import pickle
import inspect
import glob
import os
COMPLETED_JOB_DIR = os.path.join(os.path.dirname(__file__), 'jobs', 'completed')
# ===============================================================================
# = Helpful all-purpose functions =
# ===============================================================================
def cpufy(tensor_iter):
    """Move every tensor in *tensor_iter* back to host (CPU) memory.

    Returns a new list; the input iterable is left untouched.
    """
    moved = []
    for tensor in tensor_iter:
        moved.append(tensor.cpu())
    return moved
def cudafy(tensor_iter):
    """Move every tensor in *tensor_iter* onto the GPU via safe_cuda.

    safe_cuda is defined elsewhere in this module; a new list is returned.
    """
    on_device = []
    for tensor in tensor_iter:
        on_device.append(safe_cuda(tensor))
    return on_device
def prod(num_iter):
    """Multiply all elements of *num_iter* together and return the result.

    An empty iterable yields the multiplicative identity, 1.
    """
    total = 1
    for factor in num_iter:
        total = total * factor
    return total
def partition(n, m):
    """Yield chunk sizes that split *n* into pieces of size *m*.

    Every yielded value is m, except possibly the last, which is the
    remainder n % m. Nothing is yielded when n <= 0.
    """
    for start in range(0, n, m):
        yield min(m, n - start)
def flatten_list(lol):
    """Recursively flatten a (possibly nested) list of lists into one flat list.

    Non-list elements are kept as-is, in order.
    """
    flat = []
    for item in lol:
        if isinstance(item, list):
            flat.extend(flatten_list(item))
        else:
            flat.append(item)
    return flat
def partition_by_suffix(iterable, func):
    """Split *iterable* into consecutive sublists, each ending at an element
    for which func(element) is truthy.

    The final sublist may lack such a terminating element.
    e.g.
        iterable := [1, 2, 3, 4, 5, 5, 5]
        func     := lambda x: (x % 2) == 0
        returns  [[1, 2], [3, 4], [5, 5, 5]]
    """
    groups = []
    current = []
    for item in iterable:
        current.append(item)
        if func(item):
            groups.append(current)
            current = []
    if current:
        groups.append(current)
    return groups
def as_numpy(tensor_or_array):
    """Return the argument as a numpy array: torch tensors are detached and
    copied to the CPU first; anything else is returned unchanged."""
    if not isinstance(tensor_or_array, torch.Tensor):
        return tensor_or_array
    return tensor_or_array.cpu().detach().numpy()
def two_col(l, r):
    """Stack two length-N numpy arrays side by side into an (N, 2) array."""
    stacked = np.vstack([l, r])
    return stacked.T
def split_tensor_pos_neg(x):
    """Split a tensor into its positive and negative parts, so that
    pos + neg == x elementwise."""
    return F.relu(x), -F.relu(-x)
def split_ndarray_pos_neg(x):
    """Split a numpy ndarray into its positive and negative parts, so that
    pos + neg == x elementwise."""
    nonneg_mask = (x >= 0)
    nonpos_mask = (x <= 0)
    return x * nonneg_mask, x * nonpos_mask
def swap_axes(x, source, dest):
    """Move/swap the dimensions source <-> dest for torch or numpy data.
    ARGS:
        x : numpy array or torch tensor
        source : int axis index
        dest : int axis index
    RETURNS
        object with the same data as x, but with the axes rearranged
        (torch: transpose of the two axes; numpy: moveaxis)
    """
    if not isinstance(x, torch.Tensor):
        return np.moveaxis(x, source, dest)
    return x.transpose(source, dest)
def ia_mm(matrix, intervals, lohi_dim, matrix_or_vec='matrix'):
    """ Interval analysis matrix(-vec) multiplication for torch intervals.
    ARGS:
        matrix : tensor of shape (m, n)
        intervals : tensor with shape (n1, ..., 2, n_i, ...) -
                    "vector" of intervals to be multiplied by a matrix;
                    the dimension right before the last one must equal n
        lohi_dim : int - which dimension (index) of intervals corresponds
                   to the lo/hi split
        matrix_or_vec : str - must be 'matrix' or 'vec'; whether intervals
                        is to be treated as a matrix or a vector (a vector
                        temporarily gets a trailing singleton dimension)
    RETURNS:
        object of the same layout as intervals, but with the contracted
        dimension replaced by m
    """
    # shape/type sanity checks (TENSOR ONLY FOR NOW)
    assert isinstance(matrix, torch.Tensor)
    assert isinstance(intervals, torch.Tensor)
    m, n = matrix.shape
    assert intervals.shape[lohi_dim] == 2
    assert matrix_or_vec in ['matrix', 'vec']
    if matrix_or_vec == 'vec':
        intervals = intervals.unsqueeze(-1)
    assert lohi_dim != intervals.dim() - 2
    # BUGFIX: the original read `intervals[dim][-2]` with an undefined
    # name `dim` (NameError); the intent is to check that the contracted
    # dimension of the interval tensor matches the matrix's column count.
    assert intervals.shape[-2] == n
    # Interval-arithmetic product with A split into its positive and
    # negative parts (BUGFIX: original called undefined `split_pos_neg`):
    #   lo' = A+ @ lo + A- @ hi ;  hi' = A+ @ hi + A- @ lo
    intervals = intervals.transpose(0, lohi_dim)
    matrix_pos = F.relu(matrix)
    matrix_neg = -F.relu(-matrix)
    los, his = intervals
    new_los = matrix_pos.matmul(los) + matrix_neg.matmul(his)
    new_his = matrix_pos.matmul(his) + matrix_neg.matmul(los)
    intervals = torch.stack([new_los, new_his]).transpose(0, lohi_dim)
    if matrix_or_vec == 'vec':
        # BUGFIX: original had `interval.squeeze(-1)` (undefined name).
        intervals = intervals.squeeze(-1)
    return intervals
# =============================================================================
# = Image display functions =
# =============================================================================
def display_images(image_rows, figsize=(8, 8)):
    """Show a grid of images with matplotlib.

    ARGS:
        image_rows: tensor/np.array of shape (N, C, H, W), or a list of
                    such objects -- each list element becomes one row
        figsize: matplotlib figure size
    RETURNS: None, but displays the images
    """
    if not isinstance(image_rows, list):
        image_rows = [image_rows]
    # Channel-last layout, then concatenate each batch horizontally.
    rows = []
    for row in image_rows:
        row = as_numpy(row)
        rows.append(np.concatenate(row.transpose([0, 2, 3, 1]), axis=1))
    # Stack the rows vertically into one big image.
    full_image = np.concatenate(rows, axis=0)
    imshow_kwargs = {}
    if full_image.shape[-1] == 1:
        # Single-channel: drop the channel dim and render in grayscale.
        full_image = full_image.squeeze()
        imshow_kwargs['cmap'] = 'gray'
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot()
    ax.axis('off')
    ax.imshow(full_image, **imshow_kwargs)
    plt.show()
# ======================================================
# = Pytorch helpers =
# ======================================================
def seq_append(seq, module):
    """ Takes a nn.Sequential and a nn.Module and creates a new nn.Sequential
    with the module appended to it.
    ARGS:
        seq: nn.Sequential object
        module: <inherits nn.Module>
    RETURNS:
        new nn.Sequential object (the input sequential is not modified)
    """
    # BUGFIX: the module-level imports only bring in `torch` (not
    # `torch.nn as nn`), so the bare name `nn` raised a NameError here --
    # reference Sequential through torch.nn instead.
    seq_modules = [seq[i] for i in range(len(seq))] + [module]
    return torch.nn.Sequential(*seq_modules)
def cpufy(tensor_iter):
    """Move tensors (or tuples of tensors) in the iterable onto the CPU.

    NOTE(review): this redefines and shadows the `cpufy` declared earlier
    in this module; this variant additionally handles tuple elements.
    """
    return [
        tuple(t.cpu() for t in el) if isinstance(el, tuple) else el.cpu()
        for el in tensor_iter
    ]
def cudafy(tensor_iter):
    """Push every tensor in the iterable onto the GPU via ``safe_cuda``.

    NOTE(review): redefines the `cudafy` declared earlier in this module.
    """
    output = []
    for tensor in tensor_iter:
        output.append(safe_cuda(tensor))
    return output
# =======================================
# = Polytope class =
# =======================================
# =========================================================
# = experiment.Result object helpers =
# =========================================================
def filename_to_epoch(filename):
    """Pull the 4-digit epoch number out of a filename containing a marker
    like '_EPOCH0042_' and return it as an int."""
    match = re.search(r'_EPOCH(\d{4})_', filename)
    return int(match.group(1))
def read_result_files(result_files):
    """Unpickle every file in *result_files*.

    Returns a list of (filename, unpickled_object) pairs; files that fail
    to open or unpickle are reported to stdout and skipped.
    """
    loaded = []
    for path in result_files:
        try:
            with open(path, 'rb') as handle:
                loaded.append((path, pickle.load(handle)))
        except Exception as err:
            print("Failed on file: ", path, err)
    return loaded
def job_out_series(job_outs, eval_style, method,
                   value_or_time='value', avg_stdev='avg'):
    """ Takes in some result or resultList objects and
        a 'method', and desired object, and returns these objects
        in a list
    ARGS:
        job_outs: dict[] - each maps eval_style -> Result or ResultList
        eval_style: str - which method of Experiment we look at
        method: str - which Lipschitz-estimation technique to consider
        value_or_time: 'value' or 'time' - which number to return
        avg_stdev: 'avg' or 'stdev' - for ResultList[], we can
                   get average or stdev values
    RETURNS:
        list of floats
    """
    # validate the selector strings up front
    assert value_or_time in ['value', 'time']
    assert avg_stdev in ['avg', 'stdev']
    assert eval_style in ['do_random_evals', 'do_unit_hypercube_eval',
                          'do_data_evals', 'do_large_radius_evals']
    results = [job_out[eval_style] for job_out in job_outs]
    output = []
    for result in results:
        try:  # Result object case
            if value_or_time == 'value':
                output.append(result.values(method))
            else:
                output.append(result.compute_times(method))
        except Exception:
            # ResultList case: average_stdevs returns (avg, stdev, ...)
            # triples keyed by method.
            # BUGFIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            triple = result.average_stdevs(value_or_time)[method]
            if avg_stdev == 'avg':
                output.append(triple[0])
            else:
                output.append(triple[1])
    return output
def collect_result_outs(filematch):
    """ Uses glob to collect and load result objects matching a series
    ARGS:
        filematch: string with *'s associated with it
                   e.g. 'NAME*SUBNAME*GLOBAL.result'
    RESULTS:
        list of (filename, experiment.Result) objects, sorted by filename
    """
    pattern = os.path.join(COMPLETED_JOB_DIR, filematch)
    matching_files = sorted(glob.glob(pattern))
    return read_result_files(matching_files)
def collect_epochs(filename_list):
    """ Given a list of filenames, converts them into integers by pulling
    the EPOCH attribute from each filename
    str[] -> int[]
    """
    # BUGFIX: the original called `epoch_gleamer`, which is not defined
    # anywhere in this module; the helper that extracts the epoch from a
    # filename is `filename_to_epoch`.
    return [filename_to_epoch(fname) for fname in filename_list]
def data_from_results(result_iter, method, lip_estimator, time_or_value='value',
                      avg_or_stdev='avg'):
    """ Given a list of experiment.Result or experiment.ResultList objects
        will return the time/value for the lip_estimator of the method
        for result (or avg/stdev if resultList objects)
        e.g., data_from_results('do_unit_hypercube_eval', 'LipMIP',
                                'value') gets a list of values of the
              LipMIP over the unitHypercube domain
    ARGS:
        method: str - name of one of the experimental methods
        lip_estimator : str - name of the class of lipschitz estimator to use
        time_or_value : 'time' or 'value' - returning the time or value here
        avg_or_stdev : 'avg' or 'stdev' - returning either avg or stdev of
                        results from ResultListObjects
    """
    # Validate the selector strings before touching any result objects.
    assert method in ['do_random_evals', 'do_data_evals',
                      'do_unit_hypercube_eval']
    assert lip_estimator in ['LipMIP', 'FastLip', 'LipLP', 'CLEVER',
                             'LipSDP', 'NaiveUB', 'RandomLB', 'SeqLip']
    assert time_or_value in ['time', 'value']
    assert avg_or_stdev in ['avg', 'stdev']
    # NOTE(review): `datum_getter` is not defined anywhere in this module,
    # so this line raises NameError when executed -- presumably a getter
    # built from method/lip_estimator/time_or_value was intended; confirm.
    return [datum_getter(_) for _ in result_iter]
| 28.021661 | 81 | 0.659237 |
439e723ba661ca0696137f422b31b51f63930e6a | 387 | py | Python | OLD/karma_module/text.py | alentoghostflame/StupidAlentoBot | c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba | [
"MIT"
] | 1 | 2021-12-12T02:50:20.000Z | 2021-12-12T02:50:20.000Z | OLD/karma_module/text.py | alentoghostflame/StupidAlentoBot | c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba | [
"MIT"
] | 17 | 2020-02-07T23:40:36.000Z | 2020-12-22T16:38:44.000Z | OLD/karma_module/text.py | alentoghostflame/StupidAlentoBot | c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba | [
"MIT"
] | null | null | null | ADDED_KARMA_TO_MEMBER = "Gave {} karma to {}, their karma is now at {}."
# User-facing message templates for the karma commands; the `{}`
# placeholders are filled with str.format at call time.
REMOVED_KARMA_FROM_MEMBER = "Removed {} karma from {}, their karma is now at {}."
LIST_KARMA_OWN = "You currently have {} karma."
LIST_KARMA_OBJECT = "\"{}\" currently has {} karma."
LIST_KARMA_MEMBER = "{} currently has {} karma."
# Leaderboard output: a header line plus one formatted row per entry
# (rank, name, karma); the `\\|` renders a literal pipe in Discord.
KARMA_TOP_START = "Top karma in server:\n"
KARMA_TOP_FORMAT = "{}. {} \\| {}\n"
| 38.7 | 81 | 0.669251 |
43a00c0b5646519c438692fcd0610b44be3beb14 | 1,340 | py | Python | read_delphin_data.py | anssilaukkarinen/mry-cluster2 | 65d80a7371a4991dfe248ff6944f050e1573f8fc | [
"MIT"
] | null | null | null | read_delphin_data.py | anssilaukkarinen/mry-cluster2 | 65d80a7371a4991dfe248ff6944f050e1573f8fc | [
"MIT"
] | null | null | null | read_delphin_data.py | anssilaukkarinen/mry-cluster2 | 65d80a7371a4991dfe248ff6944f050e1573f8fc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 14:51:24 2021
@author: laukkara
This script is run first to fetch results data from university's network drive
"""
import os
import pickle
# Source: Delphin simulation results on the university network drive.
input_folder_for_Delphin_data = r'S:\91202_Rakfys_Mallinnus\RAMI\simulations'
# Destination: local repository input folder and pickle filename.
output_folder = os.path.join(r'C:\Local\laukkara\Data\github\mry-cluster2\input')
output_pickle_file_name = 'S_RAMI.pickle'
## Preparations
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
output_pickle_file_path = os.path.join(output_folder,
                                       output_pickle_file_name)
## Read in results data from pickle files
cases = {}
data = {}
# Each case is a subfolder containing a 'd.pickle' results file; the two
# non-case entries are removed explicitly.
cases = os.listdir(input_folder_for_Delphin_data)
cases.remove('olds')
cases.remove('RAMI_simulated_cases.xlsx')
data = {}
for case in cases:
    print('Reading:', case)
    fname = os.path.join(input_folder_for_Delphin_data, case, 'd.pickle')
    with open(fname, 'rb') as f:
        try:
            # df is presumably a pandas DataFrame; only cases with exactly
            # 1200 rows are kept -- TODO confirm why 1200 is the expected
            # row count.
            df = pickle.load(f)
            if df.shape[0] == 1200:
                data[case] = df
            else:
                print('ERROR AT:', case)
        except:
            # NOTE(review): bare except silently skips unreadable pickles.
            print('Error when reading case:', case)
# Quick sanity print of the first case's columns, then dump everything.
print(data[cases[0]].columns)
with open(output_pickle_file_path, 'wb') as f:
    pickle.dump(data, f)
| 20 | 81 | 0.630597 |
43a01f33e82c9b00675c1f842c3ac9effea08533 | 7,335 | py | Python | api/config.py | sumesh-aot/namex | 53e11aed5ea550b71b7b983f1b57b65db5a06766 | [
"Apache-2.0"
] | 1 | 2020-03-23T21:43:15.000Z | 2020-03-23T21:43:15.000Z | api/config.py | sumesh-aot/namex | 53e11aed5ea550b71b7b983f1b57b65db5a06766 | [
"Apache-2.0"
] | null | null | null | api/config.py | sumesh-aot/namex | 53e11aed5ea550b71b7b983f1b57b65db5a06766 | [
"Apache-2.0"
] | null | null | null | """Config for initializing the namex-api."""
import os
from dotenv import find_dotenv, load_dotenv
# this will load all the envars from a .env file located in the project root (api)
load_dotenv(find_dotenv())
# Maps deployment-environment names to the dotted path of the config class
# to load; 'default' is used when no environment name is given.
CONFIGURATION = {
    'development': 'config.DevConfig',
    'testing': 'config.TestConfig',
    'production': 'config.Config',
    'default': 'config.Config'
}
| 43.402367 | 210 | 0.718609 |
43a04a876b69a7d204627f4d6e2351f7e07cdf98 | 518 | py | Python | examples/pylab_examples/fancybox_demo2.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 16 | 2016-06-14T19:45:35.000Z | 2020-11-30T19:02:58.000Z | examples/pylab_examples/fancybox_demo2.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 7 | 2015-05-08T19:36:25.000Z | 2015-06-30T15:32:17.000Z | examples/pylab_examples/fancybox_demo2.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 6 | 2015-06-05T03:34:06.000Z | 2022-01-25T09:07:10.000Z | import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
# Enumerate every registered FancyBboxPatch box style.
styles = mpatch.BoxStyle.get_styles()
# One row per style, plus half a row of margin.
figheight = (len(styles)+.5)
fig1 = plt.figure(1, (4/1.5, figheight/1.5))
fontsize = 0.3 * 72  # 0.3 inch expressed in points (72 pt per inch)
# Draw each style's name inside a box rendered with that very style,
# stacked top to bottom in figure coordinates.
for i, (stylename, styleclass) in enumerate(styles.items()):
    fig1.text(0.5, (float(len(styles)) - 0.5 - i)/figheight, stylename,
              ha="center",
              size=fontsize,
              transform=fig1.transFigure,
              bbox=dict(boxstyle=stylename, fc="w", ec="k"))
plt.draw()
plt.show()
| 27.263158 | 71 | 0.629344 |
43a255b174f2f6995694a3ff518d32d995c17049 | 981 | py | Python | setup.py | sdu-cfei/modest-py | dc14091fb8c20a8b3fa5ab33bbf597c0b566ba0a | [
"BSD-2-Clause"
] | 37 | 2017-06-21T19:09:11.000Z | 2022-03-13T09:26:07.000Z | setup.py | sdu-cfei/modest-py | dc14091fb8c20a8b3fa5ab33bbf597c0b566ba0a | [
"BSD-2-Clause"
] | 51 | 2017-06-21T17:40:42.000Z | 2021-10-31T09:16:21.000Z | setup.py | sdu-cfei/modest-py | dc14091fb8c20a8b3fa5ab33bbf597c0b566ba0a | [
"BSD-2-Clause"
] | 12 | 2017-10-02T12:32:50.000Z | 2022-03-13T09:26:15.000Z | from setuptools import setup
# Packaging metadata for the modestpy distribution.
setup(
    name='modestpy',
    version='0.1',
    description='FMI-compliant model identification package',
    url='https://github.com/sdu-cfei/modest-py',
    keywords='fmi fmu optimization model identification estimation',
    author='Krzysztof Arendt, Center for Energy Informatics SDU',
    author_email='krzysztof.arendt@gmail.com, veje@mmmi.sdu.dk',
    license='BSD',
    platforms=['Windows', 'Linux'],
    # Subpackages are listed explicitly (not via find_packages()).
    packages=[
        'modestpy',
        'modestpy.estim',
        'modestpy.estim.ga_parallel',
        'modestpy.estim.ga',
        'modestpy.estim.ps',
        'modestpy.estim.scipy',
        'modestpy.fmi',
        'modestpy.utilities',
        'modestpy.test'],
    include_package_data=True,
    # Runtime dependencies; fmpy[complete] pulls in the FMU simulation stack.
    install_requires=[
        'fmpy[complete]',
        'scipy',
        'pandas',
        'matplotlib',
        'numpy',
        'pyDOE',
        'modestga'
    ],
    classifiers=[
        'Programming Language :: Python :: 3'
    ]
)
| 26.513514 | 68 | 0.59633 |
43a26f9573c5f714eb41be0b40f5f0e94681fe54 | 1,013 | py | Python | gfworkflow/core.py | andersonbrands/gfworkflow | 81c646fd53b8227691bcd3e236f538fee0d9d93c | [
"MIT"
] | null | null | null | gfworkflow/core.py | andersonbrands/gfworkflow | 81c646fd53b8227691bcd3e236f538fee0d9d93c | [
"MIT"
] | null | null | null | gfworkflow/core.py | andersonbrands/gfworkflow | 81c646fd53b8227691bcd3e236f538fee0d9d93c | [
"MIT"
] | null | null | null | import re
import subprocess as sp
from typing import Union, List
from gfworkflow.exceptions import RunCommandException
| 25.974359 | 96 | 0.722606 |
43a2afd4837130116a518598c3c7bbcceafe7999 | 306 | py | Python | tests/integration/lambdas/lambda_python3.py | jorges119/localstack | a8a78cda6c13b2e42bc46301b23c7143580132fb | [
"Apache-2.0"
] | 31,928 | 2017-07-04T03:06:28.000Z | 2022-03-31T22:33:27.000Z | tests/integration/lambdas/lambda_python3.py | jorges119/localstack | a8a78cda6c13b2e42bc46301b23c7143580132fb | [
"Apache-2.0"
] | 5,216 | 2017-07-04T11:45:41.000Z | 2022-03-31T22:02:14.000Z | tests/integration/lambdas/lambda_python3.py | jorges119/localstack | a8a78cda6c13b2e42bc46301b23c7143580132fb | [
"Apache-2.0"
] | 3,056 | 2017-06-05T13:29:11.000Z | 2022-03-31T20:54:43.000Z | # simple test function that uses python 3 features (e.g., f-strings)
# see https://github.com/localstack/localstack/issues/264
| 34 | 81 | 0.718954 |
43a39cbdc284d3d48cf14614c751040caf06e2f0 | 3,018 | py | Python | import_off.py | etiennody/purchoice | 43a2dc81ca953ac6168f8112e97a4bae91ace690 | [
"MIT"
] | null | null | null | import_off.py | etiennody/purchoice | 43a2dc81ca953ac6168f8112e97a4bae91ace690 | [
"MIT"
] | 2 | 2020-05-04T09:40:32.000Z | 2021-08-03T17:34:00.000Z | import_off.py | etiennody/purchoice | 43a2dc81ca953ac6168f8112e97a4bae91ace690 | [
"MIT"
] | null | null | null | #! usr/bin/python3
# code: utf-8
"""Download data from Open Food Facts API."""
import json
import requests
from src.purchoice.constants import CATEGORY_SELECTED
from src.purchoice.purchoice_database import PurchoiceDatabase
if __name__ == "__main__":
db = PurchoiceDatabase()
db.truncate_tables()
import_off = ImportOff(db)
for category in CATEGORY_SELECTED:
import_off.import_by_category(category)
print("Merci d'avoir patient. Vous pouvez lancer l'application !")
| 32.804348 | 79 | 0.594102 |
43a4f6e31b5eece16d50c0585d3ecac08d080d46 | 5,919 | py | Python | orio/module/loop/cfg.py | zhjp0/Orio | 7dfb80527053c5697d1bce1bd8ed996b1ea192c8 | [
"MIT"
] | null | null | null | orio/module/loop/cfg.py | zhjp0/Orio | 7dfb80527053c5697d1bce1bd8ed996b1ea192c8 | [
"MIT"
] | null | null | null | orio/module/loop/cfg.py | zhjp0/Orio | 7dfb80527053c5697d1bce1bd8ed996b1ea192c8 | [
"MIT"
] | null | null | null | '''
Created on April 26, 2015
@author: norris
'''
import ast, sys, os, traceback
from orio.main.util.globals import *
from orio.tool.graphlib import graph
from orio.module.loop import astvisitors
| 32.521978 | 107 | 0.478459 |
43a51f00be6eeff0b67bd7aa629b9ff21c09189f | 503 | py | Python | cogs rework/server specified/on_message_delete.py | lubnc4261/House-Keeper | 6de20014afaf00cf9050e54c91cd8b3a02702a27 | [
"MIT"
] | null | null | null | cogs rework/server specified/on_message_delete.py | lubnc4261/House-Keeper | 6de20014afaf00cf9050e54c91cd8b3a02702a27 | [
"MIT"
] | null | null | null | cogs rework/server specified/on_message_delete.py | lubnc4261/House-Keeper | 6de20014afaf00cf9050e54c91cd8b3a02702a27 | [
"MIT"
] | null | null | null | import discord
from discord import Embed
| 33.533333 | 91 | 0.735586 |
43a5f6e07158fad4d7bfe9f3af12b2b23116e364 | 22,646 | py | Python | test/modules/md/md_env.py | icing/mod_md | 4522ed547f0426f27aae86f00fbc9b5b17de545f | [
"Apache-2.0"
] | 320 | 2017-07-22T12:14:19.000Z | 2022-03-24T14:00:32.000Z | test/modules/md/md_env.py | icing/mod_md | 4522ed547f0426f27aae86f00fbc9b5b17de545f | [
"Apache-2.0"
] | 272 | 2017-07-22T12:30:48.000Z | 2022-03-30T07:14:50.000Z | test/modules/md/md_env.py | icing/mod_md | 4522ed547f0426f27aae86f00fbc9b5b17de545f | [
"Apache-2.0"
] | 36 | 2017-07-22T12:45:03.000Z | 2021-05-18T12:20:11.000Z | import copy
import inspect
import json
import logging
import pytest
import re
import os
import shutil
import subprocess
import time
from datetime import datetime, timedelta
from configparser import ConfigParser, ExtendedInterpolation
from typing import Dict, List, Optional
from pyhttpd.certs import CertificateSpec
from .md_cert_util import MDCertUtil
from pyhttpd.env import HttpdTestSetup, HttpdTestEnv
from pyhttpd.result import ExecResult
# Module-level logger for the md test-environment helpers.
log = logging.getLogger(__name__)
def set_store_dir_default(self):
    """Point the MD store at its default location; httpd >= 2.5.0 keeps
    the store underneath the 'state' directory."""
    if self.httpd_is_at_least("2.5.0"):
        dirpath = os.path.join("state", "md")
    else:
        dirpath = "md"
    self.set_store_dir(dirpath)
def set_store_dir(self, dirpath):
    """Set the MD store directory (relative to the server dir) and, when an
    ACME endpoint is configured, refresh the default a2md argument lists."""
    self._store_dir = os.path.join(self.server_dir, dirpath)
    if self.acme_url:
        common = [self.a2md_bin, "-a", self.acme_url,
                  "-d", self._store_dir, "-C", self.acme_ca_pemfile]
        self.a2md_stdargs(common + ["-j"])
        self.a2md_rawargs(list(common))
def get_request_domain(self, request):
    """Derive a test domain from the pytest request's original test name."""
    name = re.sub(r'[_]', '-', request.node.originalname)
    return "%s-%s" % (name, MDTestEnv.DOMAIN_SUFFIX)
def get_method_domain(self, method):
    """Derive a test domain from a test method's (lowercased) name."""
    name = re.sub(r'[_]', '-', method.__name__.lower())
    return "%s-%s" % (name, MDTestEnv.DOMAIN_SUFFIX)
def get_module_domain(self, module):
    """Derive a test domain from a test module's (lowercased) name."""
    name = re.sub(r'[_]', '-', module.__name__.lower())
    return "%s-%s" % (name, MDTestEnv.DOMAIN_SUFFIX)
def get_class_domain(self, c):
    """Derive a test domain from a test class's (lowercased) name."""
    name = re.sub(r'[_]', '-', c.__name__.lower())
    return "%s-%s" % (name, MDTestEnv.DOMAIN_SUFFIX)
# --------- cmd execution ---------
_a2md_args = []
_a2md_args_raw = []
# --------- access local store ---------
# --------- check utilities --------- | 37.806344 | 125 | 0.597457 |
43a66e0d4848430d37cecb21387fa89ddac71ea8 | 1,949 | py | Python | models/create_message_response.py | ajrice6713/bw-messaging-emulator | d1be4976e2486ec91b419597afc8411c78ebfda7 | [
"MIT"
] | null | null | null | models/create_message_response.py | ajrice6713/bw-messaging-emulator | d1be4976e2486ec91b419597afc8411c78ebfda7 | [
"MIT"
] | null | null | null | models/create_message_response.py | ajrice6713/bw-messaging-emulator | d1be4976e2486ec91b419597afc8411c78ebfda7 | [
"MIT"
] | null | null | null | import datetime
import json
import random
import string
from typing import Dict
from sms_counter import SMSCounter
| 30.936508 | 91 | 0.578758 |
43a6cf6a117a9bd891a315706e175a03b6175d39 | 51,390 | py | Python | python/ccxt/async_support/uex.py | victor95pc/ccxt | 5c3e606296a1b15852a35f1330b645f451fa08d6 | [
"MIT"
] | 1 | 2019-03-17T22:44:30.000Z | 2019-03-17T22:44:30.000Z | python/ccxt/async_support/uex.py | Lara-Bell/ccxt | e09230b4b60d5c33e3f6ebc044002bab6f733553 | [
"MIT"
] | null | null | null | python/ccxt/async_support/uex.py | Lara-Bell/ccxt | e09230b4b60d5c33e3f6ebc044002bab6f733553 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
# Py2/Py3 compatibility shim: on Python 3 the builtin `basestring` does not
# exist, so the probe raises NameError and we alias it to `str`; on
# Python 2 the probe succeeds and the builtin is kept as-is.
try:
    basestring  # Python 3
except NameError:
    basestring = str  # Python 2
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
def parse_trade(self, trade, market=None):
    """Normalize a raw UEX trade dict into the unified ccxt trade structure.

    Handles both the public fetchTrades payload and the private
    fetchMyTrades/fetchOrder payloads, which use different key names.
    """
    #
    # public fetchTrades
    #
    # { amount: 0.88,
    #   create_time: 1533414358000,
    #   price: 0.058019,
    #   id: 406531,
    #   type: "sell" },
    #
    # private fetchMyTrades, fetchOrder, fetchOpenOrders, fetchClosedOrders
    #
    # { volume: "0.010",
    #   side: "SELL",
    #   feeCoin: "BTC",
    #   price: "0.05816200",
    #   fee: "0.00000029",
    #   ctime: 1533616674000,
    #   deal_price: "0.00058162",
    #   id: 415779,
    #   type: "",
    #   bid_id: 3669539,  # only in fetchMyTrades
    #   ask_id: 3669583,  # only in fetchMyTrades
    # }
    #
    # 'create_time' (public) vs 'ctime' (private); both are ms timestamps.
    timestamp = self.safe_integer_2(trade, 'create_time', 'ctime')
    if timestamp is None:
        # Fallback: 'created_at' is a partial time string missing the year
        # and seconds -- presumably always within 2018; TODO confirm.
        timestring = self.safe_string(trade, 'created_at')
        if timestring is not None:
            timestamp = self.parse8601('2018-' + timestring + ':00Z')
    side = self.safe_string_2(trade, 'side', 'type')
    if side is not None:
        side = side.lower()
    id = self.safe_string(trade, 'id')
    symbol = None
    if market is not None:
        symbol = market['symbol']
    price = self.safe_float(trade, 'price')
    amount = self.safe_float_2(trade, 'volume', 'amount')
    cost = self.safe_float(trade, 'deal_price')
    if cost is None:
        # Derive the cost when the exchange did not report it.
        if amount is not None:
            if price is not None:
                cost = amount * price
    fee = None
    feeCost = self.safe_float_2(trade, 'fee', 'deal_fee')
    if feeCost is not None:
        feeCurrency = self.safe_string(trade, 'feeCoin')
        if feeCurrency is not None:
            # Translate the exchange currency id into a unified code.
            currencyId = feeCurrency.lower()
            if currencyId in self.currencies_by_id:
                feeCurrency = self.currencies_by_id[currencyId]['code']
        fee = {
            'cost': feeCost,
            'currency': feeCurrency,
        }
    # A sell fill belongs to an ask order, a buy fill to a bid order.
    orderIdField = 'ask_id' if (side == 'sell') else 'bid_id'
    orderId = self.safe_string(trade, orderIdField)
    return {
        'id': id,
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'order': orderId,
        'type': None,
        'side': side,
        'price': price,
        'amount': amount,
        'cost': cost,
        'fee': fee,
    }
def parse_order_status(self, status):
    """Map UEX numeric order-status codes onto unified ccxt statuses;
    unknown codes are passed through unchanged."""
    statuses = {
        '0': 'open',  # INIT(0,"primary orderuntraded and not enter the market")
        '1': 'open',  # NEW_(1,"new orderuntraded and enter the market ")
        '2': 'closed',  # FILLED(2,"complete deal")
        '3': 'open',  # PART_FILLED(3,"partial deal")
        '4': 'canceled',  # CANCELED(4,"already withdrawn")
        '5': 'canceled',  # PENDING_CANCEL(5,"pending withdrawak")
        '6': 'canceled',  # EXPIRED(6,"abnormal orders")
    }
    return statuses.get(status, status)
def parse_order(self, order, market=None):
    """Normalize a raw UEX order dict into the unified ccxt order structure.

    Accepts the minimal createOrder acknowledgement as well as the richer
    payloads from fetchOrder / fetchOpenOrders / fetchClosedOrders; fee and
    last-trade info are aggregated from the embedded 'tradeList'.
    """
    #
    # createOrder
    #
    #     {"order_id":34343}
    #
    # fetchOrder, fetchOpenOrders, fetchClosedOrders
    #
    # { side: "BUY",
    #   total_price: "0.10000000",
    #   created_at: 1510993841000,
    #   avg_price: "0.10000000",
    #   countCoin: "btc",
    #   source: 1,
    #   type: 1,
    #   side_msg: "",
    #   volume: "1.000",
    #   price: "0.10000000",
    #   source_msg: "WEB",
    #   status_msg: "",
    #   deal_volume: "1.00000000",
    #   id: 424,
    #   remain_volume: "0.00000000",
    #   baseCoin: "eth",
    #   tradeList: [{ volume: "1.000",
    #                 feeCoin: "YLB",
    #                 price: "0.10000000",
    #                 fee: "0.16431104",
    #                 ctime: 1510996571195,
    #                 deal_price: "0.10000000",
    #                 id: 306,
    #                 type: "" }],
    #   status: 2 }
    #
    # fetchOrder
    #
    # {trade_list: [...],
    #  order_info: { side: "SELL", ..., tradeList: [...], status: 2 }}
    #
    side = self.safe_string(order, 'side')
    if side is not None:
        side = side.lower()
    status = self.parse_order_status(self.safe_string(order, 'status'))
    symbol = None
    if market is None:
        # Reconstruct the market from baseCoin/countCoin when not supplied.
        baseId = self.safe_string(order, 'baseCoin')
        quoteId = self.safe_string(order, 'countCoin')
        marketId = baseId + quoteId
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
        else:
            if (baseId is not None) and(quoteId is not None):
                base = baseId.upper()
                quote = quoteId.upper()
                base = self.common_currency_code(base)
                quote = self.common_currency_code(quote)
                symbol = base + '/' + quote
    if market is not None:
        symbol = market['symbol']
    timestamp = self.safe_integer(order, 'created_at')
    if timestamp is None:
        # Fallback: partial time string missing the year -- presumably
        # always within 2018; TODO confirm against live responses.
        timestring = self.safe_string(order, 'created_at')
        if timestring is not None:
            timestamp = self.parse8601('2018-' + timestring + ':00Z')
    lastTradeTimestamp = None
    fee = None
    average = self.safe_float(order, 'avg_price')
    price = self.safe_float(order, 'price')
    if price == 0:
        # Market orders report price 0; substitute the average fill price.
        price = average
    amount = self.safe_float(order, 'volume')
    filled = self.safe_float(order, 'deal_volume')
    remaining = self.safe_float(order, 'remain_volume')
    cost = self.safe_float(order, 'total_price')
    id = self.safe_string_2(order, 'id', 'order_id')
    trades = None
    tradeList = self.safe_value(order, 'tradeList', [])
    feeCurrencies = {}
    feeCost = None
    # Aggregate fees and the latest fill timestamp across all fills.
    for i in range(0, len(tradeList)):
        trade = self.parse_trade(tradeList[i], market)
        if feeCost is None:
            feeCost = 0
        feeCost = feeCost + trade['fee']['cost']
        tradeFeeCurrency = trade['fee']['currency']
        feeCurrencies[tradeFeeCurrency] = trade['fee']['cost']
        if trades is None:
            trades = []
        lastTradeTimestamp = trade['timestamp']
        trades.append(self.extend(trade, {
            'order': id,
        }))
    if feeCost is not None:
        # Only report a fee currency when all fills share a single one.
        feeCurrency = None
        keys = list(feeCurrencies.keys())
        numCurrencies = len(keys)
        if numCurrencies == 1:
            feeCurrency = keys[0]
        fee = {
            'cost': feeCost,
            'currency': feeCurrency,
        }
    result = {
        'info': order,
        'id': id,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': lastTradeTimestamp,
        'symbol': symbol,
        'type': 'limit',
        'side': side,
        'price': price,
        'cost': cost,
        'average': average,
        'amount': amount,
        'filled': filled,
        'remaining': remaining,
        'status': status,
        'fee': fee,
        'trades': trades,
    }
    return result
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
    """Parse a list of raw transactions, tagging each with *type*
    (e.g. 'deposit'/'withdrawal'), then filter by currency/since/limit."""
    parsed = [
        self.parse_transaction(self.extend({'type': type}, transaction))
        for transaction in transactions
    ]
    return self.filterByCurrencySinceLimit(parsed, code, since, limit)
def parse_transaction(self, transaction, currency=None):
    """Normalize a raw UEX deposit/withdrawal dict into the unified ccxt
    transaction structure.

    The 'type' key ('deposit'/'withdrawal') is injected by the caller
    (see parse_transactions_by_type) before this method runs.
    """
    #
    # deposits
    #
    # { createdAt: 1533615955000,
    #   amount: "0.01",
    #   updateAt: 1533616311000,
    #   txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde",
    #   tag: "",
    #   confirmations: 30,
    #   addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
    #   status: 1,
    #   coin: "ETH" }
    #
    # withdrawals
    #
    # {
    #     "updateAt": 1540344965000,
    #     "createdAt": 1539311971000,
    #     "status": 0,
    #     "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q",
    #     "tag": "100128877",
    #     "id": 5,
    #     "txid": "",
    #     "fee": 0.0,
    #     "amount": "1",
    #     "symbol": "XTZ"
    # }
    #
    id = self.safe_string(transaction, 'id')
    txid = self.safe_string(transaction, 'txid')
    timestamp = self.safe_integer(transaction, 'createdAt')
    updated = self.safe_integer(transaction, 'updateAt')
    code = None
    # Withdrawals use 'symbol', deposits use 'coin' for the currency id.
    currencyId = self.safe_string_2(transaction, 'symbol', 'coin')
    currency = self.safe_value(self.currencies_by_id, currencyId)
    if currency is not None:
        code = currency['code']
    else:
        code = self.common_currency_code(currencyId)
    address = self.safe_string(transaction, 'addressTo')
    tag = self.safe_string(transaction, 'tag')
    amount = self.safe_float(transaction, 'amount')
    status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
    type = self.safe_string(transaction, 'type')  # injected from the outside
    feeCost = self.safe_float(transaction, 'fee')
    if (type == 'deposit') and(feeCost is None):
        # Deposits carry no fee field; report an explicit zero fee.
        feeCost = 0
    return {
        'info': transaction,
        'id': id,
        'currency': code,
        'amount': amount,
        'address': address,
        'tag': tag,
        'status': status,
        'type': type,
        'updated': updated,
        'txid': txid,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'fee': {
            'currency': code,
            'cost': feeCost,
        },
    }
def parse_transaction_status(self, status):
    """Map UEX numeric transaction-status codes onto unified ccxt statuses
    ('pending'/'ok'/'failed'/'canceled'); unknown codes pass through."""
    status_map = {
        '0': 'pending',  # unaudited
        '1': 'ok',  # audited
        '2': 'failed',  # audit failed
        '3': 'pending',  # "payment"
        '4': 'failed',  # payment failed
        '5': 'ok',
        '6': 'canceled',
    }
    return self.safe_string(status_map, status, status)
| 44.882096 | 465 | 0.432088 |
43a74cac582bdf300bc81daa9bedf7b376e2c024 | 906 | py | Python | Alpha & Beta/wootMath/decimalToBinaryFraction.py | Mdlkxzmcp/various_python | be4f873c6263e3db11177bbccce2aa465514294d | [
"MIT"
] | null | null | null | Alpha & Beta/wootMath/decimalToBinaryFraction.py | Mdlkxzmcp/various_python | be4f873c6263e3db11177bbccce2aa465514294d | [
"MIT"
] | null | null | null | Alpha & Beta/wootMath/decimalToBinaryFraction.py | Mdlkxzmcp/various_python | be4f873c6263e3db11177bbccce2aa465514294d | [
"MIT"
] | null | null | null | def decimal_to_binary_fraction(x=0.5):
"""
Input: x, a float between 0 and 1
Returns binary representation of x
"""
p = 0
while ((2 ** p) * x) % 1 != 0:
# print('Remainder = ' + str((2**p)*x - int((2**p)*x)))
p += 1
num = int(x * (2 ** p))
result = ''
if num == 0:
result = '0'
while num > 0:
result = str(num % 2) + result
num //= 2
for i in range(p - len(result)):
result = '0' + result
result = result[0:-p] + '.' + result[-p:]
return result # If there is no integer p such that x*(2**p) is a whole number, then internal
# representation is always an approximation
# Suggest that testing equality of floats is not exact: Use abs(x-y) < some
# small number, rather than x == y
# Why does print(0.1) return 0.1, if not exact?
# Because Python designers set it up this way to automatically round
| 27.454545 | 97 | 0.566225 |
43a79fa3a61473b076f77344a5a402f9d3ac1f06 | 3,091 | py | Python | composer/utils/run_directory.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | composer/utils/run_directory.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | composer/utils/run_directory.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 MosaicML. All Rights Reserved.
import datetime
import logging
import os
import pathlib
import time
from composer.utils import dist
log = logging.getLogger(__name__)
_RUN_DIRECTORY_KEY = "COMPOSER_RUN_DIRECTORY"
_start_time_str = datetime.datetime.now().isoformat()
def get_node_run_directory() -> str:
    """Return the run directory shared by all ranks on this node.

    The location comes from the run-directory environment variable when set,
    otherwise it defaults to ``runs/<process start time>``. The directory is
    created if it does not already exist.

    Returns:
        str: Absolute path of the node run directory.
    """
    default_dir = os.path.join("runs", _start_time_str)
    run_dir = os.environ.get(_RUN_DIRECTORY_KEY, default_dir)
    # Drop a single trailing separator so os.path.basename behaves as expected.
    if run_dir.endswith(os.path.sep):
        run_dir = run_dir[:-1]
    os.makedirs(run_dir, exist_ok=True)
    return os.path.abspath(run_dir)
def get_run_directory() -> str:
    """Returns the run directory for the current rank.
    Returns:
        str: The run directory.
    """
    # Each global rank gets its own "rank_<N>" subfolder under the shared
    # node run directory; created on first access.
    run_dir = os.path.join(get_node_run_directory(), f"rank_{dist.get_global_rank()}")
    os.makedirs(run_dir, exist_ok=True)
    return run_dir
def get_modified_files(modified_since_timestamp: float, *, ignore_hidden: bool = True):
    """Returns a list of files (recursively) in the run directory that have been modified since
    ``modified_since_timestamp``.
    Args:
        modified_since_timestamp (float): Minimum last modified timestamp(in seconds since EPOCH)
            of files to include.
        ignore_hidden (bool, optional): Whether to ignore hidden files and folders (default: ``True``)
    Returns:
        List[str]: List of filepaths that have been modified since ``modified_since_timestamp``
    """
    modified_files = []
    run_directory = get_run_directory()
    if run_directory is None:
        raise RuntimeError("Run directory is not defined")
    for root, dirs, files in os.walk(run_directory):
        if ignore_hidden:
            # Prune hidden directories in place so os.walk does not descend into
            # them. The previous filename-only check never matched files nested
            # inside hidden folders, because os.walk yields bare filenames with
            # no path separators in them.
            dirs[:] = [d for d in dirs if not d.startswith(".")]
        for file in files:
            if ignore_hidden and file.startswith("."):
                # skip hidden files
                continue
            filepath = os.path.join(root, file)
            modified_time = os.path.getmtime(filepath)
            if modified_time >= modified_since_timestamp:
                modified_files.append(filepath)
    return modified_files
def get_run_directory_timestamp() -> float:
    """Returns the current timestamp on the run directory filesystem.
    Note that the disk time can differ from system time (e.g. when using
    network filesystems).
    Returns:
        float: the current timestamp on the run directory filesystem.
    """
    run_directory = get_run_directory()
    # Defensive check; get_run_directory() as defined here always returns a str.
    if run_directory is None:
        raise RuntimeError("Run directory is not defined")
    # Create a throwaway hidden file and read back its mtime: this reports the
    # filesystem's own clock rather than the local system clock.
    python_time = time.time()
    touch_file = (pathlib.Path(run_directory) / f".{python_time}")
    touch_file.touch()
    new_last_uploaded_timestamp = os.path.getmtime(str(touch_file))
    os.remove(str(touch_file))
    return new_last_uploaded_timestamp
| 35.125 | 102 | 0.697185 |
43a848be2ab70fca075a6b29e18609d29a8a5a7d | 1,109 | py | Python | newsapp/migrations/0003_news.py | adi112100/newsapp | 7cdf6070299b4a8dcc950e7fcdfb82cf1a1d98cb | [
"MIT"
] | null | null | null | newsapp/migrations/0003_news.py | adi112100/newsapp | 7cdf6070299b4a8dcc950e7fcdfb82cf1a1d98cb | [
"MIT"
] | null | null | null | newsapp/migrations/0003_news.py | adi112100/newsapp | 7cdf6070299b4a8dcc950e7fcdfb82cf1a1d98cb | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-11 08:10
from django.db import migrations, models
| 34.65625 | 114 | 0.538323 |
43a8bd9cb32de8f8138b7b033dc19e078566fbea | 426 | py | Python | src/enum/__init__.py | NazarioJL/faker_enum | c2703cae232b229b4d4ab2b73757102453d541ab | [
"MIT"
] | 5 | 2019-08-02T17:59:10.000Z | 2021-05-14T08:30:55.000Z | src/enum/__init__.py | NazarioJL/faker_enum | c2703cae232b229b4d4ab2b73757102453d541ab | [
"MIT"
] | 4 | 2018-10-26T06:52:05.000Z | 2022-01-31T20:31:17.000Z | src/enum/__init__.py | NazarioJL/faker_enum | c2703cae232b229b4d4ab2b73757102453d541ab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from enum import Enum
from typing import TypeVar, Type, List, Iterable, cast
from faker.providers import BaseProvider
TEnum = TypeVar("TEnum", bound=Enum)
| 22.421053 | 68 | 0.676056 |
43a8f0198728c9384389bf87f96be11372c94f28 | 262 | py | Python | tests/performance/bottle/simple_server.py | Varriount/sanic | 55c36e0240dfeb03deccdeb5a53ca7fcfa728bff | [
"MIT"
] | 4,959 | 2018-09-13T08:42:51.000Z | 2021-01-05T07:01:44.000Z | tests/performance/bottle/simple_server.py | Varriount/sanic | 55c36e0240dfeb03deccdeb5a53ca7fcfa728bff | [
"MIT"
] | 864 | 2018-09-13T20:48:04.000Z | 2021-01-05T07:33:30.000Z | tests/performance/bottle/simple_server.py | Varriount/sanic | 55c36e0240dfeb03deccdeb5a53ca7fcfa728bff | [
"MIT"
] | 612 | 2018-09-13T21:10:04.000Z | 2020-12-30T12:16:36.000Z | # Run with: gunicorn --workers=1 --worker-class=meinheld.gmeinheld.MeinheldWorker -b :8000 simple_server:app
import bottle
import ujson
from bottle import route, run
app = bottle.default_app()
| 18.714286 | 108 | 0.725191 |
43a90c6754ed5d7199ff6f282438c86387b7e8d9 | 1,485 | py | Python | usuarios/views.py | alvarocneto/alura_django | da2d3619b30c9d1c8767fa910eb7253bc20eeb90 | [
"MIT"
] | 1 | 2017-04-25T10:46:24.000Z | 2017-04-25T10:46:24.000Z | usuarios/views.py | alvarocneto/alura_django | da2d3619b30c9d1c8767fa910eb7253bc20eeb90 | [
"MIT"
] | null | null | null | usuarios/views.py | alvarocneto/alura_django | da2d3619b30c9d1c8767fa910eb7253bc20eeb90 | [
"MIT"
] | null | null | null | from django.shortcuts import redirect
from django.shortcuts import render
from django.contrib.auth.models import User
from django.views.generic.base import View
from perfis.models import Perfil
from usuarios.forms import RegistrarUsuarioForm
| 31.595745 | 72 | 0.546801 |
43a9435c49bd01eb9bc3513864f993e95030f51a | 19 | py | Python | antolib/AntoCommon.py | srsuper/BOT2020 | 2cadfad470de62819b7aaa0f9ecf1e4b4052ea68 | [
"Apache-2.0"
] | 1 | 2020-05-19T16:07:05.000Z | 2020-05-19T16:07:05.000Z | antolib/AntoCommon.py | srsuper/BOT2020 | 2cadfad470de62819b7aaa0f9ecf1e4b4052ea68 | [
"Apache-2.0"
] | null | null | null | antolib/AntoCommon.py | srsuper/BOT2020 | 2cadfad470de62819b7aaa0f9ecf1e4b4052ea68 | [
"Apache-2.0"
] | null | null | null | ANTO_VER = '0.1.2'
| 9.5 | 18 | 0.578947 |
43aa177b05dce3f050fe11c02d43b9d799f954d6 | 3,509 | py | Python | cpc_fusion/pkgs/keys/main.py | CPChain/fusion | 63b6913010e8e5b296a1900c59592c8fd1802c2e | [
"MIT"
] | 5 | 2018-12-19T02:37:18.000Z | 2022-01-26T02:52:50.000Z | cpc_fusion/pkgs/keys/main.py | CPChain/fusion | 63b6913010e8e5b296a1900c59592c8fd1802c2e | [
"MIT"
] | null | null | null | cpc_fusion/pkgs/keys/main.py | CPChain/fusion | 63b6913010e8e5b296a1900c59592c8fd1802c2e | [
"MIT"
] | null | null | null | from typing import (Any, Union, Type) # noqa: F401
from ..keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
| 35.444444 | 90 | 0.61613 |
43aab220da0c6298d29ad8922e374d3b90af61e0 | 16,406 | py | Python | qiskit/pulse/transforms/canonicalization.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | 1 | 2021-10-05T11:56:53.000Z | 2021-10-05T11:56:53.000Z | qiskit/pulse/transforms/canonicalization.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | 24 | 2021-01-27T08:20:27.000Z | 2021-07-06T09:42:28.000Z | qiskit/pulse/transforms/canonicalization.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | 4 | 2021-10-05T12:07:27.000Z | 2022-01-28T18:37:28.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Basic rescheduling functions which take schedule or instructions and return new schedules."""
import warnings
from collections import defaultdict
from typing import List, Optional, Iterable, Union
import numpy as np
from qiskit.pulse import channels as chans, exceptions, instructions
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.exceptions import UnassignedDurationError
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.instructions import directives
from qiskit.pulse.schedule import Schedule, ScheduleBlock, ScheduleComponent
def block_to_schedule(block: ScheduleBlock) -> Schedule:
    """Convert ``ScheduleBlock`` to ``Schedule``.
    Args:
        block: A ``ScheduleBlock`` to convert.
    Returns:
        Scheduled pulse program.
    Raises:
        UnassignedDurationError: When any instruction duration is not assigned.
    """
    # Scheduling requires concrete durations; unassigned parameters make it
    # impossible to compute instruction start times.
    if not block.is_schedulable():
        raise UnassignedDurationError(
            'All instruction durations should be assigned before creating `Schedule`.'
            'Please check `.parameters` to find unassigned parameter objects.')
    schedule = Schedule(name=block.name, metadata=block.metadata)
    for op_data in block.instructions:
        if isinstance(op_data, ScheduleBlock):
            # Nested blocks are converted recursively so their own alignment
            # contexts are applied before being appended here.
            context_schedule = block_to_schedule(op_data)
            schedule.append(context_schedule, inplace=True)
        else:
            schedule.append(op_data, inplace=True)
    # transform with defined policy
    return block.alignment_context.align(schedule)
def compress_pulses(schedules: List[Schedule]) -> List[Schedule]:
    """Optimization pass that de-duplicates identical pulses across schedules.

    The first occurrence of each distinct pulse (by equality) is kept as the
    canonical object; later ``Play`` instructions with an equal pulse are
    rewritten to reference that canonical pulse.

    Args:
        schedules: Schedules to compress.

    Returns:
        Compressed schedules.
    """
    unique_pulses = []
    compressed = []
    for schedule in schedules:
        rebuilt = Schedule(name=schedule.name, metadata=schedule.metadata)
        for time, inst in schedule.instructions:
            if not isinstance(inst, instructions.Play):
                # Non-Play instructions pass through untouched.
                rebuilt.insert(time, inst, inplace=True)
                continue
            try:
                # Equality-based lookup: reuse the first equal pulse seen so far.
                canonical = unique_pulses[unique_pulses.index(inst.pulse)]
            except ValueError:
                unique_pulses.append(inst.pulse)
                rebuilt.insert(time, inst, inplace=True)
            else:
                rebuilt.insert(
                    time,
                    instructions.Play(canonical, inst.channel, inst.name),
                    inplace=True,
                )
        compressed.append(rebuilt)
    return compressed
def flatten(program: Schedule) -> Schedule:
    """Return a single-level ``Schedule`` containing all of ``program``'s instructions.

    Args:
        program: Pulse program whose nested structure should be removed.

    Returns:
        An equivalent schedule with no nested children.

    Raises:
        PulseError: When ``program`` is not a ``Schedule``.
    """
    # Guard clause: reject anything that is not a Schedule up front.
    if not isinstance(program, Schedule):
        raise PulseError(f'Invalid input program {program.__class__.__name__} is specified.')
    # Re-instantiating from the flat (time, instruction) list drops the nesting.
    return Schedule(*program.instructions, name=program.name, metadata=program.metadata)
def inline_subroutines(program: Union[Schedule, ScheduleBlock]) -> Union[Schedule, ScheduleBlock]:
    """Expand every ``Call`` instruction in ``program`` into its subroutine body.

    Parameter values recorded in the call's parameter table are applied first;
    the subroutine is copied before assignment so the original is not mutated.

    Args:
        program: A program which may contain ``Call`` instructions.

    Returns:
        An equivalent program containing no subroutines.

    Raises:
        PulseError: When ``program`` is neither a ``Schedule`` nor a ``ScheduleBlock``.
    """
    # Dispatch on the concrete program representation; guard clauses replace
    # the original if/elif/else ladder.
    if isinstance(program, Schedule):
        return _inline_schedule(program)
    if isinstance(program, ScheduleBlock):
        return _inline_block(program)
    raise PulseError(f'Invalid program {program.__class__.__name__} is specified.')
def _inline_schedule(schedule: Schedule) -> Schedule:
    """A helper function to inline subroutine of schedule.
    .. note:: If subroutine is ``ScheduleBlock`` it is converted into Schedule to get ``t0``.
    """
    ret_schedule = Schedule(name=schedule.name,
                            metadata=schedule.metadata)
    for t0, inst in schedule.instructions:
        if isinstance(inst, instructions.Call):
            # bind parameter values recorded on the Call to the subroutine copy
            subroutine = inst.assigned_subroutine()
            # convert into schedule if block is given
            if isinstance(subroutine, ScheduleBlock):
                subroutine = block_to_schedule(subroutine)
            # recursively inline the program, so nested Calls are expanded too;
            # the inlined subroutine keeps the Call's original start time t0
            inline_schedule = _inline_schedule(subroutine)
            ret_schedule.insert(t0, inline_schedule, inplace=True)
        else:
            # non-Call instructions are carried over at their original time
            ret_schedule.insert(t0, inst, inplace=True)
    return ret_schedule
def _inline_block(block: ScheduleBlock) -> ScheduleBlock:
    """A helper function to inline subroutine of schedule block.
    .. note:: If subroutine is ``Schedule`` the function raises an error.
    """
    # The rebuilt block keeps the original alignment policy so relative
    # ordering semantics are unchanged.
    ret_block = ScheduleBlock(alignment_context=block.alignment_context,
                              name=block.name,
                              metadata=block.metadata)
    for inst in block.instructions:
        if isinstance(inst, instructions.Call):
            # bind parameter values recorded on the Call to the subroutine copy
            subroutine = inst.assigned_subroutine()
            # A Schedule carries absolute t0 times which a block cannot
            # represent, hence the hard error below.
            if isinstance(subroutine, Schedule):
                raise PulseError(f'A subroutine {subroutine.name} is a pulse Schedule. '
                                 'This program cannot be inserted into ScheduleBlock because '
                                 't0 associated with instruction will be lost.')
            # recursively inline the program
            inline_block = _inline_block(subroutine)
            ret_block.append(inline_block, inplace=True)
        else:
            ret_block.append(inst, inplace=True)
    return ret_block
def remove_directives(schedule: Schedule) -> Schedule:
    """Return a copy of ``schedule`` with all compiler directives filtered out.

    Args:
        schedule: A schedule to remove compiler directives from.

    Returns:
        The schedule without directive instructions.
    """
    directive_types = [directives.Directive]
    return schedule.exclude(instruction_types=directive_types)
def remove_trivial_barriers(schedule: Schedule) -> Schedule:
    """Remove trivial barriers with 0 or 1 channels.
    Args:
        schedule: A schedule to remove trivial barriers.
    Returns:
        schedule: A schedule without trivial barriers
    """
    # NOTE(review): `filter_func` is not defined anywhere in this excerpt. The
    # upstream implementation defines a local predicate here matching
    # RelativeBarrier directives with fewer than two channels — confirm the
    # definition was not lost in a merge before relying on this function.
    return schedule.exclude(filter_func)
def align_measures(schedules: Iterable[ScheduleComponent],
                   inst_map: Optional[InstructionScheduleMap] = None,
                   cal_gate: str = 'u3',
                   max_calibration_duration: Optional[int] = None,
                   align_time: Optional[int] = None,
                   align_all: Optional[bool] = True,
                   ) -> List[Schedule]:
    """Return new schedules where measurements occur at the same physical time.
    This transformation will align the first :class:`qiskit.pulse.Acquire` on
    every channel to occur at the same time.
    Minimum measurement wait time (to allow for calibration pulses) is enforced
    and may be set with ``max_calibration_duration``.
    By default only instructions containing a :class:`~qiskit.pulse.AcquireChannel`
    or :class:`~qiskit.pulse.MeasureChannel` will be shifted. If you wish to keep
    the relative timing of all instructions in the schedule set ``align_all=True``.
    This method assumes that ``MeasureChannel(i)`` and ``AcquireChannel(i)``
    correspond to the same qubit and the acquire/play instructions
    should be shifted together on these channels.
    .. jupyter-kernel:: python3
        :id: align_measures
    .. jupyter-execute::
        from qiskit import pulse
        from qiskit.pulse import transforms
        with pulse.build() as sched:
            with pulse.align_sequential():
                pulse.play(pulse.Constant(10, 0.5), pulse.DriveChannel(0))
                pulse.play(pulse.Constant(10, 1.), pulse.MeasureChannel(0))
                pulse.acquire(20, pulse.AcquireChannel(0), pulse.MemorySlot(0))
        sched_shifted = sched << 20
        aligned_sched, aligned_sched_shifted = transforms.align_measures([sched, sched_shifted])
        assert aligned_sched == aligned_sched_shifted
    If it is desired to only shift acquisition and measurement stimulus instructions
    set the flag ``align_all=False``:
    .. jupyter-execute::
        aligned_sched, aligned_sched_shifted = transforms.align_measures(
            [sched, sched_shifted],
            align_all=False,
        )
        assert aligned_sched != aligned_sched_shifted
    Args:
        schedules: Collection of schedules to be aligned together
        inst_map: Mapping of circuit operations to pulse schedules
        cal_gate: The name of the gate to inspect for the calibration time
        max_calibration_duration: If provided, inst_map and cal_gate will be ignored
        align_time: If provided, this will be used as final align time.
        align_all: Shift all instructions in the schedule such that they maintain
            their relative alignment with the shifted acquisition instruction.
            If ``False`` only the acquisition and measurement pulse instructions
            will be shifted.
    Returns:
        The input list of schedules transformed to have their measurements aligned.
    Raises:
        PulseError: If the provided alignment time is negative.
    """
    def get_first_acquire_times(schedules):
        """Return a list of first acquire times for each schedule."""
        acquire_times = []
        for schedule in schedules:
            visited_channels = set()
            qubit_first_acquire_times = defaultdict(lambda: None)
            for time, inst in schedule.instructions:
                if (isinstance(inst, instructions.Acquire) and
                        inst.channel not in visited_channels):
                    visited_channels.add(inst.channel)
                    qubit_first_acquire_times[inst.channel.index] = time
            acquire_times.append(qubit_first_acquire_times)
        return acquire_times
    def get_max_calibration_duration(inst_map, cal_gate):
        """Return the time needed to allow for readout discrimination calibration pulses."""
        # TODO (qiskit-terra #5472): fix behavior of this.
        max_calibration_duration = 0
        for qubits in inst_map.qubits_with_instruction(cal_gate):
            cmd = inst_map.get(cal_gate, qubits, np.pi, 0, np.pi)
            max_calibration_duration = max(cmd.duration, max_calibration_duration)
        return max_calibration_duration
    # Validate the user-provided alignment time before doing any work.
    if align_time is not None and align_time < 0:
        raise exceptions.PulseError("Align time cannot be negative.")
    first_acquire_times = get_first_acquire_times(schedules)
    # Extract the maximum acquire in every schedule across all acquires in the schedule.
    # If there are no acquires in the schedule default to 0.
    max_acquire_times = [max(0, *times.values()) for times in first_acquire_times]
    if align_time is None:
        if max_calibration_duration is None:
            if inst_map:
                max_calibration_duration = get_max_calibration_duration(inst_map, cal_gate)
            else:
                max_calibration_duration = 0
        align_time = max(max_calibration_duration, *max_acquire_times)
    # Shift acquires according to the new scheduled time
    new_schedules = []
    for sched_idx, schedule in enumerate(schedules):
        new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
        stop_time = schedule.stop_time
        # With align_all the whole program is shifted by one constant offset;
        # otherwise only measurement/acquire instructions are moved (shift is
        # recomputed per-instruction inside the loop below).
        if align_all:
            if first_acquire_times[sched_idx]:
                shift = align_time - max_acquire_times[sched_idx]
            else:
                shift = align_time - stop_time
        else:
            shift = 0
        for time, inst in schedule.instructions:
            measurement_channels = {
                chan.index for chan in inst.channels if
                isinstance(chan, (chans.MeasureChannel, chans.AcquireChannel))
            }
            if measurement_channels:
                # Re-align this instruction to the earliest acquire seen on any
                # of its measurement channels.
                sched_first_acquire_times = first_acquire_times[sched_idx]
                max_start_time = max(sched_first_acquire_times[chan]
                                     for chan in measurement_channels if
                                     chan in sched_first_acquire_times)
                shift = align_time - max_start_time
            if shift < 0:
                warnings.warn(
                    "The provided alignment time is scheduling an acquire instruction "
                    "earlier than it was scheduled for in the original Schedule. "
                    "This may result in an instruction being scheduled before t=0 and "
                    "an error being raised."
                )
            new_schedule.insert(time+shift, inst, inplace=True)
        new_schedules.append(new_schedule)
    return new_schedules
def add_implicit_acquires(schedule: ScheduleComponent,
                          meas_map: List[List[int]]
                          ) -> Schedule:
    """Return a new schedule with implicit acquires from the measurement mapping replaced by
    explicit ones.
    .. warning:: Since new acquires are being added, Memory Slots will be set to match the
                 qubit index. This may overwrite your specification.
    Args:
        schedule: Schedule to be aligned.
        meas_map: List of lists of qubits that are measured together.
    Returns:
        A ``Schedule`` with the additional acquisition instructions.
    """
    new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
    # Tracks which qubits already have an explicit acquire at a given time,
    # so the same (time, qubit) pair is not inserted twice.
    acquire_map = dict()
    for time, inst in schedule.instructions:
        if isinstance(inst, instructions.Acquire):
            if inst.mem_slot and inst.mem_slot.index != inst.channel.index:
                warnings.warn("One of your acquires was mapped to a memory slot which didn't match"
                              " the qubit index. I'm relabeling them to match.")
            # Get the label of all qubits that are measured with the qubit(s) in this instruction
            all_qubits = []
            for sublist in meas_map:
                if inst.channel.index in sublist:
                    all_qubits.extend(sublist)
            # Replace the old acquire instruction by a new one explicitly acquiring all qubits in
            # the measurement group.
            for i in all_qubits:
                explicit_inst = instructions.Acquire(inst.duration,
                                                     chans.AcquireChannel(i),
                                                     mem_slot=chans.MemorySlot(i),
                                                     kernel=inst.kernel,
                                                     discriminator=inst.discriminator)
                if time not in acquire_map:
                    new_schedule.insert(time, explicit_inst, inplace=True)
                    # NOTE(review): this rebinds acquire_map to a single-entry
                    # dict, discarding entries recorded for earlier times —
                    # confirm whether that reset is intentional upstream.
                    acquire_map = {time: {i}}
                elif i not in acquire_map[time]:
                    new_schedule.insert(time, explicit_inst, inplace=True)
                    acquire_map[time].add(i)
        else:
            new_schedule.insert(time, inst, inplace=True)
    return new_schedule
| 40.210784 | 99 | 0.643911 |
43ab35693a6001a55d9d6314ecedca585fa99ed4 | 489 | py | Python | tests/test_scraper.py | ananelson/oacensus | 87916c92ab1233bcf82a481113017dfb8d7701b9 | [
"Apache-2.0"
] | null | null | null | tests/test_scraper.py | ananelson/oacensus | 87916c92ab1233bcf82a481113017dfb8d7701b9 | [
"Apache-2.0"
] | 2 | 2016-01-10T20:23:41.000Z | 2016-01-14T16:57:06.000Z | tests/test_scraper.py | ananelson/oacensus | 87916c92ab1233bcf82a481113017dfb8d7701b9 | [
"Apache-2.0"
] | null | null | null | from oacensus.scraper import Scraper
from oacensus.commands import defaults
| 21.26087 | 62 | 0.678937 |
43ab43b6738516044ebfd16ee957b6dda20ddd01 | 161 | py | Python | python/test-deco-1-1.py | li-ma/homework | d75b1752a02bd028af0806683abe079c7b0a9b29 | [
"Apache-2.0"
] | null | null | null | python/test-deco-1-1.py | li-ma/homework | d75b1752a02bd028af0806683abe079c7b0a9b29 | [
"Apache-2.0"
] | null | null | null | python/test-deco-1-1.py | li-ma/homework | d75b1752a02bd028af0806683abe079c7b0a9b29 | [
"Apache-2.0"
] | null | null | null |
deco1(myfunc)
| 16.1 | 36 | 0.608696 |
43abfef786fc99686d3027b89832f4ac4ffeea43 | 7,885 | py | Python | lib/jnpr/junos/transport/tty_netconf.py | mmoucka/py-junos-eznc | 9ef5ad39e32ae670fe8ed0092d725661a45b3053 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lib/jnpr/junos/transport/tty_netconf.py | mmoucka/py-junos-eznc | 9ef5ad39e32ae670fe8ed0092d725661a45b3053 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lib/jnpr/junos/transport/tty_netconf.py | mmoucka/py-junos-eznc | 9ef5ad39e32ae670fe8ed0092d725661a45b3053 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import re
import time
from lxml import etree
import select
import socket
import logging
import sys
from lxml.builder import E
from lxml.etree import XMLSyntaxError
from datetime import datetime, timedelta
from ncclient.operations.rpc import RPCReply, RPCError
from ncclient.xml_ import to_ele
import six
from ncclient.transport.session import HelloHandler
__all__ = ["xmlmode_netconf"]
_NETCONF_EOM = six.b("]]>]]>")
_xmlns = re.compile(six.b("xmlns=[^>]+"))
_xmlns_strip = lambda text: _xmlns.sub(PY6.EMPTY_STR, text)
_junosns = re.compile(six.b("junos:"))
_junosns_strip = lambda text: _junosns.sub(PY6.EMPTY_STR, text)
logger = logging.getLogger("jnpr.junos.tty_netconf")
# =========================================================================
# xmlmode_netconf
# =========================================================================
| 35.200893 | 83 | 0.49182 |
43ad02233acb1702dc2da7147208eb71f07d888f | 409 | py | Python | test/_test_client.py | eydam-prototyping/mp_modbus | 8007c41dd16e6f71bd27b587628f57f38f27a7e0 | [
"MIT"
] | 2 | 2022-01-06T02:21:16.000Z | 2022-03-08T07:55:43.000Z | test/_test_client.py | eydam-prototyping/mp_modbus | 8007c41dd16e6f71bd27b587628f57f38f27a7e0 | [
"MIT"
] | 2 | 2021-12-10T15:56:52.000Z | 2022-02-19T23:45:24.000Z | test/_test_client.py | eydam-prototyping/mp_modbus | 8007c41dd16e6f71bd27b587628f57f38f27a7e0 | [
"MIT"
] | 3 | 2021-07-30T11:16:55.000Z | 2022-01-05T18:19:55.000Z | from pymodbus.client.sync import ModbusTcpClient as ModbusClient
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s '
'%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
client = ModbusClient('192.168.178.61', port=502)
client.connect()
f = client.read_holding_registers(305,1)
print(f.registers) | 37.181818 | 67 | 0.743276 |
43ad3e59d1619acb8d9309d2b2e5ad3161003839 | 2,664 | py | Python | tests/selenium/test_about/test_about_page.py | technolotrix/tests | ae5b9741e80a1fd735c66de93cc014f672c5afb2 | [
"Apache-2.0"
] | null | null | null | tests/selenium/test_about/test_about_page.py | technolotrix/tests | ae5b9741e80a1fd735c66de93cc014f672c5afb2 | [
"Apache-2.0"
] | null | null | null | tests/selenium/test_about/test_about_page.py | technolotrix/tests | ae5b9741e80a1fd735c66de93cc014f672c5afb2 | [
"Apache-2.0"
] | null | null | null | import unittest
from selenium import webdriver
import page
######## FOOTER STUFF ########
if __name__ == "__main__":
unittest.main() | 36 | 98 | 0.703453 |
43ae1b68e450c7cd53ba9d214198e618977b86cc | 1,297 | py | Python | sdk/python/lib/test/langhost/future_input/__main__.py | pcen/pulumi | 1bb85ca98c90f2161fe915df083d47c56c135e4d | [
"Apache-2.0"
] | 12,004 | 2018-06-17T23:56:29.000Z | 2022-03-31T18:00:09.000Z | sdk/python/lib/test/langhost/future_input/__main__.py | pcen/pulumi | 1bb85ca98c90f2161fe915df083d47c56c135e4d | [
"Apache-2.0"
] | 6,263 | 2018-06-17T23:27:24.000Z | 2022-03-31T19:20:35.000Z | sdk/python/lib/test/langhost/future_input/__main__.py | pcen/pulumi | 1bb85ca98c90f2161fe915df083d47c56c135e4d | [
"Apache-2.0"
] | 706 | 2018-06-17T23:56:50.000Z | 2022-03-31T11:20:23.000Z | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from pulumi import CustomResource, Output, Input
# read_a_file_or_something returns a coroutine when called, which needs to be scheduled
# and awaited in order to yield a value.
file_res = FileResource("file", read_a_file_or_something())
file_res.contents.apply(lambda c: assert_eq(c, "here's a file"))
| 36.027778 | 87 | 0.739399 |
43af0965a86312e6e30a4f1113799d3cd2575b0a | 5,079 | py | Python | src/dewloosh/geom/cells/h8.py | dewloosh/dewloosh-geom | 5c97fbab4b68f4748bf4309184b9e0e877f94cd6 | [
"MIT"
] | 2 | 2021-12-11T17:25:51.000Z | 2022-01-06T15:36:27.000Z | src/dewloosh/geom/cells/h8.py | dewloosh/dewloosh-geom | 5c97fbab4b68f4748bf4309184b9e0e877f94cd6 | [
"MIT"
] | null | null | null | src/dewloosh/geom/cells/h8.py | dewloosh/dewloosh-geom | 5c97fbab4b68f4748bf4309184b9e0e877f94cd6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from dewloosh.geom.polyhedron import HexaHedron
from dewloosh.math.numint import GaussPoints as Gauss
from dewloosh.geom.utils import cells_coords
from numba import njit, prange
import numpy as np
from numpy import ndarray
__cache = True
class H8(HexaHedron):
    """
    8-node isoparametric hexahedron.
    top
    7--6
    | |
    4--5
    bottom
    3--2
    | |
    0--1
    """
    def shape_function_derivatives(self, coords=None, *args, **kwargs):
        # Fall back to the mesh's stored point coordinates when none are given.
        coords = self.pointdata.x.to_numpy() if coords is None else coords
        # A 2D array presumably means a batch of evaluation points (bulk path);
        # confirm against dshp_H8_bulk's expected input shape.
        if len(coords.shape) == 2:
            return dshp_H8_bulk(coords)
        else:
            return dshp_H8(coords)
    def volumes(self, coords=None, topo=None):
        # Default to the mesh's own coordinates and connectivity.
        coords = self.pointdata.x.to_numpy() if coords is None else coords
        topo = self.nodes.to_numpy() if topo is None else topo
        # Gather per-cell nodal coordinates for the quadrature-based volume sum.
        ecoords = cells_coords(coords, topo)
        # 2x2x2 Gauss points/weights for the volume integral.
        qpos, qweight = Gauss(2, 2, 2)
        return volumes_H8(ecoords, qpos, qweight)
43af456bb12d9242e1f8878ab32c7792bb2310ac | 2,108 | py | Python | tests/models/pr_test_data.py | heaven00/github-contribution-leaderboard | 3de53a60a7c81b91291e29d063c7fd14696d426d | [
"Apache-2.0"
] | null | null | null | tests/models/pr_test_data.py | heaven00/github-contribution-leaderboard | 3de53a60a7c81b91291e29d063c7fd14696d426d | [
"Apache-2.0"
] | null | null | null | tests/models/pr_test_data.py | heaven00/github-contribution-leaderboard | 3de53a60a7c81b91291e29d063c7fd14696d426d | [
"Apache-2.0"
] | null | null | null | import copy
import json
from ghcl.models.pull_request import PullRequest
| 31.939394 | 76 | 0.597723 |
43af80522808363696ca10665012f09669723d2f | 609 | py | Python | Validation/EventGenerator/python/BasicGenParticleValidation_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 2 | 2020-10-26T18:40:32.000Z | 2021-04-10T16:33:25.000Z | Validation/EventGenerator/python/BasicGenParticleValidation_cfi.py | gartung/cmssw | 3072dde3ce94dcd1791d778988198a44cde02162 | [
"Apache-2.0"
] | 30 | 2015-11-04T11:42:27.000Z | 2021-12-01T07:56:34.000Z | Validation/EventGenerator/python/BasicGenParticleValidation_cfi.py | gartung/cmssw | 3072dde3ce94dcd1791d778988198a44cde02162 | [
"Apache-2.0"
] | 8 | 2016-03-25T07:17:43.000Z | 2021-07-08T17:11:21.000Z | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# DQM analyzer instance configured with generator-level input collections.
basicGenParticleValidation = DQMEDAnalyzer('BasicGenParticleValidation',
    hepmcCollection = cms.InputTag("generatorSmeared"),
    genparticleCollection = cms.InputTag("genParticles",""),
    genjetsCollection = cms.InputTag("ak4GenJets",""),
    matchingPrecision = cms.double(0.001),
    verbosity = cms.untracked.uint32(0),
    UseWeightFromHepMC = cms.bool(True),
    signalParticlesOnly = cms.bool(False)
)
# Variant that differs only in restricting the validation to signal particles.
basicGenParticleValidationHiMix = basicGenParticleValidation.clone(signalParticlesOnly = True)
| 40.6 | 94 | 0.784893 |
43b1df830b2abdb7a53300c3467f70be764c0f6f | 1,235 | py | Python | k_values_graph.py | leobouts/Skyline_top_k_queries | 5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0 | [
"MIT"
] | null | null | null | k_values_graph.py | leobouts/Skyline_top_k_queries | 5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0 | [
"MIT"
] | null | null | null | k_values_graph.py | leobouts/Skyline_top_k_queries | 5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0 | [
"MIT"
] | null | null | null | from a_top_k import *
from b_top_k import *
import time
if __name__ == "__main__":
main()
| 24.7 | 70 | 0.673684 |
43b219f1675072d8c1034bc153a5f05238d1fdf2 | 639 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | """Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
| 25.56 | 80 | 0.596244 |
43b28c13174a1c70f27d43e88e2fd455da590fcc | 4,764 | py | Python | models/TextCNN/cnn2d.py | Renovamen/Text-Classification | 4a4aa4001c402ed4371ebaabe1393b27794e5992 | [
"MIT"
] | 72 | 2020-06-23T18:26:47.000Z | 2022-03-26T13:33:30.000Z | models/TextCNN/cnn2d.py | Renovamen/Text-Classification | 4a4aa4001c402ed4371ebaabe1393b27794e5992 | [
"MIT"
] | 5 | 2020-12-04T13:31:09.000Z | 2021-08-03T14:11:52.000Z | models/TextCNN/cnn2d.py | Renovamen/Text-Classification | 4a4aa4001c402ed4371ebaabe1393b27794e5992 | [
"MIT"
] | 15 | 2020-06-24T16:08:39.000Z | 2022-02-04T06:53:38.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
| 30.538462 | 145 | 0.588161 |
43b32db495f046dd61a5bbd3592b8806b465b229 | 785 | py | Python | LEVEL2/다리를지나는트럭/solution.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | LEVEL2/다리를지나는트럭/solution.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | LEVEL2/다리를지나는트럭/solution.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null |
# print(solution(2, 10, [7, 4, 5, 6]))
print(solution(100, 100, [10]))
| 28.035714 | 73 | 0.49172 |
43b37687b876abf43457859ada796360f659fa78 | 2,595 | py | Python | heat/tests/convergence/framework/testutils.py | maestro-hybrid-cloud/heat | 91a4bb3170bd81b1c67a896706851e55709c9b5a | [
"Apache-2.0"
] | null | null | null | heat/tests/convergence/framework/testutils.py | maestro-hybrid-cloud/heat | 91a4bb3170bd81b1c67a896706851e55709c9b5a | [
"Apache-2.0"
] | null | null | null | heat/tests/convergence/framework/testutils.py | maestro-hybrid-cloud/heat | 91a4bb3170bd81b1c67a896706851e55709c9b5a | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log as logging
from heat.tests.convergence.framework import reality
from heat.tests.convergence.framework import scenario_template
LOG = logging.getLogger(__name__)
| 36.549296 | 79 | 0.649326 |
43b5471678e7c510bd2a55fdced1140414dcd734 | 440 | py | Python | device_geometry.py | AstroShen/fpga21-scaled-tech | 8a7016913c18d71844f733bc80a3ceaa2d033ac2 | [
"MIT"
] | 2 | 2021-09-02T13:13:35.000Z | 2021-12-19T11:35:03.000Z | device_geometry.py | AstroShen/fpga21-scaled-tech | 8a7016913c18d71844f733bc80a3ceaa2d033ac2 | [
"MIT"
] | null | null | null | device_geometry.py | AstroShen/fpga21-scaled-tech | 8a7016913c18d71844f733bc80a3ceaa2d033ac2 | [
"MIT"
] | 2 | 2021-09-29T02:53:03.000Z | 2022-03-27T09:55:35.000Z | """Holds the device gemoetry parameters (Table 5), taken from Wu et al.,
>> A Predictive 3-D Source/Drain Resistance Compact Model and the Impact on 7 nm and Scaled FinFets<<, 2020, with interpolation for 4nm. 16nm is taken from PTM HP.
"""
node_names = [16, 7, 5, 4, 3]
GP = [64, 56, 48, 44, 41]
FP = [40, 30, 28, 24, 22]
GL = [20, 18, 16, 15, 14]
FH = [26, 35, 45, 50, 55]
FW = [12, 6.5, 6, 5.5, 5.5]
vdd = [0.85, 0.75, 0.7, 0.65, 0.65]
| 36.666667 | 163 | 0.615909 |
43b56590cfbfa648aa925a4f729f3fc4fe304008 | 2,605 | py | Python | nova/tests/servicegroup/test_zk_driver.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 7 | 2017-06-19T19:37:00.000Z | 2019-06-16T02:06:14.000Z | nova/tests/servicegroup/test_zk_driver.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 9 | 2015-05-20T11:20:17.000Z | 2017-07-27T08:21:33.000Z | nova/tests/servicegroup/test_zk_driver.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 13 | 2015-05-05T09:34:04.000Z | 2017-11-08T02:03:46.000Z | # Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the ZooKeeper driver for servicegroup.
You need to install ZooKeeper locally and related dependencies
to run the test. It's unclear how to install python-zookeeper lib
in venv so you might have to run the test without it.
To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""
import eventlet
from nova import servicegroup
from nova import test
| 39.469697 | 78 | 0.700576 |
43b6084ad6323124af0ef6d980f927d5cab21334 | 780 | py | Python | tests/test_misc.py | lordmauve/chopsticks | 87c6a5d0049a45db1477a21510cba650f470a8ac | [
"Apache-2.0"
] | 171 | 2016-07-14T11:29:15.000Z | 2022-03-12T07:39:12.000Z | tests/test_misc.py | moreati/chopsticks | 87c6a5d0049a45db1477a21510cba650f470a8ac | [
"Apache-2.0"
] | 59 | 2016-07-23T14:05:58.000Z | 2020-06-26T15:49:07.000Z | tests/test_misc.py | moreati/chopsticks | 87c6a5d0049a45db1477a21510cba650f470a8ac | [
"Apache-2.0"
] | 17 | 2016-08-01T06:46:27.000Z | 2018-03-25T14:46:15.000Z | """Tests for miscellaneous properties, such as debuggability."""
import time
from chopsticks.tunnel import Docker
from chopsticks.group import Group
def test_tunnel_repr():
"""Tunnels have a usable repr."""
tun = Docker('py36', image='python:3.6')
assert repr(tun) == "Docker('py36')"
def test_group_repr():
"""Groups have a usable repr."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
assert repr(grp) == "Group([Docker('py35'), Docker('py36')])"
def test_group_reuse():
"""We can re-use a group."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
with grp:
grp.call(time.time)
grp.call(time.time)
| 25.16129 | 65 | 0.601282 |
43b62d9d4c35cd12677417d9abccab4b3568c545 | 3,028 | py | Python | Evaluation/PostProcesing.py | AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion | 366ac5073cea96b662b934c3657446c9f1aa2f65 | [
"MIT"
] | null | null | null | Evaluation/PostProcesing.py | AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion | 366ac5073cea96b662b934c3657446c9f1aa2f65 | [
"MIT"
] | 3 | 2022-03-12T17:16:36.000Z | 2022-03-17T12:14:56.000Z | Evaluation/PostProcesing.py | AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion | 366ac5073cea96b662b934c3657446c9f1aa2f65 | [
"MIT"
] | 1 | 2022-03-12T11:56:43.000Z | 2022-03-12T11:56:43.000Z | import pandas as pd
import re
import glob
| 33.274725 | 89 | 0.691546 |
43b693bbc83efef69f13c3a5a3bab32c542470ab | 2,276 | py | Python | app/wirecard/tasks.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | app/wirecard/tasks.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | app/wirecard/tasks.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | from sentry_sdk import capture_exception
from dateutil.parser import parse
from project_configuration.celery import app
from orders.models import Charge
from request_shoutout.domain.models import Charge as DomainCharge
from .models import WirecardTransactionData
CROSS_SYSTEMS_STATUS_MAPPING = {
'WAITING': DomainCharge.PROCESSING,
'IN_ANALYSIS': DomainCharge.PROCESSING,
'PRE_AUTHORIZED': DomainCharge.PRE_AUTHORIZED,
'AUTHORIZED': DomainCharge.PAID,
'CANCELLED': DomainCharge.CANCELLED,
'REFUNDED': DomainCharge.CANCELLED,
'REVERSED': DomainCharge.CANCELLED,
'SETTLED': DomainCharge.PAID,
}
| 38.576271 | 93 | 0.784271 |
43b6c1b507adc1bb371518dff1d4802b73e3e1a5 | 434 | py | Python | py/multiple_dispatch_example.py | coalpha/coalpha.github.io | 8a620314a5c0bcbe2225d29f733379d181534430 | [
"Apache-2.0"
] | null | null | null | py/multiple_dispatch_example.py | coalpha/coalpha.github.io | 8a620314a5c0bcbe2225d29f733379d181534430 | [
"Apache-2.0"
] | 1 | 2020-04-12T07:48:18.000Z | 2020-04-12T07:49:29.000Z | py/multiple_dispatch_example.py | coalpha/coalpha.github.io | 8a620314a5c0bcbe2225d29f733379d181534430 | [
"Apache-2.0"
] | 1 | 2020-09-30T05:27:07.000Z | 2020-09-30T05:27:07.000Z | from typing import *
from multiple_dispatch import multiple_dispatch
print(add(2, "hello"))
| 18.083333 | 47 | 0.658986 |
43b93580a409ca7d715e6c81e1d0f3517269cec7 | 4,277 | py | Python | dygraph/alexnet/network.py | Sunyingbin/models | 30a7f1757bfad79935aa865f4362a7b38e63a415 | [
"Apache-2.0"
] | null | null | null | dygraph/alexnet/network.py | Sunyingbin/models | 30a7f1757bfad79935aa865f4362a7b38e63a415 | [
"Apache-2.0"
] | null | null | null | dygraph/alexnet/network.py | Sunyingbin/models | 30a7f1757bfad79935aa865f4362a7b38e63a415 | [
"Apache-2.0"
] | null | null | null | """
AlexNet
"""
import paddle.fluid as fluid
import numpy as np
if __name__ == '__main__':
with fluid.dygraph.guard():
alexnet = AlexNet('alex-net', 3)
img = np.zeros([2, 3, 224, 224]).astype('float32')
img = fluid.dygraph.to_variable(img)
outs = alexnet(img).numpy()
print(outs)
| 32.9 | 118 | 0.53098 |
43bbbe3418d6d5e2da95d398c3928141e4b68eab | 905 | py | Python | turtlegameproject/turtlegame.py | Ayon134/code_for_Kids | d90698bb38efe5e26c31f02bd129bfdadea158e2 | [
"MIT"
] | null | null | null | turtlegameproject/turtlegame.py | Ayon134/code_for_Kids | d90698bb38efe5e26c31f02bd129bfdadea158e2 | [
"MIT"
] | null | null | null | turtlegameproject/turtlegame.py | Ayon134/code_for_Kids | d90698bb38efe5e26c31f02bd129bfdadea158e2 | [
"MIT"
] | 2 | 2021-01-08T03:52:46.000Z | 2021-04-01T19:16:12.000Z | import turtle
import random
p1=turtle.Turtle()
p1.color("green")
p1.shape("turtle")
p1.penup()
p1.goto(-200,100)
p2=p1.clone()
p2.color("blue")
p2.penup()
p2.goto(-200,-100)
p1.goto(300,60)
p1.pendown()
p1.circle(40)
p1.penup()
p1.goto(-200,100)
p2.goto(300,-140)
p2.pendown()
p2.circle(40)
p2.penup()
p2.goto(-200,-100)
die=[1,2,3,4,5,6]
i=1
while(i <= 20):
if p1.pos() >= (300,100):
print("p1 wins")
break
elif p2.pos() >= (300,-100):
print("p2 wins")
break
else:
p1_turn=input("press enter to start")
die_out=random.choice(die)
print("you get", die_out)
print("the number of steps:", 20*die_out)
p1.forward(20*die_out)
p2_turn=input("press enter to challenge")
d=random.choice(die)
print("you get",d)
print("the number os steps:",20*d)
p2.forward(20*d) | 17.745098 | 49 | 0.571271 |
43bbc2ac72a79eec23f8c2578bc9f103ba32b758 | 8,684 | py | Python | hivwholeseq/sequencing/check_pipeline.py | neherlab/hivwholeseq | 978ce4060362e4973f92b122ed5340a5314d7844 | [
"MIT"
] | 3 | 2016-09-13T12:15:47.000Z | 2021-07-03T01:28:56.000Z | hivwholeseq/sequencing/check_pipeline.py | iosonofabio/hivwholeseq | d504c63b446c3a0308aad6d6e484ea1666bbe6df | [
"MIT"
] | null | null | null | hivwholeseq/sequencing/check_pipeline.py | iosonofabio/hivwholeseq | d504c63b446c3a0308aad6d6e484ea1666bbe6df | [
"MIT"
] | 3 | 2016-01-17T03:43:46.000Z | 2020-03-25T07:00:11.000Z | #!/usr/bin/env python
# vim: fdm=marker
'''
author: Fabio Zanini
date: 15/06/14
content: Check the status of the pipeline for one or more sequencing samples.
'''
# Modules
import os
import sys
from itertools import izip
import argparse
from Bio import SeqIO
from hivwholeseq.utils.generic import getchar
from hivwholeseq.sequencing.samples import SampleSeq, load_sequencing_run
from hivwholeseq.patients.patients import load_samples_sequenced as lssp
from hivwholeseq.patients.patients import SamplePat
from hivwholeseq.sequencing.samples import load_samples_sequenced as lss
from hivwholeseq.utils.mapping import get_number_reads
from hivwholeseq.cluster.fork_cluster import fork_check_pipeline as fork_self
# Globals
len_fr = 8
len_msg = 6
spacing_fragments = 4
# Functions
def check_status(sample, step, detail=1):
'''Check for a sample a certain step of the pipeline at a certain detail'''
if detail == 1:
if step == 'premapped':
return [os.path.isfile(sample.get_premapped_filename())]
elif step == 'divided':
return [(fr, os.path.isfile(sample.get_divided_filename(fr)))
for fr in sample.regions_complete]
elif step == 'consensus':
return [(fr, os.path.isfile(sample.get_consensus_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=False)))
for fr in sample.regions_generic]
elif step == 'filtered':
return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=True)))
for fr in sample.regions_generic]
elif step == 'mapped_initial':
return [(fr, os.path.isfile(sample.get_mapped_to_initial_filename(fr)))
for fr in sample.regions_generic]
elif step == 'mapped_filtered':
# Check whether the mapped filtered is older than the mapped_initial
from hivwholeseq.utils.generic import modification_date
out = []
for fr in sample.regions_generic:
fn_mi = sample.get_mapped_to_initial_filename(fr)
fn_mf = sample.get_mapped_filtered_filename(fr)
if not os.path.isfile(fn_mf):
out.append((fr, False))
continue
if not os.path.isfile(fn_mi):
out.append((fr, True))
continue
md_mi = modification_date(fn_mi)
md_mf = modification_date(fn_mf)
if md_mf < md_mi:
out.append((fr, 'OLD'))
else:
out.append((fr, True))
return out
elif detail == 2:
if step in ('filtered', 'consensus'):
return check_status(sample, step, detail=3)
else:
return check_status(sample, step, detail=1)
elif detail == 3:
if step == 'premapped':
if os.path.isfile(sample.get_premapped_filename()):
return [get_number_reads(sample.get_premapped_filename())]
else:
return [False]
elif step == 'divided':
stati = []
for fr in sample.regions_complete:
fn = sample.get_divided_filename(fr)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'consensus':
stati = []
for fr in sample.regions_generic:
fn = sample.get_consensus_filename(fr)
if os.path.isfile(fn):
status = (fr, len(SeqIO.read(fn, 'fasta')))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'mapped':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=False)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
elif step == 'filtered':
stati = []
for fr in sample.regions_generic:
fn = sample.get_mapped_filename(fr, filtered=True)
if os.path.isfile(fn):
status = (fr, get_number_reads(fn))
else:
status = (fr, False)
stati.append(status)
return stati
# TODO: add mapped_to_initial and downstream
elif step in ('mapped_initial', 'mapped_filtered'):
return check_status(sample, step, detail=1)
def print_info(name, status, detail=1):
'''Print info on these files'''
print '{:<20s}'.format(name+':'),
if name.lower() in ['premapped']:
status = status[0]
if status == True:
print 'OK'
elif status == False:
print 'MISS'
else:
print str(status)
else:
stati = list(status)
msg = []
for (fr, status) in stati:
ms = ('{:<'+str(len_fr)+'s}').format(fr+':')
if status == True:
msg.append(ms+('{:>'+str(len_msg)+'}').format('OK'))
elif status == False:
msg.append(ms+('{:>'+str(len_msg)+'}').format('MISS'))
else:
msg.append(ms+('{:>'+str(len_msg)+'}').format(str(status)))
print (' ' * spacing_fragments).join(msg)
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Check sequencing run for missing parts of the analysis',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--runs', required=True, nargs='+',
help='Seq runs to analyze (e.g. Tue28, test_tiny)')
parser.add_argument('--adaIDs', nargs='+',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--nopatients', action='store_false', dest='use_pats',
help='Include non-patient samples (e.g. reference strains)')
parser.add_argument('--interactive', action='store_true',
help='Interactive mode')
parser.add_argument('--detail', type=int, default=1,
help='Include details on number of reads, length of consensus')
parser.add_argument('--submit', action='store_true',
help='Execute the script in parallel on the cluster')
args = parser.parse_args()
seq_runs = args.runs
adaIDs = args.adaIDs
use_pats = args.use_pats
use_interactive = args.interactive
detail = args.detail
submit = args.submit
if submit:
fork_self(seq_runs, adaIDs=adaIDs,
pats=use_pats,
detail=detail)
sys.exit()
samples_pat = lssp(include_wrong=True)
samples = lss()
samples = samples.loc[samples['seq run'].isin(seq_runs)]
if adaIDs is not None:
samples = samples.loc[samples.adapter.isin(adaIDs)]
if len(seq_runs) >= 2:
samples.sort(columns=['patient sample', 'seq run'], inplace=True)
for isa, (samplename, sample) in enumerate(samples.iterrows()):
sample = SampleSeq(sample)
print sample.name, 'seq:', sample['seq run'], sample.adapter,
if sample['patient sample'] == 'nan':
print 'not a patient sample',
if use_pats:
print '(skip)'
continue
else:
print ''
else:
sample_pat = samples_pat.loc[sample['patient sample']]
print 'patient: '+sample_pat.patient
steps = ['premapped', 'divided', 'consensus', 'mapped', 'filtered',
'mapped_initial', 'mapped_filtered']
for step in steps:
status = check_status(sample, step, detail=detail)
print_info(step.capitalize(), status, detail=detail)
if (isa != len(samples) - 1):
print ''
if use_interactive and (isa != len(samples) - 1):
print 'Press q to exit',
sys.stdout.flush()
ch = getchar()
if ch.lower() in ['q']:
print 'stopped'
break
else:
sys.stdout.write("\x1b[1A")
print ''
| 36.033195 | 106 | 0.554353 |
43be862a8ae3652cfbde5c9e9ea45da257901956 | 1,633 | py | Python | app.py | thliang01/nba-s | 660d0e830989916b7b9f3123eb809d143b714186 | [
"BSD-2-Clause"
] | null | null | null | app.py | thliang01/nba-s | 660d0e830989916b7b9f3123eb809d143b714186 | [
"BSD-2-Clause"
] | null | null | null | app.py | thliang01/nba-s | 660d0e830989916b7b9f3123eb809d143b714186 | [
"BSD-2-Clause"
] | null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# --------------------------------------------------------------
# Import and clean data
game_details = pd.read_csv('games_details.csv')
# print(game_details.head(5))
game_details.drop(['GAME_ID', 'TEAM_ID', 'PLAYER_ID', 'START_POSITION',
'COMMENT', 'TEAM_ABBREVIATION'], axis=1, inplace=True)
game_details['FTL'] = game_details['FTA'] - game_details['FTM']
game_details = game_details.dropna()
# game_details.shape
# game_details.info()
game_details['MIN'] = game_details['MIN'].str.strip(':').str[0:2]
df = game_details.copy()
if st.checkbox('Show dataframe'):
st.write("Players Game Details")
st.dataframe(df.head(10))
# --------------------------------------------------------------
st.write("Top 20 Players in the NBA")
top_activities = df.groupby(by='PLAYER_NAME')['PTS'].sum().sort_values(ascending=False).head(20).reset_index()
plt.figure(figsize=(15, 10))
plt.xlabel('POINTS', fontsize=15)
plt.ylabel('PLAYER_NAME', fontsize=15)
plt.title('Top 20 Players in the NBA League', fontsize=20)
ax = sns.barplot(x=top_activities['PTS'], y=top_activities['PLAYER_NAME'])
for i, (value, name) in enumerate(zip(top_activities['PTS'], top_activities['PLAYER_NAME'])):
ax.text(value, i - .05, f'{value:,.0f}', size=10, ha='left', va='center')
ax.set(xlabel='POINTS', ylabel='PLAYER_NAME')
st.pyplot(plt)
player = st.multiselect(
"Choose Player", df['PLAYER_NAME']
)
st.write("""
# My first app
Hello *world!*
""")
x = st.slider("Select a number")
st.write("You selected:", x)
| 32.019608 | 110 | 0.647887 |
43bfd11896f962234020d5d611ad3cb21b537df7 | 19,228 | py | Python | python/craftassist/voxel_models/geoscorer/geoscorer_util.py | kepolol/craftassist | f60a7edd0b4ea72b774cca45ba468d2e275445c2 | [
"MIT"
] | null | null | null | python/craftassist/voxel_models/geoscorer/geoscorer_util.py | kepolol/craftassist | f60a7edd0b4ea72b774cca45ba468d2e275445c2 | [
"MIT"
] | null | null | null | python/craftassist/voxel_models/geoscorer/geoscorer_util.py | kepolol/craftassist | f60a7edd0b4ea72b774cca45ba468d2e275445c2 | [
"MIT"
] | 1 | 2020-03-29T20:04:11.000Z | 2020-03-29T20:04:11.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
from datetime import datetime
import sys
import argparse
import torch
import os
from inspect import currentframe, getframeinfo
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
from shapes import get_bounds
## Train Fxns ##
def multitensor_collate_fxn(x):
"""
Takes a list of BATCHSIZE lists of tensors of length D.
Returns a list of length D of batched tensors.
"""
num_tensors_to_batch = len(x[0])
regroup_tensors = [[] for i in range(num_tensors_to_batch)]
for t_list in x:
for i, t in enumerate(t_list):
regroup_tensors[i].append(t.unsqueeze(0))
batched_tensors = [torch.cat(tl) for tl in regroup_tensors]
return batched_tensors
## 3D Utils ##
def get_side_lengths(bounds):
"""
Bounds should be a list of [min_x, max_x, min_y, max_y, min_z, max_z].
Returns a list of the side lengths.
"""
return [x + 1 for x in (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])]
def coord_to_index(coord, sl):
"""
Takes a 3D coordinate in a cube and the cube side length.
Returns index in flattened 3D array.
"""
return coord[0] * sl * sl + coord[1] * sl + coord[2]
def index_to_coord(index, sl):
"""
Takes an index into a flattened 3D array and its side length.
Returns the coordinate in the cube.
"""
coord = []
two_d_slice_size = sl * sl
coord.append(index // two_d_slice_size)
remaining = index % two_d_slice_size
coord.append(remaining // sl)
coord.append(remaining % sl)
return coord
def shift_subsegment_corner(S):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns the segment in the same form, shifted to the origin, and the shift vec
"""
bounds = get_bounds(S)
shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]]
new_S = []
for s in S:
new_S.append((tuple([sum(x) for x in zip(s[0], shift_zero_vec)]), s[1]))
return new_S, shift_zero_vec
def rotate_block(b, c, r):
""" rotates the block b around the point c by 90*r degrees
in the xz plane. r should be 1 or -1."""
# TODO add a reflection
c = np.array(c)
p = np.add(b[0], -c)
x = p[0]
z = p[2]
if r == -1:
p[0] = z
p[2] = -x
else:
p[0] = -z
p[2] = x
return (tuple(p + c), b[1])
def check_inrange(x, minval, maxval):
"""inclusive check"""
return all([v >= minval for v in x]) and all([v <= maxval for v in x])
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
# outputs a dense voxel rep (np array) from a sparse one.
# size should be a tuple of (H, W, D) for the desired voxel representation
# useid=True puts the block id into the voxel representation,
# otherwise put a 1
############################################################################
# For these "S" is a list of blocks in ((x,y,z),(id, meta)) format
# the segment is a list of the same length as S with either True or False
# at each entry marking whether that block is in the segment
# each outputs a list of blocks in ((x,y,z),(id, meta)) format
| 34.27451 | 99 | 0.63808 |
43c0a7c7b3cc424327d10e1b990bf63c250e8eb4 | 4,907 | py | Python | CryptoAttacks/tests/Block/test_gcm.py | akbarszcz/CryptoAttacks | ae675d016b314414a3dc9b23c7d8a32da4c62457 | [
"MIT"
] | 54 | 2017-03-28T23:46:58.000Z | 2022-02-23T01:53:38.000Z | CryptoAttacks/tests/Block/test_gcm.py | maximmasiutin/CryptoAttacks | d1d47d3cb2ce38738a60b728bc35ce80bfe64374 | [
"MIT"
] | null | null | null | CryptoAttacks/tests/Block/test_gcm.py | maximmasiutin/CryptoAttacks | d1d47d3cb2ce38738a60b728bc35ce80bfe64374 | [
"MIT"
] | 13 | 2017-03-31T06:07:23.000Z | 2021-11-20T19:01:30.000Z | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
import subprocess
from builtins import bytes, range
from os.path import abspath, dirname
from os.path import join as join_path
from random import randint
from CryptoAttacks.Block.gcm import *
from CryptoAttacks.Utils import log
def polynomial_factors_product(factorization):
"""factorization: [(poly1, power), (poly2, power)]"""
result = factorization[0][0].one_element()
for f, f_degree in factorization:
result *= f**f_degree
return result
if __name__ == "__main__":
run()
| 31.254777 | 144 | 0.678419 |
43c14b71a9e55a3f072d7e8094c999b91490df88 | 507 | py | Python | python_clean_architecture/use_cases/orderdata_use_case.py | jfsolarte/python_clean_architecture | 56b0c0eff50bc98774a0caee12e3030789476687 | [
"MIT"
] | null | null | null | python_clean_architecture/use_cases/orderdata_use_case.py | jfsolarte/python_clean_architecture | 56b0c0eff50bc98774a0caee12e3030789476687 | [
"MIT"
] | null | null | null | python_clean_architecture/use_cases/orderdata_use_case.py | jfsolarte/python_clean_architecture | 56b0c0eff50bc98774a0caee12e3030789476687 | [
"MIT"
] | null | null | null | from python_clean_architecture.shared import use_case as uc
from python_clean_architecture.shared import response_object as res
| 31.6875 | 89 | 0.755424 |
43c1a9b70d766525944aa92cfc1043f3d5e3bc1b | 17,842 | py | Python | owscapable/swe/common.py | b-cube/OwsCapable | a01815418fe982434503d6542cb18e1ac8989684 | [
"BSD-3-Clause"
] | 1 | 2016-02-01T12:55:13.000Z | 2016-02-01T12:55:13.000Z | owscapable/swe/common.py | b-cube/OwsCapable | a01815418fe982434503d6542cb18e1ac8989684 | [
"BSD-3-Clause"
] | 1 | 2015-06-23T14:07:50.000Z | 2015-06-23T14:07:50.000Z | owscapable/swe/common.py | b-cube/OwsCapable | a01815418fe982434503d6542cb18e1ac8989684 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import (absolute_import, division, print_function)
from owscapable.util import nspath_eval
from owscapable.namespaces import Namespaces
from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime
from dateutil import parser
from datetime import timedelta
from owscapable.etree import etree
namespaces = get_namespaces()
AnyScalar = map(lambda x: nspv(x), ["swe20:Boolean", "swe20:Count", "swe20:Quantity", "swe20:Time", "swe20:Category", "swe20:Text"])
AnyNumerical = map(lambda x: nspv(x), ["swe20:Count", "swe20:Quantity", "swe20:Time"])
AnyRange = map(lambda x: nspv(x), ["swe20:QuantityRange", "swe20:TimeRange", "swe20:CountRange", "swe20:CategoryRange"])
def get_time(value, referenceTime, uom):
try:
value = parser.parse(value)
except (AttributeError, ValueError): # Most likely an integer/float using a referenceTime
try:
if uom.lower() == "s":
value = referenceTime + timedelta(seconds=float(value))
elif uom.lower() == "min":
value = referenceTime + timedelta(minutes=float(value))
elif uom.lower() == "h":
value = referenceTime + timedelta(hours=float(value))
elif uom.lower() == "d":
value = referenceTime + timedelta(days=float(value))
except (AttributeError, ValueError):
pass
except OverflowError: # Too many numbers (> 10) or INF/-INF
if value.lower() == "inf":
value = InfiniteDateTime()
elif value.lower() == "-inf":
value = NegativeInfiniteDateTime()
return value
| 43.200969 | 172 | 0.619325 |