hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a182a47e0e9e4e6e3cf93dede6480b43b9da9679 | 381 | py | Python | book2/s4_ex2.py | Felipe-Tommaselli/Python4everbody_Michigan | f4f940c15a4b165b144d14ead79d583bf31b805b | [
"MIT"
] | null | null | null | book2/s4_ex2.py | Felipe-Tommaselli/Python4everbody_Michigan | f4f940c15a4b165b144d14ead79d583bf31b805b | [
"MIT"
] | null | null | null | book2/s4_ex2.py | Felipe-Tommaselli/Python4everbody_Michigan | f4f940c15a4b165b144d14ead79d583bf31b805b | [
"MIT"
] | null | null | null | fname = input("Enter file name: ")
if len(fname) < 1 : fname = "mbox-short.txt"
list = list()
f = open(fname)
count = 0
for line in f:
line = line.rstrip()
list = line.split()
if list == []: continue
elif list[0].lower() == 'from':
count += 1
print(list[1])
print("There were", count, "lines in the file with From as the first word") | 25.4 | 75 | 0.564304 |
a183121368090836638181c5ae887b713f923588 | 6,358 | py | Python | fedsimul/models/mnist/mclr.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 11 | 2021-05-07T01:28:26.000Z | 2022-03-10T08:23:16.000Z | fedsimul/models/mnist/mclr.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 2 | 2021-08-13T10:12:13.000Z | 2021-08-31T02:03:20.000Z | fedsimul/models/mnist/mclr.py | cshjin/fedsimul | 1e2b9a9d9034fbc679dfaff059c42dea5642971d | [
"MIT"
] | 1 | 2021-06-08T07:23:22.000Z | 2021-06-08T07:23:22.000Z | import numpy as np
import tensorflow as tf
from tqdm import trange
from fedsimul.utils.model_utils import batch_data
from fedsimul.utils.tf_utils import graph_size
from fedsimul.utils.tf_utils import process_grad
| 35.920904 | 118 | 0.574394 |
a183e429ab2df0bcb4079f035e2dd6d3cb6737a5 | 3,402 | py | Python | angr_ctf/solutions/06_angr_symbolic_dynamic_memory.py | Hamz-a/angr_playground | 8216f43bd2ec9a91c796a56bab610b119f8311cf | [
"MIT"
] | null | null | null | angr_ctf/solutions/06_angr_symbolic_dynamic_memory.py | Hamz-a/angr_playground | 8216f43bd2ec9a91c796a56bab610b119f8311cf | [
"MIT"
] | null | null | null | angr_ctf/solutions/06_angr_symbolic_dynamic_memory.py | Hamz-a/angr_playground | 8216f43bd2ec9a91c796a56bab610b119f8311cf | [
"MIT"
] | null | null | null | import angr
import claripy
path_to_bin = "../binaries/06_angr_symbolic_dynamic_memory"
# Find callback
# Avoid callback
# Create an angr project
project = angr.Project(path_to_bin)
# Create the begin state starting from address 0x08048699 (see r2 output bellow)
# $ r2 -A 06_angr_symbolic_dynamic_memory
# [0x08048490]> pdf @main
# (fcn) main 395
# main (int argc, char **argv, char **envp);
# <REDACTED>
# 0x08048664 e8e7fdffff call sym.imp.memset ; void *memset(void *s, int c, size_t n)
# 0x08048669 83c410 add esp, 0x10
# 0x0804866c 83ec0c sub esp, 0xc
# 0x0804866f 682e880408 push str.Enter_the_password: ; 0x804882e ; "Enter the password: " ; const char *format
# 0x08048674 e877fdffff call sym.imp.printf ; int printf(const char *format)
# 0x08048679 83c410 add esp, 0x10
# 0x0804867c 8b15acc8bc0a mov edx, dword [obj.buffer1] ; [0xabcc8ac:4]=0
# 0x08048682 a1a4c8bc0a mov eax, dword [obj.buffer0] ; [0xabcc8a4:4]=0
# 0x08048687 83ec04 sub esp, 4
# 0x0804868a 52 push edx
# 0x0804868b 50 push eax
# 0x0804868c 6843880408 push str.8s__8s ; 0x8048843 ; "%8s %8s" ; const char *format
# 0x08048691 e8cafdffff call sym.imp.__isoc99_scanf ; int scanf(const char *format)
# 0x08048696 83c410 add esp, 0x10
# 0x08048699 c745f4000000. mov dword [local_ch], 0 ; <<< START HERE
# < 0x080486a0 eb64 jmp 0x8048706
entry_state = project.factory.blank_state(addr=0x08048699)
# Create a Symbolic BitVectors for each part of the password (64 bits per part %8s is used in scanf)
password_part0 = claripy.BVS("password_part0", 64)
password_part1 = claripy.BVS("password_part1", 64)
# Setup some heap space
entry_state.memory.store(0xabcc8a4, 0x4000000, endness=project.arch.memory_endness)
entry_state.memory.store(0xabcc8ac, 0x4000A00, endness=project.arch.memory_endness)
# Use the created heap and inject BVS
entry_state.memory.store(0x4000000, password_part0)
entry_state.memory.store(0x4000A00, password_part1)
# Create a simulation manager
simulation_manager = project.factory.simulation_manager(entry_state)
# Pass callbacks for states that we should find and avoid
simulation_manager.explore(avoid=try_again, find=good_job)
# If simulation manager has found a state
if simulation_manager.found:
found_state = simulation_manager.found[0]
# Get flag by solving the symbolic values using the found path
solution0 = found_state.solver.eval(password_part0, cast_to=bytes)
solution1 = found_state.solver.eval(password_part1, cast_to=bytes)
print("{} {}".format(solution0.decode("utf-8"), solution1.decode("utf-8")))
else:
print("No path found...") | 44.763158 | 131 | 0.663727 |
a1841c43709e67515946480883952c56edc55654 | 57 | py | Python | run.py | JonLMyers/MetroTransitAPI | d8f467570368cd563d69564b680cfdd47ad6b622 | [
"MIT"
] | null | null | null | run.py | JonLMyers/MetroTransitAPI | d8f467570368cd563d69564b680cfdd47ad6b622 | [
"MIT"
] | null | null | null | run.py | JonLMyers/MetroTransitAPI | d8f467570368cd563d69564b680cfdd47ad6b622 | [
"MIT"
] | null | null | null | """ Runs the server """
from aaxus import app
app.run()
| 11.4 | 23 | 0.649123 |
a1856d81103436f6d6bff2bf0852aa835858a675 | 1,416 | py | Python | ConjugateGardient_Python.py | rohitj559/HPC_MPI-project | 2b8abe5044d0e8a5a607f7d534a41bb97174e165 | [
"MIT"
] | null | null | null | ConjugateGardient_Python.py | rohitj559/HPC_MPI-project | 2b8abe5044d0e8a5a607f7d534a41bb97174e165 | [
"MIT"
] | null | null | null | ConjugateGardient_Python.py | rohitj559/HPC_MPI-project | 2b8abe5044d0e8a5a607f7d534a41bb97174e165 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 20:36:02 2018
@author: Rohit
"""
# =============================================================================
# import numpy as np
# a = np.array([5,4])[np.newaxis]
# print(a)
# print(a.T)
#
# function [x] = conjgrad(A, b, x)
# r = b - A * x;
# p = r;
# rsold = r' * r;
#
# for i = 1:length(b)
# Ap = A * p;
# alpha = rsold / (p' * Ap);
# x = x + alpha * p;
# r = r - alpha * Ap;
# rsnew = r' * r;
# if sqrt(rsnew) < 1e-10
# break;
# end
# p = r + (rsnew / rsold) * p;
# rsold = rsnew;
# end
# end
# =============================================================================
import numpy as np
a = np.array([[3, 2, -1], [2, -1, 1], [-1, 1, -1]]) # 3X3 symmetric matrix
b = (np.array([1, -2, 0])[np.newaxis]).T # 3X1 matrix
x = (np.array([0, 1, 2])[np.newaxis]).T
val = ConjGrad(a, b, x);
print(val)
| 22.125 | 79 | 0.367232 |
a186a2c3d773bd33d3d6c3ea0aa252bbcefbcff7 | 5,232 | py | Python | examples/applications/agritrop-indexing/training_agritrop_baseline.py | Ing-David/sentence-transformers | 4895f2f806d209a41a770e96ba2425aac605497c | [
"Apache-2.0"
] | null | null | null | examples/applications/agritrop-indexing/training_agritrop_baseline.py | Ing-David/sentence-transformers | 4895f2f806d209a41a770e96ba2425aac605497c | [
"Apache-2.0"
] | null | null | null | examples/applications/agritrop-indexing/training_agritrop_baseline.py | Ing-David/sentence-transformers | 4895f2f806d209a41a770e96ba2425aac605497c | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import math
from pathlib import Path
import torch.multiprocessing as mp
import os
from datetime import datetime
import nltk
import pandas as pd
import transformers
from torch import nn
import torch.distributed
from torch._C._distributed_c10d import HashStore
from torch.utils.data import DataLoader
from tqdm import tqdm
from sentence_transformers import InputExampleDocument, BiEncoder
from sentence_transformers import LoggingHandler
from eval_agritrop import create_evaluator
# torch.distributed.init_process_group(backend="nccl",store=HashStore(), world_size=8, rank=0)
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
os.putenv("TOKENIZERS_PARALLELISM", "true")
logger = logging.getLogger(__name__)
#### /print debug information to stdout
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train / evaluate baseline indexing system on abstracts')
parser.add_argument('--dataset', '-d', type=str, nargs=1,
help='Path to the TSV corpus to use', dest='dataset',
default=['datasets/corpus_agritrop_transformers_abstract.tsv'])
parser.add_argument('--save-prefix', '-s', type=str, nargs=1,
help='Prefix for the model save directory', dest='save_prefix',
default=['output/training_agritrop_transformer_baseline-'])
parser.add_argument('--epochs', '-e', type=int, nargs=1, help="The number of epochs (for training)", dest='epochs',
default=[100])
parser.add_argument('--eval', '-l', type=str, nargs=1, help="Load model from directory and evaluate", dest='eval',
default=[])
args = parser.parse_args()
# dataset's path
agritrop_dataset_path = args.dataset[0]
# Define our Cross-Encoder
train_batch_size = 1
num_epochs = args.epochs[0]
load = len(args.eval) > 0
model_save_path = args.save_prefix[0] + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Read Agritrop's dataset
logger.info("Read Agritrop's train dataset")
df_transformer = pd.read_csv(agritrop_dataset_path, sep='\t')
# list sample
train_samples = []
dev_samples = []
test_samples = []
df_document_groups = df_transformer.groupby("doc_ids")
for group in tqdm(df_document_groups):
abstract = group[1]['abstract'].iloc[0]
concept_labels = []
labels = []
for index, row in group[1].iterrows():
split_concept_labels = list(row['sentence2'].split(","))
concate_concept = " ".join(split_concept_labels)
concept_labels.append([concate_concept])
labels.append(int(row['score']))
input_example = InputExampleDocument(document_sentences=[abstract], concept_labels=concept_labels,
labels=labels)
split = group[1]['split'].iloc[0]
if split == 'dev':
dev_samples.append(input_example)
elif split == 'test':
test_samples.append(input_example)
else:
train_samples.append(input_example)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=False, batch_size=train_batch_size)
# print(len(train_dataloader.dataset))
# We use bert-base-cased as base model and set num_labels=1, which predicts a continuous score between 0 and 1
if not load:
logger.info("Training model using 'squeezebert/squeezebert-uncased'...")
model = BiEncoder('squeezebert/squeezebert-uncased', num_labels=1, max_length=512, device="cuda:1",
freeze_transformer=False)
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
# mp.spawn(fit_model, args=(model, train_dataloader,
# None, # evaluator,
# 4, # epochs
# warmup_steps,
# model_save_path,
# True), # use amp
# nprocs=8, join=True)
model.save(model_save_path)
model.fit(train_dataloader=train_dataloader,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path, use_amp=False)
model.save(model_save_path)
else:
load_path = args.eval[0]
logger.info(f"Loading model from {load_path}")
model = BiEncoder(load_path, num_labels=1, max_length=512, device="cpu",
freeze_transformer=False)
logger.info("Evaluating...")
evaluator_dev, evaluator_test = create_evaluator(df_transformer, text_field="abstract", device="cpu")
evaluator_dev(model)
evaluator_test(model)
| 39.938931 | 119 | 0.632072 |
a18749c6aba22f8c7ec4513c3967c1df5e092f47 | 1,793 | py | Python | src/utils/file_manipulation.py | SashiniHansika/Relationship-Identifying-Module | 4a640b68220c7735061cb984a7edccaee322fc33 | [
"MIT"
] | null | null | null | src/utils/file_manipulation.py | SashiniHansika/Relationship-Identifying-Module | 4a640b68220c7735061cb984a7edccaee322fc33 | [
"MIT"
] | null | null | null | src/utils/file_manipulation.py | SashiniHansika/Relationship-Identifying-Module | 4a640b68220c7735061cb984a7edccaee322fc33 | [
"MIT"
] | null | null | null | # open input text scenario
import xml.etree.ElementTree as ET
import os
PATH = "G:\\FYP\\FYP-ER-Relationships-Module\\data"
text_file = open(PATH+"\\input_text.txt", "r")
if text_file.mode == 'r':
# Read the scenario and covert that text file into lowercase
input_text_load = text_file.read()
input_text = input_text_load.lower()
print(input_text)
# Read input XML file
| 25.614286 | 64 | 0.622421 |
a187e17bf5a82ceb3711020d4fb1495722b57b3c | 2,428 | py | Python | tests/tensorflow/pruning/test_tensor_processor.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 136 | 2020-06-01T14:03:31.000Z | 2020-10-28T06:10:50.000Z | tests/tensorflow/pruning/test_tensor_processor.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 133 | 2020-05-26T13:48:04.000Z | 2020-10-28T05:25:55.000Z | tests/tensorflow/pruning/test_tensor_processor.py | MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | [
"Apache-2.0"
] | 36 | 2020-05-28T08:18:39.000Z | 2020-10-27T14:46:58.000Z | import pytest
import tensorflow as tf
from nncf.tensorflow.tensor import TFNNCFTensor
from nncf.tensorflow.pruning.tensor_processor import TFNNCFPruningTensorProcessor
| 38.539683 | 84 | 0.716227 |
a1898d71541edc0c1b30cdf2d00d4add61765cd1 | 4,288 | py | Python | src/bot/botstates/TriviaBot.py | malmgrens4/TwIOTch | a3e05f5fcb5bcd75aba3cf9533ca7c5308e4a2de | [
"MIT"
] | null | null | null | src/bot/botstates/TriviaBot.py | malmgrens4/TwIOTch | a3e05f5fcb5bcd75aba3cf9533ca7c5308e4a2de | [
"MIT"
] | null | null | null | src/bot/botstates/TriviaBot.py | malmgrens4/TwIOTch | a3e05f5fcb5bcd75aba3cf9533ca7c5308e4a2de | [
"MIT"
] | null | null | null | from twitchio.dataclasses import Message
from typing import Dict, Callable
from datetime import datetime
from dataclasses import dataclass
from src.bot.gameobservers.Observer import Observer
from src.bot.gameobservers.Subject import Subject
from src.bot.botstates.BotState import BotState
from src.bot.botstates.TeamGameHandler import TeamGameHandler
from src.bot.botstates.DefaultBot import DefaultBot
from src.bot.TeamData import TeamData
| 34.304 | 120 | 0.639459 |
a189a8ce0239f76496cb3c604a52bf52c941ff4e | 515 | py | Python | playing1.py | bert386/rpi-monitor-cam-led | d333a8313500be8150e59462df5482b307eb368d | [
"Apache-2.0"
] | null | null | null | playing1.py | bert386/rpi-monitor-cam-led | d333a8313500be8150e59462df5482b307eb368d | [
"Apache-2.0"
] | null | null | null | playing1.py | bert386/rpi-monitor-cam-led | d333a8313500be8150e59462df5482b307eb368d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Description:
Todo:
"""
import os
import sys
import logging
from collections import deque
from base_state import BaseState
| 16.612903 | 57 | 0.664078 |
a189f72cd87554b98dd997143822d60a01facb7a | 518 | py | Python | script/isort.py | zhoumjane/devops_backend | 5567b04a042fd4a449063a96821369396a8d8586 | [
"MIT"
] | 53 | 2021-07-14T03:11:39.000Z | 2021-09-23T10:39:14.000Z | script/isort.py | zhoumjane/devops_backend | 5567b04a042fd4a449063a96821369396a8d8586 | [
"MIT"
] | null | null | null | script/isort.py | zhoumjane/devops_backend | 5567b04a042fd4a449063a96821369396a8d8586 | [
"MIT"
] | 10 | 2021-07-14T06:29:14.000Z | 2021-09-23T00:25:35.000Z | # -*- coding: utf-8 -*-
import time, random
if __name__ == "__main__":
alist = []
for i in range(50000):
alist.append(random.randint(1, 100))
start_time = time.time()
isort(alist)
end_time = time.time() - start_time
print("cost time: %ss" % (end_time)) | 27.263158 | 63 | 0.530888 |
a18ab5b8f24fd76985216d02e899cfe490730c02 | 1,903 | py | Python | test/test_estim/test_scalarnl.py | Ryandry1st/vampyre | 43bd6198ee0cbe0d3270d0c674127c7cbbb4c95e | [
"MIT"
] | 59 | 2017-01-27T22:36:38.000Z | 2021-12-08T04:16:13.000Z | test/test_estim/test_scalarnl.py | Ryandry1st/vampyre | 43bd6198ee0cbe0d3270d0c674127c7cbbb4c95e | [
"MIT"
] | 10 | 2017-01-11T15:16:11.000Z | 2021-02-17T10:43:51.000Z | test/test_estim/test_scalarnl.py | Ryandry1st/vampyre | 43bd6198ee0cbe0d3270d0c674127c7cbbb4c95e | [
"MIT"
] | 18 | 2017-01-11T14:58:32.000Z | 2021-05-03T16:34:53.000Z | """
test_relu.py: Test suite for the ReLU estimator class :class:ReLUEstim`
"""
from __future__ import print_function
from __future__ import division
import unittest
import numpy as np
# Add the path to the vampyre package and import it
import env
env.add_vp_path()
import vampyre as vp
def logistic_test(zshape=(100,10), rvar=1, tol=1, verbose=False):
"""
Unit test for the logistic estimator.
Generates random data with a logistic model and then estimates the input
logit :code:`z`.
:param zshape: shape of the data :code:`z`
:param rvar: prior variance on :code:`r`
:param tol: tolerance on estimation error. This should be large since we
are using MAP instead of MMSE estimation so the error variance
is not exact
:param verbose: print results
"""
# Create random data
z = np.random.normal(0,1,zshape)
r = z + np.random.normal(0,np.sqrt(rvar),zshape)
pz = 1/(1+np.exp(-z))
u = np.random.uniform(0,1,zshape)
y = (u < pz)
# Create an estimator
est = vp.estim.LogisticEst(y=y,var_axes='all',max_it=100)
# Run the estimator
zhat, zhatvar = est.est(r,rvar)
# Compare the error
zerr = np.mean((z-zhat)**2)
rel_err = np.maximum( zerr/zhatvar, zhatvar/zerr)-1
fail = (rel_err > tol)
if fail or verbose:
print("Error: Actual: {0:12.4e} Est: {1:12.4e} Rel: {2:12.4e}".format(\
zerr, zhatvar, rel_err))
if fail:
raise vp.common.TestException("Estimation error variance"+\
" does not match predicted value")
if __name__ == '__main__':
unittest.main()
| 29.276923 | 80 | 0.629532 |
a18aeadaf1c0a497b57a81c26b42e7ee05084e81 | 1,543 | py | Python | tests/live/test_client_auth.py | denibertovic/stormpath-sdk-python | e594a1bb48de3fa8eff26558bf4f72bb056e9d00 | [
"Apache-2.0"
] | null | null | null | tests/live/test_client_auth.py | denibertovic/stormpath-sdk-python | e594a1bb48de3fa8eff26558bf4f72bb056e9d00 | [
"Apache-2.0"
] | null | null | null | tests/live/test_client_auth.py | denibertovic/stormpath-sdk-python | e594a1bb48de3fa8eff26558bf4f72bb056e9d00 | [
"Apache-2.0"
] | null | null | null | """Live tests of client authentication against the Stormpath service API."""
from os import environ
from stormpath.client import Client
from stormpath.error import Error
from .base import LiveBase
| 29.113208 | 76 | 0.628645 |
a18bdd3e3f40a3f576715555ebb6a8270c24a370 | 256 | py | Python | languages/python/software_engineering_logging4.py | Andilyn/learntosolveit | fd15345c74ef543e4e26f4691bf91cb6dac568a4 | [
"BSD-3-Clause"
] | 136 | 2015-03-06T18:11:21.000Z | 2022-03-10T22:31:40.000Z | languages/python/software_engineering_logging4.py | Andilyn/learntosolveit | fd15345c74ef543e4e26f4691bf91cb6dac568a4 | [
"BSD-3-Clause"
] | 27 | 2015-01-07T01:38:03.000Z | 2021-12-22T19:20:15.000Z | languages/python/software_engineering_logging4.py | Andilyn/learntosolveit | fd15345c74ef543e4e26f4691bf91cb6dac568a4 | [
"BSD-3-Clause"
] | 1,582 | 2015-01-01T20:37:06.000Z | 2022-03-30T12:29:24.000Z | import logging
logger1 = logging.getLogger('package1.module1')
logger2 = logging.getLogger('package1.module2')
logging.basicConfig(level=logging.WARNING)
logger1.warning('This is a warning message')
logger2.warning('This is a another warning message')
| 23.272727 | 52 | 0.792969 |
a18c81f3ba8e0a19564872357a93750676c04e10 | 862 | py | Python | py/foreman/tests/testdata/test_command/pkg1/build.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | py/foreman/tests/testdata/test_command/pkg1/build.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | py/foreman/tests/testdata/test_command/pkg1/build.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | from pathlib import Path
from foreman import define_parameter, rule, get_relpath
import foreman
if __name__ != 'pkg1':
raise AssertionError(__name__)
if not __file__.endswith('foreman/tests/testdata/test_command/pkg1/build.py'):
raise AssertionError(__file__)
relpath = get_relpath()
if relpath != Path('pkg1'):
raise AssertionError(relpath)
define_parameter('par1').with_derive(lambda ps: get_relpath())
| 21.02439 | 78 | 0.691415 |
a18d2404f6cd1284bac337bd359599e5974dbe24 | 11,036 | py | Python | python/pyarrow/tests/test_dataset.py | maxburke/arrow | 344ed4bed675c4913db5cc7b17d0e6cc57ea55c4 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_dataset.py | maxburke/arrow | 344ed4bed675c4913db5cc7b17d0e6cc57ea55c4 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_dataset.py | maxburke/arrow | 344ed4bed675c4913db5cc7b17d0e6cc57ea55c4 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import pyarrow as pa
import pyarrow.fs as fs
try:
import pyarrow.dataset as ds
except ImportError:
ds = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not dataset'
pytestmark = pytest.mark.dataset
def test_filesystem_data_source(mockfs):
file_format = ds.ParquetFileFormat()
paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet']
partitions = [ds.ScalarExpression(True), ds.ScalarExpression(True)]
source = ds.FileSystemDataSource(mockfs, paths, partitions,
source_partition=None,
file_format=file_format)
source_partition = ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('source'),
ds.ScalarExpression(1337)
)
partitions = [
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('part'),
ds.ScalarExpression(1)
),
ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('part'),
ds.ScalarExpression(2)
)
]
source = ds.FileSystemDataSource(mockfs, paths, partitions,
source_partition=source_partition,
file_format=file_format)
assert source.partition_expression.equals(source_partition)
def test_dataset(dataset):
assert isinstance(dataset, ds.Dataset)
assert isinstance(dataset.schema, pa.Schema)
# TODO(kszucs): test non-boolean expressions for filter do raise
builder = dataset.new_scan()
assert isinstance(builder, ds.ScannerBuilder)
scanner = builder.finish()
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
for task in scanner.scan():
assert isinstance(task, ds.ScanTask)
for batch in task.execute():
assert batch.column(0).equals(expected_i64)
assert batch.column(1).equals(expected_f64)
table = scanner.to_table()
assert isinstance(table, pa.Table)
assert len(table) == 10
condition = ds.ComparisonExpression(
ds.CompareOperator.Equal,
ds.FieldExpression('i64'),
ds.ScalarExpression(1)
)
scanner = dataset.new_scan().use_threads(True).filter(condition).finish()
result = scanner.to_table()
assert result.to_pydict() == {
'i64': [1, 1],
'f64': [1., 1.],
'group': [1, 2],
'key': ['xxx', 'yyy']
}
def test_scanner_builder(dataset):
builder = ds.ScannerBuilder(dataset, memory_pool=pa.default_memory_pool())
scanner = builder.finish()
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
with pytest.raises(pa.ArrowInvalid):
dataset.new_scan().project(['unknown'])
builder = dataset.new_scan(memory_pool=pa.default_memory_pool())
scanner = builder.project(['i64']).finish()
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
for task in scanner.scan():
for batch in task.execute():
assert batch.num_columns == 1
| 31.175141 | 78 | 0.632476 |
a18f308a306f458e03d32285aa21896641d7fc85 | 400 | py | Python | stackoverflow/venv/lib/python3.6/site-packages/scrapy/utils/markup.py | zhi-xianwei/learn_python3_spider | a3301f8112e4ded25c3578162db8c6a263a0693b | [
"MIT"
] | 9,953 | 2019-04-03T23:41:04.000Z | 2022-03-31T11:54:44.000Z | stackoverflow/venv/lib/python3.6/site-packages/scrapy/utils/markup.py | W4LKURE/learn_python3_spider | 98dd354a41598b31302641f9a0ea49d1ecfa0fb1 | [
"MIT"
] | 44 | 2019-05-27T10:59:29.000Z | 2022-03-31T14:14:29.000Z | stackoverflow/venv/lib/python3.6/site-packages/scrapy/utils/markup.py | W4LKURE/learn_python3_spider | 98dd354a41598b31302641f9a0ea49d1ecfa0fb1 | [
"MIT"
] | 2,803 | 2019-04-06T13:15:33.000Z | 2022-03-31T07:42:01.000Z | """
Transitional module for moving to the w3lib library.
For new code, always import from w3lib.html instead of this module
"""
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from w3lib.html import *
warnings.warn("Module `scrapy.utils.markup` is deprecated. "
"Please import from `w3lib.html` instead.",
ScrapyDeprecationWarning, stacklevel=2) | 28.571429 | 66 | 0.7375 |
a190762c1566ca65105a3350c21b6933040e5549 | 2,362 | py | Python | scripts/option_normal_model.py | jcoffi/FuturesAndOptionsTradingSimulation | e02fdbe8c40021785a2a1dae56ff4b72f2d47c30 | [
"MIT"
] | 14 | 2017-02-16T15:13:53.000Z | 2021-05-26T11:34:09.000Z | scripts/option_normal_model.py | jcoffi/FuturesAndOptionsTradingSimulation | e02fdbe8c40021785a2a1dae56ff4b72f2d47c30 | [
"MIT"
] | null | null | null | scripts/option_normal_model.py | jcoffi/FuturesAndOptionsTradingSimulation | e02fdbe8c40021785a2a1dae56ff4b72f2d47c30 | [
"MIT"
] | 10 | 2016-08-05T07:37:07.000Z | 2021-11-26T17:31:48.000Z | #IMPORT log and sqrt FROM math MODULE
from math import log, sqrt, exp
#IMPORT date AND timedelta FOR HANDLING EXPIRY TIMES
from datetime import date, timedelta
#IMPORT SciPy stats MODULE
from scipy import stats
| 38.721311 | 104 | 0.640559 |
a19170892d787db003456b529cd07f4fcdc77170 | 27,286 | py | Python | code/tasks/VNLA/oracle.py | Chucooleg/vnla | b9c1367b263f00a38828ff24cefc8becc149be7a | [
"MIT"
] | null | null | null | code/tasks/VNLA/oracle.py | Chucooleg/vnla | b9c1367b263f00a38828ff24cefc8becc149be7a | [
"MIT"
] | null | null | null | code/tasks/VNLA/oracle.py | Chucooleg/vnla | b9c1367b263f00a38828ff24cefc8becc149be7a | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import math
import networkx as nx
import functools
import scipy.stats
import random
import sys
import copy
import numpy as np
import torch
import utils
try:
sys.path.append('/opt/MatterSim/build/') # local docker or Philly
import MatterSim
except:
# local conda env only
sys.path.append('/home/hoyeung/Documents/vnla/code/build')
import MatterSim
def make_oracle(oracle_type, *args, **kwargs):
if oracle_type == 'shortest':
return ShortestPathOracle(*args, **kwargs)
if oracle_type == 'next_optimal':
return NextOptimalOracle(*args, **kwargs)
if oracle_type == 'ask':
return AskOracle(*args, **kwargs)
if oracle_type == 'direct':
return MultistepShortestPathOracle(*args, **kwargs)
if oracle_type == 'verbal':
return StepByStepSubgoalOracle(*args, **kwargs)
if oracle_type == 'frontier_shortest':
return FrontierShortestPathsOracle(*args, **kwargs)
# TODO implement next
# if oracle_type == 'diverse_shortest':
# return DiverseShortestPathsOracle(*args, **kwargs)
return None
| 40.66468 | 209 | 0.606025 |
a191825d6c6da2861f6e74b98531a8374cb67f95 | 7,124 | py | Python | unit-tests/controller.py | HimariO/VideoSum | 3a81276df3b429c24ebf9a1841b5a9168c0c3ccf | [
"MIT"
] | null | null | null | unit-tests/controller.py | HimariO/VideoSum | 3a81276df3b429c24ebf9a1841b5a9168c0c3ccf | [
"MIT"
] | null | null | null | unit-tests/controller.py | HimariO/VideoSum | 3a81276df3b429c24ebf9a1841b5a9168c0c3ccf | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import unittest
from dnc.controller import BaseController
if __name__ == '__main__':
unittest.main(verbosity=2)
| 44.805031 | 127 | 0.593346 |
a19397d382efe02f3787d8d407c6638e72798564 | 1,538 | py | Python | movies/movies/spiders/douban.py | Devon-pku/repso | b86666aaebb3482240aba42437c606d856d44d21 | [
"MIT"
] | null | null | null | movies/movies/spiders/douban.py | Devon-pku/repso | b86666aaebb3482240aba42437c606d856d44d21 | [
"MIT"
] | null | null | null | movies/movies/spiders/douban.py | Devon-pku/repso | b86666aaebb3482240aba42437c606d856d44d21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join, MapCompose
from scrapy.spiders import CrawlSpider, Rule
from movies.items import MoviesItem
| 37.512195 | 97 | 0.606632 |
a1946a453629c94f8bc3d4a45b2c968101db6df0 | 1,546 | py | Python | CatFaultDetection/LSTM/Test_LSTM.py | jonlwowski012/UGV-Wheel-Slip-Detection-Using-LSTM-and-DNN | 2af5dcf4c3b043f065f75b612a4bbfc4aa2d11e8 | [
"Apache-2.0"
] | null | null | null | CatFaultDetection/LSTM/Test_LSTM.py | jonlwowski012/UGV-Wheel-Slip-Detection-Using-LSTM-and-DNN | 2af5dcf4c3b043f065f75b612a4bbfc4aa2d11e8 | [
"Apache-2.0"
] | null | null | null | CatFaultDetection/LSTM/Test_LSTM.py | jonlwowski012/UGV-Wheel-Slip-Detection-Using-LSTM-and-DNN | 2af5dcf4c3b043f065f75b612a4bbfc4aa2d11e8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from scipy.misc import imread, imsave, imresize
from keras.models import model_from_json
from os.path import join
import matplotlib.pyplot as plt
import pandas as pd
import time
num_classes = 4
# Read Dataset
data = pd.read_csv('../dataset/fault_dataset.csv')
data = shuffler('../dataset/fault_dataset.csv')
X = np.asarray(data[['posex','posey','orix','oriy','oriz','oriw']])
y_norm = np.asarray(data['labels'])
y = np.zeros((len(y_norm), num_classes))
y[np.arange(len(y_norm)), y_norm] = 1
# Define Paths and Variables
model_dir = 'model'
#%% Load model and weights separately due to error in keras
model = model_from_json(open(model_dir+"/model_weights.json").read())
model.load_weights(model_dir+"/model_weights.h5")
#%% Predict Output
t0 = time.time()
output_org = model.predict(np.reshape(X, (X.shape[0], 1, X.shape[1])))
print "Time to predict all ", len(X), " samples: ", time.time()-t0
print "Average time to predict a sample: ", (time.time()-t0)/len(X)
output = np.zeros_like(output_org)
output[np.arange(len(output_org)), output_org.argmax(1)] = 1
correct = 0
for i in range(len(output)):
if np.array_equal(output[i],y[i]):
correct += 1
print "Acc: ", correct/float(len(output))
output_index = []
for row in output:
output_index.append(np.argmax(row))
plt.plot(y_norm, color='red',linewidth=3)
plt.plot(output_index, color='blue', linewidth=1)
plt.show()
| 28.109091 | 70 | 0.721863 |
a194bf4b74105b49a6100082214a932f48fe4c3d | 3,304 | py | Python | examples/spring_system.py | tkoziara/parmec | fefe0586798cd65744334f9abeab183159bd3d7a | [
"MIT"
] | null | null | null | examples/spring_system.py | tkoziara/parmec | fefe0586798cd65744334f9abeab183159bd3d7a | [
"MIT"
] | 15 | 2017-06-09T12:05:27.000Z | 2018-10-25T13:59:58.000Z | examples/spring_system.py | parmes/parmec | fefe0586798cd65744334f9abeab183159bd3d7a | [
"MIT"
] | null | null | null | # find parmec path
import os, sys
path = where('parmec4')
if path == None:
print 'ERROR: parmec4 not found in PATH!'
print ' Download and compile parmec;',
print 'add parmec directory to PATH variable;'
sys.exit(1)
print '(Found parmec4 at:', path + ')'
sys.path.append(os.path.join (path, 'python'))
from progress_bar import * # and import progress bar
from scipy import spatial # import scipy
import numpy as np # and numpy
# command line arguments
av = ARGV()
if '-h' in av or '--help' in av:
print 'Beam-like spring-system example:',
print 'cantilever beam fixed at x-far-end'
print 'Unit cubes interact via springs',
print 'connected within a radius of influence'
print 'Available arguments:'
print ' -nx int --> x resolution (or 10)'
print ' -ny int --> y resolution (or 5)'
print ' -nz int --> z resolution (or 5)'
print ' -du float --> duration (or 5.)'
print ' -st float --> time step (or auto)'
print ' -ra float --> spring influence radius (or 2.)'
print ' -h or --help --> print this help'
sys.exit(0)
# input parameters
nx = int(av[av.index('-nx')+1]) if '-nx' in av else 10
ny = int(av[av.index('-ny')+1]) if '-ny' in av else 5
nz = int(av[av.index('-nz')+1]) if '-nz' in av else 5
du = float(av[av.index('-du')+1]) if '-du' in av else 5.
st = float(av[av.index('-st')+1]) if '-st' in av else -1
ra = float(av[av.index('-ra')+1]) if '-ra' in av else 2.
# materials
matnum = MATERIAL (1E3, 1E9, 0.25)
spring = [-1,-1E6, 1,1E6]
dratio = 10.
# (nx,ny,nz) array of unit cubes
iend = nx*ny*nz-1
progress_bar(0, iend, 'Adding particles:')
x, y, z = np.mgrid[0:nx, 0:ny, 0:nz]
data = zip(x.ravel(), y.ravel(), z.ravel())
datarange = range (0, len(data))
for i in datarange:
p = data[i]
nodes = [p[0]-.5, p[1]-.5, p[2]-.5,
p[0]+.5, p[1]-.5, p[2]-.5,
p[0]+.5, p[1]+.5, p[2]-.5,
p[0]-.5, p[1]+.5, p[2]-.5,
p[0]-.5, p[1]-.5, p[2]+.5,
p[0]+.5, p[1]-.5, p[2]+.5,
p[0]+.5, p[1]+.5, p[2]+.5,
p[0]-.5, p[1]+.5, p[2]+.5]
elements = [8, 0, 1, 2, 3, 4, 5, 6, 7, matnum]
parnum = MESH (nodes, elements, matnum, 0)
progress_bar(i, iend, 'Adding particles:')
# connecting springs within radius
progress_bar(0, iend, 'Adding springs:')
tree = spatial.KDTree(data)
for i in datarange:
p = data[i]
adj = tree.query_ball_point(np.array(p), ra)
for j in [k for k in adj if k < i]:
q = data[j]
x = mul(add(p,q),.5)
sprnum = SPRING (i, x, j, x, spring, dratio)
progress_bar(i, iend, 'Adding springs:')
# fixed at x-far-end
for i in datarange[-ny*nz:]:
RESTRAIN (i, [1,0,0,0,1,0,0,0,1], [1,0,0,0,1,0,0,0,1])
# gravity acceleration
GRAVITY (0., 0., -9.8)
# time step
hc = CRITICAL(perparticle=10)
if st < 0: st = 0.5 * hc[0][0]
# print out statistics
print '%dx%dx%d=%d particles and %d springs' % (nx,ny,nz,parnum,sprnum)
print '10 lowest-step per-particle tuples (critical step, particle index, circular frequency, damping ratio):'
print hc
print 'Running %d steps of size %g:' % (int(du/st),st)
# run simulation
DEM (du, st, (0.05, 0.01))
| 32.07767 | 110 | 0.608656 |
a194ce5184afbac2e200ce258188a996d6313650 | 113 | py | Python | api/weibo/api/api.py | Eurkon/api | a51eae901e003ac6b94c04d12f1afeec00108256 | [
"MIT"
] | 5 | 2021-06-15T05:33:01.000Z | 2022-03-14T01:17:38.000Z | api/weibo/api/api.py | Eurkon/api | a51eae901e003ac6b94c04d12f1afeec00108256 | [
"MIT"
] | 1 | 2021-06-03T09:22:50.000Z | 2021-06-03T09:22:50.000Z | api/weibo/api/api.py | Eurkon/api | a51eae901e003ac6b94c04d12f1afeec00108256 | [
"MIT"
] | 1 | 2021-07-25T15:58:40.000Z | 2021-07-25T15:58:40.000Z | # -*- coding: utf-8 -*-
# @Author : Eurkon
# @Date : 2021/6/9 17:13
from api.weibo.api.top import weibo_top | 22.6 | 39 | 0.610619 |
a1957451f3af335e8adc1d7f31b338f3928c6579 | 1,293 | py | Python | leds.py | sthysel/pyboard-play | 0df2dc98376667211958a2bcc18718d0cd69a400 | [
"MIT"
] | null | null | null | leds.py | sthysel/pyboard-play | 0df2dc98376667211958a2bcc18718d0cd69a400 | [
"MIT"
] | null | null | null | leds.py | sthysel/pyboard-play | 0df2dc98376667211958a2bcc18718d0cd69a400 | [
"MIT"
] | null | null | null | import pyb
import random
leds = [pyb.LED(i) for i in range(1, 5)]
blue_led = pyb.LED(4)
glow()
| 20.52381 | 115 | 0.464811 |
a195963a8a3b4f30d7ce7608dabc36d736c3bd7d | 8,088 | py | Python | main.py | droher/diachronic | 4d50f37af96c2a89c46e027f5ab7f46bce9b9521 | [
"Apache-2.0"
] | 3 | 2018-07-23T13:58:33.000Z | 2020-01-23T09:02:01.000Z | main.py | droher/diachronic | 4d50f37af96c2a89c46e027f5ab7f46bce9b9521 | [
"Apache-2.0"
] | 1 | 2021-03-22T17:15:48.000Z | 2021-03-22T17:15:48.000Z | main.py | droher/diachronic | 4d50f37af96c2a89c46e027f5ab7f46bce9b9521 | [
"Apache-2.0"
] | null | null | null | import json
import os
import shutil
import urllib.request
import traceback
import logging
import psutil
from collections import defaultdict
from typing import List, Dict, Tuple
from multiprocessing import Semaphore, Pool
from subprocess import Popen, PIPE
from datetime import datetime, timedelta
from lxml import etree
from lxml.etree import Element
import pyarrow as pa
import pyarrow.parquet as pq
from google.cloud import storage
from diachronic import global_conf, Tags
PROCESS_MEM = psutil.virtual_memory().total / psutil.cpu_count()
# Fraction of (total_mem/cpu_count) that a given process uses before flushing buffer
PROCESS_MEM_LIMIT = .1
DOWNLOAD_SEMAPHORE = Semaphore(global_conf.download_parallelism)
FAILURES = []
if __name__ == "__main__":
WikiHandler().run()
| 38.514286 | 111 | 0.641568 |
a196cc5f96a8b93a3bb1cc5156a3a6b18c755ee7 | 9,491 | py | Python | apps/core/helpers.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | 1 | 2022-03-12T23:44:21.000Z | 2022-03-12T23:44:21.000Z | apps/core/helpers.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | null | null | null | apps/core/helpers.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
#
import re
import os
from django.conf import settings
from django.shortcuts import (
render_to_response, get_object_or_404 as _get_object_or_404,
redirect)
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils.translation import ugettext_lazy as _, ugettext as tr
from django.http import Http404
from datetime import datetime, time, date
import simplejson as json
def get_object_or_404(Object, *args, **kwargs):
"""Retruns object or raise Http404 if it does not exist"""
try:
if hasattr(Object, 'objects'):
return Object.objects.get(*args, **kwargs)
elif hasattr(Object, 'get'):
return Object.get(*args, **kwargs)
else:
raise Http404("Giving object has no manager instance")
except (Object.DoesNotExist, Object.MultipleObjectReturned):
raise Http404("Object does not exist or multiple object returned")
def get_content_type(Object):
"""
works with ModelBase based classes, its instances
and with format string 'app_label.model_name', also supports
sphinx models and instances modification
source taken from warmist helpers source
retrieves content_type or raise the common django Exception
Examples:
get_content_type(User)
get_content_type(onsite_user)
get_content_type('auth.user')
"""
if callable(Object): # class
model = Object._meta.module_name
app_label = Object._meta.app_label
#model = Object.__name__.lower()
#app_label = (x for x in reversed(
# Object.__module__.split('.')) if x not in 'models').next()
elif hasattr(Object, 'pk'): # class instance
if hasattr(Object, '_sphinx') or hasattr(Object, '_current_object'):
model = Object._current_object._meta.module_name
app_label = Object._current_object._meta.app_label
#app_label = (x for x in reversed(
# Object._current_object.__module__.split('.')) \
#if x not in 'models').next()
#model = Object._current_object.__class__.__name__.lower()
else:
app_label = Object._meta.app_label
model = Object._meta.module_name
#app_label = (x for x in reversed(Object.__module__.split('.')) \
#if x not in 'models').next()
#model = Object.__class__.__name__.lower()
elif isinstance(Object, basestring):
app_label, model = Object.split('.')
ct = ContentType.objects.get(app_label=app_label, model=model)
return ct
def get_form(app_label, form_name):
""" retrieve form within app_label and form_name given set"""
pass
def ajax_form_errors(errors):
""" returns form errors as python list """
errs = [{'key': k, 'msg': unicode(errors[k])} for k in errors.keys()]
#equivalent to
#for k in form.errors.keys():
# errors.append({'key': k, 'msg': unicode(form.errors[k])})
return errs
def get_safe_fields(lst, Obj):
""" excludes fields in given lst from Object """
return [
field.attname for field in Obj._meta.fields
if field.attname not in lst
]
#decorators
| 32.282313 | 78 | 0.623011 |
a196d336d93a22ab16f1f21a1b3e7182f45daa9b | 536 | py | Python | Python/Numpy/Shape and Reshape/shape_and_reshape.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | 2 | 2020-05-28T07:15:00.000Z | 2020-07-21T08:34:06.000Z | Python/Numpy/Shape and Reshape/shape_and_reshape.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | null | null | null | Python/Numpy/Shape and Reshape/shape_and_reshape.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | null | null | null | import numpy as np
from typing import List
if __name__ == '__main__':
int_sequence = list( map( int, input().split() ) )
# Method_#1
#sq_matrix = reshpare_to_square_matrix( int_sequence )
#print( sq_matrix )
# Method_#2
sq_matrix = np.array( int_sequence )
sq_matrix = np.reshape( sq_matrix, (3,3) )
print( sq_matrix )
| 20.615385 | 58 | 0.660448 |
a197169860a861a5d23aca5ba4544937a0ade0fe | 2,440 | py | Python | figures_in_paper/Fig3/ParticleSimulations/Fig3_particle_simulation_10-3.py | tstepien/moth-mating | eac5c735f40962f18d9d05b46bc3cc622ff5258d | [
"MIT"
] | null | null | null | figures_in_paper/Fig3/ParticleSimulations/Fig3_particle_simulation_10-3.py | tstepien/moth-mating | eac5c735f40962f18d9d05b46bc3cc622ff5258d | [
"MIT"
] | null | null | null | figures_in_paper/Fig3/ParticleSimulations/Fig3_particle_simulation_10-3.py | tstepien/moth-mating | eac5c735f40962f18d9d05b46bc3cc622ff5258d | [
"MIT"
] | 1 | 2021-08-08T14:45:17.000Z | 2021-08-08T14:45:17.000Z | import numpy as np
import time
import csv
import multiprocessing
import os
from numba import njit
def FractionAbsorbed(d,rt):
m = 2 #spatial dimension, can be 2 or 3 but not set up for 1d
t = 100.0 #total time
R = 1 #circle radius
num_particles = 5000
trappeds = []
for k in range(num_particles):
trapped = random_walk(m,d,t,R,rt)
trappeds.append(trapped)
return sum(trappeds)/num_particles
def parallel_fun(fn,input_args,num_threads):
#need to make list of pairs of d rt to pass to function...
with multiprocessing.Pool(num_threads) as pool:
out = pool.starmap(fn,input_args)
return np.array(out)
def get_cpus_per_task():
""" Returns the SLURM environment variable if set else throws
KeyError """
try:
return os.environ["SLURM_CPUS_PER_TASK"]
except KeyError:
print("SLURM environment variable unset: \
use salloc or sbatch to launch job")
raise
CPUS_PER_TASK = int(get_cpus_per_task())
# CPUS_PER_TASK = 4
begin = time.time()
D = [10**-3]
rt = np.linspace(1e-4,0.99,20)
input_args = [(x,y) for x in D for y in rt]
prop = parallel_fun(FractionAbsorbed,input_args,CPUS_PER_TASK)
data = []
for i in range(len(prop)):
data.append([input_args[i][0],input_args[i][1],prop[i]])
csvfile = csv.writer(open('C(100)_10-3.csv','w'))
for row in data:
csvfile.writerow(row)
end = time.time()
print(end-begin)
| 24.158416 | 65 | 0.59877 |
a19804bd039dd872f53c4d69a22088d534d74c39 | 8,153 | py | Python | tests/core/test_factory.py | pdwaggoner/datar | a03f1c0ca0de1270059178e59cea151a51a6e7aa | [
"MIT"
] | null | null | null | tests/core/test_factory.py | pdwaggoner/datar | a03f1c0ca0de1270059178e59cea151a51a6e7aa | [
"MIT"
] | null | null | null | tests/core/test_factory.py | pdwaggoner/datar | a03f1c0ca0de1270059178e59cea151a51a6e7aa | [
"MIT"
] | null | null | null | import inspect
import pytest
import numpy as np
from datar.core.backends.pandas import Categorical, DataFrame, Series
from datar.core.backends.pandas.testing import assert_frame_equal
from datar.core.backends.pandas.core.groupby import SeriesGroupBy
from datar.core.factory import func_factory
from datar.core.tibble import (
SeriesCategorical,
SeriesRowwise,
TibbleGrouped,
TibbleRowwise,
)
from datar.tibble import tibble
from ..conftest import assert_iterable_equal
| 24.050147 | 69 | 0.577333 |
a198bfc5af6a0e4572de99e815bf83c6452a7e36 | 2,234 | py | Python | worker/resources/Twitch.py | fga-eps-mds/2018.2-GamesBI_Importadores | 72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae | [
"MIT"
] | 1 | 2018-10-25T20:39:16.000Z | 2018-10-25T20:39:16.000Z | worker/resources/Twitch.py | fga-eps-mds/2018.2-GamesBI_Importadores | 72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae | [
"MIT"
] | null | null | null | worker/resources/Twitch.py | fga-eps-mds/2018.2-GamesBI_Importadores | 72ae9c8bd7a2693591c5ebcba39d4ce14f28d3ae | [
"MIT"
] | 2 | 2018-11-10T16:08:46.000Z | 2018-11-26T14:06:12.000Z | import requests
from functools import reduce
import operator
from urllib.parse import quote
import time
TWITCH_HEADER = {'Client-ID': 'nhnlqt9mgdmkf9ls184tt1nd753472',
'Accept': 'application/json'}
| 30.60274 | 105 | 0.527305 |
a198ce3c9c299466d4689e0f835f493506d51e28 | 2,407 | py | Python | maas/plugins/neutron_service_check.py | claco/rpc-openstack | fc5328fd174344d5445132ec8d8973a572aa4a0f | [
"Apache-2.0"
] | null | null | null | maas/plugins/neutron_service_check.py | claco/rpc-openstack | fc5328fd174344d5445132ec8d8973a572aa4a0f | [
"Apache-2.0"
] | null | null | null | maas/plugins/neutron_service_check.py | claco/rpc-openstack | fc5328fd174344d5445132ec8d8973a572aa4a0f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from maas_common import get_neutron_client
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
if __name__ == "__main__":
with print_output():
parser = argparse.ArgumentParser(description='Check neutron agents')
parser.add_argument('hostname',
type=str,
help='Neutron API hostname or IP address')
parser.add_argument('--host',
type=str,
help='Only return metrics for specified host',
default=None)
args = parser.parse_args()
main(args)
| 30.858974 | 78 | 0.624429 |
a199ff1b2e5c00d402dfeaa1e9dbf8a6d4be69df | 946 | py | Python | integration-test/797-add-missing-boundaries.py | nextzen/vector-datasource | f11700f232a3a6251915579106ff07b2bee25d12 | [
"MIT"
] | 1 | 2018-01-03T21:26:27.000Z | 2018-01-03T21:26:27.000Z | integration-test/797-add-missing-boundaries.py | nextzen/vector-datasource | f11700f232a3a6251915579106ff07b2bee25d12 | [
"MIT"
] | null | null | null | integration-test/797-add-missing-boundaries.py | nextzen/vector-datasource | f11700f232a3a6251915579106ff07b2bee25d12 | [
"MIT"
] | 1 | 2019-06-19T19:14:42.000Z | 2019-06-19T19:14:42.000Z | from . import FixtureTest
| 32.62069 | 76 | 0.614165 |
a19b0023958a3698889f955479e01ea3cfa60e20 | 836 | py | Python | flask/app/views.py | Ivche1337/Dodgerino-Game | 17ff7f3f7da4f5801be0f9c606fcd52fb14dfb95 | [
"MIT"
] | 1 | 2018-01-21T16:24:51.000Z | 2018-01-21T16:24:51.000Z | flask/app/views.py | Ivche1337/Dodgerino-Game | 17ff7f3f7da4f5801be0f9c606fcd52fb14dfb95 | [
"MIT"
] | 1 | 2018-01-18T04:42:07.000Z | 2018-01-19T03:52:13.000Z | flask/app/views.py | Ivche1337/Dodgerino-Game | 17ff7f3f7da4f5801be0f9c606fcd52fb14dfb95 | [
"MIT"
] | null | null | null | import os
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
from app import app
FILE_PATH = "/home/ivche/dev/Dodgerino-Game/highscores.db"
print(FILE_PATH)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+FILE_PATH
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB = SQLAlchemy(app)
| 23.885714 | 62 | 0.674641 |
a19b10b3dbefe70b02dea663c226b3a10d170161 | 24,076 | py | Python | mds/files.py | VilledeMontreal/mds-provider | f1e70a7dc5a8afa64fd88d0c40e6d02f3da25d05 | [
"MIT"
] | null | null | null | mds/files.py | VilledeMontreal/mds-provider | f1e70a7dc5a8afa64fd88d0c40e6d02f3da25d05 | [
"MIT"
] | null | null | null | mds/files.py | VilledeMontreal/mds-provider | f1e70a7dc5a8afa64fd88d0c40e6d02f3da25d05 | [
"MIT"
] | null | null | null | """
Work with MDS Provider data in JSON files.
"""
import csv
import datetime
import hashlib
import json
import os
import pathlib
import urllib
import requests
import pandas as pd
from .encoding import JsonEncoder
from .providers import Provider
from .schemas import SCHEMA_TYPES, STATUS_CHANGES, TRIPS
from .versions import UnexpectedVersionError, Version
| 37.560062 | 125 | 0.590713 |
a19dcdf3a1a9976de17738ed277080bb753f9bd2 | 7,600 | py | Python | App/neon_ann_stitch.py | weecology/NEON_crown_maps | 2da84d36ae5af44631a6d0489ccb29b212f83fd8 | [
"MIT"
] | null | null | null | App/neon_ann_stitch.py | weecology/NEON_crown_maps | 2da84d36ae5af44631a6d0489ccb29b212f83fd8 | [
"MIT"
] | 34 | 2020-01-30T05:44:47.000Z | 2021-02-08T22:51:57.000Z | App/neon_ann_stitch.py | weecology/NEON_crown_maps | 2da84d36ae5af44631a6d0489ccb29b212f83fd8 | [
"MIT"
] | null | null | null | import os
import rasterio
import argparse
from PIL import Image
import subprocess
import pathlib
import shutil
from glob import glob
from numba import njit, prange
from OpenVisus import *
### Configuration
ext_name = ".tif"
dtype = "uint8[3]"
limit = 1000
###--------------
parser = argparse.ArgumentParser(description='Parse set of geotiff')
parser.add_argument('-rgb', type=str, nargs = 1, help ='rbg image path', required = True)
parser.add_argument('-ann', type=str, nargs = 1, help ='ann image path', required = False)
parser.add_argument('-out', type=str, nargs = 1, help ='output name', required = True)
args = parser.parse_args()
rgb_dir = args.rgb[0]
outdir = args.out[0]
pathlib.Path(outdir+"/temp").mkdir(parents=True, exist_ok=True)
outname = outdir.split("/")[-1]
if(outname==""):
outname = outdir.split("/")[-2]
if(args.ann):
ann_dir = args.ann[0]
# Blend rgb and annotations
for f in os.listdir(rgb_dir):
if f.endswith(ext_name):
rgb_path=rgb_dir+"/"+f
ann_path=ann_dir+"/"+f.replace("image.tif", "image_rasterized.tif")
ageo = rasterio.open(rgb_path)
a = ageo.read()
bgeo = rasterio.open(ann_path)
b = bgeo.read()
print("Blending ", rgb_path, "and", ann_path, "...")
blend_rgb_ann(a, b[0])
#tiff.imsave(outdir+"/"+f,a)
with rasterio.open(
outdir+"/"+f,
'w',
driver='GTiff',
height=ageo.height,
width=ageo.width,
count=3,
dtype=a.dtype,
crs='+proj=latlong',
transform=ageo.transform,
) as dst:
dst.write(a)
idir = outdir
else:
idir = rgb_dir
# Convert and stitch
images = []
for f in os.listdir(idir):
if f.endswith(ext_name):
filepath=idir+"/"+f
s = os.path.basename(f)
# filepath = filepath.replace('(','\(')
# filepath = filepath.replace(')','\)')
images.append(tile(filepath,s))
bbox = [99999999, 0, 99999999, 0]
count = 0
for img in images:
if count > limit:
break
count += 1
try:
ds = rasterio.open(img.path)
width = ds.width
height = ds.height
bounds = ds.bounds
except:
print("ERROR: metadata failure, skipping "+idir)
minx = bounds.left
miny = bounds.top
maxx = bounds.right
maxy = bounds.bottom
img.frame = [minx, maxx, miny, maxy]
img.size = [width, height]
#print("found gdal data", gt, "size", [height, width], "frame", [minx, maxx, miny, maxy], "psize", [maxx-minx, maxy-miny])
print("frame", img.frame)#, "psize", [(maxx-minx)/width, (maxy-miny)/height])
if(minx < bbox[0]):
bbox[0] = minx
if(miny < bbox[2]):
bbox[2] = miny
if(maxx > bbox[1]):
bbox[1] = maxx
if(maxy > bbox[3]):
bbox[3] = maxy
ratio=[(maxx-minx)/width,(maxy-miny)/height]
out_size = [bbox[1]-bbox[0], bbox[3]-bbox[2]]
img_size = [int(out_size[0]/ratio[0]), int(out_size[1]/ratio[1])]
gbox = "0 "+str(img_size[0]-1)+" 0 "+str(img_size[1]-1)
midx_name=outdir+"/global.midx"
midx_out = open(midx_name,"wt")
midx_out.write("<dataset typename='IdxMultipleDataset'>\n")
midx_out.write('<field name="voronoi">\n <code>output=voronoi()</code>\n</field>')
cwd = os.getcwd()
count = 0
for img in images:
if count > limit:
break
count += 1
lbox = "0 "+str(img.size[0]-1)+" 0 "+str(img.size[1]-1)
ancp = [int((img.frame[0]-bbox[0])/ratio[0]), int((img.frame[2]-bbox[2])/ratio[1])]
#print(ancp)
dbox = str(ancp[0])+ " " +str(ancp[0]+img.size[0]-1)+ " "+str(ancp[1])+ " "+str(ancp[1]+img.size[1]-1)
#midx_out.write('\t<dataset url="file://'+outdir+"/"+img.name+'exp.idx" name="'+img.name+'"> <M><translate x="'+str(ancp[0])+'" y="'+str(ancp[1])+'"/></M> </dataset>\n')
midx_out.write('\t<dataset url="file://'+outdir+"/"+img.name+'exp.idx" name="'+img.name+'" offset="'+str(ancp[0])+' '+str(ancp[1])+'"/>\n')
exp_idx = outdir+"/"+img.name+"exp.idx"
field=Field("data",dtype,"row_major")
CreateIdx(url=exp_idx,dims=img.size,fields=[field])
db=PyDataset(exp_idx)
#convertCommand(["create", exp_idx, "--box", lbox, "--fields", 'data '+dtype,"--time","0 0 time%03d/"])
#convert.runFromArgs(["create", exp_idx, "--box", lbox, "--fields", 'data '+dtype,"--time","0 0 time%03d/"])
print("Converting "+str(count)+"/"+str(min(limit, len(images)))+"...")
data=numpy.asarray(Image.open(img.path))
db.write(data)
#convertCommand(["import",img.path,"--dims",str(img.size[0]),str(img.size[1])," --dtype ",dtype,"--export",exp_idx," --box ",lbox, "--time", "0"])
#convert.runFromArgs(["import",img.path,"--dims",str(img.size[0]),str(img.size[1])," --dtype ",dtype,"--export",exp_idx," --box ",lbox, "--time", "0"])
midx_out.write('</dataset>')
midx_out.close();
print("Done conversion of tiles, now generating final mosaic")
# Make one big photomosaic
midxToIdx(os.path.abspath(midx_name), os.path.abspath(outdir+"/"+outname+".idx"))
# moving clutter to "outdir/temp" folder
for f in glob.glob(outdir+"/*tifexp*"):
subprocess.run(["mv",f,outdir+"/temp/"])
for f in glob.glob(outdir+"/*.tif"):
subprocess.run(["mv",f,outdir+"/temp/"])
subprocess.run(["mv",outdir+"/global.midx",outdir+"/temp/"])
# delete temp folder at the end
#subprocess.run(["rm","-R", outdir+"/temp"])
print("DONE")
| 30.522088 | 172 | 0.619342 |
a19de4fc6f1c20cd12d2dfef53eca7293ca3f561 | 38 | py | Python | scooby/plugins/processtime/__init__.py | zetaab/django-scooby-profiler | c4e63b5751a7aec2b01df3b46368c6ad40ec51e3 | [
"MIT"
] | 9 | 2018-09-20T16:45:40.000Z | 2021-08-08T07:04:55.000Z | scooby/plugins/processtime/__init__.py | zetaab/django-scooby-profiler | c4e63b5751a7aec2b01df3b46368c6ad40ec51e3 | [
"MIT"
] | 7 | 2018-09-14T10:34:37.000Z | 2019-04-20T06:54:29.000Z | scooby/plugins/processtime/__init__.py | zetaab/django-scooby-profiler | c4e63b5751a7aec2b01df3b46368c6ad40ec51e3 | [
"MIT"
] | 3 | 2018-09-14T10:39:51.000Z | 2019-06-26T09:32:13.000Z | from .plugin import ProcessTimePlugin
| 19 | 37 | 0.868421 |
a19e03506530c3d0c99934eb6006220cb01ea229 | 3,972 | py | Python | data_creation/generate_cosmology_data.py | kstoreyf/emu-fight | 2b2c538619f0e5ff7192d83f31346bb25b7ca41e | [
"MIT"
] | 3 | 2020-09-11T01:55:40.000Z | 2020-11-24T00:49:17.000Z | data_creation/generate_cosmology_data.py | kstoreyf/emu-fight | 2b2c538619f0e5ff7192d83f31346bb25b7ca41e | [
"MIT"
] | 9 | 2020-09-02T09:21:49.000Z | 2020-09-09T19:15:44.000Z | data_creation/generate_cosmology_data.py | kstoreyf/emu-fight | 2b2c538619f0e5ff7192d83f31346bb25b7ca41e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Created on Tue Sep 1 2020
@author: kstoreyf
"""
import numpy as np
import nbodykit
import pandas as pd
import pickle
from nbodykit import cosmology
# Generate the parameters that govern the output training set data
# Generate the parameters that govern the output testing set data
# Generate the output data that we're interested in emulating
# Format data into pandas data frames
# Format the data to save it
# Save the data to a file
if __name__=='__main__':
main()
| 32.557377 | 139 | 0.651561 |
a19e65a3cf25b4afaeb7f38c8c02fdf3601144bc | 1,256 | py | Python | handlers/checkers/highway/track.py | n0s0r0g/perfect_OSM | d07fef525865a337d8d9bd3e8168cf6b411a182b | [
"MIT"
] | 4 | 2016-04-03T21:12:57.000Z | 2016-05-04T09:14:43.000Z | handlers/checkers/highway/track.py | n0s0r0g/perfect_OSM | d07fef525865a337d8d9bd3e8168cf6b411a182b | [
"MIT"
] | null | null | null | handlers/checkers/highway/track.py | n0s0r0g/perfect_OSM | d07fef525865a337d8d9bd3e8168cf6b411a182b | [
"MIT"
] | null | null | null | from handlers.simplehandler import SimpleHandler
_NO_SURFACE = {
'title': ' ',
'help_text': """ highway=track (surface).
:
- surface -
:
- surface:grade - (0..3)
- smoothness -
- maxspeed:practical - ,
- tracktype
:
- http://wiki.openstreetmap.org/wiki/RU:Tag:highway%3Dtrack
- http://wiki.openstreetmap.org/wiki/RU:Key:surface
- http://wiki.openstreetmap.org/wiki/RU:Proposed_features/Surface_Quality
- http://wiki.openstreetmap.org/wiki/User:Danidin9/Variants_of_smooth_surfaces
""",
}
| 33.052632 | 80 | 0.713376 |
a19fbb8c0d58c560088872b36cde005f0cdcc5c0 | 9,636 | py | Python | job_title_processing/ressources_txt/FR/cleaner/job.py | OnlineJobVacanciesESSnetBigData/JobTitleProcessing_FR | d5cf340e1a57d84562705a92b213333875be21f7 | [
"MIT"
] | 3 | 2020-10-25T17:44:50.000Z | 2021-12-11T22:28:18.000Z | job_title_processing/ressources_txt/FR/cleaner/job.py | OnlineJobVacanciesESSnetBigData/JobTitleProcessing_FR | d5cf340e1a57d84562705a92b213333875be21f7 | [
"MIT"
] | null | null | null | job_title_processing/ressources_txt/FR/cleaner/job.py | OnlineJobVacanciesESSnetBigData/JobTitleProcessing_FR | d5cf340e1a57d84562705a92b213333875be21f7 | [
"MIT"
] | 1 | 2020-11-19T12:44:25.000Z | 2020-11-19T12:44:25.000Z | # -*- coding: utf-8 -*-
jobwords = [
'nan',
'temps plein', 'temps complet', 'mi temps', 'temps partiel', # Part / Full time
'cherche', # look for
'urgent','rapidement', 'futur',
'job', 'offre', # Job offer
'trice', 're', 'eur', 'euse', 're', 'se', 'me', 'trices', # Female endings
'res', 'eurs', 'euses', 'res', 'fe', 'fes',# Female endings
've', 'ne', 'iere', 'rice', 'te', 'er', 'ice',
'ves', 'nes', 'ieres', 'rices', "tes", 'ices', # Female endings
'hf', 'fh', # Male/Female, Female/Male
'semaine', 'semaines', 'sem',
'h', 'heure', 'heures', 'hebdo', 'hebdomadaire', # Time (week, hour)
'anne', 'mois', 'an', # Year
'jour', 'jours', # Day
't', 'automne', 'hiver', 'printemps', # summer, winter ...
'lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi', 'dimanche', # Week day
'janvier', 'fvrier', 'mars', 'avril', 'mai', 'juin', # Month
'juillet', 'aout', 'septembre', 'octobre', 'novembre', 'dcembre',
"deux", "trois", "quatre", "cinq", "six", "sept", # Number
"huit", "neuf", "dix", "onze", # Number
"euros", "euro", "dollars", "dollar", # Money
"super", # Pour viter "super poids lourd"
# To clean
'caces', 'cap', 'bts', 'dea', 'diplme', 'bac',
"taf", "ref", "poste", "pourvoir", "sein", "profil",
"possible",
'indpendant',
'saisonnier', 'alternance', 'alternant', 'apprenti',
'apprentissage', 'stagiaire', 'tudiant', 'fonctionnaire',
'intermittent', 'lve', 'freelance', "professionnalisation",
'partiel', 'cdd', 'cdi', 'contrat', 'pro',
"fpe", # Fonction publique d'tat
'dbutant', 'expriment', 'junior', 'senior',
'confirm', 'catgorie',
'trilingue', 'bilingue',
'bi','international', 'france', 'national', 'rgional',
'europen', 'emploi', 'non',
'exclusif', 'uniquement',
'permis', 'ssiap', 'bnssa',
]
job_replace_infirst = {
'3 d' : 'troisd',
'3d':'troisd',
'2 d': 'deuxd',
'2d':'deuxd',
'b to b': 'btob'
}
job_lemmas_expr = {
'cours particulier' : 'professeur',
'call center' : 'centre appels',
'vl pl vu' : 'poids lourd',
'front end' : 'informatique',
'back end' : 'informatique',
'homme femme' : '',
'femme homme' : ''
}
job_normalize_map = [
("indu", "industriel"),
("pl","poids lourd"),
("spl","poids lourd"),
("sav","service aprs vente"),
("unix","informatique"),
("windows","informatique"),
("php","informatique"),
("java","informatique"),
("python","informatique"),
("jee","informatique"),
("sap","informatique"),
("abap","informatique"),
("ntic","informatique"),
# ("c","informatique"),
("rh","ressources humaines"),
("vrd","voirie rseaux divers"),
("super poids lourd","poids lourd"),
("adv","administration des ventes"),
("cvv","chauffage climatisation"),
("agt","agent"),
("ash","agent des services hospitaliers"),
("ibode","infirmier de bloc opratoire"),
("aes","accompagnant ducatif et social"),
("ads","agent de scurit"),
("amp","aide mdico psychologique"),
("asvp","agent de surveillance des voies publiques"),
("cesf","conseiller en conomie sociale et familiale"),
("babysitter","baby sitter"),
("babysitting","baby sitter"),
("sitting","sitter"),
("nounou", "nourrice"),
("coaching","coach"),
("webdesigner","web designer"),
("webmarketer","web marketer"),
("helpdesk","help desk"),
("prof","professeur"),
("maths", "mathmatiques"),
("go", "gographie"),
("philo", "philosophie"),
("epr","employe polyvalent de restauration"),
("NTIC","Informatique"),
("SIG","Systmes d Information Gographique "),
("EPSCP","tablissement public caractre scientifique, culturel et professionnel "),
("NRBC","Nuclaire, Radiologique, Bactriologique, Chimique "),
("SAV","Service aprs vente"),
("ACIM ","Agent des Cabinets en Imagerie Mdicale "),
("ASC","Agent des Services Commerciaux"),
("AEC","Agent d Escale Commerciale"),
("ASEM","Agent spcialis des coles maternelles "),
("TIC","Informatique"),
("HSE","Hygine Scurit Environnement "),
("ATER","Attach temporaire d enseignement et de recherche "),
("AVS","Auxiliaire de Vie Sociale "),
("AIS","Auxiliaire d Intgration Scolaire"),
("ASV","Auxiliaire Spcialis Vtrinaire "),
("AVQ","Auxiliaire Vtrinaire Qualifi"),
("IARD","Incendie, Accidents, Risques Divers "),
("NBC","Nuclaire, Bactriologique et Chimique"),
("PGC","Produits de Grande Consommation "),
("PNT","Personnel Navigant Technique "),
("PAO","Publication Assiste par Ordinateur"),
("TTA","toute arme"),
("VRD","Voiries et Rseaux Divers"),
("CMS","Composants Monts en Surface "),
("VSL","Vhicule Sanitaire Lger"),
("CIP","Conseiller d Insertion et de Probation "),
("CND","Contrle Non Destructif "),
("MOA","Matrise d Ouvrage"),
("OPC","Ordonnancement, Pilotage et Coordination de chantier"),
("SPS","Scurit, Protection de la Sant "),
("DAF","Directeur administratif et financier"),
("CHU","Centre Hospitalier Universitaire "),
("GSB","Grande Surface de Bricolage "),
("GSS","Grande Surface Spcialise "),
("DOSI","Directeur de l Organisation et des Systmes d Information "),
("ESAT","entreprise ou de Service d Aide par le Travail "),
("DRH","Directeur des Ressources Humaines "),
("DSI","Directeur des services informatiques "),
("DSPIP","Directeur des services pnitentiaires d insertion et de probation "),
("EPA","Etablissement Public caractre Administratif "),
("EPST","Etablissement Public caractre Scientifique et Technologique "),
("EPCC","Etablissement Public de Coopration Culturelle "),
("EPIC","Etablissement Public et Commercial "),
("IFSI","Institut de formation en soins infirmiers"),
("MAS","Machines Sous "),
("SCOP","Socit Cooprative Ouvrire de Production"),
(" EVS","Employe du Service Aprs Vente "),
("EVAT","Engage Volontaire de l Arme de Terre "),
("EV","Engag Volontaire "),
("GIR","Groupement d Individuels Regroups "),
("CN","Commande Numrique "),
("SICAV","Socit d Investissement Capital Variable "),
("OPCMV","Organisme de Placement Collectif en Valeurs Mobilires "),
("OPCVM","Organisme de Placement Collectif en Valeurs Mobilires "),
("IADE","Infirmier Anesthsiste Diplm d Etat "),
("IBODE","Infirmier de bloc opratoire Diplm d Etat "),
("CTC","contrle technique de construction "),
("IGREF","Ingnieur du gnie rural des eaux et forts "),
("IAA","Inspecteur d acadmie adjoint"),
("DSDEN","directeur des services dpartementaux de l Education nationale "),
("IEN","Inspecteur de l Education Nationale "),
("IET","Inspecteur de l enseignement technique "),
("ISPV","Inspecteur de Sant Publique Vtrinaire "),
("IDEN","Inspecteur dpartemental de l Education nationale "),
("IIO","Inspecteur d information et d orientation "),
("IGEN","Inspecteur gnral de l Education nationale "),
("IPR","Inspecteur pdagogique rgional"),
("IPET","Inspecteur principal de l enseignement technique "),
("PNC","Personnel Navigant Commercial "),
("MPR","Magasin de Pices de Rechange "),
("CME","Cellule, Moteur, Electricit "),
("BTP","Btiments et Travaux Publics "),
("EIR","Electricit, Instrument de bord, Radio "),
("MAR","Mdecin Anesthsiste Ranimateur "),
("PMI","Protection Maternelle et Infantile "),
("MISP","Mdecin Inspecteur de Sant Publique "),
("MIRTMO","Mdecin Inspecteur Rgional du Travail et de la Main d oeuvre "),
("DIM","Documentation et de l Information Mdicale"),
("OPL","Officier pilote de ligne "),
("CN","commande numrique "),
("PPM","Patron Plaisance Moteur "),
("PPV","Patron Plaisance Moteur "),
("PhISP","Pharmacien Inspecteur de Sant Publique "),
("PDG","Prsident Directeur Gnral "),
("FLE","Franais Langue Etrangre "),
("PLP","Professeur de lyce professionnel "),
("EPS","ducation physique et sportive "),
("PEGL","Professeur d enseignement gnral de lyce "),
("PEGC","Professeur d enseignement gnral des collges "),
("INJS","instituts nationaux de jeunes sourds "),
("INJA","instituts nationaux de jeunes aveugles "),
("TZR","titulaire en zone de remplacement "),
("CFAO","Conception de Fabrication Assiste par Ordinateur "),
("SPIP","service pnitentiaire d insertion et de probation "),
("PME","Petite ou Moyenne Entreprise "),
("RRH","Responsable des Ressources Humaines "),
("QSE","Qualit Scurit Environnement "),
("SASU","Secrtaire d administration scolaire et universitaire "),
("MAG","Metal Active Gas "),
("MIG","Metal Inert Gas "),
("TIG","Tungsten Inert Gas "),
("GED","Gestion lectronique de documents"),
("CVM","Circulations Verticales Mcanises "),
("TISF","Technicien Intervention Sociale et Familiale"),
("MAO","Musique Assiste par Ordinateur"),
# ("Paie","paye"),
# ("paies","paye"),
("ml","mission locale"),
("AS","aide soignant"),
("IDE","infirmier de soins gnraux"),
("ERD","tudes recherche et dveloppement")
]
| 42.263158 | 91 | 0.603881 |
a19ffbe9ac756d60be5cdc280b27e2d8d949602c | 6,262 | py | Python | appimagebuilder/app_dir/runtime/app_run.py | srevinsaju/appimage-builder | 105e253ccc43a345841b7d4037c1974938132a1d | [
"MIT"
] | null | null | null | appimagebuilder/app_dir/runtime/app_run.py | srevinsaju/appimage-builder | 105e253ccc43a345841b7d4037c1974938132a1d | [
"MIT"
] | null | null | null | appimagebuilder/app_dir/runtime/app_run.py | srevinsaju/appimage-builder | 105e253ccc43a345841b7d4037c1974938132a1d | [
"MIT"
] | null | null | null | # Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import fnmatch
import logging
import os
import shutil
import stat
import subprocess
import uuid
from pathlib import Path
from urllib import request
| 34.98324 | 86 | 0.603641 |
a1a133f4a1f010df28c349cd5d84226826c23e63 | 1,631 | py | Python | setup.py | cardosan/tempo_test | ff5a757c9ca54e5af1ccd71e9e5840bac279e4f0 | [
"BSD-3-Clause"
] | null | null | null | setup.py | cardosan/tempo_test | ff5a757c9ca54e5af1ccd71e9e5840bac279e4f0 | [
"BSD-3-Clause"
] | null | null | null | setup.py | cardosan/tempo_test | ff5a757c9ca54e5af1ccd71e9e5840bac279e4f0 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
import io
# Packaging metadata for the bw2temporalis distribution (dynamic LCA for the
# Brightway2 framework).  License and long description are read from disk at
# build time, so LICENSE.txt and README.rst must ship next to setup.py.
setup(
    name='bw2temporalis',
    version="0.9.2",
    # Explicit package list (no find_packages()): new sub-packages must be
    # added here by hand or they will be silently left out of releases.
    packages=[
        "bw2temporalis",
        "bw2temporalis.tests",
        "bw2temporalis.examples",
        "bw2temporalis.cofire"
    ],
    author="Chris Mutel",
    author_email="cmutel@gmail.com",
    license=io.open('LICENSE.txt', encoding='utf-8').read(),
    url="https://bitbucket.org/cmutel/brightway2-temporalis",
    # Runtime dependencies; version pins are minimums, not exact pins.
    install_requires=[
        "arrow",
        "eight",
        "brightway2",
        "bw2analyzer",
        "bw2calc>=0.11",
        "bw2data>=0.12",
        "bw2speedups>=2.0",
        "numexpr",
        "numpy",
        "scipy",
        "stats_arrays",
    ],
    description='Provide a dynamic LCA calculations for the Brightway2 life cycle assessment framework',
    long_description=io.open('README.rst', encoding='utf-8').read(),
    # Trove classifiers shown on the package index page.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Visualization',
    ],
)
| 32.62 | 104 | 0.591048 |
a1a1aaea4e69c1175a5a073ed210e340c1ccb2d1 | 8,444 | py | Python | applications/FemToDemApplication/python_scripts/MainFEM_for_coupling.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/FemToDemApplication/python_scripts/MainFEM_for_coupling.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/FemToDemApplication/python_scripts/MainFEM_for_coupling.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z |
import KratosMultiphysics
import KratosMultiphysics.FemToDemApplication.MainFemDem as MainFemDem
import KratosMultiphysics.FemToDemApplication as KratosFemDem
import KratosMultiphysics.DEMApplication as DEM
import KratosMultiphysics.DemStructuresCouplingApplication as DEM_Structures
# Python script created to modify the existing one due to the coupling of the DEM app in 2D
| 51.487805 | 126 | 0.682852 |
a1a27befca81b9961c7c90b5224fd531c6279e19 | 5,284 | py | Python | utils/arg_parser.py | dataflowr/Project-Neural-Bootstrapper | 36278a7f6884438553d90d9cdc12eaf0da1bc7bf | [
"MIT"
] | 17 | 2020-10-17T08:46:56.000Z | 2022-02-27T17:32:43.000Z | utils/arg_parser.py | dataflowr/Project-Neural-Bootstrapper | 36278a7f6884438553d90d9cdc12eaf0da1bc7bf | [
"MIT"
] | 1 | 2022-03-12T15:44:38.000Z | 2022-03-13T00:47:41.000Z | utils/arg_parser.py | dataflowr/Project-Neural-Bootstrapper | 36278a7f6884438553d90d9cdc12eaf0da1bc7bf | [
"MIT"
] | 5 | 2021-01-30T05:04:29.000Z | 2022-02-14T23:49:42.000Z | import os
import yaml
import copy
import logging
from pathlib import Path
import torch
from torch.nn import *
from torch.optim import *
import torch.distributed as dist
from torch.optim.lr_scheduler import *
from torch.nn.parallel import DistributedDataParallel
from utils.metrics import *
from models import _get_model
torch.backends.cudnn.benchmark = True
if __name__ == '__main__':
    # Ad-hoc smoke test for the Argments config helper (not a unit test).
    # Log everything at DEBUG to the console and at INFO to log.log.
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    file_handler = logging.FileHandler('log.log')
    file_handler.setLevel(logging.INFO)
    log.addHandler(stream_handler)
    log.addHandler(file_handler)
    # Argments is the YAML-backed config class defined elsewhere in this
    # module.  Exercise nested updates via both the private _update() API and
    # the "a/b/c" path-style item assignment; both should land in one tree.
    Args = Argments('test.yaml')
    Args._update('path', 'abcd', 'efgh', value='zzzz')
    Args['path/cccc/dddd'] = 'ffff'
    log.debug(Args)
    log.debug(Args['path/cccc/dddd'])
    # print(Args)
    # print('path' in Args)
    # print(Args['path/abcd/efgh'])
    # print(Args['path/cccc/dddd'])
    # print(Args.module['lr_scheduler'])
| 35.702703 | 107 | 0.543906 |
a1a36361a953bc1ab0c48721b0d1db387eabef20 | 6,139 | py | Python | MDP/MDP.py | ADP-Benchmarks/ADP-Benchmark | aea3d1be7c28c7290a23e731b9e7b460ee6976f7 | [
"MIT"
] | 1 | 2020-01-17T17:09:46.000Z | 2020-01-17T17:09:46.000Z | MDP/MDP.py | ADP-Benchmarks/ADP-Benchmark | aea3d1be7c28c7290a23e731b9e7b460ee6976f7 | [
"MIT"
] | null | null | null | MDP/MDP.py | ADP-Benchmarks/ADP-Benchmark | aea3d1be7c28c7290a23e731b9e7b460ee6976f7 | [
"MIT"
] | 2 | 2020-10-26T04:51:42.000Z | 2020-11-22T20:20:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GitHub Homepage
----------------
https://github.com/ADP-Benchmarks
Contact information
-------------------
ADP.Benchmarks@gmail.com.
License
-------
The MIT License
"""
from MDP.spaces.space import Space
from MDP.transition import Transition
from MDP.objective import Objective
import copy
| 32.654255 | 80 | 0.468154 |
a1a59271f18a59c5e8650b4f274444162d49578d | 7,186 | py | Python | tests/test_multiplegraphscallpeaks.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 10 | 2018-04-19T21:54:31.000Z | 2021-07-22T12:46:33.000Z | tests/test_multiplegraphscallpeaks.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 9 | 2018-01-30T20:41:36.000Z | 2021-01-28T23:00:18.000Z | tests/test_multiplegraphscallpeaks.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 3 | 2019-08-20T21:43:53.000Z | 2022-01-20T14:39:34.000Z | from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks
from graph_peak_caller.intervals import Intervals
from graph_peak_caller import Configuration
from graph_peak_caller.reporter import Reporter
from offsetbasedgraph import GraphWithReversals as Graph, \
DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval
import unittest
from graph_peak_caller.control.linearmap import LinearMap
from pyvg.sequences import SequenceRetriever
import logging
from graph_peak_caller.logging_config import set_logging_config
#set_logging_config(1)
import os
from graph_peak_caller.command_line_interface import run_argument_parser
if __name__ == "__main__":
    # Discover and run every unittest.TestCase defined in this module.
    unittest.main()
| 38.427807 | 124 | 0.585305 |
a1a925ea7d8dee1ab5cd0e823a74e840575eb035 | 7,141 | py | Python | brainite/models/mcvae.py | neurospin-deepinsight/brainite | 18aafe5d1522f1a4a4081d43f120464afe6cd0a7 | [
"CECILL-B"
] | null | null | null | brainite/models/mcvae.py | neurospin-deepinsight/brainite | 18aafe5d1522f1a4a4081d43f120464afe6cd0a7 | [
"CECILL-B"
] | null | null | null | brainite/models/mcvae.py | neurospin-deepinsight/brainite | 18aafe5d1522f1a4a4081d43f120464afe6cd0a7 | [
"CECILL-B"
] | 1 | 2021-09-16T08:29:19.000Z | 2021-09-16T08:29:19.000Z | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Sparse Multi-Channel Variational Autoencoder for the Joint Analysis of
Heterogeneous Data.
[1] Sparse Multi-Channel Variational Autoencoder for the Joint Analysis of
Heterogeneous Data, Antelmi, Luigi, PMLR 2019,
https://github.com/ggbioing/mcvae.
"""
# Imports
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.distributions import Normal, kl_divergence
from .vae import VAE
| 33.213953 | 77 | 0.545022 |
a1a93df58c13961d6720cb2c8092c988d4421933 | 5,312 | py | Python | 3.algorithmic_expert/Tries/1.Suffix Trie Construction.py | jimmymalhan/Coding_Interview_Questions_Python_algoexpert | 94e8b4c63e8db92793b99741120a09f22806234f | [
"MIT"
] | 1 | 2020-10-05T04:55:26.000Z | 2020-10-05T04:55:26.000Z | 3.algorithmic_expert/Tries/1.Suffix Trie Construction.py | jimmymalhan/Coding_Interview_Questions_Python_algoexpert | 94e8b4c63e8db92793b99741120a09f22806234f | [
"MIT"
] | null | null | null | 3.algorithmic_expert/Tries/1.Suffix Trie Construction.py | jimmymalhan/Coding_Interview_Questions_Python_algoexpert | 94e8b4c63e8db92793b99741120a09f22806234f | [
"MIT"
] | null | null | null | # Problem Name: Suffix Trie Construction
# Problem Description:
# Write a SuffixTrie class for Suffix-Trie-like data structures. The class should have a root property set to be the root node of the trie and should support:
# - Creating the trie from a string; this will be done by calling populateSuffixTrieFrom method upon class instantiation(creation), which should populate the root of the class.
# - Searching for strings in the trie.
# Note that every string added to the trie should end with special endSymbol character: "*".
####################################
# Sample Input (for creation):
# string = "babc"
# Sample Output (for creation):
# The structure below is the root of the trie:
# {
# "c": {"*": true},
# "b": {
# "c": {"*": true},
# "a": {"b": {"c": {"*": true}}},
# },
# "a": {"b": {"c": {"*": true}}},
# }
# Sample Input (for searching in the suffix trie above):
# string = "abc"
# Sample Output (for searching in the suffix trie above):
# True
####################################
"""
Explain the solution:
- Building a suffix-trie-like data structure consists of essentially storing every suffix of a given string in a trie. To do so, iterate through the input string one character at a time, and insert every substring starting at each character and ending at the end of string into the trie.
- To insert a string into the trie, start by adding the first character of the string into the root node of the trie and map it to an empty hash table if it isin't already there. Then, iterate through the rest of the string, inserting each of the remaining characters into the previous character's corresponding node(or hash table) in the trie, making sure to add an endSymbol "*" at the end.
- Searching the trie for a specific string should follow a nearly identical logic to the one used to add a string in the trie.
# Creation: O(n^2) time | O(n^2) space - where n is the length of the input string
# Searching: O(m) time | O(1) space - where m is the length of the input string
##################
Detailed explanation of the Solution:
create a class called SuffixTrie:
initialize function takes in a string:
initialize the class with root as an empty hash table
initialize the class with a endSymbol variable that is set to "*"
create a method called populateSuffixTrieFrom with a parameter of string
# Creation:
initialize function populateSuffixTrieFrom takes in a string:
iterate as i through the string one character at a time:
use Helper function insertSubsStringStartingAt with the parameter of the string and the current character(i)
initialize function insertSubsStringStartingAt takes in a string and a character(i):
create a variable called node that is set to the root of the trie
iterate as j through the string starting at the character(i) and ending at the end of the string:
create a variable called letter that is set to the current string[j]
if the letter is not in the node:
create a new hash table and set it to the node[letter] # this is the first time we've seen this letter
create a variable called node that is set to the node[letter] # this is the node we're currently at
node[self.endSymbol] = True # insert the endSymbol "*" at the end of the string
# Searching:
initialize function contains takes in a string:
create a variable called node that is set to the root of the trie
iterate as letter through the string:
if the letter is not in the node:
return False
create a variable called node that is set to the node[letter]
return self.endSymbol in node # return True if the endSymbol "*" is in the node
"""
####################################
if __name__ == '__main__':
    # Entry point; main() is expected to be defined elsewhere in this module.
    main()
a1a9ddb3b1fe60f0adead9941a1fa52ce26179fe | 2,026 | py | Python | Tms-GCN-PyTorch/utils/callbacks/base/best_epoch.py | Joker-L0912/Tms-GCN-Py | daed1c704e797cbb86d219d24b878284f3d5c426 | [
"Apache-2.0"
] | null | null | null | Tms-GCN-PyTorch/utils/callbacks/base/best_epoch.py | Joker-L0912/Tms-GCN-Py | daed1c704e797cbb86d219d24b878284f3d5c426 | [
"Apache-2.0"
] | null | null | null | Tms-GCN-PyTorch/utils/callbacks/base/best_epoch.py | Joker-L0912/Tms-GCN-Py | daed1c704e797cbb86d219d24b878284f3d5c426 | [
"Apache-2.0"
] | null | null | null | import copy
import numpy as np
import torch
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.callbacks import Callback
| 38.961538 | 101 | 0.612043 |
a1a9fa4dcfc3f60c5f6176dc7d9d7778a0c79011 | 12,840 | py | Python | playhouse/tests.py | mikiec84/peewee | 2abc201d807bfed99048ca67a465ccd758ee7852 | [
"MIT"
] | 1 | 2020-03-12T17:01:44.000Z | 2020-03-12T17:01:44.000Z | playhouse/tests.py | mikiec84/peewee | 2abc201d807bfed99048ca67a465ccd758ee7852 | [
"MIT"
] | null | null | null | playhouse/tests.py | mikiec84/peewee | 2abc201d807bfed99048ca67a465ccd758ee7852 | [
"MIT"
] | 1 | 2020-03-12T17:02:03.000Z | 2020-03-12T17:02:03.000Z | from hashlib import sha1 as _sha1
import sqlite3
import unittest
from peewee import *
import signals
import sqlite_ext as sqe
import sweepea
# Shared in-memory database for the non-extension test cases.
db = SqliteDatabase(':memory:')
# use a disk-backed db since memory dbs only exist for a single connection and
# we need to share the db w/2 for the locking tests. additionally, set the
# sqlite_busy_timeout to 100ms so when we test locking it doesn't take forever
ext_db = sqe.SqliteExtDatabase('tmp.db', timeout=.1)
# Register the SQLite extensions under test: 1- and 2-argument weighted
# average aggregates, a reverse collation, and a sha1 scalar function.
ext_db.adapter.register_aggregate(sqe.WeightedAverage, 1, 'weighted_avg')
ext_db.adapter.register_aggregate(sqe.WeightedAverage, 2, 'weighted_avg2')
ext_db.adapter.register_collation(sqe.collate_reverse)
ext_db.adapter.register_function(sqe.sha1)
#ext_db.adapter.register_function(sqerank) # < auto register
| 33.007712 | 147 | 0.597118 |
a1aa7f5e730996934c8876a85b426f2a47d1eacc | 799 | py | Python | appengine/experimental/crbadge/testdata/upload.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/experimental/crbadge/testdata/upload.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/experimental/crbadge/testdata/upload.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import os, sys
import optparse
import json, urllib
import httplib2
import urlparse
if __name__ == '__main__':
    # Entry point; main() is expected to be defined elsewhere in this module.
    main()
| 22.828571 | 72 | 0.649562 |
a1ab946e745fb18496c5d63e37229b34b0071a28 | 112 | py | Python | libs/test_utils.py | bongnv/sublime-go | 9f5f4f9795357ec595f73c1f71e479eca694b61e | [
"MIT"
] | 6 | 2018-05-12T04:43:36.000Z | 2018-09-21T17:44:53.000Z | libs/test_utils.py | bongnv/sublime-go | 9f5f4f9795357ec595f73c1f71e479eca694b61e | [
"MIT"
] | null | null | null | libs/test_utils.py | bongnv/sublime-go | 9f5f4f9795357ec595f73c1f71e479eca694b61e | [
"MIT"
] | null | null | null | import unittest
| 16 | 38 | 0.723214 |
a1ac73057ccc5855df2d0931ac3ee0a8a54ddd18 | 855 | py | Python | python-algorithm/leetcode/problem_457.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 5 | 2017-06-11T09:19:34.000Z | 2019-01-16T16:58:31.000Z | python-algorithm/leetcode/problem_457.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 5 | 2020-03-22T13:53:54.000Z | 2020-03-23T08:49:35.000Z | python-algorithm/leetcode/problem_457.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 1 | 2019-03-02T15:50:43.000Z | 2019-03-02T15:50:43.000Z | """457. Circular Array Loop
https://leetcode.com/problems/circular-array-loop/
"""
from typing import List
| 31.666667 | 70 | 0.527485 |
a1ac757a73cea2cb4a80f87ddc034e4b6d7ef1b0 | 10,937 | py | Python | task/task2.py | joseph9991/Milestone1 | 08f95e845a743539160e9a7330ca58ea20240229 | [
"MIT"
] | null | null | null | task/task2.py | joseph9991/Milestone1 | 08f95e845a743539160e9a7330ca58ea20240229 | [
"MIT"
] | null | null | null | task/task2.py | joseph9991/Milestone1 | 08f95e845a743539160e9a7330ca58ea20240229 | [
"MIT"
] | null | null | null | import pandas as pd
from pandas import read_csv
import os
import sys
import glob
import re
import soundfile as sf
import pyloudnorm as pyln
from .thdncalculator import execute_thdn
# # For Testing
# if __name__ == "__main__":
# file_name = sys.argv[1]
# # Temp Code
# data =[
# {
# "Unnamed: 0": 0,
# "start_time": "00:00:00",
# "end_time": "00:00:00",
# "speaker": "spk_1",
# "comment": "Well,",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 1,
# "start_time": "00:00:01",
# "end_time": "00:00:02",
# "speaker": "spk_1",
# "comment": "Hi, everyone.",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 2,
# "start_time": "00:00:03",
# "end_time": "00:00:05",
# "speaker": "spk_0",
# "comment": "Everyone's money. Good",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 3,
# "start_time": "00:00:05",
# "end_time": "00:00:10",
# "speaker": "spk_2",
# "comment": "morning, everyone. Money. Thanks for joining. Uh, so let's quickly get started with the meeting.",
# "stopwords": 4,
# "fillerwords": 1
# },
# {
# "Unnamed: 0": 4,
# "start_time": "00:00:11",
# "end_time": "00:00:14",
# "speaker": "spk_2",
# "comment": "Today's agenda is to discuss how we plan to increase the reach off our website",
# "stopwords": 8,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 5,
# "start_time": "00:00:15",
# "end_time": "00:00:20",
# "speaker": "spk_2",
# "comment": "and how to make it popular. Do you have any ideas, guys? Yes.",
# "stopwords": 8,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 6,
# "start_time": "00:00:20",
# "end_time": "00:00:22",
# "speaker": "spk_0",
# "comment": "Oh, Whoa. Um,",
# "stopwords": 0,
# "fillerwords": 1
# },
# {
# "Unnamed: 0": 7,
# "start_time": "00:00:23",
# "end_time": "00:00:36",
# "speaker": "spk_1",
# "comment": "it's okay. Thank you so much. Yes. Asai was saying one off. The ideas could be to make it more such friendly, you know? And to that I think we can. We need to improve the issue off our website.",
# "stopwords": 21,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 8,
# "start_time": "00:00:37",
# "end_time": "00:00:41",
# "speaker": "spk_2",
# "comment": "Yeah, that's a great point. We certainly need to improve the SC off our site.",
# "stopwords": 6,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 9,
# "start_time": "00:00:42",
# "end_time": "00:00:43",
# "speaker": "spk_2",
# "comment": "Let me let me take a note of this.",
# "stopwords": 4,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 10,
# "start_time": "00:00:45",
# "end_time": "00:00:57",
# "speaker": "spk_0",
# "comment": "How about using social media channels to promote our website? Everyone is on social media these days on way. We just need to target the right audience and share outside with them. Were often Oh, what do you think?",
# "stopwords": 18,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 11,
# "start_time": "00:00:58",
# "end_time": "00:01:05",
# "speaker": "spk_2",
# "comment": "It's definitely a great idea on since we already have our social accounts, I think we can get started on this one immediately.",
# "stopwords": 11,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 12,
# "start_time": "00:01:06",
# "end_time": "00:01:11",
# "speaker": "spk_0",
# "comment": "Yes, I can work on creating a plan for this. I come up with the content calendar base.",
# "stopwords": 9,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 13,
# "start_time": "00:01:11",
# "end_time": "00:01:17",
# "speaker": "spk_1",
# "comment": "Yeah, and I can start with creating the CEO content for all the periods off our website.",
# "stopwords": 10,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 14,
# "start_time": "00:01:17",
# "end_time": "00:01:24",
# "speaker": "spk_2",
# "comment": "Awesome. I think we already have a plan in place. Let's get rolling Eyes. Yeah, definitely.",
# "stopwords": 5,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 15,
# "start_time": "00:01:24",
# "end_time": "00:01:25",
# "speaker": "spk_2",
# "comment": "Yeah, sure.",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 16,
# "start_time": "00:01:26",
# "end_time": "00:01:33",
# "speaker": "spk_2",
# "comment": "Great. Thanks. Thanks, everyone, for your ideas. I'm ending the call now. Talk to you soon. Bye. Bye bye. Thanks.",
# "stopwords": 5,
# "fillerwords": 0
# }]
# obj = Task2(data,file_name)
# obj.execute_all_functions() | 32.357988 | 241 | 0.526744 |
a1acd3aad52a9f207d22596dfa16d615ad5b5b36 | 6,253 | py | Python | agents/hub_policy.py | floriandonhauser/TeBaG-RL | 0110087c97e4d67f739961e7320945da4b3d9592 | [
"MIT"
] | null | null | null | agents/hub_policy.py | floriandonhauser/TeBaG-RL | 0110087c97e4d67f739961e7320945da4b3d9592 | [
"MIT"
] | null | null | null | agents/hub_policy.py | floriandonhauser/TeBaG-RL | 0110087c97e4d67f739961e7320945da4b3d9592 | [
"MIT"
] | null | null | null | import tensorflow as tf
import tensorflow_hub as hub
from tf_agents.networks import network
# tensorflow_text registers the custom ops required by the BERT preprocessing
# model loaded below; TODO: confirm this import is still required.
import tensorflow_text as text
# TF-Hub handle for a token-based text embedding (128-dim NNLM with
# normalization).
embedding = "https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2"
# Small BERT encoder -- per the handle name: 2 layers, hidden size 128,
# 2 attention heads, uncased English.
tfhub_handle_encoder = (
    "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1"
)
# Matching preprocessing model that turns raw strings into BERT inputs.
tfhub_handle_preprocess = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
| 40.869281 | 120 | 0.669119 |
a1ad8c52da06d6abbbc870ab6152a1b0cfde52b7 | 475 | py | Python | meiduo_mall/apps/orders/urls.py | MarioKarting/Django_meiduo_project | ef06e70b1ddb6709983ebb644452c980afc29000 | [
"MIT"
] | null | null | null | meiduo_mall/apps/orders/urls.py | MarioKarting/Django_meiduo_project | ef06e70b1ddb6709983ebb644452c980afc29000 | [
"MIT"
] | null | null | null | meiduo_mall/apps/orders/urls.py | MarioKarting/Django_meiduo_project | ef06e70b1ddb6709983ebb644452c980afc29000 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# _*_ coding:utf-8 _*_
from django.conf.urls import url
from . import views
# Order-flow routes; every view comes from this app's views module.
urlpatterns = [
    # 1. orders/settlement/ -- order settlement page.
    url(r'^orders/settlement/$', views.OrdersSettlementView.as_view(), name='settlement'),
    # 2. orders/commit/ -- commit (place) an order.
    url(r'^orders/commit/$', views.OrdersCommitView.as_view(), name='commit'),
    # 3. orders/success/ -- order-success page.
    # NOTE(review): name='sucess' is misspelled, but templates/reverse()
    # callers may rely on it -- do not rename without auditing call sites.
    url(r'^orders/success/$', views.OrdersSuccessView.as_view(), name='sucess'),
]
| 22.619048 | 90 | 0.661053 |
a1ade519e607956e6b09f57c472fa7d337099ebf | 138 | py | Python | goldmeister/__init__.py | USDA-ARS-NWRC/goldmeister | b4624a355e551c4610834a9dcb971524c45bb437 | [
"CC0-1.0"
] | null | null | null | goldmeister/__init__.py | USDA-ARS-NWRC/goldmeister | b4624a355e551c4610834a9dcb971524c45bb437 | [
"CC0-1.0"
] | 1 | 2020-09-17T16:16:13.000Z | 2020-09-17T16:21:00.000Z | goldmeister/__init__.py | USDA-ARS-NWRC/goldmeister | b4624a355e551c4610834a9dcb971524c45bb437 | [
"CC0-1.0"
] | null | null | null | """Top-level package for Goldmeister."""
__author__ = """Micah Johnson"""
__email__ = 'micah.johnson150@gmail.com'
__version__ = '0.2.0'
| 23 | 40 | 0.702899 |
a1b0c44fad44484d33a19381232ed8782c4771bb | 1,014 | py | Python | db/update.py | msgangwar/Leaderboard | d4cce6a3bb76f6a3c2344c485f67a7aa080d4e96 | [
"MIT"
] | 2 | 2019-02-13T04:40:10.000Z | 2019-02-14T17:56:09.000Z | db/update.py | msgangwar/Leaderboard | d4cce6a3bb76f6a3c2344c485f67a7aa080d4e96 | [
"MIT"
] | 3 | 2021-02-08T20:28:25.000Z | 2021-06-01T23:21:51.000Z | db/update.py | msgangwar/Leaderboard | d4cce6a3bb76f6a3c2344c485f67a7aa080d4e96 | [
"MIT"
] | 6 | 2019-02-13T04:40:16.000Z | 2020-10-02T05:26:25.000Z | from user import User
from Env import Env_Vars
from fetch_from_sheet import SheetData
from pymongo import MongoClient
from pprint import pprint
# Load deployment settings (the Mongo connection string) from the project's
# environment-config helper.
env_vars = Env_Vars()
MongoURI = env_vars.MongoURI
# Connect to MongoDB on port 27017 and bind the 'users' collection inside the
# 'users' database at import time so the rest of the module can use it.
client = MongoClient(MongoURI, 27017)
db = client['users']
users = db['users']
if __name__ == "__main__":
    # do_update() is defined elsewhere in this module.
    do_update()
a1b1b372ea41556cd122b9d3a8b1aaadf901cbd1 | 1,956 | py | Python | uvicore/http/OBSOLETE/routes-OLD.py | coboyoshi/uvicore | 9cfdeeac83000b156fe48f068b4658edaf51c8de | [
"MIT"
] | 11 | 2021-03-22T22:07:49.000Z | 2022-03-08T16:18:33.000Z | uvicore/http/OBSOLETE/routes-OLD.py | coboyoshi/uvicore | 9cfdeeac83000b156fe48f068b4658edaf51c8de | [
"MIT"
] | 12 | 2021-03-04T05:51:24.000Z | 2021-09-22T05:16:18.000Z | uvicore/http/OBSOLETE/routes-OLD.py | coboyoshi/uvicore | 9cfdeeac83000b156fe48f068b4658edaf51c8de | [
"MIT"
] | 2 | 2021-03-25T14:49:56.000Z | 2021-11-17T23:20:29.000Z |
# @uvicore.service()
# class Routes(RoutesInterface, Generic[R]):
# endpoints: str = None
# @property
# def app(self) -> ApplicationInterface:
# return self._app
# @property
# def package(self) -> PackageInterface:
# return self._package
# @property
# def Router(self) -> R:
# return self._Router
# @property
# def prefix(self) -> str:
# return self._prefix
# def __init__(self,
# app: ApplicationInterface,
# package: PackageInterface,
# Router: R,
# prefix: str
# ):
# self._app = app
# self._package = package
# self._Router = Router
# self._prefix = prefix
# def new_router(self):
# router = self.Router()
# # Add route context into Router
# router.uvicore = Dict({
# 'prefix': self.prefix,
# 'endpoints': self.endpoints,
# })
# return router
# def include(self, module, *, prefix: str = '', tags: List[str] = None) -> None:
# #self.http.controller(controller.route, prefix=self.prefix)
# if type(module) == str:
# # Using a string to point to an endpoint class controller
# controller = load(self.endpoints + '.' + module + '.route')
# uvicore.app.http.include_router(
# controller.object,
# prefix=self.prefix + str(prefix),
# tags=tags,
# )
# else:
# # Passing in an actual router class
# uvicore.app.http.include_router(
# module,
# prefix=self.prefix + str(prefix),
# tags=tags,
# )
# # def Router(self) -> R:
# # return self._Router()
# # IoC Class Instance
# #Routes: RoutesInterface = uvicore.ioc.make('Routes', _Routes)
# # Public API for import * and doc gens
# #__all__ = ['Routes', '_Routes']
| 25.736842 | 85 | 0.528119 |
a1b3738a830ad504560b84aa6870219df1d05595 | 182 | py | Python | tudo/ex052.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | 1 | 2021-07-08T00:35:57.000Z | 2021-07-08T00:35:57.000Z | tudo/ex052.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | null | null | null | tudo/ex052.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | null | null | null | n = int(input('Digite um nmero: '))
if n % 2 == 0 and n % 3 == 0 and n % 5 == 0:
print('{} um nmero primo!'.format(n))
else:
print('{} no um nmero primo!'.format(n))
| 30.333333 | 48 | 0.543956 |
a1b46b1cb092d1e3618170f67ba0443c89c2d63b | 1,684 | py | Python | Firmware/RaspberryPi/backend-pi/PWMController.py | librerespire/ventilator | c0cfa63f1eae23c20d5d72fe52f42785070bbb3d | [
"MIT"
] | 5 | 2020-04-08T12:33:31.000Z | 2021-04-17T15:45:08.000Z | Firmware/RaspberryPi/backend-pi/PWMController.py | cmfsx/ventilator | 996dd5ad5010c19799e03576acf068663276a5e8 | [
"MIT"
] | 7 | 2020-03-27T13:16:09.000Z | 2020-06-24T11:15:59.000Z | Firmware/RaspberryPi/backend-pi/PWMController.py | cmfsx/ventilator | 996dd5ad5010c19799e03576acf068663276a5e8 | [
"MIT"
] | 2 | 2020-09-03T16:29:22.000Z | 2021-01-05T23:17:59.000Z | import threading
import time
import RPi.GPIO as GPIO
import logging
import logging.config
# declare logger parameters
logger = logging.getLogger(__name__)
| 33.68 | 86 | 0.600356 |
a1b53725330b8354a3bae3c9ca65bdec5434db16 | 2,393 | py | Python | netforce_account/netforce_account/models/account_balance.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 27 | 2015-09-30T23:53:30.000Z | 2021-06-07T04:56:25.000Z | netforce_account/netforce_account/models/account_balance.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 191 | 2015-10-08T11:46:30.000Z | 2019-11-14T02:24:36.000Z | netforce_account/netforce_account/models/account_balance.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 32 | 2015-10-01T03:59:43.000Z | 2022-01-13T07:31:05.000Z | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
import time
from netforce.database import get_connection
Balance.register()
| 49.854167 | 211 | 0.732553 |
a1b607b0cbf4748eb3756401b6e1bc4bdb961ebc | 115 | py | Python | ex016.py | Rhodytesla/PythonMundo01 | bac3e1a7ca3934c712423bfc606d16a4ea9af53a | [
"MIT"
] | null | null | null | ex016.py | Rhodytesla/PythonMundo01 | bac3e1a7ca3934c712423bfc606d16a4ea9af53a | [
"MIT"
] | null | null | null | ex016.py | Rhodytesla/PythonMundo01 | bac3e1a7ca3934c712423bfc606d16a4ea9af53a | [
"MIT"
] | null | null | null | import math
a = float(input('insira um valor'))
print('a poro inteira do valor {} {}'.format(a,math.trunc(a))) | 28.75 | 66 | 0.678261 |
a1b6b1c77481492760b6401cbb654aaadb5145b0 | 5,144 | py | Python | models/force_expand.py | DeerKK/Deformable-Modeling | 97b14be152e78f44dd6e783059bc5380a3a74a68 | [
"MIT"
] | 4 | 2020-11-16T16:06:08.000Z | 2022-03-30T03:53:54.000Z | models/force_expand.py | DeerKK/Deformable-Modeling | 97b14be152e78f44dd6e783059bc5380a3a74a68 | [
"MIT"
] | null | null | null | models/force_expand.py | DeerKK/Deformable-Modeling | 97b14be152e78f44dd6e783059bc5380a3a74a68 | [
"MIT"
] | null | null | null | #from data_loader import *
from scipy import signal
import matplotlib.pyplot as plt
import copy
import os
import shutil
import numpy as np
d,fn = data_filter('./', probe_type='point', Xtype='loc',ytype='fn',num_point=94)
print(len(d),len(fn))
plt.plot(np.array(d),np.array(fn),color='b',marker='o',markersize=1)
plt.show() | 37.547445 | 125 | 0.577372 |
a1b6ce12f6da82245af7a016f922874b6b94b4ef | 616 | py | Python | DataStructures Python/parenthesis_matching.py | Kaushik-Pal-2020/DataStructure | 4594e2f6d057db13e45b307d2d42f77e1444bfc1 | [
"MIT"
] | null | null | null | DataStructures Python/parenthesis_matching.py | Kaushik-Pal-2020/DataStructure | 4594e2f6d057db13e45b307d2d42f77e1444bfc1 | [
"MIT"
] | null | null | null | DataStructures Python/parenthesis_matching.py | Kaushik-Pal-2020/DataStructure | 4594e2f6d057db13e45b307d2d42f77e1444bfc1 | [
"MIT"
] | null | null | null | from collections import deque
parenthesis_matching("{[a+b]*[(c-d]/e}")
| 26.782609 | 58 | 0.496753 |
a1b77cdc1daef2b3d3ed0cc366bb55bdefa74e68 | 1,670 | py | Python | hard-gists/7880c101557297beeccda05978aeb278/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/7880c101557297beeccda05978aeb278/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/7880c101557297beeccda05978aeb278/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | # Example of use of Afanasy's API to generate a summary of the state of the
# render farm.
# Copyright (c) 2016 rise|fx (Elie Michel) - Released under MIT License
import af
cmd = af.Cmd()
## Jobs ##
joblist = cmd.getJobList()
job_state_counters = {}
job_count = 0
for job in joblist:
if isSysJob(job):
continue
job_count += 1
for s in job['state'].split():
job_state_counters[s] = job_state_counters.get(s, 0) + 1
print("Out of %d jobs:" % job_count)
print(" * %d are running" % job_state_counters.get('RUN', 0))
print(" * %d have error" % job_state_counters.get('ERR', 0))
print(" * %d are skipped" % job_state_counters.get('SKP', 0))
print(" * %d are off" % job_state_counters.get('OFF', 0))
print(" * %d are ready" % job_state_counters.get('RDY', 0))
print(" * %d are done" % job_state_counters.get('DON', 0))
# Note that the sum may exceed the total number of jobs because a job can have
# several states
print("")
## Renders ##
renderlist = cmd.renderGetList()
render_state_counts = {}
for render in renderlist:
for s in render['state'].split():
render_state_counts[s] = render_state_counts.get(s, 0) + 1
print("Out of %d renders:" % len(renderlist))
print(" * %d are online" % render_state_counts.get('ONL', 0))
print(" * %d are offline" % render_state_counts.get('OFF', 0))
print(" * %d are nimby" % render_state_counts.get('NBY', 0))
print(" * %d are running" % render_state_counts.get('RUN', 0))
print(" * %d are dirty" % render_state_counts.get('DRT', 0))
# Note that the sum may exceed the total number of renders because a render can
# have several states
| 28.305085 | 79 | 0.669461 |
a1b85880b05d9e4a401f9fe16d8f89e466e71f55 | 4,931 | py | Python | cblib/scripts/admin/pack.py | HFriberg/cblib-base | 164a00eb73ef3ac61f5b54f30492209cc69b854b | [
"Zlib"
] | 3 | 2019-06-13T06:57:31.000Z | 2020-06-18T09:58:11.000Z | cblib/scripts/admin/pack.py | HFriberg/cblib-base | 164a00eb73ef3ac61f5b54f30492209cc69b854b | [
"Zlib"
] | 1 | 2019-04-27T18:28:57.000Z | 2019-04-30T17:16:53.000Z | cblib/scripts/admin/pack.py | HFriberg/cblib-base | 164a00eb73ef3ac61f5b54f30492209cc69b854b | [
"Zlib"
] | 3 | 2019-04-30T11:19:34.000Z | 2019-05-31T13:12:17.000Z | # Copyright (c) 2012 by Zuse-Institute Berlin and the Technical University of Denmark.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# Direct execution requires top level directory on python path
if __name__ == "__main__":
import os, sys, inspect
scriptdir = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
packagedir = os.path.realpath(os.path.abspath(os.path.join(scriptdir,'..')))
if packagedir not in sys.path:
sys.path.insert(0, packagedir)
import os, sys, inspect, tarfile, glob, stat, getopt
from data.CBFset import CBFset
from filter import filter
if __name__ == "__main__":
try:
# Verify command line arguments
opts, args = getopt.gnu_getopt(sys.argv[1:], "n:s:a", "filter=")
if len(args) >= 1:
raise Exception('Incorrect usage!')
except Exception as e:
print(str(e))
raise Exception(''.join([
'Incorrect usage, try all instances', '\n',
' python ', sys.argv[0], ' -n cblib', '\n',
'or try all mixed-integer second order cone instances:', '\n',
' python ', sys.argv[0], ' -n cblib-misoco --filter="||int|| and ||cones|so|| and not ||psdcones||"']))
sys.exit(2)
packname = None
filtexpr = ""
setexpr = None
packall = False
for opt, arg in opts:
if opt == '-n':
packname = arg
elif opt == "-s":
setexpr = arg
elif opt == "-a":
packall = True
elif opt == "--filter":
filtexpr = arg
try:
if not packname:
if setexpr and os.path.exists(setexpr) and not os.path.isfile(setexpr):
packname = os.path.basename(setexpr)
if not packname:
packname = os.path.basename(os.path.dirname(setexpr))
else:
raise Exception('No pack name specified!')
print(setexpr)
pack(packname, filtexpr, setexpr, packall)
except Exception as e:
print(str(e))
| 35.992701 | 114 | 0.666396 |
a1b91c2b6aa90638bdb1249031654f84dc1518e8 | 35,353 | py | Python | FAEGUI/VisualizationConnection.py | Eggiverse/FAE | 1b953ba6dfcced83e5929eeaa8f525ec4acde5ed | [
"MIT"
] | null | null | null | FAEGUI/VisualizationConnection.py | Eggiverse/FAE | 1b953ba6dfcced83e5929eeaa8f525ec4acde5ed | [
"MIT"
] | null | null | null | FAEGUI/VisualizationConnection.py | Eggiverse/FAE | 1b953ba6dfcced83e5929eeaa8f525ec4acde5ed | [
"MIT"
] | null | null | null | from copy import deepcopy
import os
import re
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui
from GUI.Visualization import Ui_Visualization
from FAE.FeatureAnalysis.Classifier import *
from FAE.FeatureAnalysis.FeaturePipeline import FeatureAnalysisPipelines, OnePipeline
from FAE.Description.Description import Description
from FAE.Visualization.DrawROCList import DrawROCList
from FAE.Visualization.PlotMetricVsFeatureNumber import DrawCurve, DrawBar
from FAE.Visualization.FeatureSort import GeneralFeatureSort, SortRadiomicsFeature
from Utility.EcLog import eclog
| 50.21733 | 148 | 0.633808 |
a1b98e7fe17a60a91fcb8684f5329153681b1123 | 1,779 | py | Python | bookstore/management/commands/makeratings.py | mirko-lelansky/booksite | f3bcab93a4d9382ed43adaba4b04202333fe4a86 | [
"Apache-2.0"
] | null | null | null | bookstore/management/commands/makeratings.py | mirko-lelansky/booksite | f3bcab93a4d9382ed43adaba4b04202333fe4a86 | [
"Apache-2.0"
] | null | null | null | bookstore/management/commands/makeratings.py | mirko-lelansky/booksite | f3bcab93a4d9382ed43adaba4b04202333fe4a86 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Mirko Lelansky <mlelansky@mail.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand, CommandError
from bookstore.models import Book, Rating
import random
import threading
| 35.58 | 99 | 0.670039 |
a1bbcc80b20916c2b274dcf7f69fc4ce858c7f88 | 735 | py | Python | secondstate/converters.py | fruiti-ltd/secondstate | 81fe6916b92c7024372a95f0eb9d50f6275dfc69 | [
"BSD-3-Clause"
] | 1 | 2021-05-28T23:02:08.000Z | 2021-05-28T23:02:08.000Z | secondstate/converters.py | fruiti-ltd/secondstate | 81fe6916b92c7024372a95f0eb9d50f6275dfc69 | [
"BSD-3-Clause"
] | null | null | null | secondstate/converters.py | fruiti-ltd/secondstate | 81fe6916b92c7024372a95f0eb9d50f6275dfc69 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2021, Fruiti Limited
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
| 29.4 | 71 | 0.771429 |
a1bd442cb66a1c8f82b5b33378ae612201ae99f7 | 5,313 | py | Python | Write.py | yukiii-zhong/HandMovementTracking | d39c65ca83862d97c4589dde616c1d8a586a033c | [
"MIT"
] | 1 | 2019-04-09T17:24:49.000Z | 2019-04-09T17:24:49.000Z | Write.py | yukiii-zhong/HandMovementTracking | d39c65ca83862d97c4589dde616c1d8a586a033c | [
"MIT"
] | null | null | null | Write.py | yukiii-zhong/HandMovementTracking | d39c65ca83862d97c4589dde616c1d8a586a033c | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import argparse
from collections import deque
import keyboard as kb
import time
from pynput.keyboard import Key, Controller, Listener
sm_threshold = 100
lg_threshold = 200
guiding = True
keyboard = Controller()
cap = cv2.VideoCapture(0)
pts = deque(maxlen=64)
Lower_green = np.array([110, 50, 50])
Upper_green = np.array([130, 255, 255])
startPoint =endPoint = points(0,0)
recentPoints = deque()
# counter = 0
# prev_x = 0
# prev_y = 0
while True:
if kb.is_pressed('q'):
guiding = False
if kb.is_pressed('w'):
guiding = True
ret, img = cap.read()
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.inRange(hsv, Lower_green, Upper_green)
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
mask = cv2.dilate(mask, kernel, iterations=1)
res = cv2.bitwise_and(img, img, mask=mask)
cnts, heir = cv2.findContours(
mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
center = None
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# Added code
recentPoints.append(points(x,y))
if len(recentPoints)>16:
recentPoints.popleft()
if len(recentPoints) == 16:
min_X = min([p.x for p in recentPoints])
max_X = max([p.x for p in recentPoints])
min_Y = min([p.y for p in recentPoints])
max_Y = max([p.y for p in recentPoints])
if max_X-min_X <= sm_threshold or max_Y-min_Y<=sm_threshold:
# EndPoint as average of recentPoints
# endPoint_X = sum([p.x for p in recentPoints])/len(recentPoints)
# endPoint_Y = sum([p.y for p in recentPoints])/ len(recentPoints)
# endPoint = points(endPoint_X, endPoint_Y)
endPoint = points(x,y)
if abs(startPoint.x-endPoint.x)*0.625 > abs(startPoint.y- endPoint.y):
if startPoint.x - endPoint.x > lg_threshold:
print('right')
keyboard.press(Key.right)
keyboard.release(Key.right)
startPoint = endPoint
recentPoints = deque()
elif startPoint.x - endPoint.x < -lg_threshold:
print('left')
keyboard.press(Key.left)
keyboard.release(Key.left)
startPoint = endPoint
recentPoints = deque()
else:
if startPoint.y - endPoint.y > lg_threshold*0.625 :
print('up')
keyboard.press(Key.up)
keyboard.release(Key.up)
startPoint = endPoint
recentPoints = deque()
elif startPoint.y - endPoint.y < -lg_threshold*0.625:
print('down')
keyboard.press(Key.down)
keyboard.release(Key.down)
startPoint = endPoint
recentPoints = deque()
#print(x, y)
# time.sleep(0.1)
# counter += 1
# if counter == 32:
# prev_x = x
# prev_y = y
# if counter > 32:
# if abs(x - prev_x) > abs(y - prev_y):
# if x - prev_x > 100:
# print('left')
# keyboard.press(Key.left)
# keyboard.release(Key.left)
# # time.sleep(0.7)
# counter = 0
# elif x - prev_x < -100:
# print('right')
# keyboard.press(Key.right)
# keyboard.release(Key.right)
# counter = 0
# else:
# if y - prev_y > 100:
# print('down')
# keyboard.press(Key.down)
# keyboard.release(Key.down)
# counter = 0
# # time.sleep(0.7)
# elif y - prev_y < -100:
# print('up')
# keyboard.press(Key.up)
# keyboard.release(Key.up)
# counter = 0
# # time.sleep(0.7)
if radius > 5:
cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
cv2.circle(img, center, 5, (0, 0, 255), -1)
pts.appendleft(center)
for i in range(1, len(pts)):
if pts[i - 1]is None or pts[i] is None:
continue
thick = int(np.sqrt(len(pts) / float(i + 1)) * 2.5)
cv2.line(img, pts[i - 1], pts[i], (0, 0, 225), thick)
cv2.imshow("Frame", img)
# cv2.imshow("mask",mask)
# cv2.imshow("res",res)
k = cv2.waitKey(1) & 0xFF
if k == ord("p"):
break
# cleanup the camera and close any open windows
cap.release()
cv2.destroyAllWindows()
| 31.070175 | 82 | 0.499529 |
a1be04a80f83b1938545b09a34c0a9a1cda47ace | 1,285 | py | Python | server/newsWebsite/models.py | thiagobrez/newsWebsite | 130f01d29dd776eaa096080982274bb27d19ad8f | [
"MIT"
] | null | null | null | server/newsWebsite/models.py | thiagobrez/newsWebsite | 130f01d29dd776eaa096080982274bb27d19ad8f | [
"MIT"
] | 7 | 2020-09-07T18:44:00.000Z | 2022-02-10T19:05:41.000Z | server/newsWebsite/models.py | thiagobrez/newsWebsite | 130f01d29dd776eaa096080982274bb27d19ad8f | [
"MIT"
] | null | null | null | from django.db import models
| 31.341463 | 131 | 0.737743 |
a1be89c5fd04670493098c48a1472acc032f85c5 | 319 | py | Python | Python for Everybody/Using Python to Access Web Data/Assignments/Regular Expression/Finding_Numbers_in_a_Haystack.py | lynnxlmiao/Coursera | 8dc4073e29429dac14998689814388ee84435824 | [
"MIT"
] | null | null | null | Python for Everybody/Using Python to Access Web Data/Assignments/Regular Expression/Finding_Numbers_in_a_Haystack.py | lynnxlmiao/Coursera | 8dc4073e29429dac14998689814388ee84435824 | [
"MIT"
] | null | null | null | Python for Everybody/Using Python to Access Web Data/Assignments/Regular Expression/Finding_Numbers_in_a_Haystack.py | lynnxlmiao/Coursera | 8dc4073e29429dac14998689814388ee84435824 | [
"MIT"
] | null | null | null | import re
data = open('regex_sum_46353.txt')
numlist = list()
for line in data:
line = line.rstrip()
integers = re.findall('[0-9]+', line)
if len(integers) < 1: continue
for i in range(len(integers)):
num = float(integers[i])
numlist.append(num)
num_sum = sum(numlist)
print (num_sum)
| 21.266667 | 41 | 0.630094 |
a1be9584512b198578c74cac68370142c4a6feeb | 121 | py | Python | tuinwolk/server/daemons/tuinwolk_daemon.py | TuinfeesT/TuinWolk | 0af0321948f4f573d8eb5ad1b87ea42bfa6644e1 | [
"MIT"
] | 1 | 2017-09-08T02:34:22.000Z | 2017-09-08T02:34:22.000Z | tuinwolk/server/daemons/tuinwolk_daemon.py | TuinfeesT/TuinWolk | 0af0321948f4f573d8eb5ad1b87ea42bfa6644e1 | [
"MIT"
] | null | null | null | tuinwolk/server/daemons/tuinwolk_daemon.py | TuinfeesT/TuinWolk | 0af0321948f4f573d8eb5ad1b87ea42bfa6644e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import daemon
| 13.444444 | 36 | 0.719008 |
a1bec1b04d0a00857461f68a4976f6de5f19b088 | 7,205 | py | Python | plugins/mobile_app.py | alustig/OSPi | d3cb0d70d19359daba1265dcb3bf09e87847d214 | [
"CC-BY-3.0"
] | null | null | null | plugins/mobile_app.py | alustig/OSPi | d3cb0d70d19359daba1265dcb3bf09e87847d214 | [
"CC-BY-3.0"
] | null | null | null | plugins/mobile_app.py | alustig/OSPi | d3cb0d70d19359daba1265dcb3bf09e87847d214 | [
"CC-BY-3.0"
] | null | null | null | import json
import time
import datetime
import string
import calendar
from helpers import get_cpu_temp, check_login, password_hash
import web
import gv # Gain access to ospi's settings
from urls import urls # Gain access to ospi's URL list
from webpages import ProtectedPage, WebPage
##############
## New URLs ##
urls.extend([
'/jo', 'plugins.mobile_app.options',
'/jc', 'plugins.mobile_app.cur_settings',
'/js', 'plugins.mobile_app.station_state',
'/jp', 'plugins.mobile_app.program_info',
'/jn', 'plugins.mobile_app.station_info',
'/jl', 'plugins.mobile_app.get_logs',
'/sp', 'plugins.mobile_app.set_password'])
#######################
## Class definitions ##
def utc_to_local(utc_dt):
# get integer timestamp to avoid precision lost
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.datetime.fromtimestamp(timestamp)
assert utc_dt.resolution >= datetime.timedelta(microseconds=1)
return local_dt.replace(microsecond=utc_dt.microsecond)
| 33.511628 | 158 | 0.498959 |
a1beca2a104dc1445d55be605545d5222ed38310 | 4,427 | py | Python | utils/iroha.py | LiTrans/BSMD | 2a5660de5a4a5d49d24df4c78469b55f2be5a2d3 | [
"Apache-2.0"
] | 1 | 2021-02-09T16:11:10.000Z | 2021-02-09T16:11:10.000Z | utils/iroha.py | LiTrans/BSMD | 2a5660de5a4a5d49d24df4c78469b55f2be5a2d3 | [
"Apache-2.0"
] | 13 | 2019-11-20T17:23:41.000Z | 2022-03-12T00:47:53.000Z | utils/iroha.py | LiTrans/BSMD | 2a5660de5a4a5d49d24df4c78469b55f2be5a2d3 | [
"Apache-2.0"
] | 1 | 2020-01-20T04:18:08.000Z | 2020-01-20T04:18:08.000Z | """
.. _Iroha:
Iroha
=====
Functions to post transactions in the iroha implementation of the BSMD
"""
from iroha import IrohaCrypto, Iroha, IrohaGrpc
import binascii
import sys
if sys.version_info[0] < 3:
raise Exception('Python 3 or a more recent version is required.')
# Transactions request iroha
def trace(func):
"""
A decorator for tracing methods' begin/end execution points
"""
return tracer
# #################################
# functions available to all users
# #################################
def set_detail_to_node(sender, receiver, private_key, detail_key, detail_value, domain, ip):
"""
This function can be use when the User object is no available. The sender must have permission to write in the
details of the receiver.
In federated learning the details are in JSON format and contains the address (location) where the weight is stored
if the weight is small enough it can be embedded to the block if needed)
:Example:
>>> set_detail_to_node('David', 'Juan', 'private key of david', 'detail key of Juan', 'detail value', 'domain' \
'ip')
:param str sender: Name of the node sending the information
:param str receiver: Name of the node receiving the information
:param str private_key: Private key of the user
:param str detail_key: Name of the detail we want to set
:param str detail_value: Value of the detail
:param str domain: Name of the domain
:param str ip: address for connecting to the BSMD
"""
account = sender + '@' + domain
iroha = Iroha(account)
account_id = receiver + '@' + domain
ip_address = ip + ':50051'
network = IrohaGrpc(ip_address)
tx = iroha.transaction([
iroha.command('SetAccountDetail',
account_id=account_id,
key=detail_key,
value=detail_value)
])
IrohaCrypto.sign_transaction(tx, private_key)
send_transaction_and_print_status(tx, network)
def get_a_detail_written_by(name, writer, private_key, detail_key, domain, ip):
"""
This function can be use when the User object is no available. Consult a details of the node writen by other node
:Example:
>>> juan_detail = get_a_detail_written_by('David', 'Juan', 'private key of david', 'detail_key of Juan', 'domain', \
'ip')
>>> print(juan_detail)
{
"nodeA@domain":{
"Age":"35"
}
:param str name: Name of the node consulting the information
:param str writer: Name of the node who write the detail
:param str private_key: Private key of the user
:param str detail_key: Name of the detail we want to consult
:param str domain: Name of the domain
:param str ip: Address for connecting to the BSMD
:return: returns the detail writen by "the writer"
:rtype: json
"""
account_id = name + '@' + domain
user_id = writer + '@' + domain
iroha = Iroha(account_id)
ip_address = ip + ':50051'
network = IrohaGrpc(ip_address)
query = iroha.query('GetAccountDetail',
account_id=account_id,
key=detail_key,
writer=user_id)
IrohaCrypto.sign_query(query, private_key)
response = network.send_query(query)
data = response.account_detail_response
print('Account id = {}, details = {}'.format(account_id, data.detail))
return data.detail
| 35.416 | 120 | 0.664558 |
a1bee1ce9e04568e61c5f5c3e54c374e370eb72e | 1,068 | py | Python | tibanna_cgap/lambdas/start_run.py | 4dn-dcic/tibanna_ff | 6fcfc056b832c14500e525207afeb5722f366a26 | [
"MIT"
] | 2 | 2019-10-08T17:36:02.000Z | 2019-10-08T18:42:05.000Z | tibanna_cgap/lambdas/start_run.py | 4dn-dcic/tibanna_ff | 6fcfc056b832c14500e525207afeb5722f366a26 | [
"MIT"
] | null | null | null | tibanna_cgap/lambdas/start_run.py | 4dn-dcic/tibanna_ff | 6fcfc056b832c14500e525207afeb5722f366a26 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# import json
from tibanna_ffcommon.exceptions import exception_coordinator
from tibanna_cgap.start_run import start_run
from tibanna_cgap.vars import AWS_REGION, LAMBDA_TYPE
config = {
'function_name': 'start_run_' + LAMBDA_TYPE,
'function_module': 'service',
'function_handler': 'handler',
'handler': 'service.handler',
'region': AWS_REGION,
'runtime': 'python3.6',
'role': 'lambda_full_s3',
'description': 'Tibanna zebra start_run',
'timeout': 300,
'memory_size': 256
}
| 28.105263 | 82 | 0.713483 |
a1bf1dc46f3a24ddc127c89f233fb631f8cdaefb | 3,474 | py | Python | Amplo/Observation/_model_observer.py | Amplo-GmbH/AutoML | eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606 | [
"MIT"
] | 5 | 2022-01-07T13:34:37.000Z | 2022-03-17T06:40:28.000Z | Amplo/Observation/_model_observer.py | Amplo-GmbH/AutoML | eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606 | [
"MIT"
] | 5 | 2022-03-22T13:42:22.000Z | 2022-03-31T16:20:44.000Z | Amplo/Observation/_model_observer.py | Amplo-GmbH/AutoML | eb6cc83b6e4a3ddc7c3553e9c41d236e8b48c606 | [
"MIT"
] | 1 | 2021-12-17T22:41:11.000Z | 2021-12-17T22:41:11.000Z | # Copyright by Amplo
"""
Observer for checking production readiness of model.
This part of code is strongly inspired by [1].
References
----------
[1] E. Breck, C. Shanging, E. Nielsen, M. Salib, D. Sculley (2017).
The ML test score: A rubric for ML production readiness and technical debt
reduction. 1123-1132. 10.1109/BigData.2017.8258038.
"""
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from Amplo.Observation.base import PipelineObserver
from Amplo.Observation.base import _report_obs
__all__ = ["ModelObserver"]
| 36.957447 | 78 | 0.670409 |
a1c01c9ff8dac8f635383495ea6d6042923c0487 | 2,849 | py | Python | mini projects/school_manager.py | Tryst480/python-tutorial | 056803f185b9cf31235fdfc221a3a490c353cd70 | [
"MIT"
] | null | null | null | mini projects/school_manager.py | Tryst480/python-tutorial | 056803f185b9cf31235fdfc221a3a490c353cd70 | [
"MIT"
] | null | null | null | mini projects/school_manager.py | Tryst480/python-tutorial | 056803f185b9cf31235fdfc221a3a490c353cd70 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This is gonna be up to you. But basically I envisioned a system where you have a students in a classroom. Where the
# classroom only has information, like who is the teacher, how many students are there. And it's like an online class,
# so students don't know who their peers are, or who their teacher is, but can do things like study, and take test and
# stuff. Etc. But get used to how objects interact with each other and try to call stuff from other places while being
# commanded all in main():
if __name__ == '__main__':
classroom = ClassRoom()
teacher = Teacher('Doctor Jones')
mike = Student('Mike')
sally = Student('Sally', laziness=1)
lebron = Student('Lebron', laziness=10)
# TODO: Assign a teacher to the classroom and add the students to the classroom. Then make the students study
# TODO: Make Students to homework, etc, exams, then pass or fail them, etc. Play around with it.
| 36.525641 | 119 | 0.67708 |
a1c0267af0e6d173981f4b35aa1b64d0f75f58d2 | 1,650 | py | Python | hparams.py | ishine/EmotionControllableTextToSpeech | 5dcf8afe6a0c1b8d612d6f1d8de315cf419fe594 | [
"MIT"
] | 12 | 2021-07-10T05:18:31.000Z | 2022-03-22T01:04:41.000Z | hparams.py | ishine/EmotionControllableTextToSpeech | 5dcf8afe6a0c1b8d612d6f1d8de315cf419fe594 | [
"MIT"
] | null | null | null | hparams.py | ishine/EmotionControllableTextToSpeech | 5dcf8afe6a0c1b8d612d6f1d8de315cf419fe594 | [
"MIT"
] | 3 | 2021-06-12T05:34:41.000Z | 2022-03-15T06:44:55.000Z | import os
cleaners = 'korean_cleaners'
audio_data_path = os.path.join("/cb_im/datasets/", dataset)
data_path = '/home/prml/hs_oh/dataset/emotion_korea/'
duration_path = "/home/prml/jihyun/dataset/duration_all/duration"
strength_path = "/home/prml/hs_oh/dataset/emotion_strength"
# Text
text_cleaners = ['korean_cleaners']
# Audio and mel
### Emotion Korea ###
sampling_rate = 22050
filter_length = 1024
hop_length = 256
win_length = 1024
max_wav_value = 32768.0
n_mel_channels = 80
mel_fmin = 0
mel_fmax = 8000
f0_min = 71.0
f0_max = 792.8
energy_min = 0.0
energy_max = 283.72
# FastSpeech 2
encoder_layer = 4
encoder_head = 2
encoder_hidden = 256
decoder_layer = 4
decoder_head = 2
decoder_hidden = 256
fft_conv1d_filter_size = 1024
fft_conv1d_kernel_size = (9, 1)
encoder_dropout = 0.2
decoder_dropout = 0.2
variance_predictor_filter_size = 256
variance_predictor_kernel_size = 3
variance_predictor_dropout = 0.5
max_seq_len = 10000
# Checkpoints and synthesis path
preprocessed_path = os.path.join("/home/prml/hs_oh/dataset/", "emotion_korea")
checkpoint_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "cp")
eval_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "eval")
log_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "log")
test_path = os.path.join("/home/prml/hs_oh/checkpoints/FastSpeech2/", "test")
# Optimizer
batch_size = 48
epochs = 1000
n_warm_up_step = 4000
grad_clip_thresh = 1.0
acc_steps = 1
betas = (0.9, 0.98)
eps = 1e-9
weight_decay = 0.
total_step = 100000
# Save, log and synthesis
save_step = 5000
eval_step = 500
eval_size = 256
log_step = 10
clear_Time = 20
| 22.297297 | 81 | 0.758788 |
a1c0825b266bca976c211fbcfde48bbcb725afd2 | 1,083 | py | Python | run_tests.py | dannybrowne86/django-ajax-uploader | 741213e38e9532dd83d8040af17169da9d610660 | [
"BSD-3-Clause"
] | 75 | 2015-02-09T22:49:57.000Z | 2021-01-31T23:47:39.000Z | run_tests.py | dannybrowne86/django-ajax-uploader | 741213e38e9532dd83d8040af17169da9d610660 | [
"BSD-3-Clause"
] | 13 | 2015-02-27T03:01:30.000Z | 2020-11-18T10:11:53.000Z | run_tests.py | dannybrowne86/django-ajax-uploader | 741213e38e9532dd83d8040af17169da9d610660 | [
"BSD-3-Clause"
] | 29 | 2015-02-09T22:50:16.000Z | 2019-12-25T06:41:43.000Z | # from https://github.com/django-extensions/django-extensions/blob/master/run_tests.py
from django.conf import settings
from django.core.management import call_command
if __name__ == '__main__':
main()
| 29.27027 | 86 | 0.600185 |
a1c0c279a861dff85fe4f00eb7ae86cd441ba20b | 7,275 | py | Python | shor.py | rodamber/cps | b78aa7756d24b91476f31b538f51508e2dee48b3 | [
"MIT"
] | null | null | null | shor.py | rodamber/cps | b78aa7756d24b91476f31b538f51508e2dee48b3 | [
"MIT"
] | null | null | null | shor.py | rodamber/cps | b78aa7756d24b91476f31b538f51508e2dee48b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Simulation of Shor's algorithm for integer factorization."""
import cmath
import math
import numpy as np
import random
def hadamard(mem):
"""Apply the Hadamard gate to the first t qubits. After this
application, the memory is in a quantum superposition where the
measuring probability is equidistributed between the first t qubits."""
for i, (_, fst, lst) in enumerate(mem):
if lst == 0: # The last n qubits remain in state |0>
mem.amplitudes[i] = 1 / math.sqrt(2**mem.t)
return mem
def mod_exp(mem, x, N):
"""Apply the operator |j, k> |-> |j, k + x^j mod N>. However, in Shor's
algorithm k = 0, so we just apply the modular exponentiation."""
for i, (_, fst, lst) in enumerate(mem):
mem.lst[i] = pow(x, fst, N)
return mem
def qft(mem):
"""Apply quantum Fourier transform to the first t qubits."""
new_amplitudes = []
N = 2**mem.t
# Calculate root of unity in two steps, as complex exponentiation is
# expensive.
w__ = cmath.exp(2 * math.pi * 1j / N)
for k, _ in enumerate(mem):
s = 0
for j in range(N):
wjk = w__**(j * k)
s += wjk * mem.amplitudes[j]
new_amplitudes.append(s / math.sqrt(N))
mem.amplitudes = new_amplitudes
return mem
def denominator(x, qmax):
    """Return the denominator q of the best rational approximation p/q
    for *x* subject to q < qmax.

    Uses the continued-fraction expansion of x, tracking successive
    convergent denominators until either the remaining fractional part
    is negligible or the next denominator would reach qmax.
    """
    value = x
    prev_den, cur_den = 0, 1
    while True:
        frac = value - math.floor(value)  # fractional part of value
        # Remainder too small to matter at this precision: done.
        if frac < 0.5 / qmax**2:
            return cur_den
        value = 1 / frac
        # Standard convergent recurrence: q_{k} = a_k * q_{k-1} + q_{k-2}.
        nxt_den = math.floor(value) * cur_den + prev_den
        if nxt_den >= qmax:
            return cur_den
        prev_den, cur_den = cur_den, nxt_den
def shor(N, a):
    """Simulation of Shor's algorithm for order finding.

    Repeats the quantum order-finding circuit until a measurement yields
    a q with a^q = 1 (mod N), then returns that q.  Loops indefinitely
    otherwise (never returns None).

    NOTE(review): depends on a QuMem class (constructor, measure()) that
    is not visible in this module chunk -- confirm its interface.
    """
    assert 1 < a < N
    while True:
        n = N.bit_length()
        t = math.ceil(math.log(N**2, 2)) # s.t. N^2 <= 2^t < 2N^2
        # Quantum register: t counting qubits + n work qubits.
        mem = QuMem(t, n)
        hadamard(mem)
        mod_exp(mem, a, N)
        qft(mem)
        measure = mem.measure()
        if measure == 0:
            print("| measured zero, trying again ...")
        else:
            # measure/2^t approximates s/r; recover r via continued fractions.
            c = measure / 2**t
            q = denominator(c, N)
            p = math.floor(q * c + 0.5)
            print("| measured {}, approximation for {} is {}/{}"
                  .format(measure, c, p, q))
            # Verify the candidate order classically.
            mod = pow(a, q, N)
            print("| {}^{} mod {} = {}".format(a, q, N, mod))
            if mod == 1:
                print("| got {}".format(q))
                return q
            else:
                print("| failed, trying again ...")
def prime(n):
    """Return True iff *n* is prime (trial division by odd candidates).

    Checks divisibility only by 2 and by odd numbers up to sqrt(n).
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2  # 2 is the only even prime
    limit = math.ceil(math.sqrt(n)) + 1
    return all(n % d != 0 for d in range(3, limit, 2))
def odd_prime_power(n):
    """Test if n is a power of an odd prime.

    Only composite powers (exponent >= 2) are detected: the search for a
    base only scans divisors up to sqrt(n), so a bare prime n itself
    returns False (callers test primality separately).
    """
    if n < 3:
        return False
    # Smallest odd divisor > 1; it is necessarily prime if it exists.
    base = next(
        (d for d in range(3, math.ceil(math.sqrt(n)) + 1, 2) if n % d == 0),
        0,
    )
    if not base:
        return False
    # n is a power of `base` iff base**e hits n for some feasible exponent.
    max_exp = math.ceil(math.log(n, base)) + 1
    return any(base**e == n for e in range(2, max_exp))
def factorize(N):
    """Applies Shor's algorithm to the problem of integer factorization.

    Handles the classically-easy cases (even, prime, power of an odd
    prime) with a diagnostic print and an implicit None return; otherwise
    repeatedly runs order finding until a non-trivial factor of N is
    found and returned.
    """
    assert N > 1
    if N % 2 == 0:
        print(N, "is even")
    elif prime(N):
        print(N, "is prime")
    elif odd_prime_power(N):
        print(N, "is a power of an odd prime")
    else:
        while True:
            # Pick a random base; a shared factor with N is a free win.
            a = random.randint(2, N - 1)
            d = math.gcd(a, N)
            print("| picked random a =", a)
            if d != 1:
                print("| got lucky, {} = {} * {}, trying again...".format(
                    N, d, N // d))
                print("|---------------------------------------------")
            else:
                r = shor(N, a)  # order of a modulo N
                if r is None:
                    print("| trying again ...")
                    print("|-----------------------------------------------")
                    continue
                # BUG FIX: the classical post-processing needs
                # y = a^(r/2) mod N, not r // 2 itself.  The subsequent
                # 1 < y < N - 1 test (y != +-1 mod N) and the
                # gcd(y +- 1, N) computations only make sense for that
                # value; the original `y = r // 2` produced garbage.
                y = pow(a, r // 2, N)
                if r % 2 == 1:
                    print("| order {} is odd, trying again ...".format(r))
                    print("|-----------------------------------------------")
                elif not 1 < y < N - 1:
                    print("| 1 < {} < {} - 1 is false, trying again".format(
                        y, N))
                    print("|-----------------------------------------------")
                else:
                    # y^2 = 1 (mod N) with y != +-1, so gcd(y -+ 1, N)
                    # yields a non-trivial factor.
                    factor = max(math.gcd(y - 1, N), math.gcd(y + 1, N))
                    if factor == 1:
                        print("| factor is one, trying again ...")
                        print("|---------------------------------------------")
                    else:
                        print("| found factor: {} = {} * {}".format(
                            N, factor, N // factor))
                        return factor
# CLI entry point: factor the integer given as the sole argument.
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print("USAGE: shor.py <input>")
    else:
        print(factorize(int(sys.argv[1])))
| 30.567227 | 79 | 0.479175 |
a1c2d77e61f6bdb0c438878369cd53216104adca | 365 | py | Python | Mundo2/lerSexo.py | DanieleMagalhaes/Exercicios-Python | 394c68e8f06a10ec16539addd888960d11d1318f | [
"MIT"
] | null | null | null | Mundo2/lerSexo.py | DanieleMagalhaes/Exercicios-Python | 394c68e8f06a10ec16539addd888960d11d1318f | [
"MIT"
] | null | null | null | Mundo2/lerSexo.py | DanieleMagalhaes/Exercicios-Python | 394c68e8f06a10ec16539addd888960d11d1318f | [
"MIT"
] | null | null | null | print('-'*60)
print('\33[35m[ F ] Feminino\33[m \n\33[32m[ M ] Masculino\33[m \n ')
sexo = str(input('Qual o seu sexo? ')).strip().upper()[0] # s pega a primeira letra
while sexo not in 'MF':
sexo = str(input('\33[31mDados invlidos.\33[m Por favor, informe seu sexo: ')).strip().upper()[0]
print('\nSexo {} registrado com sucesso!'.format(sexo))
print('-'*60) | 52.142857 | 102 | 0.641096 |
a1c39f0658624fc259de69a62271fcd6a8ae59fa | 2,858 | py | Python | src/wordmain.py | keyurmodh00/SimpleHTR | 8031ae481d396714f555bcc0c4cbb23846404a1f | [
"MIT"
] | null | null | null | src/wordmain.py | keyurmodh00/SimpleHTR | 8031ae481d396714f555bcc0c4cbb23846404a1f | [
"MIT"
] | null | null | null | src/wordmain.py | keyurmodh00/SimpleHTR | 8031ae481d396714f555bcc0c4cbb23846404a1f | [
"MIT"
] | null | null | null | import os
import cv2
from WordSegmentation import wordSegmentation, prepareImg
import json
import editdistance
from path import Path
from DataLoaderIAM import DataLoaderIAM, Batch
from Model import Model, DecoderType
from SamplePreprocessor import preprocess
import argparse
import tensorflow as tf
def infer(model, fnImg):
    """Recognize text in the image at path *fnImg* using *model* and append
    the recognized word (plus a trailing space) to the output file.

    NOTE(review): the output file is not closed on exception and the path
    is hard-coded -- consider a `with open(...)` block.
    """
    # Grayscale-load and normalize the image to the model's input size.
    img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)
    batch = Batch(None, [img])
    # Second argument True requests per-item probabilities as well.
    (recognized, probability) = model.inferBatch(batch, True)
    print(f'Recognized: "{recognized[0]}"')
    print(f'Probability: {probability[0]}')
    apex=open("D:/SimpleHTR/data/output.txt","a")
    apex.write(recognized[0]+" ")
    apex.close()
def main():
    """reads images from data/ and outputs the word-segmentation to out/"""
    # read input images from 'in' directory
    imgFiles = os.listdir('D:/SimpleHTR/input/')
    for (i,f) in enumerate(imgFiles):
        print('Segmenting words of sample %s'%f)
        # read image, prepare it by resizing it to fixed height and converting it to grayscale
        img = prepareImg(cv2.imread('D:/SimpleHTR/input/%s'%f), 50)
        # execute segmentation with given parameters
        # -kernelSize: size of filter kernel (odd integer)
        # -sigma: standard deviation of Gaussian function used for filter kernel
        # -theta: approximated width/height ratio of words, filter function is distorted by this factor
        # - minArea: ignore word candidates smaller than specified area
        res = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100)
        # write output to 'out/inputFileName' directory
        '''if not os.path.exists('D:/SimpleHTR/out/%s'%f):
            os.mkdir('D:/SimpleHTR/out/%s'%f)'''
        # iterate over all segmented words
        print('Segmented into %d words'%len(res))
        for (j, w) in enumerate(res):
            (wordBox, wordImg) = w
            # NOTE(review): this tuple unpack shadows the loop variable `w`
            # (the word entry) with the box width -- confirm intentional.
            (x, y, w, h) = wordBox
            cv2.imwrite('D:/SimpleHTR/data/test.png', wordImg) # save word
            cv2.rectangle(img,(x,y),(x+w,y+h),0,1) # draw bounding box in summary image
            # NOTE(review): this os.path.join result is discarded -- dead code?
            os.path.join(os.path.dirname('D:/SimpleHTR/src/main.py'))
            tf.compat.v1.reset_default_graph()
            # NOTE(review): re-running main.py via exec() per word is very
            # slow and fragile -- presumably it recognizes data/test.png.
            exec(open('main.py').read())
        # output summary image with bounding boxes around words
        cv2.imwrite('D:/SimpleHTR/data/summary.png', img)
        apex = open("D:/SimpleHTR/data/output.txt","a")
        apex.write("\n")
        apex.close()
a1c3f7d64e7c7bb239f38c4ddad996fb0bfe247f | 4,746 | py | Python | asrtoolkit/data_structures/audio_file.py | greenkeytech/greenkey-asrtoolkit | f9a5990ee5c67b85dd8ff763777c986b03252ee5 | [
"Apache-2.0"
] | 31 | 2019-08-03T08:42:37.000Z | 2022-01-12T18:00:11.000Z | asrtoolkit/data_structures/audio_file.py | greenkeytech/greenkey-asrtoolkit | f9a5990ee5c67b85dd8ff763777c986b03252ee5 | [
"Apache-2.0"
] | 28 | 2019-07-29T17:58:17.000Z | 2021-08-20T14:30:25.000Z | asrtoolkit/data_structures/audio_file.py | greenkeytech/greenkey-asrtoolkit | f9a5990ee5c67b85dd8ff763777c986b03252ee5 | [
"Apache-2.0"
] | 12 | 2019-07-29T13:16:41.000Z | 2022-02-20T21:19:35.000Z | #!/usr/bin/env python
"""
Module for holding information about an audio file and doing basic conversions
"""
import hashlib
import logging
import os
import subprocess
from asrtoolkit.file_utils.name_cleaners import (
generate_segmented_file_name,
sanitize_hyphens,
strip_extension,
)
from asrtoolkit.file_utils.script_input_validation import valid_input_file
LOGGER = logging.getLogger()
def cut_utterance(
    source_audio_file, target_audio_file, start_time, end_time, sample_rate=16000
):
    """
    source_audio_file: str, path to file
    target_audio_file: str, path to file
    start_time: float or str
    end_time: float or str
    sample_rate: int, default 16000; audio sample rate in Hz

    uses sox to segment source_audio_file to create target_audio_file that
    contains audio from start_time to end_time
    with audio sample rate set to sample_rate
    """
    # Shell out to sox: resample, force 16-bit mono, then trim start..end.
    # NOTE(review): shell=True with interpolated paths -- filenames containing
    # spaces or shell metacharacters will break or be interpreted by the
    # shell; a list argv with shell=False would be safer.
    subprocess.call(
        "sox -V1 {} -r {} -b 16 -c 1 {} trim {} ={}".format(
            source_audio_file,
            sample_rate,
            target_audio_file,
            start_time,
            end_time,
        ),
        shell=True,
    )
def degrade_audio(source_audio_file, target_audio_file=None):
    """
    Degrades audio to typical G711 level.
    Useful if models need to target this audio quality.

    source_audio_file: str, path to an mp3/sph/wav/au/raw file
    target_audio_file: str or None; defaults to overwriting the source

    Pipeline: 8 kHz a-law -> 8 kHz u-law -> 16 kHz signed 16-bit mono,
    via two temporary wav files that are removed afterwards.
    """
    valid_input_file(source_audio_file, ["mp3", "sph", "wav", "au", "raw"])
    # In-place degradation when no explicit target is given.
    target_audio_file = (
        source_audio_file if target_audio_file is None else target_audio_file
    )
    # degrade to 8k
    tmp1 = ".".join(source_audio_file.split(".")[:-1]) + "_tmp1.wav"
    subprocess.call(
        "sox -V1 {} -r 8000 -e a-law {}".format(source_audio_file, tmp1),
        shell=True,
    )
    # convert to u-law
    tmp2 = ".".join(source_audio_file.split(".")[:-1]) + "_tmp2.wav"
    subprocess.call(
        "sox -V1 {} --rate 8000 -e u-law {}".format(tmp1, tmp2),
        shell=True,
    )
    # upgrade to 16k a-law signed
    subprocess.call(
        "sox -V1 {} --rate 16000 -e signed -b 16 --channel 1 {}".format(
            tmp2, target_audio_file
        ),
        shell=True,
    )
    # Clean up the intermediate files.
    os.remove(tmp1)
    os.remove(tmp2)
def combine_audio(audio_files, output_file, gain=False):
    """Mix *audio_files* into *output_file* using sox.

    When *gain* is truthy, "gain -n 0" is appended so the mix is
    renormalized to 0 dB.  Requires sox on PATH; runs via the shell.
    """
    gain_str = "gain -n 0" if gain else ""
    command = "sox -V1 -m {} {} {}".format(
        " ".join(audio_files), output_file, gain_str
    )
    subprocess.call(command, shell=True)
a1c400c5158580105326cc3e84bbb5b7fc61477c | 574 | py | Python | forms.py | qqalexqq/monkeys | df9a43adbda78da1f2ab1cc4c27819da4225d2e5 | [
"MIT"
] | null | null | null | forms.py | qqalexqq/monkeys | df9a43adbda78da1f2ab1cc4c27819da4225d2e5 | [
"MIT"
] | null | null | null | forms.py | qqalexqq/monkeys | df9a43adbda78da1f2ab1cc4c27819da4225d2e5 | [
"MIT"
] | null | null | null | from flask.ext.wtf import Form
from wtforms import (
TextField, IntegerField, HiddenField, SubmitField, validators
)
| 27.333333 | 76 | 0.656794 |
a1c42f46fbea71221d404268be15bf4dbded43e9 | 7,008 | py | Python | src/modules/model/getPretrained.py | sakimilo/transferLearning | 6d5c1e878bf91a34d32add81d4a2a57091946ed3 | [
"MIT"
] | null | null | null | src/modules/model/getPretrained.py | sakimilo/transferLearning | 6d5c1e878bf91a34d32add81d4a2a57091946ed3 | [
"MIT"
] | 8 | 2020-03-24T17:05:21.000Z | 2022-01-13T01:15:54.000Z | src/modules/model/getPretrained.py | sakimilo/transferLearning | 6d5c1e878bf91a34d32add81d4a2a57091946ed3 | [
"MIT"
] | null | null | null | import os
import shutil
import tensorflow as tf
from tensorflow import keras
from logs import logDecorator as lD
import jsonref
import numpy as np
import pickle
import warnings
from tqdm import tqdm
from modules.data import getData
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.model.getPretrained'
### turn off tensorflow info/warning/error or all python warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings("ignore")
if __name__ == '__main__':
print('tf.__version__ :', tf.__version__)
print('keras.__version__:', keras.__version__) | 37.079365 | 143 | 0.639555 |
a1c4c531f5d93b7c66d5df5fb932a485d12b518b | 492 | py | Python | Python/CountingBits.py | Jspsun/LEETCodePractice | 9dba8c0441201a188b93e4d39a0a9b7602857a5f | [
"MIT"
] | 3 | 2017-10-14T19:49:28.000Z | 2019-01-12T21:51:11.000Z | Python/CountingBits.py | Jspsun/LEETCodePractice | 9dba8c0441201a188b93e4d39a0a9b7602857a5f | [
"MIT"
] | null | null | null | Python/CountingBits.py | Jspsun/LEETCodePractice | 9dba8c0441201a188b93e4d39a0a9b7602857a5f | [
"MIT"
] | 5 | 2017-02-06T19:10:23.000Z | 2020-12-19T01:58:10.000Z | import math
print (Solution().countBits(9)) | 24.6 | 57 | 0.463415 |
a1c56433fe8bc3861e49acb291c03048e0f30a43 | 363 | py | Python | ACM-Solution/4queen.py | wasi0013/Python-CodeBase | 4a7a36395162f68f84ded9085fa34cc7c9b19233 | [
"MIT"
] | 2 | 2016-04-26T15:40:40.000Z | 2018-07-18T10:16:42.000Z | ACM-Solution/4queen.py | wasi0013/Python-CodeBase | 4a7a36395162f68f84ded9085fa34cc7c9b19233 | [
"MIT"
] | 1 | 2016-04-26T15:44:15.000Z | 2016-04-29T14:44:40.000Z | ACM-Solution/4queen.py | wasi0013/Python-CodeBase | 4a7a36395162f68f84ded9085fa34cc7c9b19233 | [
"MIT"
] | 1 | 2018-10-02T16:12:19.000Z | 2018-10-02T16:12:19.000Z | #four queen problem bruteforce solution using permutation
from itertools import permutations
n = 8
cols = range(n)
for vec in permutations(cols):
if n == len(set(vec[i]+i for i in cols)) \
== len(set(vec[i]-i for i in cols)):
board(vec) | 33 | 78 | 0.570248 |
a1c5f16bf229bdace56e1e6f63c0ce9caaa232d9 | 10,362 | py | Python | View/pesquisa_produtos.py | felipezago/ControleEstoque | 229659c4f9888fd01df34375ec92af7a1f734d10 | [
"MIT"
] | null | null | null | View/pesquisa_produtos.py | felipezago/ControleEstoque | 229659c4f9888fd01df34375ec92af7a1f734d10 | [
"MIT"
] | null | null | null | View/pesquisa_produtos.py | felipezago/ControleEstoque | 229659c4f9888fd01df34375ec92af7a1f734d10 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pesquisa_produtos.ui'
#
# Created by: PyQt5 View code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 43.537815 | 102 | 0.687898 |
a1c62a23cf4d05075c2ce8fd742ceaebabdfcf8f | 7,826 | py | Python | zyc/zyc.py | Sizurka/zyc | 5ed4158617293a613b52cb6197ca601a1b491660 | [
"MIT"
] | null | null | null | zyc/zyc.py | Sizurka/zyc | 5ed4158617293a613b52cb6197ca601a1b491660 | [
"MIT"
] | null | null | null | zyc/zyc.py | Sizurka/zyc | 5ed4158617293a613b52cb6197ca601a1b491660 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2019 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
GUI for finding/displaying parts and footprints.
"""
from __future__ import print_function
import os
import wx
from skidl import (
KICAD,
SchLib,
footprint_cache,
footprint_search_paths,
lib_search_paths,
skidl_cfg,
)
from .common import *
from .pckg_info import __version__
from .skidl_footprint_search import FootprintSearchPanel
from .skidl_part_search import PartSearchPanel
APP_TITLE = "zyc: SKiDL Part/Footprint Search"
APP_EXIT = 1
SHOW_HELP = 3
SHOW_ABOUT = 4
PART_SEARCH_PATH = 5
FOOTPRINT_SEARCH_PATH = 6
REFRESH = 7
def main():
    """Create the zyc application frame and run the wx event loop."""
    # import wx.lib.inspection
    app = wx.App()
    AppFrame(None)  # top-level frame; shows itself (no parent window)
    # wx.lib.inspection.InspectionTool().Show()
    app.MainLoop()
if __name__ == "__main__":
main()
| 32.882353 | 109 | 0.662663 |
a1c6e9a43d6622094c50a6e5fb6886a83b2efa97 | 516 | py | Python | train/ip.py | VCG/gp | cd106b604f8670a70add469d41180e34df3b1068 | [
"MIT"
] | null | null | null | train/ip.py | VCG/gp | cd106b604f8670a70add469d41180e34df3b1068 | [
"MIT"
] | null | null | null | train/ip.py | VCG/gp | cd106b604f8670a70add469d41180e34df3b1068 | [
"MIT"
] | null | null | null | import cPickle as pickle
import os; import sys; sys.path.append('..')
import gp
import gp.nets as nets
PATCH_PATH = ('iplb')
X_train, y_train, X_test, y_test = gp.Patch.load_rgb(PATCH_PATH)
X_train = X_train[:,:-1,:,:]
X_test = X_test[:,:-1,:,:]
cnn = nets.RGNetPlus()
cnn = cnn.fit(X_train, y_train)
test_accuracy = cnn.score(X_test, y_test)
print test_accuracy
# store CNN
sys.setrecursionlimit(1000000000)
with open(os.path.expanduser('~/Projects/gp/nets/IP_FULL.p'), 'wb') as f:
pickle.dump(cnn, f, -1)
| 21.5 | 73 | 0.705426 |
a1c8a7137ea1d05162f631c75ad27f5dd11e2101 | 1,066 | py | Python | test/TestSourceMissing.py | falcon-org/Falcon | 113b47ea6eef6ebbaba91eca596ca89e211cad67 | [
"BSD-3-Clause"
] | null | null | null | test/TestSourceMissing.py | falcon-org/Falcon | 113b47ea6eef6ebbaba91eca596ca89e211cad67 | [
"BSD-3-Clause"
] | null | null | null | test/TestSourceMissing.py | falcon-org/Falcon | 113b47ea6eef6ebbaba91eca596ca89e211cad67 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Check that falcon rebuilds an output if it is deleted.
import time
import os
makefile = '''
{
"rules":
[
{
"inputs": [ "source1", "source2" ],
"outputs": [ "output" ],
"cmd": "cat source1 > output && cat source2 >> output"
}
]
}
'''
| 21.755102 | 80 | 0.638837 |
a1c9ea67f9a8ebf42ecee72115e10b2677436a17 | 216 | py | Python | awesimsoss/__init__.py | spacetelescope/AWESim_SOSS | 75669276bd8ce22bc86d6845c771964ffec94d07 | [
"MIT"
] | 4 | 2019-12-17T19:04:25.000Z | 2020-09-22T15:53:09.000Z | awesimsoss/__init__.py | spacetelescope/awesimsoss | 75669276bd8ce22bc86d6845c771964ffec94d07 | [
"MIT"
] | 94 | 2018-10-17T18:03:57.000Z | 2021-03-01T07:34:21.000Z | awesimsoss/__init__.py | spacetelescope/awesimsoss | 75669276bd8ce22bc86d6845c771964ffec94d07 | [
"MIT"
] | 8 | 2018-10-17T20:45:49.000Z | 2021-04-14T11:41:41.000Z | # -*- coding: utf-8 -*-
"""Top-level package for awesimsoss."""
__author__ = """Joe Filippazzo"""
__email__ = 'jfilippazzo@stsci.edu'
__version__ = '0.3.5'
from .awesim import TSO, TestTSO, BlackbodyTSO, ModelTSO
| 21.6 | 56 | 0.689815 |
a1cbe0620d09eccc4613b82d60775050479f1c1b | 6,565 | py | Python | keyboards/inline/in_processing/keyboards_sum_ready.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | keyboards/inline/in_processing/keyboards_sum_ready.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | keyboards/inline/in_processing/keyboards_sum_ready.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null |
from data import all_emoji
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from aiogram.utils.callback_data import CallbackData
from data import all_emoji
from utils.googlesheets import send_to_google
from utils.set_minus_and_plus_currences import set_minus_and_plus
from utils.get_minuses_sum_FGH import get_minus_FGH
from utils.get_values_FGH_MNO import get_plus_FGH
cb_what_sum = CallbackData('cb_ws', 'type_btn')
cb_choose_currency = CallbackData('anprix', 'curr', 'type_btn')
cb_what_sum_correct = CallbackData('cbwsc', 'curr', 'type_btn')
cb_sum_correct_chunk = CallbackData('cbscc', 'curr', 'type_btn') | 26.795918 | 90 | 0.493374 |
a1cc680c5d6f410a35524d1c6900493495131044 | 181 | py | Python | hw4/4.3.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | null | null | null | hw4/4.3.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | 40 | 2021-12-30T15:57:10.000Z | 2022-01-26T16:44:24.000Z | hw4/4.3.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | 1 | 2022-03-12T19:17:26.000Z | 2022-03-12T19:17:26.000Z | # https://github.com/ArtemNikolaev/gb-hw/issues/24
print(list(multiple_of_20_21()))
| 22.625 | 68 | 0.662983 |
a1cd9d12331888d9263e120a221bcfaacd01d426 | 1,153 | py | Python | simulations/gamma_plot.py | austindavidbrown/Centered-Metropolis-Hastings | a96749a31ddcfbcaad081f6f9d2fb7ddcb55991f | [
"BSD-3-Clause"
] | null | null | null | simulations/gamma_plot.py | austindavidbrown/Centered-Metropolis-Hastings | a96749a31ddcfbcaad081f6f9d2fb7ddcb55991f | [
"BSD-3-Clause"
] | null | null | null | simulations/gamma_plot.py | austindavidbrown/Centered-Metropolis-Hastings | a96749a31ddcfbcaad081f6f9d2fb7ddcb55991f | [
"BSD-3-Clause"
] | null | null | null | """
ssh brow5079@compute.cla.umn.edu
#qsub -I -q gpu
qsub -I -l nodes=1:ppn=10
module load python/conda/3.7
source activate env
ipython
"""
from math import sqrt, pi, exp
import time
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import seaborn as sns
# Plot the per-iteration geometric decrease factor (1 - e^{-(1+sqrt(g))^2})^k
# for several gamma values, and save the figure.
linewidth = 4
alpha = .8
plt.clf()
plt.style.use("ggplot")
plt.figure(figsize=(10, 8))
iterations = torch.arange(0, 1000, 1)
gammas = [.5, 1, 1.5, 2, 2.5]
colors = sns.color_palette("tab10")
for i in range(0, len(gammas)):
  gamma = gammas[i]
  color = colors[i]
  # Contraction rate raised elementwise to the iteration count.
  y = (1 - exp(-(1 + gamma**(1/2))**(2)))**(iterations)
  plt.plot(iterations, y,
           label = r"$\gamma$ = {}".format(gamma),
           alpha = alpha,
           color = color,
           linewidth = linewidth)
plt.tick_params(axis='x', labelsize=20)
plt.tick_params(axis='y', labelsize=20)
plt.xlabel(r"Iterations", fontsize = 25, color="black")
plt.ylabel(r"Decrease in Wasserstein distance", fontsize = 25, color="black")
plt.legend(loc="best", fontsize=25, borderpad=.05, framealpha=0)
plt.savefig("decrease_plot.png", pad_inches=0, bbox_inches='tight',)
a1cdf3d6b6757ac8b742a5871545ebfcd99aef04 | 13,761 | py | Python | hopper_controller/src/hexapod/folding_manager.py | CreedyNZ/Hopper_ROS | 1e6354109f034a7d1d41a5b39ddcb632cfee64b2 | [
"MIT"
] | 36 | 2018-12-19T18:03:08.000Z | 2022-02-21T16:20:12.000Z | hopper_controller/src/hexapod/folding_manager.py | CreedyNZ/Hopper_ROS | 1e6354109f034a7d1d41a5b39ddcb632cfee64b2 | [
"MIT"
] | null | null | null | hopper_controller/src/hexapod/folding_manager.py | CreedyNZ/Hopper_ROS | 1e6354109f034a7d1d41a5b39ddcb632cfee64b2 | [
"MIT"
] | 7 | 2019-08-11T20:31:27.000Z | 2021-09-19T04:34:18.000Z | import rospy
MOVE_CYCLE_PERIOD = 0.01
| 44.824104 | 128 | 0.637599 |
a1d0867a1669f7b83b98d82fdaa8c25a6b04cd98 | 2,237 | py | Python | Teil_57_12_Kugeln.py | chrMenzel/A-beautiful-code-in-Python | 92ee43c1fb03c299384d4de8bebb590c5ba1b623 | [
"MIT"
] | 50 | 2018-12-23T15:46:16.000Z | 2022-03-28T15:49:59.000Z | Teil_57_12_Kugeln.py | chrMenzel/A-beautiful-code-in-Python | 92ee43c1fb03c299384d4de8bebb590c5ba1b623 | [
"MIT"
] | 9 | 2018-12-03T10:31:29.000Z | 2022-01-20T14:41:33.000Z | Teil_57_12_Kugeln.py | chrMenzel/A-beautiful-code-in-Python | 92ee43c1fb03c299384d4de8bebb590c5ba1b623 | [
"MIT"
] | 69 | 2019-02-02T11:59:09.000Z | 2022-03-28T15:54:28.000Z | import random as rnd
from itertools import combinations
from time import perf_counter as pfc
start = pfc()
stati = {True: {'?': '+', '-': '='},
False: {'?': '-', '+': '='}}
anz_kugeln = 12
kugeln = [[nr, '?'] for nr in range(anz_kugeln)]
v1 = [0, 1, 2, 3, 4, 5, 6, 7]
v2m = [8, 9, 10, 0, 1, 2]
prfe_varianten(0)
print(f'{pfc()-start:.2f} Sek.')
| 28.679487 | 84 | 0.582924 |
a1d14e136fc6ab73bd62946ee36b52f8b5423c8b | 1,001 | py | Python | util/format_ldtk_battlers.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | 5 | 2021-06-25T16:44:38.000Z | 2021-12-31T01:29:00.000Z | util/format_ldtk_battlers.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | null | null | null | util/format_ldtk_battlers.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | 1 | 2021-06-25T20:33:47.000Z | 2021-06-25T20:33:47.000Z | from pathlib import Path
import os
from PIL import Image, ImageFont, ImageDraw
import numpy as np
import pandas as pd
from math import *
p = Path("resources/graphics/Pokemon/Icons")
df = pd.read_csv(Path("resources/PBS/compressed/pokemon.csv"), index_col=0)
width = 64
height = ceil(len(df) / 64)
canvas = Image.new("RGBA", (width, height), "#00000000")
draw = ImageDraw.Draw(canvas)
for i, row in df.iterrows():
try:
img = (
Image.open(p / f"{row.internalname}.png")
.convert("RGBA")
.resize((64, 32), resample=Image.NEAREST)
.crop((0, 0, 32, 32))
)
canvas.alpha_composite(img, ((i % 64) * 32, (i // 64) * 32))
except Exception as e:
continue
canvas.save(Path("resources/graphics/generated/battler_ldtk_list.png"))
# for pth in p.glob("*.png"):
# img = (
# Image.open(pth)
# .convert("RGBA")
# .resize((64, 32), resample=Image.NEAREST)
# .crop((0, 0, 32, 32))
# )
| 25.025 | 75 | 0.592408 |