hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b2b9128938a7476610fbf31df937ff94978048ae | 1,514 | py | Python | tests/TestMetrics.py | gr33ndata/irlib | 4a518fec994b1a89cdc7d09a8170efec3d7e6615 | [
"MIT"
] | 80 | 2015-02-16T18:33:57.000Z | 2021-05-06T02:03:22.000Z | tests/TestMetrics.py | gr33ndata/irlib | 4a518fec994b1a89cdc7d09a8170efec3d7e6615 | [
"MIT"
] | 2 | 2016-02-05T06:30:21.000Z | 2017-09-24T17:42:58.000Z | tests/TestMetrics.py | gr33ndata/irlib | 4a518fec994b1a89cdc7d09a8170efec3d7e6615 | [
"MIT"
] | 25 | 2015-05-13T17:35:41.000Z | 2020-06-04T01:52:11.000Z | from unittest import TestCase
from irlib.metrics import Metrics
| 24.819672 | 47 | 0.515192 |
b2bb1c7a2af64e0803771a48f87683d4a4a1c0d2 | 50,483 | py | Python | cottonformation/res/lookoutmetrics.py | gitter-badger/cottonformation-project | 354f1dce7ea106e209af2d5d818b6033a27c193c | [
"BSD-2-Clause"
] | null | null | null | cottonformation/res/lookoutmetrics.py | gitter-badger/cottonformation-project | 354f1dce7ea106e209af2d5d818b6033a27c193c | [
"BSD-2-Clause"
] | null | null | null | cottonformation/res/lookoutmetrics.py | gitter-badger/cottonformation-project | 354f1dce7ea106e209af2d5d818b6033a27c193c | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
#--- Resource declaration ---
| 72.952312 | 244 | 0.792247 |
b2bb4d62eb2627e400fa61b892f36a5ac1c442b5 | 654 | py | Python | mlcomp/db/core/options.py | sUeharaE4/mlcomp | 1e0c23f84622ea4f72eb4e2354cef5ee16b767f4 | [
"Apache-2.0"
] | 166 | 2019-08-21T20:00:04.000Z | 2020-05-14T16:13:57.000Z | mlcomp/db/core/options.py | sUeharaE4/mlcomp | 1e0c23f84622ea4f72eb4e2354cef5ee16b767f4 | [
"Apache-2.0"
] | 14 | 2019-08-22T07:58:39.000Z | 2020-04-13T13:59:07.000Z | mlcomp/db/core/options.py | sUeharaE4/mlcomp | 1e0c23f84622ea4f72eb4e2354cef5ee16b767f4 | [
"Apache-2.0"
] | 22 | 2019-08-23T12:37:20.000Z | 2020-04-20T10:06:29.000Z |
__all__ = ['PaginatorOptions']
| 27.25 | 61 | 0.610092 |
b2bd1fc6f7777c13168c679b65bd978ef82ec6d2 | 164 | py | Python | pbxproj/pbxsections/PBXResourcesBuildPhase.py | JoliChen/mod-pbxproj | 24994416eec9cec838dce696c3cc9262c01ba883 | [
"MIT"
] | 1 | 2020-01-16T08:33:38.000Z | 2020-01-16T08:33:38.000Z | pbxproj/pbxsections/PBXResourcesBuildPhase.py | JoliChen/mod-pbxproj | 24994416eec9cec838dce696c3cc9262c01ba883 | [
"MIT"
] | null | null | null | pbxproj/pbxsections/PBXResourcesBuildPhase.py | JoliChen/mod-pbxproj | 24994416eec9cec838dce696c3cc9262c01ba883 | [
"MIT"
] | null | null | null | from pbxproj.pbxsections.PBXGenericBuildPhase import *
| 23.428571 | 54 | 0.786585 |
b2bd5b9242c3d57e4f9ef3633085d5a608db500a | 1,014 | py | Python | Week-4/points_and_segments.py | AbhiSaphire/Algorithmic-Toolbox | abc2b9f25b3c473b93b7d8905e7da0b38cd24062 | [
"MIT"
] | 3 | 2020-06-04T09:37:57.000Z | 2020-06-15T22:55:55.000Z | Week-4/points_and_segments.py | AbhiSaphire/Algorithmic-Toolbox | abc2b9f25b3c473b93b7d8905e7da0b38cd24062 | [
"MIT"
] | 1 | 2020-06-23T13:04:43.000Z | 2020-06-23T13:06:25.000Z | Week-4/points_and_segments.py | AbhiSaphire/Algorithmic-Toolbox | abc2b9f25b3c473b93b7d8905e7da0b38cd24062 | [
"MIT"
] | 1 | 2020-10-08T13:06:05.000Z | 2020-10-08T13:06:05.000Z | import sys
from itertools import chain
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
m = data[1]
starts = data[2:2 * n + 2:2]
ends = data[3:2 * n + 2:2]
points = data[2 * n + 2:]
cnt = fast_count_segments(starts, ends, points)
for x in cnt:
print(x, end=' ') | 31.6875 | 71 | 0.580868 |
b2bda88384a662721955747a1c788333f427aa38 | 6,822 | py | Python | main.py | anurendra/Web_IE | 4ba95320fd46d3c6fc090f3f095c7c7de78453bb | [
"Apache-2.0"
] | null | null | null | main.py | anurendra/Web_IE | 4ba95320fd46d3c6fc090f3f095c7c7de78453bb | [
"Apache-2.0"
] | null | null | null | main.py | anurendra/Web_IE | 4ba95320fd46d3c6fc090f3f095c7c7de78453bb | [
"Apache-2.0"
] | null | null | null | import argparse
import numpy as np
import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import custom_collate_fn, load_data, WebDataset
from models import WebObjExtractionNet
from train import train_model, evaluate_model
from utils import print_and_log
########## CMDLINE ARGS ##########
parser = argparse.ArgumentParser('Train Model')
parser.add_argument('-d', '--device', type=int, default=0)
parser.add_argument('-e', '--n_epochs', type=int, default=100)
parser.add_argument('-bb', '--backbone', type=str, default='alexnet', choices=['alexnet', 'resnet'])
parser.add_argument('-tc', '--trainable_convnet', type=int, default=1, choices=[0,1])
parser.add_argument('-lr', '--learning_rate', type=float, default=0.0005)
parser.add_argument('-bs', '--batch_size', type=int, default=25)
parser.add_argument('-cs', '--context_size', type=int, default=6)
parser.add_argument('-att', '--attention', type=int, default=1, choices=[0,1])
parser.add_argument('-hd', '--hidden_dim', type=int, default=300)
parser.add_argument('-r', '--roi', type=int, default=1)
parser.add_argument('-bbf', '--bbox_feat', type=int, default=1, choices=[0,1])
parser.add_argument('-wd', '--weight_decay', type=float, default=0)
parser.add_argument('-dp', '--drop_prob', type=float, default=0.5)
parser.add_argument('-mbb', '--max_bg_boxes', type=int, default=-1)
parser.add_argument('-nw', '--num_workers', type=int, default=8)
args = parser.parse_args()
device = torch.device('cuda:%d' % args.device if torch.cuda.is_available() else 'cpu')
########## MAKING RESULTS REPRODUCIBLE ##########
seed = 1
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
########## PARAMETERS ##########
N_CLASSES = 4
CLASS_NAMES = ['BG', 'Price', 'Title', 'Image']
IMG_HEIGHT = 1280 # Image assumed to have same height and width
EVAL_INTERVAL = 3 # Number of Epochs after which model is evaluated
NUM_WORKERS = args.num_workers # multithreaded data loading
DATA_DIR = '/shared/data_product_info/v2_8.3k/' # Contains .png and .pkl files for train and test data
OUTPUT_DIR = 'results_attn' # logs are saved here!
# NOTE: if same hyperparameter configuration is run again, previous log file and saved model will be overwritten
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
SPLIT_DIR = 'splits'
train_img_ids = np.loadtxt('%s/train_imgs.txt' % SPLIT_DIR, dtype=np.int32)
val_img_ids = np.loadtxt('%s/val_imgs.txt' % SPLIT_DIR, dtype=np.int32)
test_img_ids = np.loadtxt('%s/test_imgs.txt' % SPLIT_DIR, dtype=np.int32)
test_domains = np.loadtxt('%s/test_domains.txt' % SPLIT_DIR, dtype=str) # for calculating macro accuracy
########## HYPERPARAMETERS ##########
N_EPOCHS = args.n_epochs
BACKBONE = args.backbone
TRAINABLE_CONVNET = bool(args.trainable_convnet)
LEARNING_RATE = args.learning_rate
BATCH_SIZE = args.batch_size
CONTEXT_SIZE = args.context_size
USE_ATTENTION = bool(args.attention)
HIDDEN_DIM = args.hidden_dim
ROI_POOL_OUTPUT_SIZE = (args.roi, args.roi)
USE_BBOX_FEAT = bool(args.bbox_feat)
WEIGHT_DECAY = args.weight_decay
DROP_PROB = args.drop_prob
MAX_BG_BOXES = args.max_bg_boxes if args.max_bg_boxes > 0 else -1
params = '%s lr-%.0e batch-%d cs-%d att-%d hd-%d roi-%d bbf-%d wd-%.0e dp-%.2f mbb-%d' % (BACKBONE, LEARNING_RATE, BATCH_SIZE, CONTEXT_SIZE, USE_ATTENTION,
HIDDEN_DIM, ROI_POOL_OUTPUT_SIZE[0], USE_BBOX_FEAT, WEIGHT_DECAY, DROP_PROB, MAX_BG_BOXES)
log_file = '%s/%s logs.txt' % (OUTPUT_DIR, params)
test_acc_domainwise_file = '%s/%s test_acc_domainwise.csv' % (OUTPUT_DIR, params)
model_save_file = '%s/%s saved_model.pth' % (OUTPUT_DIR, params)
print('logs will be saved in \"%s\"' % (log_file))
print_and_log('Backbone Convnet: %s' % (BACKBONE), log_file, 'w')
print_and_log('Trainable Convnet: %s' % (TRAINABLE_CONVNET), log_file)
print_and_log('Learning Rate: %.0e' % (LEARNING_RATE), log_file)
print_and_log('Batch Size: %d' % (BATCH_SIZE), log_file)
print_and_log('Context Size: %d' % (CONTEXT_SIZE), log_file)
print_and_log('Attention: %s' % (USE_ATTENTION), log_file)
print_and_log('Hidden Dim: %d' % (HIDDEN_DIM), log_file)
print_and_log('RoI Pool Output Size: (%d, %d)' % ROI_POOL_OUTPUT_SIZE, log_file)
print_and_log('BBox Features: %s' % (USE_BBOX_FEAT), log_file)
print_and_log('Weight Decay: %.0e' % (WEIGHT_DECAY), log_file)
print_and_log('Dropout Probability: %.2f' % (DROP_PROB), log_file)
print_and_log('Max BG Boxes: %d\n' % (MAX_BG_BOXES), log_file)
########## DATA LOADERS ##########
train_loader, val_loader, test_loader = load_data(DATA_DIR, train_img_ids, val_img_ids, test_img_ids, CONTEXT_SIZE, BATCH_SIZE, NUM_WORKERS, MAX_BG_BOXES)
########## CREATE MODEL & LOSS FN ##########
model = WebObjExtractionNet(ROI_POOL_OUTPUT_SIZE, IMG_HEIGHT, N_CLASSES, BACKBONE, USE_ATTENTION, HIDDEN_DIM, TRAINABLE_CONVNET, DROP_PROB,
USE_BBOX_FEAT, CLASS_NAMES).to(device)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
criterion = nn.CrossEntropyLoss(reduction='sum').to(device)
########## TRAIN MODEL ##########
train_model(model, train_loader, optimizer, criterion, N_EPOCHS, device, val_loader, EVAL_INTERVAL, log_file, 'ckpt_%d.pth' % args.device)
########## EVALUATE TEST PERFORMANCE ##########
print('Evaluating test data class wise accuracies...')
evaluate_model(model, test_loader, criterion, device, 'TEST', log_file)
with open (test_acc_domainwise_file, 'w') as f:
f.write('Domain,N_examples,%s,%s,%s\n' % (CLASS_NAMES[1], CLASS_NAMES[2], CLASS_NAMES[3]))
print('Evaluating per domain accuracy for %d test domains...' % len(test_domains))
for domain in test_domains:
print('\n---> Domain:', domain)
test_dataset = WebDataset(DATA_DIR, np.loadtxt('%s/domain_wise_imgs/%s.txt' % (SPLIT_DIR, domain), np.int32).reshape(-1), CONTEXT_SIZE, max_bg_boxes=-1)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=NUM_WORKERS, collate_fn=custom_collate_fn, drop_last=False)
per_class_acc = evaluate_model(model, test_loader, criterion, device, 'TEST')
with open (test_acc_domainwise_file, 'a') as f:
f.write('%s,%d,%.2f,%.2f,%.2f\n' % (domain, len(test_dataset), 100*per_class_acc[1], 100*per_class_acc[2], 100*per_class_acc[3]))
macro_acc_test = np.loadtxt(test_acc_domainwise_file, delimiter=',', skiprows=1, dtype=str)[:,2:].astype(np.float32).mean(0)
for i in range(1, len(CLASS_NAMES)):
print_and_log('%s Macro Acc: %.2f%%' % (CLASS_NAMES[i], macro_acc_test[i-1]), log_file)
########## SAVE MODEL ##########
torch.save(model.state_dict(), model_save_file)
print_and_log('Model can be restored from \"%s\"' % (model_save_file), log_file)
| 48.728571 | 156 | 0.726327 |
b2be278f644c23228acf1f8fcc520ef3e2a07fe5 | 2,846 | py | Python | rclpy/actions/minimal_action_client/examples_rclpy_minimal_action_client/client_cancel.py | emersonknapp/examples | 36522787da5de2a2ff322d8953e3ae4b8e8ee9e7 | [
"Apache-2.0"
] | 1 | 2020-03-17T18:19:55.000Z | 2020-03-17T18:19:55.000Z | rclpy/actions/minimal_action_client/examples_rclpy_minimal_action_client/client_cancel.py | emersonknapp/examples | 36522787da5de2a2ff322d8953e3ae4b8e8ee9e7 | [
"Apache-2.0"
] | null | null | null | rclpy/actions/minimal_action_client/examples_rclpy_minimal_action_client/client_cancel.py | emersonknapp/examples | 36522787da5de2a2ff322d8953e3ae4b8e8ee9e7 | [
"Apache-2.0"
] | 1 | 2020-07-11T08:59:03.000Z | 2020-07-11T08:59:03.000Z | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from action_msgs.msg import GoalStatus
from example_interfaces.action import Fibonacci
import rclpy
from rclpy.action import ActionClient
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.node import Node
from rclpy.timer import WallTimer
if __name__ == '__main__':
main()
| 29.957895 | 91 | 0.697119 |
b2bf04c3b73ed2e5d7a5d3616651ad7a3f22eac7 | 1,170 | py | Python | buffers/introspective_buffer.py | GittiHab/mbrl-thesis-code | 10ecd6ef7cbb2df4bd03ce9928e344eab4238a2e | [
"MIT"
] | null | null | null | buffers/introspective_buffer.py | GittiHab/mbrl-thesis-code | 10ecd6ef7cbb2df4bd03ce9928e344eab4238a2e | [
"MIT"
] | null | null | null | buffers/introspective_buffer.py | GittiHab/mbrl-thesis-code | 10ecd6ef7cbb2df4bd03ce9928e344eab4238a2e | [
"MIT"
] | null | null | null | import numpy as np
from typing import Union, Optional, List, Dict, Any
from buffers.chunk_buffer import ChunkReplayBuffer
| 35.454545 | 70 | 0.638462 |
b2bff192f3852a8121825cff9ab0d2dc48bcad15 | 999 | py | Python | esp8266/boot.py | AlexGolovko/UltrasonicDeeper | 598020854a1bff433bce1582bf05625a6cb646c8 | [
"MIT"
] | 3 | 2020-04-21T10:51:38.000Z | 2022-03-10T18:23:56.000Z | esp8266/boot.py | AlexGolovko/UltrasonicDeeper | 598020854a1bff433bce1582bf05625a6cb646c8 | [
"MIT"
] | 5 | 2020-09-05T22:53:54.000Z | 2021-05-05T14:31:35.000Z | esp8266/boot.py | AlexGolovko/UltrasonicDeeper | 598020854a1bff433bce1582bf05625a6cb646c8 | [
"MIT"
] | 2 | 2021-01-24T19:18:42.000Z | 2021-02-26T09:41:54.000Z | # This file is executed on every boot (including wake-boot from deepsleep)
import esp
import gc
import machine
import network
esp.osdebug(None)
# machine.freq(160000000)
machine.Pin(2, machine.Pin.OUT).off()
do_connect('royter', 'traveller22')
gc.collect()
print('wifi connected')
| 23.785714 | 74 | 0.672673 |
b2c0e6ac73650986189a517a410915048cd910a4 | 3,326 | py | Python | bin/check_ysim.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 1 | 2021-01-04T14:51:44.000Z | 2021-01-04T14:51:44.000Z | bin/check_ysim.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 4 | 2019-09-03T22:19:16.000Z | 2020-07-13T12:38:08.000Z | bin/check_ysim.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 1 | 2020-08-10T14:51:11.000Z | 2020-08-10T14:51:11.000Z | from __future__ import print_function
from orphics import maps,io,cosmology,stats
from pixell import enmap
import numpy as np
import os,sys
from tilec import utils as tutils
region = 'deep56'
#region = 'boss'
solution = 'comptony'
tdir = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324"
dcomb = 'joint'
dfile = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=None)
dbeam = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=None,beam=True)
sfile = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=0)
sbeam = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=0,beam=True)
tfile = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=0)
cdfile = tutils.get_generic_fname(tdir,region,"cmb",deproject=None,data_comb=dcomb,version=None,sim_index=None)
cdbeam = tutils.get_generic_fname(tdir,region,"cmb",deproject=None,data_comb=dcomb,version=None,sim_index=None,beam=True)
csfile = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version=None,sim_index=0)
csbeam = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version=None,sim_index=0,beam=True)
ctfile = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version=None,sim_index=0)
dmap = enmap.read_map(dfile)
smap = enmap.read_map(sfile)
tmap = enmap.read_map(tfile)
cdmap = enmap.read_map(cdfile)
csmap = enmap.read_map(csfile)
ctmap = enmap.read_map(ctfile)
modlmap = dmap.modlmap()
ls,db = np.loadtxt(dbeam,unpack=True)
dbeam = maps.interp(ls,db)(modlmap)
ls,cdb = np.loadtxt(cdbeam,unpack=True)
cdbeam = maps.interp(ls,cdb)(modlmap)
ls,sb = np.loadtxt(sbeam,unpack=True)
sbeam = maps.interp(ls,sb)(modlmap)
ls,csb = np.loadtxt(csbeam,unpack=True)
csbeam = maps.interp(ls,csb)(modlmap)
#io.hplot(smap,"simmap")
bin_edges = np.arange(20,6000,20)
binner = stats.bin2D(modlmap,bin_edges)
p = lambda x: binner.bin((x*x.conj()).real)
dk = enmap.fft(dmap,normalize='phys')/dbeam
sk = enmap.fft(smap,normalize='phys')/sbeam
# tk = enmap.fft(tmap,normalize='phys')/sbeam
cdk = enmap.fft(cdmap,normalize='phys')/cdbeam
csk = enmap.fft(csmap,normalize='phys')/csbeam
# ctk = enmap.fft(ctmap,normalize='phys')/ctbeam
cents,d1d = p(dk)
cents,s1d = p(sk)
# cents,t1d = p(tk)
cents,cd1d = p(cdk)
cents,cs1d = p(csk)
# cents,ct1d = p(ctk)
pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='$D^{yy}_l$' ,scalefn = lambda x: x**2./2./np.pi)
#pl = io.Plotter('Dell')
pl.add(cents,d1d,label='data')
pl.add(cents,s1d,label='sim')
# pl.add(cents,t1d,label='new sim')
#pl._ax.set_ylim(1e-14,5e-10)
pl.done("dcomp.png")
pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='$D^{\\rm{CMB}}_l$' ,scalefn = lambda x: x**2./2./np.pi)
pl.add(cents[cents>5000],cd1d[cents>5000],label='data') # blinding ACT CMB data ell<5000
pl.add(cents,cs1d,label='sim')
# pl.add(cents,ct1d,label='new sim')
#pl._ax.set_ylim(1e-14,5e-10)
pl.done("cdcomp.png")
# pl = io.Plotter(xyscale='linlin',xlabel='l',ylabel='$D^{\\rm{CMB-new}}_l / D^{\\rm{CMB-old}}_l$')
# pl.add(cents,ct1d/cs1d)
# pl.hline(y=1)
# pl._ax.set_ylim(0.85,1.05)
# pl.done("cdcompdiff.png")
| 35.382979 | 123 | 0.746242 |
b2c0fe0d284c1df72e6a811ac09a5b401ed7fb9b | 509 | py | Python | tests/schema_mapping/expected/generated_example3.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 14 | 2018-02-14T13:28:47.000Z | 2022-02-12T08:03:21.000Z | tests/schema_mapping/expected/generated_example3.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 142 | 2017-11-22T14:02:33.000Z | 2022-03-23T21:26:29.000Z | tests/schema_mapping/expected/generated_example3.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 4 | 2017-12-14T16:46:45.000Z | 2021-12-15T16:33:31.000Z | from typedpy import *
# ********************
| 17.551724 | 47 | 0.581532 |
b2c10ffac29f7bdf64553c51d96d725e726e49a1 | 3,114 | py | Python | rpesk/morse_code.py | LukeJVinton/pi-projects | 9dfa110bb027b0fb281e3dca831f1547bc15faa5 | [
"MIT"
] | null | null | null | rpesk/morse_code.py | LukeJVinton/pi-projects | 9dfa110bb027b0fb281e3dca831f1547bc15faa5 | [
"MIT"
] | null | null | null | rpesk/morse_code.py | LukeJVinton/pi-projects | 9dfa110bb027b0fb281e3dca831f1547bc15faa5 | [
"MIT"
] | null | null | null |
# 02_blink_twice.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
import RPi.GPIO as GPIO
import time
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
red_pin1 = 18
GPIO.setup(red_pin1, GPIO.OUT)
try:
words = input('Enter a word: ')
for letter in words:
pulse_letter(letter, red_pin1)
finally:
print("Cleaning up")
GPIO.cleanup()
# You could get rid of the try: finally: code and just have the while loop
# and its contents. However, the try: finally: construct makes sure that
# when you CTRL-c the program to end it, all the pins are set back to
# being inputs. This helps protect your Pi from accidental shorts-circuits
# if something metal touches the GPIO pins.
| 33.483871 | 85 | 0.495825 |
b2c14d3bb32a9d0a97a9d773d034e8784a7e69a4 | 5,641 | py | Python | lljs.py | Peter9192/wind_analytics | 604136be1c2ef1155bdb7579c7d123525dbe10d8 | [
"Apache-2.0"
] | null | null | null | lljs.py | Peter9192/wind_analytics | 604136be1c2ef1155bdb7579c7d123525dbe10d8 | [
"Apache-2.0"
] | null | null | null | lljs.py | Peter9192/wind_analytics | 604136be1c2ef1155bdb7579c7d123525dbe10d8 | [
"Apache-2.0"
] | null | null | null | """ Identify low-level jets in wind profile data.
Peter Kalverla
December 2020
"""
import numpy as np
import xarray as xr
def detect_llj(x, axis=None, falloff=0, output='strength', inverse=False):
""" Identify maxima in wind profiles.
args:
- x : ndarray with wind profile data
- axis : specifies the vertical dimension
is internally used with np.apply_along_axis
- falloff : threshold for labeling as low-level jet
default 0; can be masked later, e.g. llj[falloff>2.0]
- output : specifiy return type: 'strength' or 'index'
returns (depending on <output> argument):
- strength : 0 if no maximum identified, otherwise falloff strength
- index : nan if no maximum identified, otherwise index along
<axis>, to get the height of the jet etc.
"""
# Wrapper interface to apply 1d function to ndarray
return np.apply_along_axis(inner, axis, x, output=output)
def detect_llj_vectorized(xs,
axis=-1,
output='falloff',
mask_inv=False,
inverse=False):
""" Identify local maxima in wind profiles.
args:
- x : ndarray with wind profile data
- axis : specifies the vertical dimension
- output : specifiy return type: 'falloff', 'strength' or 'index'
- mask_inv : use np.ma to mask nan values
returns (depending on <output> argument and whether llj is identified):
- falloff : 0 or largest difference between local max and subseq min
- strength : 0 or wind speed at jet height
- index : -1 or index along <axis>
"""
# Move <axis> to first dimension, to easily index and iterate over it.
xv = np.rollaxis(xs, axis)
if inverse:
xv = xv[::-1, ...]
if mask_inv:
xv = np.ma.masked_invalid(xv)
# Set initial arrays
min_elem = xv[-1].copy()
max_elem = np.zeros(min_elem.shape)
max_diff = np.zeros(min_elem.shape)
max_idx = np.ones(min_elem.shape, dtype=int) * (-1)
# Start at end of array and search backwards for larger differences.
for i, elem in reversed(list(enumerate(xv))):
min_elem = np.minimum(elem, min_elem)
new_max_identified = elem - min_elem > max_diff
max_diff = np.where(new_max_identified, elem - min_elem, max_diff)
max_elem = np.where(new_max_identified, elem, max_elem)
max_idx = np.where(new_max_identified, i, max_idx)
if output == 'falloff':
r = max_diff
elif output == 'strength':
r = max_elem
elif output == 'index':
r = max_idx
else:
raise ValueError('Invalid argument for <output>: %s' % output)
return r
def detect_llj_xarray(da, inverse=False):
""" Identify local maxima in wind profiles.
args:
- da : xarray.DataArray with wind profile data
- inverse : to flip the array if the data is stored upside down
returns: : xarray.Dataset with vertical dimension removed containing:
- falloff : 0 or largest difference between local max and subseq min
- strength : 0 or wind speed at jet height
- index : -1 or index along <axis>
Note: vertical dimension should be labeled 'level' and axis=1
"""
# Move <axis> to first dimension, to easily index and iterate over it.
xv = np.rollaxis(da.values, 1)
if inverse:
xv = xv[::-1, ...]
# Set initial arrays
min_elem = xv[-1].copy()
max_elem = np.zeros(min_elem.shape)
max_diff = np.zeros(min_elem.shape)
max_idx = np.ones(min_elem.shape, dtype=int) * (-1)
# Start at end of array and search backwards for larger differences.
for i, elem in reversed(list(enumerate(xv))):
min_elem = np.minimum(elem, min_elem)
new_max_identified = elem - min_elem > max_diff
max_diff = np.where(new_max_identified, elem - min_elem, max_diff)
max_elem = np.where(new_max_identified, elem, max_elem)
max_idx = np.where(new_max_identified, i, max_idx)
# Combine the results in a dataframe
get_height = lambda i: np.where(i > 0, da.level.values[i], da.level.values[
-1])
dims = da.isel(level=0).drop('level').dims
coords = da.isel(level=0).drop('level').coords
lljs = xr.Dataset(
{
'falloff': (dims, max_diff),
'strength': (dims, max_elem),
'level': (dims, get_height(max_idx)),
},
coords=coords)
print(
'Beware! Level is also filled if no jet is detected! '
'Use ds.sel(level=lljs.level).where(lljs.falloff>0) to get rid of them'
)
return lljs | 34.820988 | 80 | 0.591916 |
b2c4382563bdc135f87a0336d22aa149de5f9c44 | 9,203 | py | Python | rollon_erpnext/hooks_property_setter.py | santoshbb/rhplrepo | 8ce4792ea47b66ab2b7aed9da468104a2d37ae2b | [
"MIT"
] | null | null | null | rollon_erpnext/hooks_property_setter.py | santoshbb/rhplrepo | 8ce4792ea47b66ab2b7aed9da468104a2d37ae2b | [
"MIT"
] | null | null | null | rollon_erpnext/hooks_property_setter.py | santoshbb/rhplrepo | 8ce4792ea47b66ab2b7aed9da468104a2d37ae2b | [
"MIT"
] | null | null | null | property_setter = {
"dt": "Property Setter",
"filters": [
["name", "in", [
'Purchase Order-read_only_onload',
'Purchase Order-default_print_format',
'Purchase Invoice-naming_series-options',
'Purchase Invoice-naming_series-default',
'Delivery Note-naming_series-options',
'Delivery Note-naming_series-default',
'Sales Order-naming_series-options',
'Sales Order-naming_series-default',
'Purchase Receipt-naming_series-options',
'Purchase Receipt-naming_series-default',
'Production Order-naming_series-options',
'Production Order-naming_series-default',
'Stock Entry-naming_series-options',
'Stock Entry-naming_series-default',
'Purchase Order-naming_series-options',
'Purchase Order-naming_series-default',
'Sales Invoice-naming_series-options',
'Sales Invoice-naming_series-default',
'Purchase Invoice-read_only_onload',
'Stock Reconciliation-read_only_onload',
'Delivery Note-read_only_onload',
'Stock Entry-read_only_onload',
'Sales Invoice-po_no-read_only',
'Sales Invoice-read_only_onload',
'Purchase Receipt Item-read_only_onload',
'Custom Field-fieldname-width',
'Custom Field-dt-width',
'Sales Invoice Item-read_only_onload',
'Sales Invoice Item-warehouse-default',
'Sales Order-po_no-read_only',
'Sales Order-read_only_onload',
'Item-read_only_onload',
'User-read_only_onload',
'User-sort_field',
'Asset Maintenance Task-periodicity-options',
'Asset Maintenance Task-read_only_onload',
'Asset-read_only_onload',
'Sales Invoice Item-customer_item_code-print_hide',
'Sales Invoice Item-customer_item_code-hidden',
'Sales Order Item-read_only_onload',
'BOM-with_operations-default',
'BOM-read_only_onload',
'Stock Entry-default_print_format',
'Purchase Receipt-read_only_onload',
'Production Order-skip_transfer-default',
'Production Order-skip_transfer-read_only',
'Production Order-use_multi_level_bom-default',
'Production Order-use_multi_level_bom-read_only',
'Production Order-read_only_onload',
'Purchase Order Item-amount-precision',
'Purchase Order Item-read_only_onload',
'Purchase Order Item-rate-precision',
'Stock Entry-use_multi_level_bom-default',
'Stock Entry-use_multi_level_bom-read_only',
'Stock Entry-from_bom-read_only',
'Stock Entry-from_bom-default',
'Stock Entry Detail-barcode-read_only',
'Stock Entry Detail-read_only_onload',
'Stock Entry-to_warehouse-read_only',
'Stock Entry-from_warehouse-read_only',
'Stock Entry-remarks-reqd',
'Purchase Receipt-in_words-print_hide',
'Purchase Receipt-in_words-hidden',
'Purchase Invoice-in_words-print_hide',
'Purchase Invoice-in_words-hidden',
'Purchase Order-in_words-print_hide',
'Purchase Order-in_words-hidden',
'Supplier Quotation-in_words-print_hide',
'Supplier Quotation-in_words-hidden',
'Delivery Note-in_words-print_hide',
'Delivery Note-in_words-hidden',
'Sales Invoice-in_words-print_hide',
'Sales Invoice-in_words-hidden',
'Sales Order-in_words-print_hide',
'Sales Order-in_words-hidden',
'Quotation-in_words-print_hide',
'Quotation-in_words-hidden',
'Purchase Order-rounded_total-print_hide',
'Purchase Order-rounded_total-hidden',
'Purchase Order-base_rounded_total-print_hide',
'Purchase Order-base_rounded_total-hidden',
'Supplier Quotation-rounded_total-print_hide',
'Supplier Quotation-rounded_total-hidden',
'Supplier Quotation-base_rounded_total-print_hide',
'Supplier Quotation-base_rounded_total-hidden',
'Delivery Note-rounded_total-print_hide',
'Delivery Note-rounded_total-hidden',
'Delivery Note-base_rounded_total-print_hide',
'Delivery Note-base_rounded_total-hidden',
'Sales Invoice-rounded_total-print_hide',
'Sales Invoice-rounded_total-hidden',
'Sales Invoice-base_rounded_total-print_hide',
'Sales Invoice-base_rounded_total-hidden',
'Sales Order-rounded_total-print_hide',
'Sales Order-rounded_total-hidden',
'Sales Order-base_rounded_total-print_hide',
'Sales Order-base_rounded_total-hidden',
'Quotation-rounded_total-print_hide',
'Quotation-rounded_total-hidden',
'Quotation-base_rounded_total-print_hide',
'Quotation-base_rounded_total-hidden',
'Dropbox Settings-dropbox_setup_via_site_config-hidden',
'Dropbox Settings-read_only_onload',
'Dropbox Settings-dropbox_access_token-hidden',
'Activity Log-subject-width',
'Employee-employee_number-hidden',
'Employee-employee_number-reqd',
'Employee-naming_series-reqd',
'Employee-naming_series-hidden',
'Supplier-naming_series-hidden',
'Supplier-naming_series-reqd',
'Delivery Note-tax_id-print_hide',
'Delivery Note-tax_id-hidden',
'Sales Invoice-tax_id-print_hide',
'Sales Invoice-tax_id-hidden',
'Sales Order-tax_id-print_hide',
'Sales Order-tax_id-hidden',
'Customer-naming_series-hidden',
'Customer-naming_series-reqd',
'Stock Entry Detail-barcode-hidden',
'Stock Reconciliation Item-barcode-hidden',
'Item-barcode-hidden',
'Delivery Note Item-barcode-hidden',
'Sales Invoice Item-barcode-hidden',
'Purchase Receipt Item-barcode-hidden',
'Item-item_code-reqd',
'Item-item_code-hidden',
'Item-naming_series-hidden',
'Item-naming_series-reqd',
'Item-manufacturing-collapsible_depends_on',
'Purchase Invoice-payment_schedule-print_hide',
'Purchase Invoice-due_date-print_hide',
'Purchase Order-payment_schedule-print_hide',
'Purchase Order-due_date-print_hide',
'Sales Invoice-payment_schedule-print_hide',
'Sales Invoice-due_date-print_hide',
'Sales Order-payment_schedule-print_hide',
'Sales Order-due_date-print_hide',
'Journal Entry Account-sort_order',
'Journal Entry Account-account_currency-print_hide',
'Sales Invoice-taxes_and_charges-reqd',
'Sales Taxes and Charges-sort_order',
'Sales Invoice Item-customer_item_code-label',
'Sales Invoice-default_print_format',
'Purchase Taxes and Charges Template-sort_order',
'Serial No-company-in_standard_filter',
'Serial No-amc_expiry_date-in_standard_filter',
'Serial No-warranty_expiry_date-in_standard_filter',
'Serial No-maintenance_status-in_standard_filter',
'Serial No-customer_name-in_standard_filter',
'Serial No-customer_name-bold',
'Serial No-customer-in_standard_filter',
'Serial No-delivery_document_no-in_standard_filter',
'Serial No-delivery_document_type-in_standard_filter',
'Serial No-supplier_name-bold',
'Serial No-supplier_name-in_standard_filter',
'Serial No-supplier-in_standard_filter',
'Serial No-purchase_date-in_standard_filter',
'Serial No-description-in_standard_filter',
'Delivery Note-section_break1-hidden',
'Delivery Note-sales_team_section_break-hidden',
'Delivery Note-project-hidden',
'Delivery Note-taxes-hidden',
'Delivery Note-taxes_and_charges-hidden',
'Delivery Note-taxes_section-hidden',
'Delivery Note-posting_time-print_hide',
'Delivery Note-posting_time-description',
'Delivery Note Item-warehouse-default',
'Item-income_account-default',
'Item-income_account-depends_on',
'Purchase Receipt-remarks-reqd',
'Purchase Receipt-taxes-hidden',
'Purchase Receipt-taxes_and_charges-hidden',
'Purchase Receipt Item-base_rate-fieldtype',
'Purchase Receipt Item-amount-in_list_view',
'Purchase Receipt Item-rate-fieldtype',
'Purchase Receipt Item-base_price_list_rate-fieldtype',
'Purchase Receipt Item-price_list_rate-fieldtype',
'Purchase Receipt Item-qty-in_list_view',
'Stock Entry-title_field',
'Stock Entry-search_fields',
'Stock Entry-project-hidden',
'Stock Entry-supplier-in_list_view',
'Stock Entry-from_warehouse-in_list_view',
'Stock Entry-to_warehouse-in_list_view',
'Stock Entry-purpose-default',
'ToDo-sort_order',
'Currency Exchange-sort_order',
'Company-abbr-in_list_view',
'Stock Reconciliation-expense_account-in_standard_filter',
'Stock Reconciliation-expense_account-depends_on',
'Sales Order-taxes-hidden',
'Warehouse-sort_order',
'Address-fax-hidden',
'Address-fax-read_only',
'Address-phone-hidden',
'Address-email_id-hidden',
'Address-city-reqd',
'BOM Operation-sort_order',
'BOM Item-scrap-read_only',
'BOM-operations_section-read_only',
'BOM-operations-read_only',
'BOM-rm_cost_as_per-reqd',
'Journal Entry-pay_to_recd_from-allow_on_submit',
'Journal Entry-remark-in_global_search',
'Journal Entry-total_amount-bold',
'Journal Entry-total_amount-print_hide',
'Journal Entry-total_amount-in_list_view',
'Journal Entry-total_credit-print_hide',
'Journal Entry-total_debit-print_hide',
'Journal Entry-total_debit-in_list_view',
'Journal Entry-user_remark-print_hide',
'Stock Entry-to_warehouse-hidden',
'Purchase Order Item-rate-fieldtype',
'Journal Entry Account-exchange_rate-print_hide',
'Sales Invoice Item-item_code-label',
'BOM-rm_cost_as_per-options',
'Purchase Order Item-price_list_rate-fieldtype',
'Reconciliation-expense_account-read_only',
'Customer-tax_id-read_only',
'Purchase Order Item-amount-fieldtype',
'Stock Entry-project-hidden'
]
]
]
} | 40.013043 | 61 | 0.757688 |
b2c5e558b71549ec4885e41eca936b455678ffaf | 1,555 | py | Python | api/client/src/pcluster_client/sigv4_auth.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 415 | 2018-11-13T15:02:15.000Z | 2022-03-31T15:26:06.000Z | api/client/src/pcluster_client/sigv4_auth.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 2,522 | 2018-11-13T16:16:27.000Z | 2022-03-31T13:57:10.000Z | api/client/src/pcluster_client/sigv4_auth.py | yuleiwan/aws-parallelcluster | aad2a3019ef4ad08d702f5acf41b152b3f7a0b46 | [
"Apache-2.0"
] | 164 | 2018-11-14T22:47:46.000Z | 2022-03-22T11:33:22.000Z | """Sigv4 Signing Support"""
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy
# of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import botocore
import json
def sigv4_auth(method, host, path, querys, body, headers):
    """Sign the request with AWS SigV4 and merge the auth headers into *headers*.

    :param method: HTTP method (e.g. ``"GET"``).
    :param host: base URL including scheme, e.g. ``https://<api-id>.<service>.<region>.<domain>``.
    :param path: request path appended to *host*.
    :param querys: iterable of ``(key, value)`` query-string pairs.
    :param body: JSON-serializable request body, or a falsy value for no body.
    :param headers: dict mutated in place with the signed request headers.
    """
    # Drop the scheme so the endpoint can be split into its dotted components.
    endpoint = host.replace('https://', '').replace('http://', '')
    _api_id, _service, region, _domain = endpoint.split('.', maxsplit=3)

    query_string = '&'.join('%s=%s' % (key, value) for key, value in querys)
    full_url = '%s%s?%s' % (host, path, query_string)

    session = botocore.session.Session()
    aws_request = botocore.awsrequest.AWSRequest(
        method=method,
        url=full_url,
        data=json.dumps(body) if body else None,
    )
    signer = botocore.auth.SigV4Auth(session.get_credentials(),
                                     "execute-api", region)
    signer.add_auth(aws_request)
    prepared = aws_request.prepare()

    # The Host header must name the bare endpoint (any path portion removed).
    headers['host'] = endpoint.split('/', maxsplit=1)[0]
    for name, value in prepared.headers.items():
        headers[name] = value
| 39.871795 | 85 | 0.659807 |
b2c664ce7bd387984bca2a25d6741d8d39b481e1 | 2,624 | py | Python | django/contrib/sessions/tests.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | 1 | 2016-05-08T12:24:22.000Z | 2016-05-08T12:24:22.000Z | django/contrib/sessions/tests.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | null | null | null | django/contrib/sessions/tests.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | 1 | 2015-11-19T14:45:16.000Z | 2015-11-19T14:45:16.000Z | r"""
>>> from django.conf import settings
>>> from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
>>> from django.contrib.sessions.backends.cache import SessionStore as CacheSession
>>> from django.contrib.sessions.backends.file import SessionStore as FileSession
>>> from django.contrib.sessions.backends.base import SessionBase
>>> db_session = DatabaseSession()
>>> db_session.modified
False
>>> db_session['cat'] = "dog"
>>> db_session.modified
True
>>> db_session.pop('cat')
'dog'
>>> db_session.pop('some key', 'does not exist')
'does not exist'
>>> db_session.save()
>>> db_session.exists(db_session.session_key)
True
>>> db_session.delete(db_session.session_key)
>>> db_session.exists(db_session.session_key)
False
>>> file_session = FileSession()
>>> file_session.modified
False
>>> file_session['cat'] = "dog"
>>> file_session.modified
True
>>> file_session.pop('cat')
'dog'
>>> file_session.pop('some key', 'does not exist')
'does not exist'
>>> file_session.save()
>>> file_session.exists(file_session.session_key)
True
>>> file_session.delete(file_session.session_key)
>>> file_session.exists(file_session.session_key)
False
# Make sure the file backend checks for a good storage dir
>>> settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
>>> FileSession()
Traceback (innermost last):
...
ImproperlyConfigured: The session storage path '/if/this/directory/exists/you/have/a/weird/computer' doesn't exist. Please set your SESSION_FILE_PATH setting to an existing directory in which Django can store session data.
>>> cache_session = CacheSession()
>>> cache_session.modified
False
>>> cache_session['cat'] = "dog"
>>> cache_session.modified
True
>>> cache_session.pop('cat')
'dog'
>>> cache_session.pop('some key', 'does not exist')
'does not exist'
>>> cache_session.save()
>>> cache_session.delete(cache_session.session_key)
>>> cache_session.exists(cache_session.session_key)
False
>>> s = SessionBase()
>>> s._session['some key'] = 'exists' # Pre-populate the session with some data
>>> s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.accessed, s.modified
(False, False)
>>> s.pop('non existant key', 'does not exist')
'does not exist'
>>> s.accessed, s.modified
(True, False)
>>> s.setdefault('foo', 'bar')
'bar'
>>> s.setdefault('foo', 'baz')
'bar'
>>> s.accessed = False # Reset the accessed flag
>>> s.pop('some key')
'exists'
>>> s.accessed, s.modified
(True, True)
>>> s.pop('some key', 'does not exist')
'does not exist'
"""
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstring when the file is
    # executed directly (e.g. ``python tests.py``).
    import doctest
    doctest.testmod()
| 27.333333 | 222 | 0.722942 |
b2c83a9626d327c18df6c74ffc572fe2774106fd | 1,504 | py | Python | gopage/web_helper.py | wavegu/gopage | ff83cea34a82570627c74c5bad45ebc02ecaaff6 | [
"MIT"
] | 1 | 2017-02-03T10:24:00.000Z | 2017-02-03T10:24:00.000Z | gopage/web_helper.py | wavegu/gopage | ff83cea34a82570627c74c5bad45ebc02ecaaff6 | [
"MIT"
] | null | null | null | gopage/web_helper.py | wavegu/gopage | ff83cea34a82570627c74c5bad45ebc02ecaaff6 | [
"MIT"
] | null | null | null | # encoding: utf-8
import urllib2
from proxy_helper import ProxyHelper
# Module-level proxy helper instance, shared by code elsewhere in this file.
proxyHelper = ProxyHelper()
if __name__ == '__main__':
    # Manual smoke test: fetch a Google search results page and save the
    # HTML to disk so it can be inspected by hand.
    # NOTE(review): ``WebHelper`` is not imported here -- presumably defined
    # elsewhere in this module; verify before running.
    page_content = WebHelper.get_page_content_from_url('https://www.google.com/search?hl=en&safe=off&q=wave')
    with open('test_result.html', 'w') as test_result:
        test_result.write(page_content)
b2cabd96c3fc001d2729753488a402fc76f755f0 | 8,187 | py | Python | tests/test_skipping.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | 41 | 2020-07-24T15:19:19.000Z | 2022-03-17T17:40:57.000Z | tests/test_skipping.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | 240 | 2020-06-26T21:37:49.000Z | 2022-03-31T08:56:56.000Z | tests/test_skipping.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | null | null | null | import textwrap
from contextlib import ExitStack as does_not_raise # noqa: N813
import pytest
from _pytask.mark import Mark
from _pytask.outcomes import Skipped
from _pytask.outcomes import SkippedAncestorFailed
from _pytask.outcomes import SkippedUnchanged
from _pytask.skipping import pytask_execute_task_setup
from pytask import cli
from pytask import main
| 30.662921 | 88 | 0.696836 |
b2cacdeef0561546d139a9bec5f6cfde666b19a3 | 156 | py | Python | basic/server.py | spinico/django-projects-boilerplates | 22d47f60d282d0edb9c0f1b84bb3e9e84949bd25 | [
"MIT"
] | null | null | null | basic/server.py | spinico/django-projects-boilerplates | 22d47f60d282d0edb9c0f1b84bb3e9e84949bd25 | [
"MIT"
] | null | null | null | basic/server.py | spinico/django-projects-boilerplates | 22d47f60d282d0edb9c0f1b84bb3e9e84949bd25 | [
"MIT"
] | null | null | null | from waitress import serve
from conf.wsgi import application
if __name__ == '__main__':
    # Serve the WSGI application with waitress on all interfaces, port 8000.
    # ``url_scheme='https'`` marks requests as HTTPS (wsgi.url_scheme), as
    # when sitting behind a TLS-terminating proxy.
    serve(application, listen='0.0.0.0:8000', url_scheme='https')
| 22.285714 | 65 | 0.737179 |
b2cacff06725bd2d9718bd414438fed14a74ef43 | 589 | py | Python | src/django_version_checks/apps.py | adamchainz/django-version-checks | 94f6d696f5279a7bb579c7d2a177a231d6b61e45 | [
"MIT"
] | 33 | 2020-12-13T23:02:39.000Z | 2022-03-28T06:19:09.000Z | src/django_version_checks/apps.py | adamchainz/django-version-checks | 94f6d696f5279a7bb579c7d2a177a231d6b61e45 | [
"MIT"
] | 47 | 2020-12-14T01:33:56.000Z | 2021-11-06T09:17:38.000Z | src/django_version_checks/apps.py | adamchainz/django-version-checks | 94f6d696f5279a7bb579c7d2a177a231d6b61e45 | [
"MIT"
] | 2 | 2021-11-13T22:56:21.000Z | 2022-02-15T14:24:53.000Z | from django.apps import AppConfig
from django.core.checks import Tags, register
from django_version_checks import checks
| 34.647059 | 65 | 0.7691 |
b2ccd20ece8fce408fc21dd559ba9fc865804c11 | 3,471 | py | Python | app/ocr.py | noahnisbet/human-rights-first-asylum-ds-noahnisbet | c329045e5967253d8b5ac729e315ed03325744b2 | [
"MIT"
] | 1 | 2021-02-25T21:26:08.000Z | 2021-02-25T21:26:08.000Z | app/ocr.py | noahnisbet/human-rights-first-asylum-ds-noahnisbet | c329045e5967253d8b5ac729e315ed03325744b2 | [
"MIT"
] | null | null | null | app/ocr.py | noahnisbet/human-rights-first-asylum-ds-noahnisbet | c329045e5967253d8b5ac729e315ed03325744b2 | [
"MIT"
] | null | null | null | import os
os.environ["OMP_NUM_THREADS"]= '1'
os.environ["OMP_THREAD_LIMIT"] = '1'
os.environ["MKL_NUM_THREADS"] = '1'
os.environ["NUMEXPR_NUM_THREADS"] = '1'
os.environ["OMP_NUM_THREADS"] = '1'
os.environ["PAPERLESS_AVX2_AVAILABLE"]="false"
os.environ["OCR_THREADS"] = '1'
import poppler
import pytesseract
from pdf2image import convert_from_bytes
from fastapi import APIRouter, File
import sqlalchemy
from dotenv import load_dotenv, find_dotenv
from sqlalchemy import create_engine
from app.BIA_Scraper import BIACase
import requests
import pandas as pd
import numpy as np
from PIL import Image
router = APIRouter()
load_dotenv(find_dotenv())
database_url = os.getenv('DATABASE_URL')
engine = sqlalchemy.create_engine(database_url)
| 28.219512 | 75 | 0.669548 |
b2cd1412230dab0559fa3bfa9b195e544581cd4a | 3,889 | py | Python | lp_local_search.py | cddoyle/div-k-median | fa2e3dc01f257602aa83e151c3bc268a76f8075e | [
"MIT"
] | null | null | null | lp_local_search.py | cddoyle/div-k-median | fa2e3dc01f257602aa83e151c3bc268a76f8075e | [
"MIT"
] | null | null | null | lp_local_search.py | cddoyle/div-k-median | fa2e3dc01f257602aa83e151c3bc268a76f8075e | [
"MIT"
] | null | null | null | import time
import sys
import numpy as np
from local_search import kmedian_local_search
import feasibility
from kmedkpm import k_median_k_partitions_LS
import psutil
from sklearn.datasets import make_blobs
import generator
import random
# Module-level debug/test toggle.
# NOTE(review): ``test`` is not referenced in the visible code -- confirm use.
test = False
#end lp_ls_complete()
#end es_fpt_3apx_complete_test()
################################################################################
if __name__ == '__main__':
    # Entry point: run the complete LP local-search test.
    # ``test_lp_ls_complete`` is defined elsewhere in this module.
    test_lp_ls_complete()
| 32.408333 | 104 | 0.54487 |
b2cdb0c942905c9f4fb6dbf73dca96d1a9a5f768 | 415 | py | Python | dms/v2/meal/__init__.py | moreal/DMS-api | 9624e28764ec4535002677671e10a09d762d19a8 | [
"MIT"
] | null | null | null | dms/v2/meal/__init__.py | moreal/DMS-api | 9624e28764ec4535002677671e10a09d762d19a8 | [
"MIT"
] | null | null | null | dms/v2/meal/__init__.py | moreal/DMS-api | 9624e28764ec4535002677671e10a09d762d19a8 | [
"MIT"
] | 1 | 2018-09-29T14:35:20.000Z | 2018-09-29T14:35:20.000Z | import datetime
import requests
import json
from dms.v2.config import DMS_URL
| 21.842105 | 66 | 0.636145 |
b2cf11ab3d7e9318bb55599575d25a729b83ace2 | 319 | py | Python | mayan/apps/dependencies/permissions.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/dependencies/permissions.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/dependencies/permissions.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from django.utils.translation import ugettext_lazy as _
from mayan.apps.permissions import PermissionNamespace
namespace = PermissionNamespace(label=_('Dependencies'), name='dependencies')
permission_dependencies_view = namespace.add_permission(
label=_('View dependencies'), name='dependencies_view'
)
| 31.9 | 78 | 0.793103 |
b2d171ee084b4ded299d8d9b2d8e8e0fa604218a | 213 | py | Python | src/about.py | jukeboxroundtable/JukeboxRoundtable | 06670d2e8511848829b68fddac5bc77806606f98 | [
"MIT"
] | 1 | 2019-02-15T17:33:51.000Z | 2019-02-15T17:33:51.000Z | src/about.py | jukeboxroundtable/JukeboxRoundtable | 06670d2e8511848829b68fddac5bc77806606f98 | [
"MIT"
] | 37 | 2019-01-30T18:32:43.000Z | 2019-06-11T18:00:11.000Z | src/about.py | jukeboxroundtable/JukeboxRoundtable | 06670d2e8511848829b68fddac5bc77806606f98 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template
about_blueprint = Blueprint('about', __name__)
| 21.3 | 46 | 0.7277 |
b2d17e05c973d4afec2889fae68a1be4d13ef5c7 | 2,440 | py | Python | src/lib/template.py | emil-jacero/powerdns-auth-docker | 922f08d6c2182cd8497fc869e42a6218ecc1b105 | [
"MIT"
] | null | null | null | src/lib/template.py | emil-jacero/powerdns-auth-docker | 922f08d6c2182cd8497fc869e42a6218ecc1b105 | [
"MIT"
] | 2 | 2021-05-08T13:30:42.000Z | 2022-02-06T22:28:54.000Z | src/lib/template.py | emil-jacero/powerdns-auth-docker | 922f08d6c2182cd8497fc869e42a6218ecc1b105 | [
"MIT"
] | null | null | null | import os
import jinja2
import logging
from lib.config import Config
| 38.730159 | 109 | 0.607787 |
b2d18501d17a461813526ffc739b3386958593ec | 2,829 | py | Python | backend/fief/schemas/workspace.py | fief-dev/fief | cbfeec11da7a03aa345cb7ceb088b5d8ec9d6ab1 | [
"MIT"
] | 1 | 2022-02-13T17:39:42.000Z | 2022-02-13T17:39:42.000Z | backend/fief/schemas/workspace.py | fief-dev/fief | cbfeec11da7a03aa345cb7ceb088b5d8ec9d6ab1 | [
"MIT"
] | 1 | 2022-02-13T14:46:24.000Z | 2022-02-13T14:46:24.000Z | backend/fief/schemas/workspace.py | fief-dev/fief | cbfeec11da7a03aa345cb7ceb088b5d8ec9d6ab1 | [
"MIT"
] | null | null | null | from typing import Optional
from pydantic import BaseModel, root_validator, validator
from fief.crypto.encryption import decrypt
from fief.db.types import DatabaseType
from fief.errors import APIErrorCode
from fief.schemas.generics import UUIDSchema
from fief.settings import settings
| 28.29 | 85 | 0.721456 |
b2d1a8016c0b95e209c421ed0aa8314cc552c1ba | 491 | py | Python | art/migrations/0007_alter_artimage_project.py | rrozander/Art-Website | 2cedba90f2adc30d9e83e957903e890af7863eac | [
"MIT"
] | null | null | null | art/migrations/0007_alter_artimage_project.py | rrozander/Art-Website | 2cedba90f2adc30d9e83e957903e890af7863eac | [
"MIT"
] | null | null | null | art/migrations/0007_alter_artimage_project.py | rrozander/Art-Website | 2cedba90f2adc30d9e83e957903e890af7863eac | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-03-01 23:06
from django.db import migrations, models
import django.db.models.deletion
| 24.55 | 122 | 0.643585 |
b2d2f0394cea895eb88a51c785769332faca9031 | 844 | py | Python | blog/tasks.py | iloveyougit/ylink2 | a87d8fde79ab259012cd6486299fcf86e1afc740 | [
"MIT"
] | null | null | null | blog/tasks.py | iloveyougit/ylink2 | a87d8fde79ab259012cd6486299fcf86e1afc740 | [
"MIT"
] | null | null | null | blog/tasks.py | iloveyougit/ylink2 | a87d8fde79ab259012cd6486299fcf86e1afc740 | [
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import string
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from celery import shared_task, current_task
| 29.103448 | 87 | 0.667062 |
b2d418afad092a7839f43f08bf37f5d322277d2e | 392 | py | Python | fehler_auth/migrations/0003_auto_20220416_1626.py | dhavall13/fehler_core | dd27802d5b227a32aebcc8bfde68e78a69a36d66 | [
"MIT"
] | null | null | null | fehler_auth/migrations/0003_auto_20220416_1626.py | dhavall13/fehler_core | dd27802d5b227a32aebcc8bfde68e78a69a36d66 | [
"MIT"
] | null | null | null | fehler_auth/migrations/0003_auto_20220416_1626.py | dhavall13/fehler_core | dd27802d5b227a32aebcc8bfde68e78a69a36d66 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.27 on 2022-04-16 16:26
from django.db import migrations, models
| 20.631579 | 52 | 0.604592 |
b2d5339e5e531cf1c00d606b9884958e7a82d30b | 5,637 | py | Python | models/gene.py | rogamba/neuropy | e5ee36126537c75e041d3413c45c6cc20d58a58e | [
"MIT"
] | null | null | null | models/gene.py | rogamba/neuropy | e5ee36126537c75e041d3413c45c6cc20d58a58e | [
"MIT"
] | null | null | null | models/gene.py | rogamba/neuropy | e5ee36126537c75e041d3413c45c6cc20d58a58e | [
"MIT"
] | null | null | null | from random import choice, gauss, random
| 35.012422 | 102 | 0.583999 |
b2d6aec912e54487b7271a6bbbbeac36be760ac4 | 552 | py | Python | tapis_cli/commands/taccapis/v2/apps/mixins.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 8 | 2020-10-18T22:48:23.000Z | 2022-01-10T09:16:14.000Z | tapis_cli/commands/taccapis/v2/apps/mixins.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 238 | 2019-09-04T14:37:54.000Z | 2020-04-15T16:24:24.000Z | tapis_cli/commands/taccapis/v2/apps/mixins.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 5 | 2019-09-20T04:23:49.000Z | 2020-01-16T17:45:14.000Z | from tapis_cli.clients.services.mixins import ServiceIdentifier
__all__ = ['AppIdentifier']
| 27.6 | 74 | 0.615942 |
b2d6f5f40b8910601f5ded38d8738f9d70e406e6 | 835 | py | Python | agents/antifa.py | fan-weiwei/mercury-unicorn | 6c36d6baeaaee990a622caa0d7790dbd9982962c | [
"Apache-2.0"
] | null | null | null | agents/antifa.py | fan-weiwei/mercury-unicorn | 6c36d6baeaaee990a622caa0d7790dbd9982962c | [
"Apache-2.0"
] | null | null | null | agents/antifa.py | fan-weiwei/mercury-unicorn | 6c36d6baeaaee990a622caa0d7790dbd9982962c | [
"Apache-2.0"
] | null | null | null | from agents.agent import Agent
from random import randint
| 24.558824 | 65 | 0.635928 |
b2d75b157f57c7832de3185889e5c4f8fbd90377 | 234 | py | Python | faketranslate/metadata.py | HeywoodKing/faketranslate | 683821eccd0004305c9f1bbfa0aae16f5fbcd829 | [
"MIT"
] | null | null | null | faketranslate/metadata.py | HeywoodKing/faketranslate | 683821eccd0004305c9f1bbfa0aae16f5fbcd829 | [
"MIT"
] | null | null | null | faketranslate/metadata.py | HeywoodKing/faketranslate | 683821eccd0004305c9f1bbfa0aae16f5fbcd829 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
@File : metadata.py
@Time : 2020/1/1
@Author : flack
@Email : opencoding@hotmail.com
@ide : PyCharm
@project : faketranslate
@description :
""" | 23.4 | 40 | 0.491453 |
b2d8b14f8188a2112f4bdf4db0fef92891d9717a | 6,062 | py | Python | Scaffold_Code/test scripts/test10_vm.py | tzortzispanagiotis/nbc-blockchain-python | 4e59bfd3f8aa6fb72ce89f430909a1d5c90629e2 | [
"MIT"
] | null | null | null | Scaffold_Code/test scripts/test10_vm.py | tzortzispanagiotis/nbc-blockchain-python | 4e59bfd3f8aa6fb72ce89f430909a1d5c90629e2 | [
"MIT"
] | null | null | null | Scaffold_Code/test scripts/test10_vm.py | tzortzispanagiotis/nbc-blockchain-python | 4e59bfd3f8aa6fb72ce89f430909a1d5c90629e2 | [
"MIT"
] | 1 | 2021-03-20T20:18:40.000Z | 2021-03-20T20:18:40.000Z | import requests, json, time
from multiprocessing.dummy import Pool
# Thread pool (multiprocessing.dummy => threads, not processes) used to fire
# HTTP requests at the nodes concurrently.
pool = Pool(100)
# One transaction buffer per node; filled from the fixture files in __main__.
transactions0 = []
transactions1 = []
transactions2 = []
transactions3 = []
transactions4 = []
transactions5 = []
transactions6 = []
transactions7 = []
transactions8 = []
transactions9 = []
# Logical node-id lookup.
# NOTE(review): this mapping is not referenced in the visible code -- confirm
# whether it is still needed.
nodeid = {
    'id0': 0,
    'id1': 1,
    'id2': 2,
    'id3': 3,
    'id4': 4,
    'id5': 5,
    'id6': 6,
    'id7': 7,
    'id8': 8,
    'id9': 9,
}
# Base URL of each node's REST API (two Flask instances per host, on
# consecutive ports).
node = {
    '0': 'http://192.168.0.1:5000',
    '1': 'http://192.168.0.2:5001',
    '2': 'http://192.168.0.3:5002',
    '3': 'http://192.168.0.4:5003',
    '4': 'http://192.168.0.5:5004',
    '5': 'http://192.168.0.1:5005',
    '6': 'http://192.168.0.2:5006',
    '7': 'http://192.168.0.3:5007',
    '8': 'http://192.168.0.4:5008',
    '9': 'http://192.168.0.5:5009'
}
if __name__ == '__main__':
    # The original script repeated every step ten times by hand (one stanza
    # per node).  The behavior is identical here, expressed as loops.

    # Fill the module-level buffers (transactions0 .. transactions9) in
    # place from each node's fixture file, so other code can keep referring
    # to them by name.
    all_transactions = [transactions0, transactions1, transactions2,
                        transactions3, transactions4, transactions5,
                        transactions6, transactions7, transactions8,
                        transactions9]
    for i, bucket in enumerate(all_transactions):
        with open('../assignment_docs/transactions/10nodes/transactions%d.txt' % i) as f:
            # Remove the trailing newline from every line.
            lines = [line.rstrip('\n') for line in f]
        for line in lines:
            bucket.append(line.split(' '))

    futures = []

    # Nodes 1-9 register themselves with the bootstrap node (node 0).
    for i in range(1, 10):
        requests.get(node[str(i)] + '/selfregister')

    # Start the benchmark timer on every node.
    for i in range(10):
        requests.get(node[str(i)] + '/timerstart')

    # Kick off the worker loop asynchronously on every node.  These futures
    # are intentionally not awaited (matching the original behavior).
    for i in range(10):
        futures.append(pool.apply_async(requests.get,
                                        [node[str(i)] + '/startwork']))

    # Give the nodes a moment to come up before submitting transactions.
    time.sleep(5)

    # Submit each node's transactions concurrently and wait for completion.
    futures = []
    for i, bucket in enumerate(all_transactions):
        futures.append(pool.apply_async(trans, [bucket, str(i)]))
    for future in futures:
        future.get()
b2d93ba833cf1e0c7961b1add21037470175c381 | 1,550 | py | Python | api/standup/utils/email.py | adoval4/standup | 307200b46952c8129a36931103920d3200640b83 | [
"BSD-2-Clause"
] | null | null | null | api/standup/utils/email.py | adoval4/standup | 307200b46952c8129a36931103920d3200640b83 | [
"BSD-2-Clause"
] | 11 | 2020-02-12T02:27:29.000Z | 2022-03-12T00:08:22.000Z | api/standup/utils/email.py | adoval4/standup | 307200b46952c8129a36931103920d3200640b83 | [
"BSD-2-Clause"
] | null | null | null | # django
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
# utiltities
import threading
def send_mail(subject, content, recipients, is_html=False):
    """
    Sends email using EmailThread class.

    The ``.start()`` call dispatches the send asynchronously so the caller
    does not block on the mail backend.  ``EmailThread`` is defined
    elsewhere in this module.

    :param subject: subject line of the message.
    :param content: message body; treated as HTML when ``is_html`` is True.
    :param recipients: iterable of recipient email addresses.
    :param is_html: whether ``content`` should be sent as HTML.
    """
    EmailThread(subject, content, recipients, is_html).start()
def send_template_mail(subject, template_name, context, recipients, is_html):
    """
    Send email using EmailThread class with a rendered template.

    Does nothing (returns None) when ``recipients`` is empty, so callers
    may pass an unfiltered recipient list safely.

    :param subject: subject line of the message.
    :param template_name: name of the Django template to render.
    :param context: context dict passed to the template.
    :param recipients: iterable of recipient email addresses.
    :param is_html: whether the rendered body should be sent as HTML.
    """
    # Idiomatic emptiness test (was: ``if len(recipients) == 0``).
    if not recipients:
        return None
    content = render_to_string(template_name, context)
    send_mail(subject, content, recipients, is_html)
def send_html_template_mail(subject, template_name, context, recipients):
    """
    Render ``template_name`` with ``context`` and email it as HTML.
    """
    send_template_mail(
        subject, template_name, context, recipients, is_html=True
    )
def send_text_template_mail(subject, template_name, context, recipients):
    """
    Render ``template_name`` with ``context`` and email it as plain text.
    """
    send_template_mail(
        subject, template_name, context, recipients, is_html=False
    )
| 24.21875 | 77 | 0.762581 |
b2d93cfb63dcf1ebc579a1abfad61711545c68bf | 628 | py | Python | app/main/controller/sample_controller.py | Eliotdoesprogramming/python.flask.sqlalchemy.Rest_Api_Template | 3f0a98ae4676aef9ecdf0df70eb9d1990fee6182 | [
"MIT"
] | null | null | null | app/main/controller/sample_controller.py | Eliotdoesprogramming/python.flask.sqlalchemy.Rest_Api_Template | 3f0a98ae4676aef9ecdf0df70eb9d1990fee6182 | [
"MIT"
] | null | null | null | app/main/controller/sample_controller.py | Eliotdoesprogramming/python.flask.sqlalchemy.Rest_Api_Template | 3f0a98ae4676aef9ecdf0df70eb9d1990fee6182 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from service.api_service import Service | 39.25 | 80 | 0.703822 |
b2d981ecabea84bee53271f8bc9c6bdef3c97cef | 3,533 | py | Python | supervised/preprocessing/datetime_transformer.py | sourcery-ai-bot/mljar-supervised | f60f4ac65516ac759e4b84a198205480a56ada64 | [
"MIT"
] | null | null | null | supervised/preprocessing/datetime_transformer.py | sourcery-ai-bot/mljar-supervised | f60f4ac65516ac759e4b84a198205480a56ada64 | [
"MIT"
] | null | null | null | supervised/preprocessing/datetime_transformer.py | sourcery-ai-bot/mljar-supervised | f60f4ac65516ac759e4b84a198205480a56ada64 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import datetime
import json
| 32.712963 | 69 | 0.565242 |
b2da249453d5ecc88736b8a335fffd1d8b76e78e | 10,592 | py | Python | tac/gui/dashboards/controller.py | fetchai/agents-tac | 9e7de7cf6a43fff789972f6d7a3ed906858009e0 | [
"Apache-2.0"
] | 29 | 2019-07-17T08:58:19.000Z | 2021-12-08T19:25:22.000Z | tac/gui/dashboards/controller.py | fetchai/agents-tac | 9e7de7cf6a43fff789972f6d7a3ed906858009e0 | [
"Apache-2.0"
] | 90 | 2019-07-03T09:19:15.000Z | 2022-01-20T10:37:48.000Z | tac/gui/dashboards/controller.py | fetchai/agents-tac | 9e7de7cf6a43fff789972f6d7a3ed906858009e0 | [
"Apache-2.0"
] | 8 | 2019-07-12T11:06:54.000Z | 2020-05-29T18:54:51.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module containing the controller dashboard and related classes."""
import argparse
import json
import os
from typing import Optional, Dict
import numpy as np
from tac.gui.dashboards.base import start_visdom_server, Dashboard
from tac.agents.controller.base.states import Game
from tac.platform.game.stats import GameStats
DEFAULT_ENV_NAME = "tac_simulation_env_main"
def parse_args(argv=None):
    """Parse the command line arguments.

    :param argv: optional list of argument strings.  Defaults to None, in
        which case argparse falls back to ``sys.argv[1:]`` (the original
        behavior).  Accepting an explicit list makes the function testable.
    :return: the parsed argument namespace with ``datadir`` (required) and
        ``env_name`` (optional, defaults to None).
    """
    parser = argparse.ArgumentParser(
        "dashboard", description="Data Visualization for the simulation outcome"
    )
    parser.add_argument(
        "--datadir",
        type=str,
        required=True,
        help="The path to the simulation data folder.",
    )
    parser.add_argument(
        "--env_name",
        type=str,
        default=None,
        help="The name of the environment to create.",
    )
    arguments = parser.parse_args(argv)
    return arguments
if __name__ == "__main__":
    arguments = parse_args()
    process = start_visdom_server()

    d = ControllerDashboard.from_datadir(arguments.datadir, arguments.env_name)
    d.start()
    d.update()

    # Block until the user interrupts with Ctrl-C.  The try/finally wraps
    # the whole wait loop so the dashboard and the visdom server are torn
    # down exactly once, on exit.  (Previously the try/finally sat *inside*
    # the loop body, so pressing Enter ran the cleanup on every iteration
    # while the loop kept going against a stopped dashboard.)
    try:
        while True:
            input()
    except KeyboardInterrupt:
        pass
    finally:
        d.stop()
        process.terminate()
| 31.244838 | 88 | 0.558912 |
b2da2f7f294d67b0e66ebfb594c13ddc9e71fc29 | 13,568 | py | Python | timecat/apps/users/views.py | LinXueyuanStdio/memp | c6f6609cec7c54ec23881838dacb5f4ffba2e68c | [
"Apache-2.0"
] | null | null | null | timecat/apps/users/views.py | LinXueyuanStdio/memp | c6f6609cec7c54ec23881838dacb5f4ffba2e68c | [
"Apache-2.0"
] | null | null | null | timecat/apps/users/views.py | LinXueyuanStdio/memp | c6f6609cec7c54ec23881838dacb5f4ffba2e68c | [
"Apache-2.0"
] | null | null | null | import json
from django.urls import reverse
from django.shortcuts import render
from django.db.models import Q
from django.views.generic.base import View
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import make_password
from django.http import HttpResponse,HttpResponseRedirect
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
from apps.operation.models import UserCourse,UserFavorite,UserMessage
from apps.organization.models import CourseOrg,Teacher
from apps.course.models import Course
from apps.utils.email_send import send_register_eamil
from apps.utils.mixin_utils import LoginRequiredMixin
from .models import CustomUser,EmailVerifyRecord
from .models import Banner
from .forms import LoginForm,RegisterForm,ForgetPwdForm,ModifyPwdForm
from .forms import UploadImageForm,UserInfoForm
from .serializers import UserSerializer
#
# ModelBackendauthenticate
#
#
#
from django.shortcuts import render_to_response
| 34.176322 | 108 | 0.627874 |
b2dc609945782154da069152ac5405f35cce2d2b | 25 | py | Python | SWSIdentity/Controllers/__init__.py | vanzhiganov/identity | 90936482cc23251ba06121658e6a0a9251e30b3b | [
"Apache-2.0"
] | 1 | 2018-03-26T21:18:52.000Z | 2018-03-26T21:18:52.000Z | SWSIdentity/Controllers/__init__.py | vanzhiganov/identity | 90936482cc23251ba06121658e6a0a9251e30b3b | [
"Apache-2.0"
] | null | null | null | SWSIdentity/Controllers/__init__.py | vanzhiganov/identity | 90936482cc23251ba06121658e6a0a9251e30b3b | [
"Apache-2.0"
] | null | null | null | __all__ = [
'Users'
] | 8.333333 | 11 | 0.48 |
b2dc6929b15ae89ed88d8e3ef2cd52728dbd110e | 5,794 | py | Python | gui/Ui_sales_transaction.py | kim-song/kimsong-apriori | 0f2a4a2b749989ad1305da3836e7404c09482534 | [
"MIT"
] | 2 | 2020-07-17T09:36:56.000Z | 2020-12-11T11:36:11.000Z | gui/Ui_sales_transaction.py | kimsongsao/kimsong-apriori | 0f2a4a2b749989ad1305da3836e7404c09482534 | [
"MIT"
] | null | null | null | gui/Ui_sales_transaction.py | kimsongsao/kimsong-apriori | 0f2a4a2b749989ad1305da3836e7404c09482534 | [
"MIT"
] | 1 | 2020-07-17T09:23:15.000Z | 2020-07-17T09:23:15.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'd:\MITE12\ksapriori\gui\sales_transaction.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 48.283333 | 101 | 0.681049 |
b2dddf20dfadcb66e5efbea5b82b5fce448e57cf | 727 | py | Python | setup.py | dboddie/Beeware-Hello-VOC | a22ffc58121ead7acac850c6edb60576bdb66993 | [
"MIT"
] | 35 | 2017-09-21T03:45:33.000Z | 2021-11-18T01:18:13.000Z | setup.py | dboddie/Beeware-Hello-VOC | a22ffc58121ead7acac850c6edb60576bdb66993 | [
"MIT"
] | 6 | 2017-09-25T12:34:31.000Z | 2021-07-05T03:40:19.000Z | setup.py | dboddie/Beeware-Hello-VOC | a22ffc58121ead7acac850c6edb60576bdb66993 | [
"MIT"
] | 10 | 2018-02-03T12:51:31.000Z | 2022-02-08T18:54:48.000Z | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='{{ cookiecutter.app_name }}',
version='0.0.1',
description='{{ cookiecutter.description }}',
author='{{ cookiecutter.author }}',
author_email='{{ cookiecutter.author_email }}',
license='{{ cookiecutter.license }}',
packages=find_packages(
exclude=['docs', 'tests', 'android']
),
classifiers=[
'Development Status :: 1 - Planning',
'License :: OSI Approved :: {{ cookiecutter.license }}',
],
install_requires=[
],
options={
'app': {
'formal_name': '{{ cookiecutter.formal_name }}',
'bundle': '{{ cookiecutter.bundle }}'
},
}
)
| 25.964286 | 64 | 0.569464 |
b2de089e75f188f3482c29fc33bcbb7a91997599 | 27,975 | py | Python | src/app.py | chunyuyuan/NEWS_2019_network-master | 0eec84b383156c82fbd64d900dce578700575d99 | [
"MIT"
] | null | null | null | src/app.py | chunyuyuan/NEWS_2019_network-master | 0eec84b383156c82fbd64d900dce578700575d99 | [
"MIT"
] | null | null | null | src/app.py | chunyuyuan/NEWS_2019_network-master | 0eec84b383156c82fbd64d900dce578700575d99 | [
"MIT"
] | null | null | null | from flask import Flask, request, render_template, send_file, Response
import io
import base64
import csv
import json
import time
from collections import OrderedDict
import numpy
import pandas as pd
from numpy import genfromtxt
from flask import jsonify
from flask_cors import CORS
from LoadingNetwork import EchoWebSocket
import shutil
import gc
from tornado.wsgi import WSGIContainer
from tornado.web import Application, FallbackHandler
from tornado.websocket import WebSocketHandler
from tornado.ioloop import IOLoop
app = Flask('flasknado')
#app = Flask(__name__)
app.debug = True
CORS(app)
##initial netwrok csv data############################
rawdata = open('NetworkWithDistance.txt')
with open('NetworkWithDistance.txt') as f:
rawdata = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end
# of each line
rawdata = [x.strip() for x in rawdata]
my_data = genfromtxt('networkwithdist.csv', delimiter=',')
# my_data=numpy.delete(my_data,(0),axis=0)
header = ['id', 'id_to', 'lon', 'lat', 'basinid']
frame = pd.DataFrame(my_data, columns=header)
data = []
MY_GLOBAL = []
with open('tempcsv.csv') as f:
for line in f:
temp = line.strip().split(',')
data.append(temp)
#############################
data1 = []
with open('MyFile1.txt') as f:
r = 0
for line in f:
if(r > 0):
data2 = []
# print(line)
temp = line.split("\",")
data2.append(temp[0][1:])
temp1 = temp[1].split(",[")
data2.append(temp1[0])
data2.append(temp1[1][:-2])
data1.append(data2)
r += 1
header = ['celllist', 'cellid', 'cellto']
frame_celllist = pd.DataFrame(data1, columns=header)
frame_celllist = frame_celllist.drop_duplicates()
del data1[:]
##################
data_c = []
with open('powerplant_cell_loc.csv') as f:
r = 0
for line in f:
if(r > 0):
data_cc = line.split(",")
data_c.append(data_cc)
# print(line)
r += 1
header = ['cellid', 'loc']
frame_cell = pd.DataFrame(data_c, columns=header)
frame_cell = frame_cell.drop_duplicates()
del data_c[:]
########################################################
import os
import sys
from SimpleHTTPServer import SimpleHTTPRequestHandler
import BaseHTTPServer
# class MyHTTPRequestHandler(SimpleHTTPRequestHandler):
# def translate_path(self,path):
# path = SimpleHTTPRequestHandler.translate_path(self,path)
# if os.path.isdir(path):
# for base in "index", "default":
# for ext in ".html", ".htm", ".txt":
# index = path + "/" + base + ext
# if os.path.exists(index):
# return index
# return path
# def test(HandlerClass = MyHTTPRequestHandler,
# ServerClass = BaseHTTPServer.HTTPServer):
# BaseHTTPServer.test(HandlerClass, ServerClass)
##################travesal network upstream############
'''def find_upstream(value):
gc.collect()
ii=0
li = []
temp=[]
a=frame.ix[int(value)]
temp.append(a)
#print(MY_GLOBAL)
MY_GLOBAL[:]=[]
#x=data[int(value)]
#x=frame[frame['id']==a['id_to']]
#print x
i=0
z=0
zz=0
while zz<len(temp):
item=temp[zz]
zz+=1
##print(z,len(temp))
## item=temp.pop()
## print item
#x=frame[frame['id_to']==item['id']]
x=data[int(float(item['id']))]
#print x
i=1
while i<len(x) :
# d = OrderedDict()
# xx=x.loc[x.index[i]]
xx=frame.ix[int(float(x[i]))]
# d['type'] = 'Feature'
# d['geometry'] = {
# 'type': 'MultiLineString',
# 'coordinates': [[[float(xx['lon']),float(xx['lat'])],[float(item['lon']), float(item['lat'])]]]
# }
# d['properties'] = { "id":int(xx['id']),"id_to":int(xx['id_to']),"lon": float(xx['lon']),"lat": float(xx['lat'])
# }
# li.append(d)
i+=1
# ii+=1
##if ii%1000==0:
## print ii
temp.append(xx)
print(len(temp))
while z<len(temp):
item=temp[z]
z+=1
##print(z,len(temp))
## item=temp.pop()
## print item
#x=frame[frame['id_to']==item['id']]
x=data[int(float(item['id']))]
#print x
i=1
while i<len(x) :
d = OrderedDict()
#xx=x.loc[x.index[i]]
xx=frame.ix[int(float(x[i]))]
d['type'] = 'Feature'
d['geometry'] = {
'type': 'MultiLineString',
'coordinates': [[[float(xx['lon']),float(xx['lat'])],[float(item['lon']), float(item['lat'])]]]
}
d['properties'] = { "id":int(xx['id']),"id_to":int(xx['id_to']),"lon": float(xx['lon']),"lat": float(xx['lat'])
}
li.append(d)
d = OrderedDict()
#xx=x.loc[x.index[i]]
# xx=frame.ix[int(float(x[i]))]
i+=1
ii+=1
if ii%1000==0 or (ii+1)/len(temp)==1:
MY_GLOBAL.append((int)((ii+1)/(len(temp)* 1.0)*100))
## print(checkInt,ii,len(temp))
## print ii
# temp.append(xx)
#d = OrderedDict()
#d['type'] = 'FeatureCollection'
#d['features'] = li
#print li
print(ii)
return li,200'''
##################travesal network downstream############
##################travesal network downstream############
#######################pp upstream#######################
#######################pp downstream#######################
class WebSocket(WebSocketHandler):
# return base64.b64encode(
# output.getvalue()) + "***" + str1 + "***" + str2 + "***" + str3, 200
#
if __name__ == "__main__":
container = WSGIContainer(app)
server = Application([
(r'/websocket/', WebSocket),
(r'/we/', EchoWebSocket),
(r'.*', FallbackHandler, dict(fallback=container))
])
server.listen(5000)
IOLoop.instance().start()
# test()
| 16.913543 | 360 | 0.43378 |
b2df22a6002e061c7249b53a9c05a87dc4e272cf | 697 | py | Python | src/oca/models/__init__.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/oca/models/__init__.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/oca/models/__init__.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# flake8: noqa
"""
Our City App
Our City App internal apis # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from oca.models.home_screen import HomeScreen
from oca.models.home_screen_bottom_navigation import HomeScreenBottomNavigation
from oca.models.home_screen_bottom_sheet import HomeScreenBottomSheet
from oca.models.home_screen_bottom_sheet_header import HomeScreenBottomSheetHeader
from oca.models.home_screen_content import HomeScreenContent
from oca.models.home_screen_navigation_button import HomeScreenNavigationButton
| 30.304348 | 82 | 0.826399 |
b2df334cefd6c85d1832ef9e9b56545aefa460f8 | 428 | py | Python | calibrate.py | jamesbowman/k40-python | 02054fe7f0bf727910c58d634aae7ba4799a2e2c | [
"BSD-3-Clause"
] | 2 | 2018-07-11T00:36:34.000Z | 2018-09-03T06:58:29.000Z | calibrate.py | jamesbowman/k40-python | 02054fe7f0bf727910c58d634aae7ba4799a2e2c | [
"BSD-3-Clause"
] | null | null | null | calibrate.py | jamesbowman/k40-python | 02054fe7f0bf727910c58d634aae7ba4799a2e2c | [
"BSD-3-Clause"
] | null | null | null | import svgwrite
if __name__ == '__main__':
dwg = svgwrite.Drawing('test.svg', size=('150mm', '150mm'), viewBox=('0 0 150 150'))
cross(dwg, 5, 5)
cross(dwg, 145, 5)
cross(dwg, 145, 145)
cross(dwg, 5, 145)
dwg.save()
| 23.777778 | 88 | 0.556075 |
b2df89e53c0696d724b6e84ea61023d0f33bec67 | 597 | py | Python | Pokemon Identifier/app.py | sethuiyer/mlhub | 6be271c0070a0c0bb90dd92aceb344e7415bb1db | [
"MIT"
] | 22 | 2016-12-28T16:14:18.000Z | 2019-09-22T16:39:29.000Z | Pokemon Identifier/app.py | sethuiyer/mlhub | 6be271c0070a0c0bb90dd92aceb344e7415bb1db | [
"MIT"
] | 6 | 2020-03-24T17:48:55.000Z | 2022-03-12T00:04:58.000Z | Pokemon Identifier/app.py | sethuiyer/mlhub | 6be271c0070a0c0bb90dd92aceb344e7415bb1db | [
"MIT"
] | 17 | 2017-01-17T09:45:14.000Z | 2020-04-21T07:19:39.000Z | from poketype import PokemonTypeIdentifier
from flask import Flask, request, make_response,jsonify
import os
id = PokemonTypeIdentifier()
app = Flask(__name__,static_url_path='/static')
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8001))
app.run(debug=True,host='0.0.0.0',port=port,use_reloader=False)
| 33.166667 | 67 | 0.721943 |
b2e0de9c060c5c18b08f58d054b60642948a19ab | 13,975 | py | Python | SViTE/backup/sparselearning/snip.py | VITA-Group/SViTE | b0c62fd153c8b0b99917ab935ee76925c9de1149 | [
"MIT"
] | 50 | 2021-05-29T00:52:45.000Z | 2022-03-17T11:39:47.000Z | SViTE/backup/sparselearning/snip.py | VITA-Group/SViTE | b0c62fd153c8b0b99917ab935ee76925c9de1149 | [
"MIT"
] | 2 | 2022-01-16T07:24:52.000Z | 2022-03-29T01:56:24.000Z | SViTE/backup/sparselearning/snip.py | VITA-Group/SViTE | b0c62fd153c8b0b99917ab935ee76925c9de1149 | [
"MIT"
] | 6 | 2021-06-27T22:24:16.000Z | 2022-01-17T02:45:32.000Z | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import math
import copy
import types
| 34.506173 | 129 | 0.606082 |
b2e1c343a37dcd72cd8cbdfb5d12721802108339 | 296 | py | Python | digin/leads/views.py | yhung119/cs411-digin | 55e884ff0c26ca06056219e6cb641dc1ceae1f56 | [
"Apache-2.0"
] | null | null | null | digin/leads/views.py | yhung119/cs411-digin | 55e884ff0c26ca06056219e6cb641dc1ceae1f56 | [
"Apache-2.0"
] | null | null | null | digin/leads/views.py | yhung119/cs411-digin | 55e884ff0c26ca06056219e6cb641dc1ceae1f56 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from leads.models import Lead
from leads.serializers import LeadSerializer
from rest_framework import generics
# Create your views here.
| 29.6 | 49 | 0.820946 |
b2e290a686a1065fd69b5d2e7d362f288caf266f | 117 | py | Python | Module01/OOP/FirstClassDef.py | fenglihanxiao/Python | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | [
"MIT"
] | null | null | null | Module01/OOP/FirstClassDef.py | fenglihanxiao/Python | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | [
"MIT"
] | null | null | null | Module01/OOP/FirstClassDef.py | fenglihanxiao/Python | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | [
"MIT"
] | null | null | null | """ First class definition"""
cat1 = Cat()
cat2 = Cat()
cat3 = Cat() | 9 | 29 | 0.589744 |
b2e37635f83fbc719c3828b77b744bc8d962608e | 870 | py | Python | ddtrace/contrib/aiobotocore/__init__.py | tophatmonocle/dd-trace-py | 7db12f1c398c07cd5baf91c571aed672dbb6496d | [
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/aiobotocore/__init__.py | tophatmonocle/dd-trace-py | 7db12f1c398c07cd5baf91c571aed672dbb6496d | [
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/aiobotocore/__init__.py | tophatmonocle/dd-trace-py | 7db12f1c398c07cd5baf91c571aed672dbb6496d | [
"BSD-3-Clause"
] | null | null | null | """
The aiobotocore integration will trace all AWS calls made with the ``aiobotocore``
library. This integration isn't enabled when applying the default patching.
To enable it, you must run ``patch_all(botocore=True)``
::
import aiobotocore.session
from ddtrace import patch
# If not patched yet, you can patch botocore specifically
patch(aiobotocore=True)
# This will report spans with the default instrumentation
aiobotocore.session.get_session()
lambda_client = session.create_client('lambda', region_name='us-east-1')
# This query generates a trace
lambda_client.list_functions()
"""
from ...utils.importlib import require_modules
required_modules = ['aiobotocore.client']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch
__all__ = ['patch']
| 28.064516 | 82 | 0.74023 |
b2e5f383923565aa615eda28c9950c8812f8c749 | 3,161 | py | Python | luna/__init__.py | ktemkin/luna | 661dc89f7f60ba8a51165f7f8037ad2d5854cf34 | [
"BSD-3-Clause"
] | 4 | 2020-02-11T18:40:02.000Z | 2020-04-03T13:07:38.000Z | luna/__init__.py | ktemkin/luna | 661dc89f7f60ba8a51165f7f8037ad2d5854cf34 | [
"BSD-3-Clause"
] | null | null | null | luna/__init__.py | ktemkin/luna | 661dc89f7f60ba8a51165f7f8037ad2d5854cf34 | [
"BSD-3-Clause"
] | null | null | null | #
# This file is part of LUNA.
#
import shutil
import tempfile
import argparse
from nmigen import Elaboratable
from .gateware.platform import get_appropriate_platform
def top_level_cli(fragment, *pos_args, **kwargs):
""" Runs a default CLI that assists in building and running gateware.
If the user's options resulted in the board being programmed, this returns the fragment
that was programmed onto the board. Otherwise, it returns None.
"""
parser = argparse.ArgumentParser(description="Gateware generation/upload script for '{}' gateware.".format(fragment.__class__.__name__))
parser.add_argument('--output', '-o', metavar='filename', help="Build and output a bitstream to the given file.")
parser.add_argument('--erase', '-E', action='store_true',
help="Clears the relevant FPGA's flash before performing other options.")
parser.add_argument('--upload', '-U', action='store_true',
help="Uploads the relevant design to the target hardware. Default if no options are provided.")
parser.add_argument('--flash', '-F', action='store_true',
help="Flashes the relevant design to the target hardware's configuration flash.")
parser.add_argument('--dry-run', '-D', action='store_true',
help="When provided as the only option; builds the relevant bitstream without uploading or flashing it.")
parser.add_argument('--keep-files', action='store_true',
help="Keeps the local files in the default `build` folder.")
args = parser.parse_args()
platform = get_appropriate_platform()
# If this isn't a fragment directly, interpret it as an object that will build one.
if callable(fragment):
fragment = fragment(*pos_args, **kwargs)
# If we have no other options set, build and upload the relevant file.
if (args.output is None and not args.flash and not args.erase and not args.dry_run):
args.upload = True
# Once the device is flashed, it will self-reconfigure, so we
# don't need an explicitly upload step; and it implicitly erases
# the flash, so we don't need an erase step.
if args.flash:
args.erase = False
args.upload = False
# Build the relevant gateware, uploading if requested.
build_dir = "build" if args.keep_files else tempfile.mkdtemp()
# Build the relevant files.
try:
if args.erase:
platform.toolchain_erase()
products = platform.build(fragment,
do_program=args.upload,
build_dir=build_dir
)
# If we're flashing the FPGA's flash, do so.
if args.flash:
platform.toolchain_flash(products)
# If we're outputting a file, write it.
if args.output:
bitstream = products.get("top.bit")
with open(args.output, "wb") as f:
f.write(bitstream)
# Return the fragment we're working with, for convenience.
if args.upload or args.flash:
return fragment
# Clean up any directories we've created.
finally:
if not args.keep_files:
shutil.rmtree(build_dir)
return None
| 37.188235 | 140 | 0.669092 |
b2e71e54f3ede13551ca6c960041e280c9f907b3 | 761 | py | Python | htdocs/geojson/hsearch.py | akrherz/depbackend | d43053319227a3aaaf7553c823e8e2e748fbe95d | [
"Apache-2.0"
] | null | null | null | htdocs/geojson/hsearch.py | akrherz/depbackend | d43053319227a3aaaf7553c823e8e2e748fbe95d | [
"Apache-2.0"
] | 1 | 2022-02-17T17:43:52.000Z | 2022-02-17T17:43:52.000Z | htdocs/geojson/hsearch.py | akrherz/depbackend | d43053319227a3aaaf7553c823e8e2e748fbe95d | [
"Apache-2.0"
] | 2 | 2021-11-28T11:41:32.000Z | 2022-01-26T17:12:03.000Z | """search for HUC12 by name."""
import json
from paste.request import parse_formvars
from pyiem.util import get_dbconn
def search(q):
"""Search for q"""
pgconn = get_dbconn("idep")
cursor = pgconn.cursor()
d = dict(results=[])
cursor.execute(
"""SELECT huc_12, hu_12_name from huc12
WHERE hu_12_name ~* %s and scenario = 0 LIMIT 10""",
(q,),
)
for row in cursor:
d["results"].append(dict(huc_12=row[0], name=row[1]))
return d
def application(environ, start_response):
"""DO Something"""
form = parse_formvars(environ)
q = form.get("q", "")
headers = [("Content-type", "application/json")]
start_response("200 OK", headers)
return [json.dumps(search(q)).encode("ascii")]
| 23.78125 | 61 | 0.622865 |
b2e84727d200add756532d87eca711fb92b61dde | 1,570 | py | Python | setup.py | Peque/mmsim | b3a78ad0119db6ee8df349a89559ea8006c85db1 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Peque/mmsim | b3a78ad0119db6ee8df349a89559ea8006c85db1 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Peque/mmsim | b3a78ad0119db6ee8df349a89559ea8006c85db1 | [
"BSD-3-Clause"
] | null | null | null | """
Setup module.
"""
from setuptools import setup
from mmsim import __version__
setup(
name='mmsim',
version=__version__,
description='A simple Micromouse Maze Simulator server',
long_description="""The server can load different mazes and any client
can connect to it to ask for the current position walls, move from
one cell to another and visualize the simulated micromouse state.""",
url='https://github.com/Theseus/mmsim',
author='Miguel Snchez de Len Peque',
author_email='peque@neosit.es',
license='BSD License',
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
],
keywords='micromouse maze server simulator',
entry_points={
'console_scripts': [
'mmsim = mmsim.commands:launch',
],
},
packages=['mmsim'],
install_requires=[
'click',
'numpy',
'pyqtgraph',
'pyqt5',
'pyzmq'],
extras_require={
'docs': [
'doc8',
'sphinx',
'sphinx_rtd_theme',
],
'lint': [
'flake8',
'flake8-bugbear',
'flake8-per-file-ignores',
'flake8-quotes',
'pep8-naming',
],
'test': [
'pytest',
'pytest-cov',
],
},
)
| 26.610169 | 77 | 0.54586 |
b2e911b19926607cd6e241f7b09f34ddcf0231cd | 2,556 | py | Python | fastinference.py | wkcw/VariousDiscriminator-CycleGan | de9c033aeed1c429f37c531056c1f74cb51a771c | [
"MIT"
] | null | null | null | fastinference.py | wkcw/VariousDiscriminator-CycleGan | de9c033aeed1c429f37c531056c1f74cb51a771c | [
"MIT"
] | null | null | null | fastinference.py | wkcw/VariousDiscriminator-CycleGan | de9c033aeed1c429f37c531056c1f74cb51a771c | [
"MIT"
] | null | null | null | """
A fast version of the original inference.
Constructing one graph to infer all the samples.
Originaly one graph for each sample.
"""
import tensorflow as tf
import os
from model import CycleGAN
import utils
import scipy.misc
import numpy as np
try:
from os import scandir
except ImportError:
# Python 2 polyfill module
from scandir import scandir
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('model', '', 'model path (.pb)')
tf.flags.DEFINE_string('input', 'data/apple', 'input image path')
tf.flags.DEFINE_string('output', 'samples/apple', 'output image path')
tf.flags.DEFINE_integer('image_size', 128, 'image size, default: 128')
if __name__ == '__main__':
tf.app.run()
| 31.555556 | 91 | 0.658842 |
b2eaad3cf6ba61735dc17fc8cd249e0de4bd056b | 8,879 | py | Python | examples/json_serializer.py | trisongz/lazycls | 701bad1a358ed3bb136347d0c5eb81de3201f6a3 | [
"MIT"
] | 2 | 2021-12-02T00:13:16.000Z | 2022-02-26T11:18:33.000Z | examples/json_serializer.py | trisongz/lazycls | 701bad1a358ed3bb136347d0c5eb81de3201f6a3 | [
"MIT"
] | null | null | null | examples/json_serializer.py | trisongz/lazycls | 701bad1a358ed3bb136347d0c5eb81de3201f6a3 | [
"MIT"
] | null | null | null | """
This test will show how to use the various Json serializer(s)
"""
from lazy.io.pathz import get_path
from lazy.serialize import SimdJson, OrJson, Json, Serializer
from lazy.utils import get_logger, timer
logger = get_logger('lazytest')
test_file = './files/naics_codes.json'
test_path = get_path(test_file, resolve=True)
test_keys = [
'112930', '11299', '112990', '1131', '11311', '113110', '1132', '11321', '113210', '1133', '11331', '113310', '1141', '11411', '114111', '114112', '114119', '1142', '11421', '114210', '1151', '11511', '115111', '115112', '115113', '115114', '115115', '115116', '1152', '11521', '115210', '1153', '11531', '115310', '2111', '21112', '211120', '21113', '211130', '2121', '21211', '212111', '212112', '212113', '2122', '21221', '212210', '21222', '212221', '212222', '21223', '212230', '21229', '212291', '212299', '2123', '21231', '212311', '212312', '212313', '212319', '21232', '212321', '212322', '212324', '212325', '21239', '212391', '212392', '212393', '212399', '2131', '21311', '213111', '213112', '213113', '213114', '213115', '2211', '22111', '221111', '221112', '221113', '221114', '221115', '221116', '221117', '221118', '22112', '221121', '221122', '2212', '22121', '221210', '2213', '22131', '221310', '22132', '221320', '22133', '221330', '2361', '23611', '236115', '236116', '236117', '236118', '2362', '23621', '236210', '23622', '236220', '2371', '23711', '237110', '23712', '237120', '23713', '237130', '2372', '23721', '237210', '2373', '23731', '237310', '2379', '23799', '237990', '2381', '23811', '238110',
'32412', '324121', '324122', '32419', '324191', '324199', '3251', '32511', '325110', '32512', '325120', '32513', '325130', '32518', '325180', '32519', '325193', '325194', '325199', '3252', '32521', '325211', '325212', '32522', '325220', '3253', '32531', '325311', '325312', '325314', '32532', '325320', '3254', '32541', '325411', '325412', '325413', '325414', '3255', '32551', '325510', '32552', '325520', '3256', '32561', '325611', '325612', '325613', '32562', '325620', '3259', '32591', '325910', '32592', '325920', '32599', '325991', '325992', '325998', '3261', '32611', '326111', '326112', '326113', '32612', '326121', '326122', '32613', '326130', '32614', '326140', '32615', '326150', '32616', '326160', '32619', '326191', '326199', '3262', '32621', '326211', '326212', '32622', '326220', '32629', '326291', '326299', '3271', '32711', '327110', '32712', '327120', '3272', '32721', '327211', '327212', '327213', '327215', '3273', '32731', '327310', '32732', '327320', '32733', '327331', '327332', '32739', '327390', '3274', '32741', '327410', '32742', '327420', '3279', '32791', '327910', '32799', '327991', '327992', '327993', '327999', '3311', '33111', '331110', '3312', '33121', '331210', '33122', '3312',
'333413', '333414', '333415', '3335', '33351', '333511', '333514', '333515', '333517', '333519', '3336', '33361', '333611', '333612', '333613', '333618', '3339', '33391', '333912', '333914', '33392', '333921', '333922', '333923', '333924', '33399', '333991', '333992', '333993', '333994', '333995', '333996', '333997', '333999', '3341', '33411', '334111', '334112', '334118', '3342', '33421', '334210', '33422', '334220', '33429', '334290', '3343', '33431', '334310', '3344', '33441', '334412', '334413', '334416', '334417', '334418', '334419', '3345', '33451', '334510', '334511', '334512', '334513', '334514', '334515', '334516', '334517', '334519', '3346', '33461', '334613', '334614', '3351', '33511', '335110', '33512', '335121', '335122', '335129', '3352', '33521', '335210', '33522', '335220', '3353', '33531', '335311', '335312', '335313', '335314', '3359', '33591', '335911', '335912', '33592', '335921', '335929', '33593', '335931', '335932', '33599', '335991', '335999', '3361', '33611', '336111',
'519190', '5211', '52111', '521110', '5221', '52211', '522110', '52212', '522120', '52213', '522130', '52219', '522190', '5222', '52221', '522210', '52222', '522220', '52229', '522291', '522292', '522293', '522294', '522298', '5223', '52231', '522310', '52232', '522320', '52239', '522390', '5231', '52311', '523110', '52312', '523120', '52313', '523130', '52314', '523140', '5232', '52321', '523210', '5239', '52391',
'7113', '71131', '711310', '71132', '711320', '7114', '71141', '711410', '7115', '71151', '711510', '7121', '71211', '712110', '71212', '712120', '71213', '712130', '71219', '712190', '7131', '71311', '713110', '71312', '713120', '7132', '71321', '713210', '71329', '713290', '7139', '71391', '713910', '71392', '713920', '71393', '713930', '71394', '713940', '71395', '713950', '71399', '713990', '7211', '72111', '721110', '72112', '721120', '72119', '721191', '721199', '7212', '72121', '721211', '721214', '7213', '72131', '721310', '7223', '72231', '722310', '72232', '722320', '72233', '722330', '7224', '72241', '722410', '7225', '72251', '722511', '722513', '722514', '722515', '8111', '81111', '811111', '811112', '811113', '811118', '81112', '811121', '811122', '81119', '811191', '811192', '811198', '8112', '81121', '811211', '811212', '811213', '811219', '8113', '81131', '811310', '8114', '81141', '811411', '811412', '81142', '811420', '81143', '811430', '81149', '811490', '8121', '81211', '812111',
]
"""
Expected Results
Time to Read Text: 0.00095 secs
[OrJson] Time to Load with 2077 Total Items: 0.00378 secs
[OrJson] Time to Read 520 Items: 2e-05 secs
[OrJson] Time to Dump 520 Items: 0.00097 secs
[OrJson] Completed Test in: 0.00497 secs
----------------------------------------------------------------
Time to Read Text: 0.00022 secs
[Json] Time to Load: 0.00234 secs
[Json] Time to [Loads]Read 520 Items: 2e-05 secs
[Json] Time to Parse: 0.0032 secs
[Json] Time to [Parse]Read 520 Items: 0.00237 secs
[Json] Time to Dump 520 Items: 0.00238 secs
[Json] Completed Test in: 0.00814 secs
----------------------------------------------------------------
Time to Read Text: 0.00023 secs
[SimdJson] Time to Parse: 0.00051 secs
[SimdJson] Time to Load 2077 Keys: 0.00214 secs
[SimdJson] Time to Read 520 Items: 0.00011 secs
[SimdJson] Time to Dump 520 Items: 0.00365 secs
[SimdJson] Completed Test in: 0.00611 secs
----------------------------------------------------------------
"""
if __name__ == '__main__':
test_orjson()
test_json()
test_simdjson()
| 75.245763 | 1,231 | 0.582273 |
b2ec6ef197e2fb9ff51a3b958346b6692ead91be | 1,018 | py | Python | donatello/utils.py | adrianchifor/donatello | 5a384b3203965b16324e9d322e83a8f1f1b27fd1 | [
"Apache-2.0"
] | 7 | 2018-12-01T10:41:16.000Z | 2021-04-08T19:04:46.000Z | donatello/utils.py | adrianchifor/donatello | 5a384b3203965b16324e9d322e83a8f1f1b27fd1 | [
"Apache-2.0"
] | 4 | 2018-12-01T15:31:58.000Z | 2018-12-01T23:59:52.000Z | donatello/utils.py | adrianchifor/donatello | 5a384b3203965b16324e9d322e83a8f1f1b27fd1 | [
"Apache-2.0"
] | 2 | 2018-12-01T10:41:29.000Z | 2018-12-02T15:56:30.000Z | import http.client
def non_zero_balance(balance):
"""
Return the balance with zero-value coins removed
"""
non_zero_balance = {}
for coin, amount in balance.items():
if amount > 0:
non_zero_balance[coin] = amount
return non_zero_balance
def supported_coins_balance(balance, tickers):
"""
Return the balance with non-supported coins removed
"""
supported_coins_balance = {}
for coin in balance.keys():
if coin != "BTC":
if f"{coin}/BTC" in tickers:
supported_coins_balance[coin] = balance[coin]
else:
try:
supported_coins_balance["BTC"] = balance[coin]
except KeyError:
print("BTC not in balance")
return supported_coins_balance
| 24.829268 | 62 | 0.609037 |
b2ee1fd8d6c226332f2055e2d1c443475db998ec | 2,442 | py | Python | omop_cdm/utility_programs/load_concept_files_into_db.py | jhajagos/CommonDataModelMapper | 65d2251713e5581b76cb16e36424d61fb194c901 | [
"Apache-2.0"
] | 1 | 2019-06-14T02:26:35.000Z | 2019-06-14T02:26:35.000Z | omop_cdm/utility_programs/load_concept_files_into_db.py | jhajagos/CommonDataModelMapper | 65d2251713e5581b76cb16e36424d61fb194c901 | [
"Apache-2.0"
] | null | null | null | omop_cdm/utility_programs/load_concept_files_into_db.py | jhajagos/CommonDataModelMapper | 65d2251713e5581b76cb16e36424d61fb194c901 | [
"Apache-2.0"
] | 1 | 2019-08-12T20:19:28.000Z | 2019-08-12T20:19:28.000Z | import argparse
import json
import sys
import os
try:
from utility_functions import load_csv_files_into_db, generate_vocabulary_load
except(ImportError):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir, os.path.pardir, "src")))
from utility_functions import load_csv_files_into_db, generate_vocabulary_load
if __name__ == "__main__":
arg_parse_obj = argparse.ArgumentParser(description="Load concept/vocabulary files into database")
arg_parse_obj.add_argument("-c", "--config-file-name", dest="config_file_name", help="JSON config file", default="../hi_config.json")
arg_parse_obj.add_argument("--connection-uri", dest="connection_uri", default=None)
arg_parse_obj.add_argument("--schema", dest="schema", default=None)
arg_parse_obj.add_argument("--load-concept_ancestor", default=False, action="store_true", dest="load_concept_ancestor")
arg_parse_obj.add_argument("--full-concept-files", default=False, action="store_true", dest="load_full_concept_files")
arg_obj = arg_parse_obj.parse_args()
print("Reading config file '%s'" % arg_obj.config_file_name)
with open(arg_obj.config_file_name) as f:
config = json.load(f)
if arg_obj.connection_uri is None:
connection_uri = config["connection_uri"]
else:
connection_uri = arg_obj.connection_uri
if arg_obj.schema is None:
schema = config["schema"]
else:
schema = arg_obj.schema
if arg_obj.load_full_concept_files:
vocabularies_to_load = ["CONCEPT", "CONCEPT_ANCESTOR", "CONCEPT_CLASS", "CONCEPT_RELATIONSHIP",
"CONCEPT_SYNONYM", "DOMAIN", "DRUG_STRENGTH", "RELATIONSHIP", "VOCABULARY"]
elif arg_obj.load_concept_ancestor:
vocabularies_to_load = ["CONCEPT", "CONCEPT_ANCESTOR"]
else:
vocabularies_to_load = ["CONCEPT"]
main(config["json_map_directory"], connection_uri, schema, vocabularies=vocabularies_to_load)
| 37 | 137 | 0.72154 |
b2ef1e91c18ddeb4d7361450a7de67ebdb4b2e6e | 1,588 | py | Python | triplinker/tests/test_views/test_non_dynamic_urls/test_accounts_views/tests.py | GonnaFlyMethod/triplinker | f4189e499ad48fd9102dd2211a8884078136eae9 | [
"MIT"
] | null | null | null | triplinker/tests/test_views/test_non_dynamic_urls/test_accounts_views/tests.py | GonnaFlyMethod/triplinker | f4189e499ad48fd9102dd2211a8884078136eae9 | [
"MIT"
] | null | null | null | triplinker/tests/test_views/test_non_dynamic_urls/test_accounts_views/tests.py | GonnaFlyMethod/triplinker | f4189e499ad48fd9102dd2211a8884078136eae9 | [
"MIT"
] | null | null | null | # Python modules.
import pytest
# Django modules.
from django.urls import reverse
from django.test import TestCase
# !Triplinker modules:
from tests.helpers.create_user import new_user
| 25.206349 | 47 | 0.691436 |
b2efa45aafbdbf20dc2a3beb6bb3e66667896bb3 | 414 | py | Python | DNN_Experiments/MaskRCNN/convert.py | wmjpillow/FlameDetectionAPP | c3761c9e15adccbd084b17cd6b6f63c561c7f856 | [
"MIT"
] | 2 | 2019-12-28T21:46:18.000Z | 2020-01-10T03:41:03.000Z | DNN_Experiments/MaskRCNN/convert.py | wmjpillow/FlameDetectionAPP | c3761c9e15adccbd084b17cd6b6f63c561c7f856 | [
"MIT"
] | 10 | 2019-12-28T21:31:19.000Z | 2020-04-12T20:01:58.000Z | DNN_Experiments/MaskRCNN/convert.py | wmjpillow/FlameDetectionAPP | c3761c9e15adccbd084b17cd6b6f63c561c7f856 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# convert jpg tp png
from glob import glob
import cv2
pngs = glob('./*.jpg')
for j in pngs:
img = cv2.imread(j)
cv2.imwrite(j[:-3] + 'png', img)
# delete jpg files
import glob
import os
dir = "/Users/wangmeijie/ALLImportantProjects/FlameDetectionAPP/Models/MaskRCNN/02_26_2020/Mask_RCNN/dataset/train"
for jpgpath in glob.iglob(os.path.join(dir, '*.jpg')):
os.remove(jpgpath) | 21.789474 | 115 | 0.707729 |
b2effe0f8a82275a9712cccbfe5467301c5502b1 | 4,544 | py | Python | src/title2Id_redirect_parser.py | mjstrobl/WEXEA | 0af0be1cdb93fc00cd81f885aa15ef8d6579b304 | [
"Apache-2.0"
] | 10 | 2020-06-14T15:46:53.000Z | 2021-04-29T15:02:23.000Z | src/title2Id_redirect_parser.py | mjstrobl/WEXEA | 0af0be1cdb93fc00cd81f885aa15ef8d6579b304 | [
"Apache-2.0"
] | 3 | 2021-08-25T16:16:45.000Z | 2022-02-10T04:29:10.000Z | src/title2Id_redirect_parser.py | mjstrobl/WEXEA | 0af0be1cdb93fc00cd81f885aa15ef8d6579b304 | [
"Apache-2.0"
] | 1 | 2021-02-17T17:44:06.000Z | 2021-02-17T17:44:06.000Z | import xml.sax
import re
import os
import json
import time
current_milli_time = lambda: int(round(time.time() * 1000))
RE_LINKS = re.compile(r'\[{2}(.*?)\]{2}', re.DOTALL | re.UNICODE)
IGNORED_NAMESPACES = [
'wikipedia', 'category', 'file', 'portal', 'template',
'mediaWiki', 'user', 'help', 'book', 'draft', 'wikiProject',
'special', 'talk', 'image','module'
]
"""MediaWiki namespaces that ought to be ignored."""
if (__name__ == "__main__"):
title2Id = {}
id2Title = {}
redirects = {}
config = json.load(open('config/config.json'))
wikipath = config['wikipath']
outputpath = config['outputpath']
dictionarypath = outputpath + 'dictionaries/'
mode = 0o755
os.mkdir(outputpath, mode)
os.mkdir(dictionarypath, mode)
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
Handler = WikiHandler(title2Id,id2Title,redirects)
parser.setContentHandler(Handler)
parser.parse(wikipath)
print('done')
with open(dictionarypath + 'title2Id.json', 'w') as f:
json.dump(title2Id, f)
with open(dictionarypath + 'id2Title.json', 'w') as f:
json.dump(id2Title, f)
with open(dictionarypath + 'redirects.json', 'w') as f:
json.dump(redirects, f)
| 36.352 | 150 | 0.568442 |
b2f00de04b4a4965219cd8965a1067b02342ac09 | 1,571 | bzl | Python | rules/starlark_configurations/cc_test/defs.bzl | CyberFlameGO/examples | 87a4812cb23f7e7969d74cc073579fb82540c0f6 | [
"Apache-2.0"
] | 572 | 2015-09-02T20:26:41.000Z | 2022-03-30T07:43:22.000Z | rules/starlark_configurations/cc_test/defs.bzl | CyberFlameGO/examples | 87a4812cb23f7e7969d74cc073579fb82540c0f6 | [
"Apache-2.0"
] | 158 | 2015-08-31T20:21:50.000Z | 2022-03-20T20:13:14.000Z | rules/starlark_configurations/cc_test/defs.bzl | CyberFlameGO/examples | 87a4812cb23f7e7969d74cc073579fb82540c0f6 | [
"Apache-2.0"
] | 408 | 2015-08-31T20:05:14.000Z | 2022-03-28T02:36:44.000Z | # We can transition on native options using this
# //command_line_option:<option-name> syntax
_BUILD_SETTING = "//command_line_option:test_arg"
_test_arg_transition = transition(
implementation = _test_arg_transition_impl,
inputs = [],
outputs = [_BUILD_SETTING],
)
transition_rule_test = rule(
implementation = _test_transition_rule_impl,
attrs = {
"actual_test": attr.label(cfg = _test_arg_transition, executable = True),
"_allowlist_function_transition": attr.label(
default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
),
},
test = True,
)
| 33.425532 | 85 | 0.700827 |
b2f00fc324ac567aafda05e99a1cb8336f8b4e7a | 7,116 | py | Python | test_cases/apache_avro_adapter.py | QratorLabs/ritfest2016 | cddaaa9e827f5315d2e426c083029124649d6f50 | [
"MIT"
] | null | null | null | test_cases/apache_avro_adapter.py | QratorLabs/ritfest2016 | cddaaa9e827f5315d2e426c083029124649d6f50 | [
"MIT"
] | null | null | null | test_cases/apache_avro_adapter.py | QratorLabs/ritfest2016 | cddaaa9e827f5315d2e426c083029124649d6f50 | [
"MIT"
] | null | null | null | import io
import avro.io
try:
from avro.schema import parse
except ImportError:
from avro.schema import Parse as parse
| 32.054054 | 76 | 0.604272 |
b2f17c3de89d94e2aba8cc14a42ef09cd569851a | 41 | py | Python | tests/test_vec/__init__.py | karin0018/EduNLP | 172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f | [
"Apache-2.0"
] | 18 | 2021-02-15T13:10:42.000Z | 2022-03-17T12:57:34.000Z | tests/test_vec/__init__.py | karin0018/EduNLP | 172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f | [
"Apache-2.0"
] | 81 | 2021-06-02T07:45:20.000Z | 2022-03-29T15:21:32.000Z | tests/test_vec/__init__.py | karin0018/EduNLP | 172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f | [
"Apache-2.0"
] | 29 | 2021-05-18T08:34:58.000Z | 2022-03-12T00:19:09.000Z | # coding: utf-8
# 2021/5/30 @ tongshiwei
| 13.666667 | 24 | 0.658537 |
b2f1d9ab07a35f78efc77316abd28bebe9c01e76 | 4,004 | py | Python | tests/test_nameko_prometheus.py | alfaro28/nameko-prometheus | 0f50006b1510eef375712a1b7c4bd00d5f08eb1b | [
"Apache-2.0"
] | null | null | null | tests/test_nameko_prometheus.py | alfaro28/nameko-prometheus | 0f50006b1510eef375712a1b7c4bd00d5f08eb1b | [
"Apache-2.0"
] | null | null | null | tests/test_nameko_prometheus.py | alfaro28/nameko-prometheus | 0f50006b1510eef375712a1b7c4bd00d5f08eb1b | [
"Apache-2.0"
] | null | null | null | import pytest
from nameko.events import EventDispatcher, event_handler
from nameko.rpc import rpc
from nameko.testing.services import entrypoint_hook, entrypoint_waiter
from nameko.web.handlers import http
from prometheus_client import REGISTRY, Counter
from nameko_prometheus import PrometheusMetrics
my_counter = Counter("my_counter", "My counter")
| 33.932203 | 108 | 0.727273 |
b2f264acc957c78592942a789c9b0334224d0fdd | 604 | py | Python | testing/forms/products.py | Miki761000/storage_podari_s_luibov | a82caccfd40391f6a2609538e4e629d3b113aca9 | [
"MIT"
] | null | null | null | testing/forms/products.py | Miki761000/storage_podari_s_luibov | a82caccfd40391f6a2609538e4e629d3b113aca9 | [
"MIT"
] | null | null | null | testing/forms/products.py | Miki761000/storage_podari_s_luibov | a82caccfd40391f6a2609538e4e629d3b113aca9 | [
"MIT"
] | null | null | null | from django import forms
from warehouse.models import Product, ProductAdditionalInformation
| 25.166667 | 66 | 0.622517 |
b2f42395c99cb32b176d6e1140d0291edc965ff1 | 1,210 | py | Python | src/amuse/community/sei/test_sei.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | 1 | 2019-12-28T22:47:51.000Z | 2019-12-28T22:47:51.000Z | src/amuse/community/sei/test_sei.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | null | null | null | src/amuse/community/sei/test_sei.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | 2 | 2021-11-19T04:41:37.000Z | 2021-11-20T02:11:17.000Z | from amuse.test.amusetest import TestWithMPI
from amuse.units import nbody_system
from amuse.units import units
import os
import sys
import numpy
import math
from amuse.community.sei.interface import SeiInterface
from amuse.community.sei.interface import Sei
from amuse import datamodel
| 29.512195 | 80 | 0.661157 |
b2f4e0a17b1d50d9e628464d9c96026fcbff615c | 20,063 | py | Python | datetimeparser/enums.py | aridevelopment-de/datetimeparser | df63d6f7ed0c362f6d6b4e55d61b973b7fcf3f56 | [
"MIT"
] | 12 | 2021-11-05T21:17:21.000Z | 2022-03-30T17:53:50.000Z | datetimeparser/enums.py | aridevelopment-de/datetimeparser | df63d6f7ed0c362f6d6b4e55d61b973b7fcf3f56 | [
"MIT"
] | 45 | 2021-11-14T16:05:04.000Z | 2022-03-29T18:51:31.000Z | datetimeparser/enums.py | aridevelopment-de/datetimeparser | df63d6f7ed0c362f6d6b4e55d61b973b7fcf3f56 | [
"MIT"
] | 1 | 2021-11-14T13:44:37.000Z | 2021-11-14T13:44:37.000Z | from datetime import datetime, timedelta
from enum import Enum, auto
from dateutil.relativedelta import relativedelta
from .baseclasses import Constant, MethodEnum
from .formulars import days_feb, eastern_calc, thanksgiving_calc, year_start
| 64.719355 | 140 | 0.648358 |
b2f5acf01cacb2c9f5deb948c74813e3ef341bde | 2,295 | py | Python | ontospy/extras/hacks/server.py | michaelyryi/Ontospy | f1a18daa296285ea02a97d1331e94140e801edc4 | [
"MIT"
] | null | null | null | ontospy/extras/hacks/server.py | michaelyryi/Ontospy | f1a18daa296285ea02a97d1331e94140e801edc4 | [
"MIT"
] | null | null | null | ontospy/extras/hacks/server.py | michaelyryi/Ontospy | f1a18daa296285ea02a97d1331e94140e801edc4 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
UTILITY TO START A LOCAL SERVER
Copyright (c) 2015 __Michele Pasin__ <http://www.michelepasin.org>. All rights reserved.
Shows local repo within a server
"""
MODULE_VERSION = 0.1
USAGE = "@todo"
import time, optparse, os, rdflib, sys, webbrowser
import SimpleHTTPServer, SocketServer
from .. import main
from ..core.ontospy import Ontospy
from ..core.utils import *
DEFAULT_PORT = 7899
# in order to avoid waiting for a minute after restar
def startServer(port=DEFAULT_PORT, location=None, openbrowser=True):
""" """
if location:
os.chdir(location)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = NoBrokenServer(("", port), Handler)
if openbrowser:
webbrowser.open('http://127.0.0.1:' + str(port))
print("serving at port", port)
httpd.serve_forever()
def parse_options():
"""
parse_options() -> opts, args
Parse any command-line options given returning both
the parsed options and arguments.
https://docs.python.org/2/library/optparse.html
"""
parser = optparse.OptionParser(usage=USAGE, version=ontospy.VERSION)
parser.add_option("-p", "--port",
action="store", type="int", default=DEFAULT_PORT, dest="port",
help="A number specifying which port to use for the server.")
opts, args = parser.parse_args()
# if not opts.all and not opts.query:
# parser.print_help()
# sys.exit(0)
return opts, args
def main():
""" command line script """
# boilerplate
print("OntoSpy " + ontospy.VERSION)
ontospy.get_or_create_home_repo()
ONTOSPY_LOCAL_MODELS = ontospy.get_home_location()
opts, args = parse_options()
sTime = time.time()
# switch dir and start server
startServer(port=DEFAULT_PORT, location=ONTOSPY_LOCAL_MODELS)
# finally:
# print some stats....
eTime = time.time()
tTime = eTime - sTime
printDebug("-" * 10)
printDebug("Time: %0.2fs" % tTime)
if __name__ == '__main__':
# from .. import main
try:
main()
sys.exit(0)
except KeyboardInterrupt as e: # Ctrl-C
raise e
| 19.285714 | 124 | 0.712418 |
b2f71e9c0092918763ba4db64292488e285e5cbe | 14,866 | py | Python | python/dolfinx_contact/unbiased/nitsche_unbiased.py | jorgensd/asimov-contact | 08704ade6343c346bc54dfd38186983cc7ab4485 | [
"MIT"
] | null | null | null | python/dolfinx_contact/unbiased/nitsche_unbiased.py | jorgensd/asimov-contact | 08704ade6343c346bc54dfd38186983cc7ab4485 | [
"MIT"
] | null | null | null | python/dolfinx_contact/unbiased/nitsche_unbiased.py | jorgensd/asimov-contact | 08704ade6343c346bc54dfd38186983cc7ab4485 | [
"MIT"
] | null | null | null | # Copyright (C) 2021 Sarah Roggendorf
#
# SPDX-License-Identifier: MIT
from typing import Callable, Tuple, Union
import dolfinx.common as _common
import dolfinx.fem as _fem
import dolfinx.log as _log
import dolfinx.mesh as _mesh
import dolfinx_cuas
import numpy as np
import ufl
from dolfinx.cpp.graph import AdjacencyList_int32
from dolfinx.cpp.mesh import MeshTags_int32
from petsc4py import PETSc as _PETSc
import dolfinx_contact
import dolfinx_contact.cpp
from dolfinx_contact.helpers import (epsilon, lame_parameters,
rigid_motions_nullspace, sigma_func)
kt = dolfinx_contact.cpp.Kernel
__all__ = ["nitsche_unbiased"]
def nitsche_unbiased(mesh: _mesh.Mesh, mesh_tags: list[MeshTags_int32],
domain_marker: MeshTags_int32,
surfaces: AdjacencyList_int32,
dirichlet: list[Tuple[int, Callable[[np.ndarray], np.ndarray]]],
neumann: list[Tuple[int, Callable[[np.ndarray], np.ndarray]]],
contact_pairs: list[Tuple[int, int]],
body_forces: list[Tuple[int, Callable[[np.ndarray], np.ndarray]]],
physical_parameters: dict[str, Union[bool, np.float64, int]],
nitsche_parameters: dict[str, np.float64],
quadrature_degree: int = 5, form_compiler_params: dict = None, jit_params: dict = None,
petsc_options: dict = None, newton_options: dict = None, initial_guess=None,
outfile: str = None, order: int = 1) -> Tuple[_fem.Function, int, int, float]:
"""
Use custom kernel to compute the contact problem with two elastic bodies coming into contact.
Parameters
==========
mesh
The input mesh
mesh_tags
A list of meshtags. The first element must contain the mesh_tags for all puppet surfaces,
Dirichlet-surfaces and Neumann-surfaces
All further elements may contain candidate_surfaces
domain_marker
marker for subdomains where a body force is applied
surfaces
Adjacency list. Links of i are meshtag values for contact surfaces in ith mesh_tag in mesh_tags
dirichlet
List of Dirichlet boundary conditions as pairs of (meshtag value, function), where function
is a function to be interpolated into the dolfinx function space
neumann
Same as dirichlet for Neumann boundary conditions
contact_pairs:
list of pairs (i, j) marking the ith surface as a puppet surface and the jth surface
as the corresponding candidate surface
physical_parameters
Optional dictionary with information about the linear elasticity problem.
Valid (key, value) tuples are: ('E': float), ('nu', float), ('strain', bool)
nitsche_parameters
Optional dictionary with information about the Nitsche configuration.
Valid (keu, value) tuples are: ('gamma', float), ('theta', float) where theta can be -1, 0 or 1 for
skew-symmetric, penalty like or symmetric enforcement of Nitsche conditions
displacement
The displacement enforced on Dirichlet boundary
quadrature_degree
The quadrature degree to use for the custom contact kernels
form_compiler_params
Parameters used in FFCX compilation of this form. Run `ffcx --help` at
the commandline to see all available options. Takes priority over all
other parameter values, except for `scalar_type` which is determined by
DOLFINX.
jit_params
Parameters used in CFFI JIT compilation of C code generated by FFCX.
See https://github.com/FEniCS/dolfinx/blob/main/python/dolfinx/jit.py
for all available parameters. Takes priority over all other parameter values.
petsc_options
Parameters that is passed to the linear algebra backend
PETSc. For available choices for the 'petsc_options' kwarg,
see the `PETSc-documentation
<https://petsc4py.readthedocs.io/en/stable/manual/ksp/>`
newton_options
Dictionary with Newton-solver options. Valid (key, item) tuples are:
("atol", float), ("rtol", float), ("convergence_criterion", "str"),
("max_it", int), ("error_on_nonconvergence", bool), ("relaxation_parameter", float)
initial_guess
A functon containing an intial guess to use for the Newton-solver
outfile
File to append solver summary
order
The order of mesh and function space
"""
form_compiler_params = {} if form_compiler_params is None else form_compiler_params
jit_params = {} if jit_params is None else jit_params
petsc_options = {} if petsc_options is None else petsc_options
newton_options = {} if newton_options is None else newton_options
strain = physical_parameters.get("strain")
if strain is None:
raise RuntimeError("Need to supply if problem is plane strain (True) or plane stress (False)")
else:
plane_strain = bool(strain)
_E = physical_parameters.get("E")
if _E is not None:
E = np.float64(_E)
else:
raise RuntimeError("Need to supply Youngs modulus")
if physical_parameters.get("nu") is None:
raise RuntimeError("Need to supply Poisson's ratio")
else:
nu = physical_parameters.get("nu")
# Compute lame parameters
mu_func, lambda_func = lame_parameters(plane_strain)
mu = mu_func(E, nu)
lmbda = lambda_func(E, nu)
sigma = sigma_func(mu, lmbda)
# Nitche parameters and variables
theta = nitsche_parameters.get("theta")
if theta is None:
raise RuntimeError("Need to supply theta for Nitsche imposition of boundary conditions")
_gamma = nitsche_parameters.get("gamma")
if _gamma is None:
raise RuntimeError("Need to supply Coercivity/Stabilization parameter for Nitsche condition")
else:
gamma: np.float64 = _gamma * E
lifting = nitsche_parameters.get("lift_bc", False)
# Functions space and FEM functions
V = _fem.VectorFunctionSpace(mesh, ("CG", order))
u = _fem.Function(V)
v = ufl.TestFunction(V)
du = ufl.TrialFunction(V)
h = ufl.CellDiameter(mesh)
n = ufl.FacetNormal(mesh)
# Integration measure and ufl part of linear/bilinear form
# metadata = {"quadrature_degree": quadrature_degree}
dx = ufl.Measure("dx", domain=mesh, subdomain_data=domain_marker)
ds = ufl.Measure("ds", domain=mesh, # metadata=metadata,
subdomain_data=mesh_tags[0])
J = ufl.inner(sigma(du), epsilon(v)) * dx
F = ufl.inner(sigma(u), epsilon(v)) * dx
for contact_pair in contact_pairs:
surface_value = int(surfaces.links(0)[contact_pair[0]])
J += - 0.5 * theta * h / gamma * ufl.inner(sigma(du) * n, sigma(v) * n) * \
ds(surface_value)
F += - 0.5 * theta * h / gamma * ufl.inner(sigma(u) * n, sigma(v) * n) * \
ds(surface_value)
# Dirichle boundary conditions
bcs = []
if lifting:
tdim = mesh.topology.dim
for bc in dirichlet:
facets = mesh_tags[0].find(bc[0])
cells = _mesh.compute_incident_entities(mesh, facets, tdim - 1, tdim)
u_bc = _fem.Function(V)
u_bc.interpolate(bc[1], cells)
u_bc.x.scatter_forward()
bcs.append(_fem.dirichletbc(u_bc, _fem.locate_dofs_topological(V, tdim - 1, facets)))
else:
for bc in dirichlet:
f = _fem.Function(V)
f.interpolate(bc[1])
F += - ufl.inner(sigma(u) * n, v) * ds(bc[0])\
- theta * ufl.inner(sigma(v) * n, u - f) * \
ds(bc[0]) + gamma / h * ufl.inner(u - f, v) * ds(bc[0])
J += - ufl.inner(sigma(du) * n, v) * ds(bc[0])\
- theta * ufl.inner(sigma(v) * n, du) * \
ds(bc[0]) + gamma / h * ufl.inner(du, v) * ds(bc[0])
# Neumann boundary conditions
for bc in neumann:
g = _fem.Function(V)
g.interpolate(bc[1])
F -= ufl.inner(g, v) * ds(bc[0])
# body forces
for bf in body_forces:
f = _fem.Function(V)
f.interpolate(bf[1])
F -= ufl.inner(f, v) * dx(bf[0])
# Custom assembly
# create contact class
with _common.Timer("~Contact: Init"):
contact = dolfinx_contact.cpp.Contact(mesh_tags, surfaces, contact_pairs,
V._cpp_object, quadrature_degree=quadrature_degree)
with _common.Timer("~Contact: Distance maps"):
for i in range(len(contact_pairs)):
contact.create_distance_map(i)
# pack constants
consts = np.array([gamma, theta])
# Pack material parameters mu and lambda on each contact surface
with _common.Timer("~Contact: Interpolate coeffs (mu, lmbda)"):
V2 = _fem.FunctionSpace(mesh, ("DG", 0))
lmbda2 = _fem.Function(V2)
lmbda2.interpolate(lambda x: np.full((1, x.shape[1]), lmbda))
mu2 = _fem.Function(V2)
mu2.interpolate(lambda x: np.full((1, x.shape[1]), mu))
entities = []
with _common.Timer("~Contact: Compute active entities"):
for pair in contact_pairs:
entities.append(contact.active_entities(pair[0]))
material = []
with _common.Timer("~Contact: Pack coeffs (mu, lmbda"):
for i in range(len(contact_pairs)):
material.append(dolfinx_cuas.pack_coefficients([mu2, lmbda2], entities[i]))
# Pack celldiameter on each surface
h_packed = []
with _common.Timer("~Contact: Compute and pack celldiameter"):
surface_cells = np.unique(np.hstack([entities[i][:, 0] for i in range(len(contact_pairs))]))
h_int = _fem.Function(V2)
expr = _fem.Expression(h, V2.element.interpolation_points)
h_int.interpolate(expr, surface_cells)
for i in range(len(contact_pairs)):
h_packed.append(dolfinx_cuas.pack_coefficients([h_int], entities[i]))
# Pack gap, normals and test functions on each surface
gaps = []
normals = []
test_fns = []
with _common.Timer("~Contact: Pack gap, normals, testfunction"):
for i in range(len(contact_pairs)):
gaps.append(contact.pack_gap(i))
normals.append(contact.pack_ny(i, gaps[i]))
test_fns.append(contact.pack_test_functions(i, gaps[i]))
# Concatenate all coeffs
coeffs_const = []
for i in range(len(contact_pairs)):
coeffs_const.append(np.hstack([material[i], h_packed[i], gaps[i], normals[i], test_fns[i]]))
# Generate Jacobian data structures
J_custom = _fem.form(J, form_compiler_params=form_compiler_params, jit_params=jit_params)
with _common.Timer("~Contact: Generate Jacobian kernel"):
kernel_jac = contact.generate_kernel(kt.Jac)
with _common.Timer("~Contact: Create matrix"):
J = contact.create_matrix(J_custom)
# Generate residual data structures
F_custom = _fem.form(F, form_compiler_params=form_compiler_params, jit_params=jit_params)
with _common.Timer("~Contact: Generate residual kernel"):
kernel_rhs = contact.generate_kernel(kt.Rhs)
with _common.Timer("~Contact: Create vector"):
b = _fem.petsc.create_vector(F_custom)
# coefficient arrays
num_coeffs = contact.coefficients_size()
coeffs = np.array([np.zeros((len(entities[i]), num_coeffs)) for i in range(len(contact_pairs))])
newton_solver = dolfinx_contact.NewtonSolver(mesh.comm, J, b, coeffs)
# Set matrix-vector computations
newton_solver.set_residual(compute_residual)
newton_solver.set_jacobian(compute_jacobian_matrix)
newton_solver.set_coefficients(compute_coefficients)
# Set rigid motion nullspace
null_space = rigid_motions_nullspace(V)
newton_solver.A.setNearNullSpace(null_space)
# Set Newton solver options
newton_solver.set_newton_options(newton_options)
# Set initial guess
if initial_guess is None:
u.x.array[:] = 0
else:
u.x.array[:] = initial_guess.x.array[:]
# Set Krylov solver options
newton_solver.set_krylov_options(petsc_options)
dofs_global = V.dofmap.index_map_bs * V.dofmap.index_map.size_global
_log.set_log_level(_log.LogLevel.OFF)
# Solve non-linear problem
timing_str = f"~Contact: {id(dofs_global)} Solve Nitsche"
with _common.Timer(timing_str):
n, converged = newton_solver.solve(u)
if outfile is not None:
viewer = _PETSc.Viewer().createASCII(outfile, "a")
newton_solver.krylov_solver.view(viewer)
newton_time = _common.timing(timing_str)
if not converged:
raise RuntimeError("Newton solver did not converge")
u.x.scatter_forward()
print(f"{dofs_global}\n Number of Newton iterations: {n:d}\n",
f"Number of Krylov iterations {newton_solver.krylov_iterations}\n", flush=True)
return u, n, newton_solver.krylov_iterations, newton_time[1]
| 42.965318 | 108 | 0.654245 |
b2f7f4cc70879c961d4345ed522c0b9c510c8bf6 | 5,218 | py | Python | scratch/movielens-mongodb.py | crcsmnky/movielens-data-exports | f316f1367abef80a1abce64d3adb3bd3effc6365 | [
"Apache-2.0"
] | 1 | 2022-02-01T19:44:36.000Z | 2022-02-01T19:44:36.000Z | scratch/movielens-mongodb.py | crcsmnky/movielens-data-exports | f316f1367abef80a1abce64d3adb3bd3effc6365 | [
"Apache-2.0"
] | null | null | null | scratch/movielens-mongodb.py | crcsmnky/movielens-data-exports | f316f1367abef80a1abce64d3adb3bd3effc6365 | [
"Apache-2.0"
] | null | null | null | """
usage: python movielens-mongodb.py [movies] [ratings] [links]
"""
import sys
import re
import csv
import os
# import tmdbsimple as tmdb
from pymongo import MongoClient
from pymongo import ASCENDING, DESCENDING
from datetime import datetime
from time import sleep
if __name__ == '__main__':
main()
| 26.622449 | 116 | 0.526639 |
b2f7fb8b602bfdd673abb75e7f89ca8dc32301c9 | 1,400 | py | Python | coretabs ATM py/withdraw.py | attia7/Test | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | null | null | null | coretabs ATM py/withdraw.py | attia7/Test | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | 11 | 2020-03-24T17:40:26.000Z | 2022-01-13T01:42:38.000Z | coretabs ATM py/withdraw.py | attia7/AttiaGit | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | null | null | null | balance = 700
papers=[100, 50, 10, 5,4,3,2,1]
balance = withdraw(balance, 777)
balance = withdraw(balance, 276)
balance = withdraw1(balance, 276)
balance = withdraw(balance, 34)
balance = withdraw1(balance, 5)
balance = withdraw1(balance, 500) | 30.434783 | 103 | 0.512857 |
b2f8b3e8d1857a6e5f87e42cb97fafb1e51c9432 | 1,126 | py | Python | dynamicform/widgets.py | cdgagne/django-dynamicform | f60c549d01c6c091addaf0b4121367d7a1d917f0 | [
"MIT"
] | null | null | null | dynamicform/widgets.py | cdgagne/django-dynamicform | f60c549d01c6c091addaf0b4121367d7a1d917f0 | [
"MIT"
] | null | null | null | dynamicform/widgets.py | cdgagne/django-dynamicform | f60c549d01c6c091addaf0b4121367d7a1d917f0 | [
"MIT"
] | null | null | null | from django import forms
from django.forms.utils import flatatt
from django.utils import formats
from django.utils.encoding import force_text
from django.utils.html import format_html
| 41.703704 | 91 | 0.652753 |
b2fa232a245de5b9da5a3e07a8eb834b4df0cb1b | 1,406 | py | Python | ocdskingfisherprocess/maindatabase/migrations/versions/8e3f80979dc9_change_unique_constraint_on_collection.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | 1 | 2019-04-11T10:17:32.000Z | 2019-04-11T10:17:32.000Z | ocdskingfisherprocess/maindatabase/migrations/versions/8e3f80979dc9_change_unique_constraint_on_collection.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | 282 | 2018-12-20T16:49:22.000Z | 2022-02-01T00:48:10.000Z | ocdskingfisherprocess/maindatabase/migrations/versions/8e3f80979dc9_change_unique_constraint_on_collection.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | 7 | 2019-04-15T13:36:18.000Z | 2021-03-02T16:25:41.000Z | """Change unique constraint on collection
Revision ID: 8e3f80979dc9
Revises: 3d5fae27a215
Create Date: 2019-12-18 13:14:56.466907
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8e3f80979dc9'
down_revision = '3d5fae27a215'
branch_labels = None
depends_on = None
def upgrade():
"""
SELECT source_id, data_version, sample, COUNT(*) FROM collection
WHERE transform_type IS NULL or transform_type = ''
GROUP BY source_id, data_version, sample
HAVING COUNT(*) > 1;
"""
# 0 rows
op.drop_constraint('unique_collection_identifiers', 'collection')
op.create_index('unique_collection_identifiers', 'collection', ['source_id', 'data_version', 'sample'],
unique=True, postgresql_where=sa.text("transform_type = ''"))
op.execute("UPDATE collection SET transform_type = '' WHERE transform_type IS NULL")
op.alter_column('collection', 'transform_type', nullable=False)
| 32.697674 | 107 | 0.72404 |
b2fa730dbd73b043c294390890c9c13d76abf7ce | 1,685 | py | Python | tests/stdlib_test.py | misantroop/jsonpickle | 97f4a05ccffe8593458b4b787c3fc97622f23cec | [
"BSD-3-Clause"
] | null | null | null | tests/stdlib_test.py | misantroop/jsonpickle | 97f4a05ccffe8593458b4b787c3fc97622f23cec | [
"BSD-3-Clause"
] | 1 | 2019-04-03T20:19:40.000Z | 2019-04-03T20:19:40.000Z | tests/stdlib_test.py | parsons-kyle-89/jsonpickle | 2828dd4a247bbae9d37a3d78194caaaeadeb2ed2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test miscellaneous objects from the standard library"""
import uuid
import unittest
import jsonpickle
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(UUIDTestCase))
suite.addTest(unittest.makeSuite(BytesTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 30.089286 | 70 | 0.629674 |
b2fb2e91c6a7d4ac7cee9919699e52b9663ccb72 | 3,435 | py | Python | app/core.py | JoonyoungYi/lol-recommend | b92efff858e491b68c902abb6de31212c688b47e | [
"Apache-2.0"
] | null | null | null | app/core.py | JoonyoungYi/lol-recommend | b92efff858e491b68c902abb6de31212c688b47e | [
"Apache-2.0"
] | null | null | null | app/core.py | JoonyoungYi/lol-recommend | b92efff858e491b68c902abb6de31212c688b47e | [
"Apache-2.0"
] | null | null | null | import os
import time
import tensorflow as tf
import numpy as np
import pandas as pd
from .configs import *
from .models import init_models
EPOCH_NUMBER = 10000
EARLY_STOP = True
EARLY_STOP_MAX_ITER = 40
| 31.227273 | 78 | 0.546725 |
b2fce30e886d32040df251037d8a8ded7ce043ca | 3,817 | py | Python | aito/client/requests/query_api_request.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | 6 | 2019-10-16T02:35:06.000Z | 2021-02-03T13:39:43.000Z | aito/client/requests/query_api_request.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | 23 | 2020-03-17T13:16:02.000Z | 2021-04-23T15:09:51.000Z | aito/client/requests/query_api_request.py | AitoDotAI/aito-python-tools | 891d433222b04f4ff8a4eeafbb9268516fd215dc | [
"MIT"
] | null | null | null | """Aito `Query API <https://aito.ai/docs/api/#query-api>`__ Request Class"""
import re
from abc import ABC
from typing import Dict, Optional, Union, List
from .aito_request import AitoRequest, _PatternEndpoint, _PostRequest
from ..responses import SearchResponse, PredictResponse, RecommendResponse, EvaluateResponse, SimilarityResponse, \
MatchResponse, RelateResponse, HitsResponse
| 37.058252 | 119 | 0.687451 |
b2fcf5fc344b109dbc4a9623cb76fea9e977c60b | 3,417 | py | Python | tests/test_domain.py | shyams2/pychebfun | 0c1efee54829457b9e1b0d6c34259af6c002e105 | [
"BSD-3-Clause"
] | 72 | 2015-02-23T17:08:38.000Z | 2022-02-09T18:17:08.000Z | tests/test_domain.py | shyams2/pychebfun | 0c1efee54829457b9e1b0d6c34259af6c002e105 | [
"BSD-3-Clause"
] | 6 | 2015-01-19T14:12:23.000Z | 2021-11-05T08:16:27.000Z | tests/test_domain.py | shyams2/pychebfun | 0c1efee54829457b9e1b0d6c34259af6c002e105 | [
"BSD-3-Clause"
] | 14 | 2015-07-31T00:08:36.000Z | 2021-02-01T22:20:41.000Z | #!/usr/bin/env python
# coding: UTF-8
from __future__ import division
from pychebfun import Chebfun
import operator
import unittest
import pytest
from . import tools
import numpy as np
import numpy.testing as npt
#------------------------------------------------------------------------------
# Unit test for arbitrary interval Chebfuns
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Test the restrict operator
#------------------------------------------------------------------------------
from . import data
#------------------------------------------------------------------------------
# Add the arbitrary interval tests
#------------------------------------------------------------------------------
class TestArbitraryIntervals(object):
"""Test the various operations for Chebfun on arbitrary intervals"""
| 33.831683 | 79 | 0.59643 |
b2fd53048e194cb59b5b4420a7e50d932868c531 | 1,878 | py | Python | glance/rpc/common.py | Quinton/glance | 7674bc8963a3bec21f719c48f40e8a3fc0846e6f | [
"Apache-2.0"
] | 5 | 2017-04-23T05:50:36.000Z | 2019-03-12T09:45:20.000Z | glance/rpc/common.py | Quinton/glance | 7674bc8963a3bec21f719c48f40e8a3fc0846e6f | [
"Apache-2.0"
] | null | null | null | glance/rpc/common.py | Quinton/glance | 7674bc8963a3bec21f719c48f40e8a3fc0846e6f | [
"Apache-2.0"
] | 2 | 2018-08-16T11:41:18.000Z | 2018-10-21T06:56:50.000Z | #!/usr/bin/env python
#encode=utf-8
#vim: tabstop=4 shiftwidth=4 softtabstop=4
#Created on 2013-8-17
#Copyright 2013 nuoqingyun xuqifeng
import copy
import logging
import traceback
def _sage_log(log_func, mes, msg_data):
"""
"""
pass
def serialize_remote_exception(failure_info):
"""
"""
pass
def deserialize_remote_exception(conf, data):
"""
"""
pass
| 22.357143 | 73 | 0.596912 |
b2fdab74611ca607c1a5e2e63e4ac639ef552870 | 12,029 | py | Python | tests/test_simple.py | ImportTaste/WebRequest | 0cc385622624de16ec980e0c12d9080d593cab74 | [
"WTFPL"
] | 8 | 2018-06-04T09:34:28.000Z | 2021-09-16T15:21:24.000Z | tests/test_simple.py | ImportTaste/WebRequest | 0cc385622624de16ec980e0c12d9080d593cab74 | [
"WTFPL"
] | 4 | 2018-03-03T07:45:27.000Z | 2019-12-26T20:38:18.000Z | tests/test_simple.py | ImportTaste/WebRequest | 0cc385622624de16ec980e0c12d9080d593cab74 | [
"WTFPL"
] | 1 | 2019-12-26T20:36:32.000Z | 2019-12-26T20:36:32.000Z | import unittest
import socket
import json
import base64
import zlib
import gzip
import bs4
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import WebRequest
from . import testing_server
| 39.569079 | 203 | 0.750104 |
b2fdbd3d23a1257e8c2fb2f6d739b834e644b93d | 5,220 | py | Python | cplcom/moa/device/mcdaq.py | matham/cplcom | 54b1dc8445ff97bab248418d861354beb7c4e656 | [
"MIT"
] | null | null | null | cplcom/moa/device/mcdaq.py | matham/cplcom | 54b1dc8445ff97bab248418d861354beb7c4e656 | [
"MIT"
] | null | null | null | cplcom/moa/device/mcdaq.py | matham/cplcom | 54b1dc8445ff97bab248418d861354beb7c4e656 | [
"MIT"
] | null | null | null | '''Barst Measurement Computing DAQ Wrapper
==========================================
'''
from functools import partial
from pybarst.mcdaq import MCDAQChannel
from kivy.properties import NumericProperty, ObjectProperty
from moa.threads import ScheduledEventLoop
from moa.device.digital import ButtonViewPort
from cplcom.moa.device import DeviceExceptionBehavior
__all__ = ('MCDAQDevice', )
| 35.033557 | 79 | 0.627203 |
b2fdd34a89c4f597f4f4706f3635728cd6c36c6a | 2,827 | py | Python | train_utils.py | BatyrM/QL-Net | b245aadeb106810d075064137f26d773b2dbd679 | [
"MIT"
] | null | null | null | train_utils.py | BatyrM/QL-Net | b245aadeb106810d075064137f26d773b2dbd679 | [
"MIT"
] | null | null | null | train_utils.py | BatyrM/QL-Net | b245aadeb106810d075064137f26d773b2dbd679 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
def accuracy(output, target, topk=(1,)):
    """Compute the top-k accuracy (in percent) for the specified values of k.

    Args:
        output: score tensor of shape (batch, num_classes).
        target: ground-truth class indices, shape (batch,).
        topk: iterable of k values to evaluate.

    Returns:
        list: one accuracy per requested k (scalar tensors in [0, 100]).
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the maxk highest scores per sample: (batch, maxk) -> (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    # Broadcast the targets against every one of the top-maxk predictions.
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape (not view): the slice is not guaranteed contiguous and
        # view() raises "view size is not compatible" on non-contiguous input.
        correct_k = correct[:k].reshape(-1).sum(0)
        res.append(100 * correct_k / batch_size)
    return res
def adjust_learning_rate(lr, optimizer, epoch):
    """Decay the base learning rate by a factor of 10 every 6 epochs.

    NOTE: the previous docstring claimed decay "after 3 and 6 epochs", but the
    implementation only steps at multiples of 6 (``epoch // 6``); the docstring
    is corrected here to describe the actual behaviour.

    Args:
        lr: initial (base) learning rate.
        optimizer: torch optimizer whose param groups are updated in place.
        epoch: zero-based epoch counter.
    """
    decayed = lr * (0.1 ** (epoch // 6))
    for param_group in optimizer.param_groups:
        param_group['lr'] = decayed
b2feb55d6f844492c6231b317cce3362c8ea498f | 69 | py | Python | Bronze/Bronze_V/17496.py | masterTyper/baekjoon_solved_ac | b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c | [
"MIT"
] | null | null | null | Bronze/Bronze_V/17496.py | masterTyper/baekjoon_solved_ac | b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c | [
"MIT"
] | null | null | null | Bronze/Bronze_V/17496.py | masterTyper/baekjoon_solved_ac | b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c | [
"MIT"
] | null | null | null | N, T, C, P = map(int, input().split())
print(((N - 1) // T) * C * P) | 23 | 38 | 0.434783 |
b2ffff75cff848e9cc4d8a6143bf4d9bf43e64d3 | 5,702 | py | Python | sapy_script/SAP.py | fkfouri/sapy_script | 476041288367e2098b955bc2377f442ce503e822 | [
"MIT"
] | 3 | 2018-12-03T15:51:54.000Z | 2020-11-20T01:05:39.000Z | sapy_script/SAP.py | whrocha/sapy_script | 476041288367e2098b955bc2377f442ce503e822 | [
"MIT"
] | null | null | null | sapy_script/SAP.py | whrocha/sapy_script | 476041288367e2098b955bc2377f442ce503e822 | [
"MIT"
] | 3 | 2018-07-28T21:53:32.000Z | 2018-08-22T13:51:17.000Z | from multiprocessing import Pool, Manager
from time import sleep
from wmi import WMI
from win32com.client import GetObject
from subprocess import Popen
from collections import Iterable
from tqdm import tqdm
from os import getpid
from sapy_script.Session import Session
session_process = None
all_processes_id = []
def clear_tasks(self):
    """Drop every queued task by rebinding the task list to a new empty one."""
    self._tasks = list()
def add_task(self, func, data):
    """Queue one task per element of *data*, pairing *func* with that element."""
    self._tasks.extend({'func': func, 'data': item} for item in data)
| 28.368159 | 113 | 0.573834 |
65004332cb733aa8aa9fc8e64faf35799f2ce289 | 2,158 | py | Python | shepherd/blueprints/editor/__init__.py | Systemetric/shepherd | 28473503130cddd2c40702240f3deaad3a21e52b | [
"BSD-2-Clause"
] | null | null | null | shepherd/blueprints/editor/__init__.py | Systemetric/shepherd | 28473503130cddd2c40702240f3deaad3a21e52b | [
"BSD-2-Clause"
] | 8 | 2017-12-13T15:27:52.000Z | 2019-01-27T21:35:14.000Z | shepherd/blueprints/editor/__init__.py | Systemetric/shepherd | 28473503130cddd2c40702240f3deaad3a21e52b | [
"BSD-2-Clause"
] | null | null | null | import json
import os
import os.path as path
import re
from flask import Blueprint, request
# Flask blueprint serving the in-browser editor endpoints.
blueprint = Blueprint("editor", __name__)

# Directory holding the robot's user-editable sources, relative to the CWD.
robotsrc_path = path.join(os.getcwd(), "robotsrc")
if not path.exists(robotsrc_path):
    # First run: create the source directory and seed it with a main.py
    # placeholder so the editor always has a file to open.
    # NOTE(review): indentation reconstructed — seeding of main.py is assumed
    # to happen only when the directory is newly created; confirm upstream.
    os.mkdir(robotsrc_path)
    main_path = path.join(robotsrc_path, 'main.py')
    main_file = open(main_path, 'w')
    main_file.write('# DO NOT DELETE\n')
    main_file.close()

# Path of the saved block-editor state (may not exist yet).
blocks_path = path.join(robotsrc_path, 'blocks.json')
| 28.394737 | 88 | 0.596386 |
6501e436cf727b0f646b61fcf716e2f64d47d65c | 1,131 | py | Python | hai_tests/test_event_emitter.py | valohai/hai | f49c4eae2eb74b1738699e32b4b2aeb0f4d922dd | [
"MIT"
] | 2 | 2018-10-03T11:13:06.000Z | 2020-08-07T12:44:22.000Z | hai_tests/test_event_emitter.py | valohai/hai | f49c4eae2eb74b1738699e32b4b2aeb0f4d922dd | [
"MIT"
] | 16 | 2018-02-07T11:08:53.000Z | 2021-11-26T09:21:57.000Z | hai_tests/test_event_emitter.py | valohai/hai | f49c4eae2eb74b1738699e32b4b2aeb0f4d922dd | [
"MIT"
] | null | null | null | import pytest
from hai.event_emitter import EventEmitter
def test_event_emitter_exceptions():
    """Emitting with the default quiet setting does not raise; quiet=False
    propagates the handler's IOError."""
    emitter = Thing()
    emitter.on('*', handle)
    emitter.emit('one')
    with pytest.raises(IOError):
        emitter.emit('one', quiet=False)
def test_event_emitter_unknown_event_types():
    """Registering for, or emitting, an unknown event name raises ValueError."""
    emitter = Thing()
    with pytest.raises(ValueError):
        emitter.on('hullo', None)
    with pytest.raises(ValueError):
        emitter.emit('hello')
| 18.85 | 47 | 0.528736 |
6502f4ca30fdd305a49eeefeb8dc2c19d45c0e83 | 2,598 | py | Python | dit/divergences/tests/test_jensen_shannon_divergence.py | chebee7i/dit | 59626e34c7938fddeec140522dd2a592ba4f42ef | [
"BSD-2-Clause"
] | null | null | null | dit/divergences/tests/test_jensen_shannon_divergence.py | chebee7i/dit | 59626e34c7938fddeec140522dd2a592ba4f42ef | [
"BSD-2-Clause"
] | null | null | null | dit/divergences/tests/test_jensen_shannon_divergence.py | chebee7i/dit | 59626e34c7938fddeec140522dd2a592ba4f42ef | [
"BSD-2-Clause"
] | null | null | null | """
Tests for dit.divergences.jensen_shannon_divergence.
"""
from nose.tools import assert_almost_equal, assert_raises
from dit import Distribution
from dit.exceptions import ditException
from dit.divergences.jensen_shannon_divergence import (
jensen_shannon_divergence as JSD,
jensen_shannon_divergence_pmf as JSD_pmf
)
def test_jsd0():
    """Passing a distribution where the weights belong raises ditException."""
    uniform = Distribution("AB", [0.5, 0.5])
    assert_raises(ditException, JSD, uniform, uniform)

def test_jsd1():
    """The JSD of a distribution with itself is zero."""
    uniform = Distribution("AB", [0.5, 0.5])
    assert_almost_equal(JSD([uniform, uniform]), 0)

def test_jsd2():
    """Half-overlapping distributions have a JSD of one half."""
    first = Distribution("AB", [0.5, 0.5])
    second = Distribution("BC", [0.5, 0.5])
    assert_almost_equal(JSD([first, second]), 0.5)

def test_jsd3():
    """Disjoint distributions attain the maximal JSD of one."""
    first = Distribution("AB", [0.5, 0.5])
    second = Distribution("CD", [0.5, 0.5])
    assert_almost_equal(JSD([first, second]), 1.0)

def test_jsd4():
    """Half-overlapping distributions combined with explicit weights."""
    first = Distribution("AB", [0.5, 0.5])
    second = Distribution("BC", [0.5, 0.5])
    assert_almost_equal(JSD([first, second], [0.25, 0.75]), 0.40563906222956625)

def test_jsd5():
    """Supplying more weights than distributions raises ditException."""
    first = Distribution("AB", [0.5, 0.5])
    second = Distribution("BC", [0.5, 0.5])
    assert_raises(ditException, JSD, [first, second], [0.1, 0.6, 0.3])
def test_jsd_pmf1():
    """The JSD of a pmf with itself is zero."""
    uniform = [0.5, 0.5]
    assert_almost_equal(JSD_pmf([uniform, uniform]), 0)

def test_jsd_pmf2():
    """Half-overlapping pmfs have a JSD of one half."""
    first = [0.5, 0.5, 0.0]
    second = [0.0, 0.5, 0.5]
    assert_almost_equal(JSD_pmf([first, second]), 0.5)

def test_jsd_pmf3():
    """Disjoint pmfs attain the maximal JSD of one."""
    first = [0.5, 0.5, 0.0, 0.0]
    second = [0.0, 0.0, 0.5, 0.5]
    assert_almost_equal(JSD_pmf([first, second]), 1.0)

def test_jsd_pmf4():
    """Half-overlapping pmfs combined with explicit weights."""
    first = [0.5, 0.5, 0.0]
    second = [0.0, 0.5, 0.5]
    assert_almost_equal(JSD_pmf([first, second], [0.25, 0.75]), 0.40563906222956625)

def test_jsd_pmf5():
    """Supplying more weights than pmfs raises ditException."""
    first = [0.5, 0.5, 0.0]
    second = [0.0, 0.5, 0.5]
    assert_raises(ditException, JSD_pmf, [first, second], [0.1, 0.6, 0.2, 0.1])
65036c3303afa1e8b9728043f619a82fc6c9e04f | 261 | py | Python | demo/test_scans.py | zhanwj/multi-task-pytorch | 7d57645ec8be0ca0c258cfa99fb788e3cd37f106 | [
"MIT"
] | 2 | 2019-06-11T16:16:11.000Z | 2020-07-21T10:34:40.000Z | demo/test_scans.py | zhanwj/multi-task-pytorch | 7d57645ec8be0ca0c258cfa99fb788e3cd37f106 | [
"MIT"
] | null | null | null | demo/test_scans.py | zhanwj/multi-task-pytorch | 7d57645ec8be0ca0c258cfa99fb788e3cd37f106 | [
"MIT"
] | 2 | 2019-05-21T11:07:29.000Z | 2019-06-11T16:17:02.000Z | import torch
# Hard-coded disparity range for this demo script.
maxdisp = 10
# Candidate disparity indices shaped for broadcasting: (1, maxdisp, 1, 1).
disp_scans = torch.arange(maxdisp).view(1,maxdisp,1,1)
zeros_scans = torch.arange(maxdisp).view(1,maxdisp,1,1)
# 4x4 toy grid; currently unused by the line below.
semseg = torch.arange(16).view(4,4)
# NOTE(review): `repeat` is not defined anywhere in this script, so this line
# raises NameError at runtime — the intended repeat count must be supplied.
zeros_scans = torch.cat([zeros_scans.repeat(1, repeat,1, 1) for i in range(1)],dim=0)
65054cfd998e3e6858fee00ff01c36b5dddea1ff | 383 | py | Python | models.py | collingreen/yaib_plugin_leavemessage | c1e7254edee5255167c2015ee2566f9770b35412 | [
"MIT"
] | null | null | null | models.py | collingreen/yaib_plugin_leavemessage | c1e7254edee5255167c2015ee2566f9770b35412 | [
"MIT"
] | 1 | 2015-06-06T06:28:45.000Z | 2015-06-06T06:28:45.000Z | models.py | collingreen/yaib_plugin_leavemessage | c1e7254edee5255167c2015ee2566f9770b35412 | [
"MIT"
] | null | null | null | from sqlalchemy import Table, Column, String, DateTime, Text
from modules.persistence import Base, getModelBase
CustomBase = getModelBase('leavemessage')
| 25.533333 | 60 | 0.723238 |
65055cb608c13b9ada0803b4c98f800c169f7118 | 1,490 | py | Python | data/raw_data/test.py | orion-orion/Cloze_Test | 021e550e4323d17832f992b9cd7000552b568bc8 | [
"MIT"
] | 1 | 2020-02-13T11:13:09.000Z | 2020-02-13T11:13:09.000Z | data/raw_data/test.py | lonelyprince7/cloze_test | 021e550e4323d17832f992b9cd7000552b568bc8 | [
"MIT"
] | 1 | 2020-02-08T06:34:19.000Z | 2020-02-12T13:02:19.000Z | data/raw_data/test.py | lonelyprince7/cloze_test | 021e550e4323d17832f992b9cd7000552b568bc8 | [
"MIT"
] | 1 | 2020-02-13T06:31:17.000Z | 2020-02-13T06:31:17.000Z | '''
Descripttion:
Version: 1.0
Author: ZhangHongYu
Date: 2022-02-05 18:23:00
LastEditors: ZhangHongYu
LastEditTime: 2022-05-17 16:26:12
'''
import os
import sys
import json
import argparse
from transformers import AlbertTokenizer
from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM
# Directory of *.json cloze articles, given on the command line.
file_path = sys.argv[1]
#bert_model = BertForMaskedLM.from_pretrained('/data/jianghao/ralbert-cloth/model/albert-xxlarge-v2/pytorch_model.bin')
# Special tokens used by BERT/ALBERT-style tokenizers.
PAD, MASK, CLS, SEP = '[PAD]', '[MASK]', '[CLS]', '[SEP]'
bert_tokenizer = AlbertTokenizer.from_pretrained('albert-xxlarge-v2')
max=-1   # longest token string seen so far (shadows builtin max)
cnt=0    # number of articles whose token string exceeds 512 characters
tot=0    # total number of articles processed
# NOTE(review): indentation below reconstructed from the control flow of the
# original script — confirm against the upstream repository.
for file in os.listdir(file_path):
    if file.endswith(".json"):
        with open(os.path.join(file_path,file),'r') as f:
            dict = json.load(f)   # shadows builtin dict
        # Split the article into sentences; '_' blanks become [MASK] tokens.
        sentences=dict['article'].split('.')
        str=""   # token string of the last processed sentence (shadows builtin str)
        for sentence in sentences:
            sentence=sentence.replace('_','[MASK]')
            tokens = bert_tokenizer.tokenize(sentence)
            if len(tokens) == 0:
                continue
            # Ensure the [CLS] ... [SEP] framing expected by BERT-style models.
            if tokens[0] != CLS:
                tokens = [CLS] + tokens
            if tokens[-1] != SEP:
                tokens.append(SEP)
            # NOTE(review): `str` is overwritten each iteration, so the length
            # checks below only see the LAST sentence's tokens — confirm intent.
            str = ''.join(tokens)
        # print(str)
        # print('')
        tot=tot+1
        if len(str)>max:
            max=len(str)
        if len(str)>512:
            cnt=cnt+1
        #os.system("rm "+os.path.join(file_path,file))
# Fraction of articles exceeding the 512-character limit
# (raises ZeroDivisionError if the directory contains no .json files).
print(cnt/tot)
| 31.041667 | 119 | 0.573826 |
6506db5995970d6837a7164f54f13ccdaecfc008 | 1,651 | py | Python | steampipe_alchemy/models/aws_route53_resolver_rule.py | RyanJarv/steampipe_alchemy | c8a31303252c1bd8d83d0f9c429d7d0ef7e1690f | [
"BSD-3-Clause"
] | 9 | 2021-04-21T04:21:01.000Z | 2021-06-19T19:33:36.000Z | steampipe_alchemy/models/aws_route53_resolver_rule.py | RyanJarv/steampipe_alchemy | c8a31303252c1bd8d83d0f9c429d7d0ef7e1690f | [
"BSD-3-Clause"
] | null | null | null | steampipe_alchemy/models/aws_route53_resolver_rule.py | RyanJarv/steampipe_alchemy | c8a31303252c1bd8d83d0f9c429d7d0ef7e1690f | [
"BSD-3-Clause"
] | 1 | 2021-04-26T21:08:20.000Z | 2021-04-26T21:08:20.000Z | from sqlalchemy import Column
from sqlalchemy.types import JSON, Text, Boolean, TIMESTAMP, BigInteger
from sqlalchemy.dialects import postgresql as psql
from steampipe_alchemy.mixins import FormatMixins
from steampipe_alchemy import Base | 53.258065 | 90 | 0.740763 |
650ab0f46dda1e9c953f58c0e88233c1aedea04d | 8,825 | py | Python | harvester/sharekit/extraction.py | nppo/search-portal | aedf21e334f178c049f9d6cf37cafd6efc07bc0d | [
"MIT"
] | 1 | 2022-01-10T00:26:12.000Z | 2022-01-10T00:26:12.000Z | harvester/sharekit/extraction.py | nppo/search-portal | aedf21e334f178c049f9d6cf37cafd6efc07bc0d | [
"MIT"
] | 48 | 2021-11-11T13:43:09.000Z | 2022-03-30T11:33:37.000Z | harvester/sharekit/extraction.py | nppo/search-portal | aedf21e334f178c049f9d6cf37cafd6efc07bc0d | [
"MIT"
] | null | null | null | import re
from mimetypes import guess_type
from django.conf import settings
from datagrowth.processors import ExtractProcessor
from datagrowth.utils import reach
from core.constants import HIGHER_EDUCATION_LEVELS, RESTRICTED_MATERIAL_SETS
# Declarative extraction objective: maps normalized harvester field names to
# either a JSON-path string (resolved against the record's attributes) or an
# extractor classmethod on SharekitMetadataExtraction (defined elsewhere in
# this module, outside the visible chunk).
SHAREKIT_EXTRACTION_OBJECTIVE = {
    "url": SharekitMetadataExtraction.get_url,
    "files": SharekitMetadataExtraction.get_files,
    "title": "$.attributes.title",
    "language": "$.attributes.language",
    "keywords": "$.attributes.keywords",
    "description": "$.attributes.abstract",
    "mime_type": SharekitMetadataExtraction.get_mime_type,
    "technical_type": SharekitMetadataExtraction.get_technical_type,
    "material_types": SharekitMetadataExtraction.get_material_types,
    "copyright": SharekitMetadataExtraction.get_copyright,
    "copyright_description": SharekitMetadataExtraction.get_none,
    "aggregation_level": "$.attributes.aggregationlevel",
    "authors": SharekitMetadataExtraction.get_authors,
    "publishers": SharekitMetadataExtraction.get_publishers,
    "publisher_date": "$.attributes.publishedAt",
    "lom_educational_levels": SharekitMetadataExtraction.get_lom_educational_levels,
    "lowest_educational_level": SharekitMetadataExtraction.get_lowest_educational_level,
    "disciplines": SharekitMetadataExtraction.get_empty_list,
    "ideas": SharekitMetadataExtraction.get_ideas,
    "from_youtube": SharekitMetadataExtraction.get_from_youtube,
    "#is_restricted": SharekitMetadataExtraction.get_is_restricted,
    "analysis_allowed": SharekitMetadataExtraction.get_analysis_allowed,
    "is_part_of": SharekitMetadataExtraction.get_is_part_of,
    "has_parts": "$.attributes.hasParts",
    "doi": "$.attributes.doi",
    "research_object_type": "$.attributes.typeResearchObject",
    "research_themes": SharekitMetadataExtraction.get_research_themes,
    "parties": SharekitMetadataExtraction.get_empty_list,
    "learning_material_themes": SharekitMetadataExtraction.get_learning_material_themes,
    "consortium": "$.attributes.consortium"
}
| 37.394068 | 107 | 0.643853 |
650b61eb839964413b4047a7102a2ba07a9d68e0 | 1,518 | py | Python | jake/test/test_audit.py | lvcarlosja/jake | 0ecbcdd89352d27f50e35d1d73b624b86456e568 | [
"Apache-2.0"
] | null | null | null | jake/test/test_audit.py | lvcarlosja/jake | 0ecbcdd89352d27f50e35d1d73b624b86456e568 | [
"Apache-2.0"
] | 4 | 2021-07-29T18:51:06.000Z | 2021-12-13T20:50:20.000Z | jake/test/test_audit.py | lvcarlosja/jake | 0ecbcdd89352d27f50e35d1d73b624b86456e568 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019-Present Sonatype Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" test_audit.py , for all your testing of audit py needs """
import unittest
import json
from pathlib import Path
from ..audit.audit import Audit
from ..types.results_decoder import ResultsDecoder
| 33 | 81 | 0.725296 |
650bd2653086235614c46bb1b73a337dfc0ba477 | 6,046 | py | Python | vintage_commands.py | ktuan89/Vintage | 81f178043d1dad4ec9bd50ad4db2df9ef994f098 | [
"MIT",
"Unlicense"
] | null | null | null | vintage_commands.py | ktuan89/Vintage | 81f178043d1dad4ec9bd50ad4db2df9ef994f098 | [
"MIT",
"Unlicense"
] | null | null | null | vintage_commands.py | ktuan89/Vintage | 81f178043d1dad4ec9bd50ad4db2df9ef994f098 | [
"MIT",
"Unlicense"
] | null | null | null | import sublime, sublime_plugin
import os
#import subprocess
#class MoveFocusedViewToBeginning(sublime_plugin.EventListener):
# def on_activated(self, view):
# view.window().set_view_index(view, 0, 0)
| 35.775148 | 148 | 0.561528 |
650c687c2aa892784fed03faf887190ac6a55992 | 3,718 | py | Python | bitten/tests/notify.py | dokipen/bitten | d4d2829c63eec84bcfab05ec7035a23e85d90c00 | [
"BSD-3-Clause"
] | 1 | 2016-08-28T03:13:03.000Z | 2016-08-28T03:13:03.000Z | bitten/tests/notify.py | dokipen/bitten | d4d2829c63eec84bcfab05ec7035a23e85d90c00 | [
"BSD-3-Clause"
] | null | null | null | bitten/tests/notify.py | dokipen/bitten | d4d2829c63eec84bcfab05ec7035a23e85d90c00 | [
"BSD-3-Clause"
] | null | null | null | #-*- coding: utf-8 -*-
#
# Copyright (C) 2007 Ole Trenner, <ole@jayotee.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import unittest
from trac.db import DatabaseManager
from trac.test import EnvironmentStub, Mock
from trac.web.session import DetachedSession
from bitten.model import schema, Build, BuildStep, BuildLog
from bitten.notify import BittenNotify, BuildNotifyEmail
def suite():
    """Aggregate the notify test cases into a single unittest suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(BittenNotifyTest, 'test'))
    tests.addTest(unittest.makeSuite(BuildNotifyEmailTest, 'test'))
    return tests
# Run the aggregated suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 38.329897 | 83 | 0.687197 |
650d0ad17e404144a026ce3f06aafc17ea1fda8f | 1,962 | py | Python | sparse gamma def/gamma_def_score.py | blei-lab/ars-reparameterization | b20a84c28537d85e0aaf62cbbaacb6de9370f0a3 | [
"MIT"
] | 33 | 2017-03-11T10:00:32.000Z | 2022-03-08T14:23:45.000Z | ars-reparameterization/sparse gamma def/gamma_def_score.py | astirn/neural-inverse-cdf-sampling | 80eb2eb7cf396a4e53df62bc126e9a1828f55ca9 | [
"MIT"
] | 2 | 2018-02-05T17:14:00.000Z | 2019-08-02T14:37:25.000Z | ars-reparameterization/sparse gamma def/gamma_def_score.py | astirn/neural-inverse-cdf-sampling | 80eb2eb7cf396a4e53df62bc126e9a1828f55ca9 | [
"MIT"
] | 10 | 2017-03-05T13:31:01.000Z | 2020-03-29T01:09:01.000Z | from autograd import grad
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.special as sp
from gamma_def import *
# Define helper functions for score fnc estimator
def logQ(sample, alpha, m):
    """
    Evaluates log of variational approximation, vectorized.

    The factor is Gamma with shape ``alpha`` and mean ``m`` (rate alpha/m).

    Args:
        sample: point(s) at which to evaluate the log-density.
        alpha: shape parameter(s), broadcastable against ``sample``.
        m: mean parameter(s), broadcastable against ``sample``.

    Returns:
        Elementwise log q(sample | alpha, m).
    """
    temp = alpha*(np.log(alpha)-np.log(m))
    temp += (alpha-1.)*np.log(sample)
    temp -= alpha*sample/m
    # gammaln instead of log(gamma(.)): gamma(alpha) overflows to inf for
    # alpha > ~171, so the original np.log(sp.gamma(alpha)) returned inf there.
    temp -= sp.gammaln(alpha)
    return temp
def grad_logQ(sample, alpha, m):
    """
    Evaluates the gradient of the log of variational approximation, vectorized.

    Returns an (len(alpha), 2) array: column 0 is d/d(alpha), column 1 is d/d(m).
    """
    # Partial derivative with respect to the shape parameter alpha.
    d_alpha = np.log(alpha) - np.log(m) + 1. + np.log(sample) - sample/m - sp.digamma(alpha)
    # Partial derivative with respect to the mean parameter m.
    d_m = -alpha/m + alpha*sample/m**2
    gradient = np.zeros((alpha.shape[0], 2))
    gradient[:, 0] = d_alpha
    gradient[:, 1] = d_m
    return gradient
# Define score function estimator
def score_estimator(alpha,m,x,K,alphaz,S=100):
    """
    Form score function estimator based on samples lmbda.

    Monte Carlo (REINFORCE) gradient of the ELBO with respect to the
    variational parameters (alpha, m), using 2*S samples: the second half
    fits a control variate, the first half forms the estimate.
    """
    N = x.shape[0]
    if x.ndim == 1:
        D = 1
    else:
        D = x.shape[1]
    # NOTE(review): D, num_z, L and the zero-initialized `gradient` below are
    # never used afterwards — apparently leftover code.
    num_z = N*np.sum(K)
    L = K.shape[0]
    gradient = np.zeros((alpha.shape[0],2))
    f = np.zeros((2*S,alpha.shape[0],2))
    h = np.zeros((2*S,alpha.shape[0],2))
    for s in range(2*S):
        # Draw Gamma(alpha, 1), clamp away from zero, rescale to mean m.
        lmbda = npr.gamma(alpha,1.)
        lmbda[lmbda < 1e-300] = 1e-300
        zw = m*lmbda/alpha
        lQ = logQ(zw,alpha,m)
        gradLQ = grad_logQ(zw,alpha,m)
        # Model log-joint; `logp` comes from gamma_def (star-imported above).
        lP = logp(zw, K, x, alphaz)
        temp = lP - np.sum(lQ)
        f[s,:,:] = temp*gradLQ   # score-function integrand
        h[s,:,:] = gradLQ        # control variate (zero expectation)
    # CV
    # Per-parameter control-variate coefficients a = Cov(f,h)/Var(h), fitted
    # on the second half of the samples only.
    covFH = np.zeros((alpha.shape[0],2))
    covFH[:,0] = np.diagonal(np.cov(f[S:,:,0],h[S:,:,0],rowvar=False)[:alpha.shape[0],alpha.shape[0]:])
    covFH[:,1] = np.diagonal(np.cov(f[S:,:,1],h[S:,:,1],rowvar=False)[:alpha.shape[0],alpha.shape[0]:])
    a = covFH / np.var(h[S:,:,:],axis=0)
    # Variance-reduced estimate from the first half of the samples.
    return np.mean(f[:S,:,:],axis=0) - a*np.mean(h[:S,:,:],axis=0)
650d548dfcf197b677b3b51c6953dea2bc1cb40b | 305 | py | Python | codes/Ex036.py | BelfortJoao/Curso-phyton01 | 79376233be228f39bf548f90b8d9bd5419ac067a | [
"MIT"
] | 3 | 2021-08-17T14:02:14.000Z | 2021-08-19T02:37:30.000Z | codes/Ex036.py | BelfortJoao/Curso-phyton01 | 79376233be228f39bf548f90b8d9bd5419ac067a | [
"MIT"
] | null | null | null | codes/Ex036.py | BelfortJoao/Curso-phyton01 | 79376233be228f39bf548f90b8d9bd5419ac067a | [
"MIT"
] | null | null | null | x = float(input('Qual o valor da casa que quer comprar? '))
y = int(input("em quantos anos quer comprar a casa? "))
z = int(input("Qual seu salario? "))
w = y*12
if x / w > (z/100)*30:
print("Voce no pode comprar a casa")
else:
print('Voce pode comprar a casa a parcela de {:.2f}'.format(x/y))
| 33.888889 | 71 | 0.636066 |
650e93f40d797c0460bad1ce4c72fe47deb0c2b7 | 3,313 | py | Python | datasets/base_dataset.py | iclr2022submission4/cgca | 3e6ea65c0ebf72a8291dde3ffdb06b50e4d2900a | [
"MIT"
] | 13 | 2022-01-10T05:28:26.000Z | 2022-02-02T10:22:42.000Z | datasets/base_dataset.py | iclr2022submission4/cgca | 3e6ea65c0ebf72a8291dde3ffdb06b50e4d2900a | [
"MIT"
] | null | null | null | datasets/base_dataset.py | iclr2022submission4/cgca | 3e6ea65c0ebf72a8291dde3ffdb06b50e4d2900a | [
"MIT"
] | null | null | null | import torch
import random
from torch.utils.data import Dataset, DataLoader
from abc import ABC
from models.base_model import Model
from torch.utils.tensorboard import SummaryWriter
from typing import List
| 29.061404 | 83 | 0.705101 |
650ec3bb5d3381c505f9bd3240d3f221d5e35e00 | 660 | py | Python | open_data/dataset/migrations/0005_keyword_squashed_0006_remove_keyword_relevancy.py | balfroim/OpenData | f0334dae16c2806e81f7d2d53adeabc72403ecce | [
"MIT"
] | null | null | null | open_data/dataset/migrations/0005_keyword_squashed_0006_remove_keyword_relevancy.py | balfroim/OpenData | f0334dae16c2806e81f7d2d53adeabc72403ecce | [
"MIT"
] | null | null | null | open_data/dataset/migrations/0005_keyword_squashed_0006_remove_keyword_relevancy.py | balfroim/OpenData | f0334dae16c2806e81f7d2d53adeabc72403ecce | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-04-21 13:01
from django.db import migrations, models
| 28.695652 | 118 | 0.587879 |
650f4d544268699293dfae61c4d5b0971b890ccb | 50 | py | Python | src/converters/__init__.py | Peilonrayz/json_to_object | ae5ba42dcab71010302f42d78dbfd559c12496c9 | [
"MIT"
] | null | null | null | src/converters/__init__.py | Peilonrayz/json_to_object | ae5ba42dcab71010302f42d78dbfd559c12496c9 | [
"MIT"
] | null | null | null | src/converters/__init__.py | Peilonrayz/json_to_object | ae5ba42dcab71010302f42d78dbfd559c12496c9 | [
"MIT"
] | null | null | null | from .converter import Converter, Converters, ron
| 25 | 49 | 0.82 |