hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c400620022eebd6f0df3a706d1f575d077a9ad78 | 6,781 | py | Python | object/test.py | SkinLesionsResearch/NCPL | 562e9664f77e14ed9b2655b82e8498b8a8ce5d2d | [
"MIT"
] | null | null | null | object/test.py | SkinLesionsResearch/NCPL | 562e9664f77e14ed9b2655b82e8498b8a8ce5d2d | [
"MIT"
] | null | null | null | object/test.py | SkinLesionsResearch/NCPL | 562e9664f77e14ed9b2655b82e8498b8a8ce5d2d | [
"MIT"
] | null | null | null | import argparse
import os, sys
os.chdir("/home/jackie/ResearchArea/SkinCancerResearch/semi_skin_cancer")
sys.path.append("/home/jackie/ResearchArea/SkinCancerResearch/semi_skin_cancer")
print(os.getcwd())
import os.path as osp
import torchvision
import numpy as np
import torch
# import torch.nn as nn
# import torch.optim as optim
# from itertools import cycle
from torchvision import transforms
# import network, loss
from torch.utils.data import DataLoader
from data_list import ImageList, ImageList_idx
import random, pdb, math, copy
from evaluation.draw import draw_ROC, draw_TSNE, draw_cm
from evaluation.metrics import get_metrics, get_metrics_sev_class, get_test_data
import matplotlib.pyplot as plt
from transforms import image_test
import utils
plt.rc('font', family='Times New Roman')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='oral_cancer')
    parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
    parser.add_argument('--batch_size', type=int, default=32, help="batch_size")
    parser.add_argument('--num_classes', type=int, default=7, help="number of classes")
    parser.add_argument('--worker', type=int, default=12, help="number of workers")
    parser.add_argument('--dir', type=str, default='./ckps/')
    parser.add_argument('--subDir', type=str, default='resnet50_sev_cates_2500_0.99_naive_0_afm_0.7_u_0.3')
    parser.add_argument('--dset_path', type=str, default='./data/semi_processed')
    parser.add_argument('--seed', type=int, default=2021, help="random seed")
    parser.add_argument('--which', type=str, default='one', choices=['one', 'all'])
    parser.add_argument('--img_dir', type=str, default=None)
    parser.add_argument('--save_dir', type=str, default=None)
    parser.add_argument('--bin_class', type=str, default=None)
    args = parser.parse_args()

    # Binary mode labels the two classes "<bin_class>" / "not <bin_class>";
    # otherwise use the seven HAM10000-style lesion categories.
    if args.num_classes == 2:
        args.label_names = [("not " + args.bin_class), args.bin_class]
    else:
        args.label_names = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc']

    # NOTE(review): `print_args` and `test_target` are not defined in this
    # file's visible imports — presumably provided elsewhere; confirm.
    if args.which == 'one':
        # Evaluate a single checkpoint directory (args.subDir).
        args.net = osp.basename(args.subDir).split('_')[0]
        # torch.backends.cudnn.deterministic = True
        print(args.dir)
        args.output_dir_train = os.path.join(args.dir, args.subDir)
        print(args.output_dir_train)
        args.output_dir = os.path.join('test', args.output_dir_train)
        # os.makedirs(..., exist_ok=True) replaces the old shell `mkdir -p`
        # call plus a second redundant guarded os.makedirs.
        os.makedirs(args.output_dir, exist_ok=True)
        args.out_file = open(osp.join(args.output_dir, 'log.txt'), 'w')
        args.out_file.write(print_args(args) + '\n')
        args.out_file.flush()
        test_target(args)

    if args.which == 'all':
        # Evaluate every checkpoint directory found under args.dir.
        # (renamed loop variable: `dir` shadowed the builtin)
        for sub_dir in os.listdir(args.dir):
            args.net = sub_dir.split('_')[0]
            # torch.backends.cudnn.deterministic = True
            args.output_dir_train = os.path.join(args.dir, sub_dir)
            args.output_dir = os.path.join('./test', args.output_dir_train)
            os.makedirs(args.output_dir, exist_ok=True)
            args.out_file = open(osp.join(args.output_dir, 'log.txt'), 'w')
            args.out_file.write(print_args(args) + '\n')
            args.out_file.flush()
            test_target(args)
| 38.971264 | 113 | 0.626309 |
c4009ade7b5eb056201eed0338579ec28e08eb56 | 226 | py | Python | countdownhype/urls.py | chri4354/BeeMe_platform | b73843d9146c5ba54a63a8839980ee7c8024e80d | [
"CC-BY-4.0"
] | null | null | null | countdownhype/urls.py | chri4354/BeeMe_platform | b73843d9146c5ba54a63a8839980ee7c8024e80d | [
"CC-BY-4.0"
] | 8 | 2020-06-06T01:55:55.000Z | 2022-03-12T00:31:52.000Z | countdownhype/urls.py | chri4354/BeeMe_platform | b73843d9146c5ba54a63a8839980ee7c8024e80d | [
"CC-BY-4.0"
] | null | null | null | from django.urls import path, re_path
from . import views
# URL routes for the countdownhype app.
urlpatterns = [
    path('', views.index, name='index'),  # app landing page
    path('countdown/', views.countdown, name='countdown'),  # countdown page
    # Catch-all redirect route, currently disabled.
    #re_path(r'.+', views.redir, name='redir'),
]
| 22.6 | 58 | 0.650442 |
c40260dc06f3a35df8d6b1598c7152ecade68c53 | 204 | py | Python | argv.py | christoga/python | 1395b3177e7baf46677a7a7a4ae89d2488c6f0fa | [
"MIT"
] | 5 | 2015-11-15T19:08:31.000Z | 2015-11-27T02:34:28.000Z | argv.py | christoga/python | 1395b3177e7baf46677a7a7a4ae89d2488c6f0fa | [
"MIT"
] | null | null | null | argv.py | christoga/python | 1395b3177e7baf46677a7a7a4ae89d2488c6f0fa | [
"MIT"
] | null | null | null | from sys import argv
script, first, second, third = argv
print "This script called", script
print "The first variable :", first
print "The second variable :", second
print "The third variable :", third
| 22.666667 | 37 | 0.735294 |
c403737a02fdcf7c798629d6151ff7c1e4a813cf | 913 | py | Python | ryu/gui/views/topology.py | uiuc-srg/ryu | 2a597f812270ea9690269a20bf659f334c323eb6 | [
"Apache-2.0"
] | 269 | 2015-03-08T11:32:45.000Z | 2022-03-30T11:18:16.000Z | ryu/gui/views/topology.py | uiuc-srg/ryu | 2a597f812270ea9690269a20bf659f334c323eb6 | [
"Apache-2.0"
] | 4 | 2017-03-07T11:51:24.000Z | 2020-07-07T20:13:55.000Z | ryu/gui/views/topology.py | uiuc-srg/ryu | 2a597f812270ea9690269a20bf659f334c323eb6 | [
"Apache-2.0"
] | 205 | 2015-01-13T04:52:25.000Z | 2022-03-30T13:37:33.000Z | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import render_template, request
import view_base
| 33.814815 | 69 | 0.739321 |
c4038c43fba700001a9ef9e5ce94db202c34c7bb | 2,247 | py | Python | allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py | urigoren/allennlp | 236e1fd01ca30409cd736625901292609009f5c4 | [
"Apache-2.0"
] | 1 | 2020-03-30T14:07:02.000Z | 2020-03-30T14:07:02.000Z | allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py | urigoren/allennlp | 236e1fd01ca30409cd736625901292609009f5c4 | [
"Apache-2.0"
] | 123 | 2020-04-26T02:41:30.000Z | 2021-08-02T21:18:00.000Z | allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py | urigoren/allennlp | 236e1fd01ca30409cd736625901292609009f5c4 | [
"Apache-2.0"
] | 2 | 2019-12-21T05:58:44.000Z | 2021-08-16T07:41:21.000Z | import numpy as np
import pytest
import torch
from numpy.testing import assert_almost_equal
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
| 44.94 | 93 | 0.696484 |
c404204e3c66a1ac63a04d196c9f1142497f7ef7 | 1,020 | py | Python | dqn/ops.py | khurshedmemon/DQN-UN-TL | 1a981feff66825b6c35aafd08aba29d3c08ed745 | [
"Apache-2.0"
] | 1 | 2021-12-01T15:08:44.000Z | 2021-12-01T15:08:44.000Z | dqn/ops.py | khurshedmemon/DQN-UN-TL | 1a981feff66825b6c35aafd08aba29d3c08ed745 | [
"Apache-2.0"
] | 1 | 2021-12-02T06:09:05.000Z | 2021-12-02T06:09:05.000Z | dqn/ops.py | khurshedmemon/DQN-UN-TL | 1a981feff66825b6c35aafd08aba29d3c08ed745 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
| 32.903226 | 107 | 0.582353 |
c40422c343f9bc25ecff00b38032cd67afe03520 | 4,081 | py | Python | cellsium/model/initialization.py | modsim/CellSium | 8c3f4f5ccf84fa5555206d01cc3359c89071dcba | [
"BSD-2-Clause"
] | null | null | null | cellsium/model/initialization.py | modsim/CellSium | 8c3f4f5ccf84fa5555206d01cc3359c89071dcba | [
"BSD-2-Clause"
] | null | null | null | cellsium/model/initialization.py | modsim/CellSium | 8c3f4f5ccf84fa5555206d01cc3359c89071dcba | [
"BSD-2-Clause"
] | 1 | 2021-12-29T23:19:17.000Z | 2021-12-29T23:19:17.000Z | """Cell parameter random initializations."""
from typing import Any, Dict
import numpy as np
from ..parameters import (
Height,
NewCellBendLowerLower,
NewCellBendLowerUpper,
NewCellBendOverallLower,
NewCellBendOverallUpper,
NewCellBendUpperLower,
NewCellBendUpperUpper,
NewCellLength1Mean,
NewCellLength1Std,
NewCellLength2Mean,
NewCellLength2Std,
NewCellLengthAbsoluteMax,
NewCellLengthAbsoluteMin,
NewCellRadiusFromCenter,
NewCellWidthAbsoluteMax,
NewCellWidthAbsoluteMin,
NewCellWidthMean,
NewCellWidthStd,
Width,
)
from ..random import RRF, enforce_bounds
RandomSequenceType = Dict[str, Any]
| 32.133858 | 81 | 0.589561 |
c4049f3019aff074a372d03e83e2c871a888286d | 7,540 | py | Python | QAOA_MaxClique.py | bernovie/QAOA-MaxClique | 59b795480e019ae19d25ace274bdb86714ed49e2 | [
"MIT"
] | 2 | 2020-06-19T06:58:11.000Z | 2021-05-18T07:17:22.000Z | QAOA_MaxClique.py | bernovie/QAOA-MaxClique | 59b795480e019ae19d25ace274bdb86714ed49e2 | [
"MIT"
] | 1 | 2020-09-21T20:26:46.000Z | 2020-09-21T20:26:46.000Z | QAOA_MaxClique.py | bernovie/QAOA-MaxClique | 59b795480e019ae19d25ace274bdb86714ed49e2 | [
"MIT"
] | 1 | 2020-09-20T12:42:02.000Z | 2020-09-20T12:42:02.000Z | import qiskit
import numpy as np
import matplotlib.pyplot as plt
import json
from graph import *
# Random comment
P =1
# measure all qubits in q_input register, return dictionary of samples
### gradient ascent optimizer
# graph is graph to optimize over
# epsilon controls how far out the delta is calculated
# eta is learning rate
# threshold is the average of gamma and beta that we will consider a max
main()
| 31.157025 | 116 | 0.589125 |
c406c0be47fb741172f1a8941c81701c0d28eb02 | 253 | py | Python | yakut/cmd/file_server/__init__.py | pavel-kirienko/un | 996e64668d8902bd876fab16b64e3361094a674d | [
"MIT"
] | 1 | 2020-12-23T22:59:12.000Z | 2020-12-23T22:59:12.000Z | yakut/cmd/file_server/__init__.py | pavel-kirienko/un | 996e64668d8902bd876fab16b64e3361094a674d | [
"MIT"
] | null | null | null | yakut/cmd/file_server/__init__.py | pavel-kirienko/un | 996e64668d8902bd876fab16b64e3361094a674d | [
"MIT"
] | null | null | null | # Copyright (c) 2021 OpenCyphal
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@opencyphal.org>
from ._app_descriptor import AppDescriptor as AppDescriptor
from ._cmd import file_server as file_server
| 36.142857 | 66 | 0.810277 |
c407355017835f143ce6a0c84504a53fa41a83ee | 15,959 | py | Python | src/learn_mtfixbmodel.py | ornithos/pytorch-mtds-mocap | 3ec10387d3d897e9a20d789bd4a3782a047519f7 | [
"MIT"
] | 2 | 2022-02-09T17:53:31.000Z | 2022-03-02T11:25:35.000Z | src/learn_mtfixbmodel.py | ornithos/pytorch-mtds-mocap | 3ec10387d3d897e9a20d789bd4a3782a047519f7 | [
"MIT"
] | null | null | null | src/learn_mtfixbmodel.py | ornithos/pytorch-mtds-mocap | 3ec10387d3d897e9a20d789bd4a3782a047519f7 | [
"MIT"
] | null | null | null | """Simple code for training an RNN for motion prediction."""
import os
import sys
import time
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
import mtfixb_model
import mtfixb_model2
import parseopts
def create_model(args, total_num_batches):
    """Create an MT model, or load a previously saved one.

    Dispatches on `args`: a non-empty `args.load` loads a checkpoint;
    `args.k == 0`, `args.dynamicsdict`, `args.biasonly` and `args.nobias`
    select specialised constructors; otherwise a `mtfixb_model.MTGRU` is
    built, optionally initialising its first layer from `args.load_layer1`.

    Parameters
    ----------
    args : argparse.Namespace
        Experiment configuration.
    total_num_batches : int
        Total number of training batches, forwarded to the model constructor.

    Returns
    -------
    The constructed (or loaded) model.
    """
    # If a checkpoint path is given, load it and return immediately.
    if len(args.load) > 0:
        print("Loading model")
        model = torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load)
        return model

    # Specialised model variants.
    if args.k == 0:
        return create_model_k0(args, total_num_batches)
    if args.dynamicsdict:
        return create_model_DD(args, total_num_batches)
    if args.biasonly:
        return create_model_BiasOnly(args, total_num_batches)
    if args.nobias:
        return create_model_NoMTBias(args, total_num_batches)

    model = mtfixb_model.MTGRU(
        args.seq_length_out,
        args.decoder_size,
        args.decoder_size2,
        args.batch_size,
        total_num_batches,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.bottleneck,
        output_dim=args.human_size,
        input_dim=args.input_size,
        dropout=args.dropout_p,
        residual_output=args.residual_velocities,
        init_state_noise=args.init_state_noise,
        mt_rnn=args.mt_rnn,
        psi_affine=args.psi_affine,
    )

    # Fix: `args.load` is known to be empty here (early return above), so the
    # old duplicated checkpoint-loading tail was unreachable and is removed.
    if len(args.load_layer1) > 0:
        print("Loading GRU2 model")
        model = load_layer1(model, args.load_layer1, args.use_cpu)
    return model
def create_model_k0(args, total_num_batches):
    """Create the k=0 (no latent code) open-loop GRU baseline model.

    `total_num_batches` is accepted for signature parity with the other
    `create_model_*` constructors but is not used by OpenLoopGRU.
    """
    model = mtfixb_model.OpenLoopGRU(
        args.seq_length_out,
        args.decoder_size,
        args.batch_size,
        args.human_size,
        args.input_size,
        args.dropout_p,
        args.residual_velocities,
        args.init_state_noise,
    )

    return model
def create_model_DD(args, total_num_batches):
    """Create the Dynamics Dictionary variant of the MT model.

    Raises
    ------
    NotImplementedError
        If `args.load_layer1` is set — layer-1 loading is unsupported here.
    """
    if len(args.load_layer1) > 0:
        # Fix: the exception was previously constructed but never raised,
        # silently ignoring the unsupported option.
        raise NotImplementedError("Layer 1 load not yet implemented for Dynamics Dict.")

    model = mtfixb_model.DynamicsDict(
        args.seq_length_out,
        args.decoder_size,
        total_num_batches,
        args.batch_size,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.human_size,
        args.input_size,
        args.dropout_p,
        args.residual_velocities,
        args.init_state_noise,
    )

    return model
def create_model_BiasOnly(args, total_num_batches):
    """Create the MT model variant where only the bias is task-modulated.

    Raises
    ------
    NotImplementedError
        If `args.load_layer1` is set — layer-1 loading is unsupported here.
    """
    if len(args.load_layer1) > 0:
        # Fix: the exception was previously constructed but never raised,
        # silently ignoring the unsupported option.
        raise NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")

    model = mtfixb_model.MTGRU_BiasOnly(
        args.seq_length_out,
        args.decoder_size,
        args.decoder_size2,
        args.batch_size,
        total_num_batches,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.bottleneck,
        output_dim=args.human_size,
        input_dim=args.input_size,
        dropout=args.dropout_p,
        residual_output=args.residual_velocities,
        init_state_noise=args.init_state_noise,
    )

    return model
def create_model_NoMTBias(args, total_num_batches):
    """Create the MT model variant without the multi-task bias term.

    Raises
    ------
    NotImplementedError
        If `args.load_layer1` is set — layer-1 loading is unsupported here.
    """
    if len(args.load_layer1) > 0:
        # Fix: the exception was previously constructed but never raised,
        # silently ignoring the unsupported option.
        raise NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")

    model = mtfixb_model2.MTGRU_NoBias(
        args.seq_length_out,
        args.decoder_size,
        args.decoder_size2,
        args.batch_size,
        total_num_batches,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.bottleneck,
        output_dim=args.human_size,
        input_dim=args.input_size,
        dropout=args.dropout_p,
        residual_output=args.residual_velocities,
        init_state_noise=args.init_state_noise,
        mt_rnn=args.mt_rnn,
        psi_affine=args.psi_affine,
    )

    return model
def train(args):
    """Train an MT (multi-task) model on human motion data.

    Runs `args.iterations` optimisation steps on batches drawn from the
    iterator built by `read_all_data`, periodically decaying the learning
    rate and checkpointing the model to `args.train_dir`.

    Parameters
    ----------
    args : argparse.Namespace
        Full experiment configuration (model, optimiser and schedule flags).
    """
    train_iter = read_all_data(args)
    train_iter.shuffle()
    total_num_batches = train_iter.total_length()

    model = create_model(args, total_num_batches)
    model = model if args.use_cpu else model.cuda()

    has_weight = not np.isclose(args.first3_prec, 1.0)  # extra precision on first 3 output dims
    is_hard_em = args.hard_em_iters > 0
    is_MT = args.k > 0  # latent dim k > 0 => multi-task variant

    current_step = 0
    previous_losses = []
    step_time, loss = 0, 0

    # Per-parameter-group learning rates; negative CLI values mean "use the default lr".
    mt_lr = args.learning_rate_mt if args.learning_rate_mt >= 0 else args.learning_rate
    z_lr = args.learning_rate_z if args.learning_rate_z >= 0 else args.learning_rate
    zls_lr = 0 if is_hard_em else z_lr  # freeze latent scales during the hard-EM phase
    pars_lrs, zls_ix = model.get_params_optim_dicts(mt_lr, args.learning_rate, z_lr, zls_lr=zls_lr)

    if args.optimiser.upper() == "SGD":
        optimiser = optim.SGD(pars_lrs, weight_decay=args.weight_decay)
    elif args.optimiser.upper() == "NESTEROV":
        optimiser = optim.SGD(pars_lrs, momentum=0.8, nesterov=True, weight_decay=args.weight_decay)
    elif args.optimiser.upper() == "ADAM":
        optimiser = optim.Adam(pars_lrs, betas=(0.9, 0.999), weight_decay=args.weight_decay)
    else:
        # Fix: the exception was previously constructed but never raised, which
        # surfaced later as a confusing NameError on `optimiser`.
        raise Exception("Unknown optimiser type: {:s}. Try 'SGD', 'Nesterov' or 'Adam'".format(args.optimiser))

    has_ar_noise = args.ar_coef > 0
    device = "cpu" if args.use_cpu else "cuda"
    if has_ar_noise:
        assert args.ar_coef < 1, "ar_coef must be in [0, 1)."
        # Construct banded AR precision matrix (fn def below)
        Prec = ar_prec_matrix(args.ar_coef, args.seq_length_out).float().to(device)

    for _ in range(args.iterations):
        optimiser.zero_grad()
        model.train()

        start_time = time.time()

        # ------------------------------------------------------- TRAINING
        inputs, outputs, c_ids = model.get_batch(train_iter)
        inputs, outputs = torchify(inputs, outputs, device=device)

        if is_MT:
            # Variational posterior over the per-sequence latent z.
            mu = model.mt_net.Z_mu[c_ids, :]
            sd = torch.sigmoid(3 * model.mt_net.Z_logit_s[c_ids, :])
            preds, _state = model(inputs, mu, sd)
        else:
            preds, _state = model(inputs)

        err = preds - outputs
        if has_weight:
            # Up-weight the first three output dims by sqrt(first3_prec).
            err = err * torch.cat(
                (torch.ones(1, 1, 3) * np.sqrt(args.first3_prec), torch.ones(1, 1, args.human_size - 3)), dim=2
            ).to(err.device)

        if not has_ar_noise:
            sqerr = err ** 2
        else:
            sqerr = (Prec @ err) * err

        step_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2
        # assume \sigma is const. wrt optimisation, and hence normalising constant can be ignored.

        # Now for KL term. Since we're descending *negative* L.B., we need to *ADD* KL to loss:
        if is_MT:
            logstd = torch.log(sd)
            KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd))
            step_loss = step_loss + KLD

        # Actual backpropagation
        step_loss.backward()
        optimiser.step()

        # -------------------------------------------------------
        # Reporting / admin
        step_loss = step_loss.cpu().data.numpy()

        if current_step % 10 == 0:
            if is_MT:
                KLD_part = KLD.cpu().data.numpy()
                print(
                    "step {0:04d}; step_loss: {1:.4f} ({2:.4f})".format(current_step, step_loss, step_loss - KLD_part)
                )
            else:
                print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss))
        step_time += (time.time() - start_time) / args.test_every
        loss += step_loss / args.test_every
        current_step += 1

        if current_step % 20 == 0:
            sys.stdout.flush()

        # Decay learning rate (if appl.)
        if current_step % args.learning_rate_step == 0:
            for param_group in optimiser.param_groups:
                param_group["lr"] *= args.learning_rate_decay_factor
            print("Decay learning rate. New value at " + str(optimiser.param_groups[0]["lr"]))

        # End of the hard-EM phase: unfreeze the latent-scale group (if appl.)
        if is_hard_em and zls_ix is not None and current_step == args.hard_em_iters:
            optimiser.param_groups[zls_ix]["lr"] = z_lr
            model.standardise_aggregate_posterior()

        # Once in a while, we save checkpoint and print statistics.
        if current_step % args.test_every == 0:
            model.eval()
            # NOTE: held-out evaluation is not possible here because the latent
            # z of unseen test sequences is unknown; a large commented-out test
            # loop was removed for readability.
            torch.save(model, args.train_dir + "/model_" + str(current_step))
            previous_losses.append(loss)

            # Reset global time and loss
            step_time, loss = 0, 0
            sys.stdout.flush()
def read_all_data(args):
    """
    Load training inputs/outputs and wrap them in a DataIterator.

    Args
        args: experiment configuration; uses `data_dir`, `style_ix`,
            `human_size` (njoints; 0 or -1 = all), `train_set_size`,
            `stylelkp_fname`, `output_fname`, `input_fname` and
            `overlap_windows`.

    Returns
        mtfixb_model.DataIterator over the training sequences, holding out
        every sequence belonging to style `args.style_ix` (unless
        `train_set_size == 0`, in which case all data is used).
    """
    # === Read training data ===
    print("Reading training data (test index {0:d}).".format(args.style_ix))

    njoints = args.human_size

    # Style lookup: constructed from a fixed per-style size, or loaded from disk
    # when train_set_size == -1.
    if not args.train_set_size == -1:
        style_lkp = {
            str(i): range(1 + args.train_set_size * (i - 1), 1 + args.train_set_size * i) for i in range(1, 8 + 1)
        }
    else:
        style_lkp = np.load(os.path.join(args.data_dir, args.stylelkp_fname))

    train_set_Y = np.load(os.path.join(args.data_dir, args.output_fname))
    train_set_U = np.load(os.path.join(args.data_dir, args.input_fname))
    # njoints <= 0 means "use every joint present in the data".
    njoints = train_set_Y[str(0)].shape[1] if njoints <= 0 else njoints

    if args.train_set_size != 0:
        # Gather all sequence indices except those of the held-out style.
        train_ixs = np.concatenate(
            [
                style_lkp[str(i)] for i in range(1, len(style_lkp.keys()) + 1) if i != args.style_ix
            ]  # CAREFUL: jl is 1-based!
        )
        train_set_Y = [train_set_Y[str(i)][:, :njoints] for i in train_ixs]
        train_set_U = [train_set_U[str(i)] for i in train_ixs]
    else:
        # train_set_size == 0: use *all* sequences (max MTL data).
        assert args.style_ix not in range(1, 9), "no support for LOO experiments with max MTL data yet. Use style_ix=9"
        train_set_Y = [train_set_Y[str(i + 1)][:, :njoints] for i in range(len(train_set_Y))]
        train_set_U = [train_set_U[str(i + 1)] for i in range(len(train_set_U))]

    print("Using files {:s}; {:s}".format(args.input_fname, args.output_fname))
    print("done reading data.")

    return mtfixb_model.DataIterator(train_set_Y, train_set_U, 64, min_size=64, overlap2=args.overlap_windows)
if __name__ == "__main__":
main()
| 33.739958 | 119 | 0.59208 |
c408095eb7ab9da191765321215bacfdbf223067 | 11,260 | py | Python | python/tvm/topi/nn/conv2d_transpose.py | ccjoechou/tvm | 779dc51e1332f417fa4c304b595ce76891dfc33a | [
"Apache-2.0"
] | 4 | 2020-04-14T12:31:45.000Z | 2020-11-02T14:20:59.000Z | python/tvm/topi/nn/conv2d_transpose.py | ccjoechou/tvm | 779dc51e1332f417fa4c304b595ce76891dfc33a | [
"Apache-2.0"
] | null | null | null | python/tvm/topi/nn/conv2d_transpose.py | ccjoechou/tvm | 779dc51e1332f417fa4c304b595ce76891dfc33a | [
"Apache-2.0"
] | 1 | 2020-11-02T14:21:45.000Z | 2020-11-02T14:21:45.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import collections
import tvm
from tvm import relay, te
from ..utils import simplify
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
    """Compute a transposed 2D convolution in NCHW layout.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D input with shape [batch, in_channel, in_height, in_width].

    Filter : tvm.te.Tensor
        4-D weight with shape [in_channel, num_filter, filter_height, filter_width].

    strides : tuple of two ints
        Spatial stride along height and width.

    padding : int or str
        Padding size, or ['VALID', 'SAME'].

    out_dtype : str
        Output data type, used for mixed precision.

    output_padding : tuple of ints
        Extra output padding used to recover the right shape for gradients.

    Returns
    -------
    tvm.te.Tensor
        4-D output with shape [batch, out_channel, out_height, out_width].
    """
    # A single generic implementation serves this layout; forward to it.
    result = declaration_conv2d_transpose_impl(
        Input, Filter, strides, padding, out_dtype, output_padding=output_padding
    )
    return result
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
    """Dilate/pad the input and flip the kernel so that the transposed
    convolution can be computed as an ordinary convolution.

    Returns the padded data tensor and the transformed (OIHW, 180-degree
    rotated) kernel tensor.
    """
    _, cin, _, _ = data.shape
    _, cout, kh, kw = kernel.shape
    sh, sw = strides
    extra_h, extra_w = output_padding
    assert extra_h < sh and extra_w < sw

    # Insert (stride - 1) zeros between neighbouring input elements.
    dilated = dilate(data, [1, 1, sh, sw], name="data_dilate")

    # Convert the forward-conv padding into the equivalent back-conv padding.
    ptop, pleft, pbottom, pright = get_pad_tuple(padding, (kh, kw))
    pad_before = [0, 0, kh - 1 - ptop, kw - 1 - pleft]
    pad_after = [0, 0, kh - 1 - pbottom + extra_h, kw - 1 - pright + extra_w]
    padded = pad(dilated, pad_before, pad_after, name="data_pad")

    # Re-layout the kernel IOHW -> OIHW while rotating it by 180 degrees.
    flipped = te.compute(
        (cout, cin, kh, kw),
        lambda o, i, h, w: kernel[i][o][kh - 1 - h][kw - 1 - w],
        name="kernel_transform",
    )
    return padded, flipped
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
    """Implementation of conv2d transpose: after preprocessing (dilation,
    padding and kernel flip) the result is a plain stride-1 convolution."""
    data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
        data, kernel, strides, padding, out_dtype, output_padding
    )
    batch, in_c, in_h, in_w = data_pad.shape
    out_c, _, filter_h, filter_w = kernel_transform.shape

    # convolution stage: unit-stride, no-padding convolution over the
    # dilated/padded input.
    out_c = simplify(out_c)

    out_h = simplify(in_h - filter_h + 1)
    out_w = simplify(in_w - filter_w + 1)
    # Reduction axes over input channel and the kernel window.
    dc = te.reduce_axis((0, in_c), name="dc")
    dh = te.reduce_axis((0, filter_h), name="dh")
    dw = te.reduce_axis((0, filter_w), name="dw")
    Output = te.compute(
        (batch, out_c, out_h, out_w),
        lambda b, c, h, w: te.sum(
            data_pad[b, dc, h + dh, w + dw].astype(out_dtype)
            * kernel_transform[c, dc, dh, dw].astype(out_dtype),
            axis=[dc, dh, dw],
        ),
        tag="conv2d_transpose_nchw",
    )

    return Output
def group_conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding, groups):
    """Grouped transposed convolution operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]

    kernel : tvm.te.Tensor
        4-D with shape [in_channel, out_channel // groups, filter_height, filter_width]

    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]

    padding : int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints

    out_dtype : str
        The output data type. This is used for mixed precision.

    output_padding : tuple of ints
        Used to get the right output shape for gradients

    groups : int
        number of groups

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # A single group is an ordinary transposed convolution.
    if groups == 1:
        return conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding)

    # some pre-processing and prelimnary checks
    if out_dtype is None:
        out_dtype = data.dtype

    batch, in_channels, in_h, in_w = data.shape
    _, out_c, filter_h, filter_w = kernel.shape

    assert (
        in_channels % groups == 0
    ), f"input channels {in_channels} must divide group size {groups}"
    # assert out_c % groups == 0, f"output channels {in_c} must divide group size {groups}"

    strides = _pair(stride)
    # padding = _pair(padding)
    # output_padding = _pair(output_padding)
    # dilation = _pair(dilation)
    stride_h, stride_w = strides

    opad_h, opad_w = output_padding
    assert (
        opad_h < stride_h and opad_w < stride_w
    ), f"[{output_padding}] opad_h:{opad_h} < stride_h:{stride_h} \
        and opad_w:{opad_w} < stride_w:{stride_w} does not satisfy."
    # dilate data: insert (stride - 1) zeros between input elements
    data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
    # pad data: convert forward-conv padding into back-conv padding
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right + opad_w
    data_pad = pad(
        data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
    )
    # transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
    kernel_transform = te.compute(
        (out_c, in_channels, filter_h, filter_w),
        lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w],
        name="kernel_transform",
    )

    batch, in_channels, in_h, in_w = data_pad.shape
    out_c, _, filter_h, filter_w = kernel_transform.shape

    # convolution stage: unit-stride convolution over the prepared tensors
    out_channels = simplify(out_c * groups)

    out_h = simplify(in_h - filter_h + 1)
    out_w = simplify(in_w - filter_w + 1)
    # Reduction runs over the per-group input-channel slice and the window.
    dc = te.reduce_axis((0, in_channels // groups), name="dc")
    dh = te.reduce_axis((0, filter_h), name="dh")
    dw = te.reduce_axis((0, filter_w), name="dw")

    # data: batch, in_channels, out_h, out_w
    # weight: out_channels // G, in_channels, out_h, out_w
    return te.compute(
        (batch, out_channels, out_h, out_w),
        lambda b, c, h, w: te.sum(
            data_pad[
                b, c // (out_channels // groups) * (in_channels // groups) + dc, h + dh, w + dw
            ].astype(out_dtype)
            * kernel_transform[
                c % (out_channels // groups),
                c // (out_channels // groups) * (in_channels // groups) + dc,
                dh,
                dw,
            ].astype(out_dtype),
            axis=[dc, dh, dw],
        ),
        tag="group_conv2d_transpose_nchw",
    )
def layout_transform(tensor: "relay.Expr", current_layout: str, desired_layout: str):
    """Transform a tensor with the current layout to the desired layout.

    E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3])

    Parameters
    ----------
    tensor: relay.Expr
        The Tensor to transpose
    current_layout: str
        The current layout e.g. NCHW or OIHW
    desired_layout: str
        The desired layout, must be a permutation of the axes in current_layout

    Returns
    -------
    The layout_transformed tensor.
    """
    # The two layouts must consist of exactly the same axis letters.
    if sorted(current_layout) != sorted(desired_layout):
        raise ValueError(f"Incompatible layouts: {current_layout} vs {desired_layout}")
    if current_layout == desired_layout:
        return tensor
    # For every axis of the desired layout, find where it lives in the current one.
    source_position = {axis: idx for idx, axis in enumerate(current_layout)}
    axes = [source_position[axis] for axis in desired_layout]
    return relay.transpose(tensor, axes=axes)
| 34.329268 | 99 | 0.653819 |
c40810867a32dd051fe382d63b22b8bac17db49f | 91,964 | py | Python | econml/solutions/causal_analysis/_causal_analysis.py | huigangchen/EconML | 9a56d651e2964ebd05144de52f577f9044a22a0b | [
"BSD-3-Clause"
] | 1,846 | 2019-05-06T21:14:19.000Z | 2022-03-31T11:52:21.000Z | econml/solutions/causal_analysis/_causal_analysis.py | cleeway/EconML | fb2d1139f6c271d4b9a24d9c6d122d4d0891afb0 | [
"BSD-3-Clause"
] | 393 | 2019-05-08T00:55:32.000Z | 2022-03-31T14:26:16.000Z | econml/solutions/causal_analysis/_causal_analysis.py | cleeway/EconML | fb2d1139f6c271d4b9a24d9c6d122d4d0891afb0 | [
"BSD-3-Clause"
] | 414 | 2019-05-14T03:51:08.000Z | 2022-03-31T09:32:17.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Module for assessing causal feature importance."""
import warnings
from collections import OrderedDict, namedtuple
import joblib
import lightgbm as lgb
from numba.core.utils import erase_traceback
import numpy as np
from numpy.lib.function_base import iterable
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures, StandardScaler
from sklearn.tree import _tree
from sklearn.utils.validation import column_or_1d
from ...cate_interpreter import SingleTreeCateInterpreter, SingleTreePolicyInterpreter
from ...dml import LinearDML, CausalForestDML
from ...inference import NormalInferenceResults
from ...sklearn_extensions.linear_model import WeightedLasso
from ...sklearn_extensions.model_selection import GridSearchCVList
from ...utilities import _RegressionWrapper, inverse_onehot
# TODO: this utility is documented but internal; reimplement?
from sklearn.utils import _safe_indexing
# TODO: this utility is even less public...
from sklearn.utils import _get_column_indices
def _get_default_shared_insights_output():
    """
    Dictionary elements shared among all analyses.
    In case of breaking changes to this dictionary output, the major version of this
    dictionary should be updated. In case of a change to this dictionary, the minor
    version should be updated.
    """
    shared = {}
    # Per-feature metadata columns start empty and get filled in by each analysis.
    for list_key in (_CausalInsightsConstants.RawFeatureNameKey,
                     _CausalInsightsConstants.EngineeredNameKey,
                     _CausalInsightsConstants.CategoricalColumnKey,
                     _CausalInsightsConstants.TypeKey):
        shared[list_key] = []
    shared[_CausalInsightsConstants.Version] = '1.0'
    shared[_CausalInsightsConstants.CausalComputationTypeKey] = "simple"
    shared[_CausalInsightsConstants.ConfoundingIntervalKey] = None
    shared[_CausalInsightsConstants.InitArgsKey] = {}
    return shared
# simplification of sklearn's ColumnTransformer that encodes categoricals and passes through selected other columns
# but also supports get_feature_names with expected signature
# Wrapper to make sure that we get a deep copy of the contents instead of clone returning an untrained copy
def _freeze(transformer):
    """Freeze *transformer* so later uses keep a deep copy of its fitted state
    (cloning would otherwise return an untrained copy)."""
    wrapped = _Wrapper(transformer)
    return _FrozenTransformer(wrapped)
# Convert python objects to (possibly nested) types that can easily be represented as literals
# Convert SingleTreeInterpreter to a python dictionary
# named tuple type for storing results inside CausalAnalysis class;
# must be lifted to module level to enable pickling
# (pickle cannot serialize classes defined inside a function or method body)
_result = namedtuple("_result", field_names=[
    "feature_index", "feature_name", "feature_baseline", "feature_levels", "hinds",
    "X_transformer", "W_transformer", "estimator", "global_inference", "treatment_value"])
# Unless we're opting into minimal cross-fitting, this is the minimum number of instances of each category
# required to fit a discrete DML model
_CAT_LIMIT = 10
def _pandas_summary(self, get_inference, *, props, n,
                    expand_arr=False, keep_all_levels=False):
        """
        Summarizes results into a dataframe.
        Parameters
        ----------
        get_inference : lambda
            Method to get the relevant inference results from each result object
        props : list of (string, string or lambda)
            Set of column names and ways to get the corresponding values from the inference object
        n : int
            The number of samples in the dataset
        expand_arr : boolean, default False
            Whether to add a synthetic sample dimension to the result arrays when performing internal computations
        keep_all_levels : boolean, default False
            Whether to keep all levels, even when they don't take on more than one value;
            Note that regardless of this argument the "sample" level will only be present if expand_arr is False
        """
        # NOTE(review): `make_dataframe` is not defined in this excerpt, and `n`/`keep_all_levels`
        # are not used by the visible body — presumably a nested helper consuming them was elided;
        # confirm against the full source.
        return self._summarize(summary=make_dataframe,
                               get_inference=get_inference,
                               props=props,
                               expand_arr=expand_arr,
                               drop_sample=False)  # dropping the sample dimension is handled above instead
def _dict_summary(self, get_inference, *, n, props, kind, drop_sample=False, expand_arr=False, row_wise=False):
        """
        Summarizes results into a dictionary.
        Parameters
        ----------
        get_inference : lambda
            Method to get the relevant inference results from each result object
        n : int
            The number of samples in the dataset
        props : list of (string, string or lambda)
            Set of column names and ways to get the corresponding values from the inference object
        kind : string
            The kind of inference results to get (e.g. 'global', 'local', or 'cohort')
        drop_sample : boolean, default False
            Whether to drop the sample dimension from each array
        expand_arr : boolean, default False
            Whether to add an initial sample dimension to the result arrays
        row_wise : boolean, default False
            Whether to return a list of dictionaries (one dictionary per row) instead of
            a dictionary of lists (one list per column)
        """
        # NOTE(review): `make_dict` is not defined in this excerpt, and `n`/`kind`/`row_wise`
        # are not used by the visible body — presumably a nested helper consuming them was
        # elided; confirm against the full source.
        return self._summarize(summary=make_dict,
                               get_inference=get_inference,
                               props=props,
                               expand_arr=expand_arr,
                               drop_sample=drop_sample)
def global_causal_effect(self, *, alpha=0.05, keep_all_levels=False):
        """
        Get the global causal effect for each feature as a pandas DataFrame.
        Parameters
        ----------
        alpha : float, default 0.05
            The confidence level of the confidence interval
        keep_all_levels : bool, default False
            Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
            even if there was only a single value for that level; by default single-valued levels are dropped.
        Returns
        -------
        global_effects : pandas Dataframe
            Indexed by ['feature', 'feature_value'] with columns
            ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'].
            Each numerical feature contributes one row with feature_value literally 'num';
            each categorical feature contributes one row per non-baseline category with
            feature_value '{cat}v{base}', where base is the baseline category.
            If every feature is numerical, the feature_value index level is dropped from the
            dataframe (but not from the serialized dict).
        """
        # Each result's global inference captures the effect of that single feature on the outcome.
        point_props = self._point_props(alpha)
        return self._pandas_summary(lambda res: res.global_inference, props=point_props,
                                    n=1, expand_arr=True, keep_all_levels=keep_all_levels)
def _global_causal_effect_dict(self, *, alpha=0.05, row_wise=False):
        """
        Serialize the global causal effect of each feature as a dictionary.
        Entries for predictions etc. are nested lists of shape (d_y, sum(d_t));
        intended only for serialization purposes (e.g. upload to AzureML).
        """
        def _global_inference(res):
            return res.global_inference
        return self._dict_summary(_global_inference, props=self._point_props(alpha),
                                  kind='global', n=1, row_wise=row_wise,
                                  drop_sample=True, expand_arr=True)
def cohort_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
        """
        Get the average causal effects for a cohort defined by a population of X's.
        Parameters
        ----------
        Xtest : array-like
            The cohort samples for which to return the average causal effects within cohort
        alpha : float, default 0.05
            The confidence level of the confidence interval
        keep_all_levels : bool, default False
            Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
            even if there was only a single value for that level; by default single-valued levels are dropped.
        Returns
        -------
        cohort_effects : pandas Dataframe
            Indexed by ['feature', 'feature_value'] with columns
            ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'].
            Numerical features appear under the literal feature_value 'num'; categorical
            features appear once per non-baseline category as '{cat}v{base}'.
            If every feature is numerical, the feature_value index level is dropped from the
            dataframe (but not from the serialized dict).
        """
        cohort_inference = self._cohort_effect_inference(Xtest)
        return self._pandas_summary(cohort_inference,
                                    props=self._summary_props(alpha),
                                    n=1, expand_arr=True,
                                    keep_all_levels=keep_all_levels)
def _cohort_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
        """
        Serialize the cohort causal effects for each feature as a dictionary.
        Entries for predictions etc. are nested lists of shape (d_y, sum(d_t));
        intended only for serialization purposes (e.g. upload to AzureML).
        """
        cohort_inference = self._cohort_effect_inference(Xtest)
        return self._dict_summary(cohort_inference, props=self._summary_props(alpha),
                                  kind='cohort', n=1, row_wise=row_wise,
                                  expand_arr=True, drop_sample=True)
def local_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
        """
        Get the local (per-sample) causal effect of each feature as a pandas DataFrame.
        Parameters
        ----------
        Xtest : array-like
            The samples for which to return the causal effects
        alpha : float, default 0.05
            The confidence level of the confidence interval
        keep_all_levels : bool, default False
            Whether to keep all levels of the output dataframe ('sample', 'outcome', 'feature', and 'feature_level')
            even if there was only a single value for that level; by default single-valued levels are dropped.
        Returns
        -------
        local_effects : pandas Dataframe
            Indexed by ['sample', 'feature', 'feature_value'] with columns
            ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'], where sample is the
            row index into Xtest. Numerical features use the literal feature_value 'num';
            categorical features use '{cat}v{base}' per non-baseline category.
            If every feature is numerical, the feature_value index level is dropped from the
            dataframe (but not from the serialized dict).
        """
        local_inference = self._local_effect_inference(Xtest)
        sample_count = Xtest.shape[0]
        return self._pandas_summary(local_inference,
                                    props=self._point_props(alpha),
                                    n=sample_count,
                                    keep_all_levels=keep_all_levels)
def _local_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
        """
        Serialize the local feature importance as a dictionary.
        Entries for predictions etc. are nested lists of shape (n_rows, d_y, sum(d_t));
        intended only for serialization purposes (e.g. upload to AzureML).
        """
        local_inference = self._local_effect_inference(Xtest)
        return self._dict_summary(local_inference, props=self._point_props(alpha),
                                  kind='local', n=Xtest.shape[0], row_wise=row_wise)
def whatif(self, X, Xnew, feature_index, y, *, alpha=0.05):
        """
        Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.
        Note that this only applies to regression use cases; what-if analysis is not supported for classification.
        Parameters
        ----------
        X: array-like
            Features
        Xnew: array-like
            New values of a single column of X
        feature_index: int or string
            The index of the feature being varied to Xnew, either as a numeric index or
            the string name if the input is a dataframe
        y: array-like
            Observed labels or outcome of a predictive model for baseline y values
        alpha : float in [0, 1], default 0.05
            Confidence level of the confidence intervals.
            A (1-alpha)*100% confidence interval is returned.
        Returns
        -------
        y_new: DataFrame
            The predicted outputs that would have been observed under the counterfactual features
        """
        counterfactual_inference = self._whatif_inference(X, Xnew, feature_index, y)
        return counterfactual_inference.summary_frame(alpha=alpha)
def _whatif_dict(self, X, Xnew, feature_index, y, *, alpha=0.05, row_wise=False):
        """
        Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.
        Note that this only applies to regression use cases; for classification what-if analysis is not supported.
        Parameters
        ----------
        X: array-like
            Features
        Xnew: array-like
            New values of a single column of X
        feature_index: int or string
            The index of the feature being varied to Xnew, either as a numeric index or
            the string name if the input is a dataframe
        y: array-like
            Observed labels or outcome of a predictive model for baseline y values
        alpha : float in [0, 1], default 0.05
            Confidence level of the confidence intervals displayed in the leaf nodes.
            A (1-alpha)*100% confidence interval is displayed.
        row_wise : boolean, default False
            Whether to return a list of dictionaries (one dictionary per row) instead of
            a dictionary of lists (one list per column)
        Returns
        -------
        dict : dict
            The counterfactual predictions, as a dictionary
        """
        inf = self._whatif_inference(X, Xnew, feature_index, y)
        props = self._point_props(alpha=alpha)
        res = _get_default_specific_insights('whatif')
        if row_wise:
            # row-wise: build one flat dict of per-row values per property, then pivot into
            # a list of per-row dictionaries stored under the RowData key
            row_data = {}
            # remove entries belonging to row data, since we're including them in the list of nested dictionaries
            for k in _get_data_causal_insights_keys():
                del res[k]
            row_data.update([(key, self._make_accessor(attr)(inf).flatten()) for key, attr in props])
            # get the length of the list corresponding to the first dictionary key
            # `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into
            n_rows = len(row_data[list(row_data)[0]])
            res[_CausalInsightsConstants.RowData] = [{key: row_data[key][i]
                                                      for key in row_data} for i in range(n_rows)]
        else:
            # column-wise: one list per property, spanning all rows
            res.update([(key, self._make_accessor(attr)(inf).tolist()) for key, attr in props])
        return res
# TODO: it seems like it would be better to just return the tree itself rather than plot it;
# however, the tree can't store the feature and treatment names we compute here...
def plot_policy_tree(self, Xtest, feature_index, *, treatment_costs=0,
                     max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, include_model_uncertainty=False,
                     alpha=0.05):
        """
        Plot a recommended policy tree using matplotlib.
        Parameters
        ----------
        Xtest : array-like
            Features
        feature_index
            Index of the feature to be considered as treatment
        treatment_costs: array-like, default 0
            Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
            unit of treatment; for discrete features, this is the difference in cost between each of the non-default
            values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
        max_depth : int, default 3
            maximum depth of the tree
        min_samples_leaf : int, default 2
            minimum number of samples on each leaf
        min_value_increase : float, default 1e-4
            The minimum increase in the policy value that a split needs to create to construct it
        include_model_uncertainty : bool, default False
            Whether to include confidence interval information when building a simplified model of the cate model.
        alpha : float in [0, 1], default 0.05
            Confidence level of the confidence intervals displayed in the leaf nodes.
            A (1-alpha)*100% confidence interval is displayed.
        """
        # self._tree is shared with the heterogeneity plot; True requests the policy variant here.
        tree_parts = self._tree(True, Xtest, feature_index,
                                treatment_costs=treatment_costs,
                                max_depth=max_depth,
                                min_samples_leaf=min_samples_leaf,
                                min_impurity_decrease=min_value_increase,
                                include_model_uncertainty=include_model_uncertainty,
                                alpha=alpha)
        interpreter, feature_names, treatment_names, _ = tree_parts
        return interpreter.plot(feature_names=feature_names, treatment_names=treatment_names)
def _policy_tree_output(self, Xtest, feature_index, *, treatment_costs=0,
                        max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, alpha=0.05):
        """
        Get a tuple of policy outputs.
        The first item in the tuple is the recommended policy tree expressed as a dictionary.
        The second item is the per-unit-average value of applying the learned policy; if the feature is continuous this
        means the gain from increasing the treatment by 10% of the typical amount for units where the treatment should
        be increased and decreasing the treatment by 10% of the typical amount when not.
        The third item is the value of always treating. This is a list, with one entry per non-control-treatment for
        discrete features, or just a single entry for continuous features, again increasing by 10% of a typical amount.
        Parameters
        ----------
        X : array-like
            Features
        feature_index
            Index of the feature to be considered as treatment
        treatment_costs: array-like, default 0
            Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
            unit of treatment; for discrete features, this is the difference in cost between each of the non-default
            values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
        max_depth : int, default 3
            maximum depth of the tree
        min_samples_leaf : int, default 2
            minimum number of samples on each leaf
        min_value_increase : float, default 1e-4
            The minimum increase in the policy value that a split needs to create to construct it
        alpha : float in [0, 1], default 0.05
            Confidence level of the confidence intervals displayed in the leaf nodes.
            A (1-alpha)*100% confidence interval is displayed.
        Returns
        -------
        output : _PolicyOutput
        """
        (intrp, feature_names, treatment_names,
         (policy_val, always_trt)) = self._tree(True, Xtest, feature_index,
                                                treatment_costs=treatment_costs,
                                                max_depth=max_depth,
                                                min_samples_leaf=min_samples_leaf,
                                                min_impurity_decrease=min_value_increase,
                                                alpha=alpha)
        # NOTE(review): `policy_data` is referenced below but not defined in this excerpt;
        # presumably a nested helper building per-node data was elided — confirm against the full source.
        return _PolicyOutput(_tree_interpreter_to_dict(intrp, feature_names, policy_data),
                             policy_val,
                             {treatment_names[i + 1]: val
                              for (i, val) in enumerate(always_trt.tolist())},
                             treatment_names[0])
# TODO: it seems like it would be better to just return the tree itself rather than plot it;
# however, the tree can't store the feature and treatment names we compute here...
def plot_heterogeneity_tree(self, Xtest, feature_index, *,
                            max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
                            include_model_uncertainty=False,
                            alpha=0.05):
        """
        Plot an effect heterogeneity tree using matplotlib.
        Parameters
        ----------
        Xtest : array-like
            Features
        feature_index
            Index of the feature to be considered as treatment
        max_depth : int, default 3
            maximum depth of the tree
        min_samples_leaf : int, default 2
            minimum number of samples on each leaf
        min_impurity_decrease : float, default 1e-4
            The minimum decrease in the impurity/uniformity of the causal effect that a split needs to
            achieve to construct it
        include_model_uncertainty : bool, default False
            Whether to include confidence interval information when building a simplified model of the cate model.
        alpha : float in [0, 1], default 0.05
            Confidence level of the confidence intervals displayed in the leaf nodes.
            A (1-alpha)*100% confidence interval is displayed.
        """
        # self._tree is shared with the policy plot; False requests the heterogeneity variant here.
        tree_parts = self._tree(False, Xtest, feature_index,
                                max_depth=max_depth,
                                min_samples_leaf=min_samples_leaf,
                                min_impurity_decrease=min_impurity_decrease,
                                include_model_uncertainty=include_model_uncertainty,
                                alpha=alpha)
        interpreter, feature_names, treatment_names, _ = tree_parts
        return interpreter.plot(feature_names=feature_names,
                                treatment_names=treatment_names)
def _heterogeneity_tree_output(self, Xtest, feature_index, *,
                               max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
                               include_model_uncertainty=False, alpha=0.05):
        """
        Get an effect heterogeneity tree expressed as a dictionary.
        Parameters
        ----------
        X : array-like
            Features
        feature_index
            Index of the feature to be considered as treatment
        max_depth : int, optional (default=3)
            maximum depth of the tree
        min_samples_leaf : int, optional (default=2)
            minimum number of samples on each leaf
        min_impurity_decrease : float, optional (default=1e-4)
            The minimum decrease in the impurity/uniformity of the causal effect that a split needs to
            achieve to construct it
        include_model_uncertainty : bool, default False
            Whether to include confidence interval information when building a simplified model of the cate model.
        alpha : float in [0, 1], default 0.05
            Confidence level of the confidence intervals displayed in the leaf nodes.
            A (1-alpha)*100% confidence interval is displayed.
        """
        intrp, feature_names, _, _ = self._tree(False, Xtest, feature_index,
                                                max_depth=max_depth,
                                                min_samples_leaf=min_samples_leaf,
                                                min_impurity_decrease=min_impurity_decrease,
                                                include_model_uncertainty=include_model_uncertainty,
                                                alpha=alpha)
        # NOTE(review): `hetero_data` is referenced below but not defined in this excerpt;
        # presumably a nested helper building per-node data was elided — confirm against the full source.
        return _tree_interpreter_to_dict(intrp, feature_names, hetero_data)
def individualized_policy(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05):
        """
        Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect.
        Parameters
        ----------
        Xtest: array-like
            Features
        feature_index: int or string
            Index of the feature to be considered as treatment
        n_rows: int, optional
            How many rows to return (all rows by default)
        treatment_costs: array-like, default 0
            Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
            unit of treatment; for discrete features, this is the difference in cost between each of the non-default
            values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
        alpha: float in [0, 1], default 0.05
            Confidence level of the confidence intervals
            A (1-alpha)*100% confidence interval is returned
        Returns
        -------
        output: DataFrame
            Dataframe containing recommended treatment, effect, confidence interval, sorted by effect
        """
        result = self._safe_result_index(Xtest, feature_index)
        # get dataframe with all but selected column
        orig_df = pd.DataFrame(Xtest, columns=self.feature_names_).rename(
            columns={self.feature_names_[result.feature_index]: 'Current treatment'})
        Xtest = result.X_transformer.transform(Xtest)
        if Xtest.shape[1] == 0:
            # transformer produced no heterogeneity features; remember the row count and
            # pass None so the estimator computes a single (constant) effect
            x_rows = Xtest.shape[0]
            Xtest = None
        if result.feature_baseline is None:
            # apply 10% of a typical treatment for this feature
            effect = result.estimator.effect_inference(Xtest, T1=result.treatment_value * 0.1)
        else:
            effect = result.estimator.const_marginal_effect_inference(Xtest)
        if Xtest is None: # we got a scalar effect although our original X may have had more rows
            effect = effect._expand_outputs(x_rows)
        multi_y = (not self._vec_y) or self.classification
        if multi_y and result.feature_baseline is not None and np.ndim(treatment_costs) == 2:
            # we've got treatment costs of shape (n, d_t-1) so we need to add a y dimension to broadcast safely
            treatment_costs = np.expand_dims(treatment_costs, 1)
        # net out the cost of treatment from the raw effect
        effect.translate(-treatment_costs)
        est = effect.point_estimate
        est_lb = effect.conf_int(alpha)[0]
        est_ub = effect.conf_int(alpha)[1]
        if multi_y:  # y was an array, not a vector
            est = np.squeeze(est, 1)
            est_lb = np.squeeze(est_lb, 1)
            est_ub = np.squeeze(est_ub, 1)
        if result.feature_baseline is None:
            # continuous treatment: recommend increasing when the net effect is positive
            rec = np.empty(est.shape[0], dtype=object)
            rec[est > 0] = "increase"
            rec[est <= 0] = "decrease"
            # set the effect bounds; for positive treatments these agree with
            # the estimates; for negative treatments, we need to invert the interval
            eff_lb, eff_ub = est_lb, est_ub
            eff_lb[est <= 0], eff_ub[est <= 0] = -eff_ub[est <= 0], -eff_lb[est <= 0]
            # the effect is now always positive since we decrease treatment when negative
            eff = np.abs(est)
        else:
            # for discrete treatment, stack a zero result in front for control
            zeros = np.zeros((est.shape[0], 1))
            all_effs = np.hstack([zeros, est])
            eff_ind = np.argmax(all_effs, axis=1)
            treatment_arr = np.array([result.feature_baseline] + [lvl for lvl in result.feature_levels], dtype=object)
            rec = treatment_arr[eff_ind]
            # we need to call effect_inference to get the correct CI between the two treatment options
            effect = result.estimator.effect_inference(Xtest, T0=orig_df['Current treatment'], T1=rec)
            # we now need to construct the delta in the cost between the two treatments and translate the effect
            current_treatment = orig_df['Current treatment'].values
            if np.ndim(treatment_costs) >= 2:
                # remove third dimension potentially added
                if multi_y:  # y was an array, not a vector
                    treatment_costs = np.squeeze(treatment_costs, 1)
                assert treatment_costs.shape[1] == len(treatment_arr) - 1, ("If treatment costs are an array, "
                                                                            " they must be of shape (n, d_t-1),"
                                                                            " where n is the number of samples"
                                                                            " and d_t the number of treatment"
                                                                            " categories.")
                all_costs = np.hstack([zeros, treatment_costs])
                # find cost of current treatment: equality creates a 2d array with True on each row,
                # only if its the location of the current treatment. Then we take the corresponding cost.
                current_cost = all_costs[current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)]
                target_cost = np.take_along_axis(all_costs, eff_ind.reshape(-1, 1), 1).reshape(-1)
            else:
                assert isinstance(treatment_costs, (int, float)), ("Treatments costs should either be float or "
                                                                   "a 2d array of size (n, d_t-1).")
                all_costs = np.array([0] + [treatment_costs] * (len(treatment_arr) - 1))
                # construct index of current treatment
                current_ind = (current_treatment.reshape(-1, 1) ==
                               treatment_arr.reshape(1, -1)) @ np.arange(len(treatment_arr))
                current_cost = all_costs[current_ind]
                target_cost = all_costs[eff_ind]
            delta_cost = current_cost - target_cost
            # add second dimension if needed for broadcasting during translation of effect
            if multi_y:
                delta_cost = np.expand_dims(delta_cost, 1)
            effect.translate(delta_cost)
            eff = effect.point_estimate
            eff_lb, eff_ub = effect.conf_int(alpha)
            if multi_y:  # y was an array, not a vector
                eff = np.squeeze(eff, 1)
                eff_lb = np.squeeze(eff_lb, 1)
                eff_ub = np.squeeze(eff_ub, 1)
        df = pd.DataFrame({'Treatment': rec,
                           'Effect of treatment': eff,
                           'Effect of treatment lower bound': eff_lb,
                           'Effect of treatment upper bound': eff_ub},
                          index=orig_df.index)
        return df.join(orig_df).sort_values('Effect of treatment',
                                            ascending=False).head(n_rows)
def _individualized_policy_dict(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05):
        """
        Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect.
        Parameters
        ----------
        Xtest: array-like
            Features
        feature_index: int or string
            Index of the feature to be considered as treatment
        n_rows: int, optional
            How many rows to return (all rows by default)
        treatment_costs: array-like, default 0
            Cost of treatment, as a scalar value or per-sample
        alpha: float in [0, 1], default 0.05
            Confidence level of the confidence intervals
            A (1-alpha)*100% confidence interval is returned
        Returns
        -------
        output: dictionary
            dictionary containing treatment policy, effects, and other columns
        """
        policy_df = self.individualized_policy(Xtest, feature_index,
                                               n_rows=n_rows,
                                               treatment_costs=treatment_costs,
                                               alpha=alpha)
        # serialize the dataframe as column-name -> list-of-values
        return policy_df.to_dict('list')
def typical_treatment_value(self, feature_index):
        """
        Get the typical treatment value used for the specified feature.
        Parameters
        ----------
        feature_index: int or string
            The index of the feature to be considered as treatment
        Returns
        -------
        treatment_value : float
            The treatment value considered 'typical' for this feature
        """
        matches = [res for res in self._results if res.feature_index == feature_index]
        if not matches:
            # Nothing matched on the numeric index; fall back to looking the feature up by name.
            if not self._has_column_names:
                raise ValueError(f"No feature with index {feature_index}")
            matches = [res for res in self._results if res.feature_name == feature_index]
            assert len(matches) == 1, f"Could not find feature with index/name {feature_index}"
        return matches[0].treatment_value
| 51.319196 | 119 | 0.603562 |
c4083724a00de9c5692943d43c6a11f16b96a31e | 1,365 | py | Python | problem solving/mini-max-sum.py | avnoor-488/hackerrank-solutions | b62315549c254d88104b70755e4dfcd43eba59bf | [
"MIT"
] | 1 | 2020-10-01T16:54:52.000Z | 2020-10-01T16:54:52.000Z | problem solving/mini-max-sum.py | avnoor-488/hackerrank-solutions | b62315549c254d88104b70755e4dfcd43eba59bf | [
"MIT"
] | 2 | 2020-10-07T02:22:13.000Z | 2020-10-22T06:15:50.000Z | problem solving/mini-max-sum.py | avnoor-488/hackerrank-solutions | b62315549c254d88104b70755e4dfcd43eba59bf | [
"MIT"
] | 9 | 2020-10-01T12:30:56.000Z | 2020-10-22T06:10:14.000Z | '''
problem--
Given five positive integers, find the minimum and maximum values that can be calculated by summing exactly four of the five integers. Then print the respective minimum and maximum values as a single line of two space-separated long integers.
For example, arr=[1,3,5,7,9]. Our minimum sum is 1+3+5+7=16 and our maximum sum is 3+5+7+9=24. We would print
16 24
Function Description--
Complete the miniMaxSum function in the editor below. It should print two space-separated integers on one line: the minimum sum and the maximum sum of 4 of 5 elements.
miniMaxSum has the following parameter(s):
arr: an array of 5 integers
Input Format--
A single line of five space-separated integers.
Constraints--
1 <= arr[i] <= 10^9
Output Format--
Print two space-separated long integers denoting the respective minimum and maximum values that can be calculated by summing exactly four of the five integers. (The output can be greater than a 32 bit integer.)
Sample Input---
1 2 3 4 5
Sample Output--
10 14
'''
#code here
#!/bin/python3
import math
import os
import random
import re
import sys


def miniMaxSum(arr):
    """Print the minimum and maximum values obtainable by summing exactly
    four of the five integers in `arr`, space-separated on a single line.

    Summing four of the five elements equals the total minus the element
    left out, so the minimum comes from omitting the largest element and
    the maximum from omitting the smallest.
    """
    total = sum(arr)
    print(total - max(arr), total - min(arr))


if __name__ == '__main__':
    arr = list(map(int, input().rstrip().split()))
    miniMaxSum(arr)
| 24.375 | 242 | 0.710623 |
c40b63017932ee0022e50a1cd077dafbac537066 | 4,610 | py | Python | 19/network.py | jcsesznegi/advent-of-code-2017 | 9710e184e092b82aa798076b9ce3915c6e42758d | [
"MIT"
] | 1 | 2020-04-12T17:54:52.000Z | 2020-04-12T17:54:52.000Z | 19/network.py | jcsesznegi/advent-of-code-2017 | 9710e184e092b82aa798076b9ce3915c6e42758d | [
"MIT"
] | null | null | null | 19/network.py | jcsesznegi/advent-of-code-2017 | 9710e184e092b82aa798076b9ce3915c6e42758d | [
"MIT"
] | null | null | null | from pprint import pprint
from enum import Enum
| 37.786885 | 74 | 0.632972 |
c40cb374c8f69dbfb3dd6a423d469c3fd1845232 | 2,639 | py | Python | examples/gan.py | maxferrari/Torchelie | d133f227bebc3c4cbbb6167bd1fae815d2b5fa81 | [
"MIT"
] | 117 | 2019-07-14T20:39:48.000Z | 2021-10-17T19:16:48.000Z | examples/gan.py | maxferrari/Torchelie | d133f227bebc3c4cbbb6167bd1fae815d2b5fa81 | [
"MIT"
] | 41 | 2019-12-06T23:56:44.000Z | 2021-08-02T09:13:30.000Z | examples/gan.py | maxferrari/Torchelie | d133f227bebc3c4cbbb6167bd1fae815d2b5fa81 | [
"MIT"
] | 13 | 2019-09-22T00:46:54.000Z | 2021-04-09T15:53:15.000Z | import argparse
import copy
import torch
from torchvision.datasets import MNIST, CIFAR10
import torchvision.transforms as TF
import torchelie as tch
import torchelie.loss.gan.hinge as gan_loss
from torchelie.recipes.gan import GANRecipe
import torchelie.callbacks as tcb
from torchelie.recipes import Recipe
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
opts = parser.parse_args()
device = 'cpu' if opts.cpu else 'cuda'
BS = 32
tfms = TF.Compose([
TF.Resize(64),
tch.transforms.AdaptPad((64, 64)),
TF.RandomHorizontalFlip(),
TF.ToTensor()])
ds = CIFAR10('~/.cache/torch/cifar10', download=True, transform=tfms)
dl = torch.utils.data.DataLoader(ds,
num_workers=4,
batch_size=BS,
shuffle=True)
train_net(tch.models.autogan_64, tch.models.snres_discr_4l)
| 29.322222 | 84 | 0.61349 |
c40ce4ea8967938d11ba63e971d617289f172e0d | 22 | py | Python | Python/SCRIPT PYTHON/Hello.py | guimaraesalves/material-python | d56b6b24ae35a67d394b43cb1ef4420805c7bd9b | [
"MIT"
] | null | null | null | Python/SCRIPT PYTHON/Hello.py | guimaraesalves/material-python | d56b6b24ae35a67d394b43cb1ef4420805c7bd9b | [
"MIT"
] | null | null | null | Python/SCRIPT PYTHON/Hello.py | guimaraesalves/material-python | d56b6b24ae35a67d394b43cb1ef4420805c7bd9b | [
"MIT"
] | null | null | null | print ("Hello Word!")
| 11 | 21 | 0.636364 |
c40e9360b8918f73e4cf97eef85c363173d03ce0 | 21,719 | py | Python | hs_geo_raster_resource/serialization.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | 1 | 2018-09-17T13:07:29.000Z | 2018-09-17T13:07:29.000Z | hs_geo_raster_resource/serialization.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | 100 | 2017-08-01T23:48:04.000Z | 2018-04-03T13:17:27.000Z | hs_geo_raster_resource/serialization.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | 2 | 2017-07-27T20:41:33.000Z | 2017-07-27T22:40:57.000Z | import xml.sax
import rdflib
from django.db import transaction
from hs_core.serialization import GenericResourceMeta
| 47.215217 | 97 | 0.55776 |
c410261f2af66c058c52c7122ed945e7bc1bf8e8 | 857 | py | Python | setup.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | 5 | 2019-01-15T12:31:49.000Z | 2021-03-05T21:17:13.000Z | setup.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | 1 | 2019-06-18T20:58:21.000Z | 2019-06-18T20:58:21.000Z | setup.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
import versioneer
packages = ['pygdf',
'pygdf.tests',
]
install_requires = [
'numba',
]
setup(name='pygdf',
description="GPU Dataframe",
version=versioneer.get_version(),
classifiers=[
# "Development Status :: 4 - Beta",
"Intended Audience :: Developers",
# "Operating System :: OS Independent",
"Programming Language :: Python",
# "Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
],
# Include the separately-compiled shared library
author="Continuum Analytics, Inc.",
packages=packages,
package_data={
'pygdf.tests': ['data/*.pickle'],
},
install_requires=install_requires,
license="BSD",
cmdclass=versioneer.get_cmdclass(),
)
| 24.485714 | 54 | 0.588098 |
c412a68b17b363d84e8cdaf62f22ff38191fc6e5 | 335 | py | Python | pymarl/envs/__init__.py | twoodford/pymarl | c78e63e54ed772171fbcaea6c55c703cff0e9302 | [
"Apache-2.0"
] | null | null | null | pymarl/envs/__init__.py | twoodford/pymarl | c78e63e54ed772171fbcaea6c55c703cff0e9302 | [
"Apache-2.0"
] | null | null | null | pymarl/envs/__init__.py | twoodford/pymarl | c78e63e54ed772171fbcaea6c55c703cff0e9302 | [
"Apache-2.0"
] | null | null | null | from functools import partial
from .multiagentenv import MultiAgentEnv
import sys
import os
REGISTRY = {}
#REGISTRY["sc2"] = partial(env_fn, env=StarCraft2Env)
| 22.333333 | 53 | 0.740299 |
c415cf0f1a05df7a1ed0253bc2693cc05cb80cc0 | 4,938 | py | Python | gumtree_watchdog/db.py | undeadparrot/gumtree-telegram-watchdog | 48db6b37876c520bd5d2e0f9a97e19b04d70e12f | [
"MIT"
] | 1 | 2019-03-04T15:38:01.000Z | 2019-03-04T15:38:01.000Z | gumtree_watchdog/db.py | undeadparrot/gumtree-telegram-watchdog | 48db6b37876c520bd5d2e0f9a97e19b04d70e12f | [
"MIT"
] | null | null | null | gumtree_watchdog/db.py | undeadparrot/gumtree-telegram-watchdog | 48db6b37876c520bd5d2e0f9a97e19b04d70e12f | [
"MIT"
] | null | null | null | import os
import os.path
import sqlite3
import logging
from typing import List
from gumtree_watchdog.types import Listing, Contract, ListingWithChatId
TConn = sqlite3.Connection
DB_PATH = os.environ.get('GUMTREE_DB')
| 28.37931 | 92 | 0.584447 |
c4184af48713ebd40a957015c82bc531d4f8d4b7 | 3,601 | py | Python | apps/1d/mhd/shock_tube/this_app_params.py | dcseal/finess | 766e583ae9e84480640c7c3b3c157bf40ab87fe4 | [
"BSD-3-Clause"
] | null | null | null | apps/1d/mhd/shock_tube/this_app_params.py | dcseal/finess | 766e583ae9e84480640c7c3b3c157bf40ab87fe4 | [
"BSD-3-Clause"
] | null | null | null | apps/1d/mhd/shock_tube/this_app_params.py | dcseal/finess | 766e583ae9e84480640c7c3b3c157bf40ab87fe4 | [
"BSD-3-Clause"
] | null | null | null | #section [initial]
parameter_list, accessor_list, check_list = \
_parameters_accessors_checks()
| 31.587719 | 68 | 0.463482 |
c418d7e5abef02bb7493320d6cd67da6e01f6114 | 1,142 | py | Python | async-functions.py | cheezyy/python_scripts | 9db713ca085c6f1fd5ec63d79762a470093e028a | [
"MIT"
] | null | null | null | async-functions.py | cheezyy/python_scripts | 9db713ca085c6f1fd5ec63d79762a470093e028a | [
"MIT"
] | null | null | null | async-functions.py | cheezyy/python_scripts | 9db713ca085c6f1fd5ec63d79762a470093e028a | [
"MIT"
] | null | null | null | '''
Chad Meadowcroft
Credit to Sentdex (https://pythonprogramming.net/)
'''
import asyncio
if __name__ == '__main__':
try:
loop = asyncio.get_event_loop()
loop.set_debug(1)
d1, d2, d3 = loop.run_until_complete(main())
print(d1.result())
except Exception as e:
pass
finally:
loop.close() | 29.282051 | 77 | 0.645359 |
c41996b81d3533341a720d569e52c1e49f5c467b | 1,114 | py | Python | setup.py | jackaraz/ma5_expert | 4d359b5110874c2f44f81e10307bd1ea3f9e20d0 | [
"MIT"
] | 2 | 2021-04-06T08:37:41.000Z | 2022-01-07T09:15:25.000Z | setup.py | jackaraz/ma5_expert | 4d359b5110874c2f44f81e10307bd1ea3f9e20d0 | [
"MIT"
] | null | null | null | setup.py | jackaraz/ma5_expert | 4d359b5110874c2f44f81e10307bd1ea3f9e20d0 | [
"MIT"
] | null | null | null | from setuptools import setup
import os
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
requirements = []
if os.path.isfile("./requirements.txt"):
with open("requirements.txt", "r") as f:
requirements = f.read()
requirements = [x for x in requirements.split("\n") if x != ""]
setup(
name="ma5_expert",
version="0.0.1",
description=("MadAnalysis 5 interpreter for Expert mode"),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jackaraz/ma5_expert",
author="Jack Y. Araz",
author_email=("jack.araz@durham.ac.uk"),
license="MIT",
packages=[
"ma5_expert",
"ma5_expert.CutFlow",
"ma5_expert.tools",
],
install_requires=requirements,
python_requires=">=3.6",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Physics",
],
)
| 29.315789 | 67 | 0.630162 |
c41a92320c98d0d79eebb92f7c12dfc1830b9325 | 4,977 | py | Python | apitest/api_test/common/auth.py | willhuang1206/apitest | 4b41855710ba8f21788027da83a830f631e11f26 | [
"Apache-2.0"
] | null | null | null | apitest/api_test/common/auth.py | willhuang1206/apitest | 4b41855710ba8f21788027da83a830f631e11f26 | [
"Apache-2.0"
] | 3 | 2020-06-06T01:57:41.000Z | 2021-06-10T22:57:58.000Z | apitest/api_test/common/auth.py | willhuang1206/apitest | 4b41855710ba8f21788027da83a830f631e11f26 | [
"Apache-2.0"
] | null | null | null | from rest_framework.authentication import BaseAuthentication
from rest_framework import exceptions
from rest_framework.parsers import JSONParser
from django.conf import settings
import requests
from api_test.common import MD5
from api_test.models import ProjectMember
from django.contrib.auth.models import User,Group
from rest_framework.authtoken.models import Token
ssoLogin=settings.SSO_LOGIN
ssoClientId=settings.SSO_CLIENTID
ssoClientSecret=settings.SSO_CLIENTSECRET
ssoRedirectUrl=settings.SSO_REDIRECTURL
ssoNotifyUrl=settings.SSO_NOTIFYURL
ssoGetTicketUrl=settings.SSO_GETTICKETURL
#ssotoken
ssoValidateUrl=settings.SSO_VALIDATEURL
ssoLoginUrl=settings.SSO_LOGINURL
ssoLogoutUrl=settings.SSO_LOGOUTURL
def permission_required(*permissions):
''' '''
return wrapper | 42.905172 | 166 | 0.569821 |
c41b88fc454a463ac7213753efc46174f0522ef0 | 12,745 | py | Python | futu/common/pb/Qot_GetPriceReminder_pb2.py | Hason-Cheung/py-futu-api | caa2f136ee07a2b123c79b2d75bbb524d7873e53 | [
"Apache-2.0"
] | 858 | 2018-11-12T12:54:56.000Z | 2022-03-10T17:35:43.000Z | futu/common/pb/Qot_GetPriceReminder_pb2.py | EricChengg/hongkong-futu-user-investment-report-generater | d450260a107f9e053036c31b05b8290b7b22c237 | [
"Apache-2.0"
] | 113 | 2018-11-12T01:52:31.000Z | 2022-02-27T03:53:07.000Z | futu/common/pb/Qot_GetPriceReminder_pb2.py | EricChengg/hongkong-futu-user-investment-report-generater | d450260a107f9e053036c31b05b8290b7b22c237 | [
"Apache-2.0"
] | 201 | 2018-11-19T08:32:45.000Z | 2022-03-23T06:39:02.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Qot_GetPriceReminder.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
import Qot_Common_pb2 as Qot__Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='Qot_GetPriceReminder.proto',
package='Qot_GetPriceReminder',
syntax='proto2',
serialized_pb=_b('\n\x1aQot_GetPriceReminder.proto\x12\x14Qot_GetPriceReminder\x1a\x0c\x43ommon.proto\x1a\x10Qot_Common.proto\"k\n\x11PriceReminderItem\x12\x0b\n\x03key\x18\x01 \x02(\x03\x12\x0c\n\x04type\x18\x02 \x02(\x05\x12\r\n\x05value\x18\x03 \x02(\x01\x12\x0c\n\x04note\x18\x04 \x02(\t\x12\x0c\n\x04\x66req\x18\x05 \x02(\x05\x12\x10\n\x08isEnable\x18\x06 \x02(\x08\"r\n\rPriceReminder\x12&\n\x08security\x18\x01 \x02(\x0b\x32\x14.Qot_Common.Security\x12\x39\n\x08itemList\x18\x02 \x03(\x0b\x32\'.Qot_GetPriceReminder.PriceReminderItem\"=\n\x03\x43\x32S\x12&\n\x08security\x18\x01 \x01(\x0b\x32\x14.Qot_Common.Security\x12\x0e\n\x06market\x18\x02 \x01(\x05\"E\n\x03S2C\x12>\n\x11priceReminderList\x18\x01 \x03(\x0b\x32#.Qot_GetPriceReminder.PriceReminder\"1\n\x07Request\x12&\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x19.Qot_GetPriceReminder.C2S\"j\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12&\n\x03s2c\x18\x04 \x01(\x0b\x32\x19.Qot_GetPriceReminder.S2CBJ\n\x13\x63om.futu.openapi.pbZ3github.com/futuopen/ftapi4go/pb/qotgetpricereminder')
,
dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])
_PRICEREMINDERITEM = _descriptor.Descriptor(
name='PriceReminderItem',
full_name='Qot_GetPriceReminder.PriceReminderItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Qot_GetPriceReminder.PriceReminderItem.key', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='Qot_GetPriceReminder.PriceReminderItem.type', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Qot_GetPriceReminder.PriceReminderItem.value', index=2,
number=3, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='note', full_name='Qot_GetPriceReminder.PriceReminderItem.note', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freq', full_name='Qot_GetPriceReminder.PriceReminderItem.freq', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isEnable', full_name='Qot_GetPriceReminder.PriceReminderItem.isEnable', index=5,
number=6, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=191,
)
_PRICEREMINDER = _descriptor.Descriptor(
name='PriceReminder',
full_name='Qot_GetPriceReminder.PriceReminder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='security', full_name='Qot_GetPriceReminder.PriceReminder.security', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='itemList', full_name='Qot_GetPriceReminder.PriceReminder.itemList', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=193,
serialized_end=307,
)
_C2S = _descriptor.Descriptor(
name='C2S',
full_name='Qot_GetPriceReminder.C2S',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='security', full_name='Qot_GetPriceReminder.C2S.security', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='market', full_name='Qot_GetPriceReminder.C2S.market', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=309,
serialized_end=370,
)
_S2C = _descriptor.Descriptor(
name='S2C',
full_name='Qot_GetPriceReminder.S2C',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='priceReminderList', full_name='Qot_GetPriceReminder.S2C.priceReminderList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=372,
serialized_end=441,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Qot_GetPriceReminder.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='c2s', full_name='Qot_GetPriceReminder.Request.c2s', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=443,
serialized_end=492,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Qot_GetPriceReminder.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retType', full_name='Qot_GetPriceReminder.Response.retType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=True, default_value=-400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retMsg', full_name='Qot_GetPriceReminder.Response.retMsg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='errCode', full_name='Qot_GetPriceReminder.Response.errCode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s2c', full_name='Qot_GetPriceReminder.Response.s2c', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=494,
serialized_end=600,
)
_PRICEREMINDER.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY
_PRICEREMINDER.fields_by_name['itemList'].message_type = _PRICEREMINDERITEM
_C2S.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY
_S2C.fields_by_name['priceReminderList'].message_type = _PRICEREMINDER
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['PriceReminderItem'] = _PRICEREMINDERITEM
DESCRIPTOR.message_types_by_name['PriceReminder'] = _PRICEREMINDER
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PriceReminderItem = _reflection.GeneratedProtocolMessageType('PriceReminderItem', (_message.Message,), dict(
DESCRIPTOR = _PRICEREMINDERITEM,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.PriceReminderItem)
))
_sym_db.RegisterMessage(PriceReminderItem)
PriceReminder = _reflection.GeneratedProtocolMessageType('PriceReminder', (_message.Message,), dict(
DESCRIPTOR = _PRICEREMINDER,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.PriceReminder)
))
_sym_db.RegisterMessage(PriceReminder)
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
DESCRIPTOR = _C2S,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.C2S)
))
_sym_db.RegisterMessage(C2S)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
DESCRIPTOR = _S2C,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.S2C)
))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.Request)
))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'Qot_GetPriceReminder_pb2'
# @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pbZ3github.com/futuopen/ftapi4go/pb/qotgetpricereminder'))
# @@protoc_insertion_point(module_scope)
| 36.83526 | 1,141 | 0.748764 |
c41bd740e3e0dc24d155a81087255bfae49c7719 | 903 | py | Python | leave/models.py | shoaibsaikat/Django-Office-Management | 952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0 | [
"Apache-2.0"
] | null | null | null | leave/models.py | shoaibsaikat/Django-Office-Management | 952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0 | [
"Apache-2.0"
] | null | null | null | leave/models.py | shoaibsaikat/Django-Office-Management | 952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.db.models.deletion import CASCADE
from accounts.models import User
| 45.15 | 102 | 0.75526 |
c41c16df2e1d607a9a0d2aad44ec758217ef96ce | 22,021 | py | Python | svtk/vtk_animation_timer_callback.py | SimLeek/pglsl-neural | 8daaffded197cf7be4432754bc5941f1bca3239c | [
"MIT"
] | 5 | 2018-03-25T23:43:32.000Z | 2019-05-18T10:35:21.000Z | svtk/vtk_animation_timer_callback.py | PyGPAI/PyGPNeural | 8daaffded197cf7be4432754bc5941f1bca3239c | [
"MIT"
] | 11 | 2017-12-24T20:03:16.000Z | 2017-12-26T00:18:34.000Z | svtk/vtk_animation_timer_callback.py | SimLeek/PyGPNeural | 8daaffded197cf7be4432754bc5941f1bca3239c | [
"MIT"
] | null | null | null | import time
import numpy as np
import vtk
from vtk.util import numpy_support
from svtk.lib.toolbox.integer import minmax
from svtk.lib.toolbox.idarray import IdArray
from svtk.lib.toolbox.numpy_helpers import normalize
import math as m
| 40.629151 | 121 | 0.608374 |
c41c57d2fe8c5d4f03096ac847acc8fe35f19ed2 | 3,679 | py | Python | work/ArchitectureSearch.py | jialiasus2/AI-Studio-Contest-Quantum202103 | 350f20b8805e9696cacacc1339e71bf695571e74 | [
"Apache-2.0"
] | null | null | null | work/ArchitectureSearch.py | jialiasus2/AI-Studio-Contest-Quantum202103 | 350f20b8805e9696cacacc1339e71bf695571e74 | [
"Apache-2.0"
] | null | null | null | work/ArchitectureSearch.py | jialiasus2/AI-Studio-Contest-Quantum202103 | 350f20b8805e9696cacacc1339e71bf695571e74 | [
"Apache-2.0"
] | null | null | null | import time
import numpy as np
from tqdm import tqdm
from utils import RandomCNOT, RandomCNOTs
def RandomSearch(cnot_creater, solver, epochs=100, save_path=None):
'''
Parameters:
cnot_creater: CNOT
solver:
epochs:
save_path:
'''
best_score = 0
start_time = time.time()
for epoch in range(epochs):
cnot_layers = cnot_creater()
sc, model = solver(cnot_layers)
if sc>best_score:
best_score = sc
best_model = model
if save_path is not None:
with open(save_path, 'w') as f:
f.write(best_model)
print('No_%d: score = %g, best_score = %g, time = %gs'%(epoch, sc, best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model
| 38.322917 | 173 | 0.59527 |
c41c9ed8f0eeeb7bc96538ff09de8ee1da20fa88 | 4,113 | py | Python | tests/localyaml/test_localyaml.py | sbussetti/jenkins-job-builder | fc63f1439816d9022a2d538614b0b7592f96b454 | [
"Apache-2.0"
] | 1 | 2021-07-30T04:03:53.000Z | 2021-07-30T04:03:53.000Z | tests/localyaml/test_localyaml.py | sbussetti/jenkins-job-builder | fc63f1439816d9022a2d538614b0b7592f96b454 | [
"Apache-2.0"
] | 12 | 2020-05-29T05:33:48.000Z | 2020-09-29T13:02:29.000Z | tests/localyaml/test_localyaml.py | sbussetti/jenkins-job-builder | fc63f1439816d9022a2d538614b0b7592f96b454 | [
"Apache-2.0"
] | 2 | 2020-05-15T08:29:33.000Z | 2020-06-04T07:27:31.000Z | #!/usr/bin/env python
#
# Copyright 2013 Darragh Bailey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from testtools import ExpectedException
from yaml.composer import ComposerError
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.parser import YamlParser
from tests import base
| 33.991736 | 87 | 0.699733 |
c41dbd4f1116c76a73c6b7f3a90d3a40a1fa6018 | 24,625 | py | Python | seijibot.py | seiji56/bot-tac | b16b8a8a79d6ac2deb0476ab3a9a0e0b136b1d54 | [
"MIT"
] | null | null | null | seijibot.py | seiji56/bot-tac | b16b8a8a79d6ac2deb0476ab3a9a0e0b136b1d54 | [
"MIT"
] | null | null | null | seijibot.py | seiji56/bot-tac | b16b8a8a79d6ac2deb0476ab3a9a0e0b136b1d54 | [
"MIT"
] | null | null | null | from bot_interface import *
import math
GameState(SeijiBot()).connect()
| 37.884615 | 608 | 0.496853 |
c41f3f30efc1128fe0e35981a452b93b464ce15f | 304 | py | Python | configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | 33 | 2021-12-15T07:11:47.000Z | 2022-03-29T08:58:32.000Z | configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | 3 | 2021-12-15T11:39:54.000Z | 2022-03-29T07:24:23.000Z | configs/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_09_10PottedMeatCan.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | null | null | null | _base_ = "./resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_01_02MasterChefCan.py"
OUTPUT_DIR = (
"output/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/09_10PottedMeatCan"
)
DATASETS = dict(TRAIN=("ycbv_010_potted_meat_can_train_pbr",))
| 50.666667 | 117 | 0.871711 |
c41fd9dec58d9f797e213eba1e8064f8aba14576 | 682 | py | Python | days/01-03-datetimes/code/100day_calc.py | rhelmstedter/100daysofcode-with-python-course | 076c99939b5641be541023f61c10ff30a7f05524 | [
"MIT"
] | null | null | null | days/01-03-datetimes/code/100day_calc.py | rhelmstedter/100daysofcode-with-python-course | 076c99939b5641be541023f61c10ff30a7f05524 | [
"MIT"
] | null | null | null | days/01-03-datetimes/code/100day_calc.py | rhelmstedter/100daysofcode-with-python-course | 076c99939b5641be541023f61c10ff30a7f05524 | [
"MIT"
] | null | null | null | from datetime import date, datetime, timedelta
import time
START_DATE = date(2021, 5, 25)
duration = timedelta(days=100)
| 32.47619 | 104 | 0.668622 |
c42001c4593f0af28c9a44cdd561459d12ab258c | 195 | py | Python | output/copilot/python/timeout/palindrome-partitioning.py | nhtnhan/CMPUT663-copilot-eval | 896711d006eb37a78e010cd1b9f79dc285ad054d | [
"Apache-2.0"
] | null | null | null | output/copilot/python/timeout/palindrome-partitioning.py | nhtnhan/CMPUT663-copilot-eval | 896711d006eb37a78e010cd1b9f79dc285ad054d | [
"Apache-2.0"
] | null | null | null | output/copilot/python/timeout/palindrome-partitioning.py | nhtnhan/CMPUT663-copilot-eval | 896711d006eb37a78e010cd1b9f79dc285ad054d | [
"Apache-2.0"
] | null | null | null | # https://leetcode.com/problems/palindrome-partitioning/
| 21.666667 | 56 | 0.54359 |
c42012e1044d2e28166a8361142bd8a07f4789f3 | 6,071 | py | Python | aggregathor/ea_datasource.py | big-data-lab-umbc/autodist | c8514b27cf5608f35254b63c4ac8093c7295a8e7 | [
"Apache-2.0"
] | null | null | null | aggregathor/ea_datasource.py | big-data-lab-umbc/autodist | c8514b27cf5608f35254b63c4ac8093c7295a8e7 | [
"Apache-2.0"
] | null | null | null | aggregathor/ea_datasource.py | big-data-lab-umbc/autodist | c8514b27cf5608f35254b63c4ac8093c7295a8e7 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import keras
import random
from keras.datasets import mnist
from keras import backend as K
K.set_floatx('float64')
if __name__ == "__main__":
# m = Mnist()
# # res = m.partitioned_by_rows(9)
# # print(res["test"][1].shape)
# for _ in range(10):
# print(m.gen_dummy_non_iid_weights())
fake_data = Mnist().fake_non_iid_data(min_train=10,max_train=10,data_split=(0.6, 0.3, 0.1))
train_data, test_data, valid_data = fake_data
x_train, y_train = train_data
x_test, y_test = test_data
x_valid, y_valid = valid_data
print(y_valid)
| 43.056738 | 135 | 0.653434 |
c42094cad42afee256fee1fad8338f794ac45419 | 255 | py | Python | proxySTAR_V3/certbot/venv/lib/python2.7/site-packages/pylint/test/functional/pygtk_enum_crash.py | mami-project/lurk | 98c293251e9b1e9c9a4b02789486c5ddaf46ba3c | [
"Apache-2.0"
] | 2 | 2017-07-05T09:57:33.000Z | 2017-11-14T23:05:53.000Z | Libraries/Python/pylint/v1.4.4/pylint/test/functional/pygtk_enum_crash.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | 1 | 2019-01-17T14:26:22.000Z | 2019-01-17T22:56:26.000Z | Libraries/Python/pylint/v1.4.4/pylint/test/functional/pygtk_enum_crash.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | 1 | 2017-08-31T14:33:03.000Z | 2017-08-31T14:33:03.000Z | # pylint: disable=C0121
"""http://www.logilab.org/ticket/124337"""
import gtk
def print_some_constant(arg=gtk.BUTTONS_OK):
"""crash because gtk.BUTTONS_OK, a gtk enum type, is returned by
astroid as a constant
"""
print(arg)
| 21.25 | 69 | 0.662745 |
c4231b8d3eab02f60fcc36025477bf600813aa38 | 1,519 | py | Python | py_at/OrderItem.py | kanghua309/at_py | 8fa7943a9de52cd81d235f06b57a25aa07fb715b | [
"Apache-2.0"
] | null | null | null | py_at/OrderItem.py | kanghua309/at_py | 8fa7943a9de52cd81d235f06b57a25aa07fb715b | [
"Apache-2.0"
] | null | null | null | py_at/OrderItem.py | kanghua309/at_py | 8fa7943a9de52cd81d235f06b57a25aa07fb715b | [
"Apache-2.0"
] | 2 | 2018-09-19T16:07:26.000Z | 2019-11-09T15:46:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'HaiFeng'
__mtime__ = '2016/8/16'
"""
import time
from py_at.EnumDefine import *
######################################################################## | 26.649123 | 142 | 0.578012 |
c4246529ebfd4899aa1216798277f3b74d90b3f5 | 547 | py | Python | pyscf/nao/m_rf_den.py | mfkasim1/pyscf | 7be5e015b2b40181755c71d888449db936604660 | [
"Apache-2.0"
] | 3 | 2021-02-28T00:52:53.000Z | 2021-03-01T06:23:33.000Z | pyscf/nao/m_rf_den.py | mfkasim1/pyscf | 7be5e015b2b40181755c71d888449db936604660 | [
"Apache-2.0"
] | 36 | 2018-08-22T19:44:03.000Z | 2020-05-09T10:02:36.000Z | pyscf/nao/m_rf_den.py | mfkasim1/pyscf | 7be5e015b2b40181755c71d888449db936604660 | [
"Apache-2.0"
] | 4 | 2018-02-14T16:28:28.000Z | 2019-08-12T16:40:30.000Z | from __future__ import print_function, division
import numpy as np
from numpy import identity, dot, zeros, zeros_like
def rf_den_via_rf0(self, rf0, v):
""" Whole matrix of the interacting response via non-interacting response and interaction"""
rf = zeros_like(rf0)
I = identity(rf0.shape[1])
for ir,r in enumerate(rf0):
rf[ir] = dot(np.linalg.inv(I-dot(r,v)), r)
return rf
def rf_den(self, ww):
""" Full matrix interacting response from NAO GW class"""
rf0 = self.rf0(ww)
return rf_den_via_rf0(self, rf0, self.kernel_sq)
| 28.789474 | 94 | 0.718464 |
c4253c3edd906a40552637d516df1601047e0dd5 | 669 | py | Python | app/model/compare_users.py | dwdraugr/YADS | c8036d8196a3158636aaa4f1910033e70ec8ecb4 | [
"Apache-2.0"
] | 3 | 2019-09-02T11:26:58.000Z | 2019-12-06T15:54:38.000Z | app/model/compare_users.py | dwdraugr/YADS | c8036d8196a3158636aaa4f1910033e70ec8ecb4 | [
"Apache-2.0"
] | null | null | null | app/model/compare_users.py | dwdraugr/YADS | c8036d8196a3158636aaa4f1910033e70ec8ecb4 | [
"Apache-2.0"
] | null | null | null | from app.model.model import Model
| 35.210526 | 75 | 0.560538 |
c425a0389a78978ea2d9dbb437a26224ad54fcc9 | 9,004 | py | Python | venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py | mokshagna517/recommendation_sys | bc8ced225dff3c93d619ff5da363f42d0aa0676c | [
"MIT"
] | 25 | 2019-03-08T01:03:03.000Z | 2022-02-14T17:38:32.000Z | venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py | mokshagna517/recommendation_sys | bc8ced225dff3c93d619ff5da363f42d0aa0676c | [
"MIT"
] | 9 | 2020-09-25T22:32:02.000Z | 2022-02-09T23:45:10.000Z | venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py | mokshagna517/recommendation_sys | bc8ced225dff3c93d619ff5da363f42d0aa0676c | [
"MIT"
] | 31 | 2019-01-15T20:16:50.000Z | 2022-03-01T05:47:38.000Z | import numpy as np
import pytest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_equal
from sklearn.ensemble._hist_gradient_boosting.histogram import (
_build_histogram_naive,
_build_histogram,
_build_histogram_no_hessian,
_build_histogram_root_no_hessian,
_build_histogram_root,
_subtract_histograms
)
from sklearn.ensemble._hist_gradient_boosting.types import HISTOGRAM_DTYPE
from sklearn.ensemble._hist_gradient_boosting.types import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.types import X_BINNED_DTYPE
| 44.35468 | 79 | 0.691804 |
c425a78347ab246234b9b4acc34bdb1ab5a3665b | 349 | py | Python | dgpolygon/gmappolygons/urls.py | mariohmol/django-google-polygon | 9d9448e540a4d100d925d7170425143f126e2174 | [
"MIT"
] | 1 | 2018-04-28T17:06:23.000Z | 2018-04-28T17:06:23.000Z | dgpolygon/gmappolygons/urls.py | mariohmol/django-google-polygon | 9d9448e540a4d100d925d7170425143f126e2174 | [
"MIT"
] | null | null | null | dgpolygon/gmappolygons/urls.py | mariohmol/django-google-polygon | 9d9448e540a4d100d925d7170425143f126e2174 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.contrib import admin
from gmappolygons import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^search', views.search, name='search'),
url(r'^submit/$', views.submit, name='submit'),
url(r'^show/(?P<area_id>\d+)/', views.show, name='show'),
)
| 31.727273 | 60 | 0.673352 |
c4263856e2d9e9e21750aa2037ab8e37b21086eb | 2,407 | py | Python | apps/user/models.py | mrf-foundation/ckios_v1 | 3556a99ba5e01f00e137fd124903ace77d2cba28 | [
"Apache-2.0"
] | null | null | null | apps/user/models.py | mrf-foundation/ckios_v1 | 3556a99ba5e01f00e137fd124903ace77d2cba28 | [
"Apache-2.0"
] | null | null | null | apps/user/models.py | mrf-foundation/ckios_v1 | 3556a99ba5e01f00e137fd124903ace77d2cba28 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django import forms
from django.contrib.auth.models import User
from PIL import Image
from django.utils.timezone import now
## User Update Profile
| 44.574074 | 103 | 0.658912 |
c429c3cef7b7daf43f4b36c099ac1e6ca683a4ff | 19,880 | py | Python | slt/chmm/train.py | paper-submit-account/Sparse-CHMM | 8a33dfe375a012cc0cc3324907135b74606a7b5d | [
"Apache-2.0"
] | null | null | null | slt/chmm/train.py | paper-submit-account/Sparse-CHMM | 8a33dfe375a012cc0cc3324907135b74606a7b5d | [
"Apache-2.0"
] | null | null | null | slt/chmm/train.py | paper-submit-account/Sparse-CHMM | 8a33dfe375a012cc0cc3324907135b74606a7b5d | [
"Apache-2.0"
] | null | null | null | import os
import logging
import numpy as np
from typing import Optional
import torch
from torch.utils.data import DataLoader
from ..eval import Metric
from .dataset import CHMMBaseDataset
from .dataset import collate_fn as default_collate_fn
logger = logging.getLogger(__name__)
OUT_RECALL = 0.9
OUT_PRECISION = 0.8
def initialize_matrices(self):
"""
Initialize <HMM> transition and emission matrices
Returns
-------
self
"""
assert self._training_dataset and self._valid_dataset
# inject prior knowledge about transition and emission
self._init_state_prior = torch.zeros(self._config.d_hidden, device=self._config.device) + 1e-2
self._init_state_prior[0] += 1 - self._init_state_prior.sum()
intg_obs = list(map(np.array, self._training_dataset.obs + self._valid_dataset.obs))
# construct/load initial transition matrix
dataset_dir = os.path.split(self._config.train_path)[0]
transmat_path = os.path.join(dataset_dir, "init_transmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(transmat_path):
logger.info("Loading initial transition matrix from disk")
self._init_trans_mat = torch.load(transmat_path)
# if the loaded transmat does not have the proper shape, re-calculate it.
s0_transmat, s1_transmat = self._init_trans_mat.shape
if not (s0_transmat == s1_transmat == self.config.d_obs):
self._init_trans_mat = None
if self._init_trans_mat is None:
self._init_trans_mat = torch.tensor(initialise_transmat(
observations=intg_obs, label_set=self._config.bio_label_types
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial transition matrix")
torch.save(self._init_trans_mat, transmat_path)
# construct/load initial emission matrix
emissmat_path = os.path.join(dataset_dir, "init_emissmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(emissmat_path):
logger.info("Loading initial emission matrix from disk")
self._init_emiss_mat = torch.load(emissmat_path)
# if the loaded emissmat does not have the proper shape, re-calculate it.
s0_emissmat, s1_emissmat, s2_emissmat = self._init_emiss_mat.shape
if not (s0_emissmat == self.config.n_src) and (s1_emissmat == s2_emissmat == self.config.d_obs):
self._init_emiss_mat = None
if self._init_emiss_mat is None:
self._init_emiss_mat = torch.tensor(initialise_emissions(
observations=intg_obs, label_set=self._config.bio_label_types,
sources=self._config.sources, src_priors=self._config.src_priors
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial emission matrix")
torch.save(self._init_emiss_mat, emissmat_path)
return self
def save(self,
output_dir: Optional[str] = None,
save_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Save model parameters as well as trainer parameters
Parameters
----------
output_dir: model directory
save_optimizer: whether to save optimizer
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
None
"""
output_dir = output_dir if output_dir is not None else self._config.output_dir
logger.info(f"Saving model to {output_dir}")
model_state_dict = self._model.state_dict()
torch.save(model_state_dict, os.path.join(output_dir, f'{model_name}.bin'))
self._config.save(output_dir)
if save_optimizer:
logger.info("Saving optimizer and scheduler")
torch.save(self._optimizer.state_dict(),
os.path.join(output_dir, f"{optimizer_name}.bin"))
torch.save(self._pretrain_optimizer.state_dict(),
os.path.join(output_dir, f"{pretrain_optimizer_name}.bin"))
return None
def load(self,
input_dir: Optional[str] = None,
load_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Load model parameters.
Parameters
----------
input_dir: model directory
load_optimizer: whether load other trainer parameters
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
self
"""
input_dir = input_dir if input_dir is not None else self._config.output_dir
if self._model is not None:
logger.warning(f"The original model {type(self._model)} in {type(self)} is not None. "
f"It will be overwritten by the loaded model!")
logger.info(f"Loading model from {input_dir}")
self.initialize_model()
self._model.load_state_dict(torch.load(os.path.join(input_dir, f'{model_name}.bin')))
self._model.to(self.config.device)
if load_optimizer:
logger.info("Loading optimizer and scheduler")
if self._optimizer is None:
self.initialize_optimizers()
if os.path.isfile(os.path.join(input_dir, f"{optimizer_name}.bin")):
self._optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{optimizer_name}.bin"), map_location=self.config.device)
)
else:
logger.warning("Optimizer file does not exist!")
if os.path.isfile(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")):
self._pretrain_optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin"))
)
else:
logger.warning("Pretrain optimizer file does not exist!")
return self
def save_results(self,
output_dir: str,
valid_results: Optional[Metric] = None,
file_name: Optional[str] = 'results',
disable_final_valid: Optional[bool] = False,
disable_test: Optional[bool] = False,
disable_inter_results: Optional[bool] = False) -> None:
"""
Save training (validation) results
Parameters
----------
output_dir: output directory, should be a folder
valid_results: validation results during the training process
file_name: file name
disable_final_valid: disable final validation process (getting validation results of the trained model)
disable_test: disable test process
disable_inter_results: do not save inter-results
Returns
-------
None
"""
if not disable_final_valid:
logger.info("Getting final validation metrics")
valid_metrics = self.valid()
else:
valid_metrics = None
if not disable_test:
logger.info("Getting test metrics.")
test_metrics = self.test()
else:
test_metrics = None
# write validation and test results
result_file = os.path.join(output_dir, f'{file_name}.txt')
logger.info(f"Writing results to {result_file}")
self.write_result(file_path=result_file,
valid_results=valid_results,
final_valid_metrics=valid_metrics,
test_metrics=test_metrics)
if not disable_inter_results:
# save validation inter results
logger.info(f"Saving inter results")
inter_result_file = os.path.join(output_dir, f'{file_name}-inter.pt')
torch.save(valid_results.__dict__, inter_result_file)
return None
def initialise_startprob(observations,
label_set,
src_idx=None):
"""
calculate initial hidden states (not used in our setup since our sequences all begin from
[CLS], which corresponds to hidden state "O".
:param src_idx: source index
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: probabilities for the initial hidden states
"""
n_src = observations[0].shape[1]
logger.info("Constructing start distribution prior...")
init_counts = np.zeros((len(label_set),))
if src_idx is not None:
for obs in observations:
init_counts[obs[0, src_idx].argmax()] += 1
else:
for obs in observations:
for z in range(n_src):
init_counts[obs[0, z].argmax()] += 1
for i, label in enumerate(label_set):
if i == 0 or label.startswith("B-"):
init_counts[i] += 1
startprob_prior = init_counts + 1
startprob_ = np.random.dirichlet(init_counts + 1E-10)
return startprob_, startprob_prior
# TODO: try to use a more reliable source to start the transition and emission
def initialise_transmat(observations,
label_set,
src_idx=None):
"""
initialize transition matrix
:param src_idx: the index of the source of which the transition statistics is computed.
If None, use all sources
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: initial transition matrix and transition counts
"""
logger.info("Constructing transition matrix prior...")
n_src = observations[0].shape[1]
trans_counts = np.zeros((len(label_set), len(label_set)))
if src_idx is not None:
for obs in observations:
for k in range(0, len(obs) - 1):
trans_counts[obs[k, src_idx].argmax(), obs[k + 1, src_idx].argmax()] += 1
else:
for obs in observations:
for k in range(0, len(obs) - 1):
for z in range(n_src):
trans_counts[obs[k, z].argmax(), obs[k + 1, z].argmax()] += 1
# update transition matrix with prior knowledge
for i, label in enumerate(label_set):
if label.startswith("B-") or label.startswith("I-"):
trans_counts[i, label_set.index("I-" + label[2:])] += 1
elif i == 0 or label.startswith("I-"):
for j, label2 in enumerate(label_set):
if j == 0 or label2.startswith("B-"):
trans_counts[i, j] += 1
transmat_prior = trans_counts + 1
# initialize transition matrix with dirichlet distribution
transmat_ = np.vstack([np.random.dirichlet(trans_counts2 + 1E-10)
for trans_counts2 in trans_counts])
return transmat_, transmat_prior
def initialise_emissions(observations,
label_set,
sources,
src_priors,
strength=1000):
"""
initialize emission matrices
:param sources: source names
:param src_priors: source priors
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:param strength: Don't know what this is for
:return: initial emission matrices and emission counts?
"""
logger.info("Constructing emission probabilities...")
obs_counts = np.zeros((len(sources), len(label_set)), dtype=np.float64)
# extract the total number of observations for each prior
for obs in observations:
obs_counts += obs.sum(axis=0)
for source_index, source in enumerate(sources):
# increase p(O)
obs_counts[source_index, 0] += 1
# increase the "reasonable" observations
for pos_index, pos_label in enumerate(label_set[1:]):
if pos_label[2:] in src_priors[source]:
obs_counts[source_index, pos_index] += 1
# construct probability distribution from counts
obs_probs = obs_counts / (obs_counts.sum(axis=1, keepdims=True) + 1E-3)
# initialize emission matrix
matrix = np.zeros((len(sources), len(label_set), len(label_set)))
for source_index, source in enumerate(sources):
for pos_index, pos_label in enumerate(label_set):
# Simple case: set P(O=x|Y=x) to be the recall
recall = 0
if pos_index == 0:
recall = OUT_RECALL
elif pos_label[2:] in src_priors[source]:
_, recall = src_priors[source][pos_label[2:]]
matrix[source_index, pos_index, pos_index] = recall
for pos_index2, pos_label2 in enumerate(label_set):
if pos_index2 == pos_index:
continue
elif pos_index2 == 0:
precision = OUT_PRECISION
elif pos_label2[2:] in src_priors[source]:
precision, _ = src_priors[source][pos_label2[2:]]
else:
precision = 1.0
# Otherwise, we set the probability to be inversely proportional to the precision
# and the (unconditional) probability of the observation
error_prob = (1 - recall) * (1 - precision) * (0.001 + obs_probs[source_index, pos_index2])
# We increase the probability for boundary errors (i.e. I-ORG -> B-ORG)
if pos_index > 0 and pos_index2 > 0 and pos_label[2:] == pos_label2[2:]:
error_prob *= 5
# We increase the probability for errors with same boundary (i.e. I-ORG -> I-GPE)
if pos_index > 0 and pos_index2 > 0 and pos_label[0] == pos_label2[0]:
error_prob *= 2
matrix[source_index, pos_index, pos_index2] = error_prob
error_indices = [i for i in range(len(label_set)) if i != pos_index]
error_sum = matrix[source_index, pos_index, error_indices].sum()
matrix[source_index, pos_index, error_indices] /= (error_sum / (1 - recall) + 1E-5)
emission_priors = matrix * strength
emission_probs = matrix
return emission_probs, emission_priors
| 38.452611 | 118 | 0.604326 |
c42c480ac786f98d925a893f66e8658af5b8de1c | 6,881 | py | Python | flask_obfuscateids/lib.py | mlenzen/flask-obfuscateids | 22319633b2685f2969bd67eae3fd09d2db6567f1 | [
"BSD-3-Clause"
] | null | null | null | flask_obfuscateids/lib.py | mlenzen/flask-obfuscateids | 22319633b2685f2969bd67eae3fd09d2db6567f1 | [
"BSD-3-Clause"
] | 1 | 2015-01-26T06:23:12.000Z | 2015-01-26T06:23:12.000Z | flask_obfuscateids/lib.py | mlenzen/flask-obfuscateids | 22319633b2685f2969bd67eae3fd09d2db6567f1 | [
"BSD-3-Clause"
] | null | null | null |
from random import Random
from collections_extended import setlist
# The version of seeding to use for random
SEED_VERSION = 2
# Common alphabets to use
ALPHANUM = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
BASE58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def encode_base_n(num, base, min_length=0):
'''Convert an integer into a list of integers storing the number in base base.
If a minimum length is specified, the result will be 0-padded.
'''
out = []
while num > 0 or len(out) < min_length:
num, remainder = divmod(num, base)
out.append(remainder)
return out
def decode_base_n(int_list, base):
'''Convert a list of numbers representing a number in base base to an integer.'''
out = 0
for index, num in enumerate(int_list):
if num >= base or num < 0:
raise ValueError
out += (base ** index) * num
return out
def add_check_digits(int_list, base, num_check_chars):
'''Calculate a checksum for int_list and translate into a number of base base
made up of num_check_chars digits.
Args:
int_list: A list of integers >= 0 and < base
base: The number of characters in the alphabet
num_check_chars: The number of check characters to return
Returns:
A list of integers that represent the checksum in base base.
'''
check_digits = calc_check_digits(int_list, base, num_check_chars)
return int_list + check_digits
def eval_check_digits(decrypted_ints, base, num_check_chars):
'''Evaluate the check digits in decrypted_ints.
Args:
decrypted_ints: A list of integers >=0 and < base (the result of add_check_digits)
Returns:
The decrypted_ints without the check digits
Raises:
ValueError: if the check digits don't match
'''
if num_check_chars == 0:
return decrypted_ints
int_list = decrypted_ints[:-num_check_chars]
check_digits = decrypted_ints[-num_check_chars:]
if calc_check_digits(int_list, base, num_check_chars) != check_digits:
raise ValueError()
return int_list
def encode(int_list, alphabet):
'''Encode ints using alphabet.'''
char_list = []
for i in int_list:
if i > len(alphabet) or i < 0:
raise ValueError
char_list.append(alphabet[i])
return ''.join(char_list)
def decode(s, alphabet):
'''Decode a string s using alphabet returning a list of ints.'''
try:
return [alphabet.index(c) for c in s]
except (TypeError, IndexError):
raise ValueError
def obfuscate(num, key, alphabet, min_chars=0, num_check_chars=1):
''' Obfuscate num using key.
This does some minor encryption by adding values to a key and a moving value.
The moving value is so that one small change makes all of the resulting
characters change.
Args:
num: The integer to obfuscate
key: An int, string or bytes to generate key values (anything that can be passed to random.seed)
alphabet: A list of characters to use for the alphabet
min_chars: A minimum number of chars for the resulting string
num_check_chars: The number of chars to use as a check
Returns:
A string encoding the number in the passed alphabet and encrypted with key.
Raises:
ValueError: if num is not a number or < 0
'''
try:
if num < 0:
raise ValueError()
except TypeError:
raise ValueError()
base = len(alphabet)
num_as_ints = encode_base_n(num, base, min_chars)
unencrypted_digits = add_check_digits(num_as_ints, base, num_check_chars)
encrypted_digits = encrypt(unencrypted_digits, key, base)
return encode(encrypted_digits, alphabet)
def deobfuscate(s, key, alphabet, num_check_chars=1):
'''Deobfuscate a string using key and alphabet.
key, alphabet and num_check_chars must be identical to the values used to obfuscate.
Args:
s: The string to deobfuscate
key: The key used to obfuscate
alphabet: The alphabet used to obfuscate
num_check_chars: The number of chars to use as a check
Returns:
The deobfuscated integer.
Raises:
ValueError: if s isn't a string, s doesn't use alphabet or the checksum doesn't match
'''
base = len(alphabet)
encrypted_ints = decode(s, alphabet)
decrypted_ints = decrypt(encrypted_ints, key, base)
num_as_ints = eval_check_digits(decrypted_ints, base, num_check_chars)
return decode_base_n(num_as_ints, base)
| 30.312775 | 98 | 0.748874 |
c42c74470081e712e5a554684e5bb789162adcd2 | 377 | py | Python | lib/response.py | dpla/akara | 432f14782152dd19931bdbd8f9fad19b5932426d | [
"Apache-2.0"
] | 5 | 2015-01-30T03:50:37.000Z | 2015-09-23T00:46:11.000Z | lib/response.py | dpla/akara | 432f14782152dd19931bdbd8f9fad19b5932426d | [
"Apache-2.0"
] | null | null | null | lib/response.py | dpla/akara | 432f14782152dd19931bdbd8f9fad19b5932426d | [
"Apache-2.0"
] | 3 | 2015-03-09T19:16:56.000Z | 2019-09-19T02:41:29.000Z | """Information for the outgoing response
code - the HTTP response code (default is "200 Ok")
headers - a list of key/value pairs used for the WSGI start_response
"""
code = None
headers = []
def add_header(key, value):
"""Helper function to append (key, value) to the list of response headers"""
headers.append( (key, value) )
# Eventually add cookie support?
| 23.5625 | 80 | 0.700265 |
c42d5c2686fc626989593bdff74f807903b98683 | 1,594 | py | Python | parte 3/desafio93.py | BrunoSoares-DEV/Exercicios-python | fcfd0a7b3e2c6af2b7dd8e5a15ca6585c97f7c67 | [
"MIT"
] | 2 | 2021-02-24T20:05:24.000Z | 2021-02-24T20:05:41.000Z | parte 3/desafio93.py | BrunoSoares-DEV/Exercicios-python | fcfd0a7b3e2c6af2b7dd8e5a15ca6585c97f7c67 | [
"MIT"
] | null | null | null | parte 3/desafio93.py | BrunoSoares-DEV/Exercicios-python | fcfd0a7b3e2c6af2b7dd8e5a15ca6585c97f7c67 | [
"MIT"
] | null | null | null | jog = {}
#pegando dados
jog['Nome do jogador'] = str(input('Digite o nome do jogador: ')).strip().title()
jog['Total partidas'] = int(input('Quantas partidas jogou: '))
#lista de gol
gols = []
#Quantos gols em cada partida
for i in range(0, jog['Total partidas']):
gols.append(int(input(f'Quantos gols na partida {i}: ')))
#total de gol
totGols = 0
for g in gols:
totGols += g
#print(totGols)
#adicionando dicionario
jog['Total gols'] = totGols
jog['Gols em partidas'] = gols
#print(jog)
#Mostrando resultados
print(f'O jogador: {jog["Nome do jogador"]}, jogou {jog["Total partidas"]} partidas e '
f'marcou ao todo no campeonato {jog["Total gols"]} gols')
print('Partidas:')
for pos, v in enumerate(gols):
print(f'Partida {pos}: {v} gols')
'''
Esse programa vai analisar informaes de um jogador
Primeiro criamos um dicionrio vazio, jog, e pedimos interaes ao usurio como nome e total de partidas
criado uma lista vazia chamada gols, e assim entra no loop for para saber quantos gols em cada partida, usando o limite de 0 e o valor de total de partidas
Para cada loop a lista gols da append() no valor
Assim criado uma variavel de controle totGols zerada, e dentro do loop for, onde g iria rodar sobre gols
Onde totGols iria incrimentar g, somando todos os gols
Em seguida adicionamos ao dicionrio, com o indice total de gols e gols em partidas, pelo totGols e gols respectivamente
No print ser mostrado os resultados, e por fim um loop com pos e v rodando sobre o enumarete() de gols para mostrar cada gols nas partidas
'''
| 37.952381 | 161 | 0.717064 |
c42d617d9e6dd57810d5d84da656ddd4e8d82bf1 | 5,891 | py | Python | b2sdk/v1/account_info.py | ehossack/b2-sdk-python | 034bec38671c0862b6956915993061359dbd51f6 | [
"MIT"
] | null | null | null | b2sdk/v1/account_info.py | ehossack/b2-sdk-python | 034bec38671c0862b6956915993061359dbd51f6 | [
"MIT"
] | null | null | null | b2sdk/v1/account_info.py | ehossack/b2-sdk-python | 034bec38671c0862b6956915993061359dbd51f6 | [
"MIT"
] | null | null | null | ######################################################################
#
# File: b2sdk/v1/account_info.py
#
# Copyright 2021 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from abc import abstractmethod
import inspect
import logging
import os
from typing import Optional
from b2sdk import _v2 as v2
from b2sdk.account_info.sqlite_account_info import DEFAULT_ABSOLUTE_MINIMUM_PART_SIZE
from b2sdk.utils import limit_trace_arguments
logger = logging.getLogger(__name__)
# Retain legacy get_minimum_part_size and facilitate for optional s3_api_url
# translate legacy "minimum_part_size" to new style "recommended_part_size"
| 30.523316 | 186 | 0.636904 |
c42ddcb403bc1b33c57898bd141f1f505a69b04f | 9,539 | py | Python | src/pyrin/security/hashing/handlers/pbkdf2.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/security/hashing/handlers/pbkdf2.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/security/hashing/handlers/pbkdf2.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pbkdf2 hashing handler module.
"""
import hashlib
import re
import pyrin.configuration.services as config_services
import pyrin.security.utils.services as security_utils_services
from pyrin.security.hashing.decorators import hashing
from pyrin.security.hashing.handlers.base import HashingBase
from pyrin.security.hashing.handlers.exceptions import InvalidHashingRoundsCountError, \
InvalidPBKDF2InternalAlgorithmError, InvalidHashingSaltLengthError
| 39.094262 | 93 | 0.588636 |
c42e18634a20b6733cded46ea5994450f7ae4da0 | 8,652 | py | Python | src/steps/prepare_ner_data.py | allanwright/media-classifier | a0da0799cc0bd6ef7360012c362f9fab273286c6 | [
"MIT"
] | 2 | 2019-08-16T00:49:27.000Z | 2021-08-15T16:37:45.000Z | src/steps/prepare_ner_data.py | allanwright/media-classifier | a0da0799cc0bd6ef7360012c362f9fab273286c6 | [
"MIT"
] | 1 | 2020-02-19T10:17:56.000Z | 2020-07-26T09:42:49.000Z | src/steps/prepare_ner_data.py | allanwright/media-classifier | a0da0799cc0bd6ef7360012c362f9fab273286c6 | [
"MIT"
] | 1 | 2019-06-27T10:57:07.000Z | 2019-06-27T10:57:07.000Z | '''Defines a pipeline step which prepares training and test data for
named entity recognition.
'''
import ast
import json
import pickle
from mccore import EntityRecognizer
from mccore import ner
from mccore import persistence
import pandas as pd
from sklearn.utils import resample
from src.step import Step
| 36.05 | 98 | 0.496417 |
c42e658ca9b791acfc8cc494fe87a5ee5b2f994f | 1,006 | py | Python | jubakit/test/__init__.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | [
"MIT"
] | 12 | 2016-04-11T04:49:08.000Z | 2019-02-08T01:43:46.000Z | jubakit/test/__init__.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | [
"MIT"
] | 138 | 2016-04-11T05:57:48.000Z | 2020-09-26T03:09:31.000Z | jubakit/test/__init__.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | [
"MIT"
] | 10 | 2016-04-11T03:18:45.000Z | 2018-04-14T10:11:15.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = ['requireSklearn']
from jubakit.compat import PYTHON3
try:
import embedded_jubatus
embedded_available = True
except ImportError:
embedded_available = False
try:
import numpy
import scipy
import sklearn
sklearn_available = True
except ImportError:
sklearn_available = False
try:
from unittest import skipUnless
except ImportError:
| 26.473684 | 82 | 0.777336 |
c42e88219fc65a0c84a4b46fac98f1c167ea84ef | 9,859 | py | Python | YoLo2Net.py | zhouyc2002/yolo2-cntk | 549cb46365d1750031eee90044b6262f9b94ff49 | [
"Apache-2.0"
] | 3 | 2017-07-27T00:05:39.000Z | 2021-02-25T08:56:10.000Z | YoLo2Net.py | zhouyc2002/yolo2-cntk | 549cb46365d1750031eee90044b6262f9b94ff49 | [
"Apache-2.0"
] | 1 | 2019-08-05T12:55:06.000Z | 2019-08-06T00:43:58.000Z | YoLo2Net.py | zhouyc2002/yolo2-cntk | 549cb46365d1750031eee90044b6262f9b94ff49 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 28 13:03:05 2017
@author: ZHOU Yuncheng
"""
import cntk as C
import _cntk_py
import cntk.layers
import cntk.initializer
import cntk.losses
import cntk.metrics
import cntk.logging
import cntk.io.transforms as xforms
import cntk.io
import cntk.train
import os
import numpy as np
import yolo2
import CloneModel
# default Paths relative to current python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(abs_path, "Models")
# model dimensions
image_height = 416
image_width = 416
num_channels = 3 # RGB
num_truth_boxes = 14
box_dim = 5 # centerX, centerY, Width, Height, class_type
num_classes = 3 # object type count. i.e. tomato, flower, stem, et, al.
num_anchors = 5
model_name = "Yolo2Net.model"
# Create a minibatch source.
# Create the network.
# Create trainer
# Train and test
# Train and evaluate the network.
#
# get train sample size evaluate sample size
#
if __name__=='__main__':
anchor_data = 'anchor.txt'
if not os.path.exists(anchor_data):
raise RuntimeError("File '%s' does not exist." %anchor_data)
anchors = open_anchor_file(anchor_data)
if anchors.shape[0] < num_anchors:
raise RuntimeError("Anchor dimension is less than %s" %num_anchors)
# network = create_yolo2net(anchors)
# cntk.logging.graph.plot(network['output'], 'yolo2.png')
train_data = 'train.txt'
train_rois = 'train.rois.txt'
test_data = 'train.txt'
test_rois = 'train.rois.txt'
sample_size = get_sample_counts(train_data, test_data)
net_train_and_eval(train_data, train_rois, test_data, test_rois,
priors = anchors,
epoch_size=sample_size[0],
block_size = None,
minibatch_size = 32,
max_epochs = 130,
log_to_file = 'Yolo2Net.log')
# Must call MPI finalize when process exit without exceptions
cntk.train.distributed.Communicator.finalize()
| 39.436 | 171 | 0.647733 |
c42f5f63c17465e80eb4449fa0bfca6ef5e47655 | 948 | py | Python | hqq_tool/rongcloud/demo.py | yaoruda/DRFLearning | 6b17ef0d557142e8563d80788351f8b7ab94f248 | [
"MIT"
] | 1 | 2018-09-21T09:42:02.000Z | 2018-09-21T09:42:02.000Z | hqq_tool/rongcloud/demo.py | yaoruda/DRFLearning | 6b17ef0d557142e8563d80788351f8b7ab94f248 | [
"MIT"
] | null | null | null | hqq_tool/rongcloud/demo.py | yaoruda/DRFLearning | 6b17ef0d557142e8563d80788351f8b7ab94f248 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# __author__= "Ruda"
# Date: 2018/10/16
'''
import os
from rongcloud import RongCloud
app_key = os.environ['APP_KEY']
app_secret = os.environ['APP_SECRET']
rcloud = RongCloud(app_key, app_secret)
r = rcloud.User.getToken(userId='userid1', name='username', portraitUri='http://www.rongcloud.cn/images/logo.png')
print(r)
{'token': 'P9YNVZ2cMQwwaADiNDVrtRZKF+J2pVPOWSNlYMA1yA1g49pxjZs58n4FEufsH9XMCHTk6nHR6unQTuRgD8ZS/nlbkcv6ll4x', 'userId': 'userid1', 'code': 200}
r = rcloud.Message.publishPrivate(
fromUserId='userId1',
toUserId={"userId2","userid3","userId4"},
objectName='RC:VcMsg',
content='{"content":"hello","extra":"helloExtra","duration":20}',
pushContent='thisisapush',
pushData='{"pushData":"hello"}',
count='4',
verifyBlacklist='0',
isPersisted='0',
isCounted='0')
print(r)
{'code': 200}
'''
'''
More:
https://github.com/rongcloud/server-sdk-python
''' | 27.085714 | 143 | 0.679325 |
c4311123dd5258af551865a612948896d2a1bbc9 | 2,132 | py | Python | registration/email.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | [
"Apache-2.0"
] | null | null | null | registration/email.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | [
"Apache-2.0"
] | 1 | 2018-06-17T02:21:41.000Z | 2018-06-17T02:21:41.000Z | registration/email.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | [
"Apache-2.0"
] | 1 | 2018-05-31T11:39:02.000Z | 2018-05-31T11:39:02.000Z | from django.core.mail import EmailMessage
from django.conf import settings
| 38.071429 | 147 | 0.60272 |
c431a581714f033cba2ab3f52062e3fdddf8f0b8 | 5,767 | py | Python | train_ema.py | qym7/WTALFakeLabels | 139738025ab69f287c4fe3c97389a637f1a0b376 | [
"MIT"
] | 3 | 2021-12-24T09:27:42.000Z | 2022-01-03T10:59:47.000Z | train_ema.py | qym7/WTALFakeLabels | 139738025ab69f287c4fe3c97389a637f1a0b376 | [
"MIT"
] | 1 | 2021-12-26T02:40:40.000Z | 2021-12-26T02:50:26.000Z | train_ema.py | qym7/WTALFakeLabels | 139738025ab69f287c4fe3c97389a637f1a0b376 | [
"MIT"
] | null | null | null | '''
Author: your name
Date: 2021-12-25 17:33:51
LastEditTime: 2021-12-29 10:10:14
LastEditors: Please set LastEditors
Description: koroFileHeader : https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
FilePath: /yimingqin/code/WTAL-Uncertainty-Modeling/train.py
'''
import torch
import torch.nn as nn
import numpy as np
from collections import OrderedDict
import utils
| 38.704698 | 106 | 0.602393 |
c43395c47fe6f6295740535434326b1a38c6e0c8 | 3,597 | py | Python | scan/fetchers/cli/cli_fetch_oteps_lxb.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | scan/fetchers/cli/cli_fetch_oteps_lxb.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | scan/fetchers/cli/cli_fetch_oteps_lxb.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | ###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from scan.fetchers.cli.cli_fetcher import CliFetcher
from scan.fetchers.db.db_access import DbAccess
| 43.865854 | 79 | 0.512093 |
c433cd175dc051909207a6a2031e2dac3b9eff92 | 612 | py | Python | appengine_config.py | ioriwitte/datavocab | 5f99c679a23a164ab93ac1bcaf9a30a01728ee37 | [
"Apache-2.0"
] | 13 | 2019-12-03T15:25:55.000Z | 2021-10-16T00:18:47.000Z | appengine_config.py | jesman/schemaorg | 6649c41e56a9724eaeed25dedf67736258f922bf | [
"Apache-2.0"
] | 11 | 2019-10-16T12:34:11.000Z | 2021-02-04T11:23:03.000Z | appengine_config.py | jesman/schemaorg | 6649c41e56a9724eaeed25dedf67736258f922bf | [
"Apache-2.0"
] | 9 | 2017-12-13T08:07:48.000Z | 2019-06-18T14:30:12.000Z | """`appengine_config` gets loaded when starting a new application instance."""
import vendor
# insert `lib` as a site directory so our `main` module can load
# third-party libraries, and override built-ins with newer
# versions.
vendor.add('lib')
import os
# Called only if the current namespace is not set.
| 38.25 | 78 | 0.756536 |
c433ed35cefab756913c6887caed7bdb03a9f9e5 | 270 | py | Python | 10_KNN_3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | [
"MIT"
] | 2 | 2018-09-07T17:44:54.000Z | 2018-09-07T17:44:57.000Z | 10_KNN_3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | [
"MIT"
] | null | null | null | 10_KNN_3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | [
"MIT"
] | null | null | null |
import KNN as K
K.clearScreen()
dataTraining= K.loadData("dataTraining.txt")
X=dataTraining[:,0:3]
initial_centroids=K.listToArray([[3, 3,3],[6, 2,4],[8,5,7]])
idx=K.KMean_Run(X,initial_centroids,5)
K.SaveData(K.concatenateVectors(X,idx))
K.plotKNN2(X,idx)
| 12.272727 | 60 | 0.703704 |
c434ee7e49ec7f84e8ed989b7259f62a6d292fde | 3,793 | py | Python | hummingbird/graphics/state_plotbox.py | don4get/hummingbird | ec9da37b74f17702201f475d79b842f41694c095 | [
"MIT"
] | null | null | null | hummingbird/graphics/state_plotbox.py | don4get/hummingbird | ec9da37b74f17702201f475d79b842f41694c095 | [
"MIT"
] | null | null | null | hummingbird/graphics/state_plotbox.py | don4get/hummingbird | ec9da37b74f17702201f475d79b842f41694c095 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pyqtgraph as pg
from pyqtgraph import ViewBox
from hummingbird.graphics.plotter_args import PlotBoxArgs
from hummingbird.graphics.state_plot import StatePlot
| 35.12037 | 108 | 0.627472 |
c4355d1898179dbc210d3d0618bca78d79edd5b7 | 348 | py | Python | quizapp/jsonify_quiz_output.py | malgulam/100ProjectsOfCode | 95026b15d858a6e97dfd847c5ec576bbc260ff61 | [
"MIT"
] | 8 | 2020-12-13T16:15:34.000Z | 2021-11-13T22:45:28.000Z | quizapp/jsonify_quiz_output.py | malgulam/100ProjectsOfCode | 95026b15d858a6e97dfd847c5ec576bbc260ff61 | [
"MIT"
] | 1 | 2021-06-02T03:42:39.000Z | 2021-06-02T03:42:39.000Z | quizapp/jsonify_quiz_output.py | malgulam/100ProjectsOfCode | 95026b15d858a6e97dfd847c5ec576bbc260ff61 | [
"MIT"
] | 1 | 2020-12-14T20:01:14.000Z | 2020-12-14T20:01:14.000Z | import json
#start
print('start')
with open('quizoutput.txt') as f:
lines = f.readlines()
print('loaded quiz data')
print('changing to json')
json_output = json.loads(lines[0])
print(json_output)
with open('quizoutput.txt', 'w') as f:
f.write(json_output)
# for item in json_output:
# print(item['question'])
# print('done')
| 19.333333 | 38 | 0.666667 |
c43704aafbacbc4c468d75623400e2f129cb8ef2 | 6,544 | py | Python | panku/lambdaCollect.py | mccartney/panku-gdzie-jestes | 50a677170162c5255a24eacdbf8062ad876bee3f | [
"MIT"
] | null | null | null | panku/lambdaCollect.py | mccartney/panku-gdzie-jestes | 50a677170162c5255a24eacdbf8062ad876bee3f | [
"MIT"
] | null | null | null | panku/lambdaCollect.py | mccartney/panku-gdzie-jestes | 50a677170162c5255a24eacdbf8062ad876bee3f | [
"MIT"
] | null | null | null | #!/usr/bin/python
import requests
import boto3
import time
import geopy.distance
import xml.etree.ElementTree as ET
import itertools
import sys
import pickle
S3_BUCKET = "panku-gdzie-jestes-latest-storage"
def lambda_handler(event, context):
services = [Traficar, Veturilo, Panek]
for service in services:
print("==== Service %s" % service)
service().getAndSaveLocations()
return "OK"
| 38.046512 | 137 | 0.636461 |
c4372286ca07457197e0279205b6dabde1342c8d | 1,412 | py | Python | data/migrations/0039_2_data_update_questionnaires_vmsettings.py | Duke-GCB/bespin-api | cea5c20fb2ff592adabe6ebb7ca934939aa11a34 | [
"MIT"
] | null | null | null | data/migrations/0039_2_data_update_questionnaires_vmsettings.py | Duke-GCB/bespin-api | cea5c20fb2ff592adabe6ebb7ca934939aa11a34 | [
"MIT"
] | 137 | 2016-12-09T18:59:45.000Z | 2021-06-10T18:55:47.000Z | data/migrations/0039_2_data_update_questionnaires_vmsettings.py | Duke-GCB/bespin-api | cea5c20fb2ff592adabe6ebb7ca934939aa11a34 | [
"MIT"
] | 3 | 2017-11-14T16:05:58.000Z | 2018-12-28T18:07:43.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-12-08 18:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def update_questionnaires(apps, schema_editor):
"""
Forward migration function to normalize settings into VMSettings and CloudSettings models
:param apps: Django apps
:param schema_editor: unused
:return: None
"""
VMSettings = apps.get_model("data", "VMSettings")
CloudSettings = apps.get_model("data", "CloudSettings")
JobQuestionnaire = apps.get_model("data", "JobQuestionnaire")
Job = apps.get_model("data", "Job")
for q in JobQuestionnaire.objects.all():
# Create a cloud settings object with the VM project from the questionnaire.
# Object initially just has the project name as its name
cloud_settings, _ = CloudSettings.objects.get_or_create(name=q.vm_project.name, vm_project=q.vm_project)
vm_settings, _ = VMSettings.objects.get_or_create(name=q.vm_project.name, cloud_settings=cloud_settings)
q.vm_settings = vm_settings
q.save()
| 36.205128 | 112 | 0.71813 |
c438178586df87a3168fc1363cc17cdd53b3728e | 4,872 | py | Python | app/models.py | maxnovais/Flapy_Blog | e543faa4c8f99ef3a2cdb1470de507d9cfb330bf | [
"Apache-2.0"
] | null | null | null | app/models.py | maxnovais/Flapy_Blog | e543faa4c8f99ef3a2cdb1470de507d9cfb330bf | [
"Apache-2.0"
] | null | null | null | app/models.py | maxnovais/Flapy_Blog | e543faa4c8f99ef3a2cdb1470de507d9cfb330bf | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from . import db
from config import COMMENTS_INITIAL_ENABLED
from flask.ext.security import UserMixin, RoleMixin
from markdown import markdown
import bleach
# Define models
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
db.event.listen(User.about, 'set', User.on_changed_body)
objects_tags = db.Table(
'object_tags',
db.Column('object_id', db.Integer, db.ForeignKey('object.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
db.event.listen(Object.body, 'set', Object.on_changed_body)
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
| 36.909091 | 77 | 0.632389 |
c43aafbe58eb02eba9cd936508eecb607d118824 | 751 | py | Python | 8.1-triple-step.py | rithvikp1998/ctci | 52068e94449e61aef6bac9646a7863260acc7a05 | [
"MIT"
] | null | null | null | 8.1-triple-step.py | rithvikp1998/ctci | 52068e94449e61aef6bac9646a7863260acc7a05 | [
"MIT"
] | null | null | null | 8.1-triple-step.py | rithvikp1998/ctci | 52068e94449e61aef6bac9646a7863260acc7a05 | [
"MIT"
] | null | null | null | '''
If the child is currently on the nth step,
then there are three possibilites as to how
it reached there:
1. Reached (n-3)th step and hopped 3 steps in one time
2. Reached (n-2)th step and hopped 2 steps in one time
3. Reached (n-1)th step and hopped 2 steps in one time
The total number of possibilities is the sum of these 3
'''
n=int(input())
store=[0 for i in range(n+1)] # Stores the number of possibilites for every i<n
store[0]=0
store[1]=1
store[2]=2
store[3]=4
count_possibilities(n, store)
print(store[n])
| 25.896552 | 79 | 0.701731 |
c43e6b9c823f200efcc9e2b9380194f0c4a67a27 | 9,604 | py | Python | terrain_relative_navigation/peak_extractor_algorithm.py | rschwa6308/Landmark-Based-TRN | 5d712221138ec6250ed5bd19caed49810f17014e | [
"Apache-2.0"
] | null | null | null | terrain_relative_navigation/peak_extractor_algorithm.py | rschwa6308/Landmark-Based-TRN | 5d712221138ec6250ed5bd19caed49810f17014e | [
"Apache-2.0"
] | null | null | null | terrain_relative_navigation/peak_extractor_algorithm.py | rschwa6308/Landmark-Based-TRN | 5d712221138ec6250ed5bd19caed49810f17014e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
PeakExtractor
A QGIS plugin
This plugin procedurally extracts morphological peaks from a given DEM.
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2021-03-10
copyright : (C) 2021 by NASA JPL
email : russells@jpl.nasa.gov
***************************************************************************/
"""
__author__ = "NASA JPL"
__date__ = "2021-03-10"
__copyright__ = "(C) 2021 by NASA JPL"
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = "$Format:%H$"
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterFeatureSink,
QgsFields,
QgsWkbTypes)
import processing
# import grass.script as grass
import math
def round_up_to_odd(x: float) -> int:
"""round the given float up to the nearest odd integer"""
n = math.ceil(x)
return n + (1 - n%2)
| 32.890411 | 138 | 0.570596 |
c43f47ff2e792fe2c4acc6424f3c4c0fdde3ecb2 | 3,657 | py | Python | manila/tests/api/views/test_quota_class_sets.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 159 | 2015-01-02T09:35:15.000Z | 2022-01-04T11:51:34.000Z | manila/tests/api/views/test_quota_class_sets.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 5 | 2015-07-24T09:28:21.000Z | 2020-11-20T04:33:51.000Z | manila/tests/api/views/test_quota_class_sets.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 128 | 2015-01-05T22:52:28.000Z | 2021-12-29T14:00:58.000Z | # Copyright (c) 2017 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from manila.api.openstack import api_version_request as api_version
from manila.api.views import quota_class_sets
from manila import test
from manila.tests.api import fakes
| 41.089888 | 79 | 0.654908 |
c441a8d53ebaea6e35e7d68f0992cf2efeee375b | 2,429 | py | Python | tests/sequence_utils_test.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | [
"MIT"
] | 1 | 2021-01-09T23:25:02.000Z | 2021-01-09T23:25:02.000Z | tests/sequence_utils_test.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | [
"MIT"
] | null | null | null | tests/sequence_utils_test.py | rmcolq/genofunk | ffa031fb361fc736e839d0e36d36f8ed7ade30dc | [
"MIT"
] | null | null | null | import os
import unittest
import json
import filecmp
from genofunk.sequence_utils import *
this_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | 34.211268 | 94 | 0.645533 |
c442b7615909101f05f7c648d2d237c13e312b98 | 1,630 | py | Python | Modules/Biophotonics/python/iMC/msi/test/test_nrrdwriter.py | SVRTK/MITK | 52252d60e42702e292d188e30f6717fe50c23962 | [
"BSD-3-Clause"
] | 5 | 2015-02-05T10:58:41.000Z | 2019-04-17T15:04:07.000Z | Modules/Biophotonics/python/iMC/msi/test/test_nrrdwriter.py | wyyrepo/MITK | d0837f3d0d44f477b888ec498e9a2ed407e79f20 | [
"BSD-3-Clause"
] | 141 | 2015-03-03T06:52:01.000Z | 2020-12-10T07:28:14.000Z | Modules/Biophotonics/python/iMC/msi/test/test_nrrdwriter.py | wyyrepo/MITK | d0837f3d0d44f477b888ec498e9a2ed407e79f20 | [
"BSD-3-Clause"
] | 4 | 2015-02-19T06:48:13.000Z | 2020-06-19T16:20:25.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 09:52:47 2015
@author: wirkert
"""
import unittest
import os
import numpy as np
import msi.msimanipulations as msimani
from msi.io.nrrdreader import NrrdReader
from msi.io.nrrdwriter import NrrdWriter
from msi.test import helpers
| 29.636364 | 76 | 0.633129 |
c4438dbc98a70b3fe8296d0282cdfe5e4623856b | 3,369 | py | Python | crossplatformshell/__init__.py | ryanpdwyer/crossplatformshell | d6239ae362cff42faffc85714f7a5e1b56dc6463 | [
"MIT"
] | null | null | null | crossplatformshell/__init__.py | ryanpdwyer/crossplatformshell | d6239ae362cff42faffc85714f7a5e1b56dc6463 | [
"MIT"
] | null | null | null | crossplatformshell/__init__.py | ryanpdwyer/crossplatformshell | d6239ae362cff42faffc85714f7a5e1b56dc6463 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
============================
crossplatformshell
============================
"""
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import pathlib
import io
import os
import shutil
import distutils.dir_util
import platform
# Use subprocess32 if available
try:
import subprocess32 as subprocess
except:
import subprocess as subprocess
def check_output(*args, **kwargs):
"""Subprocess check_output, but prints commands and output by default.
Also allows printing of error message for helpful debugging.
Use print_all=False to turn off all printing."""
print_all = kwargs.pop('print_all', None)
if print_all is not None:
print_in = print_all
print_out = print_all
else:
print_in = kwargs.pop('print_in', True)
print_out = kwargs.pop('print_out', True)
if print_in:
print('')
print(' '.join(args[0]))
try:
out_bytes = subprocess.check_output(*args, **kwargs)
out_lines = out_bytes.decode('utf-8').splitlines()
except subprocess.CalledProcessError as e:
# Wrap in try/except so that check_output can print
raise e
if print_out:
for line in out_lines:
print(line)
return out_lines
windows = platform.system() == 'Windows'
git = find_git_cmd(windows)
def new_path(path_string):
"""Return pathlib.Path, expanding '~' to a user's HOME directory"""
return pathlib.Path(os.path.expanduser(path_string))
def mkdir(*args):
"""Make directories for the specified paths."""
for arg in args:
os.mkdir(str(arg))
def remove(path):
"""Remove the specified path."""
os.remove(str(path))
def rmtree(path):
"""Recursively remove paths."""
shutil.rmtree(str(path))
cp = copy
def copy_tree(src_path, dst_path):
"""Recursively copy all files and folders from src_path to dst_path"""
distutils.dir_util.copy_tree(str(src_path), str(dst_path))
cp_r = copy_tree
def rm(*args):
"""Delete files, if they exist.
Fail silently if a file doesn't exist."""
for path in args:
try:
os.remove(str(path))
except OSError:
pass
def rm_rf(*args):
"""Recursively delete directories, if they exist."""
for path in args:
try:
shutil.rmtree(str(path))
except OSError:
pass
# Versioneer versioning
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 23.234483 | 74 | 0.62808 |
c444346fedeae3b1a36842a83b1d34e2d12fa382 | 28,857 | py | Python | collections/nemo_nlp/nemo_nlp/data/data_layers.py | Giuseppe5/NeMo | f946aca100c9a1bf22e6bd25fba9f80299722112 | [
"Apache-2.0"
] | 2 | 2020-05-12T05:16:10.000Z | 2021-12-01T02:30:45.000Z | collections/nemo_nlp/nemo_nlp/data/data_layers.py | Giuseppe5/NeMo | f946aca100c9a1bf22e6bd25fba9f80299722112 | [
"Apache-2.0"
] | 3 | 2020-11-13T17:45:41.000Z | 2022-03-12T00:28:59.000Z | collections/nemo_nlp/nemo_nlp/data/data_layers.py | Giuseppe5/NeMo | f946aca100c9a1bf22e6bd25fba9f80299722112 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 NVIDIA Corporation
# If you want to add your own data layer, you should put its name in
# __all__ so that it can be imported with 'from text_data_layers import *'
__all__ = ['TextDataLayer',
'BertSentenceClassificationDataLayer',
'BertJointIntentSlotDataLayer',
'BertJointIntentSlotInferDataLayer',
'LanguageModelingDataLayer',
'BertTokenClassificationDataLayer',
'BertTokenClassificationInferDataLayer',
'BertPretrainingDataLayer',
'BertPretrainingPreprocessedDataLayer',
'TranslationDataLayer',
'GlueDataLayerClassification',
'GlueDataLayerRegression']
# from abc import abstractmethod
import sys
import torch
from torch.utils import data as pt_data
import os
import h5py
import nemo
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core.neural_types import *
import random
import numpy as np
from .datasets import *
| 34.851449 | 79 | 0.537547 |
c446129e206d55ad3a8c2ed465762b2ddf662a3e | 12,208 | py | Python | h2o-py/h2o/automl/_base.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | null | null | null | h2o-py/h2o/automl/_base.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 58 | 2021-10-01T12:43:37.000Z | 2021-12-08T22:58:43.000Z | h2o-py/h2o/automl/_base.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | null | null | null | import h2o
from h2o.base import Keyed
from h2o.exceptions import H2OValueError
from h2o.job import H2OJob
from h2o.model import ModelBase
from h2o.utils.typechecks import assert_is_type, is_type
def get_best_model(self, algorithm=None, criterion=None):
"""
Get best model of a given family/algorithm for a given criterion from an AutoML object.
:param algorithm: One of "basemodel", "deeplearning", "drf", "gbm", "glm", "stackedensemble", "xgboost".
If None, pick the best model regardless of the algorithm.
:param criterion: Criterion can be one of the metrics reported in leaderboard. If set to None, the same ordering
as in the leaderboard will be used.
Avaliable criteria:
- Regression metrics: deviance, rmse, mse, mae, rmsle
- Binomial metrics: auc, logloss, aucpr, mean_per_class_error, rmse, mse
- Multinomial metrics: mean_per_class_error, logloss, rmse, mse
The following additional leaderboard information can be also used as a criterion:
- 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
- 'predict_time_per_row_ms`: column providing the average prediction time by the model for a single row.
:return: An H2OModel or None if no model of a given family is present
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
>>> gbm = aml.get_best_model("gbm")
"""
from h2o.exceptions import H2OValueError
higher_is_better = ["auc", "aucpr"]
assert_is_type(algorithm, None, str)
assert_is_type(criterion, None, str)
if criterion is not None:
criterion = criterion.lower()
if "deviance" == criterion:
criterion = "mean_residual_deviance"
if algorithm is not None:
if algorithm.lower() not in ("basemodel", "deeplearning", "drf", "gbm",
"glm", "stackedensemble", "xgboost"):
raise H2OValueError("Algorithm \"{}\" is not supported!".format(algorithm))
algorithm = algorithm.lower()
extra_cols = ["algo"]
if criterion in ("training_time_ms", "predict_time_per_row_ms"):
extra_cols.append(criterion)
leaderboard = h2o.automl.get_leaderboard(self, extra_columns=extra_cols)
leaderboard = leaderboard if algorithm is None else (
leaderboard[leaderboard["algo"].tolower() == algorithm, :] if algorithm != "basemodel"
else leaderboard[leaderboard["algo"].tolower() != "stackedensemble", :])
if leaderboard.nrow == 0:
return None
if criterion is None:
return h2o.get_model(leaderboard[0, "model_id"])
if criterion not in leaderboard.columns:
raise H2OValueError("Criterion \"{}\" is not present in the leaderboard!".format(criterion))
models_in_default_order = _get_models(leaderboard)
sorted_lb = leaderboard.sort(by=criterion, ascending=criterion not in higher_is_better)
selected_models = _get_models(sorted_lb[sorted_lb[criterion] == sorted_lb[0, criterion]])
picked_model = [model for model in models_in_default_order if model in selected_models][0]
return h2o.get_model(picked_model)
def _fetch_leaderboard(aml_id, extensions=None):
assert_is_type(extensions, None, str, [str])
extensions = ([] if extensions is None
else [extensions] if is_type(extensions, str)
else extensions)
resp = h2o.api("GET /99/Leaderboards/%s" % aml_id, data=dict(extensions=extensions))
dest_key = resp['project_name'].split('@', 1)[0]+"_custom_leaderboard"
return _fetch_table(resp['table'], key=dest_key, progress_bar=False)
def _fetch_table(table, key=None, progress_bar=True):
try:
# Intentionally mask the progress bar here since showing multiple progress bars is confusing to users.
# If any failure happens, revert back to user's original setting for progress and display the error message.
ori_progress_state = H2OJob.__PROGRESS_BAR__
H2OJob.__PROGRESS_BAR__ = progress_bar
# Parse leaderboard H2OTwoDimTable & return as an H2OFrame
fr = h2o.H2OFrame(table.cell_values, destination_frame=key, column_names=table.col_header, column_types=table.col_types)
return h2o.assign(fr[1:], key) # removing index and reassign id to ensure persistence on backend
finally:
H2OJob.__PROGRESS_BAR__ = ori_progress_state
def _fetch_state(aml_id, properties=None, verbosity=None):
state_json = h2o.api("GET /99/AutoML/%s" % aml_id, data=dict(verbosity=verbosity))
project_name = state_json["project_name"]
if project_name is None:
raise H2OValueError("No AutoML instance with id {}.".format(aml_id))
leaderboard_list = [key["name"] for key in state_json['leaderboard']['models']]
leader_id = leaderboard_list[0] if (leaderboard_list is not None and len(leaderboard_list) > 0) else None
should_fetch = lambda prop: properties is None or prop in properties
leader = None
if should_fetch('leader'):
leader = h2o.get_model(leader_id) if leader_id is not None else None
leaderboard = None
if should_fetch('leaderboard'):
leaderboard = _fetch_table(state_json['leaderboard_table'], key=project_name+"_leaderboard", progress_bar=False)
event_log = None
if should_fetch('event_log'):
event_log = _fetch_table(state_json['event_log_table'], key=project_name+"_eventlog", progress_bar=False)
return dict(
project_name=project_name,
json=state_json,
leader_id=leader_id,
leader=leader,
leaderboard=leaderboard,
event_log=event_log,
)
| 42.096552 | 173 | 0.63442 |
c44725a87dd7a0e5d3208fe6f2ccd197531d2ad1 | 2,687 | py | Python | Pistol.py | KRHS-GameProgramming-2014/survival-island | 375b2710a2bc29551170b18638e2c00c6b2dc7c5 | [
"BSD-3-Clause"
] | 1 | 2015-04-01T12:46:26.000Z | 2015-04-01T12:46:26.000Z | Pistol.py | KRHS-GameProgramming-2014/survival-island | 375b2710a2bc29551170b18638e2c00c6b2dc7c5 | [
"BSD-3-Clause"
] | null | null | null | Pistol.py | KRHS-GameProgramming-2014/survival-island | 375b2710a2bc29551170b18638e2c00c6b2dc7c5 | [
"BSD-3-Clause"
] | null | null | null | import math,sys,pygame
| 33.17284 | 79 | 0.519166 |
c447c656ac034795409e4bb710eaaca13a84688c | 3,388 | py | Python | appdaemon/apps/common/common.py | Mithras/ha | d37f8673eed27a85f76c97ee3e924d2ddc033ee5 | [
"MIT"
] | 3 | 2019-10-27T06:10:26.000Z | 2020-07-21T01:27:11.000Z | appdaemon/apps/common/common.py | Mithras/ha | d37f8673eed27a85f76c97ee3e924d2ddc033ee5 | [
"MIT"
] | null | null | null | appdaemon/apps/common/common.py | Mithras/ha | d37f8673eed27a85f76c97ee3e924d2ddc033ee5 | [
"MIT"
] | null | null | null | import hassapi as hass
import csv
from collections import namedtuple
Profile = namedtuple(
"Profile", ["profile", "x_color", "y_color", "brightness"])
with open("/config/light_profiles.csv") as profiles_file:
profiles_reader = csv.reader(profiles_file)
next(profiles_reader)
LIGHT_PROFILES = [Profile(row[0], float(row[1]), float(
row[2]), int(row[3])) for row in profiles_reader]
| 41.317073 | 80 | 0.602715 |
c448522cb4d655aac706a30087c1d285bd8f1d0f | 3,133 | py | Python | src/mongo_model.py | zxteloiv/curated-geokb-subsearcher | 8f42dca4cb293ccf3baf25bb31ba9b6cd6a76c8d | [
"MIT"
] | null | null | null | src/mongo_model.py | zxteloiv/curated-geokb-subsearcher | 8f42dca4cb293ccf3baf25bb31ba9b6cd6a76c8d | [
"MIT"
] | null | null | null | src/mongo_model.py | zxteloiv/curated-geokb-subsearcher | 8f42dca4cb293ccf3baf25bb31ba9b6cd6a76c8d | [
"MIT"
] | null | null | null | # coding: utf-8
from pymongo import MongoClient
import conf
| 34.054348 | 99 | 0.452601 |
c448639417746f765b5ac2d5c6459142e8c6a83b | 8,809 | py | Python | src/dcm/agent/plugins/builtin/configure_server.py | JPWKU/unix-agent | 8f1278fc8c2768a8d4d54af642a881bace43652f | [
"Apache-2.0"
] | null | null | null | src/dcm/agent/plugins/builtin/configure_server.py | JPWKU/unix-agent | 8f1278fc8c2768a8d4d54af642a881bace43652f | [
"Apache-2.0"
] | 22 | 2015-09-15T20:52:34.000Z | 2016-03-11T22:44:24.000Z | src/dcm/agent/plugins/builtin/configure_server.py | JPWKU/unix-agent | 8f1278fc8c2768a8d4d54af642a881bace43652f | [
"Apache-2.0"
] | 3 | 2015-09-11T20:21:33.000Z | 2016-09-30T08:30:19.000Z | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import configparser
import json
import logging
import os
import urllib.parse
import dcm.agent.exceptions as exceptions
import dcm.agent.logger as dcm_logger
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.api.exceptions as plugin_exceptions
import dcm.agent.plugins.api.utils as plugin_utils
import dcm.agent.utils as utils
_g_logger = logging.getLogger(__name__)
| 38.635965 | 79 | 0.595187 |
c44957a976ba959e51bd70f903dcac90438fe807 | 17,184 | py | Python | phy/plot/interact.py | ycanerol/phy | 7a247f926dd5bf5d8ab95fe138e8f4a0db11b068 | [
"BSD-3-Clause"
] | 118 | 2019-06-03T06:19:43.000Z | 2022-03-25T00:05:26.000Z | phy/plot/interact.py | ycanerol/phy | 7a247f926dd5bf5d8ab95fe138e8f4a0db11b068 | [
"BSD-3-Clause"
] | 761 | 2015-01-08T11:17:41.000Z | 2019-05-27T16:12:08.000Z | phy/plot/interact.py | ycanerol/phy | 7a247f926dd5bf5d8ab95fe138e8f4a0db11b068 | [
"BSD-3-Clause"
] | 70 | 2019-05-30T11:05:26.000Z | 2022-03-30T11:51:23.000Z | # -*- coding: utf-8 -*-
"""Common layouts."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import numpy as np
from phylib.utils import emit
from phylib.utils.geometry import get_non_overlapping_boxes, get_closest_box
from .base import BaseLayout
from .transform import Scale, Range, Subplot, Clip, NDC
from .utils import _get_texture, _in_polygon
from .visuals import LineVisual, PolygonVisual
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Grid
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Boxed
#------------------------------------------------------------------------------
# Box scaling
#--------------------------------------------------------------------------
def _increment_box_scaling(self, cw=1., ch=1.):
self._box_scaling = (self._box_scaling[0] * cw, self._box_scaling[1] * ch)
self.update()
# Layout scaling
#--------------------------------------------------------------------------
class Stacked(Boxed):
"""Layout showing a number of subplots stacked vertically.
Parameters
----------
n_boxes : int
Number of boxes to stack vertically.
box_var : str
Name of the GLSL variable with the box index.
origin : str
top or bottom
Note
----
To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL
variable specified in `box_var`.
"""
margin = 0
_origin = 'bottom'
def get_box_pos(self, n_boxes):
"""Return the box bounds for a given number of stacked boxes."""
# Signal bounds.
b = np.zeros((n_boxes, 2))
b[:, 1] = np.linspace(-1, 1, n_boxes)
if self._origin == 'top':
b = b[::-1, :]
return b
def attach(self, canvas):
"""Attach the stacked interact to a canvas."""
BaseLayout.attach(self, canvas)
canvas.gpu_transforms += self.gpu_transforms
canvas.inserter.insert_vert("""
#include "utils.glsl"
attribute float {};
uniform float n_boxes;
uniform bool u_top_origin;
uniform vec2 u_box_size;
""".format(self.box_var), 'header', origin=self)
canvas.inserter.insert_vert("""
float margin = .1 / n_boxes;
float a = 1 - 2. / n_boxes + margin;
float b = -1 + 2. / n_boxes - margin;
float u = (u_top_origin ? (n_boxes - 1. - {bv}) : {bv}) / max(1., n_boxes - 1.);
float y0 = -1 + u * (a + 1);
float y1 = b + u * (1 - b);
float ym = .5 * (y0 + y1);
float yh = u_box_size.y * (y1 - ym);
y0 = ym - yh;
y1 = ym + yh;
vec4 box_bounds = vec4(-1., y0, +1., y1);
""".format(bv=self.box_var), 'before_transforms', origin=self)
def update_visual(self, visual):
"""Update a visual."""
BaseLayout.update_visual(self, visual)
if 'n_boxes' in visual.program:
visual.program['n_boxes'] = self.n_boxes
visual.program['u_box_size'] = self._box_scaling
visual.program['u_top_origin'] = self._origin == 'top'
#------------------------------------------------------------------------------
# Interactive tools
#------------------------------------------------------------------------------
| 33.694118 | 97 | 0.557437 |
c44cda7d547bb9bf0fd8879defc0c14046119449 | 623 | py | Python | AutocompleteHandler.py | codeforamerica/sheltraustin | a07ffd4b328a9d961347a85b49c95d8bf5ec1046 | [
"BSD-3-Clause"
] | null | null | null | AutocompleteHandler.py | codeforamerica/sheltraustin | a07ffd4b328a9d961347a85b49c95d8bf5ec1046 | [
"BSD-3-Clause"
] | 1 | 2015-08-03T21:27:36.000Z | 2015-08-03T21:27:36.000Z | AutocompleteHandler.py | codeforamerica/sheltraustin | a07ffd4b328a9d961347a85b49c95d8bf5ec1046 | [
"BSD-3-Clause"
] | 1 | 2021-04-17T10:13:29.000Z | 2021-04-17T10:13:29.000Z |
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import simplejson
from QueryHandler import QueryHandler | 23.074074 | 62 | 0.746388 |
c44d0eafae3c92e64f9041228d582ce1a1b6ed30 | 1,869 | py | Python | mirari/SV/migrations/0052_auto_20190428_1522.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | mirari/SV/migrations/0052_auto_20190428_1522.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | 18 | 2019-12-27T19:58:20.000Z | 2022-02-27T08:17:49.000Z | mirari/SV/migrations/0052_auto_20190428_1522.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2019-04-28 20:22
from django.db import migrations, models
import django.db.models.deletion
| 37.38 | 147 | 0.604066 |
c44d2937a78223f5c0f6b30adbd02a5949d5f2e6 | 3,339 | py | Python | svl/compiler/plot_validators.py | timothyrenner/svl | a74c09c49f2e14046acd4b0eeb861f8fef6bca96 | [
"MIT"
] | 8 | 2019-03-27T12:49:21.000Z | 2020-10-10T11:16:25.000Z | svl/compiler/plot_validators.py | timothyrenner/svl | a74c09c49f2e14046acd4b0eeb861f8fef6bca96 | [
"MIT"
] | 65 | 2018-08-26T14:48:45.000Z | 2020-03-17T12:24:42.000Z | svl/compiler/plot_validators.py | timothyrenner/svl | a74c09c49f2e14046acd4b0eeb861f8fef6bca96 | [
"MIT"
] | 1 | 2019-09-13T19:39:07.000Z | 2019-09-13T19:39:07.000Z | from toolz import get
# Rule table consumed by validate_plot().  Each entry is a 3-tuple of:
#   1. the set of plot types the rule applies to,
#   2. a predicate over the plot-spec dict that returns True when the spec
#      VIOLATES the rule,
#   3. the failure message reported for that violation.
# `get` here is toolz.get(key, mapping, default); it lets predicates probe
# optional keys ("x", "y", "axis", "hole", "step", "bins") without raising.
PLOT_VALIDATORS = [
    (
        {"line", "scatter", "bar"},
        lambda x: ("x" not in x) or ("y" not in x),
        "XY plot does not have X and Y.",
    ),
    (
        {"histogram"},
        lambda x: ("step" in x) and ("bins" in x),
        "Histogram cannot have STEP and BINS.",
    ),
    (
        {"line", "scatter", "bar"},
        lambda x: ("agg" in x["x"]) and ("agg" in x["y"]),
        "XY plot cannot have an aggregation on X and Y.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("agg" in get("x", x, {}))
        or ("agg" in get("y", x, {}))
        or ("agg" in get("axis", x, {})),
        "Histograms and pie charts cannot have aggregations.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("temporal" in get("x", x, {}))
        or ("temporal" in get("y", x, {}))
        or ("temporal" in get("axis", x, {})),
        "Histograms and pie charts cannot have temporal axes.",
    ),
    (
        {"histogram"},
        lambda x: ("x" in x) and ("y" in x),
        "Histograms can have X or Y, not both.",
    ),
    (
        {"histogram"},
        lambda x: ("x" not in x) and ("y" not in x),
        "Histograms must have an X or Y.",
    ),
    ({"pie"}, lambda x: "axis" not in x, "Pie charts must have an axis."),
    (
        {"line", "bar"},  # SORT is a no-op for scatter.
        lambda x: ("sort" in x["x"]) and ("sort" in x["y"]),
        "Cannot sort by two axes.",
    ),
    (
        {"pie"},
        lambda x: (get("hole", x, 0.0) < 0) or (get("hole", x, 0.0) > 1),
        "HOLE must be between zero and one.",
    ),
    (
        {"histogram"},
        lambda x: get("step", x, 1) <= 0,
        "STEP must be greater than zero.",
    ),
    (
        {"histogram"},
        lambda x: get("bins", x, 1) <= 0,
        "BINS must be greater than zero.",
    ),
    (
        {"histogram", "pie"},
        lambda x: "color_by" in x,
        "Histograms and pie charts cannot have COLOR BY.",
    ),
    ({"pie"}, lambda x: "split_by" in x, "Pie charts cannot have SPLIT BY."),
    (
        {"line", "scatter", "bar"},
        lambda x: ("split_by" in x) and ("color_by" in x),
        "Cannot have COLOR BY and SPLIT BY on same plot.",
    ),
    (
        {"line", "scatter", "bar"},
        lambda x: (
            # Both axes must exist before probing them; otherwise this
            # predicate would raise for specs other validators already reject.
            ("x" in x)
            and ("y" in x)
        )
        and (("agg" in x["x"]) or ("agg" in x["y"]))
        and (("color_by" in x) and ("agg" not in x["color_by"])),
        "If there's an aggregation on X or Y, COLOR BY must also aggregate.",
    ),
]
def validate_plot(svl_plot):
    """Check an SVL plot specifier against every applicable validation rule.

    Parameters
    ----------
    svl_plot : dict
        The SVL plot specifier.

    Returns
    -------
    Tuple[bool, str]
        ``(True, "")`` when every applicable rule passes, otherwise
        ``(False, msg)`` where ``msg`` joins the messages of all violated
        rules, one per line, in table order.
    """
    # A rule applies when this plot's type is in its type set; it fails when
    # its predicate flags the spec.
    failures = [
        message
        for plot_types, is_invalid, message in PLOT_VALIDATORS
        if svl_plot["type"] in plot_types and is_invalid(svl_plot)
    ]
    return not failures, "\n".join(failures)
| 29.289474 | 77 | 0.481881 |
c44dbf415c5fb9329410760b9f9c0e517b6fdb6f | 7,421 | py | Python | decision_tree.py | cjbayron/ml-models | b3171c9a82fe5ecdcdc5abcdc20af7c18f9f8ec4 | [
"MIT"
] | 1 | 2018-12-15T16:36:41.000Z | 2018-12-15T16:36:41.000Z | decision_tree.py | cjbayron/ml-models | b3171c9a82fe5ecdcdc5abcdc20af7c18f9f8ec4 | [
"MIT"
] | null | null | null | decision_tree.py | cjbayron/ml-models | b3171c9a82fe5ecdcdc5abcdc20af7c18f9f8ec4 | [
"MIT"
] | null | null | null | '''
Building a Decision Tree using CART (from scratch)
Note: Code was tested only on dataset with numerical features.
Categorical features are not yet fully supported.
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from scikitplot.metrics import plot_confusion_matrix
import common.utils as ut
# get data from:
# https://www.kaggle.com/c/otto-group-product-classification-challenge
TRN_DATA_PATH = 'datasets/otto-group-product-classification/train.csv'
NUM_SAMPLES = 5000
NUM_FEATS = 93
def visualize_data(feats, true_labels, preds):
    '''Display labeled data and clustered data.

    Draws two side-by-side scatter plots of the same t-SNE-reduced samples:
    the left panel coloured by the true labels, the right by the predictions.

    Args:
        feats: feature table passed to ut.reduce_to_2D_by_tsne, which is
            expected to return a frame with 'pc1'/'pc2' columns.
        true_labels: ground-truth labels; assumed to be integers 1..9, since
            `label2col_map[label-1]` indexes a 9-colour list -- TODO confirm.
        preds: predicted labels, same encoding as true_labels.
    '''
    print("Visualizing data...")
    # Reduce features to two t-SNE components for plotting.
    red_feats = ut.reduce_to_2D_by_tsne(feats)
    # One colour per class id (class 1 -> 'red', ..., class 9 -> 'pink').
    label2col_map = ['red', 'orange', 'yellow', 'green', 'blue',
                     'violet', 'brown', 'gray', 'pink']
    label_list = np.unique(true_labels)
    _, ax = plt.subplots(ncols=2, figsize=(10, 5))
    # Left axis paired with the true labels, right axis with the predictions.
    graph_label_pair = zip(ax, [true_labels, preds])
    for graph, labels in graph_label_pair:
        for label in label_list:
            # get samples with label == label
            idxs = np.where(labels == label)
            # get components
            pc1, pc2 = red_feats['pc1'].values[idxs], red_feats['pc2'].values[idxs]
            # scatter plot w/ color based on labels
            graph.scatter(x=pc1, y=pc2, color=label2col_map[label-1],
                          alpha=0.5, label=label)
            graph.set_xlabel('PC1')
            graph.set_ylabel('PC2')
    ax[0].set_title('Labeled Products')
    ax[1].set_title('Predicted Labels')
    for graph in ax:
        graph.legend()  # show legend
        graph.grid(True)  # show gridlines
    plt.show()
def get_impurity(labels):
    '''Return the Gini impurity of a label collection.

    Computed as sum over distinct classes of p * (1 - p), where p is the
    class frequency.  An empty collection yields 0.0.
    '''
    total = float(len(labels))
    _, counts = np.unique(labels, return_counts=True)
    proportions = (float(count) / total for count in counts)
    return float(sum(p * (1.0 - p) for p in proportions))
def get_best_split_along_column(data, labels, feat_idx, categorical=False):
    '''Get best split using features in a single column.

    Scans every distinct value of column `feat_idx` as a candidate splitter
    and keeps the one with the lowest count-weighted Gini impurity
    (see get_impurity).

    Args:
        data: 2-D numpy array (samples x features).
        labels: 1-D array-like of labels aligned with rows of `data`.
        feat_idx: index of the column to scan.
        categorical: when True, candidate splits partition by ==/!= instead
            of >=/<.

    Returns:
        (min_im, splitter, left_idxs, right_idxs): the scaled best impurity
        (np.inf if no valid split), the chosen splitter value (None if no
        valid split), and boolean row masks for the two sides ([] if no
        valid split).
    '''
    feat_col = data[:, feat_idx]
    splitter_pool = np.unique(feat_col)  # get splitters
    min_im = np.inf
    left_idxs = []
    right_idxs = []
    splitter = None
    for val in splitter_pool:
        if categorical:
            left_labels = labels[feat_col == val]
            right_labels = labels[feat_col != val]
        else:
            left_labels = labels[feat_col >= val]
            right_labels = labels[feat_col < val]
        # if all data is placed on only one side
        # then it is not a meaningful split so we skip
        if len(left_labels) == len(data) or len(right_labels) == len(data):
            continue
        avg_im = len(left_labels) * get_impurity(left_labels) + \
                 len(right_labels) * get_impurity(right_labels)
        if avg_im < min_im:
            min_im = avg_im
            # NOTE(review): these masks always use the numerical >=/< rule,
            # even when `categorical` is True and the impurity above was
            # computed with ==/!= -- consistent with the module header's
            # remark that categorical features are not fully supported.
            left_idxs = (feat_col >= val)
            right_idxs = (feat_col < val)
            splitter = val
    if len(left_idxs) + len(right_idxs) > 0:
        # NOTE(review): both masks are full-length boolean arrays, so this
        # divides by 2*n rather than n; the value is a constant factor of the
        # true weighted-average impurity, so argmin comparisons across
        # columns are unaffected, but the absolute scale is halved.
        min_im /= (len(left_idxs) + len(right_idxs))
    return min_im, splitter, left_idxs, right_idxs
def main():
    '''Train a CART decision tree on the product-classification data and
    report classification metrics on training and test splits, plus plots.

    Reads data via the module-level TRN_DATA_PATH / NUM_SAMPLES / NUM_FEATS
    settings; returns 0 on completion.
    '''
    global TRN_DATA_PATH, NUM_SAMPLES, NUM_FEATS
    # no need to rescale for decision tree
    feats, labels = ut.get_data_from_csv(TRN_DATA_PATH, rescale=False)
    # Optionally subsample rows to keep tree building tractable.
    if NUM_SAMPLES < len(feats):
        feats, labels = ut.sample(feats, labels, NUM_SAMPLES)
    feats = feats.values
    # Optionally keep only a random subset of the feature columns.
    if NUM_FEATS < len(feats[0]):
        idxs = np.random.choice(range(len(feats[0])), NUM_FEATS, replace=False)
        feats = feats[:, idxs]
    # Stratified 80/20 split preserves the class distribution.
    trn_feats, tst_feats, trn_labels, tst_labels = train_test_split(feats,
                                                                    labels,
                                                                    test_size=0.20,
                                                                    stratify=labels)
    # build tree (TreeNode is defined elsewhere in this file)
    print("Building decision tree...")
    decision_tree = TreeNode()
    decision_tree.build_tree(trn_feats, trn_labels.values)
    print("Done!")
    print("Checking accuracy on training set...")
    predictions = []
    for sample in trn_feats:
        result = decision_tree.classify(sample)
        predictions.append(result)
    # for checking only. must be 100% accuracy on training set
    print("Training Set Results:\n", classification_report(trn_labels, predictions))
    print("Using tree to predict labels...")
    predictions = []
    for sample in tst_feats:
        result = decision_tree.classify(sample)
        predictions.append(result)
    print("Test Set Results:\n", classification_report(tst_labels, predictions))
    visualize_data(pd.DataFrame(tst_feats), tst_labels, predictions)
    # display confusion matrix
    print("Plotting confusion matrix...")
    plot_confusion_matrix(tst_labels, predictions, normalize=True)
    plt.show()
    return 0


if __name__ == "__main__":
    main()
| 31.849785 | 87 | 0.597898 |
c44e49588a5ae8bdc21c7e3ab11388f043afd9f0 | 8,816 | py | Python | codegen/codegen/fblas_routine.py | spcl/fblas | 96425fbdbaeab6f43997d839836b8224a04f3b53 | [
"BSD-3-Clause"
] | 68 | 2019-02-07T21:30:21.000Z | 2022-02-16T20:09:27.000Z | codegen/codegen/fblas_routine.py | spcl/fblas | 96425fbdbaeab6f43997d839836b8224a04f3b53 | [
"BSD-3-Clause"
] | 2 | 2019-03-15T17:49:03.000Z | 2019-07-24T14:05:35.000Z | codegen/codegen/fblas_routine.py | spcl/fblas | 96425fbdbaeab6f43997d839836b8224a04f3b53 | [
"BSD-3-Clause"
] | 25 | 2019-03-15T03:00:15.000Z | 2021-08-04T10:21:43.000Z | """
FBlas Routine class: it used to represent a routine definition, specified by the user using JSON file.
It is used by the Host and Module Codegen (specified by the _codegen variable). Accordingly,
some class members could be invalid.
"""
from codegen import fblas_types
from codegen import generator_definitions
def are_elements_A_rowstreamed(self):
    """
    :return: True if the elements of matrix A are streamed in row-major
        order, i.e. self._elements_A_order == FblasOrder.FblasRowMajor.
    """
    return self._elements_A_order == fblas_types.FblasOrder.FblasRowMajor
def add_input_channel(self, routine_channel_name, user_name):
    '''
    Register the user-visible channel name under the routine channel name
    in the input-channel dictionary.
    An existing entry for the same routine channel name is overwritten.
    '''
    self._input_channels.update({routine_channel_name: user_name})
def add_output_channel(self, routine_channel_name, user_name):
    '''
    Add the channel to the dictionary of output channels.
    If already present, it will be overwritten.
    '''
    self._output_channels[routine_channel_name] = user_name
| 27.810726 | 153 | 0.678993 |
c451451a751c9fd2575b893cf89c5f54e2fd8166 | 840 | py | Python | test_hoyolab.py | c3kay/hoyolab-json-feed | 43839194a253271c9c2fcbb564eb4b3e6179c01e | [
"Unlicense"
] | 1 | 2021-09-17T12:40:40.000Z | 2021-09-17T12:40:40.000Z | test_hoyolab.py | c3kay/hoyolab-json-feed | 43839194a253271c9c2fcbb564eb4b3e6179c01e | [
"Unlicense"
] | null | null | null | test_hoyolab.py | c3kay/hoyolab-json-feed | 43839194a253271c9c2fcbb564eb4b3e6179c01e | [
"Unlicense"
] | null | null | null | from hoyolab import main
from os import environ
from os.path import exists
import atoma
| 24.705882 | 62 | 0.713095 |
c453263bed6e1f5fabe0695622acd9caad9d7447 | 194,112 | py | Python | thirdweb/abi/nft_collection/__init__.py | princetonwong/python-sdk | f35181d97620e29d055498fca75f3702f3bb2449 | [
"Apache-2.0"
] | 1 | 2022-02-18T16:59:12.000Z | 2022-02-18T16:59:12.000Z | thirdweb/abi/nft_collection/__init__.py | princetonwong/python-sdk | f35181d97620e29d055498fca75f3702f3bb2449 | [
"Apache-2.0"
] | null | null | null | thirdweb/abi/nft_collection/__init__.py | princetonwong/python-sdk | f35181d97620e29d055498fca75f3702f3bb2449 | [
"Apache-2.0"
] | null | null | null | """Generated wrapper for NFTCollection Solidity contract."""
# pylint: disable=too-many-arguments
import json
from typing import ( # pylint: disable=unused-import
Any,
List,
Optional,
Tuple,
Union,
)
from eth_utils import to_checksum_address
from mypy_extensions import TypedDict # pylint: disable=unused-import
from hexbytes import HexBytes
from thirdweb_web3 import Web3
from thirdweb_web3.contract import ContractFunction
from thirdweb_web3.datastructures import AttributeDict
from thirdweb_web3.providers.base import BaseProvider
from zero_ex.contract_wrappers.bases import ContractMethod, Validator
from zero_ex.contract_wrappers.tx_params import TxParams
# Try to import a custom validator class definition; if there isn't one,
# declare one that we can instantiate for the default argument to the
# constructor for NFTCollection below.
try:
# both mypy and pylint complain about what we're doing here, but this
# works just fine, so their messages have been disabled here.
from . import ( # type: ignore # pylint: disable=import-self
NFTCollectionValidator,
)
except ImportError:
try:
from .middleware import MIDDLEWARE # type: ignore
except ImportError:
pass
# pylint: disable=too-many-public-methods,too-many-instance-attributes
# pylint: disable=too-many-lines
| 37.684333 | 17,643 | 0.634865 |
c45419a203ad566f8ae9d52cc297219542ecf9f1 | 237 | py | Python | sausage_grinder/urls.py | jesseerdmann/audiobonsai | ec1edcdbadc6b2aff3b743b5c42515f4d5638830 | [
"Apache-2.0"
] | null | null | null | sausage_grinder/urls.py | jesseerdmann/audiobonsai | ec1edcdbadc6b2aff3b743b5c42515f4d5638830 | [
"Apache-2.0"
] | null | null | null | sausage_grinder/urls.py | jesseerdmann/audiobonsai | ec1edcdbadc6b2aff3b743b5c42515f4d5638830 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views as sg
# Route table for the sausage_grinder app: one view per entity type plus the
# index view at the app root.  All views live in this package's views module
# (imported above as `sg`).
urlpatterns = [
    path('artist', sg.artist),
    path('genre', sg.genre),
    path('release', sg.release),
    path('track', sg.track),
    path('', sg.sausage_grinder_index),
]
| 19.75 | 39 | 0.637131 |
c45577ce768212873fbaadfacdbe638ce864abf9 | 1,194 | py | Python | sails/ui/mmck/parameters/string.py | metrasynth/solar-sails | 3a10774dad29d85834d3acb38171741b3a11ef91 | [
"MIT"
] | 6 | 2016-11-22T14:32:55.000Z | 2021-08-15T01:35:33.000Z | sails/ui/mmck/parameters/string.py | metrasynth/solar-sails | 3a10774dad29d85834d3acb38171741b3a11ef91 | [
"MIT"
] | 2 | 2022-03-18T16:47:43.000Z | 2022-03-18T16:47:44.000Z | sails/ui/mmck/parameters/string.py | metrasynth/solar-sails | 3a10774dad29d85834d3acb38171741b3a11ef91 | [
"MIT"
] | 2 | 2019-07-09T23:44:08.000Z | 2021-08-15T01:35:37.000Z | from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtWidgets import QLineEdit
from sf.mmck.parameters import String
from .manager import widget_class_for
from .widget import ParameterWidget
| 35.117647 | 89 | 0.707705 |
c455ef3791cf634263613f0736425fbda6d62c4c | 550 | py | Python | plot_top_performers.py | jmphil09/mario_rl | 6e93c1318e9957d679a5ec8d29687756ac7fc4b1 | [
"MIT"
] | null | null | null | plot_top_performers.py | jmphil09/mario_rl | 6e93c1318e9957d679a5ec8d29687756ac7fc4b1 | [
"MIT"
] | null | null | null | plot_top_performers.py | jmphil09/mario_rl | 6e93c1318e9957d679a5ec8d29687756ac7fc4b1 | [
"MIT"
] | null | null | null | from FitnessPlot import FitnessPlot
# Dead experiment kept for reference: plot every data_top{1..5} folder.
'''
for n in range(1,6):
    plot = FitnessPlot(folder_prefix='data_top{}'.format(n))
    plot.plot_all_workers()
    plot.plot_workers_as_average()
'''
# Plot only the "data_top1" results recorded by 16 parallel workers.
plot = FitnessPlot(folder_prefix='data_top1', num_workers=16)
worker_dict = plot.create_worker_dict()
#plot.plot_all_workers()
#plot.plot_workers_as_average()
#print(worker_dict)
# Print each worker's id and its last recorded entry.
for key,value in worker_dict.items():
    # dict_len is only consumed by the commented-out filter below.
    dict_len = len(value)
    #if dict_len < 100:
    #    print(key)
    #    print(dict_len)
    print(key)
    # Assumes value is indexable by len(value)-1 (list or int-keyed dict
    # counted from 0) -- TODO confirm against FitnessPlot.create_worker_dict.
    print(value[len(value)-1])
c456582b3a99628d95abc79fe354227dac957e3b | 3,696 | py | Python | test/PySrc/tests/test_code_tracer_width.py | lifubang/live-py-plugin | 38a3cf447fd7d9c4e6014b71134e178b0d8a01de | [
"MIT"
] | 224 | 2015-03-22T23:40:52.000Z | 2022-03-01T21:45:51.000Z | test/PySrc/tests/test_code_tracer_width.py | lifubang/live-py-plugin | 38a3cf447fd7d9c4e6014b71134e178b0d8a01de | [
"MIT"
] | 371 | 2015-04-28T05:14:00.000Z | 2022-03-28T01:31:22.000Z | test/PySrc/tests/test_code_tracer_width.py | lifubang/live-py-plugin | 38a3cf447fd7d9c4e6014b71134e178b0d8a01de | [
"MIT"
] | 53 | 2015-10-30T07:52:07.000Z | 2022-02-28T12:56:35.000Z | from space_tracer.main import replace_input, TraceRunner
| 25.666667 | 73 | 0.446699 |
c45760cde68ead756aaeedf9a4958bde55f0fdc2 | 458 | py | Python | benchmark/src/benchmark/bench_logging.py | lwanfuturewei/QFlock | 90d6875d9adc8fe2968694904f8421d41e30e189 | [
"Apache-2.0"
] | null | null | null | benchmark/src/benchmark/bench_logging.py | lwanfuturewei/QFlock | 90d6875d9adc8fe2968694904f8421d41e30e189 | [
"Apache-2.0"
] | null | null | null | benchmark/src/benchmark/bench_logging.py | lwanfuturewei/QFlock | 90d6875d9adc8fe2968694904f8421d41e30e189 | [
"Apache-2.0"
] | 2 | 2022-03-03T15:28:23.000Z | 2022-03-04T15:33:19.000Z |
import logging
| 26.941176 | 86 | 0.530568 |
c45814f676d4f4897bba48b176daa7d8a452554e | 6,921 | py | Python | tools/configure-gateway/threescale/proxies.py | jparsai/f8a-3scale-connect-api | a782753d662eee5d450da3c20e9ae9eb13b8b560 | [
"Apache-2.0"
] | 1 | 2018-09-14T05:18:52.000Z | 2018-09-14T05:18:52.000Z | tools/configure-gateway/threescale/proxies.py | jparsai/f8a-3scale-connect-api | a782753d662eee5d450da3c20e9ae9eb13b8b560 | [
"Apache-2.0"
] | 48 | 2017-12-05T12:05:56.000Z | 2021-03-25T22:09:29.000Z | tools/configure-gateway/threescale/proxies.py | jparsai/f8a-3scale-connect-api | a782753d662eee5d450da3c20e9ae9eb13b8b560 | [
"Apache-2.0"
] | 5 | 2018-01-29T04:53:13.000Z | 2020-04-16T13:59:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ThreeScale Proxies Rule interface for APIs."""
from .base import ThreeScale
import logging
import requests
import xmltodict
import json
logger = logging.getLogger(__name__)
| 38.237569 | 91 | 0.566681 |
c458cb4e772b1e30729560fd59117cb1dab40b05 | 241 | py | Python | src/__main__.py | Grox2006/Kayambot | a49cf7fd16fdc049500ae645784cc671b04edf87 | [
"MIT"
] | null | null | null | src/__main__.py | Grox2006/Kayambot | a49cf7fd16fdc049500ae645784cc671b04edf87 | [
"MIT"
] | null | null | null | src/__main__.py | Grox2006/Kayambot | a49cf7fd16fdc049500ae645784cc671b04edf87 | [
"MIT"
] | null | null | null | import sys
from __init__ import Bot
MESSAGE_USAGE = "Usage is python %s [name] [token]"
if __name__ == "__main__":
if len(sys.argv) == 3:
Bot(sys.argv[1], sys.argv[2])
else:
print(MESSAGE_USAGE.format(sys.argv[0]))
| 21.909091 | 51 | 0.630705 |
c45a35a45e18477dcb0c3a971fc4e41ecd533922 | 985 | py | Python | app/__init__.py | logicalicy/flask-react-boilerplate | 2a999c969a7fc7d244830ebba02a00f0feca79dd | [
"MIT"
] | 2 | 2017-02-27T16:48:08.000Z | 2019-05-10T11:22:07.000Z | app/__init__.py | logicalicy/flask-react-boilerplate | 2a999c969a7fc7d244830ebba02a00f0feca79dd | [
"MIT"
] | null | null | null | app/__init__.py | logicalicy/flask-react-boilerplate | 2a999c969a7fc7d244830ebba02a00f0feca79dd | [
"MIT"
] | null | null | null | # Created with tutorials:
# https://www.digitalocean.com/community/tutorials/how-to-structure-large-flask-applications
# http://flask.pocoo.org/docs/0.12/tutorial
from flask import Flask, g, render_template
from flask_sqlalchemy import SQLAlchemy
import sqlite3
# Define WSGI application object.
app = Flask(__name__)
# Configurations
app.config.from_object('config')
app.config.from_envvar('CONFIG', silent=True)
# Define database object.
db = SQLAlchemy(app)
# Import a module / component using its blueprint handler variable (mod_auth)
from app.api.entries.controllers import mod as entries_module
from app.site.controllers import mod as site_module
# Register blueprint(s)
app.register_blueprint(entries_module)
app.register_blueprint(site_module)
# app.register_blueprint(xyz_module)
# ..
# Build the database:
# This will create the database file using SQLAlchemy
db.create_all()
| 25.921053 | 92 | 0.792893 |
c45c0b6aabc6d08c2689d66882739d5b4c1b5f06 | 19,075 | py | Python | dumpcode/cpiter.py | gkfthddk/keras | 46d96c65d69c39df298800336bbb4d867a2561fb | [
"MIT"
] | null | null | null | dumpcode/cpiter.py | gkfthddk/keras | 46d96c65d69c39df298800336bbb4d867a2561fb | [
"MIT"
] | null | null | null | dumpcode/cpiter.py | gkfthddk/keras | 46d96c65d69c39df298800336bbb4d867a2561fb | [
"MIT"
] | null | null | null | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import subprocess
import numpy as np
import datetime
import random
import warnings
import ROOT as rt
import math
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import Callback
from array import array
from sklearn.metrics import roc_auc_score, auc, roc_curve
def sampleallnum(self):
    """Total number of entries available in the opened sample files."""
    total_entries = self.Entries
    return total_entries
def trainnum(self):
    """Number of samples in the [Begin, End) window."""
    begin, end = self.Begin, self.End
    return end - begin
def totalnum(self):
    """Number of batches needed to cover the g- and q-sample ranges combined."""
    sample_count = (self.gEnd - self.gBegin) + (self.qEnd - self.qBegin)
    return int(math.ceil(float(sample_count) / float(self.batch_size)))
def next(self):
    """Generator yielding (data, label) mini-batches.

    Each batch draws, with probability 0.5 per slot, either a sample from
    self.gjetset (one-hot label [0, 1]) or from self.qjetset (label [1, 0])
    -- presumably gluon vs quark jets given the g/q prefixes; TODO confirm.
    When either pool is exhausted its cursor rewinds and `endfile` is set,
    so the (possibly partial) batch is still emitted and reset() is called.

    Yields:
        data: one-element list wrapping an np.array of the batch's jets.
        label: np.array of the batch's one-hot label rows.
    """
    while self.endfile==0:
        self.count+=1
        # NOTE(review): arnum and variables are assigned but never used below.
        arnum=self.arnum
        jetset=[]
        variables=[]
        labels=[]
        for i in range(self.batch_size):
            # Coin flip chooses which sample pool feeds this batch slot.
            if(random.random()<0.5):
                if(self.a-self.gBegin>=len(self.gjetset)):
                    # g-pool exhausted: rewind cursor, end the epoch.
                    self.a=self.gBegin
                    self.endfile=1
                    break
                labels.append([0,1])
                jetset.append(self.gjetset[self.a-self.gBegin])
                self.a+=1
            else:
                if(self.b-self.qBegin>=len(self.qjetset)):
                    # q-pool exhausted: rewind cursor, end the epoch.
                    self.b=self.qBegin
                    self.endfile=1
                    break
                labels.append([1,0])
                jetset.append(self.qjetset[self.b-self.qBegin])
                self.b+=1
        data=[]
        data.append(np.array(jetset))
        label=np.array(labels)
        #if(self.totalnum()<=self.count):
        #    if(self.istrain==1):print "\nreset\n"
        #    self.reset()
        if(self.endfile==1):
            #print "\nendd\n"
            # reset() is defined elsewhere in this class; presumably it
            # rewinds cursors and clears endfile for the next epoch --
            # TODO confirm.
            self.reset()
        #print "\n",self.count,self.istrain,"\n"
        yield data, label
        #else:
        #if(self.istrain==1):
        #    print "\n",datetime.datetime.now()
        #raise StopIteration
| 39.739583 | 841 | 0.654522 |
c45d9da847d632f929a40311d340ee5e03a9dfff | 287 | py | Python | addons/iap_crm/models/crm_lead.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/iap_crm/models/crm_lead.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/iap_crm/models/crm_lead.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
| 26.090909 | 99 | 0.703833 |
c45f0b40e801dd329eac9e771b4dd170e217817c | 6,600 | py | Python | vitrage/tests/unit/datasources/kubernetes/test_kubernetes_transformer.py | openstack/vitrage | 95b33dbf39b040e23915882a2879c87aec239ca9 | [
"Apache-2.0"
] | 89 | 2015-09-30T21:42:17.000Z | 2022-03-28T16:31:19.000Z | vitrage/tests/unit/datasources/kubernetes/test_kubernetes_transformer.py | openstack/vitrage | 95b33dbf39b040e23915882a2879c87aec239ca9 | [
"Apache-2.0"
] | 4 | 2015-12-13T13:06:53.000Z | 2016-01-03T19:51:28.000Z | vitrage/tests/unit/datasources/kubernetes/test_kubernetes_transformer.py | openstack/vitrage | 95b33dbf39b040e23915882a2879c87aec239ca9 | [
"Apache-2.0"
] | 43 | 2015-11-04T15:54:27.000Z | 2021-12-10T14:24:03.000Z | # Copyright 2018 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from testtools import matchers
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import GraphAction
from vitrage.common.constants import UpdateMethod
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.kubernetes.properties import KUBERNETES_DATASOURCE
from vitrage.datasources.kubernetes.properties import KubernetesProperties \
as kubProp
from vitrage.datasources.kubernetes.transformer import KubernetesTransformer
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources.nova.instance.transformer import InstanceTransformer
from vitrage.datasources import transformer_base as tbase
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.tests import base
from vitrage.tests.mocks import mock_driver as mock_sync
from vitrage.tests.mocks import utils
LOG = logging.getLogger(__name__)
cluster_name = 'kubernetes'
| 39.759036 | 79 | 0.700455 |
c45fabb5527e1d2513cfd056db4a65258232ae26 | 1,058 | py | Python | two_children.py | daniel2019-max/HackerRank-preparation-month | 400f8c0cfaa9fc8e13a683c15ecb5d2341d9c209 | [
"MIT"
] | null | null | null | two_children.py | daniel2019-max/HackerRank-preparation-month | 400f8c0cfaa9fc8e13a683c15ecb5d2341d9c209 | [
"MIT"
] | null | null | null | two_children.py | daniel2019-max/HackerRank-preparation-month | 400f8c0cfaa9fc8e13a683c15ecb5d2341d9c209 | [
"MIT"
] | null | null | null | # Two children, Lily and Ron, want to share a chocolate bar. Each of the squares has an integer on it.
# Lily decides to share a contiguous segment of the bar selected such that:
# The length of the segment matches Ron's birth month, and,
# The sum of the integers on the squares is equal to his birth day.
# Determine how many ways she can divide the chocolate.
# int s[n]: the numbers on each of the squares of chocolate
# int d: Ron's birth day
# int m: Ron's birth month
# Two children
# Worked example for the chocolate-sharing problem described above, with a
# hard-coded sample input.
s = '2 5 1 3 4 4 3 5 1 1 2 1 4 1 3 3 4 2 1'
caracteres = '18 7'
# Squares of the chocolate bar as integers.
array = list(map(int, s.split()))
# [d, m]: Ron's birth day and birth month (see header comments).
caracteresList = list(map(int, caracteres.split()))
# `birthday` is defined elsewhere in this file (not visible here); per the
# header it should count length-m contiguous segments of `array` summing to d.
print(birthday(array, caracteresList[0], caracteresList[1]))
| 31.117647 | 102 | 0.670132 |
c46046acfa73778c21a31da519b8cdbcc2cefaef | 3,517 | py | Python | sdk/python/pulumi_sonarqube/get_users.py | jshield/pulumi-sonarqube | 53664a97903af3ecdf4f613117d83d0acae8e53e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_sonarqube/get_users.py | jshield/pulumi-sonarqube | 53664a97903af3ecdf4f613117d83d0acae8e53e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_sonarqube/get_users.py | jshield/pulumi-sonarqube | 53664a97903af3ecdf4f613117d83d0acae8e53e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetUsersResult',
'AwaitableGetUsersResult',
'get_users',
'get_users_output',
]
def get_users(login_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
    """
    Use this data source to access information about an existing resource.

    :param login_name: login of the user to look up; forwarded to the
        provider as ``loginName``.
    :param opts: options for the invoke; when omitted, a default
        ``InvokeOptions`` with the package version is used.
    """
    # NOTE: this file is generated by the Pulumi Terraform bridge (see the
    # header warning) -- prefer regenerating over hand-editing.
    __args__ = dict()
    __args__['loginName'] = login_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke of the provider function; `.value` unwraps the raw
    # result, which is then repackaged into the awaitable result type.
    __ret__ = pulumi.runtime.invoke('sonarqube:index/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value
    return AwaitableGetUsersResult(
        email=__ret__.email,
        id=__ret__.id,
        is_local=__ret__.is_local,
        login_name=__ret__.login_name,
        name=__ret__.name)
| 31.972727 | 119 | 0.644868 |
c460565d7c12782c020979637ad1d6a595e59cab | 504 | py | Python | rl/valuefunction/FeatureExtractor.py | nickswalker/counterpoint-reinforcement-learning | 1d0481bd2c9976533175339e411a41f4eb1650aa | [
"MIT"
] | 1 | 2016-12-09T18:29:30.000Z | 2016-12-09T18:29:30.000Z | rl/valuefunction/FeatureExtractor.py | nickswalker/counterpoint-reinforcement-learning | 1d0481bd2c9976533175339e411a41f4eb1650aa | [
"MIT"
] | null | null | null | rl/valuefunction/FeatureExtractor.py | nickswalker/counterpoint-reinforcement-learning | 1d0481bd2c9976533175339e411a41f4eb1650aa | [
"MIT"
] | null | null | null | from abc import abstractmethod
from typing import List
from rl.action import Action
from rl.state import State
| 19.384615 | 67 | 0.684524 |
c4606fc8327fb668e73c4a4c899e2f956318eefb | 563 | py | Python | yxf_utils/jsonx.py | yanyaming/yxf_utils | c1e1f7012f089374088210e55a953548b3c4da8b | [
"MIT"
] | null | null | null | yxf_utils/jsonx.py | yanyaming/yxf_utils | c1e1f7012f089374088210e55a953548b3c4da8b | [
"MIT"
] | null | null | null | yxf_utils/jsonx.py | yanyaming/yxf_utils | c1e1f7012f089374088210e55a953548b3c4da8b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
json
"""
import json
# jsonpythonor
# json-pythonor
# pythonjsonjson
| 20.851852 | 58 | 0.735346 |
c4611f97e3d7c75a5d43b772cd3ffe6b29e5f96b | 1,044 | py | Python | ggshield/scan/scannable_errors.py | rgajason/gg-shield | 45c3534bdd174880710b97aedac068f6ddd52eaf | [
"MIT"
] | null | null | null | ggshield/scan/scannable_errors.py | rgajason/gg-shield | 45c3534bdd174880710b97aedac068f6ddd52eaf | [
"MIT"
] | 1 | 2021-06-02T04:28:09.000Z | 2021-06-02T04:28:09.000Z | ggshield/scan/scannable_errors.py | rgajason/gg-shield | 45c3534bdd174880710b97aedac068f6ddd52eaf | [
"MIT"
] | null | null | null | from ast import literal_eval
from typing import Dict, List
import click
from pygitguardian.models import Detail
from ggshield.text_utils import STYLE, display_error, format_text, pluralize
| 32.625 | 80 | 0.598659 |
c461b7cff1ea76d96382e29fc4f6db6ef1e4b933 | 18,049 | py | Python | Packs/Base/Scripts/DBotPreprocessTextData/DBotPreprocessTextData.py | matan-xmcyber/content | 7f02301c140b35956af3cd20cb8dfc64f34afb3e | [
"MIT"
] | 1 | 2021-08-07T00:21:58.000Z | 2021-08-07T00:21:58.000Z | Packs/Base/Scripts/DBotPreprocessTextData/DBotPreprocessTextData.py | matan-xmcyber/content | 7f02301c140b35956af3cd20cb8dfc64f34afb3e | [
"MIT"
] | 48 | 2022-03-08T13:45:00.000Z | 2022-03-31T14:32:05.000Z | Packs/Base/Scripts/DBotPreprocessTextData/DBotPreprocessTextData.py | matan-xmcyber/content | 7f02301c140b35956af3cd20cb8dfc64f34afb3e | [
"MIT"
] | 2 | 2020-12-10T12:02:45.000Z | 2020-12-15T09:20:01.000Z | # pylint: disable=no-member
from CommonServerUserPython import *
from CommonServerPython import *
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
import uuid
import spacy
import string
from html.parser import HTMLParser
from html import unescape
from re import compile as _Re
import pandas as pd
# define global parsers
DBOT_TEXT_FIELD = 'dbot_text'
DBOT_PROCESSED_TEXT_FIELD = 'dbot_processed_text'
CONTEXT_KEY = 'DBotPreProcessTextData'
HTML_PATTERNS = [
re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"),
re.compile(r"(?s)<!--(.*?)-->[\n]?"),
re.compile(r"(?s)<.*?>"),
re.compile(r" "),
re.compile(r" +")
]
html_parser = HTMLParser()
tokenizer = None
PRE_PROCESS_TYPES = {
'none': pre_process_none,
'nlp': pre_process_tokenizer,
}
if __name__ in ['builtins', '__main__']:
entry = main()
demisto.results(entry)
| 40.559551 | 120 | 0.633165 |
c464ae6c792d78df3c469e563d6a59248c7a5e64 | 2,799 | py | Python | punc_recover/tester/punc_tester.py | Z-yq/audioSamples.github.io | 53c474288f0db1a3acfe40ba57a4cd5f2aecbcd3 | [
"Apache-2.0"
] | 1 | 2022-03-03T02:51:55.000Z | 2022-03-03T02:51:55.000Z | punc_recover/tester/punc_tester.py | RapidAI/TensorflowASR | 084519b5a0464f465e1d72c24cba07c1ec55cd26 | [
"Apache-2.0"
] | null | null | null | punc_recover/tester/punc_tester.py | RapidAI/TensorflowASR | 084519b5a0464f465e1d72c24cba07c1ec55cd26 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import tensorflow as tf
from punc_recover.models.punc_transformer import PuncTransformer
from punc_recover.tester.base_tester import BaseTester
from utils.text_featurizers import TextFeaturizer
| 36.350649 | 94 | 0.599857 |
c464fcd2cef62b8d92f36f587767dd193dd32e2a | 13,293 | py | Python | moztrap/model/core/migrations/0003_auto__add_field_productversion_cc_version__add_field_product.py | mbeko/moztrap | db75e1f8756ef2c0c39652a66302b19c8afa0256 | [
"BSD-2-Clause"
] | null | null | null | moztrap/model/core/migrations/0003_auto__add_field_productversion_cc_version__add_field_product.py | mbeko/moztrap | db75e1f8756ef2c0c39652a66302b19c8afa0256 | [
"BSD-2-Clause"
] | null | null | null | moztrap/model/core/migrations/0003_auto__add_field_productversion_cc_version__add_field_product.py | mbeko/moztrap | db75e1f8756ef2c0c39652a66302b19c8afa0256 | [
"BSD-2-Clause"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 85.76129 | 187 | 0.568871 |