hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43c2d697bacb0820c4e842d6861cb1732909d8a0 | 11,386 | py | Python | main_fed.py | gao969/scaffold-dgc-clustering | 9f259dfdf0897dcb1dece2e1197268f585f54a69 | [
"MIT"
] | null | null | null | main_fed.py | gao969/scaffold-dgc-clustering | 9f259dfdf0897dcb1dece2e1197268f585f54a69 | [
"MIT"
] | null | null | null | main_fed.py | gao969/scaffold-dgc-clustering | 9f259dfdf0897dcb1dece2e1197268f585f54a69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import os
import torch.distributed as dist
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import LocalUpdateF
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from torch.multiprocessing import Process
from deep_gradient_compression import DGC
import json
# __name__main_fed.py__main__
# .pyimportmain_fed.pymain_fed.py__name__,main_fed.py__name__main_fed.py
if __name__ == '__main__':
    # ------------------------------------------------------------------
    # Experiment driver: trains the same model with a SCAFFOLD-style
    # control-variate scheme (first half) and with plain FedAvg (second
    # half), then plots train-loss and test-accuracy curves for both.
    # NOTE(review): variable names (delta_c, control_*) match the SCAFFOLD
    # algorithm; confirm against models/Update.LocalUpdate.
    # ------------------------------------------------------------------
    # parse args
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(args.gpu))
    # Pin all torch randomness / cuDNN autotuning so runs are reproducible.
    torch.manual_seed(0)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Single-node process group (rank 0) over gloo; world_size comes from CLI.
    rank = 0
    device_id = rank
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size)
    # if torch.cuda.is_available() and args.gpu != -1 else 'cpu'

    # load dataset and split users
    if args.dataset == 'mnist':
        # ToTensor scales pixels into [0, 1]; Normalize applies (x - 0.1307) / 0.3081.
        trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        if trans_mnist is not None:
            print(1)
            print(trans_mnist)
        # MNIST: 60000 training / 10000 test images.
        dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
        dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
        # sample users: IID vs non-IID client partitioning.
        if args.iid:
            dict_users = mnist_iid(dataset_train, args.num_users)
        else:
            dict_users = mnist_noniid(dataset_train, args.num_users)
    elif args.dataset == 'cifar':
        trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
        dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users)
        else:
            exit('Error: only consider IID setting in CIFAR10')
    else:
        exit('Error: unrecognized dataset')
    img_size = dataset_train[0][0].shape
    # print('df ',img_size) [1,28,28]

    # build model
    # print(args.model)
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_glob = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_glob = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        # Flatten the image shape into the MLP input dimension.
        len_in = 1
        for x in img_size:
            # print('x',x)
            len_in *= x
        net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
        # SCAFFOLD addition: a second network holding the global control variate c.
        control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    # Put the global model in training mode.
    net_glob.train()
    print(net_glob)
    control_weights = control_global.state_dict()
    # copy weights of the global model and of the global control variate.
    w_glob = net_glob.state_dict()
    c_glob = copy.deepcopy(net_glob.state_dict())
    # print(w_glob)
    # training bookkeeping
    loss_train = []
    accuracy = []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    best_loss = None
    val_acc_list, net_list = [], []
    count = 0, 0
    test_acc_list = []
    if args.all_clients:
        print("Aggregation over all clients")
        w_locals = [w_glob for i in range(args.num_users)]
    # SCAFFOLD addition: one local control variate network per client,
    # all initialised from the global control weights.
    else:
        c_local = [MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) for i in
                   range(args.num_users)]
        for net in c_local:
            net.load_state_dict(control_weights)
    # Accumulator for the aggregated control-variate update (same keys as the model).
    delta_c = copy.deepcopy(net_glob.state_dict())
    # delta_x = copy.deepcopy(net_glob.state_dict())
    # with open("test.txt", "w") as f:
    #     for i in range(0, len(c_local)):
    #         for k,v in c_local[i].state_dict().items():
    #             f.write(f"{k},{v}\n".format(k,v))
    # with open("test.txt", "a") as f:
    #     for i in range(0, len(w_locals)):
    #         for k, v in w_locals[i].items():
    #             f.write(f"{k},{v}\n".format(k, v))
    # print("why?")
    for iter in range(args.epochs):
        # Reset the control-variate accumulator at the start of each round.
        for i in delta_c:
            delta_c[i] = 0.0
        # for i in delta_x:
        #     delta_x[i] = 0.0
        loss_locals = []
        if not args.all_clients:
            w_locals = []
        # m = number of clients sampled this round (at least one).
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        for idx in idxs_users:
            # Local training (SGD with momentum inside LocalUpdate).
            local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss, local_delta_c, local_delta, control_local_w = local.train(net=copy.deepcopy(net_glob).to(args.device), control_local
            = c_local[idx], control_global=control_global, rank=rank, device_id=device_id, size=args.world_size)
            # After the first round, persist the client's updated control variate.
            if iter != 0:
                c_local[idx].load_state_dict(control_local_w)
            if args.all_clients:
                w_locals[idx] = copy.deepcopy(w)
            else:
                w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
            # Accumulate per-client contributions to the control-variate delta.
            # NOTE(review): round 0 uses local_delta_c, later rounds the raw
            # weights w — confirm this matches the intended SCAFFOLD update.
            for i in delta_c:
                if iter != 0:
                    delta_c[i] += w[i]
                else:
                    delta_c[i] += local_delta_c[i]
                # delta_x[i] += local_delta[i]
        # update the delta C: average over the m sampled clients.
        for i in delta_c:
            delta_c[i] /= m
            # delta_x[i] /= m
        # update global weights by federated averaging.
        w_glob = FedAvg(w_locals)
        # SCAFFOLD addition: move the global control variate toward the
        # averaged client deltas, scaled by the sampling fraction m/N.
        # w_glob = net_glob.state_dict()
        control_global_w = control_global.state_dict()
        for i in control_global_w:
            if iter != 0:
                # w_glob[i] = delta_x[i]
                # else:
                # w_glob[i] += delta_x[i]
                control_global_w[i] += (m / args.num_users) * delta_c[i]
        # copy aggregated weights back into the global model.
        net_glob.load_state_dict(w_glob)
        control_global.load_state_dict(control_global_w)
        # print average round loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
        loss_train.append(loss_avg)
        # acc_train, loss_train = test_img(net_glob, dataset_train, args)
        acc_test, loss_test = test_img(net_glob, dataset_test, args)
        accuracy.append(acc_test)
        # NOTE(review): this loop recreates LocalUpdate with the *last* idx for
        # every client and discards it; it only serves to free GPU memory.
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            torch.cuda.empty_cache()
    # net_glob.eval()
    # print("Training accuracy: {:.2f}".format(acc_train))
    # print("Testing accuracy: {:.2f}".format(acc_test))
    #######################################################################################################################
    # Baseline: plain FedAvg with an identically-constructed fresh model.
    #######################################################################################################################
    # build model
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_globF = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_globF = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        len_in = 1
        for x in img_size:
            len_in *= x
        net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    print(net_globF)
    net_globF.train()
    # copy weights
    w_globF = net_globF.state_dict()
    # training bookkeeping for the FedAvg baseline
    loss_trainF = []
    accuracyF = []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    best_loss = None
    val_acc_list, net_list = [], []
    if args.all_clients:
        print("Aggregation over all clients")
        w_localsF = [w_globF for i in range(args.num_users)]
    for iter in range(args.epochs):
        loss_locals = []
        if not args.all_clients:
            w_localsF = []
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        for idx in idxs_users:
            localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device))
            if args.all_clients:
                w_localsF[idx] = copy.deepcopy(w)
            else:
                w_localsF.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
        # update global weights
        w_globF = FedAvg(w_localsF)
        # copy weight to net_globF
        net_globF.load_state_dict(w_globF)
        # print loss
        loss_avgF = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF))
        loss_trainF.append(loss_avgF)
        acc_test, loss_test = test_img(net_globF, dataset_test, args)
        accuracyF.append(acc_test)
    # plot train-loss curves for both algorithms
    plt.figure()
    print(loss_train, loss_trainF)
    plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2)
    plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg', zorder=1)
    plt.ylabel('train_loss')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid))
    # final evaluation of the SCAFFOLD-trained model
    net_glob.eval()
    acc_train, loss_train = test_img(net_glob, dataset_train, args)
    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    print("Training accuracy: {:.2f}".format(acc_train))
    print("Testing accuracy: {:.2f}".format(acc_test))
    # plot test-accuracy curves for both algorithms
    plt.figure()
    # plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r')
    plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2)
    plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1)
    plt.ylabel('test_acc')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
| 35.033846 | 136 | 0.584578 |
43c3bca28b83f4b20caa188f5ac7f59f03173404 | 2,085 | py | Python | b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py | gkazla/B.LambdaLayerCommon | 1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980 | [
"Apache-2.0"
] | null | null | null | b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py | gkazla/B.LambdaLayerCommon | 1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980 | [
"Apache-2.0"
] | null | null | null | b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py | gkazla/B.LambdaLayerCommon | 1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980 | [
"Apache-2.0"
] | null | null | null | from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion
from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root
| 47.386364 | 129 | 0.601918 |
43c4a0c547cce9ae68639184c6cd8640efc21e50 | 857 | py | Python | tests/metarl/tf/baselines/test_baselines.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | [
"MIT"
] | 2 | 2021-02-07T12:14:52.000Z | 2021-07-29T08:07:22.000Z | tests/metarl/tf/baselines/test_baselines.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | [
"MIT"
] | null | null | null | tests/metarl/tf/baselines/test_baselines.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | [
"MIT"
] | null | null | null | """
This script creates a test that fails when
metarl.tf.baselines failed to initialize.
"""
import tensorflow as tf
from metarl.envs import MetaRLEnv
from metarl.tf.baselines import ContinuousMLPBaseline
from metarl.tf.baselines import GaussianMLPBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
| 31.740741 | 76 | 0.772462 |
43c4fed77cd489496d3337fe3e83cfcc13582afb | 2,390 | py | Python | api/files/api/app/monthly_report.py | trackit/trackit-legacy | 76cfab7941eddb9d390dd6c7b9a408a9ad4fc8da | [
"Apache-2.0"
] | 2 | 2018-02-01T09:18:05.000Z | 2020-03-12T18:11:11.000Z | api/files/api/app/monthly_report.py | trackit/trackit-legacy | 76cfab7941eddb9d390dd6c7b9a408a9ad4fc8da | [
"Apache-2.0"
] | null | null | null | api/files/api/app/monthly_report.py | trackit/trackit-legacy | 76cfab7941eddb9d390dd6c7b9a408a9ad4fc8da | [
"Apache-2.0"
] | 5 | 2018-05-11T10:32:52.000Z | 2021-05-26T12:09:47.000Z | import jinja2
import json
from send_email import send_email
from app.models import User, MyResourcesAWS, db
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from sqlalchemy import desc
import subprocess
import datetime
from flask import render_template
| 47.8 | 205 | 0.65523 |
43c657c522f9cb22a9a0ca2bb0912e5da035332c | 7,309 | py | Python | slow_tests/boot_test.py | rdturnermtl/mlpaper | 5da5cb7b3a56d3cfdc7162d01fac2679c9050e76 | [
"Apache-2.0"
] | 9 | 2020-07-23T02:12:48.000Z | 2021-06-24T08:19:08.000Z | slow_tests/boot_test.py | rdturnermtl/benchmark_tools | 5da5cb7b3a56d3cfdc7162d01fac2679c9050e76 | [
"Apache-2.0"
] | 14 | 2017-11-29T04:17:04.000Z | 2018-03-07T00:35:00.000Z | slow_tests/boot_test.py | rdturnermtl/mlpaper | 5da5cb7b3a56d3cfdc7162d01fac2679c9050e76 | [
"Apache-2.0"
] | 1 | 2017-12-29T01:46:31.000Z | 2017-12-29T01:46:31.000Z | # Ryan Turner (turnerry@iro.umontreal.ca)
from __future__ import division, print_function
from builtins import range
import numpy as np
import scipy.stats as ss
import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d
_FPR = FPR / 3.0 # Divide by number of test funcs
def test_boot_EB_and_test(runs=100):
    """Arguably this should do out to its own file since it tests bt core."""
    # Draw a random true distribution, then repeatedly sample from it and
    # tally how often each of the three bootstrap checks fails.
    true_mean = np.random.randn()
    true_sd = np.abs(np.random.randn())
    sample_size = 201
    confidence = 0.95
    fail = [0, 0, 0]
    for _ in range(runs):
        draws = true_mean + true_sd * np.random.randn(sample_size)
        outcomes = run_trial(draws, true_mean)
        for slot, failed in enumerate(outcomes):
            fail[slot] += failed
    # Under a correct implementation each check fails at rate 1 - confidence.
    expect_p_fail = 1.0 - confidence
    print("boot mean and test")
    fail_check_stat(fail, runs, expect_p_fail, _FPR)
if __name__ == "__main__":
    # Fixed seed so the statistical failure-rate checks below are reproducible.
    np.random.seed(56467)
    test_boot()
    test_boot_mean()
    test_boot_EB_and_test()
    print("passed")
| 35.480583 | 118 | 0.623341 |
43c90a0a29279010bde058050d6af3ae4d07f61d | 3,047 | py | Python | core/test/test_timeseries_study.py | ajmal017/amp | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | [
"BSD-3-Clause"
] | null | null | null | core/test/test_timeseries_study.py | ajmal017/amp | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | [
"BSD-3-Clause"
] | null | null | null | core/test/test_timeseries_study.py | ajmal017/amp | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | [
"BSD-3-Clause"
] | null | null | null | from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
| 33.855556 | 72 | 0.628159 |
43cb99a95c79677af08d364cd292e9e06fb31368 | 718 | py | Python | util.py | takat0m0/infoGAN | bc3ba0d4e407851e97f49322add98ea2e7e429de | [
"MIT"
] | null | null | null | util.py | takat0m0/infoGAN | bc3ba0d4e407851e97f49322add98ea2e7e429de | [
"MIT"
] | null | null | null | util.py | takat0m0/infoGAN | bc3ba0d4e407851e97f49322add98ea2e7e429de | [
"MIT"
] | null | null | null | #! -*- coding:utf-8 -*-
import os
import sys
import cv2
import numpy as np
| 23.933333 | 64 | 0.639276 |
43cc7a30161b57bb1e1d6f7efc6e267ff0a84af5 | 471 | py | Python | myhoodApp/migrations/0002_healthfacilities_hospital_image.py | MutuaFranklin/MyHood | 6ddd21c4a67936c8926d6f5a8665a06edf81f39e | [
"MIT"
] | null | null | null | myhoodApp/migrations/0002_healthfacilities_hospital_image.py | MutuaFranklin/MyHood | 6ddd21c4a67936c8926d6f5a8665a06edf81f39e | [
"MIT"
] | null | null | null | myhoodApp/migrations/0002_healthfacilities_hospital_image.py | MutuaFranklin/MyHood | 6ddd21c4a67936c8926d6f5a8665a06edf81f39e | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-23 20:01
import cloudinary.models
from django.db import migrations
| 23.55 | 111 | 0.647558 |
43cc95eb28ba86bd35c1811cb4456f10d8f69c56 | 380 | py | Python | forecasting_algorithms/Multiple_Timeseries/VAR/var.py | ans682/SafePredict_and_Forecasting | 30ac5a0b665fce090567476bc07b54489b2f3d0f | [
"BSD-3-Clause"
] | 1 | 2021-08-05T23:01:47.000Z | 2021-08-05T23:01:47.000Z | forecasting_algorithms/Multiple_Timeseries/VAR/var.py | ans682/SafePredict_and_Forecasting | 30ac5a0b665fce090567476bc07b54489b2f3d0f | [
"BSD-3-Clause"
] | 1 | 2021-12-22T08:26:13.000Z | 2021-12-22T08:26:13.000Z | forecasting_algorithms/Multiple_Timeseries/VAR/var.py | ans682/SafePredict_and_Forecasting | 30ac5a0b665fce090567476bc07b54489b2f3d0f | [
"BSD-3-Clause"
] | null | null | null | # VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
v1 = i + random()
v2 = v1 + random()
row = [v1, v2]
data.append(row)
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
| 22.352941 | 51 | 0.697368 |
43ccba90b50389b99008103e1fcff4ea674ca290 | 2,140 | py | Python | candidate-scrape.py | jonykarki/hamroscraper | a7e34a9cdca89be10422d045f1ed34e9956bd75f | [
"MIT"
] | 2 | 2019-09-23T23:41:44.000Z | 2019-10-06T03:13:17.000Z | candidate-scrape.py | jonykarki/hamroscraper | a7e34a9cdca89be10422d045f1ed34e9956bd75f | [
"MIT"
] | null | null | null | candidate-scrape.py | jonykarki/hamroscraper | a7e34a9cdca89be10422d045f1ed34e9956bd75f | [
"MIT"
] | 4 | 2019-11-26T18:29:20.000Z | 2021-01-22T06:30:20.000Z | import json
import urllib.request
import MySQLdb
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="", # your password
db="election")
cur = db.cursor()
# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# header
headers={'User-Agent':user_agent,}
district = input("Enter the Name of the district: ")
url = "http://election.ujyaaloonline.com/api/candidates?district=" + district
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source = response.read()
# print(source)
data = json.loads(source)
#print(data['candidates']['2']['400'][0]['cName'])
election_area = data['election_areas']
# get all the possible election-areas from the district
# data needed for the database
'''
resultno :> autoincrement
constituencyname :>
stateno :> Remove the column?
districtno :>
candidate :>
gender :> Remove the column???
votes :> set to zero for now
'''
i = 0
j = 0
for key, value in election_area.items():
area_key = key
district_name = data['district_slug']
try:
for item in data["candidates"]['1'][area_key]:
print(item['aName'])
print(item["cName"])
i = i + 1
except:
for item in data["candidates"]['2'][area_key]:
constituencyname = item['aName'].encode('utf-8')
candidatename = item["cName"].encode('utf-8')
sql = "INSERT INTO `test` (`id`, `candidatename`, `constituencyname`) VALUES (NULL, %s, %s)"
cur.execute(sql, (candidatename, constituencyname))
db.commit()
print('INSERTED ' + item["cName"] + " into the database")
j = j + 1
print(data['district_slug'] + " has " + str(i) + " candidates in provincial election")
print(data['district_slug'] + " has " + str(j) + " candidates in federal election")
print("Total: " + str(i + j) + " candidates added to the database")
| 27.792208 | 105 | 0.619159 |
43cde366d5fb7850e5493e9384c566462676fb5d | 3,101 | py | Python | sangita/hindi/lemmatizer.py | ashiscs/sangita | b90c49859339147137db1c2bdb60a1039a00c706 | [
"Apache-2.0"
] | 36 | 2017-05-30T04:41:06.000Z | 2019-02-17T08:41:10.000Z | sangita/hindi/lemmatizer.py | 07kshitij/sangita | b90c49859339147137db1c2bdb60a1039a00c706 | [
"Apache-2.0"
] | 13 | 2018-06-25T11:14:48.000Z | 2021-05-15T17:57:47.000Z | sangita/hindi/lemmatizer.py | 07kshitij/sangita | b90c49859339147137db1c2bdb60a1039a00c706 | [
"Apache-2.0"
] | 33 | 2018-06-23T21:46:39.000Z | 2022-03-01T15:55:37.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 23:28:21 2017
@author: samriddhi
"""
import re
import sangita.hindi.tokenizer as tok
import sangita.hindi.corpora.lemmata as lt
if __name__ == '__main__':
    # Demo: run each lemmatizer variant on the same sample string.
    # NOTE(review): the sample originally contained Devanagari (Hindi) text
    # that appears stripped in this export — confirm against upstream.
    input_str = ' -   -    -  .   '
    print(lookupLemmatizer(input_str))
    print(numericLemmatizer(input_str))
    print(defaultLemmatizer(input_str))
    print(Lemmatizer(input_str))
| 27.936937 | 209 | 0.507256 |
43cee9ce3aeb6af7cef400c841ab802c88461d4b | 8,148 | py | Python | gslib/tests/test_stet_util.py | ttobisawa/gsutil | ef665b590aa8e6cecfe251295bce8bf99ea69467 | [
"Apache-2.0"
] | null | null | null | gslib/tests/test_stet_util.py | ttobisawa/gsutil | ef665b590aa8e6cecfe251295bce8bf99ea69467 | [
"Apache-2.0"
] | null | null | null | gslib/tests/test_stet_util.py | ttobisawa/gsutil | ef665b590aa8e6cecfe251295bce8bf99ea69467 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stet_util.py."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
from gslib import storage_url
from gslib.tests import testcase
from gslib.tests import util
from gslib.tests.util import unittest
from gslib.utils import execution_util
from gslib.utils import stet_util
import mock
| 40.74 | 80 | 0.689494 |
43cfdd42faa2065cb7d2cefc439413b4ed53c719 | 4,471 | py | Python | markdown_editing/tests/test_extension.py | makyo/markdown-editing | ecbc8970f4d416038f9d2c46fae22d4dbb79c647 | [
"MIT"
] | null | null | null | markdown_editing/tests/test_extension.py | makyo/markdown-editing | ecbc8970f4d416038f9d2c46fae22d4dbb79c647 | [
"MIT"
] | null | null | null | markdown_editing/tests/test_extension.py | makyo/markdown-editing | ecbc8970f4d416038f9d2c46fae22d4dbb79c647 | [
"MIT"
] | null | null | null | from markdown import markdown
from unittest import TestCase
from markdown_editing.extension import EditingExtension
| 39.566372 | 224 | 0.636547 |
43cffed323ab5de7f6be36b25de0a210ece3af09 | 15,477 | py | Python | apps/siren/test_handlers.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 15 | 2019-02-12T23:26:09.000Z | 2021-12-21T08:53:58.000Z | apps/siren/test_handlers.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 2 | 2019-01-23T21:13:12.000Z | 2019-06-28T15:45:51.000Z | apps/siren/test_handlers.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 6 | 2019-01-23T20:22:50.000Z | 2022-02-03T03:27:04.000Z | """
SIREN/DIANA basic functionality testing framework
Requires env vars:
- GMAIL_USER
- GMAIL_APP_PASSWORD
- GMAIL_BASE_NAME -- ie, abc -> abc+hobitduke@gmail.com
These env vars are set to default:
- ORTHANC_PASSWORD
- SPLUNK_PASSWORD
- SPLUNK_HEC_TOKEN
TODO: Move stuff to archive after collected
TODO: Write data into daily folder or something from mi-share ingress
TODO: Suppress dicom-simplify missing (series) creation time
"""
import time
import logging
import shutil
import io
import tempfile
from pathlib import Path
from pprint import pformat
from contextlib import redirect_stdout
from multiprocessing import Process
from datetime import datetime, timedelta
from interruptingcow import timeout
from crud.manager import EndpointManager
from crud.abc import Watcher, Trigger
from crud.endpoints import Splunk
from wuphf.endpoints import SmtpMessenger
from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir
from diana.dixel import Dixel, ShamDixel
from diana.utils.dicom import DicomLevel as DLv, DicomEventType as DEv
from wuphf.cli.string_descs import *
from diana.utils import unpack_data
from crud.utils import deserialize_dict
from diana.utils.gateways import suppress_urllib_debug
from diana.utils.endpoint.watcher import suppress_watcher_debug
from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, \
handle_file_arrived, start_watcher, tagged_studies
from trial_dispatcher import TrialDispatcher as Dispatcher
LOCAL_SERVICES = False # Set False to use UMich services
USE_GMAIL = True # Set False to use UMich smtp
DO_DIR_UPLOAD = False
CHECK_SPLUNK = False # Set False to skip long wait for dixel to index
CHECK_WATCH_STUDIES= False # Set False to skip long wait for orthanc watcher
EMAIL_DRYRUN = False # Set False to send live emails
# CONFIG
_services = "@services.yaml"
_subscriptions = "@subscriptions.yaml"
os.environ["SPLUNK_INDEX"] = "testing"
SMTP_MESSENGER_NAME = "smtp_server"
if LOCAL_SERVICES:
# Set everythin back to default
os.environ["UMICH_HOST"] = "localhost" # For testing
del os.environ["ORTHANC_USER"]
del os.environ["ORTHANC_PASSWORD"]
del os.environ["SPLUNK_USER"]
del os.environ["SPLUNK_PASSWORD"]
if USE_GMAIL:
SMTP_MESSENGER_NAME = "gmail:"
test_email_addr1 = "derek.merck@ufl.edu"
#test_email_addr1 = "ejacob@med.umich.edu"
#test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1")
# os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0]
anon_salt = "Test+Test+Test"
fkey = b'o-KzB3u1a_Vlb8Ji1CdyfTFpZ2FvdsPK4yQCRzFCcss='
msg_t = """to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n"""
notify_msg_t = "@./notify.txt.j2"
# TESTING CONfIG
test_sample_zip = os.path.abspath("../../tests/resources/dcm_zip/test.zip")
test_sample_file = os.path.abspath("../../tests/resources/dcm/IM2263")
test_sample_dir = os.path.expanduser("~/data/test") # Need to dl separately
# TESTS
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
suppress_urllib_debug()
suppress_watcher_debug()
# Create service endpoints
services = EndpointManager(serialized_ep_descs=_services)
print(pformat(services.ep_descs))
orth: ObservableOrthanc = services.get("hobit")
orth.polling_interval = 2.0
messenger: SmtpMessenger = services.get(SMTP_MESSENGER_NAME)
messenger.msg_t = msg_t
splunk: Splunk = services.get("splunk")
dcm_dir = DcmDir(path=test_sample_dir)
# Load a dixel
dixel = dcm_dir.get("HOBIT1172/IM0", file=True)
# assert( dixel )
# assert( dixel.file )
#
# # Verify that all endpoints are online
# assert( orth.check() )
# assert( messenger.check() )
# assert( splunk.check() )
#
# # Verify basic capabilities:
# # - upload
# # - anonymize
# # - index
# # - message
# # - distribute
#
# assert( test_upload_one(orth, dixel) )
# assert( test_anonymize_one(orth, dixel) )
# assert( test_index_one(splunk, dixel) )
assert( test_email_messenger(messenger) )
# assert( test_distribute(_subscriptions, messenger) )
exit()
# Verify observer daemons:
# - watch dir
# - watch orth
assert( test_watch_dir(test_sample_file) )
assert( test_watch_orthanc(dixel, orth) )
# Verify handlers:
# - directory
# - zip
# - file
# - notify
if DO_DIR_UPLOAD:
assert( test_upload_dir_handler(dcm_dir, orth) )
assert( test_upload_zip_handler(test_sample_zip, orth) )
assert( test_file_arrived_handler(test_sample_file, test_sample_zip, orth) )
assert( test_notify_handler(dixel, orth, _subscriptions, messenger, splunk) )
# Verify watcher pipeline
# - run watcher
assert( test_siren_receiver(test_sample_file, orth, _subscriptions, messenger, splunk) )
| 27.588235 | 151 | 0.648511 |
43d0fea901e478a41a7213fecbddf4d86fc4b79e | 6,735 | py | Python | deptree.py | jeking3/boost-deptree | 27eda54df2d022af17347df4ba4892c39392e474 | [
"BSL-1.0"
] | null | null | null | deptree.py | jeking3/boost-deptree | 27eda54df2d022af17347df4ba4892c39392e474 | [
"BSL-1.0"
] | null | null | null | deptree.py | jeking3/boost-deptree | 27eda54df2d022af17347df4ba4892c39392e474 | [
"BSL-1.0"
] | null | null | null | #
# Copyright (c) 2019 James E. King III
#
# Use, modification, and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
#
import json
import networkx
import re
from pathlib import Path
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Generate PlantUML dependency tree.')
    parser.add_argument('root', type=str, help='Boost root directory.')
    parser.add_argument('out', type=str, help='Output filename.')
    # Exactly one of the two report modes must be selected.
    require_one = parser.add_mutually_exclusive_group(required=True)
    require_one.add_argument('--cycles', action='store_true', help='Show direct repository dependency cycles.')
    require_one.add_argument('--from', help='Show dependencies from a given repository.')
    args = parser.parse_args()
    root = Path(args.root)
    assert root.is_dir(), "root is not a directory"
    out = Path(args.out)
    # Build the dependency graph from the Boost tree, then emit the report.
    tree = BoostDependencyTree(root, out)
    tree.load()
    if args.cycles:
        tree.report_cycles()
    else:
        # 'from' is a Python keyword, so args.from is a syntax error;
        # read the option through the namespace dict instead.
        tree.report_dependencies_from(args.__dict__["from"])
| 40.572289 | 111 | 0.515367 |
43d13fbbdf77afe2138ccc76bfc3468760cf2d47 | 7,357 | py | Python | uberbackend.py | adiHusky/uber_backend | adc78882c081f7636b809d6e1889ba3297309e20 | [
"MIT"
] | null | null | null | uberbackend.py | adiHusky/uber_backend | adc78882c081f7636b809d6e1889ba3297309e20 | [
"MIT"
] | null | null | null | uberbackend.py | adiHusky/uber_backend | adc78882c081f7636b809d6e1889ba3297309e20 | [
"MIT"
] | null | null | null | from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort
from flask_cors import CORS
# from flask import status
from datetime import date, datetime, timedelta
from calendar import monthrange
from dateutil.parser import parse
import pytz
import os
import sys
import time
import uuid
import json
import random
import string
import pathlib
import io
from uuid import UUID
from bson.objectid import ObjectId
# straight mongo access
from pymongo import MongoClient
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn="https://acea88276810494e96828c4fd0e1471f@o555579.ingest.sentry.io/5685529",
integrations=[FlaskIntegration()],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
# By default the SDK will try to use the SENTRY_RELEASE
# environment variable, or infer a git commit
# SHA as release, however you may want to set
# something more human-readable.
# release="myapp@1.0.0",
)
# mongo
# mongo_client = MongoClient('mongodb://localhost:27017/')
# SECURITY NOTE(review): the connection string below embeds a username and
# password directly in source control; move these credentials to an
# environment variable or secrets manager and rotate them.
mongo_client = MongoClient(
    "mongodb+srv://Mahitha-Maddi:Mahitha%4042@cluster0.1z0g8.mongodb.net/test")
app = Flask(__name__)
# CORS(app)
# Allow cross-origin requests from any origin on every route.
# NOTE(review): a wildcard origin is convenient in development but should be
# narrowed before production deployment.
CORS(app, resources={r"/*": {"origins": "*"}})
# Absolute path of the directory containing this module.
basedir = os.path.abspath(os.path.dirname(__file__))
# Here are my datasets
# In-memory bookings store; not persisted to Mongo and not thread-safe.
bookings = dict()
################
# Apply to mongo
################
# database access layer
# endpoint to check Availability
# endpoint to create new Booking
##################
# Apply from mongo
##################
def applyRecordLevelUpdates():
    """Hook for per-record data migrations applied when reading from Mongo.

    No record-level transformations are currently defined, so this is an
    intentional no-op that always returns None.
    """
    return None
##################
# ADMINISTRATION #
##################
# This runs once before the first single request
# Used to bootstrap our collections
# This runs once before any request
############################
# INFO on containerization #
############################
# To containerize a flask app:
# https://pythonise.com/series/learning-flask/building-a-flask-app-with-docker-compose
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| 29.079051 | 124 | 0.652984 |
43d2040db0a01d747e5d0a9ffdc2859f95f69610 | 6,359 | py | Python | sppas/sppas/src/models/acm/htkscripts.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/models/acm/htkscripts.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/models/acm/htkscripts.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | """
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.models.acm.htkscripts.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import os.path
import logging
# ---------------------------------------------------------------------------
| 32.610256 | 80 | 0.492845 |
43d3b50d90e2618726a0619c25ddcb995a36172f | 2,961 | py | Python | icekit/plugins/map/tests.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | [
"MIT"
] | 52 | 2016-09-13T03:50:58.000Z | 2022-02-23T16:25:08.000Z | icekit/plugins/map/tests.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | [
"MIT"
] | 304 | 2016-08-11T14:17:30.000Z | 2020-07-22T13:35:18.000Z | icekit/plugins/map/tests.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | [
"MIT"
] | 12 | 2016-09-21T18:46:35.000Z | 2021-02-15T19:37:50.000Z | from mock import patch
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
| 38.960526 | 381 | 0.67207 |
43d418c8d833bba41481c7b2cbeab0fbbe8f44c5 | 548 | py | Python | example/example.py | saravanabalagi/imshowtools | ea81af888c69223ff8b42b5c4b8c034483eebe21 | [
"MIT"
] | 4 | 2019-07-18T17:24:02.000Z | 2020-10-14T06:09:05.000Z | example/example.py | saravanabalagi/imshowtools | ea81af888c69223ff8b42b5c4b8c034483eebe21 | [
"MIT"
] | 1 | 2020-04-18T01:05:22.000Z | 2020-04-18T01:10:53.000Z | example/example.py | saravanabalagi/imshowtools | ea81af888c69223ff8b42b5c4b8c034483eebe21 | [
"MIT"
] | null | null | null | from imshowtools import imshow
import cv2
if __name__ == '__main__':
    # cv2.imread returns images in BGR channel order, hence mode='BGR' below.
    image_lenna = cv2.imread("lenna.png")
    imshow(image_lenna, mode='BGR', window_title="LennaWindow", title="Lenna")
    image_lenna_bgr = cv2.imread("lenna_bgr.png")
    # Per-image modes/titles can be given as lists, one entry per image.
    imshow(image_lenna, image_lenna_bgr, mode=['BGR', 'RGB'], title=['lenna_rgb', 'lenna_bgr'])
    # Many images at once: imshow lays them out in a grid automatically.
    imshow(*[image_lenna for _ in range(12)], title=["Lenna" for _ in range(12)], window_title="LennaWindow")
    imshow(*[image_lenna for _ in range(30)], title="Lenna", padding=(1, 1, 0, (0, 0, 0.8, 0.8)))
| 39.142857 | 109 | 0.678832 |
43d619ff813d6467445c26ac811f7e5c110c5dd3 | 729 | py | Python | terminalone/models/concept.py | amehta1/t1-python | 4f7eb0bec7671b29baf3105b8cafafb373107e7b | [
"Apache-2.0"
] | 24 | 2015-07-09T18:49:10.000Z | 2021-06-07T18:36:58.000Z | terminalone/models/concept.py | amehta1/t1-python | 4f7eb0bec7671b29baf3105b8cafafb373107e7b | [
"Apache-2.0"
] | 100 | 2015-07-13T20:24:50.000Z | 2020-08-10T11:16:39.000Z | terminalone/models/concept.py | amehta1/t1-python | 4f7eb0bec7671b29baf3105b8cafafb373107e7b | [
"Apache-2.0"
] | 36 | 2015-07-09T18:51:48.000Z | 2022-02-14T22:44:37.000Z | # -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
| 22.78125 | 68 | 0.581619 |
43d690157e44125280f30cea5097fb9b835832b6 | 932 | py | Python | videofeed.py | dmeklund/asyncdemo | 956f193c0fa38744965362966ac7f8ef224409b4 | [
"MIT"
] | null | null | null | videofeed.py | dmeklund/asyncdemo | 956f193c0fa38744965362966ac7f8ef224409b4 | [
"MIT"
] | null | null | null | videofeed.py | dmeklund/asyncdemo | 956f193c0fa38744965362966ac7f8ef224409b4 | [
"MIT"
] | null | null | null | """
Mock up a video feed pipeline
"""
import asyncio
import logging
import sys
import cv2
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
def main():
    """Run the mock video-processing pipeline on the file named in argv[1]."""
    # Drive the process_video coroutine (defined elsewhere in this module)
    # to completion on the default event loop.
    # NOTE(review): assumes sys.argv[1] is present (presumably a video path);
    # no usage message is printed if it is missing.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(process_video(sys.argv[1]))
    logger.info("Completed")
if __name__ == '__main__':
    main()
| 22.731707 | 76 | 0.687768 |
43d763b4860a448a07b1ac979d461dd9025028b9 | 11,807 | py | Python | parsers/read_lspci_and_glxinfo.py | mikeus9908/peracotta | c54c351acae8afec250185f4bc714a2f86c47c90 | [
"MIT"
] | 3 | 2019-04-01T17:28:20.000Z | 2020-11-19T17:25:32.000Z | parsers/read_lspci_and_glxinfo.py | mikeus9908/peracotta | c54c351acae8afec250185f4bc714a2f86c47c90 | [
"MIT"
] | 142 | 2018-11-05T18:13:13.000Z | 2022-03-12T17:43:40.000Z | parsers/read_lspci_and_glxinfo.py | mikeus9908/peracotta | c54c351acae8afec250185f4bc714a2f86c47c90 | [
"MIT"
] | 10 | 2019-10-25T12:28:37.000Z | 2021-05-17T17:32:56.000Z | #!/usr/bin/python3
"""
Read "lspci -v" and "glxinfo" outputs
"""
import re
from dataclasses import dataclass
from InputFileNotFoundError import InputFileNotFoundError
if __name__ == "__main__":
    # CLI entry point: parse saved "lspci -v" and "glxinfo" outputs and print
    # the extracted GPU information as indented JSON on stdout.
    import argparse
    import json
    parser = argparse.ArgumentParser(description="Parse lspci/glxinfo output")
    parser.add_argument("lspci", type=str, nargs=1, help="path to lspci output")
    parser.add_argument("glxinfo", type=str, nargs=1, help="path to glxinfo output")
    parser.add_argument(
        "-d",
        "--dedicated",
        action="store_true",
        default=False,
        help="computer has dedicated GPU",
    )
    args = parser.parse_args()
    try:
        # nargs=1 wraps each positional in a one-element list, hence the [0].
        print(
            json.dumps(
                read_lspci_and_glxinfo(args.dedicated, args.lspci[0], args.glxinfo[0]),
                indent=2,
            )
        )
    except InputFileNotFoundError as e:
        # An input file was missing: report the error and exit non-zero.
        print(str(e))
        exit(1)
| 40.023729 | 121 | 0.510206 |
43d8185a62fc1d316a49c5b7d44a50853bf56a88 | 9,682 | py | Python | upload.py | snymainn/tools- | af57a1a4d0f1aecff33ab28c6f27acc893f37fbc | [
"MIT"
] | null | null | null | upload.py | snymainn/tools- | af57a1a4d0f1aecff33ab28c6f27acc893f37fbc | [
"MIT"
] | null | null | null | upload.py | snymainn/tools- | af57a1a4d0f1aecff33ab28c6f27acc893f37fbc | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
from loglib import SNYLogger
import ftplib
import argparse
import re
import os
import calendar
import time
#GET LOCAL FILELIST
#
# login to ftp server
#
#
# get remote files
#
# Command-line interface: FTP credentials plus local/remote sync locations.
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--host", help="ftp hostname", required=True)
parser.add_argument("-u", "--user", help="username on ftp server", required=True)
parser.add_argument("-p", "--password", help="password", required=True)
parser.add_argument("-d", "--debug",
                    help="print debug to terminal, default 0, use multiple times to increase verbosity, i.e. -d -d",
                    action="count")
parser.add_argument("-b", "--basedir", help="Toplevel directory on ftp server, default www")
parser.add_argument("-t", "--path", help="Local toplevel directory, default ., i.e. current dir")
parser.add_argument("-s", "--skipfile", help="Do not upload files in <skipfile>, default name upload.skip")
# Defaults supplied via set_defaults so unset options are never None.
parser.set_defaults(debug=0)
parser.set_defaults(skipfile="upload.skip")
parser.set_defaults(basedir="www")
parser.set_defaults(path=".")
args = parser.parse_args()
# Rotating logger; stdout verbosity follows the -d occurrence count.
log = SNYLogger(basename="upload", size_limit=10, no_logfiles=2, stdout=args.debug)
# read_skipfile/ftp_login/sync_files are presumably defined earlier in this
# script (not visible in this excerpt).
skiplines = read_skipfile(args.skipfile, log)
ftp = ftp_login(args, log)
sync_files(ftp, args, skiplines, args.path, args.basedir, log)
# Close the FTP session cleanly once the sync completes.
ftp.quit()
| 37.968627 | 122 | 0.567858 |
43d87b5ab1e5e10305ebbe366e85481beb47273f | 2,637 | py | Python | chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | 1 | 2015-12-22T00:53:18.000Z | 2015-12-22T00:53:18.000Z | chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
"""
Classify oncodrive gene results and prepare for combination
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
* Input:
- oncodrive_ids: The mrna.oncodrive_genes to process
* Output:
- combinations: The mrna.combination prepared to be calculated
* Entities:
- mrna.oncodrive_genes
- mrna.combination
"""
import uuid
import json
from wok.task import Task
from wok.element import DataElement
from intogen.data.entity.server import EntityServer
from intogen.data.entity import types
if __name__ == "__main__":
Task(run).start()
| 21.975 | 115 | 0.680319 |
43d8dcfde4fc817f885eb2d557c4f9603d6da4be | 86 | py | Python | src/FunctionApps/DevOps/tests/test_get_ip.py | CDCgov/prime-public-health-data-infrastructure | 7e4849c3a486a84e94765bf0023b80261c510c57 | [
"Apache-2.0"
] | 3 | 2022-02-24T18:16:39.000Z | 2022-03-29T20:21:41.000Z | src/FunctionApps/DevOps/tests/test_get_ip.py | CDCgov/prime-public-health-data-infrastructure | 7e4849c3a486a84e94765bf0023b80261c510c57 | [
"Apache-2.0"
] | 17 | 2022-02-08T17:13:55.000Z | 2022-03-28T16:49:00.000Z | src/FunctionApps/DevOps/tests/test_get_ip.py | CDCgov/prime-public-health-data-infrastructure | 7e4849c3a486a84e94765bf0023b80261c510c57 | [
"Apache-2.0"
] | 3 | 2022-02-27T23:12:50.000Z | 2022-03-17T04:51:47.000Z | def test_get_ip_placeholder():
"""placeholder so pytest does not fail"""
pass
| 21.5 | 45 | 0.697674 |
43d92304705312e029e4656dd5bbcccaf8cbee7d | 861 | py | Python | data/models/svm_benchmark.py | Laurenhut/Machine_Learning_Final | 4fca33754ef42acde504cc64e6bbe4e463caadf8 | [
"MIT"
] | null | null | null | data/models/svm_benchmark.py | Laurenhut/Machine_Learning_Final | 4fca33754ef42acde504cc64e6bbe4e463caadf8 | [
"MIT"
] | null | null | null | data/models/svm_benchmark.py | Laurenhut/Machine_Learning_Final | 4fca33754ef42acde504cc64e6bbe4e463caadf8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from sklearn import svm
import csv_io
# Script entry point.  main() is expected to be defined earlier in this
# module (not visible in this excerpt).
if __name__=="__main__":
    main()
| 31.888889 | 72 | 0.615563 |
43d983edaa81a2f049c07647c3d3908b2dea574f | 1,605 | py | Python | configs/utils/config_generator.py | user-wu/SOD_eval_metrics | d5b8804580cb52a4237c8e613818d10591dc6597 | [
"MIT"
] | null | null | null | configs/utils/config_generator.py | user-wu/SOD_eval_metrics | d5b8804580cb52a4237c8e613818d10591dc6597 | [
"MIT"
] | null | null | null | configs/utils/config_generator.py | user-wu/SOD_eval_metrics | d5b8804580cb52a4237c8e613818d10591dc6597 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from matplotlib import colors
# max = 148
# Module-private, lazily consumed iterator of hex color codes, sorted so
# colors are handed out in a deterministic order.
# NOTE(review): the filter below is effectively a no-op.  The clauses are
# joined with `or`, so any name that is not literally "red"/"white" OR does
# not start with "light" OR contains "gray" passes -- that is every entry in
# colors.cnames (consistent with the "max = 148" count above, the full list
# of CSS named colors).  The intended predicate was presumably
#     name not in ["red", "white"] and (not name.startswith("light") or "gray" in name)
# i.e. drop red/white and the low-contrast "light*" shades; confirm the
# intent before changing the grouping.
_COLOR_Genarator = iter(
    sorted(
        [
            color
            for name, color in colors.cnames.items()
            if name not in ["red", "white"] or not name.startswith("light") or "gray" in name
        ]
    )
)
| 29.181818 | 93 | 0.598754 |
43db9748cf12932e64e00e512404058350f2661e | 1,151 | py | Python | core/sms_service.py | kartik1000/jcc-registration-portal | 053eade1122fa760ae112a8599a396d68dfb16b8 | [
"MIT"
] | null | null | null | core/sms_service.py | kartik1000/jcc-registration-portal | 053eade1122fa760ae112a8599a396d68dfb16b8 | [
"MIT"
] | null | null | null | core/sms_service.py | kartik1000/jcc-registration-portal | 053eade1122fa760ae112a8599a396d68dfb16b8 | [
"MIT"
] | null | null | null | from urllib.parse import urlencode
from decouple import config
import hashlib
import requests
BASE = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
auth_key = config('AUTH_KEY')
url = 'http://sms.globehost.com/api/sendhttp.php?'
| 23.979167 | 72 | 0.644657 |
43dc511c1276023b6e01df3b43e2f8d7dd243462 | 1,522 | py | Python | scripts/fetch_images.py | Protagonistss/sanic-for-v3 | ba7e94273b77914b8d85d67cf513041ada00780d | [
"MIT"
] | null | null | null | scripts/fetch_images.py | Protagonistss/sanic-for-v3 | ba7e94273b77914b8d85d67cf513041ada00780d | [
"MIT"
] | null | null | null | scripts/fetch_images.py | Protagonistss/sanic-for-v3 | ba7e94273b77914b8d85d67cf513041ada00780d | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append(os.pardir)
import random
import time
import requests
from contextlib import closing
from help import utils
from threading import Thread
if __name__ == '__main__':
    # Run four identical crawler workers concurrently.  A list plus a loop
    # replaces the copy-pasted t1..t4 blocks; behavior is unchanged: the
    # threads are started and, as before, never explicitly joined (being
    # non-daemon they still keep the process alive until they finish).
    workers = [Thread(target=main) for _ in range(4)]
    for worker in workers:
        worker.start()
| 24.15873 | 79 | 0.660972 |
43dd49ec321203c525ba8f13879673eb4d300e9f | 3,912 | py | Python | GeneralStats/example.py | haoruilee/statslibrary | 01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a | [
"MIT"
] | 58 | 2019-02-04T13:53:16.000Z | 2022-02-24T02:59:55.000Z | GeneralStats/example.py | haoruilee/statslibrary | 01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a | [
"MIT"
] | null | null | null | GeneralStats/example.py | haoruilee/statslibrary | 01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a | [
"MIT"
] | 19 | 2019-03-21T01:54:55.000Z | 2021-12-03T13:55:16.000Z | import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.quantile(data,0.5,rowvar=True,interpolation='lower') #'midpoint'0.5
res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower') #'lower'0.5
print("data 0.5 = ",res)
print("data1 0.5 = ",res1)
res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
print("data 0.25s = ",res)
print("data1 0.25 = ",res1)
res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
print("data 0.75 = ",res)
print("data1 0.75 = ",res1)
res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
print("data 1.0 = ",res)
print("data1 1.0 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("scipy skewdata = ",res)
res1=np.array(skew(data1))
print("scipy skewdata1 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data = ",res)
print("data1 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("pandas kurtdata = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("pandas kurtdata1 = ",data1.kurt())
| 36.222222 | 109 | 0.576431 |
43ddbd75df809ab6f556d3498600ef7c94a80521 | 16,408 | py | Python | bootstrap.py | tqchen/yarn-ec2 | 303f3980ad41770011b72532ed9f7c6bbe876508 | [
"Apache-2.0"
] | 35 | 2016-02-23T19:15:46.000Z | 2021-01-01T02:57:43.000Z | bootstrap.py | tqchen/cloud-scripts | 303f3980ad41770011b72532ed9f7c6bbe876508 | [
"Apache-2.0"
] | 4 | 2016-11-12T16:49:16.000Z | 2018-11-02T21:20:23.000Z | bootstrap.py | tqchen/yarn-ec2 | 303f3980ad41770011b72532ed9f7c6bbe876508 | [
"Apache-2.0"
] | 25 | 2016-02-26T20:28:13.000Z | 2020-07-26T12:02:34.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
script to install all the necessary things
for working on a linux machine with nothing
Installing minimum dependencies
"""
import sys
import os
import logging
import subprocess
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom as minidom
import socket
import time
import pwd
###---------------------------------------------------##
# Configuration Section, will be modified by script #
###---------------------------------------------------##
node_apt_packages = [
'emacs',
'git',
'g++',
'make',
'python-numpy',
'libprotobuf-dev',
'libcurl4-openssl-dev']
# master only packages
master_apt_packages = [
'protobuf-compiler']
# List of r packages to be installed in master
master_r_packages = [
'r-base-dev',
'r-base',
'r-cran-statmod',
'r-cran-RCurl',
'r-cran-rjson'
]
# download link of hadoop.
hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz'
hadoop_dir = 'hadoop-2.8.0'
# customized installation script.
# See optional installation scripts for options.
# customized installation script for all nodes.
###---------------------------------------------------##
# Automatically set by script #
###---------------------------------------------------##
USER_NAME = 'ubuntu'
# setup variables
MASTER = os.getenv('MY_MASTER_DNS', '')
# node type the type of current node
NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge')
NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15)))
NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4'))
AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined')
AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined')
JAVA_HOME = os.getenv('JAVA_HOME')
HADOOP_HOME = os.getenv('HADOOP_HOME')
DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)]
ENVIRON = os.environ.copy()
###--------------------------------##
# Optional installation scripts. #
###--------------------------------##
### Script section ###
### Installation helpers ###
# install g++4.9, needed for regex match.
def install_java():
    """Download and unpack Oracle JDK 8u131 if it is not already present.

    Sets the module-level JAVA_HOME fallback when the environment did not
    provide one, and returns a list of (name, value) environment variables
    that callers should export.
    """
    global JAVA_HOME
    jdk_dir = 'jdk1.8.0_131'
    tarball = 'jdk-8u131-linux-x64.tar.gz'
    if not os.path.exists(jdk_dir):
        # Oracle requires the license-acceptance cookie on the download URL.
        run('wget --no-check-certificate --no-cookies'
            ' --header "Cookie: oraclelicense=accept-securebackup-cookie"'
            ' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/' + tarball)
        run('tar xf ' + tarball)
        run('rm -f ' + tarball)
    if JAVA_HOME is None:
        JAVA_HOME = os.path.abspath(jdk_dir)
    return [('JAVA_HOME', JAVA_HOME)]
# main script to install all dependencies
# Make startup script for bulding
if __name__ == '__main__':
    # Look up the target account and re-run the remaining setup as that user
    # instead of root.
    pw_record = pwd.getpwnam(USER_NAME)
    user_name = pw_record.pw_name
    user_home_dir = pw_record.pw_dir
    user_uid = pw_record.pw_uid
    user_gid = pw_record.pw_gid
    env = os.environ.copy()     # NOTE(review): env/cwd/user_name are unused below
    cwd = user_home_dir
    ENVIRON['HOME'] = user_home_dir
    # Drop privileges.  Order matters: setgid must come first, because after
    # setuid the process no longer has permission to change its group.
    os.setgid(user_gid)
    os.setuid(user_uid)
    os.chdir(user_home_dir)
    main()
| 37.461187 | 133 | 0.585629 |
43de15a64fd73557d8ace8fe63e08534f03c9747 | 400 | py | Python | intro/matplotlib/examples/plot_good.py | zmoon/scipy-lecture-notes | 75a89ddedeb48930dbdb6fe25a76e9ef0587ae21 | [
"CC-BY-4.0"
] | 2,538 | 2015-01-01T04:58:41.000Z | 2022-03-31T21:06:05.000Z | intro/matplotlib/examples/plot_good.py | zmoon/scipy-lecture-notes | 75a89ddedeb48930dbdb6fe25a76e9ef0587ae21 | [
"CC-BY-4.0"
] | 362 | 2015-01-18T14:16:23.000Z | 2021-11-18T16:24:34.000Z | intro/matplotlib/examples/plot_good.py | zmoon/scipy-lecture-notes | 75a89ddedeb48930dbdb6fe25a76e9ef0587ae21 | [
"CC-BY-4.0"
] | 1,127 | 2015-01-05T14:39:29.000Z | 2022-03-25T08:38:39.000Z | """
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Build a 5x4-inch figure at 72 dpi with axes covering almost the full canvas.
fig = plt.figure(figsize=(5, 4), dpi=72)
# add_axes also makes these the current axes, so the plt.* calls below draw
# into them even though `axes` is not referenced again.
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
# One period of a sine wave sampled at 200 points over [0, 2].
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
# With the Agg backend selected above this renders off-screen (no window).
plt.show()
| 18.181818 | 44 | 0.625 |
43de29ccab29a96dd8a22a7b82fb926f80943d99 | 4,087 | py | Python | pfio/_context.py | HiroakiMikami/pfio | 1ac997dcba7babd5d91dd8c4f2793d27a6bab69b | [
"MIT"
] | 24 | 2020-05-23T13:00:27.000Z | 2022-02-17T05:20:51.000Z | pfio/_context.py | HiroakiMikami/pfio | 1ac997dcba7babd5d91dd8c4f2793d27a6bab69b | [
"MIT"
] | 88 | 2020-05-01T06:56:50.000Z | 2022-03-16T07:15:34.000Z | pfio/_context.py | HiroakiMikami/pfio | 1ac997dcba7babd5d91dd8c4f2793d27a6bab69b | [
"MIT"
] | 9 | 2020-05-07T05:47:35.000Z | 2022-02-09T05:42:56.000Z | import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
| 35.232759 | 79 | 0.614387 |
43e09c3343b0c13466ea8190e66d19dfafb80ae6 | 9,330 | py | Python | parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion
from Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable
from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace
from Analisis_Ascendente.Instrucciones.Select.select import Select
from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use
from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime
import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import
from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3
from Analisis_Ascendente.Instrucciones.Select import selectInst
from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
from Analisis_Ascendente.Instrucciones.Drop.drop import Drop
from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase
from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable
from Analisis_Ascendente.Instrucciones.Update.Update import Update
from Analisis_Ascendente.Instrucciones.Delete.delete import Delete
from Analisis_Ascendente.Instrucciones.Select import SelectDist
from Analisis_Ascendente.Instrucciones.Type.type import CreateType
#----------------------------------Imports FASE2--------------------------
from Analisis_Ascendente.Instrucciones.Index.Index import Index
from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction
from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex
from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex
from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL
from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall
from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction
import C3D.GeneradorEtiquetas as GeneradorEtiquetas
import C3D.GeneradorTemporales as GeneradorTemporales
import Analisis_Ascendente.reportes.Reportes as Reportes
| 47.121212 | 111 | 0.653805 |
43e0bf1b8f706e0abd42a5ac8a65294eb668c3ab | 183 | py | Python | epages_client/dataobjects/enum_fetch_operator.py | vilkasgroup/epages_client | 10e63d957ee45dc5d4df741064806f724fb1be1f | [
"MIT"
] | 3 | 2018-01-26T13:44:26.000Z | 2020-05-13T13:58:19.000Z | epages_client/dataobjects/enum_fetch_operator.py | vilkasgroup/epages_client | 10e63d957ee45dc5d4df741064806f724fb1be1f | [
"MIT"
] | 53 | 2018-02-05T10:59:22.000Z | 2022-01-01T19:31:08.000Z | epages_client/dataobjects/enum_fetch_operator.py | vilkasgroup/epages_client | 10e63d957ee45dc5d4df741064806f724fb1be1f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
| 18.3 | 44 | 0.650273 |
43e232a6058aefed0715e6e5fea4ed4fd550c388 | 6,067 | py | Python | pyhwpscan/hwp_scan.py | orca-eaa5a/dokkaebi_scanner | 756314376e2cbbce6c03fd908ebd0b8cc27aa7fc | [
"MIT"
] | null | null | null | pyhwpscan/hwp_scan.py | orca-eaa5a/dokkaebi_scanner | 756314376e2cbbce6c03fd908ebd0b8cc27aa7fc | [
"MIT"
] | 1 | 2022-02-17T15:01:29.000Z | 2022-02-20T07:15:31.000Z | pyhwpscan/hwp_scan.py | orca-eaa5a/dokkaebi_scanner | 756314376e2cbbce6c03fd908ebd0b8cc27aa7fc | [
"MIT"
] | null | null | null | from threading import current_thread
from jsbeautifier.javascript.beautifier import remove_redundant_indentation
from pyparser.oleparser import OleParser
from pyparser.hwp_parser import HwpParser
from scan.init_scan import init_hwp5_scan
from scan.bindata_scanner import BinData_Scanner
from scan.jscript_scanner import JS_Scanner
from scan.paratext_scanner import ParaText_Scanner
import zipfile
import os
import sys
import platform
from common.errors import *
from utils.dumphex import print_hexdump
js_scanner = None
bindata_scanner = None
paratext_scanner = None
_platform = None
binary_info = {
"type": "",
"p": None
} | 29.309179 | 102 | 0.543926 |
43e2d67fdf43b1951abb85a9aaab6711fb8852be | 1,132 | py | Python | tests/core/test_plugins.py | franalgaba/nile | f771467f27f03c8d20b8032bac64b3ab60436d3c | [
"MIT"
] | null | null | null | tests/core/test_plugins.py | franalgaba/nile | f771467f27f03c8d20b8032bac64b3ab60436d3c | [
"MIT"
] | null | null | null | tests/core/test_plugins.py | franalgaba/nile | f771467f27f03c8d20b8032bac64b3ab60436d3c | [
"MIT"
] | null | null | null | """
Tests for plugins in core module.
Only unit tests for now.
"""
from unittest.mock import patch
import click
from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit
| 22.64 | 82 | 0.681095 |
43e3929f6d656cd5f3e6cf6054493ace5b92bd70 | 1,255 | py | Python | history/tests.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 24 | 2017-03-19T16:17:37.000Z | 2021-11-07T15:35:33.000Z | history/tests.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 117 | 2016-04-19T12:35:10.000Z | 2022-02-22T13:19:05.000Z | history/tests.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 11 | 2017-08-08T12:11:39.000Z | 2021-12-08T05:34:06.000Z | from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from devices.models import Device
from users.models import Lageruser
| 34.861111 | 88 | 0.67251 |
78d8d23f31a9ec6e42dd56f7cc23f8c31fbd70c2 | 376 | py | Python | django_git_info/management/commands/get_git_info.py | spapas/django-git | a62215d315263bce5d5d0afcfa14152601f76901 | [
"MIT"
] | 1 | 2019-03-15T10:32:21.000Z | 2019-03-15T10:32:21.000Z | django_git_info/management/commands/get_git_info.py | spapas/django-git | a62215d315263bce5d5d0afcfa14152601f76901 | [
"MIT"
] | null | null | null | django_git_info/management/commands/get_git_info.py | spapas/django-git | a62215d315263bce5d5d0afcfa14152601f76901 | [
"MIT"
] | 1 | 2016-03-25T03:57:49.000Z | 2016-03-25T03:57:49.000Z | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from django_git_info import get_git_info
| 26.857143 | 65 | 0.656915 |
78db0363110019cfe555b18f1fdc95de024b7945 | 19,306 | py | Python | mevis/_internal/conversion.py | robert-haas/mevis | 1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327 | [
"Apache-2.0"
] | 2 | 2022-01-12T23:08:52.000Z | 2022-01-12T23:21:23.000Z | mevis/_internal/conversion.py | robert-haas/mevis | 1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327 | [
"Apache-2.0"
] | null | null | null | mevis/_internal/conversion.py | robert-haas/mevis | 1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327 | [
"Apache-2.0"
] | null | null | null | from collections.abc import Callable as _Callable
import networkx as _nx
from opencog.type_constructors import AtomSpace as _AtomSpace
from .args import check_arg as _check_arg
def convert(data, graph_annotated=True, graph_directed=True,
            node_label=None, node_color=None, node_opacity=None, node_size=None, node_shape=None,
            node_border_color=None, node_border_size=None,
            node_label_color=None, node_label_size=None, node_hover=None, node_click=None,
            node_image=None, node_properties=None,
            edge_label=None, edge_color=None, edge_opacity=None, edge_size=None,
            edge_label_color=None, edge_label_size=None, edge_hover=None, edge_click=None):
    """Convert an Atomspace or list of Atoms to a NetworkX graph with annotations.

    Several arguments accept a Callable.

    - In case of node annotations, the Callable gets an Atom as input,
      which the node represents in the graph.
      The Callable needs to return one of the other types accepted by the argument,
      e.g. ``str`` or ``int``/``float``.
    - In case of edge annotations, the Callable gets two Atoms as input,
      which the edge connects in the graph.
      The Callable needs to return one of the other types accepted by the argument,
      e.g. ``str`` or ``int``/``float``.

    Several arguments accept a color, which can be in following formats:

    - Name: ``"black"``, ``"red"``, ``"green"``, ...
    - Color code

      - 6 digit hex RGB code: ``"#05ac05"``
      - 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``)

    Parameters
    ----------
    data : Atomspace, list of Atoms
        Input that gets converted to a graph.
    graph_annotated : bool
        If ``False``, no annotations are added to the graph. This could be used for
        converting large AtomSpaces quickly to graphs that use less RAM and can
        be exported to smaller files (e.g. also compressed as gml.gz) for inspection
        with other tools.
    graph_directed : bool
        If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created.
    node_label : str, Callable
        Set a label for each node, which is shown as text below it.
    node_color : str, Callable
        Set a color for each node, which becomes the fill color of its shape.
    node_opacity : float between 0.0 and 1.0
        Set an opacity for each node, which becomes the opacity of its shape.
        Caution: This is only supported by d3.
    node_size : int, float, Callable
        Set a size for each node, which becomes the height and width of its shape.
    node_shape : str, Callable
        Set a shape for each node, which is some geometrical form that has the
        node coordinates in its center.
        Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"``
    node_border_color : str, Callable
        Set a border color for each node, which influences the border drawn around its shape.
    node_border_size : int, float, Callable
        Set a border size for each node, which influences the border drawn around its shape.
    node_label_color : str, Callable
        Set a label color for each node, which determines the font color
        of the text below the node.
    node_label_size : int, float, Callable
        Set a label size for each node, which determines the font size
        of the text below the node.
    node_hover : str, Callable
        Set a hover text for each node, which shows up besides the mouse cursor
        when hovering over a node.
    node_click : str, Callable
        Set a click text for each node, which shows up in a div element below the plot
        when clicking on a node and can easily be copied and pasted.
    node_image : str, Callable
        Set an image for each node, which appears within its shape.
        Possible values:

        - URL pointing to an image
        - Data URL encoding the image
    node_properties : str, dict, Callable
        Set additional properties for each node, which may not immediately be translated
        into a visual element, but can be chosen in the data selection menu in the
        interactive HTML visualizations to map them on some plot element.
        These properties also appear when exporting a graph to a file in a format
        such as GML and may be recognized by external visualization tools.
        Note that a Callable needs to return a dict in this case, and each key becomes
        a property, which is equivalent to the other properties such as node_size and
        node_color.
        Special cases:

        - ``node_properties="tv"`` is a shortcut for using a function that returns
          ``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}``
        - Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node coordinates.

        Examples:

        - ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the
          JavaScript layout algorithm does not influence it, but the nodes remain
          free to move in the y and z directions.
        - ``lambda atom: dict(x=2.0) if atom.is_node() else None``:
          This fixes the x coordinate of each Atom of type Node to 2.0
          but allows each Atom of type Link to move freely.
        - ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)``
          This fixes the y coordinates of Atoms at different heights. Atoms of type Node
          are put at the bottom and Atoms of type Link are ordered by the number of their
          outgoing edges. The results is a hierarchical visualization that has some
          similarity with the "dot" layout.
        - ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``:
          This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100.
          The results is a visualization with two lines of nodes that has some
          similarity with the "bipartite" layout.
    edge_label : str, Callable
        Set a label for each edge, which becomes the text plotted in the middle of the edge.
    edge_color : str, Callable
        Set a color for each edge, which becomes the color of the line representing the edge.
    edge_opacity : int, float, Callable
        Set an opacity for each edge, which allows to make it transparent to some degree.
    edge_size : int, float, Callable
        Set a size for each edge, which becomes the width of the line representing the edge.
    edge_label_color : str, Callable
        Set a color for each edge label, which becomes the color of the text in the midpoint
        of the edge.
    edge_label_size : int, float, Callable
        Set a size for each edge label, which becomes the size of the text in the midpoint
        of the edge.
    edge_hover : str, Callable
    edge_click : str, Callable

    Returns
    -------
    graph : NetworkX Graph or DiGraph
        Whether an undirected or directed graph is created depends on the argument "directed".
    """
    # Argument processing
    _check_arg(data, 'data', (list, _AtomSpace))
    _check_arg(graph_annotated, 'graph_annotated', bool)
    _check_arg(graph_directed, 'graph_directed', bool)
    _check_arg(node_label, 'node_label', (str, _Callable), allow_none=True)
    _check_arg(node_color, 'node_color', (str, _Callable), allow_none=True)
    _check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True)
    _check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True)
    _check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True)
    _check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True)
    _check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True)
    _check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True)
    _check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True)
    _check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True)
    _check_arg(node_click, 'node_click', (str, _Callable), allow_none=True)
    _check_arg(node_image, 'node_image', (str, _Callable), allow_none=True)
    _check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True)
    _check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True)
    _check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True)
    _check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True)
    _check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True)
    _check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True)
    _check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True)
    _check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True)
    _check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True)

    # Prepare annotation functions
    if graph_annotated:
        node_ann = prepare_node_func(
            node_label, node_color, node_opacity, node_size, node_shape, node_border_color,
            node_border_size, node_label_color, node_label_size, node_hover, node_click,
            node_image, node_properties)
        edge_ann = prepare_edge_func(
            edge_label, edge_color, edge_opacity, edge_size,
            edge_label_color, edge_label_size, edge_hover, edge_click)
    else:
        # Fix: previously only ``empty = dict()`` was assigned here, so the loops
        # below raised NameError on ``node_ann``/``edge_ann`` whenever
        # graph_annotated=False. Provide constant annotation functions instead.
        empty = dict()

        def node_ann(atom):
            return empty

        def edge_ann(atom1, atom2):
            return empty

    # Create the NetworkX graph
    graph = _nx.DiGraph() if graph_directed else _nx.Graph()
    # 0) Set graph annotations
    graph.graph['node_click'] = '$hover'  # node_click will by default show content of node_hover
    # 1) Add vertices and their annotations
    for atom in data:
        graph.add_node(to_uid(atom), **node_ann(atom))
    # 2) Add edges and their annotations (separate step to exclude edges to filtered vertices)
    for atom in data:
        uid = to_uid(atom)
        if atom.is_link():
            # for all that is incoming to the Atom
            for atom2 in atom.incoming:
                uid2 = to_uid(atom2)
                if uid2 in graph.nodes:
                    graph.add_edge(uid2, uid, **edge_ann(atom2, atom))
            # for all that is outgoing of the Atom
            for atom2 in atom.out:
                uid2 = to_uid(atom2)
                if uid2 in graph.nodes:
                    graph.add_edge(uid, uid2, **edge_ann(atom, atom2))
    return graph
def prepare_node_func(node_label, node_color, node_opacity, node_size, node_shape,
                      node_border_color, node_border_size, node_label_color, node_label_size,
                      node_hover, node_click, node_image, node_properties):
    """Prepare a function that calculates all annotations for a node representing an Atom.

    Each ``node_*`` argument may be None (use the module default), a constant
    value, or a Callable taking an Atom. The returned function maps an Atom to
    a dict of annotation-name -> value, suitable for ``graph.add_node(**...)``.
    """
    # individual node annotation functions
    node_label = use_node_def_or_str(node_label, node_label_default)
    node_color = use_node_def_or_str(node_color, node_color_default)
    node_opacity = use_node_def_or_num(node_opacity, node_opacity_default)
    node_size = use_node_def_or_num(node_size, node_size_default)
    node_shape = use_node_def_or_str(node_shape, node_shape_default)
    node_border_color = use_node_def_or_str(node_border_color, node_border_color_default)
    node_border_size = use_node_def_or_num(node_border_size, node_border_size_default)
    node_label_color = use_node_def_or_str(node_label_color, node_label_color_default)
    node_label_size = use_node_def_or_num(node_label_size, node_label_size_default)
    node_hover = use_node_def_or_str(node_hover, node_hover_default)
    node_click = use_node_def_or_str(node_click, node_click_default)
    node_image = use_node_def_or_str(node_image, node_image_default)
    # special case: additional user-defined node properties by a function that returns a dict
    if node_properties is None:
        node_properties = node_properties_default
    elif isinstance(node_properties, dict):
        # Fix: a constant dict was previously bound to an unused local; wrap it
        # in a function so it can be called uniformly below.
        val = node_properties

        def node_properties(atom, _val=val):
            return _val
    elif node_properties == 'tv':
        node_properties = node_properties_tv
    # combined node annotation function: calls each of the individual ones
    name_func = (
        ('label', node_label),
        ('color', node_color),
        ('opacity', node_opacity),
        ('size', node_size),
        ('shape', node_shape),
        ('border_color', node_border_color),
        ('border_size', node_border_size),
        ('label_color', node_label_color),
        ('label_size', node_label_size),
        ('hover', node_hover),
        ('click', node_click),
        ('image', node_image),
    )

    def func(atom):
        # NOTE(review): reconstructed combiner — the original body returned an
        # undefined name ``func`` (NameError). Collect each annotation, skip
        # None values (so library defaults apply and files stay small), then
        # merge the user-defined properties dict. Confirm against upstream.
        annotations = {}
        for name, ann_func in name_func:
            value = ann_func(atom)
            if value is not None:
                annotations[name] = value
        props = node_properties(atom)
        if props is not None:
            annotations.update(props)
        return annotations

    return func
def prepare_edge_func(edge_label, edge_color, edge_opacity, edge_size, edge_label_color,
                      edge_label_size, edge_hover, edge_click):
    """Prepare a function that calculates all annotations for an edge between Atoms.

    Each ``edge_*`` argument may be None (use the module default), a constant
    value, or a Callable taking two Atoms. The returned function maps a pair of
    Atoms to a dict of annotation-name -> value for ``graph.add_edge(**...)``.
    """
    # individual edge annotation functions
    edge_label = use_edge_def_or_str(edge_label, edge_label_default)
    edge_color = use_edge_def_or_str(edge_color, edge_color_default)
    edge_opacity = use_edge_def_or_num(edge_opacity, edge_opacity_default)
    edge_size = use_edge_def_or_num(edge_size, edge_size_default)
    edge_label_color = use_edge_def_or_str(edge_label_color, edge_label_color_default)
    edge_label_size = use_edge_def_or_num(edge_label_size, edge_label_size_default)
    edge_hover = use_edge_def_or_str(edge_hover, edge_hover_default)
    edge_click = use_edge_def_or_str(edge_click, edge_click_default)
    # combined edge annotation function: calls each of the individual ones
    name_func = (
        ('label', edge_label),
        ('color', edge_color),
        ('opacity', edge_opacity),
        ('size', edge_size),
        ('label_color', edge_label_color),
        ('label_size', edge_label_size),
        ('hover', edge_hover),
        ('click', edge_click),
    )

    def func(atom1, atom2):
        # NOTE(review): reconstructed combiner — the original body returned an
        # undefined name ``func`` (NameError). None values are skipped so that
        # library defaults apply. Confirm against upstream.
        annotations = {}
        for name, ann_func in name_func:
            value = ann_func(atom1, atom2)
            if value is not None:
                annotations[name] = value
        return annotations

    return func
def use_node_def_or_str(given_value, default_func):
    """Transform a value of type (None, str, Callable) to a node annotation function.

    Returns ``default_func`` for None, a constant function for a str,
    and the value itself if it is already a Callable.
    """
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, str):
        # Fix: the original branch only coerced the value and never assigned
        # ``func``, causing a NameError at the return below.
        value = str(given_value)

        def func(atom):
            return value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
def use_node_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) to a node annotation function.

    Returns ``default_func`` for None, a constant function returning the value
    as float for a number, and the value itself if it is already a Callable.
    """
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, (int, float)):
        # Fix: the original branch only coerced the value and never assigned
        # ``func``, causing a NameError at the return below.
        value = float(given_value)

        def func(atom):
            return value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
def use_edge_def_or_str(given_value, default_func):
    """Transform a value of type (None, str, Callable) to an edge annotation function.

    Edge annotation functions take the two Atoms the edge connects.
    Returns ``default_func`` for None, a constant function for a str,
    and the value itself if it is already a Callable.
    """
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, str):
        # Fix: the original branch only coerced the value and never assigned
        # ``func``, causing a NameError at the return below.
        value = str(given_value)

        def func(atom1, atom2):
            return value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
def use_edge_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) to an edge annotation function.

    Edge annotation functions take the two Atoms the edge connects.
    Returns ``default_func`` for None, a constant function returning the value
    as float for a number, and the value itself if it is already a Callable.
    """
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, (int, float)):
        # Fix: the original branch only coerced the value and never assigned
        # ``func``, causing a NameError at the return below.
        value = float(given_value)

        def func(atom1, atom2):
            return value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
def to_uid(atom):
    """Return a unique identifier string for *atom*, taken from its id_string()."""
    uid = atom.id_string()
    return uid
# Default functions for node annotations
# - "return None" means that the attribute and value won't be included
# to the output data, so that defaults of the JS library are used and files get smaller
# - A return of a value in some cases and None in other cases means that the
# default value of the JS library is used in None cases and again files get smaller
# Default functions for edge annotations
| 38.923387 | 97 | 0.682897 |
78db1f0ed3fd45150eca94cbff8fdb625dd1d917 | 156 | py | Python | testData/completion/classMethodCls.py | seandstewart/typical-pycharm-plugin | 4f6ec99766239421201faae9d75c32fa0ee3565a | [
"MIT"
] | null | null | null | testData/completion/classMethodCls.py | seandstewart/typical-pycharm-plugin | 4f6ec99766239421201faae9d75c32fa0ee3565a | [
"MIT"
] | null | null | null | testData/completion/classMethodCls.py | seandstewart/typical-pycharm-plugin | 4f6ec99766239421201faae9d75c32fa0ee3565a | [
"MIT"
] | null | null | null | from builtins import *
from pydantic import BaseModel
| 11.142857 | 30 | 0.647436 |
78db3efa5c77dd290cf1467f8ac973b8fc19949b | 13,168 | py | Python | watcher_metering/tests/agent/test_agent.py | b-com/watcher-metering | 7c09b243347146e5a421700d5b07d1d0a5c4d604 | [
"Apache-2.0"
] | 2 | 2015-10-22T19:44:57.000Z | 2017-06-15T15:01:07.000Z | watcher_metering/tests/agent/test_agent.py | b-com/watcher-metering | 7c09b243347146e5a421700d5b07d1d0a5c4d604 | [
"Apache-2.0"
] | 1 | 2015-10-26T13:52:58.000Z | 2015-10-26T13:52:58.000Z | watcher_metering/tests/agent/test_agent.py | b-com/watcher-metering | 7c09b243347146e5a421700d5b07d1d0a5c4d604 | [
"Apache-2.0"
] | 4 | 2015-10-10T13:59:39.000Z | 2020-05-29T11:47:07.000Z | # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
import types
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
import msgpack
import operator
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension
from watcher_metering.agent.agent import Agent
from watcher_metering.agent.measurement import Measurement
from watcher_metering.tests.agent.agent_fixtures import ConfFixture
from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller
from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller
| 40.024316 | 79 | 0.659478 |
78dce9aa3f78b6fd58cffc69a08166742b99da9b | 31,044 | py | Python | mmtbx/bulk_solvent/mosaic.py | ndevenish/cctbx_project | 1f1a2627ae20d01d403f367948e7269cef0f0217 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/bulk_solvent/mosaic.py | ndevenish/cctbx_project | 1f1a2627ae20d01d403f367948e7269cef0f0217 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/bulk_solvent/mosaic.py | ndevenish/cctbx_project | 1f1a2627ae20d01d403f367948e7269cef0f0217 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from scitbx import matrix
import math
from libtbx import adopt_init_args
import scitbx.lbfgs
from mmtbx.bulk_solvent import kbu_refinery
from cctbx import maptbx
import mmtbx.masks
import boost_adaptbx.boost.python as bp
asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext")
from libtbx import group_args
from mmtbx import bulk_solvent
from mmtbx.ncs import tncs
from collections import OrderedDict
import mmtbx.f_model
import sys
from libtbx.test_utils import approx_equal
from mmtbx import masks
from cctbx.masks import vdw_radii_from_xray_structure
ext = bp.import_ext("mmtbx_masks_ext")
mosaic_ext = bp.import_ext("mmtbx_mosaic_ext")
APPLY_SCALE_K1_TO_FOBS = False
# Utilities used by algorithm 2 ------------------------------------------------
#-------------------------------------------------------------------------------
def write_map_file(crystal_symmetry, map_data, file_name):
  """Write *map_data* to *file_name* as a CCP4/MRC map using *crystal_symmetry*."""
  from iotbx import mrcfile
  # Empty label list; unit cell and space group are taken from the symmetry object.
  empty_labels = flex.std_string([""])
  mrcfile.write_ccp4_map(
    file_name   = file_name,
    unit_cell   = crystal_symmetry.unit_cell(),
    space_group = crystal_symmetry.space_group(),
    map_data    = map_data,
    labels      = empty_labels)
def algorithm_0(f_obs, F, kt):
  """
  Grid search.

  Sequentially estimates one k_mask scale per mask in F[1:] by grid search
  against f_obs, accumulating each scaled mask contribution into the running
  calculated data. Returns [1, k_mask_1, k_mask_2, ...] (leading 1 for F[0]).
  """
  fc, f_masks = F[0], F[1:]
  # Trial k_mask values from -1 to just below 1 in steps of 1e-4.
  # NOTE: built by repeated float addition — the exact trial values depend on
  # accumulated rounding; do not replace with a closed-form range carelessly.
  k_mask_trial_range=[]
  s = -1
  while s<1:
    k_mask_trial_range.append(s)
    s+=0.0001
  r = []
  fc_data = fc.data()
  for i, f_mask in enumerate(f_masks):
    #print("mask ",i)
    assert f_obs.data().size() == fc.data().size()
    assert f_mask.data().size() == fc.data().size()
    #print (bulk_solvent.r_factor(f_obs.data(),fc_data))
    # Joint grid search over k_mask (for this mask) and an overall scale k_;
    # kt is an additional per-reflection weighting applied to all inputs.
    kmask_, k_ = \
      bulk_solvent.k_mask_and_k_overall_grid_search(
        f_obs.data()*kt,
        fc_data*kt,
        f_mask.data()*kt,
        flex.double(k_mask_trial_range),
        flex.bool(fc.data().size(),True))
    r.append(kmask_)
    # Fold this mask's scaled contribution (and the overall-scale correction)
    # into the running model before fitting the next mask.
    # NOTE(review): ``fc_data += fc_data*k_`` adds rather than rescales —
    # looks intentional per the commented r-factor check, but confirm upstream.
    fc_data += fc_data*k_ + kmask_*f_mask.data()
    #print (bulk_solvent.r_factor(f_obs.data(),fc_data + kmask_*f_mask.data(),k_))
  r = [1,]+r
  return r
def algorithm_2(i_obs, F, x, use_curvatures=True, macro_cycles=10):
  """
  Unphased one-step search.

  Iteratively refines the scale vector ``x`` (one entry per structure factor
  array in F) against intensities ``i_obs`` using the ``tg`` target calculator.
  With curvatures a Newton-like ``minimizer`` is used; otherwise a bounded
  tncs minimizer. Returns the refined parameter vector.
  """
  calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=use_curvatures)
  for it in range(macro_cycles):
    if(use_curvatures):
      m = minimizer(max_iterations=100, calculator=calculator)
    else:
      # Box constraints: first parameter (overall scale) confined near 1,
      # remaining (mask) parameters allowed in [-5, 5].
      # Commented alternatives below are previously tried bound choices.
      #upper = flex.double([1.1] + [1]*(x.size()-1))
      #lower = flex.double([0.9] + [-1]*(x.size()-1))
      upper = flex.double([1.1] + [5]*(x.size()-1))
      lower = flex.double([0.9] + [-5]*(x.size()-1))
      #upper = flex.double([10] + [5]*(x.size()-1))
      #lower = flex.double([0.1] + [-5]*(x.size()-1))
      #upper = flex.double([10] + [0.65]*(x.size()-1))
      #lower = flex.double([0.1] + [0]*(x.size()-1))
      #upper = flex.double([1] + [0.65]*(x.size()-1))
      #lower = flex.double([1] + [0]*(x.size()-1))
      #upper = flex.double([1] + [5.65]*(x.size()-1))
      #lower = flex.double([1] + [-5]*(x.size()-1))
      m = tncs.minimizer(
        potential = calculator,
        use_bounds = 2,
        lower_bound = lower,
        upper_bound = upper,
        initial_values = x).run()
    # Rebuild the calculator at the new minimum for the next macro-cycle.
    calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
  if(use_curvatures):
    # Extra polishing: alternate plain and curvature-aware minimizers.
    for it in range(10):
      m = minimizer(max_iterations=100, calculator=calculator)
      calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
      m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True)
      calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
  return m.x
def algorithm_3(i_obs, fc, f_masks):
  """
  Unphased two-step search.

  Solves a linear system built from pairwise products of structure factor
  arrays (fc plus each mask) against observed intensities, then recovers one
  positive multiplicative scale per array via the log-average formula
  (referred to below as "formula (19)"). Returns a list of scales, one per
  array in [fc] + f_masks.
  """
  F = [fc]+f_masks
  Gnm = []
  # cs maps an index pair (n, m) to its position in the flat Gnm list;
  # nm records the (n, m) pair stored at each position.
  cs = {}
  cntr=0
  nm=[]
  # Compute and store Gnm = Re(Fn * conj(Fm)) for the upper triangle (m >= n).
  for n, Fn in enumerate(F):
    for m, Fm in enumerate(F):
      if m < n:
        continue
      Gnm.append( flex.real( Fn.data()*flex.conj(Fm.data()) ) )
      cs[(n,m)] = cntr
      cntr+=1
      nm.append((n,m))
  # Keep track of indices for "upper triangular matrix vs full":
  # mirror each off-diagonal entry so cs[(j,i)] == cs[(i,j)].
  for k,v in zip(list(cs.keys()), list(cs.values())):
    i,j=k
    if i==j: continue
    else: cs[(j,i)]=v
  # Generate and solve system Ax=b, x = A_1*b.
  # Off-diagonal pairs get scale 2 because they represent both (n,m) and (m,n).
  A = []
  b = []
  for u, Gnm_u in enumerate(Gnm):
    for v, Gnm_v in enumerate(Gnm):
      scale = 2
      n,m=nm[v]
      if n==m: scale=1
      A.append( flex.sum(Gnm_u*Gnm_v)*scale )
    b.append( flex.sum(Gnm_u * i_obs.data()) )
  A = matrix.sqr(A)
  A_1 = A.inverse()
  b = matrix.col(b)
  x = A_1 * b
  # Expand Xmn from solution x: full (len(F) x len(F)) matrix of pair products.
  Xmn = []
  for n, Fn in enumerate(F):
    rows = []
    for m, Fm in enumerate(F):
      x_ = x[cs[(n,m)]]
      rows.append(x_)
    Xmn.append(rows)
  # Do formula (19): ln(K_j) = (1/N) * (sum_m ln X_jm - (1/2N) sum_nm ln X_nm).
  # NOTE(review): assumes all Xmn entries are positive (log is taken) — confirm.
  lnK = []
  for j, Fj in enumerate(F):
    t1 = flex.sum( flex.log( flex.double(Xmn[j]) ) )
    t2 = 0
    for n, Fn in enumerate(F):
      for m, Fm in enumerate(F):
        t2 += math.log(Xmn[n][m])
    t2 = t2 / (2*len(F))
    lnK.append( 1/len(F)*(t1-t2) )
  return [math.exp(x) for x in lnK]
def algorithm_4(f_obs, F, phase_source, max_cycles=100, auto_converge_eps=1.e-7,
                use_cpp=True):
  """
  Phased simultaneous search (alg4).

  Iteratively solves the normal equations for one scale per structure factor
  array in F against phase-transferred f_obs, updating the phase source from
  the scaled sum each cycle, until the scales change by less than
  ``auto_converge_eps`` or ``max_cycles`` is exceeded. Returns the scales.
  """
  fc, f_masks = F[0], F[1:]
  # Deep-copy fc so the caller's array is not affected by later use.
  fc = fc.deep_copy()
  F = [fc]+F[1:]
  # C++ version
  if(use_cpp):
    return mosaic_ext.alg4(
      [f.data() for f in F],
      f_obs.data(),
      phase_source.data(),
      max_cycles,
      auto_converge_eps)
  # Python version (1.2-3 times slower, but much more readable!)
  cntr = 0
  x_prev = None
  while True:
    # Give f_obs the phases of the current model.
    f_obs_cmpl = f_obs.phase_transfer(phase_source = phase_source)
    # Build normal matrix A (Gjn sums) and right-hand side b (Hj sums).
    A = []
    b = []
    for j, Fj in enumerate(F):
      A_rows = []
      for n, Fn in enumerate(F):
        Gjn = flex.real( Fj.data()*flex.conj(Fn.data()) )
        A_rows.append( flex.sum(Gjn) )
      Hj = flex.real( Fj.data()*flex.conj(f_obs_cmpl.data()) )
      b.append(flex.sum(Hj))
      A.extend(A_rows)
    A = matrix.sqr(A)
    A_1 = A.inverse()
    b = matrix.col(b)
    x = A_1 * b
    # Update the phase source from the newly scaled sum of all arrays.
    fc_d = flex.complex_double(phase_source.indices().size(), 0)
    for i, f in enumerate(F):
      fc_d += f.data()*x[i]
    phase_source = phase_source.customized_copy(data = fc_d)
    x_ = x[:]
    # Convergence control: stop after max_cycles, or when the largest
    # element-wise change between consecutive solutions is small enough.
    cntr+=1
    if(cntr>max_cycles): break
    if(x_prev is None): x_prev = x_[:]
    else:
      max_diff = flex.max(flex.abs(flex.double(x_prev)-flex.double(x_)))
      if(max_diff<=auto_converge_eps): break
      x_prev = x_[:]
  return x_
| 36.266355 | 107 | 0.60862 |
78ddef69c8c618801719da4ee218c45f1df458b0 | 25,941 | py | Python | mars/tensor/execution/tests/test_base_execute.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | 1 | 2018-12-26T08:37:04.000Z | 2018-12-26T08:37:04.000Z | mars/tensor/execution/tests/test_base_execute.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | null | null | null | mars/tensor/execution/tests/test_base_execute.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.tensor.execution.core import Executor
from mars import tensor as mt
from mars.tensor.expressions.datasource import tensor, ones, zeros, arange
from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \
flip, flipud, fliplr, repeat, tile, isin
from mars.tensor.expressions.merge import stack
from mars.tensor.expressions.reduction import all as tall
| 34.132895 | 110 | 0.596623 |
78ddf0916f6002f2dfd416cfa16eaf9855682728 | 77 | py | Python | comix-imagenet/init_paths.py | drumpt/Co-Mixup | 4c43f0ec873ce6c1e8ab446c7cb9e25089b9b91a | [
"MIT"
] | 86 | 2021-02-05T03:13:09.000Z | 2022-03-29T03:10:50.000Z | comix-imagenet/init_paths.py | drumpt/Co-Mixup | 4c43f0ec873ce6c1e8ab446c7cb9e25089b9b91a | [
"MIT"
] | 4 | 2021-06-01T13:07:06.000Z | 2022-02-15T03:08:30.000Z | comix-imagenet/init_paths.py | drumpt/Co-Mixup | 4c43f0ec873ce6c1e8ab446c7cb9e25089b9b91a | [
"MIT"
] | 7 | 2021-02-09T01:27:03.000Z | 2021-09-01T14:07:40.000Z | import sys
import matplotlib
# Use the non-interactive Agg backend so plotting works without a display.
matplotlib.use('Agg')
# Put the bundled 'lib' directory first on the import path.
sys.path.insert(0, 'lib')
| 15.4 | 25 | 0.753247 |
78de98de938be5cc3ac224e5095778425f0adabc | 14,828 | py | Python | members_abundances_in_out_uncertainties.py | kcotar/Gaia_clusters_potential | aee2658c40446891d31528f8dec3cec899b63c68 | [
"MIT"
] | null | null | null | members_abundances_in_out_uncertainties.py | kcotar/Gaia_clusters_potential | aee2658c40446891d31528f8dec3cec899b63c68 | [
"MIT"
] | null | null | null | members_abundances_in_out_uncertainties.py | kcotar/Gaia_clusters_potential | aee2658c40446891d31528f8dec3cec899b63c68 | [
"MIT"
] | null | null | null | import matplotlib
# Use the non-interactive Agg backend so figures can be saved without a display.
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from sys import argv
from getopt import getopt
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
# Input/output locations on the analysis cluster.
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
# Run configuration defaults; may be overridden by command-line options below.
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
    # parse input options
    opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
    # set parameters, depending on user inputs
    print(opts)
    for o, a in opts:
        if o == '--dr3':
            USE_DR3 = int(a) > 0
        if o == '--suffix':
            suffix += str(a)
        if o == '--flags':
            Q_FLAGS = int(a) > 0
        if o == '--individual':
            P_INDIVIDUAL = int(a) > 0
# Known cluster members (Cantat-Gaudin 2018) and candidate tail members.
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster members all:', len(CG_data), len(tails_data))
if USE_DR3:
    # cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
    cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
    fe_col = 'fe_h'
    teff_col = 'teff'
    q_flag = 'flag_sp'
    suffix += '_DR3'
else:
    # NOTE(review): the non-DR3 branch leaves cannon_data/fe_col/teff_col/q_flag
    # undefined, so USE_DR3=0 will crash below — confirm intended.
    pass
if Q_FLAGS:
    suffix += '_flag0'
# determine all possible simulation subdirs
# Main processing loop: for every cluster simulation directory, classify stars
# into field / initial members / ejected / tail samples, detrend abundances
# against a stellar parameter, and save a per-parameter summary plot.
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
    chdir(cluster_dir)
    print('Working on clusters in ' + cluster_dir)
    for sub_dir in glob('*'):
        # Cluster name is encoded in the first two underscore-separated tokens.
        current_cluster = '_'.join(sub_dir.split('_')[0:2])
        source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
        source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
        idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
        idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
        if '.png' in sub_dir or 'individual-abund' in sub_dir:
            continue
        print(' ')
        print(sub_dir)
        chdir(sub_dir)
        # Membership lists produced by earlier simulation steps; missing files
        # simply yield an all-False selection for that category.
        try:
            g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
            idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
        except:
            idx_init = np.full(len(cannon_data), False)
        try:
            g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
            g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
            # further refinement of results to be plotted here
            g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1.,  # [Myr] longest time (of all incarnations) inside cluster
                                               g_in_all['in_cluster_prob'] >= 68.)]  # percentage of reincarnations inside cluster
            g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
                                       g_in['in_cluster_prob'] >= 68.)]
            idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
            idx_in_no_CG = np.logical_and(idx_in,
                                          np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
        except:
            idx_in = np.full(len(cannon_data), False)
            idx_in_no_CG = np.full(len(cannon_data), False)
        try:
            g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
            # further refinement of results to be plotted here
            g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
                                         g_out['in_cluster_prob'] <= 0)]
            idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
        except:
            idx_out = np.full(len(cannon_data), False)
        chdir('..')
        if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
            print(' Some Galah lists are missing')
        # Select abundance columns from the catalogue, DR3 vs older naming.
        if USE_DR3:
            abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c]  # and ('I' in c or 'II' in c or 'III' in c)]
        else:
            abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
        # abund_cols = ['e_' + cc for cc in abund_cols]
        # rg = (0., 0.35)
        # yt = [0., 0.1, 0.2, 0.3]
        # medfix = '-snr-sigma_'
        # Plot detrended (diff_) abundances; rg/yt define the common y-range/ticks.
        abund_cols = ['diff_' + cc for cc in abund_cols]
        rg = (-0.45, 0.45)
        yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
        medfix = '-detrended-snr_'
        # ------------------------------------------------------------------------------
        # NEW: plot with parameter dependency trends
        # ------------------------------------------------------------------------------
        bs = 40
        x_cols_fig = 7
        y_cols_fig = 5
        param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
        for param in ['snr_c2_iraf']: #list(param_lims.keys()):
            cannon_data['abund_det'] = 0
            cannon_data['abund_det_elems'] = 0
            print('Estimating membership using parameter', param)
            fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
            # One panel per abundance column, laid out on a x_cols_fig-wide grid.
            for i_c, col in enumerate(abund_cols):
                # print(col)
                x_p = i_c % x_cols_fig
                y_p = int(1. * i_c / x_cols_fig)
                fit_x_param = 'teff'
                cur_abund_col = '_'.join(col.split('_')[1:])
                cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
                idx_val = np.isfinite(cannon_data[col])
                if Q_FLAGS:
                    idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
                # u1=field, u2=initial members, u3=ejected, u4=CG members, u5=tail.
                idx_u1 = np.logical_and(idx_out, idx_val)
                idx_u2 = np.logical_and(idx_init, idx_val)
                idx_u3 = np.logical_and(idx_in, idx_val)
                idx_u4 = np.logical_and(idx_cg_memb, idx_val)
                idx_u5 = np.logical_and(idx_tail, idx_val)
                # Fit abundance-vs-teff trend on initial members, then detrend
                # everything with it. fit_abund_trend/eval_abund_trend are
                # defined elsewhere in this file.
                fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
                                                     cannon_data[cur_abund_col][idx_u2],
                                                     order=3, steps=2, func='poly',
                                                     sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
                if fit_model is not None:
                    cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
                else:
                    cannon_data['diff_' + cur_abund_col] = np.nan
                ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
                                     lw=0, s=3, color='C2', label='Field')
                ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
                                     lw=0, s=3, color='C0', label='Initial')
                ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
                                     lw=0, s=3, color='C1', label='Ejected')
                if np.sum(idx_u5) > 0:
                    print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
                    ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
                                         lw=0, s=3, color='C4', label='Tail')
                label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
                ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
                                 ylim=rg,
                                 yticks=yt,)
                ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
            # Extra panel (bottom-right): raw Fe/H against the chosen parameter.
            rg = (-0.6, 0.6)
            idx_val = np.isfinite(cannon_data[teff_col])
            if Q_FLAGS:
                idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
            x_p = -1
            y_p = -1
            idx_u1 = np.logical_and(idx_out, idx_val)
            idx_u2 = np.logical_and(idx_init, idx_val)
            idx_u3 = np.logical_and(idx_in, idx_val)
            idx_u5 = np.logical_and(idx_tail, idx_val)
            sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1],
                                       lw=0, s=3, color='C2', label='Field')
            sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
                                       lw=0, s=3, color='C0', label='Initial')
            sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3],
                                       lw=0, s=3, color='C1', label='Ejected')
            fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
                                                 order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10.,
                                                 func='poly')
            if np.sum(idx_u5) > 0:
                sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5],
                                           lw=0, s=3, color='C4', label='Tail')
                # NOTE(review): legend lists sl1 twice and omits sl2 ("Initial")
                # — looks like a copy/paste slip; confirm intended.
                ax[-1, -3].legend(handles=[sl1, sl1, sl3, sl5])
            else:
                ax[-1, -3].legend(handles=[sl1, sl1, sl3])
            label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
            ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param])
            ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
            # Extra panel (second from bottom-right): parameter against age.
            x_p = -2
            y_p = -1
            ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1],
                                 lw=0, s=3, color='C2', label='Field')
            ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2],
                                 lw=0, s=3, color='C0', label='Initial')
            ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3],
                                 lw=0, s=3, color='C1', label='Ejected')
            if np.sum(idx_u5) > 0:
                ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5],
                                     lw=0, s=3, color='C4', label='Tail')
            label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
            ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.])
            ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
            plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3)
            # plt.show()
            plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png', dpi=250)
            plt.close(fig)
    chdir('..')
| 43.740413 | 215 | 0.55874 |
78df11b8ab67a00fef993f03b911ed0dd7fc3180 | 707 | py | Python | src/python_minifier/transforms/remove_pass.py | donno2048/python-minifier | 9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5 | [
"MIT"
] | null | null | null | src/python_minifier/transforms/remove_pass.py | donno2048/python-minifier | 9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5 | [
"MIT"
] | null | null | null | src/python_minifier/transforms/remove_pass.py | donno2048/python-minifier | 9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5 | [
"MIT"
] | null | null | null | import ast
from python_minifier.transforms.suite_transformer import SuiteTransformer
| 27.192308 | 106 | 0.649222 |
78df4f62738c15a3903b9ac814a118e7bd487166 | 1,214 | py | Python | test/tests.py | gzu300/Linear_Algebra | 437a285b0230f4da8b0573b04da32ee965b09233 | [
"MIT"
] | null | null | null | test/tests.py | gzu300/Linear_Algebra | 437a285b0230f4da8b0573b04da32ee965b09233 | [
"MIT"
] | null | null | null | test/tests.py | gzu300/Linear_Algebra | 437a285b0230f4da8b0573b04da32ee965b09233 | [
"MIT"
] | null | null | null | import unittest
from pkg import Linear_Algebra
import numpy as np
# Entry point: discover and run all unittest cases defined in this module
# when the file is executed directly (e.g. `python tests.py`).
if __name__ == '__main__':
    unittest.main() | 41.862069 | 128 | 0.629325 |
78e0a22b8b4b6603603bcdb8feefa51265cf9c14 | 345 | py | Python | src/backend/common/models/favorite.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | 266 | 2015-01-04T00:10:48.000Z | 2022-03-28T18:42:05.000Z | src/backend/common/models/favorite.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | 2,673 | 2015-01-01T20:14:33.000Z | 2022-03-31T18:17:16.000Z | src/backend/common/models/favorite.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | 230 | 2015-01-04T00:10:48.000Z | 2022-03-26T18:12:04.000Z | from backend.common.models.mytba import MyTBAModel
| 28.75 | 77 | 0.704348 |
78e27b1810b0eb666d13182e83f2f3c881794f6e | 17,296 | py | Python | Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | 27 | 2020-11-12T19:24:54.000Z | 2022-03-27T23:10:45.000Z | Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | 2 | 2020-11-02T06:30:39.000Z | 2022-02-23T18:39:55.000Z | Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | 3 | 2021-08-16T00:21:08.000Z | 2022-02-23T19:19:36.000Z | #----------------------------------------------------------------------
# Name: wxPython.lib.filebrowsebutton
# Purpose: Composite controls that provide a Browse button next to
# either a wxTextCtrl or a wxComboBox. The Browse button
# launches a wxFileDialog and loads the result into the
# other control.
#
# Author: Mike Fletcher
#
# RCS-ID: $Id: filebrowsebutton.py 59674 2009-03-20 21:00:16Z RD $
# Copyright: (c) 2000 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/02/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 Compatability changes
#
import os
import types
import wx
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Demo entry point (Python 2 era module -- note the `print` *statement* below).
if __name__ == "__main__":
    #from skeletonbuilder import rulesfile
    def test( ):
        # DemoApp is presumably defined in the demo section of the original
        # module (not visible in this extract) -- TODO confirm before running.
        app = DemoApp(0)
        app.MainLoop()
    print 'Creating dialog'
    test( )
| 36.721868 | 95 | 0.566142 |
78e3235c058d0f0d01fe78bcda45b0e5210cc956 | 3,798 | py | Python | modules/pygsm/devicewrapper.py | whanderley/eden | 08ced3be3d52352c54cbd412ed86128fbb68b1d2 | [
"MIT"
] | 205 | 2015-01-20T08:26:09.000Z | 2022-03-27T19:59:33.000Z | modules/pygsm/devicewrapper.py | nursix/eden-asp | e49f46cb6488918f8d5a163dcd5a900cd686978c | [
"MIT"
] | 249 | 2015-02-10T09:56:35.000Z | 2022-03-23T19:54:36.000Z | modules/pygsm/devicewrapper.py | nursix/eden-asp | e49f46cb6488918f8d5a163dcd5a900cd686978c | [
"MIT"
] | 231 | 2015-02-10T09:33:17.000Z | 2022-02-18T19:56:05.000Z | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re
import errors | 35.166667 | 84 | 0.561611 |
78e3d8480adc030df86059c4a34f7c8aad96d287 | 306 | py | Python | day1/loops.py | alqmy/The-Garage-Summer-Of-Code | af310d5e5194a62962db2fc1e601099468251efa | [
"MIT"
] | null | null | null | day1/loops.py | alqmy/The-Garage-Summer-Of-Code | af310d5e5194a62962db2fc1e601099468251efa | [
"MIT"
] | null | null | null | day1/loops.py | alqmy/The-Garage-Summer-Of-Code | af310d5e5194a62962db2fc1e601099468251efa | [
"MIT"
] | null | null | null | # while True:
# # ejecuta esto
# print("Hola")
# Simple guessing game: keep prompting until the player hits the secret number.
real = 7
while True:
    print("Entre un numero entre el 1 y el 10")
    guess = int(input())
    if guess == real:
        break
    print("Ese no es el numero")
# Correct guess reached: celebrate and exit.
print("Yay! Lo sacastes!")
| 16.105263 | 47 | 0.591503 |
78e51986ef4ee9e7c7af6f2a83426baeaab981b9 | 1,426 | py | Python | pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | 6 | 2021-12-07T21:02:12.000Z | 2022-03-03T12:08:14.000Z | pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | null | null | null | pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
def rotCode(data):
    """
    Encode/decode a string with the ROT-13 substitution cipher.

    ROT-13 is its own inverse: applying rotCode twice returns the input.
    Alphabetic characters are rotated 13 places (case preserved); every
    other character (digits, punctuation, whitespace) passes through
    unchanged.

    :param data: A string
    :return: The rot-13 encoded/decoded string
    """
    rot_chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    substitutions = []
    # Walk through each individual character
    for c in data:
        # Remember the original case, then look the character up in lowercase
        # so upper- and lowercase letters share a single code path.
        is_upper = c.isupper()
        try:
            index = rot_chars.index(c.lower())
        except ValueError:
            # Not a letter: pass the character through unchanged.
            substitutions.append(c)
            continue
        # Rotate by 13 positions; modulo 26 wraps around the alphabet end.
        rotated = rot_chars[(index + 13) % 26]
        substitutions.append(rotated.upper() if is_upper else rotated)
    return ''.join(substitutions)
if __name__ == '__main__':
    # Parenthesised form works on both Python 2 and Python 3 (the original
    # used the Python-2-only print statement).
    print(rotCode('Jul, EBG-13?'))
| 33.162791 | 90 | 0.47756 |
78e6e9a7d73aab5ad3ba5822b10f0996d16afd5b | 1,762 | py | Python | examples/sim_tfidf.py | sunyilgdx/CwVW-SIF | 85ef56d80512e2f6bff1266e030552075566b240 | [
"MIT"
] | 12 | 2019-05-14T10:31:53.000Z | 2022-01-20T17:16:59.000Z | examples/sim_tfidf.py | sunyilgdx/CwVW-SIF | 85ef56d80512e2f6bff1266e030552075566b240 | [
"MIT"
] | null | null | null | examples/sim_tfidf.py | sunyilgdx/CwVW-SIF | 85ef56d80512e2f6bff1266e030552075566b240 | [
"MIT"
] | 1 | 2020-12-21T09:16:51.000Z | 2020-12-21T09:16:51.000Z | import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params
## run
# wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
# '../data/glove.840B.300d.txt' # need to download it first
# ]
# Word-vector files to evaluate; additional files (e.g. the paragram vectors
# from John Wieting's iclr2016 repo) can be listed alongside GloVe.
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
    '../data/glove.6B.50d.txt' # need to download it first
    ]
rmpcs = [0,1]  # number of principal components to remove (0 = plain weighted average)
comment4para = [ # need to align with the following loop
    ['word vector files', wordfiles], # comments and values,
    ['remove principal component or not', rmpcs]
    ]
params = params.params()
parr4para = {}  # Pearson scores keyed by (wordfile, rmpc)
sarr4para = {}  # Spearman scores keyed by (wordfile, rmpc)
for wordfile in wordfiles:
    (words, We) = data_io.getWordmap(wordfile)
    weight4ind = data_io.getIDFWeight(wordfile)  # idf-based word weights
    for rmpc in rmpcs:
        print('word vectors loaded from %s' % wordfile)
        print('word weights computed from idf')
        params.rmpc = rmpc
        print('remove the first %d principal components' % rmpc)
        # eval just one example dataset
        parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
        ## eval all datasets; need to obtained datasets from John Wieting (https://github.com/jwieting/iclr2016)
        # parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
        paras = (wordfile, rmpc)
        parr4para[paras] = parr
        sarr4para[paras] = sarr
## save result
save_result = False # True
result_file = 'result/sim_tfidf.result'
if save_result:
    # BUGFIX: pickle is a binary protocol -- the target file must be opened
    # in binary mode ('wb'); text mode raises TypeError under Python 3.
    with open(result_file, 'wb') as f:
        pickle.dump([parr4para, sarr4para, comment4para] , f)
| 39.155556 | 139 | 0.685585 |
78e748ebc4d60824e0cdf86518ddf127e1b97b2b | 120 | py | Python | tests/cases/cls.py | div72/py2many | 60277bc13597bd32d078b88a7390715568115fc6 | [
"MIT"
] | 345 | 2021-01-28T17:33:08.000Z | 2022-03-25T16:07:56.000Z | tests/cases/cls.py | mkos11/py2many | be6cfaad5af32c43eb24f182cb20ad63b979d4ef | [
"MIT"
] | 291 | 2021-01-31T13:15:06.000Z | 2022-03-23T21:28:49.000Z | tests/cases/cls.py | mkos11/py2many | be6cfaad5af32c43eb24f182cb20ad63b979d4ef | [
"MIT"
] | 23 | 2021-02-09T17:15:03.000Z | 2022-02-03T05:57:44.000Z |
if __name__ == "__main__":
    # Smoke-test: instantiate Foo (presumably defined earlier in the original
    # file; not visible in this extract) and print the result of bar().
    f = Foo()
    b = f.bar()
    print(b) | 13.333333 | 26 | 0.483333 |
78e74ab110d94c6516104012ed887badd152a66c | 1,602 | py | Python | theano-rfnn/mnist_loader.py | jhja/RFNN | a63641d6e584df743a5e0a9efaf41911f057a977 | [
"MIT"
] | 55 | 2016-05-11T18:53:30.000Z | 2022-02-22T12:31:08.000Z | theano-rfnn/mnist_loader.py | jhja/RFNN | a63641d6e584df743a5e0a9efaf41911f057a977 | [
"MIT"
] | null | null | null | theano-rfnn/mnist_loader.py | jhja/RFNN | a63641d6e584df743a5e0a9efaf41911f057a977 | [
"MIT"
] | 14 | 2016-08-16T02:00:47.000Z | 2022-03-08T13:16:00.000Z | import numpy as np
import os
from random import shuffle
# Root directory from which the dataset loaders resolve their relative paths.
datasets_dir = './../data/'
| 26.262295 | 63 | 0.624844 |
78e7d5ba18b9d335d132f7d6ec0d73b6ca3d020d | 686 | py | Python | Ejercicio 2.py | crltsnch/Ejercicios-grupales | 72e01d6489816ea1b9308af1abd62792e5464c93 | [
"Apache-2.0"
] | null | null | null | Ejercicio 2.py | crltsnch/Ejercicios-grupales | 72e01d6489816ea1b9308af1abd62792e5464c93 | [
"Apache-2.0"
] | null | null | null | Ejercicio 2.py | crltsnch/Ejercicios-grupales | 72e01d6489816ea1b9308af1abd62792e5464c93 | [
"Apache-2.0"
] | null | null | null | import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # HackerRank-style harness: the result is written to a file under the
    # directory named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'] + 'solucion2.txt', 'w')
    print("Escribe las notas de a")
    # Read one whitespace-separated line of integer grades for each player.
    a = list(map(int, input().rstrip().split()))
    print("Escribe las notas de b")
    b = list(map(int, input().rstrip().split()))
    # compareTriplets is not visible in this extract -- presumably defined
    # earlier in the original file; confirm, otherwise this raises NameError.
    result = compareTriplets(a, b)
    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')
    fptr.close() | 21.4375 | 65 | 0.580175 |
78ed1b7fc24c0d300d3ad14111db8c17f3c020fd | 5,401 | py | Python | app/routes/router.py | nityagautam/ReportDashboard-backend | d23fe008cb0df6a703fcd665181897a75b71d5b2 | [
"MIT"
] | 1 | 2021-05-06T09:48:46.000Z | 2021-05-06T09:48:46.000Z | app/routes/router.py | nityagautam/ReportDashboard | d23fe008cb0df6a703fcd665181897a75b71d5b2 | [
"MIT"
] | 2 | 2021-09-09T05:34:33.000Z | 2021-12-13T15:31:36.000Z | app/routes/router.py | nityagautam/ReportDashboard | d23fe008cb0df6a703fcd665181897a75b71d5b2 | [
"MIT"
] | null | null | null | #===============================================================
# @author: nityanarayan44@live.com
# @written: 08 December 2021
# @desc: Routes for the Backend server
#===============================================================
# Import section with referecne of entry file or main file;
from __main__ import application
from flask import jsonify, render_template, url_for, request, redirect
# Local sample data import
from app.config.uiconfig import app_ui_config
from app import sample_data
# ==============================================================
# App Routes/Gateways
# ==============================================================
# ==============================================================
# Error Handlers Starts
# ==============================================================
# 404 Handler; We can also pass the specific request errors codes to the decorator;
# Exception/Error handler; We can also pass the specific errors to the decorator;
# Exception/Error handler; We can also pass the specific errors to the decorator;
# ==============================================================
# Error Handlers Ends
# ==============================================================
# Route For Sample data
# ==============================================================
# Extra routes starts
# ==============================================================
| 38.035211 | 99 | 0.549713 |
78eed98843af7c2acb54d95dbb60b3f984e9337b | 15,624 | py | Python | idaes/generic_models/properties/core/examples/ASU_PR.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | [
"RSA-MD"
] | 112 | 2019-02-11T23:16:36.000Z | 2022-03-23T20:59:57.000Z | idaes/generic_models/properties/core/examples/ASU_PR.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | [
"RSA-MD"
] | 621 | 2019-03-01T14:44:12.000Z | 2022-03-31T19:49:25.000Z | idaes/generic_models/properties/core/examples/ASU_PR.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | [
"RSA-MD"
] | 154 | 2019-02-01T23:46:33.000Z | 2022-03-23T15:07:10.000Z | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Air separation phase equilibrium package using Peng-Robinson EoS.
Example property package using the Generic Property Package Framework.
This example shows how to set up a property package to do air separation
phase equilibrium in the generic framework using Peng-Robinson equation
along with methods drawn from the pre-built IDAES property libraries.
The example includes two dictionaries.
1. The dictionary named configuration contains parameters obtained from
The Properties of Gases and Liquids (1987) 4th edition and NIST.
2. The dictionary named configuration_Dowling_2015 contains parameters used in
A framework for efficient large scale equation-oriented flowsheet optimization
(2015) Dowling. The parameters are extracted from Properties of Gases and
Liquids (1977) 3rd edition for Antoine's vapor equation and acentric factors
and converted values from the Properties of Gases and Liquids (1977)
3rd edition to j.
"""
# Import Python libraries
import logging
# Import Pyomo units
from pyomo.environ import units as pyunits
# Import IDAES cores
from idaes.core import LiquidPhase, VaporPhase, Component
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType
from idaes.generic_models.properties.core.phase_equil import SmoothVLE
from idaes.generic_models.properties.core.phase_equil.bubble_dew import \
LogBubbleDew
from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity
from idaes.generic_models.properties.core.pure import RPP4
from idaes.generic_models.properties.core.pure import NIST
from idaes.generic_models.properties.core.pure import RPP3
# Set up logger
_log = logging.getLogger(__name__)
# ---------------------------------------------------------------------
# Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system
# Data Sources:
# [1] The Properties of Gases and Liquids (1987)
# 4th edition, Chemical Engineering Series - Robert C. Reid
# [2] NIST, https://webbook.nist.gov/
# Retrieved 16th August, 2020
# [3] The Properties of Gases and Liquids (1987)
# 3rd edition, Chemical Engineering Series - Robert C. Reid
# Cp parameters where converted to j in Dowling 2015
# [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015)
# Computers and Chemical Engineering - Alexander W. Dowling
# Peng-Robinson parameter set for the oxygen/argon/nitrogen VLE system.
# Inline citations: [1] Reid et al. (1987, 4th ed.), [2] NIST WebBook.
configuration = {
    # Specifying components
    "components": {
        "nitrogen": {"type": Component,
                     "enth_mol_ig_comp": RPP4,
                     "entr_mol_ig_comp": RPP4,
                     "pressure_sat_comp": NIST,
                     "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
                     "parameter_data": {
                         "mw": (28.0135E-3, pyunits.kg/pyunits.mol),  # [1]
                         "pressure_crit": (34e5, pyunits.Pa),  # [1]
                         "temperature_crit": (126.2, pyunits.K),  # [1]
                         "omega": 0.037,  # [1]
                         "cp_mol_ig_comp_coeff": {
                             "A": (3.115E1,
                                   pyunits.J/pyunits.mol/pyunits.K),  # [1]
                             "B": (-1.357E-2,
                                   pyunits.J/pyunits.mol/pyunits.K**2),
                             "C": (2.680E-5,
                                   pyunits.J/pyunits.mol/pyunits.K**3),
                             "D": (-1.168E-8,
                                   pyunits.J/pyunits.mol/pyunits.K**4)},
                         "enth_mol_form_vap_comp_ref": (
                             0.0, pyunits.J/pyunits.mol),  # [2]
                         "entr_mol_form_vap_comp_ref": (
                             191.61, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                         "pressure_sat_comp_coeff": {
                             "A": (3.7362, None),  # [2]
                             "B": (264.651, pyunits.K),
                             "C": (-6.788, pyunits.K)}}},
        "argon": {"type": Component,
                  "enth_mol_ig_comp": RPP4,
                  "entr_mol_ig_comp": RPP4,
                  "pressure_sat_comp": NIST,
                  "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
                  "parameter_data": {
                      "mw": (39.948E-3, pyunits.kg/pyunits.mol),  # [1]
                      "pressure_crit": (48.98e5, pyunits.Pa),  # [1]
                      "temperature_crit": (150.86, pyunits.K),  # [1]
                      "omega": 0.001,  # [1]
                      "cp_mol_ig_comp_coeff": {
                          "A": (2.050E1,
                                pyunits.J/pyunits.mol/pyunits.K),  # [1]
                          "B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2),
                          "C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3),
                          "D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
                      "enth_mol_form_vap_comp_ref": (
                          0.0, pyunits.J/pyunits.mol),  # [2]
                      "entr_mol_form_vap_comp_ref": (
                          154.8, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                      "pressure_sat_comp_coeff": {"A": (3.29555, None),  # [2]
                                                  "B": (215.24, pyunits.K),
                                                  "C": (-22.233, pyunits.K)}}},
        "oxygen": {"type": Component,
                   "enth_mol_ig_comp": RPP4,
                   "entr_mol_ig_comp": RPP4,
                   "pressure_sat_comp": NIST,
                   "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
                   "parameter_data": {
                       "mw": (31.999E-3, pyunits.kg/pyunits.mol),  # [1]
                       "pressure_crit": (50.43e5, pyunits.Pa),  # [1]
                       "temperature_crit": (154.58, pyunits.K),  # [1]
                       "omega": 0.025,  # [1]
                       "cp_mol_ig_comp_coeff": {
                           "A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K),
                           "B": (-3.680E-6,
                                 pyunits.J/pyunits.mol/pyunits.K**2),
                           "C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3),
                           "D": (-1.065E-8,
                                 pyunits.J/pyunits.mol/pyunits.K**4)},
                       "enth_mol_form_vap_comp_ref": (
                           0.0, pyunits.J/pyunits.mol),  # [2]
                       "entr_mol_form_vap_comp_ref": (
                           205.152, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                       "pressure_sat_comp_coeff": {
                           "A": (3.85845, None),  # [2]
                           "B": (325.675, pyunits.K),
                           "C": (-5.667, pyunits.K)}}}},
    # Specifying phases
    "phases": {"Liq": {"type": LiquidPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}},
               "Vap": {"type": VaporPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}}},
    # Set base units of measurement
    "base_units": {"time": pyunits.s,
                   "length": pyunits.m,
                   "mass": pyunits.kg,
                   "amount": pyunits.mol,
                   "temperature": pyunits.K},
    # Specifying state definition
    # State bounds are (lower, initial guess, upper, units).
    "state_definition": FTPx,
    "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
                     "temperature": (10, 300, 350, pyunits.K),
                     "pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
    "pressure_ref": (101325, pyunits.Pa),
    "temperature_ref": (298.15, pyunits.K),
    # Defining phase equilibria
    # PR binary interaction parameters (kappa): symmetric, zero diagonal.
    "phases_in_equilibrium": [("Vap", "Liq")],
    "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
    "bubble_dew_method": LogBubbleDew,
    "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
                                    ("nitrogen", "argon"): -0.26e-2,
                                    ("nitrogen", "oxygen"): -0.119e-1,
                                    ("argon", "nitrogen"): -0.26e-2,
                                    ("argon", "argon"): 0.000,
                                    ("argon", "oxygen"): 0.104e-1,
                                    ("oxygen", "nitrogen"): -0.119e-1,
                                    ("oxygen", "argon"): 0.104e-1,
                                    ("oxygen", "oxygen"): 0.000}}}
# Variant parameter set matching Dowling (2015): Antoine coefficients and
# acentric factors from [3] (1977, 3rd ed.); critical pressures from [4].
configuration_Dowling_2015 = {
    # Specifying components
    "components": {
        "nitrogen": {"type": Component,
                     "enth_mol_ig_comp": RPP4,
                     "entr_mol_ig_comp": RPP4,
                     "pressure_sat_comp": RPP3,
                     "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
                     "parameter_data": {
                         "mw": (28.0135E-3, pyunits.kg/pyunits.mol),  # [3]
                         "pressure_crit": (33.943875e5, pyunits.Pa),  # [4]
                         "temperature_crit": (126.2, pyunits.K),  # [4]
                         "omega": 0.04,  # [3]
                         "cp_mol_ig_comp_coeff": {
                             'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                             'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2),
                             'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3),
                             'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
                         "enth_mol_form_vap_comp_ref": (
                             0.0, pyunits.J/pyunits.mol),  # [2]
                         "entr_mol_form_vap_comp_ref": (
                             191.61, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                         "pressure_sat_comp_coeff": {
                             'A': (14.9342, None),  # [3]
                             'B': (588.72, pyunits.K),
                             'C': (-6.60, pyunits.K)}}},
        "argon": {"type": Component,
                  "enth_mol_ig_comp": RPP4,
                  "entr_mol_ig_comp": RPP4,
                  "pressure_sat_comp": RPP3,
                  "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
                  "parameter_data": {
                      "mw": (39.948E-3, pyunits.kg/pyunits.mol),  # [3]
                      "pressure_crit": (48.737325e5, pyunits.Pa),  # [4]
                      "temperature_crit": (150.86, pyunits.K),  # [4]
                      "omega": -0.004,  # [1]
                      "cp_mol_ig_comp_coeff": {
                          'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                          'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2),
                          'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3),
                          'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
                      "enth_mol_form_vap_comp_ref": (
                          0.0, pyunits.J/pyunits.mol),  # [3]
                      "entr_mol_form_vap_comp_ref": (
                          154.8, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                      "pressure_sat_comp_coeff": {
                          'A': (15.2330, None),  # [3]
                          'B': (700.51, pyunits.K),
                          'C': (-5.84, pyunits.K)}}},
        "oxygen": {"type": Component,
                   "enth_mol_ig_comp": RPP4,
                   "entr_mol_ig_comp": RPP4,
                   "pressure_sat_comp": RPP3,
                   "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
                   "parameter_data": {
                       "mw": (31.999E-3, pyunits.kg/pyunits.mol),  # [3]
                       "pressure_crit": (50.45985e5, pyunits.Pa),  # [4]
                       "temperature_crit": (154.58, pyunits.K),  # [4]
                       "omega": 0.021,  # [1]
                       "cp_mol_ig_comp_coeff": {
                           'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                           'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2),
                           'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3),
                           'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
                       "enth_mol_form_vap_comp_ref": (
                           0.0, pyunits.J/pyunits.mol),  # [2]
                       "entr_mol_form_vap_comp_ref": (
                           205.152, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                       "pressure_sat_comp_coeff": {
                           'A': (15.4075, None),  # [3]
                           'B': (734.55, pyunits.K),
                           'C': (-6.45, pyunits.K)}}}},
    # Specifying phases
    "phases": {"Liq": {"type": LiquidPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}},
               "Vap": {"type": VaporPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}}},
    # Set base units of measurement
    "base_units": {"time": pyunits.s,
                   "length": pyunits.m,
                   "mass": pyunits.kg,
                   "amount": pyunits.mol,
                   "temperature": pyunits.K},
    # Specifying state definition
    # State bounds are (lower, initial guess, upper, units).
    "state_definition": FTPx,
    "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
                     "temperature": (10, 300, 350, pyunits.K),
                     "pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
    "pressure_ref": (101325, pyunits.Pa),
    "temperature_ref": (298.15, pyunits.K),
    # Defining phase equilibria
    # PR binary interaction parameters (kappa): same values as `configuration`.
    "phases_in_equilibrium": [("Vap", "Liq")],
    "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
    "bubble_dew_method": LogBubbleDew,
    "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
                                    ("nitrogen", "argon"): -0.26e-2,
                                    ("nitrogen", "oxygen"): -0.119e-1,
                                    ("argon", "nitrogen"): -0.26e-2,
                                    ("argon", "argon"): 0.000,
                                    ("argon", "oxygen"): 0.104e-1,
                                    ("oxygen", "nitrogen"): -0.119e-1,
                                    ("oxygen", "argon"): 0.104e-1,
                                    ("oxygen", "oxygen"): 0.000}}}
| 51.394737 | 91 | 0.473374 |
78efdc29bbe17ba841a42c2ad2e6e9e8b6de242a | 34 | py | Python | tests/functional/test_calculator.py | bellanov/calculator | a66e68a368a5212247aeff3291c9cb8b508e91be | [
"Apache-2.0"
] | null | null | null | tests/functional/test_calculator.py | bellanov/calculator | a66e68a368a5212247aeff3291c9cb8b508e91be | [
"Apache-2.0"
] | null | null | null | tests/functional/test_calculator.py | bellanov/calculator | a66e68a368a5212247aeff3291c9cb8b508e91be | [
"Apache-2.0"
] | 1 | 2021-05-26T16:54:17.000Z | 2021-05-26T16:54:17.000Z | """TODO: Move the Threads Here"""
| 17 | 33 | 0.647059 |
78f03cf1af94e18c9a855dfd8bbdda1565566674 | 17,569 | py | Python | autokeras/hypermodel/graph.py | Sette/autokeras | c5a83607a899ad545916b3794561d6908d9cdbac | [
"MIT"
] | null | null | null | autokeras/hypermodel/graph.py | Sette/autokeras | c5a83607a899ad545916b3794561d6908d9cdbac | [
"MIT"
] | null | null | null | autokeras/hypermodel/graph.py | Sette/autokeras | c5a83607a899ad545916b3794561d6908d9cdbac | [
"MIT"
] | null | null | null | import functools
import pickle
import kerastuner
import tensorflow as tf
from tensorflow.python.util import nest
from autokeras.hypermodel import base
from autokeras.hypermodel import compiler
def copy(old_instance):
    """Return a fresh instance of the same class carrying over the state.

    Duplicates *old_instance* by default-constructing its class and then
    transferring the serialisable state via get_state()/set_state().
    """
    duplicate = old_instance.__class__()
    duplicate.set_state(old_instance.get_state())
    return duplicate
| 37.620985 | 85 | 0.594001 |
78f06ac9567797f0104f062bd9b9ac12e57cffa6 | 474 | py | Python | Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 74423d65702b78974e390f17c9d6365d17e6eed5 | [
"MIT"
] | 388 | 2020-06-29T08:41:27.000Z | 2022-03-31T22:55:05.000Z | Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 74423d65702b78974e390f17c9d6365d17e6eed5 | [
"MIT"
] | 178 | 2020-07-16T17:15:28.000Z | 2022-03-09T21:01:50.000Z | Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 74423d65702b78974e390f17c9d6365d17e6eed5 | [
"MIT"
] | 263 | 2020-07-13T18:33:20.000Z | 2022-03-28T13:54:10.000Z | '''
Speed: 95.97%
Memory: 24.96%
Time complexity: O(n)
Space complexity: O(n)
''' | 23.7 | 44 | 0.436709 |
78f17ff49e114c184b6a1474d4e3188bcdc4d56c | 447 | py | Python | setup.py | i25ffz/openaes | a0dbde40d4ce0e4186ea14c4dc9519fe152c018c | [
"BSD-2-Clause"
] | null | null | null | setup.py | i25ffz/openaes | a0dbde40d4ce0e4186ea14c4dc9519fe152c018c | [
"BSD-2-Clause"
] | null | null | null | setup.py | i25ffz/openaes | a0dbde40d4ce0e4186ea14c4dc9519fe152c018c | [
"BSD-2-Clause"
] | null | null | null | from distutils.core import setup, Extension
import os.path
kw = {
'name':"PyOpenAES",
'version':"0.10.0",
'description':"OpenAES cryptographic library for Python.",
'ext_modules':[
Extension(
'openaes',
include_dirs = ['inc', 'src/isaac'],
# define_macros=[('ENABLE_PYTHON', '1')],
sources = [
os.path.join('src/oaes_lib.c'),
os.path.join('src/oaes_py.c'),
os.path.join('src/isaac/rand.c')
]
)
]
}
setup(**kw) | 20.318182 | 59 | 0.624161 |
78f2293017d6edca3048eb7b10371f7d73e4c830 | 967 | py | Python | examples/isosurface_demo2.py | jayvdb/scitools | 8df53a3a3bc95377f9fa85c04f3a329a0ec33e67 | [
"BSD-3-Clause"
] | 62 | 2015-03-28T18:07:51.000Z | 2022-02-12T20:32:36.000Z | examples/isosurface_demo2.py | jayvdb/scitools | 8df53a3a3bc95377f9fa85c04f3a329a0ec33e67 | [
"BSD-3-Clause"
] | 7 | 2015-06-09T09:56:03.000Z | 2021-05-20T17:53:15.000Z | examples/isosurface_demo2.py | jayvdb/scitools | 8df53a3a3bc95377f9fa85c04f3a329a0ec33e67 | [
"BSD-3-Clause"
] | 29 | 2015-04-16T03:48:57.000Z | 2022-02-03T22:06:52.000Z | #!/usr/bin/env python
# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html
from scitools.easyviz import *
from time import sleep
from scipy import io
setp(interactive=False)
# Displaying an Isosurface:
mri = io.loadmat('mri_matlab_v6.mat')
D = mri['D']
#Ds = smooth3(D);
isosurface(D,5,indexing='xy')
#hiso = isosurface(Ds,5),
# 'FaceColor',[1,.75,.65],...
# 'EdgeColor','none');
shading('interp')
# Adding an Isocap to Show a Cutaway Surface:
#hcap = patch(isocaps(D,5),...
# 'FaceColor','interp',...
# 'EdgeColor','none');
#colormap(map)
# Define the View:
view(45,30)
axis('tight')
daspect([1,1,.4])
# Add Lighting:
#lightangle(45,30);
#set(gcf,'Renderer','zbuffer'); lighting phong
#isonormals(Ds,hiso)
#set(hcap,'AmbientStrength',.6)
#set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50)
show()
raw_input('Press Return key to quit: ')
#savefig('tmp_isosurf2a.eps')
#savefig('tmp_isosurf2a.png')
| 20.574468 | 78 | 0.701138 |
78f2658f7e058410b484a9d45fd69949bca2813c | 4,099 | py | Python | structural_model/util_morphology.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | [
"MIT"
] | 1 | 2022-03-11T13:43:50.000Z | 2022-03-11T13:43:50.000Z | structural_model/util_morphology.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | [
"MIT"
] | null | null | null | structural_model/util_morphology.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | [
"MIT"
] | null | null | null | import os
import numpy as np
import json
import util_amira
| 32.275591 | 138 | 0.613808 |
78f33bf3b80a0a0d98e998f783441284fa1b3068 | 3,503 | py | Python | invenio_madmp/views.py | FAIR-Data-Austria/invenio-madmp | 74372ee794f81666f5e9cf08ef448c21b2e428be | [
"MIT"
] | 1 | 2022-03-02T10:37:29.000Z | 2022-03-02T10:37:29.000Z | invenio_madmp/views.py | FAIR-Data-Austria/invenio-madmp | 74372ee794f81666f5e9cf08ef448c21b2e428be | [
"MIT"
] | 9 | 2020-08-25T12:03:08.000Z | 2020-10-20T11:45:32.000Z | invenio_madmp/views.py | FAIR-Data-Austria/invenio-madmp | 74372ee794f81666f5e9cf08ef448c21b2e428be | [
"MIT"
] | null | null | null | """Blueprint definitions for maDMP integration."""
from flask import Blueprint, jsonify, request
from invenio_db import db
from .convert import convert_dmp
from .models import DataManagementPlan
def _summarize_dmp(dmp: DataManagementPlan) -> dict:
    """Create a summary dictionary for the given DMP."""
    datasets = []
    for dataset in dmp.datasets:
        entry = {"dataset_id": dataset.dataset_id, "record": None}
        if dataset.record:
            # Attach the raw record JSON when the dataset has a linked record.
            entry["record"] = dataset.record.model.json
        datasets.append(entry)
    return {"dmp_id": dmp.dmp_id, "datasets": datasets}
def create_rest_blueprint(app) -> Blueprint:
    """Create the blueprint for the REST endpoints using the current app extensions."""
    # note: using flask.current_app isn't directly possible, because Invenio-MaDMP is
    #       registered as an extension in the API app, not the "normal" app
    #       (which is the one usually returned by current_app)
    rest_blueprint = Blueprint("invenio_madmp", __name__)
    # `auth` appears unused here; the route handlers that consume it are
    # presumably stripped from this extract -- confirm against the original
    # module before removing this line.
    auth = app.extensions["invenio-madmp"].auth
    return rest_blueprint
| 35.744898 | 87 | 0.643163 |
78f362e6e499abd6ba76d1b520e7369bf25061c9 | 257 | py | Python | retrieval/urls.py | aipassio/visual_retrieval | ce8dae2ad517a9edb5e278163dd6d0f7ffc1b5f4 | [
"MIT"
] | null | null | null | retrieval/urls.py | aipassio/visual_retrieval | ce8dae2ad517a9edb5e278163dd6d0f7ffc1b5f4 | [
"MIT"
] | null | null | null | retrieval/urls.py | aipassio/visual_retrieval | ce8dae2ad517a9edb5e278163dd6d0f7ffc1b5f4 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Route table for the retrieval app: index page plus the insert/get endpoints,
# each dispatched to the matching view function in `views`.
urlpatterns = [
    path('', views.index, name='index'),
    path('retrieval_insert', views.retrieval_insert, name='retrieval_insert'),
    path('retrieval_get', views.retrieval_get, name='retrieval_get')
] | 28.555556 | 78 | 0.723735 |
78f3cd314838c8b00373f5ff15a91db4a0e4e749 | 1,427 | py | Python | scripts/Interfacing/encoder_class.py | noshluk2/Wifi-Signal-Robot-localization | 538e6c4e7a63486f22ab708908c476cd808f720c | [
"MIT"
] | null | null | null | scripts/Interfacing/encoder_class.py | noshluk2/Wifi-Signal-Robot-localization | 538e6c4e7a63486f22ab708908c476cd808f720c | [
"MIT"
] | null | null | null | scripts/Interfacing/encoder_class.py | noshluk2/Wifi-Signal-Robot-localization | 538e6c4e7a63486f22ab708908c476cd808f720c | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import threading
# r_en_a = 27
# r_en_b = 10
# l_en_a = 5
# l_en_b = 6
# enc_obj = Encoder(27,10,5,6)
# def update_encoders():
# threading.Timer(1,update_encoders).start()
# print(" looping ")
# update_encoders() | 26.425926 | 75 | 0.618781 |
78f527fe8104b4c467eef06ba01999f8a1c7339e | 2,286 | py | Python | systori/apps/equipment/urls.py | systori/systori | e309c63e735079ff6032fdaf1db354ec872b28b1 | [
"BSD-3-Clause"
] | 12 | 2018-01-30T00:44:06.000Z | 2020-07-13T05:20:48.000Z | systori/apps/equipment/urls.py | systori/systori | e309c63e735079ff6032fdaf1db354ec872b28b1 | [
"BSD-3-Clause"
] | 36 | 2018-03-06T17:49:50.000Z | 2020-06-23T19:26:00.000Z | systori/apps/equipment/urls.py | systori/systori | e309c63e735079ff6032fdaf1db354ec872b28b1 | [
"BSD-3-Clause"
] | 3 | 2018-08-03T07:03:09.000Z | 2020-07-09T20:21:10.000Z | from django.conf.urls import url
from django.urls import path, include
from systori.apps.user.authorization import office_auth
from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate
urlpatterns = [
    # Two url rules to make the active_filter keyword optional.
    url(
        r"^equipment/$", office_auth(EquipmentListView.as_view()), name="equipment.list"
    ),
    url(
        r"^equipment/(?P<active_filter>[\w-]+)$",
        office_auth(EquipmentListView.as_view()),
        name="equipment.list",
    ),
    url(
        r"^equipment-(?P<pk>\d+)$",
        office_auth(EquipmentView.as_view()),
        name="equipment.view",
    ),
    url(
        r"^create-equipment$",
        office_auth(EquipmentCreate.as_view()),
        name="equipment.create",
    ),
    url(
        r"^equipment-(?P<pk>\d+)/edit$",
        office_auth(EquipmentUpdate.as_view()),
        name="equipment.edit",
    ),
    url(
        r"^equipment-(?P<pk>\d+)/delete$",
        office_auth(EquipmentDelete.as_view()),
        name="equipment.delete",
    ),
    url(
        r"^equipment-(?P<pk>\d+)/create-refueling-stop$",
        office_auth(RefuelingStopCreate.as_view()),
        name="refueling_stop.create",
    ),
    url(
        r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/update$",
        office_auth(RefuelingStopUpdate.as_view()),
        name="refueling_stop.update",
    ),
    url(
        # BUG FIX: pattern was unanchored (missing "$"), so it prefix-matched any
        # URL beginning with ".../delete"; anchored for consistency with siblings.
        r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/delete$",
        office_auth(RefuelingStopDelete.as_view()),
        name="refueling_stop.delete",
    ),
    url(
        # BUG FIX: anchored with "$" (was unanchored, unlike the other patterns).
        r"^equipment-(?P<pk>\d+)/create-maintenance$",
        office_auth(MaintenanceCreate.as_view()),
        name="maintenance.create",
    ),
    url(
        r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/update$",
        office_auth(MaintenanceUpdate.as_view()),
        name="maintenance.update",
    ),
    url(
        # BUG FIX: anchored with "$" (was unanchored, unlike the other patterns).
        r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/delete$",
        office_auth(MaintenanceDelete.as_view()),
        name="maintenance.delete",
    ),
]
| 33.130435 | 244 | 0.624672 |
78f5546c49c417508d26fa0f809340459987fc66 | 13,697 | py | Python | paddlehub/module/check_info_pb2.py | MRXLT/PaddleHub | a9cd941bef2ac5a2d81b2f20422a4fbd9a87eb90 | [
"Apache-2.0"
] | 1 | 2019-07-03T13:08:39.000Z | 2019-07-03T13:08:39.000Z | paddlehub/module/check_info_pb2.py | binweiwu/PaddleHub | f92d0edd18057044ef248d7f2c42d8f347b62fbf | [
"Apache-2.0"
] | null | null | null | paddlehub/module/check_info_pb2.py | binweiwu/PaddleHub | f92d0edd18057044ef248d7f2c42d8f347b62fbf | [
"Apache-2.0"
] | null | null | null | #coding:utf-8
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: check_info.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='check_info.proto',
package='paddlehub.module.checkinfo',
syntax='proto3',
serialized_pb=_b(
'\n\x10\x63heck_info.proto\x12\x1apaddlehub.module.checkinfo\"\x85\x01\n\x08\x46ileInfo\x12\x11\n\tfile_name\x18\x01 \x01(\t\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32%.paddlehub.module.checkinfo.FILE_TYPE\x12\x0f\n\x07is_need\x18\x03 \x01(\x08\x12\x0b\n\x03md5\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\x84\x01\n\x08Requires\x12>\n\x0crequire_type\x18\x01 \x01(\x0e\x32(.paddlehub.module.checkinfo.REQUIRE_TYPE\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\ngreat_than\x18\x03 \x01(\x08\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\"\xc8\x01\n\tCheckInfo\x12\x16\n\x0epaddle_version\x18\x01 \x01(\t\x12\x13\n\x0bhub_version\x18\x02 \x01(\t\x12\x1c\n\x14module_proto_version\x18\x03 \x01(\t\x12\x38\n\nfile_infos\x18\x04 \x03(\x0b\x32$.paddlehub.module.checkinfo.FileInfo\x12\x36\n\x08requires\x18\x05 \x03(\x0b\x32$.paddlehub.module.checkinfo.Requires*\x1e\n\tFILE_TYPE\x12\x08\n\x04\x46ILE\x10\x00\x12\x07\n\x03\x44IR\x10\x01*[\n\x0cREQUIRE_TYPE\x12\x12\n\x0ePYTHON_PACKAGE\x10\x00\x12\x0e\n\nHUB_MODULE\x10\x01\x12\n\n\x06SYSTEM\x10\x02\x12\x0b\n\x07\x43OMMAND\x10\x03\x12\x0e\n\nPY_VERSION\x10\x04\x42\x02H\x03\x62\x06proto3'
))
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FILE_TYPE = _descriptor.EnumDescriptor(
name='FILE_TYPE',
full_name='paddlehub.module.checkinfo.FILE_TYPE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FILE', index=0, number=0, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='DIR', index=1, number=1, options=None, type=None),
],
containing_type=None,
options=None,
serialized_start=522,
serialized_end=552,
)
_sym_db.RegisterEnumDescriptor(_FILE_TYPE)
FILE_TYPE = enum_type_wrapper.EnumTypeWrapper(_FILE_TYPE)
_REQUIRE_TYPE = _descriptor.EnumDescriptor(
name='REQUIRE_TYPE',
full_name='paddlehub.module.checkinfo.REQUIRE_TYPE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PYTHON_PACKAGE', index=0, number=0, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='HUB_MODULE', index=1, number=1, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='SYSTEM', index=2, number=2, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='COMMAND', index=3, number=3, options=None, type=None),
_descriptor.EnumValueDescriptor(
name='PY_VERSION', index=4, number=4, options=None, type=None),
],
containing_type=None,
options=None,
serialized_start=554,
serialized_end=645,
)
_sym_db.RegisterEnumDescriptor(_REQUIRE_TYPE)
REQUIRE_TYPE = enum_type_wrapper.EnumTypeWrapper(_REQUIRE_TYPE)
FILE = 0
DIR = 1
PYTHON_PACKAGE = 0
HUB_MODULE = 1
SYSTEM = 2
COMMAND = 3
PY_VERSION = 4
_FILEINFO = _descriptor.Descriptor(
name='FileInfo',
full_name='paddlehub.module.checkinfo.FileInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_name',
full_name='paddlehub.module.checkinfo.FileInfo.file_name',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type',
full_name='paddlehub.module.checkinfo.FileInfo.type',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_need',
full_name='paddlehub.module.checkinfo.FileInfo.is_need',
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='md5',
full_name='paddlehub.module.checkinfo.FileInfo.md5',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description',
full_name='paddlehub.module.checkinfo.FileInfo.description',
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=49,
serialized_end=182,
)
_REQUIRES = _descriptor.Descriptor(
name='Requires',
full_name='paddlehub.module.checkinfo.Requires',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='require_type',
full_name='paddlehub.module.checkinfo.Requires.require_type',
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version',
full_name='paddlehub.module.checkinfo.Requires.version',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='great_than',
full_name='paddlehub.module.checkinfo.Requires.great_than',
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description',
full_name='paddlehub.module.checkinfo.Requires.description',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=185,
serialized_end=317,
)
_CHECKINFO = _descriptor.Descriptor(
name='CheckInfo',
full_name='paddlehub.module.checkinfo.CheckInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='paddle_version',
full_name='paddlehub.module.checkinfo.CheckInfo.paddle_version',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hub_version',
full_name='paddlehub.module.checkinfo.CheckInfo.hub_version',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='module_proto_version',
full_name=
'paddlehub.module.checkinfo.CheckInfo.module_proto_version',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_infos',
full_name='paddlehub.module.checkinfo.CheckInfo.file_infos',
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='requires',
full_name='paddlehub.module.checkinfo.CheckInfo.requires',
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=320,
serialized_end=520,
)
_FILEINFO.fields_by_name['type'].enum_type = _FILE_TYPE
_REQUIRES.fields_by_name['require_type'].enum_type = _REQUIRE_TYPE
_CHECKINFO.fields_by_name['file_infos'].message_type = _FILEINFO
_CHECKINFO.fields_by_name['requires'].message_type = _REQUIRES
DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO
DESCRIPTOR.message_types_by_name['Requires'] = _REQUIRES
DESCRIPTOR.message_types_by_name['CheckInfo'] = _CHECKINFO
DESCRIPTOR.enum_types_by_name['FILE_TYPE'] = _FILE_TYPE
DESCRIPTOR.enum_types_by_name['REQUIRE_TYPE'] = _REQUIRE_TYPE
FileInfo = _reflection.GeneratedProtocolMessageType(
'FileInfo',
(_message.Message, ),
dict(
DESCRIPTOR=_FILEINFO,
__module__='check_info_pb2'
# @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.FileInfo)
))
_sym_db.RegisterMessage(FileInfo)
Requires = _reflection.GeneratedProtocolMessageType(
'Requires',
(_message.Message, ),
dict(
DESCRIPTOR=_REQUIRES,
__module__='check_info_pb2'
# @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.Requires)
))
_sym_db.RegisterMessage(Requires)
CheckInfo = _reflection.GeneratedProtocolMessageType(
'CheckInfo',
(_message.Message, ),
dict(
DESCRIPTOR=_CHECKINFO,
__module__='check_info_pb2'
# @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.CheckInfo)
))
_sym_db.RegisterMessage(CheckInfo)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(),
_b('H\003'))
# @@protoc_insertion_point(module_scope)
| 33.653563 | 1,160 | 0.611083 |
78f57ad1256f2c324b8101344d3e6ef85566b84c | 632 | py | Python | 40_3.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | [
"MIT"
] | null | null | null | 40_3.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | [
"MIT"
] | null | null | null | 40_3.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | [
"MIT"
] | 1 | 2019-12-03T01:34:19.000Z | 2019-12-03T01:34:19.000Z | from numpy import zeros
# Define ab2 function
# Define functions
# Set initial conditions
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5
# Execute AB2
t, yab2 = ab2(f,t0,tf,y0,n)
# Print results
print("%5s %8s" % ('t','y'))
for i in range(n+1):
print("%8.4f %8.4f" % (t[i],yab2[i]))
| 18.588235 | 83 | 0.463608 |
78f5d63c04bc9e40555fc089be45ac3e10cbd62a | 40,331 | py | Python | test/test_parse_cs.py | NeonDaniel/lingua-franca | eee95702016b4013b0d81dc74da98cd2d2f53358 | [
"Apache-2.0"
] | null | null | null | test/test_parse_cs.py | NeonDaniel/lingua-franca | eee95702016b4013b0d81dc74da98cd2d2f53358 | [
"Apache-2.0"
] | null | null | null | test/test_parse_cs.py | NeonDaniel/lingua-franca | eee95702016b4013b0d81dc74da98cd2d2f53358 | [
"Apache-2.0"
] | 1 | 2020-09-22T12:39:17.000Z | 2020-09-22T12:39:17.000Z | #
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime, timedelta
from lingua_franca import get_default_lang, set_default_lang, \
load_language, unload_language
from lingua_franca.parse import extract_datetime
from lingua_franca.parse import extract_duration
from lingua_franca.parse import extract_number, extract_numbers
from lingua_franca.parse import fuzzy_match
from lingua_franca.parse import get_gender
from lingua_franca.parse import match_one
from lingua_franca.parse import normalize
if __name__ == "__main__":
unittest.main()
| 54.208333 | 114 | 0.564851 |
78f63355867462f1a454c939b07a72f40e12bd55 | 955 | py | Python | src/net/pluto_ftp.py | WardenAllen/Uranus | 0d20cac631320b558254992c17678ddd1658587b | [
"MIT"
] | null | null | null | src/net/pluto_ftp.py | WardenAllen/Uranus | 0d20cac631320b558254992c17678ddd1658587b | [
"MIT"
] | null | null | null | src/net/pluto_ftp.py | WardenAllen/Uranus | 0d20cac631320b558254992c17678ddd1658587b | [
"MIT"
] | null | null | null | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/9/18 12:02
# @Author : WardenAllen
# @File : pluto_ftp.py
# @Brief :
import paramiko | 31.833333 | 73 | 0.655497 |
78f6f92a5932a9d711316ff3341b072e7d33ca29 | 99 | py | Python | piped/processors/test/__init__.py | alexbrasetvik/Piped | 0312c14d6c4c293df378c915cc9787bcc7faed36 | [
"MIT"
] | 3 | 2015-02-12T20:34:30.000Z | 2016-08-06T06:54:48.000Z | piped/processors/test/__init__.py | alexbrasetvik/Piped | 0312c14d6c4c293df378c915cc9787bcc7faed36 | [
"MIT"
] | null | null | null | piped/processors/test/__init__.py | alexbrasetvik/Piped | 0312c14d6c4c293df378c915cc9787bcc7faed36 | [
"MIT"
] | 2 | 2015-12-16T14:18:14.000Z | 2019-04-12T01:43:10.000Z | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
| 33 | 71 | 0.747475 |
78f83610f02792ce2cf026a72886ebff9b5ef71f | 579 | py | Python | assistance_bot/app.py | reakfog/personal_computer_voice_assistant | 3483f633c57cd2e930f94bcbda9739cde34525aa | [
"BSD-3-Clause"
] | null | null | null | assistance_bot/app.py | reakfog/personal_computer_voice_assistant | 3483f633c57cd2e930f94bcbda9739cde34525aa | [
"BSD-3-Clause"
] | null | null | null | assistance_bot/app.py | reakfog/personal_computer_voice_assistant | 3483f633c57cd2e930f94bcbda9739cde34525aa | [
"BSD-3-Clause"
] | 2 | 2021-07-26T20:22:31.000Z | 2021-07-29T12:58:03.000Z | import sys
sys.path = ['', '..'] + sys.path[1:]
import daemon
from assistance_bot import core
from functionality.voice_processing import speaking, listening
from functionality.commands import *
if __name__ == '__main__':
    # Configure the TTS voice once at startup before entering the loop.
    speaking.setup_assistant_voice(core.ttsEngine, core.assistant)
    # Main assistant loop: block on the microphone, then dispatch the recognized
    # text to the command executor. Runs until the process is killed.
    while True:
        # start speech recording and speech recognition
        recognized_speech = listening.get_listening_and_recognition_result(
            core.recognizer,
            core.microphone)
        # executing the given command
        execute_command(recognized_speech)
| 32.166667 | 75 | 0.723661 |
78f942b69039b6e57cce7169cc8dc3ffec50e359 | 107 | py | Python | python/testData/resolve/AssignmentExpressionsAndOuterVar.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/resolve/AssignmentExpressionsAndOuterVar.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/testData/resolve/AssignmentExpressionsAndOuterVar.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | total = 0
partial_sums = [total := total + v for v in values]
print("Total:", total)
<ref> | 26.75 | 51 | 0.551402 |
78fa9f898e64c035eed240732e89631cf36a87b3 | 18,049 | py | Python | exhale/deploy.py | florianhumblot/exhale | d6fa84fa32ee079c6b70898a1b0863a38e703591 | [
"BSD-3-Clause"
] | null | null | null | exhale/deploy.py | florianhumblot/exhale | d6fa84fa32ee079c6b70898a1b0863a38e703591 | [
"BSD-3-Clause"
] | null | null | null | exhale/deploy.py | florianhumblot/exhale | d6fa84fa32ee079c6b70898a1b0863a38e703591 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf8 -*-
########################################################################################
# This file is part of exhale. Copyright (c) 2017-2022, Stephen McDowell. #
# Full BSD 3-Clause license available here: #
# #
# https://github.com/svenevs/exhale/blob/master/LICENSE #
########################################################################################
'''
The deploy module is responsible for two primary actions:
1. Executing Doxygen (if requested in ``exhale_args``).
2. Launching the full API generation via the :func:`~exhale.deploy.explode` function.
'''
from __future__ import unicode_literals
from . import configs
from . import utils
from .graph import ExhaleRoot
import os
import sys
import six
import re
import codecs
import tempfile
import textwrap
from subprocess import PIPE, Popen, STDOUT
def _generate_doxygen(doxygen_input):
    '''
    This method executes doxygen based off of the specified input. By the time this
    method is executed, it is assumed that Doxygen is intended to be run in the
    **current working directory**. Search for ``returnPath`` in the implementation of
    :func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect.
    This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`,
    which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`.
    Two versions of the
    doxygen command can be executed:
    1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a
       ``Doxyfile`` exists in the **current working directory**. Meaning the command
       being executed is simply ``doxygen``.
    2. For all other values, ``doxygen_input`` represents the arguments as to be
       specified on ``stdin`` to the process.
    **Parameters**
    ``doxygen_input`` (str)
        Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the
        selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``)
        that will be ``communicate``d to the ``doxygen`` process on ``stdin``.
        .. note::
           If using Python **3**, the input **must** still be a ``str``. This
           method will convert the input to ``bytes`` as follows:
           .. code-block:: py
              if sys.version[0] == "3":
                  doxygen_input = bytes(doxygen_input, "utf-8")
    **Return**
    ``str`` or ``None``
        If an error occurs, a string describing the error is returned with the
        intention of the caller raising the exception. If ``None`` is returned,
        then the process executed without error. Example usage:
        .. code-block:: py
           status = _generate_doxygen("Doxygen")
           if status:
               raise RuntimeError(status)
        Though a little awkward, this is done to enable the intended caller of this
        method to restore some state before exiting the program (namely, the working
        directory before propagating an exception to ``sphinx-build``).
    '''
    if not isinstance(doxygen_input, six.string_types):
        return "Error: the `doxygen_input` variable must be of type `str`."
    # True -> run plain ``doxygen`` against an on-disk Doxyfile;
    # False -> feed the configuration to ``doxygen -`` over stdin.
    doxyfile = doxygen_input == "Doxyfile"
    try:
        # Setup the arguments to launch doxygen
        if doxyfile:
            args = ["doxygen"]
            kwargs = {}
        else:
            args = ["doxygen", "-"]
            kwargs = {"stdin": PIPE}
        if configs._on_rtd:
            # On RTD, any capturing of Doxygen output can cause buffer overflows for
            # even medium sized projects. So it is disregarded entirely to ensure the
            # build will complete (otherwise, it silently fails after `cat conf.py`)
            devnull_file = open(os.devnull, "w")
            kwargs["stdout"] = devnull_file
            # STDOUT folds stderr into the same (discarded) stream.
            kwargs["stderr"] = STDOUT
        else:
            # TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the
            # `communicate` to hang due to buffer overflows.
            #
            # See excellent synopsis:
            # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
            if six.PY2:
                tempfile_kwargs = {}
            else:
                # encoding argument introduced in python 3
                tempfile_kwargs = {"encoding": "utf-8"}
            tempfile_kwargs["mode"] = "r+"
            tmp_out_file = tempfile.TemporaryFile(
                prefix="doxygen_stdout_buff", **tempfile_kwargs
            )
            tmp_err_file = tempfile.TemporaryFile(
                prefix="doxygen_stderr_buff", **tempfile_kwargs
            )
            # Write to the tempfiles over PIPE to avoid buffer overflowing
            kwargs["stdout"] = tmp_out_file
            kwargs["stderr"] = tmp_err_file
        # Note: overload of args / kwargs, Popen is expecting a list as the first
        # parameter (aka no *args, just args)!
        doxygen_proc = Popen(args, **kwargs)
        # Communicate can only be called once, arrange whether or not stdin has value
        if not doxyfile:
            # In Py3, make sure we are communicating a bytes-like object which is no
            # longer interchangeable with strings (as was the case in Py2).
            if sys.version[0] == "3":
                doxygen_input = bytes(doxygen_input, "utf-8")
            comm_kwargs = {"input": doxygen_input}
        else:
            comm_kwargs = {}
        # Waits until doxygen has completed
        doxygen_proc.communicate(**comm_kwargs)
        # Print out what was written to the tmpfiles by doxygen
        if not configs._on_rtd and not configs.exhaleSilentDoxygen:
            # Doxygen output (some useful information, mostly just enumeration of the
            # configurations you gave it {useful for debugging...})
            if tmp_out_file.tell() > 0:
                tmp_out_file.seek(0)
                print(tmp_out_file.read())
            # Doxygen error (e.g. any warnings, or invalid input)
            if tmp_err_file.tell() > 0:
                # Making them stick out, ideally users would reduce this output to 0 ;)
                # This will print a yellow [~] before every line, but not make the
                # entire line yellow because it's definitively not helpful
                prefix = utils._use_color(
                    utils.prefix("[~]", " "), utils.AnsiColors.BOLD_YELLOW, sys.stderr
                )
                tmp_err_file.seek(0)
                sys.stderr.write(utils.prefix(prefix, tmp_err_file.read()))
        # Close the file handles opened for communication with subprocess
        # NOTE(review): if an exception fires before this point the handles are not
        # explicitly closed here; TemporaryFile objects are unlinked when garbage
        # collected, so this appears acceptable -- confirm.
        if configs._on_rtd:
            devnull_file.close()
        else:
            # Delete the tmpfiles
            tmp_out_file.close()
            tmp_err_file.close()
        # Make sure we had a valid execution of doxygen
        exit_code = doxygen_proc.returncode
        if exit_code != 0:
            raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code))
    except Exception as e:
        # Any failure (spawn error, non-zero exit, I/O error) is reported as a
        # string for the caller to raise after restoring its working directory.
        return "Unable to execute 'doxygen': {0}".format(e)
    # returning None signals _success_
    return None
def _valid_config(config, required):
    '''
    Check presence (or absence) of a doxygen configuration key in
    :data:`~exhale.configs.exhaleDoxygenStdin`.

    **Parameters**
    ``config`` (str)
        The doxygen input we're looking for, e.g. ``"INPUT"`` or
        ``"OUTPUT_DIRECTORY"``.
    ``required`` (bool)
        If ``True``, ``config`` must be present. If ``False``, ``config`` is
        NOT ALLOWED to be present.

    **Return**
    ``bool``
        ``True`` when ``config`` satisfies the ``required`` presence constraint
        against the user-supplied stdin configuration, ``False`` otherwise.
    '''
    # re.escape guards against keys containing regex metacharacters; for the
    # ordinary all-caps doxygen keys it is a no-op, so behavior is unchanged.
    re_template = r"\s*{config}\s*=.*".format(config=re.escape(config))
    found = re.search(re_template, configs.exhaleDoxygenStdin)
    # Present-when-required and absent-when-forbidden both count as valid.
    return (found is not None) if required else (found is None)
########################################################################################
#
##
###
####
##### Primary entry point.
####
###
##
#
########################################################################################
def explode():
    '''
    This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has
    already been applied. It performs minimal sanity checking, and then performs in
    order
    1. Creates a :class:`~exhale.graph.ExhaleRoot` object.
    2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object.
    3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object.
    4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will
       only produce output when :data:`~exhale.configs.verboseBuild` is ``True``).
    This results in the full API being generated, and control is subsequently passed
    back to Sphinx to now read in the source documents (many of which were just
    generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the
    final output.
    '''
    # Quick sanity check to make sure the bare minimum have been set in the configs
    err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly."
    if configs.containmentFolder is None:
        raise RuntimeError(err_msg.format(config="containmentFolder"))
    if configs.rootFileName is None:
        raise RuntimeError(err_msg.format(config="rootFileName"))
    if configs.doxygenStripFromPath is None:
        raise RuntimeError(err_msg.format(config="doxygenStripFromPath"))
    # From here on, we assume that everything else has been checked / configured.
    # BUG FIX: the bare `except:` clauses below were narrowed to `except Exception:`
    # so that SystemExit / KeyboardInterrupt propagate normally instead of being
    # reported as build failures.
    try:
        textRoot = ExhaleRoot()
    except Exception:
        utils.fancyError("Unable to create an `ExhaleRoot` object:")
    try:
        sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML.")))
        start = utils.get_time()
        textRoot.parse()
        end = utils.get_time()
        sys.stdout.write("{0}\n".format(
            utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format(
                utils.time_string(start, end)
            ))
        ))
    except Exception:
        utils.fancyError("Exception caught while parsing:")
    try:
        sys.stdout.write("{0}\n".format(
            utils.info("Exhale: generating reStructuredText documents.")
        ))
        start = utils.get_time()
        textRoot.generateFullAPI()
        end = utils.get_time()
        sys.stdout.write("{0}\n".format(
            utils.progress("Exhale: generated reStructuredText documents in {0}.".format(
                utils.time_string(start, end)
            ))
        ))
    except Exception:
        utils.fancyError("Exception caught while generating:")
    # << verboseBuild
    #    toConsole only prints if verbose mode is enabled
    textRoot.toConsole()
    # allow access to the result after-the-fact
    configs._the_app.exhale_root = textRoot
78fb0646e467b92a38f001788a56ced3c1f8a48d | 3,816 | py | Python | src/bayesian_reliability_comparison.py | rloganiv/bayesian-blackbox | 6a111553200b6aa755149e08174abe1a61d37198 | [
"MIT"
] | 8 | 2019-12-23T13:27:15.000Z | 2021-12-01T13:33:34.000Z | src/bayesian_reliability_comparison.py | rloganiv/bayesian-blackbox | 6a111553200b6aa755149e08174abe1a61d37198 | [
"MIT"
] | 11 | 2020-03-31T11:06:55.000Z | 2022-02-10T00:39:33.000Z | src/bayesian_reliability_comparison.py | disiji/bayesian-blackbox | 6a111553200b6aa755149e08174abe1a61d37198 | [
"MIT"
] | 2 | 2020-01-24T10:21:57.000Z | 2020-02-22T04:41:14.000Z | import argparse
import multiprocessing
import os
import random
import numpy as np
from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce
random.seed(2020)
num_cores = multiprocessing.cpu_count()
NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]
OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"
if __name__ == "__main__":
    # Command-line entry point: parse arguments, validate the dataset, run main().
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
    parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
    parser.add_argument('-ground_truth_type', type=str, default='bayesian',
                        help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
    parser.add_argument('-weight_type', type=str, default='pool',
                        help='weigh each bin with all data or only data seen so far, online or pool')
    parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
    parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')
    # parse_known_args tolerates unrecognized extra flags (e.g. from a sweep runner).
    args, _ = parser.parse_known_args()
    if args.dataset not in DATASET_LIST:
        raise ValueError("%s is not in DATASET_LIST." % args.dataset)
    main(args)
| 41.032258 | 120 | 0.70152 |
78fbbb7e97d40f03f6fe9dcf3d1d397ff5d9dbb9 | 29,044 | py | Python | psyneulink/core/components/functions/statefulfunctions/statefulfunction.py | SamKG/PsyNeuLink | 70558bcd870868e1688cb7a7c424d29ca336f2df | [
"Apache-2.0"
] | null | null | null | psyneulink/core/components/functions/statefulfunctions/statefulfunction.py | SamKG/PsyNeuLink | 70558bcd870868e1688cb7a7c424d29ca336f2df | [
"Apache-2.0"
] | 77 | 2020-10-01T06:27:19.000Z | 2022-03-31T02:03:33.000Z | psyneulink/core/components/functions/statefulfunctions/statefulfunction.py | SamKG/PsyNeuLink | 70558bcd870868e1688cb7a7c424d29ca336f2df | [
"Apache-2.0"
] | null | null | null | #
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# ***************************************** STATEFUL FUNCTION *********************************************************
"""
* `StatefulFunction`
* `IntegratorFunctions`
* `MemoryFunctions`
"""
import abc
import typecheck as tc
import warnings
import numbers
import numpy as np
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter
from psyneulink.core.components.functions.function import Function_Base, FunctionError
from psyneulink.core.components.functions.distributionfunctions import DistributionFunction
from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.context import ContextFlags, handle_external_context
__all__ = ['StatefulFunction']
| 48.895623 | 168 | 0.596302 |
78fc7bd4cfef4c55a9ccedee325481258419cb94 | 11,929 | py | Python | ee/clickhouse/sql/person.py | wanderlog/posthog | a88b81d44ab31d262be07e84a85d045c4e28f2a3 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/person.py | wanderlog/posthog | a88b81d44ab31d262be07e84a85d045c4e28f2a3 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/person.py | wanderlog/posthog | a88b81d44ab31d262be07e84a85d045c4e28f2a3 | [
"MIT"
] | null | null | null | from ee.clickhouse.sql.clickhouse import KAFKA_COLUMNS, STORAGE_POLICY, kafka_engine
from ee.clickhouse.sql.table_engines import CollapsingMergeTree, ReplacingMergeTree
from ee.kafka_client.topics import KAFKA_PERSON, KAFKA_PERSON_DISTINCT_ID, KAFKA_PERSON_UNIQUE_ID
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE
# One-off maintenance statements for the person tables.
TRUNCATE_PERSON_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
DROP_PERSON_TABLE_SQL = f"DROP TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
TRUNCATE_PERSON_DISTINCT_ID_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
TRUNCATE_PERSON_DISTINCT_ID2_TABLE_SQL = (
f"TRUNCATE TABLE IF EXISTS person_distinct_id2 ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
# DDL template shared by the real `person` table and its `kafka_person`
# ingestion twin; {extra_fields} receives the Kafka bookkeeping columns
# (_timestamp/_offset) only for the real table.
PERSONS_TABLE = "person"
PERSONS_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
id UUID,
created_at DateTime64,
team_id Int64,
properties VARCHAR,
is_identified Int8,
is_deleted Int8 DEFAULT 0
{extra_fields}
) ENGINE = {engine}
"""
# ReplacingMergeTree keyed on _timestamp: the newest row per (team_id, id)
# wins after merges, giving last-write semantics for person updates.
PERSONS_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSONS_TABLE, ver="_timestamp")
PERSONS_TABLE_SQL = lambda: (
PERSONS_TABLE_BASE_SQL
+ """Order By (team_id, id)
{storage_policy}
"""
).format(
table_name=PERSONS_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSONS_TABLE_ENGINE(),
extra_fields=KAFKA_COLUMNS,
storage_policy=STORAGE_POLICY(),
)
# Kafka-engine table that consumes person messages; no extra columns here,
# the materialized view below adds _timestamp/_offset when copying rows over.
KAFKA_PERSONS_TABLE_SQL = lambda: PERSONS_TABLE_BASE_SQL.format(
table_name="kafka_" + PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=kafka_engine(KAFKA_PERSON), extra_fields="",
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSONS_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
id,
created_at,
team_id,
properties,
is_identified,
is_deleted,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
# Latest state per person: joins each person row against the max _timestamp
# per id and drops ids whose newest row is flagged deleted. {query} is a
# caller-supplied extra filter appended at the end.
GET_LATEST_PERSON_SQL = """
SELECT * FROM person JOIN (
SELECT id, max(_timestamp) as _timestamp, max(is_deleted) as is_deleted
FROM person
WHERE team_id = %(team_id)s
GROUP BY id
) as person_max ON person.id = person_max.id AND person._timestamp = person_max._timestamp
WHERE team_id = %(team_id)s
AND person_max.is_deleted = 0
{query}
"""
# Subquery form of the above that yields only the person ids.
GET_LATEST_PERSON_ID_SQL = """
(select id from (
{latest_person_sql}
))
""".format(
latest_person_sql=GET_LATEST_PERSON_SQL
)
#
# person_distinct_id table - use this still in queries, but this will eventually get removed.
#
PERSONS_DISTINCT_ID_TABLE = "person_distinct_id"
# Legacy mapping of distinct_id -> person_id. Uses CollapsingMergeTree with a
# _sign column: rows with _sign=-1 cancel out earlier _sign=1 rows, and
# is_deleted is derived from _sign via an ALIAS column.
PERSONS_DISTINCT_ID_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
distinct_id VARCHAR,
person_id UUID,
team_id Int64,
_sign Int8 DEFAULT 1,
is_deleted Int8 ALIAS if(_sign==-1, 1, 0)
{extra_fields}
) ENGINE = {engine}
"""
PERSONS_DISTINCT_ID_TABLE_SQL = lambda: (
PERSONS_DISTINCT_ID_TABLE_BASE_SQL
+ """Order By (team_id, distinct_id, person_id)
{storage_policy}
"""
).format(
table_name=PERSONS_DISTINCT_ID_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=CollapsingMergeTree(PERSONS_DISTINCT_ID_TABLE, ver="_sign"),
extra_fields=KAFKA_COLUMNS,
storage_policy=STORAGE_POLICY(),
)
# :KLUDGE: We default is_deleted to 0 for backwards compatibility for when we drop `is_deleted` from message schema.
# Can't make DEFAULT if(_sign==-1, 1, 0) because Cyclic aliases error.
KAFKA_PERSONS_DISTINCT_ID_TABLE_SQL = lambda: """
CREATE TABLE {table_name} ON CLUSTER '{cluster}'
(
distinct_id VARCHAR,
person_id UUID,
team_id Int64,
_sign Nullable(Int8),
is_deleted Nullable(Int8)
) ENGINE = {engine}
""".format(
table_name="kafka_" + PERSONS_DISTINCT_ID_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=kafka_engine(KAFKA_PERSON_UNIQUE_ID),
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
# The coalesce() lets the view accept both new messages (carrying _sign) and
# old ones (carrying only is_deleted), normalizing everything to _sign.
PERSONS_DISTINCT_ID_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
distinct_id,
person_id,
team_id,
coalesce(_sign, if(is_deleted==0, 1, -1)) AS _sign,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSONS_DISTINCT_ID_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
#
# person_distinct_ids2 - new table!
#
PERSON_DISTINCT_ID2_TABLE = "person_distinct_id2"
# Replacement for person_distinct_id: versioned rows instead of sign-based
# collapsing. The highest `version` per (team_id, distinct_id) wins.
PERSON_DISTINCT_ID2_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
team_id Int64,
distinct_id VARCHAR,
person_id UUID,
is_deleted Int8,
version Int64 DEFAULT 1
{extra_fields}
) ENGINE = {engine}
"""
PERSON_DISTINCT_ID2_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_DISTINCT_ID2_TABLE, ver="version")
PERSON_DISTINCT_ID2_TABLE_SQL = lambda: (
PERSON_DISTINCT_ID2_TABLE_BASE_SQL
+ """
ORDER BY (team_id, distinct_id)
SETTINGS index_granularity = 512
"""
).format(
table_name=PERSON_DISTINCT_ID2_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSON_DISTINCT_ID2_TABLE_ENGINE(),
extra_fields=KAFKA_COLUMNS + "\n, _partition UInt64",
)
KAFKA_PERSON_DISTINCT_ID2_TABLE_SQL = lambda: PERSON_DISTINCT_ID2_TABLE_BASE_SQL.format(
table_name="kafka_" + PERSON_DISTINCT_ID2_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=kafka_engine(KAFKA_PERSON_DISTINCT_ID),
extra_fields="",
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSON_DISTINCT_ID2_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
team_id,
distinct_id,
person_id,
is_deleted,
version,
_timestamp,
_offset,
_partition
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSON_DISTINCT_ID2_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
#
# Static Cohort
#
PERSON_STATIC_COHORT_TABLE = "person_static_cohort"
# Membership rows for manually curated (static) cohorts; one row per
# (cohort_id, person_id) pair.
PERSON_STATIC_COHORT_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
id UUID,
person_id UUID,
cohort_id Int64,
team_id Int64
{extra_fields}
) ENGINE = {engine}
"""
PERSON_STATIC_COHORT_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_STATIC_COHORT_TABLE, ver="_timestamp")
PERSON_STATIC_COHORT_TABLE_SQL = lambda: (
PERSON_STATIC_COHORT_BASE_SQL
+ """Order By (team_id, cohort_id, person_id, id)
{storage_policy}
"""
).format(
table_name=PERSON_STATIC_COHORT_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSON_STATIC_COHORT_TABLE_ENGINE(),
storage_policy=STORAGE_POLICY(),
extra_fields=KAFKA_COLUMNS,
)
TRUNCATE_PERSON_STATIC_COHORT_TABLE_SQL = (
f"TRUNCATE TABLE IF EXISTS {PERSON_STATIC_COHORT_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
# Prefix statement; callers append the VALUES tuples when bulk-inserting.
INSERT_PERSON_STATIC_COHORT = (
f"INSERT INTO {PERSON_STATIC_COHORT_TABLE} (id, person_id, cohort_id, team_id, _timestamp) VALUES"
)
#
# Other queries
#
# Current distinct_id -> person_id mapping from the legacy table: keep, per
# distinct_id, the person_id of the newest non-deleted row.
GET_TEAM_PERSON_DISTINCT_IDS = """
SELECT distinct_id, argMax(person_id, _timestamp) as person_id
FROM (
SELECT distinct_id, person_id, max(_timestamp) as _timestamp
FROM person_distinct_id
WHERE team_id = %(team_id)s %(extra_where)s
GROUP BY person_id, distinct_id, team_id
HAVING max(is_deleted) = 0
)
GROUP BY distinct_id
"""
# Query to query distinct ids using the new table, will be used if 0003_fill_person_distinct_id2 migration is complete
GET_TEAM_PERSON_DISTINCT_IDS_NEW_TABLE = """
SELECT distinct_id, argMax(person_id, version) as person_id
FROM person_distinct_id2
WHERE team_id = %(team_id)s %(extra_where)s
GROUP BY distinct_id
HAVING argMax(is_deleted, version) = 0
"""
# Person ids matching an arbitrary filter. The str.format() call below fills
# in the latest-person subquery now while re-escaping the placeholders
# ({distinct_query} etc.) that are formatted later by the caller.
GET_PERSON_IDS_BY_FILTER = """
SELECT DISTINCT p.id
FROM ({latest_person_sql}) AS p
INNER JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) AS pdi ON p.id = pdi.person_id
WHERE team_id = %(team_id)s
{distinct_query}
{limit}
{offset}
""".format(
latest_person_sql=GET_LATEST_PERSON_SQL,
distinct_query="{distinct_query}",
limit="{limit}",
offset="{offset}",
GET_TEAM_PERSON_DISTINCT_IDS="{GET_TEAM_PERSON_DISTINCT_IDS}",
)
INSERT_PERSON_SQL = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 0
"""
INSERT_PERSON_DISTINCT_ID = """
INSERT INTO person_distinct_id SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, %(_sign)s, now(), 0 VALUES
"""
INSERT_PERSON_DISTINCT_ID2 = """
INSERT INTO person_distinct_id2 (distinct_id, person_id, team_id, is_deleted, version, _timestamp, _offset, _partition) SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, 0, %(version)s, now(), 0, 0 VALUES
"""
# "Deletion" inserts a tombstone row (is_deleted=1); ReplacingMergeTree
# makes it win over older rows for the same id.
DELETE_PERSON_BY_ID = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 1
"""
# Heavyweight mutation (ALTER ... DELETE) that removes the person's events.
DELETE_PERSON_EVENTS_BY_ID = """
ALTER TABLE events DELETE
WHERE distinct_id IN (
SELECT distinct_id FROM person_distinct_id WHERE person_id=%(id)s AND team_id = %(team_id)s
)
AND team_id = %(team_id)s
"""
INSERT_COHORT_ALL_PEOPLE_THROUGH_PERSON_ID = """
INSERT INTO {cohort_table} SELECT generateUUIDv4(), actor_id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM (
SELECT actor_id FROM ({query})
)
"""
INSERT_COHORT_ALL_PEOPLE_SQL = """
INSERT INTO {cohort_table} SELECT generateUUIDv4(), id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM (
SELECT id FROM (
{latest_person_sql}
) as person INNER JOIN (
SELECT person_id, distinct_id FROM ({GET_TEAM_PERSON_DISTINCT_IDS}) WHERE person_id IN ({content_sql})
) as pdi ON person.id = pdi.person_id
WHERE team_id = %(team_id)s
GROUP BY id
)
"""
# Distinct ids whose latest person row satisfies the given property filters.
GET_DISTINCT_IDS_BY_PROPERTY_SQL = """
SELECT distinct_id
FROM (
{GET_TEAM_PERSON_DISTINCT_IDS}
)
WHERE person_id IN
(
SELECT id
FROM (
SELECT id, argMax(properties, person._timestamp) as properties, max(is_deleted) as is_deleted
FROM person
WHERE team_id = %(team_id)s
GROUP BY id
HAVING is_deleted = 0
)
WHERE {filters}
)
"""
GET_DISTINCT_IDS_BY_PERSON_ID_FILTER = """
SELECT distinct_id
FROM ({GET_TEAM_PERSON_DISTINCT_IDS})
WHERE {filters}
"""
# Frequency of each person-property key across a team, most common first.
GET_PERSON_PROPERTIES_COUNT = """
SELECT tupleElement(keysAndValues, 1) as key, count(*) as count
FROM person
ARRAY JOIN JSONExtractKeysAndValuesRaw(properties) as keysAndValues
WHERE team_id = %(team_id)s
GROUP BY tupleElement(keysAndValues, 1)
ORDER BY count DESC, key ASC
"""
GET_ACTORS_FROM_EVENT_QUERY = """
SELECT
{id_field} AS actor_id
{matching_events_select_statement}
FROM ({events_query})
GROUP BY actor_id
{limit}
{offset}
"""
# Marker comment on the legacy column telling async migration 0003 to skip
# backfilling this cluster.
COMMENT_DISTINCT_ID_COLUMN_SQL = (
lambda: f"ALTER TABLE person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}' COMMENT COLUMN distinct_id 'skip_0003_fill_person_distinct_id2'"
)
# Top 20 values of one person property; the inner LIMIT bounds the scan to
# the 100k most recent persons.
SELECT_PERSON_PROP_VALUES_SQL = """
SELECT
value,
count(value)
FROM (
SELECT
{property_field} as value
FROM
person
WHERE
team_id = %(team_id)s AND
is_deleted = 0 AND
{property_field} IS NOT NULL AND
{property_field} != ''
ORDER BY id DESC
LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
"""
# Same as above, but restricted to values matching a caller-supplied ILIKE
# pattern (used for typeahead search).
SELECT_PERSON_PROP_VALUES_SQL_WITH_FILTER = """
SELECT
value,
count(value)
FROM (
SELECT
{property_field} as value
FROM
person
WHERE
team_id = %(team_id)s AND
is_deleted = 0 AND
{property_field} ILIKE %(value)s
ORDER BY id DESC
LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
"""
| 28.200946 | 206 | 0.748093 |
78fcfe3906cb71dfe94a77355e4db2bd1f039142 | 335 | py | Python | scripts/dump_training_data.py | davmre/sigvisa | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | [
"BSD-3-Clause"
] | null | null | null | scripts/dump_training_data.py | davmre/sigvisa | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | [
"BSD-3-Clause"
] | null | null | null | scripts/dump_training_data.py | davmre/sigvisa | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | [
"BSD-3-Clause"
] | null | null | null | from sigvisa.learn.train_coda_models import get_shape_training_data
import numpy as np

# Export one station/channel/band of shape-model training data to plain-text
# files so it can be inspected or consumed outside of sigvisa.
# Fix: `np.float` was a deprecated alias of the builtin `float` and was
# removed in NumPy 1.24 — pass the builtin `float("inf")` instead.
X, y, evids = get_shape_training_data(runid=4, site="AS12", chan="SHZ", band="freq_2.0_3.0", phases=["P",], target="amp_transfer", max_acost=float("inf"), min_amp=-2)
np.savetxt("X.txt", X)
np.savetxt("y.txt", y)
np.savetxt("evids.txt", evids)
| 41.875 | 169 | 0.728358 |
78fe8574d8b2d8646e13f689bf2f902a5d2ca204 | 2,637 | py | Python | shdw/tools/welford.py | wbrandenburger/ShadowDetection | 2a58df93e32e8baf99806555655a7daf7e68735a | [
"MIT"
] | 2 | 2020-09-06T16:45:37.000Z | 2021-04-25T15:16:20.000Z | dl_multi/utils/welford.py | wbrandenburger/MTPIA | 02c773ce60b7efd5b15f270f047a6da5a8f00b7e | [
"MIT"
] | null | null | null | dl_multi/utils/welford.py | wbrandenburger/MTPIA | 02c773ce60b7efd5b15f270f047a6da5a8f00b7e | [
"MIT"
] | 1 | 2020-04-30T03:08:56.000Z | 2020-04-30T03:08:56.000Z | import math
import numpy as np
# plt.style.use('seaborn')
# plt.rcParams['figure.figsize'] = (12, 8)
| 23.972727 | 67 | 0.525597 |
78feca6a377149a92c2667955b4f314e64f31df6 | 819 | py | Python | day3/functions.py | lilbond/bitis | 58e5eeebade6cea99fbf86fdf285721fb602e4ef | [
"MIT"
] | null | null | null | day3/functions.py | lilbond/bitis | 58e5eeebade6cea99fbf86fdf285721fb602e4ef | [
"MIT"
] | null | null | null | day3/functions.py | lilbond/bitis | 58e5eeebade6cea99fbf86fdf285721fb602e4ef | [
"MIT"
] | null | null | null |
# Exercise the demo functions defined earlier in this file.
greet()
greet_again("Hello Again")
greet_again_with_type("One Last Time")
greet_again_with_type(1234)

# multiple types
print(multiple_types(-2))
print(multiple_types(10))

# variable arguments
var_arguments(1, 2, 3)
a = [1, 2, 3]
var_arguments(a)  # the whole list is passed as a single positional argument
var_arguments(*a) # expanding: unpacks the list into three arguments
# Fix: removed a stray bare `v` expression that stood here; `v` is never
# defined and evaluating it raised NameError at runtime.

# keyword/dict argument
b = {"first" : "python", "second" : "python again"}
key_arg(b)
78ff50d0ef3b81ac606726766e87dc4af67964c3 | 480 | py | Python | test.py | KipCrossing/Micropython-AD9833 | c684f5a9543bc5b67dcbf357c50f4d8f4057b2bf | [
"MIT"
] | 11 | 2018-12-13T23:39:18.000Z | 2022-02-24T11:59:36.000Z | test.py | KipCrossing/Micropython-AD9833 | c684f5a9543bc5b67dcbf357c50f4d8f4057b2bf | [
"MIT"
] | 1 | 2019-12-02T20:54:05.000Z | 2019-12-04T00:34:25.000Z | test.py | KipCrossing/Micropython-AD9833 | c684f5a9543bc5b67dcbf357c50f4d8f4057b2bf | [
"MIT"
] | 2 | 2019-05-03T10:58:36.000Z | 2020-02-20T10:21:43.000Z | from ad9833 import AD9833
# DUMMY classes for testing without board
# Code
# NOTE(review): SBI and Pin are not defined in this excerpt — presumably the
# dummy stand-ins mentioned above are declared earlier in the file; confirm
# against the complete source.
SBI1 = SBI()
PIN3 = Pin()
# Drive the AD9833 waveform generator: configure frequency and wave type,
# push the settings to the device, then report the resulting shape name.
wave = AD9833(SBI1, PIN3)
wave.set_freq(14500)
wave.set_type(2)
wave.send()
print(wave.shape_type)
| 13.333333 | 41 | 0.566667 |
600132a2e2c79c041002d7861851e7ef109318b7 | 14,276 | py | Python | tests/test_api_network.py | devicehive/devicehive-plugin-python-template | ad532a57ebf9ae52f12afc98eeb867380707d47d | [
"Apache-2.0"
] | null | null | null | tests/test_api_network.py | devicehive/devicehive-plugin-python-template | ad532a57ebf9ae52f12afc98eeb867380707d47d | [
"Apache-2.0"
] | 1 | 2018-03-07T07:36:44.000Z | 2018-03-07T07:36:44.000Z | tests/test_api_network.py | devicehive/devicehive-plugin-python-template | ad532a57ebf9ae52f12afc98eeb867380707d47d | [
"Apache-2.0"
] | 4 | 2018-03-10T20:59:37.000Z | 2021-10-18T23:25:30.000Z | # Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from six.moves import range
| 44.061728 | 79 | 0.591202 |
6001e3cd1b64684fad98768a1d1677fc7dbf592e | 1,043 | py | Python | filehandler.py | miciux/telegram-bot-admin | feb267ba6ce715b734b1a5911487c1080410a4a9 | [
"MIT"
] | 1 | 2017-04-30T13:12:32.000Z | 2017-04-30T13:12:32.000Z | filehandler.py | miciux/telegram-bot-admin | feb267ba6ce715b734b1a5911487c1080410a4a9 | [
"MIT"
] | null | null | null | filehandler.py | miciux/telegram-bot-admin | feb267ba6ce715b734b1a5911487c1080410a4a9 | [
"MIT"
] | null | null | null | import logging
import abstracthandler
import os
| 32.59375 | 92 | 0.637584 |
6001f3dc9b3e815ad90ab2f8d8d4027fbf828f6c | 6,276 | py | Python | tensorflow_federated/python/learning/federated_evaluation.py | Tensorflow-Devs/federated | 5df96d42d72fa43a050df6465271a38175a5fd7a | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/federated_evaluation.py | Tensorflow-Devs/federated | 5df96d42d72fa43a050df6465271a38175a5fd7a | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/federated_evaluation.py | Tensorflow-Devs/federated | 5df96d42d72fa43a050df6465271a38175a5fd7a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation of federated evaluation."""
import collections
from typing import Callable, Optional
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.framework import optimizer_utils
# Convenience aliases.
SequenceType = computation_types.SequenceType
def build_federated_evaluation(
    model_fn: Callable[[], model_lib.Model],
    broadcast_process: Optional[measured_process.MeasuredProcess] = None,
    use_experimental_simulation_loop: bool = False,
) -> computation_base.Computation:
  """Builds the TFF computation for federated evaluation of the given model.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This
      method must *not* capture TensorFlow tensors or variables and use them.
      The model must be constructed entirely from scratch on each invocation;
      returning the same pre-constructed model each call will result in an
      error.
    broadcast_process: A `tff.templates.MeasuredProcess` that broadcasts the
      model weights on the server to the clients. It must support the
      signature `(input_values@SERVER -> output_values@CLIENTS)` and have
      empty state. If set to default None, the server model is broadcast to
      the clients using the default tff.federated_broadcast.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A federated computation (an instance of `tff.Computation`) that accepts
    model parameters and federated data, and returns the evaluation metrics
    as aggregated by `tff.learning.Model.federated_output_computation`.

  Raises:
    ValueError: If `broadcast_process` is not a `MeasuredProcess`, or if it
      carries server state (evaluation requires a stateless broadcast).
  """
  # Validate the optional broadcast process up front so misconfiguration
  # fails fast, before any graph construction happens.
  if broadcast_process is not None:
    if not isinstance(broadcast_process, measured_process.MeasuredProcess):
      raise ValueError('`broadcast_process` must be a `MeasuredProcess`, got '
                       f'{type(broadcast_process)}.')
    if optimizer_utils.is_stateful_process(broadcast_process):
      raise ValueError(
          'Cannot create a federated evaluation with a stateful '
          'broadcast process, must be stateless, has state: '
          f'{broadcast_process.initialize.type_signature.result!r}')
  # Construct the model first just to obtain the metadata and define all the
  # types needed to define the computations that follow.
  # TODO(b/124477628): Ideally replace the need for stamping throwaway models
  # with some other mechanism.
  with tf.Graph().as_default():
    model = model_fn()
    model_weights_type = model_utils.weights_type_from_model(model)
    batch_type = computation_types.to_type(model.input_spec)
  # NOTE(review): `server_eval` is not defined anywhere in this excerpt; the
  # computation definitions that should sit between the type extraction above
  # and this return appear to be missing/truncated. Verify against the
  # complete source file before relying on this function.
  return server_eval
| 47.18797 | 80 | 0.755736 |
6002ace185388c888ba705ff8de6efa12833e498 | 5,226 | py | Python | pylibcontainer/image.py | joaompinto/pylibcontainer | 794f12e7511dc2452521bad040a7873eff40f50b | [
"Apache-2.0"
] | 7 | 2018-05-14T14:35:29.000Z | 2020-12-04T11:26:19.000Z | pylibcontainer/image.py | joaompinto/pylibcontainer | 794f12e7511dc2452521bad040a7873eff40f50b | [
"Apache-2.0"
] | 8 | 2018-05-16T17:52:09.000Z | 2019-05-26T15:54:45.000Z | pylibcontainer/image.py | joaompinto/pylibcontainer | 794f12e7511dc2452521bad040a7873eff40f50b | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import shutil
import hashlib
import requests
import click
from tempfile import NamedTemporaryFile
from hashlib import sha256
from os.path import expanduser, join, exists, basename
from .utils import HumanSize
from .tar import extract_layer
from . import trust
from . import container
from .colorhelper import print_info, print_error, print_warn, print_success
from .colorhelper import success
from .image_index import get_url
from clint.textui import progress
from dateutil.parser import parse as parsedate
from datetime import datetime
CACHE_PATH = join(expanduser("~"), ".pylibcontainer", "images_cache")
def download(image_url):
    """Fetch *image_url*, reusing the local cache when it is still fresh.

    Returns a ``(sha256_hexdigest, filename)`` tuple for the local copy of
    the image, either served from the cache or freshly downloaded and
    GPG-verified. Exits the process with status 2 when the file cannot be
    fetched and no cache entry exists, and with status 3 when the signature
    of the downloaded file does not verify.
    """
    response = requests.head(image_url)
    # Parse the probe headers defensively: a missing Content-Length or
    # Last-Modified previously crashed int()/parsedate() here, before the
    # remote_is_valid check below could ever run.
    content_length = response.headers.get("Content-Length")
    last_modified_header = response.headers.get("Last-Modified")
    file_size = remote_file_size = int(content_length) if content_length else None
    remote_last_modified = (
        parsedate(last_modified_header).replace(tzinfo=None)
        if last_modified_header
        else None
    )
    remote_is_valid = response.status_code == 200 and file_size and remote_last_modified

    # Check if image is on cache
    cache = Cache()
    cached_image = cache.get(image_url)
    if cached_image:
        if remote_is_valid:
            cache_fn, cache_hash, last_modified, file_size = cached_image
            # Reuse the cached file unless the remote copy changed size or
            # was modified after our cached download.
            if remote_file_size == file_size and remote_last_modified < last_modified:
                print_info("Using file from cache", CACHE_PATH)
                return cache_hash, cache_fn
            print_info("Downloading new remote file because an update was found")
        else:
            print_warn("Unable to check the status for " + image_url)
            print_warn("Assuming local cache is valid")
            # Fix: actually honor the message above and serve the cached
            # image instead of falling through to the fatal exit below.
            cache_fn, cache_hash = cached_image[0], cached_image[1]
            return cache_hash, cache_fn

    # Not cached, and no valid remote information was found
    if not remote_is_valid:
        print_error(
            "Unable to get file, http_code=%s, size=%s, last_modified=%s"
            % (response.status_code, remote_file_size, remote_last_modified)
        )
        exit(2)

    # Download image
    print_info(
        "Downloading image... ",
        "{0} [{1:.2S}]".format(basename(image_url), HumanSize(file_size)),
    )
    remote_sha256 = hashlib.sha256()
    response = requests.get(image_url, stream=True)
    with NamedTemporaryFile(delete=False) as tmp_file:
        # Stream in 1 KiB chunks, hashing as we go so the digest matches
        # exactly what was written to disk.
        for chunk in progress.bar(
            response.iter_content(chunk_size=1024), expected_size=(file_size / 1024) + 1
        ):
            if chunk:
                remote_sha256.update(chunk)
                tmp_file.write(chunk)
        tmp_file.flush()

    # Verify image integrity
    trust_verify = trust.verify(image_url, tmp_file.name, remote_sha256.hexdigest())
    if not trust_verify or not trust_verify.valid or not trust_verify.username:
        print_error("Integrity/authenticity error - GPG signature mismatch!")
        exit(3)
    print("{0:>10}: {1}".format("GPG Signer", success(trust_verify.username)))
    print("{0:>10}: {1}".format("GPG ID", success(trust_verify.pubkey_fingerprint)))
    print("{0:>10}: {1}".format("Creation", success(trust_verify.creation_date)))
    return cache.put(tmp_file.name, image_url)
| 36.291667 | 88 | 0.670876 |
600499594e77d8bfa05380ea38ed8a59d559483e | 987 | py | Python | utest/test_compareimage.py | michel117/robotframework-doctestlibrary | 305b220b73846bd389c47d74c2e0431c7bfaff94 | [
"Apache-2.0"
] | 1 | 2021-07-03T08:04:44.000Z | 2021-07-03T08:04:44.000Z | utest/test_compareimage.py | michel117/robotframework-doctestlibrary | 305b220b73846bd389c47d74c2e0431c7bfaff94 | [
"Apache-2.0"
] | null | null | null | utest/test_compareimage.py | michel117/robotframework-doctestlibrary | 305b220b73846bd389c47d74c2e0431c7bfaff94 | [
"Apache-2.0"
] | null | null | null | from DocTest.CompareImage import CompareImage
import pytest
from pathlib import Path
import numpy
| 25.973684 | 63 | 0.755826 |
60050fc2440b99d0cc01c8e1d51ae9219c2ec9ce | 46 | py | Python | cvstudio/view/widgets/labels_tableview/__init__.py | haruiz/PytorchCvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 32 | 2019-10-31T03:10:52.000Z | 2020-12-23T11:50:53.000Z | cvstudio/view/widgets/labels_tableview/__init__.py | haruiz/CvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 19 | 2019-10-31T15:06:05.000Z | 2020-06-15T02:21:55.000Z | cvstudio/view/widgets/labels_tableview/__init__.py | haruiz/PytorchCvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 8 | 2019-10-31T03:32:50.000Z | 2020-07-17T20:47:37.000Z | from .labels_tableview import LabelsTableView
| 23 | 45 | 0.891304 |
600533785dbd02d51d6674d42a21d63ffcb7660b | 16,243 | py | Python | experiments/solve_different_methods.py | vishalbelsare/ags_nlp_solver | 3558e8aae5507285d0c5e74f163c01d09a9cb805 | [
"MIT"
] | 8 | 2018-10-23T11:19:26.000Z | 2022-01-10T19:18:45.000Z | experiments/solve_different_methods.py | sovrasov/Algorithm-of-Global-Search | 3558e8aae5507285d0c5e74f163c01d09a9cb805 | [
"MIT"
] | null | null | null | experiments/solve_different_methods.py | sovrasov/Algorithm-of-Global-Search | 3558e8aae5507285d0c5e74f163c01d09a9cb805 | [
"MIT"
] | 2 | 2018-10-07T20:02:40.000Z | 2018-10-23T11:19:29.000Z | import functools
import numpy as np
import math
import argparse
import ags_solver
import go_problems
import nlopt
import sys
from Simple import SimpleTuner
import itertools
from scipy.spatial import Delaunay
from scipy.optimize import differential_evolution
from scipy.optimize import basinhopping
from sdaopt import sda
from stochopy import Evolutionary
from pyOpt import Optimization
from pyOpt import MIDACO
import pyOpt
from shgo import shgo
from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass
from benchmark_tools.plot import plot_cmcs
from benchmark_tools.stats import save_stats, compute_stats
from bayes_opt import BayesianOptimization
# Registry mapping each CLI algorithm name to the wrapper class (or a
# pre-configured functools.partial over one) that adapts that solver to the
# common benchmark interface.
# NOTE(review): the *Wrapper classes are not present in this excerpt —
# presumably they are defined earlier in the file; confirm before editing.
algos = {'scd': SCDEWrapper, 'ags': AGSWrapper,
         'agsd': functools.partial(AGSWrapper, mixedFast=True),
         'direct': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT),
         'directl': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT_L),
         'stogo': functools.partial(NLOptWrapper, method=nlopt.GD_STOGO),
         'mlsl': functools.partial(NLOptWrapper, method=nlopt.G_MLSL_LDS),
         'crs': functools.partial(NLOptWrapper, method=nlopt.GN_CRS2_LM),
         'simple': SimpleWrapper, 'scb': SCBasinhoppingWrapper,
         'sda': SDAWrapper, 'stochopy': StochOpyWrapper, 'shgo': SHGOWrapper,
         'pyopt': PyOptWrapper}
# Human-readable captions for plots/reports, keyed by the same CLI names.
algo2cature = {'scd': 'Scipy DE', 'ags': 'AGS', 'direct': 'DIRECT', 'agsd': 'AGSd',
               'directl': 'DIRECTl', 'simple': 'Simple',
               'stogo': 'StoGO', 'mlsl': 'MLSL', 'crs':'CRS', 'scb': 'Scipy B-H',
               'sda': 'SDA', 'stochopy': 'Stochopy', 'pysot': 'PySOT', 'pyopt': 'PyOpt', 'shgo': 'SHGO'}
# Per-dimension accuracy thresholds applied when --serg_eps is requested.
serg_eps = {2: 0.01, 3: 0.01, 4: math.pow(1e-6, 1./4), 5: math.pow(1e-7, 1./5)}
if __name__ == '__main__':
    # Command-line entry point: parse benchmark options and dispatch to main().
    parser = argparse.ArgumentParser(description='Sample for AGS solver')
    parser.add_argument('--max_iters', type=int, default=10000, help='limit of iterations for the method')
    parser.add_argument('--problems_class', type=str, choices=['grish','gklss','gklsh'], default='grish')
    parser.add_argument('--algo', type=str, choices=algos.keys(), default='scd')
    parser.add_argument('--problems_dim', type=int, default=2)
    parser.add_argument('--verbose', action='store_true', help='Print additional info to console')
    parser.add_argument('--dist_stop', action='store_true', help='Stop algorithm then the next point is close enough to the optimum')
    parser.add_argument('--serg_eps', action='store_true')
    parser.add_argument('--stats_fname', type=str, default='')
    # NOTE(review): main() is not defined in this excerpt; confirm it exists
    # in the full file.
    main(parser.parse_args())
| 38.490521 | 134 | 0.623284 |
6006beb0722b92f412b4c3f2503f64cd54b33641 | 8,288 | py | Python | src/py/fc.py | mattyschell/geodatabase-toiler | c8231999c3156bf41f9b80f151085afa97ba8586 | [
"CC0-1.0"
] | null | null | null | src/py/fc.py | mattyschell/geodatabase-toiler | c8231999c3156bf41f9b80f151085afa97ba8586 | [
"CC0-1.0"
] | 4 | 2021-04-05T16:03:30.000Z | 2022-03-02T21:28:06.000Z | src/py/fc.py | mattyschell/geodatabase-toiler | c8231999c3156bf41f9b80f151085afa97ba8586 | [
"CC0-1.0"
] | null | null | null | import arcpy
import logging
import pathlib
import subprocess
import gdb
import cx_sde
| 39.279621 | 132 | 0.532939 |
60072c1b66f06352dd6eb1cdc5675eed8c8c537e | 602 | py | Python | desafiosCursoEmVideo/ex004.py | gomesGabriel/Pythonicos | b491cefbb0479dd83fee267304d0fa30b99786a5 | [
"MIT"
] | 1 | 2019-09-02T12:14:58.000Z | 2019-09-02T12:14:58.000Z | desafiosCursoEmVideo/ex004.py | gomesGabriel/Pythonicos | b491cefbb0479dd83fee267304d0fa30b99786a5 | [
"MIT"
] | null | null | null | desafiosCursoEmVideo/ex004.py | gomesGabriel/Pythonicos | b491cefbb0479dd83fee267304d0fa30b99786a5 | [
"MIT"
] | null | null | null | n = input('Digite algo: ')
# Print the primitive type of the input, then each str.is*() classification.
# (The output of each pair is identical to printing the label and the
# predicate result as two positional arguments.)
print('O tipo primitivo da varivel : ', type(n))
_classificacoes = (
    ('O que foi digitado alfa numrico? ', n.isalnum()),
    ('O que foi digitado alfabtico? ', n.isalpha()),
    ('O que foi digitado um decimal? ', n.isdecimal()),
    ('O que foi digitado minsculo? ', n.islower()),
    ('O que foi digitado numrico? ', n.isnumeric()),
    ('O que foi digitado pode ser impresso? ', n.isprintable()),
    ('O que foi digitado apenas espao? ', n.isspace()),
    ('O que foi digitado est capitalizada? ', n.istitle()),
    ('O que foi digitado maisculo? ', n.isupper()),
)
for _pergunta, _resultado in _classificacoes:
    print(_pergunta, _resultado)
| 50.166667 | 64 | 0.694352 |
60072f56e1c7453c15f0f4342de268f2dd1b42f7 | 640 | py | Python | Machine learning book/3 - MultiLayer Perceptron/test_regression.py | dalmia/Lisa-Lab-Tutorials | ee1b0b4fcb82914085420bb289ebda09f248c8d1 | [
"MIT"
] | 25 | 2017-01-14T08:17:23.000Z | 2022-02-26T13:53:17.000Z | Machine learning book/3 - MultiLayer Perceptron/test_regression.py | dalmia/Lisa-Lab-Tutorials | ee1b0b4fcb82914085420bb289ebda09f248c8d1 | [
"MIT"
] | 1 | 2020-06-20T02:49:16.000Z | 2020-06-20T02:49:16.000Z | Machine learning book/3 - MultiLayer Perceptron/test_regression.py | dalmia/Lisa-Lab-Tutorials | ee1b0b4fcb82914085420bb289ebda09f248c8d1 | [
"MIT"
] | 6 | 2017-08-24T08:40:41.000Z | 2020-03-17T00:01:56.000Z | from numpy import *
import numpy as np
import matplotlib.pyplot as plt
# Project-local multi-layer perceptron implementation.
from mlp import mlp
# Training data: 40 evenly spaced points on [0, 1] with a noisy sin + cos
# target. (ones/linspace/sin/cos/pi/transpose come from the file's earlier
# `from numpy import *`.)
x = ones((1, 40)) * linspace(0, 1, 40)
t = sin(2 * pi * x) + cos(2 * pi * x) + np.random.randn(40) * 0.2
# Reshape to column vectors: one sample per row.
x = transpose(x)
t = transpose(t)
# Hyper-parameters: hidden units, learning rate, training iterations.
n_hidden = 3
eta = 0.25
n_iterations = 101
# Visualize the raw dataset before training.
plt.plot(x, t, '.')
plt.show()
# Interleaved split: every 2nd point for training, and alternating quarters
# of the remainder for testing and validation.
train = x[0::2, :]
test = x[1::4, :]
valid = x[3::4, :]
train_targets = t[0::2, :]
test_targets = t[1::4, :]
valid_targets = t[3::4, :]
# Train with a linear output layer, then rely on early stopping against the
# validation split to report the best achieved error.
net = mlp(train, train_targets, n_hidden, out_type='linear')
net.mlptrain(train, train_targets, eta, n_iterations)
best_err = net.earlystopping(train, train_targets, valid, valid_targets, eta)
| 21.333333 | 77 | 0.65 |
6007f9657a1d3a19cb045cca61bc7716d4f2e22f | 144 | py | Python | gomoku/networks/__init__.py | IllIIIllll/reinforcement-learning-omok | 1c76ba76c203a3b7c99095fde0626aff45b1b94b | [
"Apache-2.0"
] | 1 | 2020-07-07T14:41:35.000Z | 2020-07-07T14:41:35.000Z | gomoku/networks/__init__.py | IllIIIllll/reinforcement-learning-omok | 1c76ba76c203a3b7c99095fde0626aff45b1b94b | [
"Apache-2.0"
] | 1 | 2020-08-27T08:22:03.000Z | 2020-08-27T08:22:03.000Z | gomoku/networks/__init__.py | IllIIIllll/gomoku | 1c76ba76c203a3b7c99095fde0626aff45b1b94b | [
"Apache-2.0"
] | null | null | null | # 2020 . all rights reserved.
# <llllllllll@kakao.com>
# Apache License 2.0
from .small import *
from .medium import *
from .large import * | 20.571429 | 33 | 0.708333 |
60099617ee434da572759077c7ea7be632ca1953 | 2,020 | py | Python | alipay/aop/api/domain/AlipayEbppInvoiceAuthSignModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 1 | 2022-03-07T06:11:10.000Z | 2022-03-07T06:11:10.000Z | alipay/aop/api/domain/AlipayEbppInvoiceAuthSignModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayEbppInvoiceAuthSignModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 1 | 2021-10-05T03:01:09.000Z | 2021-10-05T03:01:09.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 28.450704 | 87 | 0.614851 |
6009cc193c9712b5bd11ff6e9909ef949c64bd53 | 12,219 | py | Python | sdk/python/tekton_pipeline/models/v1beta1_embedded_task.py | jmcshane/experimental | 3c47c7e87bcdadc6172941169f3f24fc3f159ae0 | [
"Apache-2.0"
] | null | null | null | sdk/python/tekton_pipeline/models/v1beta1_embedded_task.py | jmcshane/experimental | 3c47c7e87bcdadc6172941169f3f24fc3f159ae0 | [
"Apache-2.0"
] | null | null | null | sdk/python/tekton_pipeline/models/v1beta1_embedded_task.py | jmcshane/experimental | 3c47c7e87bcdadc6172941169f3f24fc3f159ae0 | [
"Apache-2.0"
] | 1 | 2020-07-30T15:55:45.000Z | 2020-07-30T15:55:45.000Z | # Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return True
return self.to_dict() != other.to_dict()
| 31.903394 | 222 | 0.628857 |
6009fd2ff57dced5db01fbc3398709e54f5b6bf1 | 3,122 | py | Python | tzp.py | gmlunesa/zhat | 3bf62625d102bd40274fcd39c91f21c169e334a8 | [
"MIT"
] | 1 | 2018-06-14T04:00:43.000Z | 2018-06-14T04:00:43.000Z | tzp.py | gmlunesa/zhat | 3bf62625d102bd40274fcd39c91f21c169e334a8 | [
"MIT"
] | null | null | null | tzp.py | gmlunesa/zhat | 3bf62625d102bd40274fcd39c91f21c169e334a8 | [
"MIT"
] | 1 | 2020-11-01T13:06:56.000Z | 2020-11-01T13:06:56.000Z | import zmq
import curses
import argparse
import configparser
import threading
import time
from curses import wrapper
from client import Client
from ui import UI
if '__main__' == __name__:
try:
args = parse_args()
wrapper(main)
except KeyboardInterrupt as e:
pass
except:
raise
| 26.235294 | 99 | 0.65663 |
600a51b33a2bb56f14ea62360d44dde1324b6215 | 1,968 | py | Python | anonlink-entity-service/backend/entityservice/tasks/solver.py | Sam-Gresh/linkage-agent-tools | f405c7efe3fa82d99bc047f130c0fac6f3f5bf82 | [
"Apache-2.0"
] | 1 | 2020-05-19T07:29:31.000Z | 2020-05-19T07:29:31.000Z | backend/entityservice/tasks/solver.py | hardbyte/anonlink-entity-service | 3c1815473bc8169ca571532c18e0913a45c704de | [
"Apache-2.0"
] | null | null | null | backend/entityservice/tasks/solver.py | hardbyte/anonlink-entity-service | 3c1815473bc8169ca571532c18e0913a45c704de | [
"Apache-2.0"
] | null | null | null | import anonlink
from anonlink.candidate_generation import _merge_similarities
from entityservice.object_store import connect_to_object_store
from entityservice.async_worker import celery, logger
from entityservice.settings import Config as config
from entityservice.tasks.base_task import TracedTask
from entityservice.tasks.permutation import save_and_permute
| 48 | 117 | 0.760163 |
600ca7297733fdb91cfe20784d6ed193a6eb6593 | 3,239 | py | Python | portal/migrations/0007_auto_20170824_1341.py | nickmvincent/ugc-val-est | b5cceda14ef5830f1befaddfccfd90a694c9677a | [
"MIT"
] | 2 | 2019-11-13T19:56:05.000Z | 2020-09-05T03:21:14.000Z | portal/migrations/0007_auto_20170824_1341.py | nickmvincent/ugc-val-est | b5cceda14ef5830f1befaddfccfd90a694c9677a | [
"MIT"
] | 6 | 2018-03-02T16:49:20.000Z | 2021-06-10T18:55:02.000Z | portal/migrations/0007_auto_20170824_1341.py | nickmvincent/ugc-val-est | b5cceda14ef5830f1befaddfccfd90a694c9677a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-24 13:41
from __future__ import unicode_literals
from django.db import migrations, models
| 33.739583 | 76 | 0.592467 |