blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55eb160926cb77920b63568d4be18c54eeebdb2d | 41b59a9c8381fa3a92f5d2c37c91261afb9c82c4 | /QCDEventShape/2017/MC/test/crab_bin_py8_3200_inf.py | ad911d60a95de92ad286c8ea8f0a46bafbafeab1 | [] | no_license | Sumankkundu/ChargedParticle | c6d4f90b55df49321df2ecd758bb1f39db896f8c | eb5bada24b37a58ded186d6e5d2d7bd00898fefe | refs/heads/master | 2023-07-15T03:34:33.377203 | 2021-08-31T05:01:32 | 2021-08-31T05:01:32 | 231,091,587 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | #from CRABClient.UserUtilities import config, getUsernameFromSiteDB
from CRABClient.UserUtilities import config
config = config()
config.General.requestName ='ESVQCD_UL_Ptbinned_3200toinf_tuneCP5_bin'
#config.General.workArea = 'crab_projects_1'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'Run_QCD_test_miaod_v2_106x_mc_cfg.py'
#config.JobType.maxMemoryMB = 9000 # Default is 2500 : Max I have used is 13000
#config.JobType.maxJobRuntimeMin = 2750 #Default is 1315; 2750 minutes guaranteed to be available; Max I have used is 9000
#config.JobType.numCores = 4
config.JobType.inputFiles= [
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_JRV2_MC_PtResolution_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_JRV2_MC_SF_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunB_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunC_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunD_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunE_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunF_V5_DATA_UncertaintySources_AK4PFchs.txt"
]
config.Data.inputDataset ='/QCD_Pt_3200toInf_TuneCP5_13TeV_pythia8/RunIISummer19UL17MiniAOD-106X_mc2017_realistic_v6-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'EventBased'
#config.Data.splitting = 'LumiBased'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'Automatic'
#config.Data.unitsPerJob = 10 # for Automatic must be 180-2700 range
config.Data.unitsPerJob = 1 #For Filebased or Lumibased
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
#config.Data.outLFNDirBase = '/store/user/%s/' % (sukundu)
config.Data.publication = True
config.Data.outputDatasetTag = 'MC_PY82017UL_Bin'
config.JobType.allowUndistributedCMSSW = True
config.Site.storageSite ='T2_IN_TIFR'
| [
"skundu91phys@gmail.com"
] | skundu91phys@gmail.com |
8e5759f7185b426930f438446f26ae2be4b62f17 | 87800bce1a4829402f1f37932e0f01b3feb87fa1 | /viminidb/videbugging.py | ca32c92bee1baf28ba396517ab65e8e468614e23 | [] | no_license | sixkey/py-scripts | f5ca51434bf45faa79692933d5c4824b8cfe59ae | 7166515ba1dd30b7a48bb82c5d39710774200366 | refs/heads/main | 2023-03-15T08:27:20.363569 | 2021-03-02T18:04:56 | 2021-03-02T18:04:56 | 333,176,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | def create_loud_function(function):
def loud_function(*args, **kwargs):
res = function(*args, **kwargs)
print(f"{function.__name__}(" +
f"{', '.join([str(x) for x in args])}" +
(f"{', '.join([str(x) for x in kwargs])}" if kwargs else "") +
f") = {str(res)}")
return res
return loud_function
def create_louder_function(function):
def louder_function(*args, **kwargs):
print(f"\n{function.__name__}(" +
f"{', '.join([str(x) for x in args])}" +
(f"{', '.join([str(x) for x in kwargs])}" if kwargs else "") +
")")
res = function(*args, **kwargs)
print(str(res))
return res
return louder_function
| [
"filipku4a@gmail.com"
] | filipku4a@gmail.com |
64592d3ee4f2219d3ea1f98f687bdb1984f866da | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02780/s702903623.py | ef5c9f02ab956fe90728da489ecd4bc87f90841f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | n,k = map(int,input().split())
P = list(map(int,input().split()))
P[0] = (P[0]+1.)/2
for i in range(1,len(P)):
P[i] = (P[i]+1.)/2
P[i] = P[i-1]+P[i]
ans = 0.
if n==1:
ans = P[0]
elif len(P)-k==0:
ans = P[k-1]
else:
for i in range(len(P)-k):
ans = max(ans,(P[i+k]-P[i]))
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5208906c09939f76f644bef4f999ef65b8a1cfae | 37438771565238194ea997fa65619bd32c823706 | /catkin_ws/17-11-16/LPH/build/catkin_generated/order_packages.py | 24ce42469160b8cc3411cbaef6a5190b3592e0f2 | [] | no_license | Aaron9477/restore | b040b8be695c513946c0243c4acb735f427d8bba | 8dc13ed7cf0c4e5cde911169d11e330d826f40bd | refs/heads/master | 2021-09-15T10:50:59.969952 | 2018-05-31T03:11:55 | 2018-05-31T03:11:55 | 110,834,815 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/zq610/LPH/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/opt/ros/kinetic".split(';') if "/opt/ros/kinetic" != "" else []
| [
"869788668@qq.com"
] | 869788668@qq.com |
31b6e3eaebde03b0a8cc8012f96bd4403011b0ee | e9aef5affd441b13a443dc2e0ae1613db28870e7 | /Test_sum_two_lowest_numbers.py | d2f714b1457d17ee5f7655d1e73cc3d2944827b1 | [] | no_license | glicodin21/Bundyusoft_Test | 06327b051ee7453bfd3bd3037c8e67a1dc86a6ec | 551cd640fc95f03f9b4c8682fc5f4bd28839da6c | refs/heads/master | 2020-07-10T13:38:11.268176 | 2019-08-30T14:29:31 | 2019-08-30T14:29:31 | 204,274,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | import unittest
from Sum_two_lowest_numbers import sum_two_lowest_numbers
class Test(unittest.TestCase):
def test_sum_two_lowest_numbers(self):
self.assertEqual(sum_two_lowest_numbers(
[1, 2, 3]), 3, "Should be 3")
self.assertEqual(sum_two_lowest_numbers(
[1000, 2000, 3000]), 3000, "Should be 3000")
self.assertEqual(sum_two_lowest_numbers(
[-7234123, 857322, 115]), -7234008, "Should be -7234008")
def test_pick_two_lowest_numbers_with_len_validation(self):
self.assertEqual(sum_two_lowest_numbers(
[10, 1, 9, 2]), 3, "Should be 3")
self.assertEqual(sum_two_lowest_numbers(
[-10, 1, 9, -2]), -12, "Should be -12")
self.assertEqual(sum_two_lowest_numbers(
[0, 0, 0]), 0, "Should be 0")
def test_raises_value_error_if_list_is_empty(self):
self.assertRaises(ValueError,
sum_two_lowest_numbers, [])
def test_raises_value_error_if_list_is_contain_one_element(self):
self.assertRaises(ValueError,
sum_two_lowest_numbers, [1])
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | glicodin21.noreply@github.com |
59e0938b9889a36090c117f11970b5d8d271260e | 24eb991aa1a5bad9d89e7c7fdafc0db0f65f820e | /Janken/JankenOutput.py | 9dd10476bbadfd3429305c27345446e4e81f2710 | [] | no_license | yemarn510/YM_Python | 1e4ba16775b920f9ab84991d6d61c98d437ccfde | 72181672d800ec59bac06978cab08a59e734933e | refs/heads/master | 2022-04-15T07:02:11.085185 | 2020-03-15T16:09:00 | 2020-03-15T16:09:00 | 247,501,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | """
This class is for printing message and showing who is
"""
from random import randrange
def show_winner(player_one_count, player_two_count):
"""
This is the function for outputting who is the winner
args : player_one_count : the number of games player 1 won
: player_two_count : the number of games player 2 won
"""
print("The Score is {}-{}. ".format(str(player_one_count),
str(player_two_count)),
end='')
if player_one_count > player_two_count:
print("Player 1 wins.")
elif player_one_count < player_two_count:
print("player 2 wins.")
else:
print("The Game is Draw")
| [
"yemarn.510@gmail.com"
] | yemarn.510@gmail.com |
b272becc0f407ed87dd639981bfb2e547cf74bd2 | b0295fc59e46a71c3235f0f19c2631d92678275d | /1-50/016.py | 1383b1a10202a4cc83da8030163d3d04c66bf9e3 | [] | no_license | Marmot93/Project-Euler | 67db19e67d29d9291bdfb155961d11bf027d5635 | a23ba931a56443b92c74a5bb048bfdbe881ad303 | refs/heads/master | 2021-01-12T02:13:51.395321 | 2018-02-08T09:02:45 | 2018-02-08T09:02:45 | 82,549,491 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | """2的1000次方各位数之和"""
# 送分题,一行代码解决问题
num = sum([int(i) for i in str(2 ** 1000)])
print('答案是: ' + str(num))
| [
"654332905@qq.com"
] | 654332905@qq.com |
988e2af58befb5de29f610f0a755afa3463f72dd | 6d64861c997a710b95c3378a2a5283ec85d110d5 | /setup.py | 8d7f4e8b5a1e5b1fa6d67cfe991169be68cd3a4c | [
"Apache-2.0"
] | permissive | badmutex/openstack-client-shell | 1f761b9af161d5275e4e0dbce9b67bf9e7432014 | 10c1b4ad58a2562a0d23be4f6d34a36237cbc6f2 | refs/heads/master | 2021-05-29T01:54:49.849567 | 2015-06-12T19:06:53 | 2015-06-12T19:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from setuptools import setup, find_packages
import os.path
# IMPORTANT: use semantic versioning
# http://semver.org
VERSION = '0.4.4'
NAME = 'openstack-client-shell'
# #################################################################### version
module_dir = 'openstack'
version_file = os.path.join(module_dir, 'version.py')
version_module_contents = """\
# WARNING
# This file is automatically generated by setup.py
# Do not modify by hand
version = {version}
""".format(version=VERSION)
with open(version_file, 'w') as fd:
fd.write(version_module_contents)
# #################################################################### setup
setup(name=NAME,
version=VERSION,
description="OpenStack API using shell commands",
author="Badi' Abdul-Wahid",
author_email='abdulwahidc@gmail.com',
packages=find_packages(),
)
| [
"abdulwahidc@gmail.com"
] | abdulwahidc@gmail.com |
a849badaacd47915c426af336330853080f870d0 | 19e9e246100603b507005dec0c82cfd3d0d08e75 | /settings.py | decd4426e821649c4f3b4c02a4ef00c8b42451bb | [
"MIT"
] | permissive | embrace-inpe/swds-api-downloader | e0ab31860c2d5870486ebaf99a71a26f7b954700 | f4a4f40fca6de713d6eb1d26dc29e36c094ecb32 | refs/heads/master | 2020-04-12T15:20:27.686621 | 2019-04-02T13:08:40 | 2019-04-02T13:08:40 | 162,577,734 | 5 | 4 | MIT | 2019-04-02T13:08:40 | 2018-12-20T12:38:00 | Python | UTF-8 | Python | false | false | 458 | py | # Search filters
# You must passing these values if aren't using the command line args
SEARCH = {
'application': 2,
'start_date': '2019-03-01',
'end_date': '2019-03-01',
'resolution': None,
'station': 2,
'swfilter': 7,
'swtype': None,
'network': None,
'equipment': None
}
# Path to save the files
PATH_TO_SAVE = './tmp/imager/'
# Credentials
# the username key can be your username or e-mail
USERNAME = ''
PASSWORD = ''
| [
"silviolleite@gmail.com"
] | silviolleite@gmail.com |
310ba0cb9368a175620ca3cbcbd62104bf3f9f8b | edc1f1369794a4a1c499c6e9d5fe49a712657611 | /algorithms/leetcode_all/560.subarray-sum-equals-k/subarray-sum-equals-k.py | 74c28e9996672f15fe435da46bf9edd7cf5ffdc2 | [] | no_license | williamsyb/mycookbook | 93d4aca1a539b506c8ed2797863de6da8a0ed70f | dd917b6eba48eef42f1086a54880bab6cd1fbf07 | refs/heads/master | 2023-03-07T04:16:18.384481 | 2020-11-11T14:36:54 | 2020-11-11T14:36:54 | 280,005,004 | 2 | 0 | null | 2023-03-07T02:07:46 | 2020-07-15T23:34:24 | Python | UTF-8 | Python | false | false | 379 | py | class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
preSum = ans = 0
visit = {0: 1}
for i, n in enumerate(nums):
preSum += n
ans += visit.get(preSum - k, 0)
visit[preSum] = visit.get(preSum, 0) + 1
return ans | [
"william_sun1990@hotmail.com"
] | william_sun1990@hotmail.com |
9243f350125d286dd52323819b4b4de309f7bcd2 | e09439c9dd7ff01caae74e08ea333a8a11eb8f57 | /src/spectrosegment/CNN_train/10_train.py | 50068b546710be323ad3afd786fc910d18a6b58c | [] | no_license | muachilin/Freesound-General-Purpose-Audio-Tagging-Challenge | f57f81e08bfc7b2d3ca4fe03dc0aa17d9ea812b0 | 18fdd2d9e5dacccce3eef60c0a454e52777bd453 | refs/heads/master | 2020-09-17T01:10:19.274061 | 2019-11-25T13:34:24 | 2019-11-25T13:34:24 | 223,942,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | import numpy as np
import pickle
from sys import argv
import os
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import PReLU
from keras.layers import Conv2D, MaxPooling2D, Flatten, AveragePooling2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, Adam
from keras.utils import np_utils, plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import xgboost as xgb
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
#sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
#set_session(sess)
def cnn_model():
model = Sequential()
model.add(Conv2D(100, (10, 3), input_shape = (128, 10, 1), padding = 'same') )
model.add(PReLU())
model.add(BatchNormalization())
model.add(MaxPooling2D((2,2))) # 64 * 5
model.add(Dropout(0.35))
model.add(Conv2D(150, (10, 3), padding = 'same'))
model.add(PReLU())
model.add(BatchNormalization())
model.add(MaxPooling2D((2,2))) # 32 * 2
model.add(Dropout(0.35))
model.add(Conv2D(200, (10, 3), padding = 'same'))
model.add(PReLU())
model.add(BatchNormalization())
model.add(MaxPooling2D((2,2))) # 32 * 2
model.add(Dropout(0.35))
model.add(Conv2D(300, (10, 3), padding = 'same'))
model.add(PReLU())
model.add(BatchNormalization(axis = -1))
#model.add(MaxPooling2D((2,2))) # 16 * 1
model.add(Dropout(0.35))
model.add(Conv2D(400, (10, 3), padding = 'same'))
model.add(PReLU())
#model.add(MaxPooling2D((2,2)))
model.add(Dropout(0.35))
model.add(Flatten())
model.add(Dense(units = 200, activation = 'relu'))
model.add(PReLU(alpha_initializer='zeros'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(units = 100, activation = 'relu'))
model.add(PReLU(alpha_initializer='zeros'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(units = 41, activation = 'softmax'))
model.summary()
return model
def train(modelname):
train_X0 = np.load('train_X_verified.npy')
train_Y0 = np.load('train_Y_verified.npy')
train_X0 = train_X0.reshape(train_X0.shape[0],128,10,1)
train_Y0 = train_Y0.reshape(train_Y0.shape[0],train_Y0.shape[1])
#train_X1 = np.load('train_X_nonveri.npy')
#train_Y1 = np.load('train_Y_nonveri.npy')
#train_X1 = train_X1.reshape(train_X1.shape[0],128,10,1)
#train_Y1 = train_Y1.reshape(train_Y1.shape[0],train_Y1.shape[1])
#train_X = np.concatenate((train_X0[:81876],train_X1[:300000]), axis=0)
#train_Y = np.concatenate((train_Y0[:81876],train_Y1[:300000]), axis=0)
np.random.seed(1200)
index = np.random.permutation(len(train_X0))
train_X0 = train_X0[index]
train_Y0 = train_Y0[index]
train_X = train_X0[13000:]
train_Y = train_Y0[13000:]
val_X = train_X0[:13000]
val_Y = train_Y0[:13000]
model = cnn_model()
checkpoint =[ModelCheckpoint('models/'+modelname, # model filename
monitor='val_loss', # quantity to monitor
verbose=1, # verbosity - 0 or 1
save_best_only= True, # The latest best model will not be overwritten
mode='auto'), # The decision to overwrite model is made
EarlyStopping(monitor = 'val_loss',
patience = 20,
verbose = 0)]
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(train_X,train_Y,epochs=150,batch_size=128,validation_data=(val_X,val_Y),verbose=1,callbacks=checkpoint)
def retrain(modelname):
train_X0 = np.load('train_X_verified.npy')
train_Y0 = np.load('train_Y_verified.npy')
train_X0 = train_X0.reshape(train_X0.shape[0],128,10,1)
train_Y0 = train_Y0.reshape(train_Y0.shape[0],train_Y0.shape[1])
train_X1 = np.load('X_nonveri_filtered.npy')
train_Y1 = np.load('Y_nonveri_filtered.npy')
train_X1 = train_X1.reshape(train_X1.shape[0],128,10,1)
train_Y1 = train_Y1.reshape(train_Y1.shape[0],train_Y1.shape[1])
total_X = np.concatenate((train_X0,train_X1), axis=0)
total_Y = np.concatenate((train_Y0,train_Y1), axis=0)
total_len = total_X.shape[0]
print(total_Y.shape)
print(total_X.shape)
print(total_len)
model = cnn_model()
checkpoint =[ModelCheckpoint('models/'+modelname, # model filename
monitor='val_loss', # quantity to monitor
verbose=0, # verbosity - 0 or 1
save_best_only= True, # The latest best model will not be overwritten
mode='auto'), # The decision to overwrite model is made
EarlyStopping(monitor = 'val_loss',
patience = 20,
verbose = 0)]
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(total_X[int(np.floor(total_len*0.1)):],total_Y[int(np.floor(total_len*0.1)):],epochs=100,batch_size=128,validation_data=(total_X[:int(np.floor(total_len*0.1))],total_Y[:int(np.floor(total_len*0.1))]),verbose=1,callbacks=checkpoint)
def main():
modelname = argv[1]
if argv[2] == 'train':
train(modelname)
elif argv[2] == 'retrain':
retrain(modelname)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | muachilin.noreply@github.com |
835f35f32d97ac1b55d4dda8b712add353ad0796 | 66052f5ba08ddac0a56ee140af17cf78b1ff1174 | /PLURALSIGHT_BEGINNERS/lib/python3.9/site-packages/holoviews/tests/plotting/matplotlib/testpointplot.py | ad2dbfe315b9cd12e93a78996c55f6e2f0f001f8 | [] | no_license | enriquefariasrdz/Python | 34704ceed001bbe8a23471eebefbe536b00031a5 | b9191f7ad87b709a1b83c5cb3797a866b56aaa0d | refs/heads/master | 2022-12-26T03:06:26.481456 | 2022-04-20T14:09:57 | 2022-04-20T14:09:57 | 27,020,899 | 1 | 1 | null | 2022-12-18T21:02:43 | 2014-11-23T03:33:52 | Python | UTF-8 | Python | false | false | 14,958 | py | import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.core.spaces import HoloMap
from holoviews.element import Points
from .testplot import TestMPLPlot, mpl_renderer
from ..utils import ParamLogStream
try:
from matplotlib import pyplot
except:
pass
class TestPointPlot(TestMPLPlot):
def test_points_non_numeric_size_warning(self):
data = (np.arange(10), np.arange(10), list(map(chr, range(94,104))))
points = Points(data, vdims=['z']).opts(plot=dict(size_index=2))
with ParamLogStream() as log:
mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ('z dimension is not numeric, '
'cannot use to scale Points size.\n')
self.assertEqual(log_msg, warning)
def test_points_cbar_extend_both(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1,2)))
plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
self.assertEqual(plot.handles['cbar'].extend, 'both')
def test_points_cbar_extend_min(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1, None)))
plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
self.assertEqual(plot.handles['cbar'].extend, 'min')
def test_points_cbar_extend_max(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(None, 2)))
plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
self.assertEqual(plot.handles['cbar'].extend, 'max')
def test_points_cbar_extend_clime(self):
img = Points(([0, 1], [0, 3])).opts(style=dict(clim=(None, None)))
plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
self.assertEqual(plot.handles['cbar'].extend, 'neither')
def test_points_rcparams_do_not_persist(self):
opts = dict(fig_rcparams={'text.usetex': True})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
mpl_renderer.get_plot(points)
self.assertFalse(pyplot.rcParams['text.usetex'])
def test_points_rcparams_used(self):
opts = dict(fig_rcparams={'grid.color': 'red'})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
plot = mpl_renderer.get_plot(points)
ax = plot.state.axes[0]
lines = ax.get_xgridlines()
self.assertEqual(lines[0].get_color(), 'red')
def test_points_padding_square(self):
points = Points([1, 2, 3]).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_square_per_axis(self):
curve = Points([1, 2, 3]).options(padding=((0, 0.1), (0.1, 0.2)))
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.4)
def test_points_padding_hard_xrange(self):
points = Points([1, 2, 3]).redim.range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_soft_xrange(self):
points = Points([1, 2, 3]).redim.soft_range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_unequal(self):
points = Points([1, 2, 3]).options(padding=(0.05, 0.1))
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_nonsquare(self):
points = Points([1, 2, 3]).options(padding=0.1, aspect=2)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_logx(self):
points = Points([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.89595845984076228)
self.assertEqual(x_range[1], 3.3483695221017129)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_logy(self):
points = Points([1, 2, 3]).options(padding=0.1, logy=True)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.89595845984076228)
self.assertEqual(y_range[1], 3.3483695221017129)
def test_points_padding_datetime_square(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1
)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 16891.8)
self.assertEqual(x_range[1], 16894.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_datetime_nonsquare(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1, aspect=2
)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 16891.9)
self.assertEqual(x_range[1], 16894.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_sizes_scalar_update(self):
hmap = HoloMap({i: Points([1, 2, 3]).opts(s=i*10) for i in range(1, 3)})
plot = mpl_renderer.get_plot(hmap)
artist = plot.handles['artist']
plot.update((1,))
self.assertEqual(artist.get_sizes(), np.array([10]))
plot.update((2,))
self.assertEqual(artist.get_sizes(), np.array([20]))
###########################
# Styling mapping #
###########################
def test_point_color_op(self):
points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color').options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_point_color_op_update(self):
points = HoloMap({0: Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color'),
1: Points([(0, 0, '#0000FF'), (0, 1, '#00FF00'), (0, 2, '#FF0000')],
vdims='color')}).options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
plot.update((1,))
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 0, 1]]))
def test_point_line_color_op(self):
points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color').options(edgecolors='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_edgecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_point_line_color_op_update(self):
points = HoloMap({0: Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color'),
1: Points([(0, 0, '#0000FF'), (0, 1, '#00FF00'), (0, 2, '#FF0000')],
vdims='color')}).options(edgecolors='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
plot.update((1,))
self.assertEqual(artist.get_edgecolors(),
np.array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 0, 1]]))
def test_point_fill_color_op(self):
points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color').options(facecolors='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_point_linear_color_op(self):
points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='color').options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(np.asarray(artist.get_array()), np.array([0, 1, 2]))
self.assertEqual(artist.get_clim(), (0, 2))
def test_point_linear_color_op_update(self):
points = HoloMap({0: Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='color'),
1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
vdims='color')}).options(color='color', framewise=True)
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (0, 2))
plot.update((1,))
self.assertEqual(np.asarray(artist.get_array()), np.array([2.5, 3, 1.2]))
self.assertEqual(artist.get_clim(), (1.2, 3))
def test_point_categorical_color_op(self):
points = Points([(0, 0, 'A'), (0, 1, 'B'), (0, 2, 'A')],
vdims='color').options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(np.asarray(artist.get_array()), np.array([0, 1, 0]))
self.assertEqual(artist.get_clim(), (0, 1))
def test_point_size_op(self):
points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims='size').options(s='size')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_sizes(), np.array([1, 4, 8]))
def test_point_size_op_update(self):
points = HoloMap({0: Points([(0, 0, 3), (0, 1, 1), (0, 2, 2)],
vdims='size'),
1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
vdims='size')}).options(s='size')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_sizes(), np.array([3, 1, 2]))
plot.update((1,))
self.assertEqual(artist.get_sizes(), np.array([2.5, 3, 1.2]))
def test_point_line_width_op(self):
points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims='line_width').options(linewidth='line_width')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [1, 4, 8])
def test_point_line_width_op_update(self):
points = HoloMap({0: Points([(0, 0, 3), (0, 1, 1), (0, 2, 2)],
vdims='line_width'),
1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
vdims='line_width')}).options(linewidth='line_width')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [3, 1, 2])
plot.update((1,))
self.assertEqual(artist.get_linewidths(), [2.5, 3, 1.2])
def test_point_marker_op(self):
points = Points([(0, 0, 'circle'), (0, 1, 'triangle'), (0, 2, 'square')],
vdims='marker').options(marker='marker')
with self.assertRaises(Exception):
mpl_renderer.get_plot(points)
def test_point_alpha_op(self):
points = Points([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims='alpha').options(alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(points)
def test_op_ndoverlay_value(self):
markers = ['d', 's']
overlay = NdOverlay({marker: Points(np.arange(i))
for i, marker in enumerate(markers)},
'Marker').options('Points', marker='Marker')
plot = mpl_renderer.get_plot(overlay)
for subplot, marker in zip(plot.subplots.values(), markers):
style = dict(subplot.style[subplot.cyclic_index])
style = subplot._apply_transforms(subplot.current_frame, {}, style)
self.assertEqual(style['marker'], marker)
def test_point_color_index_color_clash(self):
points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='color').options(color='color', color_index='color')
with ParamLogStream() as log:
mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ("Cannot declare style mapping for 'color' option "
"and declare a color_index; ignoring the color_index.\n")
self.assertEqual(log_msg, warning)
def test_point_size_index_size_clash(self):
points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='size').options(s='size', size_index='size')
with ParamLogStream() as log:
mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ("Cannot declare style mapping for 's' option "
"and declare a size_index; ignoring the size_index.\n")
self.assertEqual(log_msg, warning)
| [
"enriquefariasrdz@gmail.com"
] | enriquefariasrdz@gmail.com |
d6b0539a2cd34a3318a634029493799c8d1029ff | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /ZQZ510/ZQZ510/spiders/zqz.py | 3a4ae2a8fc42615dd7eaaf1a56965897c452c5d3 | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 5,422 | py | # -*- coding: utf-8 -*-
import scrapy
import time
import json
from ZQZ510.items import Zqz510Item
empty_word = 'null'
class ZqzSpider(scrapy.Spider):
    """Spider for zqz510.com trademark judgment documents.

    Flow:
      1. ``parse``        -- issue the first JSONP list query (page 1).
      2. ``parse_first``  -- read the total hit count, fan out one request
                             per 10-result page.
      3. ``parse_list``   -- build one ``Zqz510Item`` per list entry and
                             request its detail record.
      4. ``parse_detail`` -- merge the detail payload into the item.

    The API answers with JSONP (``_jqjsp(...)``), so every response body is
    stripped of the 7-character callback prefix and the trailing ``)``
    before JSON decoding.  Fields absent from a payload default to the
    module-level ``empty_word`` sentinel.
    """
    name = 'zqz'
    allowed_domains = ['zqz510.com']
    start_urls = ['http://login.zqz510.com/judgmentDoc']

    # Scalar fields copied verbatim from a list entry.  Replaces the
    # previous wall of repeated ``if key in data: ... else: ...`` blocks.
    LIST_FIELDS = ('agS', 'agidS', 'an', 'anDest', 'apS', 'apidS', 'cid',
                   'docid', 'law', 'link', 'litem', 'ltid', 'pd', 'psty',
                   'rid', 'ti', 'ty')
    # Detail fields holding nested lists/dicts; stored via ``str(...)`` so
    # the item stays flat (matches the original behaviour exactly).
    DETAIL_STR_FIELDS = ('dtls', 'judg', 'judgList', 'links', 'ltidAll', 'pdCn')

    def parse(self, response):
        """Kick off the paged query, attaching the hard-coded session cookie."""
        url = ('http://api.zqz510.com//tmof/query?ftxt=&ti=&apS=&pdStart=&pdEnd=&ty=&psty=&law=&litem=&pageNum=1'
               '&apS=&apD=&ag=&judgd=&tid=&cid=&callback=_jqjsp&_{}='.format(str(int(time.time() * 1000))))
        # NOTE(review): hard-coded credentials/session -- these expire;
        # consider moving them to settings.
        self.cookie = {
            'uid': '213facea-5ac7-4069-ae4a-97168d559ebc',
            'oid': 'UAGAP00003919',
            'JSESSIONID': '9867C3C37D24634CB9D44D1AA5C6188F',
            'c': '82f5dd5f-f8ae-459b-9907-fd0bb01d97cb',
        }
        yield scrapy.Request(url=url, callback=self.parse_first, cookies=self.cookie)

    def parse_first(self, response):
        """Compute the page count from ``total`` and request every page."""
        # NOTE: the ``encoding`` kwarg of json.loads is ignored on Python 3
        # and removed in 3.9; kept for compatibility with the original.
        json_text = json.loads(response.text[7:-1], encoding='utf-8')
        total = int(json_text['total'])
        # NOTE(review): yields one trailing empty page when total is an
        # exact multiple of 10 (kept from the original).
        all_page = int(total / 10) + 1
        for page in range(all_page):
            url = ('http://api.zqz510.com//tmof/query?ftxt=&ti=&apS=&pdStart=&pdEnd=&ty=&psty=&law=&litem=&pageNum={}'
                   '&apS=&apD=&ag=&judgd=&tid=&cid=&callback=_jqjsp&_{}='.format(str(page + 1), str(int(time.time() * 1000))))
            yield scrapy.Request(url=url, callback=self.parse_list, cookies=self.cookie)

    def parse_list(self, response):
        """Build one ``Zqz510Item`` per list entry and fetch its detail page."""
        json_text = json.loads(response.text[7:-1], encoding='utf-8')
        for data in json_text['data']:
            item = Zqz510Item()
            for field in self.LIST_FIELDS:
                item[field] = data.get(field, empty_word)
            # 'anList' is a nested structure -> keep its string form.
            item['anList'] = str(data['anList']) if 'anList' in data else empty_word
            detail_url = 'http://api.zqz510.com/tmof/detail?docid={}&callback=_jqjsp&_{}='.format(
                item['docid'], str(int(time.time() * 1000)))
            yield scrapy.Request(url=detail_url, callback=self.parse_detail,
                                 meta={'item': item}, cookies=self.cookie)

    def parse_detail(self, response):
        """Merge the detail payload into the partially filled item and emit it."""
        json_text = json.loads(response.text[7:-1], encoding='utf-8')
        item = response.meta['item']
        for field in self.DETAIL_STR_FIELDS:
            item[field] = str(json_text[field]) if field in json_text else empty_word
        # 'ftxt' (full text) is a plain string and is stored as-is.
        item['ftxt'] = json_text.get('ftxt', empty_word)
        yield item
"34021500@qq.com"
] | 34021500@qq.com |
d72f0e6e1d8aaabc1a02b10a8fbc864b8f6d0b65 | 29345337bf86edc938f3b5652702d551bfc3f11a | /python/src/main/python/pyalink/alink/tests/examples/from_docs/test_totensorstreamop.py | 78c1de91112c783148b8652120fe7425e975fcf9 | [
"Apache-2.0"
] | permissive | vacaly/Alink | 32b71ac4572ae3509d343e3d1ff31a4da2321b6d | edb543ee05260a1dd314b11384d918fa1622d9c1 | refs/heads/master | 2023-07-21T03:29:07.612507 | 2023-07-12T12:41:31 | 2023-07-12T12:41:31 | 283,079,072 | 0 | 0 | Apache-2.0 | 2020-07-28T02:46:14 | 2020-07-28T02:46:13 | null | UTF-8 | Python | false | false | 553 | py | import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestToTensorStreamOp(unittest.TestCase):
    """Smoke test for ToTensorStreamOp (string column -> tensor column)."""

    def test_totensorstreamop(self):
        """Convert a serialized float vector into a 2x3 float tensor and print it."""
        frame = pd.DataFrame(["FLOAT#6#0.0 0.1 1.0 1.1 2.0 2.1 "])
        source = StreamOperator.fromDataframe(frame, schemaStr='vec string')
        to_tensor = (ToTensorStreamOp()
                     .setSelectedCol("vec")
                     .setTensorShape([2, 3])
                     .setTensorDataType("float"))
        source.link(to_tensor).print()
        StreamOperator.execute()
"shaomeng.wang.w@gmail.com"
] | shaomeng.wang.w@gmail.com |
9ab8c1cfef72c9b54df1a43e0a919da8d13a725c | 9c81c170f03ba925bf3d0682526245c202e384a7 | /superset/cli/test.py | f175acec470cd59f06f6d1ad8de07765a2520901 | [
"Apache-2.0",
"OFL-1.1"
] | permissive | zcong1993/incubator-superset | 2a08177641eff178dee9db852887ad2d19d70d54 | 269c99293f42089958dc98b5d6e5899509fc3111 | refs/heads/master | 2023-08-17T12:24:59.438120 | 2023-08-17T10:50:24 | 2023-08-17T10:50:24 | 209,522,299 | 0 | 0 | Apache-2.0 | 2023-03-06T08:10:31 | 2019-09-19T10:09:21 | TypeScript | UTF-8 | Python | false | false | 2,860 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import click
from colorama import Fore
from flask.cli import with_appcontext
import superset.utils.database as database_utils
from superset import app, security_manager
logger = logging.getLogger(__name__)
@click.command()
@with_appcontext
def load_test_users() -> None:
    """
    Loads admin, alpha, and gamma user for testing purposes
    Syncs permissions for those users/roles
    """
    # NOTE: this docstring doubles as the click command's --help text,
    # so it is left unchanged.  The actual work happens in
    # load_test_users_run() so it can also be called programmatically.
    print(Fore.GREEN + "Loading a set of users for unit tests")
    load_test_users_run()
def load_test_users_run() -> None:
    """
    Loads admin, alpha, and gamma user for testing purposes
    Syncs permissions for those users/roles

    No-op unless the app is configured with TESTING=True, so it can never
    pollute a production database.
    """
    if app.config["TESTING"]:
        sm = security_manager
        examples_db = database_utils.get_example_database()
        # Permission-view granting access to the examples database.
        examples_pv = sm.add_permission_view_menu("database_access", examples_db.perm)

        sm.sync_role_definitions()
        _create_gamma_test_roles(sm, examples_pv)
        _create_test_users(sm)
        sm.get_session.commit()


def _create_gamma_test_roles(sm, examples_pv) -> None:
    """Create the gamma_sqllab / gamma_no_csv roles: example-db access plus
    all Gamma and sql_lab permissions (gamma_no_csv minus CSV export)."""
    gamma_sqllab_role = sm.add_role("gamma_sqllab")
    sm.add_permission_role(gamma_sqllab_role, examples_pv)

    gamma_no_csv_role = sm.add_role("gamma_no_csv")
    sm.add_permission_role(gamma_no_csv_role, examples_pv)

    for role in ["Gamma", "sql_lab"]:
        for perm in sm.find_role(role).permissions:
            sm.add_permission_role(gamma_sqllab_role, perm)
            # Everything except CSV export goes to the no-csv role too.
            if str(perm) != "can csv on Superset":
                sm.add_permission_role(gamma_no_csv_role, perm)


def _create_test_users(sm) -> None:
    """Create the fixture users (password 'general') if they don't already exist."""
    users = (
        ("admin", "Admin"),
        ("gamma", "Gamma"),
        ("gamma2", "Gamma"),
        ("gamma_sqllab", "gamma_sqllab"),
        ("alpha", "Alpha"),
        ("gamma_no_csv", "gamma_no_csv"),
    )
    for username, role in users:
        if not sm.find_user(username):
            sm.add_user(
                username,
                username,
                "user",
                username + "@fab.org",
                sm.find_role(role),
                password="general",
            )
| [
"noreply@github.com"
] | zcong1993.noreply@github.com |
7bef991e0b0cc4a0ccbb6365b1ad0fae7bce83cf | 7ca46f6b2f115712600b052bab8da6e16f90aad1 | /PythonProjects/fibonacciCalc.py | ead0942aa916e6739346a5663abd717f51bffe96 | [] | no_license | KristinCovert/Bootcamp | 73f83e5e1ce0b03fa1242f355bfbc13a099e9b6c | e4af8a3999bc8e4b9708e35268c619151d77f581 | refs/heads/master | 2021-05-28T01:44:06.211008 | 2014-11-26T23:16:20 | 2014-11-26T23:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | __author__ = 'Kristin'
def range_top():
top_range = input('Up to what number would you like to calculate the Fibonacci: ')
return top_range
def fibonacci(rt):
    """Return the first ``rt`` Fibonacci numbers: 1, 1, 2, 3, 5, 8, ...

    Bug fix: the previous version assigned ``i = j; j = k`` after the first
    step, leaving both state variables equal to 1, so it produced
    1, 2, 3, 5, ... and skipped the second 1 of the sequence.
    """
    previous, current = 0, 1
    fibonacci_list = []
    for _ in range(rt):
        # Advance the pair, then record the new "previous" term.
        previous, current = current, previous + current
        fibonacci_list.append(previous)
    return fibonacci_list
if __name__ == '__main__':
    # Interactive entry point: ask for a term count, build the sequence,
    # then echo the sequence and its length (Python 2 print statements).
    x = range_top()
    result = fibonacci(x)
    print result
    l = len(result)
    print l
| [
"kristin.bratland@gmail.com"
] | kristin.bratland@gmail.com |
f4771bd090478972d022ce9b450d530bb2408052 | 6c3ab38e350734f1bc4f0c746ea55a12838ce5ee | /pcserver/mainapp/handlers.py | 93a7d32aa090f9a76b8f6ab1bca16d7d2eda3868 | [] | no_license | joelsemar/Programming-Challenge | 1dd4fb487d02e05ed494e66da99a627970832988 | b8bf8e115dc3c242d62bf696d3268a4b31019592 | refs/heads/master | 2020-05-17T15:16:45.892328 | 2011-08-31T19:17:15 | 2011-08-31T19:17:15 | 2,298,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | from webservice_tools.utils import BaseHandler, AutoListHandler
from webservice_tools.decorators import login_required
from mainapp.models import * #@UnusedWildImport
#Create your handlers here
class PhotosHandler(AutoListHandler):
model = Photo
allowed_methods = ('GET',)
extra_fields = ('image_url',)
exclude = ('image', )
@login_required
def read(self, request, response):
"""
Returns a list of Photo objects.
API Handler: GET /photos
Params:
@key [string] your api key
Returns:
@photos [Photo] list of photos, see Photo docs for details
"""
return super(PhotosHandler, self).read(request, response)
class PhotoHandler(BaseHandler):
model = Photo
allowed_methods = ('GET',)
extra_fields = ('image_url',)
exclude = ('image', )
@login_required
def read(self, request, id, response):
"""
Fetch the details of a photo by id
API Handler: GET /photo/{id}
Params:
@id [id] id of the photo (in the url)
@key [string] your api key
Returns:
@title [string] title
@description [string] a short description
@image_url [url] a url to the corresponding image
"""
return super(PhotoHandler, self).read(request, id, response)
#ALL DEFINITION EOF
module_name = globals().get('__name__')
handlers = sys.modules[module_name]
handlers._all_ = []
for handler_name in dir():
m = getattr(handlers, handler_name)
if type(m) == type(BaseHandler):
handlers._all_.append(handler_name)
| [
"semarjt@gmail.com"
] | semarjt@gmail.com |
f375b00087150e1f82d65a9e60ddf607f3a4fdbd | 3845fdd6010cfd86e0cc8dded06c964a81fdf6ac | /registration/views.py | b1b62c20f6649595bc5f6a5473c0b66a0df90ab9 | [
"MIT"
] | permissive | Yash1256/Django-Intern | 72511e9bc3f262cd6509a7353ec8fc64fe9ade0b | c1d42ff344324b56d462ae8c3d5b6682a2b255b6 | refs/heads/master | 2021-09-25T11:45:17.994744 | 2020-10-03T21:38:15 | 2020-10-03T21:38:15 | 249,540,783 | 1 | 1 | MIT | 2021-09-22T18:47:32 | 2020-03-23T20:48:25 | Python | UTF-8 | Python | false | false | 3,741 | py | import math
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.urls import reverse, reverse_lazy
from django.views import View
from .forms import AuthorForm
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseNotFound
from .models import Author
from post.models import Post
class Landing(View):
    """Public landing page."""

    def get(self, request):
        """Render the static landing template."""
        template_name = 'registration/index.html'
        return render(request, template_name)
class Register(View):
    """Author sign-up view."""

    def get(self, request):
        """Show the blank registration form."""
        return render(request, 'registration/register.html')

    def post(self, request):
        """Validate the submitted form; on success create the author and
        send them to the login page, otherwise re-render with errors."""
        form = AuthorForm(request.POST)
        try:
            if not form.is_valid():
                return render(request, 'registration/register.html', context={'error': form.errors})
            form.save()
            return redirect('login')
        except ValidationError as err:
            # Model-level validation failures surface as a message dict.
            return render(request, 'registration/register.html', context={'error': err.message_dict})
class Login(View):
    """Email/password login view."""

    def get(self, request):
        """Show the login form."""
        return render(request, 'registration/login.html')

    def post(self, request):
        """Authenticate the credentials; on success redirect to ``next``
        (a URL name) or to the author's profile page."""
        email = request.POST.get('email', None)
        password = request.POST.get('password', None)
        if email is None or password is None:
            return render(request, 'registration/login.html', context={'error': 'Fields are required.'})
        user = authenticate(request, email=email, password=password)
        if user is None:
            return render(request, 'registration/login.html', context={'error': 'Email or Password is not valid.'})
        login(request, user)
        next_name = request.GET.get('next', None)
        if next_name:
            # ``next`` carries a URL *name*, not a path.
            return redirect(reverse(next_name))
        profile = Author.objects.get(email=user.email)
        return redirect('author', profile.pk)
@login_required(redirect_field_name='next', login_url=reverse_lazy('login'))
def author(request, aid):
    """Render an author's profile page (name, birthdate, post count),
    or 404 when no author has the given primary key."""
    try:
        profile = Author.objects.get(pk=aid)
    except Author.DoesNotExist:
        return HttpResponseNotFound()
    post_count = Post.objects.filter(author_id=profile.pk).count()
    return render(request, 'registration/author.html',
                  context={'count': post_count, 'bdate': profile.birthdate,
                           'name': profile.name})
@login_required(redirect_field_name='next', login_url=reverse_lazy('login'))
def authors_table(request):
    """Paginated author listing, 10 rows per page.

    Builds pagination context for the template: current page, prev/next
    page numbers, and up to two nearby page links on each side with
    ellipsis flags when more pages exist beyond them.
    """
    # Page number from the query string; anything non-numeric falls back to 1.
    try:
        page = request.GET.get('page', '1')
        p = int(page)
    except ValueError:
        p = 1
    total = Author.objects.count()
    # Total number of pages at 10 authors per page.
    # NOTE(review): when the table is empty, count == 0 < p == 1, so an
    # empty table yields a 404 -- confirm that is intended.
    count = math.ceil(total/10)
    context = dict()
    if count < p:
        # Requested page is past the end.
        return HttpResponseNotFound()
    else:
        # Simple prev/next page numbers.
        if p > 1:
            context['prev'] = p - 1
        if count > p:
            context['next'] = p + 1
        # Up to two page links before the current page (descending from
        # p-2); 'ellipsisP' flags that earlier pages were truncated.
        if p > 2:
            context['prev_pg'] = []
            for i in range(p-2, 0, -1):
                context['prev_pg'].append(i)
            if len(context['prev_pg']) > 2:
                context['prev_pg'] = context['prev_pg'][:2]
                context['ellipsisP'] = True
        # Up to two page links after the current page; 'ellipsisN' flags
        # that later pages were truncated.  The list is reversed for the
        # template's rendering order.
        if count > p + 1:
            context['next_pg'] = []
            for i in range(p + 2, count + 1, 1):
                context['next_pg'].append(i)
            if len(context['next_pg']) > 2:
                context['next_pg'] = context['next_pg'][:2]
                context['ellipsisN'] = True
            context['next_pg'].reverse()
    # Slice bounds for the current page of authors.
    st = (p-1)*10
    ed = min(p*10, total)
    context['page'] = p
    context['authors'] = Author.objects.all()[st:ed]
    return render(request, 'registration/authors.html', context=context)
| [
"shuklayash1256@gmail.com"
] | shuklayash1256@gmail.com |
43e3f69a4d43e8fd97a6995fa95b1197d002dc0e | 0315255c749b12216a7c8ac26378d8921466284a | /tests/integration/client/standard.py | 969611b4d0a0800f10b1c10258875138538f5b08 | [
"Apache-2.0"
] | permissive | jhutchins/salt | a32de1362c6787ec96df7ce57bf9b98f20eaf30a | 22ec0cee6a8a842ec426b7a3e634723ea7ce7256 | refs/heads/master | 2021-01-21T00:05:05.782149 | 2012-04-06T22:03:19 | 2012-04-06T22:03:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | # Import python libs
import subprocess
# Import salt libs
import integration
class StdTest(integration.ModuleCase):
    '''
    Test standard client calls
    '''
    def test_cli(self):
        '''
        Test cli function
        '''
        for ret in self.client.cmd_cli('minion', 'test.ping'):
            self.assertTrue(ret['minion'])

    def test_iter(self):
        '''
        test cmd_iter
        '''
        for ret in self.client.cmd_iter('minion', 'test.ping'):
            self.assertTrue(ret['minion'])

    def test_iter_no_block(self):
        '''
        test cmd_iter_no_block
        '''
        # The non-blocking iterator yields None while no return is ready.
        for ret in self.client.cmd_iter_no_block('minion', 'test.ping'):
            if ret is not None:
                self.assertTrue(ret['minion'])

    def test_full_returns(self):
        '''
        test cmd_iter
        '''
        full_ret = self.client.cmd_full_return('minion', 'test.ping')
        self.assertTrue(full_ret['minion'])
| [
"thatch45@gmail.com"
] | thatch45@gmail.com |
2622b01ab6dbae6e902dd8647177dd9c4dd6092a | 64e899ae6199b7915585f7f903b2ec221aa0b8df | /common/modelanalysis.py | a0b3977391b8709375b3571f290d6da0707ec2f6 | [] | no_license | sharma0611/dsworkflows | 642f36dc0f7369c319eddbf27cd7bb734c9ab7a4 | 95dacb81581193d57fbc318ad1d9ef2c83b897cd | refs/heads/master | 2021-09-15T01:38:11.201611 | 2018-05-23T14:31:52 | 2018-05-23T14:31:52 | 118,946,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,940 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
****************************************
Shivam Sharma (CIH745)
Data Science Co-op
Summer 2017
****************************************
Module: Model Analysis
Purpose: Hosts the functions that interact with sklearn-interface models to provide analysis on statistical
metrics
"""
from common.utils import load_obj, save_obj
import pandas as pd
from numpy import array, random
import numpy as np
import os
import re
from sklearn.metrics import mean_squared_error, r2_score, confusion_matrix
from importlib import import_module
import importlib.util
import matplotlib as mpl
mpl.use("TkAgg")
import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from common.univariateanalysis import apply_spec_to_df
from common.transforms import inverse_dictionary
#metrics
def insert_random_var(seed, new_var, dataframe):
    """Append a column of U[0, 1) pseudo-random values to ``dataframe``.

    The global NumPy RNG is seeded first so the column is reproducible;
    note this mutates global random state as a side effect.
    """
    random.seed(seed)
    n_rows = len(dataframe.index)
    dataframe[new_var] = random.random_sample(n_rows)
#function used to select subset of another list, elements in order of first list
def ordered_subset(preserve_order_list, cut_list):
d = {k:v for v,k in enumerate(preserve_order_list)}
new = set(preserve_order_list).intersection(cut_list)
new = list(new)
new.sort(key=d.get)
return new
def mse_r2_perf_output(actual_y, pred_y, title, fig=None, ax=None):
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is None:
fig = ax.get_figure()
elif ax is None:
ax = fig.gca()
mse = mean_squared_error(actual_y, pred_y)
print(title + ":")
print("MSE: %.4f" % mse)
r2 = r2_score(actual_y, pred_y)
print("r2: %.4f" % r2)
ax.scatter(actual_y, pred_y, rasterized=True)
ax.plot([actual_y.min(), actual_y.max()], [actual_y.min(), actual_y.max()], 'k--', lw=4)
ax.set_xlabel('Actual')
ax.set_ylabel('Predicted')
ax.set_title(title)
return mse, r2
def r2_mse_grab(test_y, test_y_pred):
    """Return the (r2, mse) pair for the given actual/predicted values."""
    return (r2_score(test_y, test_y_pred),
            mean_squared_error(test_y, test_y_pred))
def get_accuracy(actual_bands, predict_bands, interval):
    """Fraction of predictions that land within ``interval`` tiles of the
    actual tile (inclusive on both sides).

    Raises ValueError on a length mismatch.  (Bug fix: the previous
    version printed the mismatch and called ``exit(0)``, killing the whole
    interpreter with a *success* exit status.)
    """
    len_a = len(actual_bands)
    len_b = len(predict_bands)
    if len_a != len_b:
        raise ValueError(
            "Different shapes between actual & predict of {0} and {1}".format(len_a, len_b))
    hits = sum(1 for x, y in zip(actual_bands, predict_bands)
               if x - interval <= y <= x + interval)
    return hits / len_a
def cf_twentile_matrix(actual_twentile, predicted_twentile, title, normalize=True, fig=None, ax=None):
    """Draw a confusion-matrix heatmap of actual vs. predicted tiles,
    annotated with accuracy at tolerances of 0..3 tiles.

    Missing fig/ax are created or derived from the one supplied.
    Returns (fig, ax).
    """
    if fig is None and ax is None:
        fig, ax = plt.subplots()
    elif fig is None:
        fig = ax.get_figure()
    elif ax is None:
        ax = fig.gca()
    cnf_matrix = confusion_matrix(actual_twentile, predicted_twentile)
    # NOTE(review): "normalization" divides by the single largest cell
    # (np.amax), not by row sums -- cells are relative to the peak, not
    # per-class rates.  Confirm that is intended.
    if normalize==True:
        cnf_matrix = cnf_matrix.astype('float') / np.amax(cnf_matrix)
    sns.heatmap(cnf_matrix, linewidths=.5, ax=ax)
    # Annotate accuracy within +-0..3 tiles in the top-right corner.
    for i in range(4):
        accuracy = get_accuracy(actual_twentile, predicted_twentile, i)
        ax.text(s='{:.2%}'.format(accuracy) + " (x:x+-{0})".format(i), transform=ax.transAxes,
                x=0.75,y=0.95-i*0.07, fontsize=12)
    ax.set_title(title)
    ax.set_xlabel("Predicted")
    ax.set_ylabel("Actual")
    return fig, ax
def tile_accuracy_hist(actual_tiles, predicted_tiles, tag="", predict_bands=True):
    """For each tile, plot a bar chart of how the confusion-matrix mass is
    distributed across the other tiles.

    With ``predict_bands=True`` each figure shows, for one *predicted*
    tile, the distribution of actual tiles (matrix column); otherwise the
    distribution of predictions for one *actual* tile (matrix row).

    Returns a list of matplotlib figures, one per tile.
    """
    cnf_matrix = confusion_matrix(actual_tiles, predicted_tiles)
    num_tiles = len(cnf_matrix)
    all_hist_figs = []
    # 1-based tile labels for the x axis.
    all_tiles = [x + 1 for x in range(num_tiles)]
    for i in range(num_tiles):
        if predict_bands:
            # Column i: everything that was predicted as tile i+1.
            row = cnf_matrix[:,i]
            tiletype = "Predict"
        else:
            # Row i: everything whose actual tile was i+1.
            row = cnf_matrix[i]
            tiletype = "Actual"
        fig = plt.figure(figsize=(18, 6))
        ax = fig.add_subplot(111)
        sns.set_style("darkgrid")
        # NOTE(review): ``left=`` is the pre-matplotlib-2.0 bar API
        # (renamed to ``x=`` and removed in 3.0) -- confirm the pinned
        # matplotlib version before upgrading.
        ax.bar(left=all_tiles, height=row, tick_label=all_tiles, width=1)
        ax.set_title(tag + " Distribution for "+tiletype+" Tile " + str(i+1))
        ax.set_ylabel("# of predictions")
        ax.set_xlabel("Predicted Tile")
        ax.set_xlim(0, num_tiles+1)
        #annotate each bar with percent of predictions
        total_predictions = sum(row)
        for p in ax.patches:
            ax.annotate('{:.2%}'.format(p.get_height()/total_predictions), (p.get_x() + (0 * p.get_width()),
                        p.get_height() * 1.005), fontsize=10)
        all_hist_figs.append(fig)
    return all_hist_figs
def mse_r2_graph(actual_y, pred_y, title, fig=None, ax=None):
    """Print MSE/R2 and draw an actual-vs-predicted scatter with the
    identity line; returns (mse, r2).

    NOTE(review): near-duplicate of ``mse_r2_perf_output`` -- the only
    differences are no title line printed and no ``rasterized=True`` on
    the artists.  Consider consolidating.
    """
    if fig is None and ax is None:
        fig, ax = plt.subplots()
    elif fig is None:
        fig = ax.get_figure()
    elif ax is None:
        ax = fig.gca()
    mse = mean_squared_error(actual_y, pred_y)
    print("MSE: %.4f" % mse)
    r2 = r2_score(actual_y, pred_y)
    print("r2: %.4f" % r2)
    ax.scatter(actual_y, pred_y)
    # Identity line: points on it are perfect predictions.
    ax.plot([actual_y.min(), actual_y.max()], [actual_y.min(), actual_y.max()], 'k--', lw=4)
    ax.set_xlabel('Actual')
    ax.set_ylabel('Predicted')
    ax.set_title(title)
    return mse, r2
def predict_metrics(actual_y, predict_y, actual_bands, predict_bands, tag, fig, ax_array):
    """Fill a three-axes page with prediction diagnostics:
    ax_array[0] actual-vs-predicted scatter, ax_array[1] tile confusion
    heatmap, ax_array[2] mean value per actual/predicted tile.

    Returns (mse, r2) computed on the raw (untiled) values.
    """
    #plot scatter plot of actual & predicted
    mse, r2 = mse_r2_perf_output(actual_y, predict_y, tag, fig, ax_array[0])
    #create confusion matrix
    cf_twentile_matrix(actual_bands, predict_bands, tag, True, fig, ax_array[1])
    # Per-tile mean of the raw values, for actual and predicted tiles.
    mean_camaro = pd.DataFrame({'actual_y': actual_y,
                               'actual_twentiles': actual_bands,
                               'predicted_y': predict_y,
                               'predicted_twentiles': predict_bands
                               })
    actual = mean_camaro[['actual_twentiles', 'actual_y']].groupby(['actual_twentiles']).mean()
    pred = mean_camaro[['predicted_twentiles', 'predicted_y']].groupby(['predicted_twentiles']).mean()
    # Overlay both mean curves on the third axes.
    ax = actual.plot(ax=ax_array[2],rasterized=True)
    ax1 = pred.plot(ax=ax,rasterized=True)
    ax1.set_title(tag)
    ax1.set_ylabel("Mean predicted value for each tile")
    ax1.set_xlabel("Tile")
    return mse, r2
def r2_and_mse(clf, test_y, test_X, tag, estimators, fig=None, ax=None):
    """Plot test-set R2 and MSE after each boosting stage of a staged
    (e.g. gradient-boosted) regressor.

    Missing fig/ax are created or derived from the one supplied.
    Returns (fig, ax).
    """
    if fig is None and ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure() if fig is None else fig
        ax = fig.gca() if ax is None else ax
    r2_by_stage = np.zeros((estimators,), dtype=np.float64)
    mse_by_stage = np.zeros((estimators,), dtype=np.float64)
    # staged_predict yields the prediction after each boosting iteration.
    for stage, y_pred in enumerate(clf.staged_predict(test_X)):
        r2_by_stage[stage] = r2_score(test_y, y_pred)
        mse_by_stage[stage] = mean_squared_error(test_y, y_pred)
    stages = np.arange(estimators) + 1
    ax.plot(stages, r2_by_stage, 'b-', label='R2', rasterized=True)
    ax.plot(stages, mse_by_stage, 'r-', label='MSE', rasterized=True)
    ax.legend(loc='upper right')
    ax.set_xlabel('Boosting Iterations')
    ax.set_ylabel('R2 / MSE')
    ax.set_title(tag)
    return fig, ax
def deviance(clf, test_y, test_X, tag, estimators, fig=None, ax=None):
    """Plot training vs. test deviance across boosting iterations for a
    fitted sklearn gradient-boosting model.

    NOTE(review): the ``tag`` parameter is unused -- the title is always
    'Deviance'.  Returns (fig, ax).
    """
    if fig is None and ax is None:
        fig, ax = plt.subplots()
    elif fig is None:
        fig = ax.get_figure()
    elif ax is None:
        ax = fig.gca()
    # Test deviance after each stage via the model's own loss function.
    test_score = np.zeros((estimators,), dtype=np.float64)
    for i, y_pred in enumerate(clf.staged_predict(test_X)):
        test_score[i] = clf.loss_(test_y, y_pred)
    ax.set_title('Deviance')
    # Training deviance per stage is recorded by the model during fit.
    ax.plot(np.arange(estimators) + 1, clf.train_score_, 'b-',
            label='Training Set Deviance', rasterized=True)
    ax.plot(np.arange(estimators) + 1, test_score, 'r-',
            label='Test Set Deviance', rasterized=True)
    ax.legend(loc='upper right')
    ax.set_xlabel('Boosting Iterations')
    ax.set_ylabel('Deviance')
    return fig, ax
import lightgbm as lgb
def model_metrics_lgb(clf):
    """One-page figure of LightGBM training metrics (l2 on top, Huber
    loss below).  Returns a single-element list of figures, matching the
    shape returned by model_metrics_sklearn."""
    fig = plt.figure(figsize=(8, 11))
    grid = gridspec.GridSpec(2, 1)
    ax_l2 = fig.add_subplot(grid[0])
    ax_huber = fig.add_subplot(grid[1])
    lgb.plot_metric(clf, metric="l2", ax=ax_l2, title="l2 during Training")
    lgb.plot_metric(clf, metric="huber", ax=ax_huber, title="Huber Loss during Training")
    grid.tight_layout(fig, rect=[0.05,0.05,0.95,0.95], pad=0.5)
    return [fig]
def model_metrics_sklearn(clf, estimators, actual_test_y, test_X, tag):
    """One-page figure of sklearn boosting diagnostics: staged R2/MSE on
    top, train/test deviance below.  Returns a single-element list of
    figures, matching the shape returned by model_metrics_lgb."""
    fig = plt.figure(figsize=(8, 11))
    grid = gridspec.GridSpec(2, 1)
    ax_scores = fig.add_subplot(grid[0])
    ax_deviance = fig.add_subplot(grid[1])
    r2_and_mse(clf, actual_test_y, test_X, tag, estimators, fig, ax_scores)
    deviance(clf, actual_test_y, test_X, tag, estimators, fig, ax_deviance)
    grid.tight_layout(fig, rect=[0.05,0.05,0.95,0.95], pad=0.5)
    return [fig]
#getting an r2 from model
def r2_model(full_path, transform, y, model_num, train_df, test_df, features_to_use=None):
    """Score a persisted model on the train/test splits.

    NOTE(review): the previous version was ``def r2_model():`` yet
    referenced six undefined free names (``model``, ``X``, ``y``, ...) and
    could never run; this signature is reconstructed from its only call
    site in ``r2_compare`` (FullPath, TransformTag, y, ModelNum, train_df,
    test_df).  The model is assumed to be pickled at ``full_path`` via
    ``save_obj`` -- confirm against the model-training code.

    Returns a Series indexed ModelName, test_r2, test_mse, train_r2,
    train_mse (ModelName holds ``model_num``).
    """
    model = load_obj(full_path)

    # All transformed-target columns look like "<transform>_<y>".
    y_pattern = ".*_" + str(y) + '$'
    r = re.compile(y_pattern)
    arr_y = list(filter(r.match, train_df.columns.tolist()))

    # Feature columns: everything that is not a (transformed) target.
    X = [c for c in train_df.columns if c not in arr_y and c != y]
    if features_to_use:
        if type(features_to_use) == str:
            # NOTE(review): eval of a stored feature-list string --
            # trusted input only.
            features_to_use = eval(features_to_use)
        X = ordered_subset(X, features_to_use)

    train_X_arr = array(train_df[X])
    test_X_arr = array(test_df[X])

    # Target column for this model's transform.
    y_label = transform + "_" + y
    train_y_arr = array(train_df[y_label])
    test_y_arr = array(test_df[y_label])

    # Predict and score both splits.
    test_y_pred = model.predict(test_X_arr)
    train_y_pred = model.predict(train_X_arr)
    r2_test, mse_test = r2_mse_grab(test_y_arr, test_y_pred)
    r2_train, mse_train = r2_mse_grab(train_y_arr, train_y_pred)

    myseries = pd.Series([model_num, r2_test, mse_test, r2_train, mse_train])
    myseries.index = ["ModelName", "test_r2", "test_mse", "train_r2", "train_mse"]
    return myseries
def r2_compare(modeldb_path, impute_dir, y, exportpath=None, SpecialTag=None):
    """Re-score every qualifying model in the model database against the
    imputed train/test splits and merge the metrics into the database.

    NOTE(review): several apparent defects, flagged rather than changed
    here -- (1) the column-presence check uses "test_r2" but the queries
    filter on "r2_test"; (2) ``r2_model`` is called with six arguments but
    is defined with none; (3) the merged ``modeldb`` is neither saved (the
    save is commented out) nor returned, and ``exportpath`` is unused.
    """
    tag = SpecialTag
    if os.path.isfile(modeldb_path):
        modeldb = load_obj(modeldb_path)
    else:
        print("modeldb not found")
        return
    # Select which models to (re-)score: all of them on the first run,
    # otherwise only those with positive test R2 (or the special tag).
    cols = modeldb.columns.tolist()
    if "test_r2" not in cols:
        curr_db = modeldb
    elif tag:
        query = "r2_test > 0 | SpecialTag == " + str(tag)
        curr_db = modeldb.query(query)
    else:
        curr_db = modeldb.query("r2_test > 0")
    #load imputed data
    cooked_data_file = impute_dir + "/imputed.pk"
    train_fp = impute_dir + "/train.pk"
    test_fp = impute_dir + "/test.pk"
    cooked_df = load_obj(cooked_data_file)
    # train.pk / test.pk hold row indices into the imputed dataframe.
    train_i = load_obj(train_fp)
    train_df = cooked_df.iloc[train_i]
    test_i = load_obj(test_fp)
    test_df = cooked_df.iloc[test_i]
    #get all metrics from DF
    temp_metrics_df = curr_db.apply(lambda row: r2_model(row["FullPath"],row["TransformTag"], y, row['ModelNum'],
                                                         train_df, test_df), axis=1)
    new_columns = ['ModelNum', 'r2_test', 'mse_test', 'r2_train', 'mse_train']
    temp_metrics_df.columns = new_columns
    modeldb = pd.merge(modeldb, temp_metrics_df, how='left', on='ModelNum')
    #save_obj(modeldb, modeldb_path)
#CODE TO MAKE A CONFUSION MATRIX
def train_test_confusion_plot_full(predicted_train, predicted_test, actual_train, actual_test, y, curr_tile, rev_transform_spec, full_dist=True):
pred_train_df = pd.DataFrame({y: predicted_train})
pred_train_df = apply_spec_to_df(y, rev_transform_spec, pred_train_df)
predicted_train_tile = pred_train_df[y].apply(curr_tile)
pred_test_df = pd.DataFrame({y: predicted_test})
pred_test_df = apply_spec_to_df(y, rev_transform_spec, pred_test_df)
predicted_test_tile = pred_test_df[y].apply(curr_tile)
actual_train_df = pd.DataFrame({y: actual_train})
actual_train_df = apply_spec_to_df(y, rev_transform_spec, actual_train_df)
actual_train_tile = actual_train_df[y].apply(curr_tile)
actual_test_df = pd.DataFrame({y: actual_test})
actual_test_df = apply_spec_to_df(y, rev_transform_spec, actual_test_df)
actual_test_tile = actual_test_df[y].apply(curr_tile)
#setup figure for test items
fig1 = plt.figure(figsize=(8, 11))
gs1 = gridspec.GridSpec(3, 1)
ax1 = fig1.add_subplot(gs1[0])
ax2 = fig1.add_subplot(gs1[1])
ax3 = fig1.add_subplot(gs1[2])
ax_array1 = [ax1, ax2, ax3]
test_mse, test_r2 = predict_metrics(actual_test, predicted_test, actual_test_tile.values,
predicted_test_tile.values, y + ' - Test' , fig1, ax_array1)
gs1.tight_layout(fig1, rect=[0.05,0.05,0.95,0.95], pad=0.5)
#setup figure for train items
fig2 = plt.figure(figsize=(8, 11))
gs2 = gridspec.GridSpec(3, 1)
ax4 = fig2.add_subplot(gs2[0])
ax5 = fig2.add_subplot(gs2[1])
ax6 = fig2.add_subplot(gs2[2])
ax_array2 = [ax4, ax5, ax6]
train_mse, train_r2 = predict_metrics(actual_train, predicted_train, actual_train_tile.values,
predicted_train_tile.values, y + ' - Train' , fig2, ax_array2)
gs2.tight_layout(fig2, rect=[0.05,0.05,0.95,0.95], pad=0.5)
all_figs = [fig1, fig2]
if full_dist:
hist_figs = tile_accuracy_hist(actual_test_tile.values, predicted_test_tile.values, "Test", True)
all_figs = all_figs + hist_figs
return all_figs
| [
"sharma.shivam0611@gmail.com"
] | sharma.shivam0611@gmail.com |
0aa4fad95e735af119da27c643164c508715fe23 | 58c122786263edf8aec4a6b6b4986b2f3d4ff1d5 | /modules/s3/pyvttbl/qsturng.py | 49e432d4a21df43c96c06038b370da50df36ded8 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | andygimma/eden | c9d0819e87ef05ff607cac6120dbddc86e55bc31 | 716d5e11ec0030493b582fa67d6f1c35de0af50d | refs/heads/master | 2021-01-15T21:54:03.240072 | 2012-11-16T05:13:11 | 2012-11-16T05:13:11 | 6,726,106 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,225 | py | # Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
"""
Implementation of Gleason's (1999) non-iterative upper quantile
studentized range approximation.
According to Gleason this method should be more accurate than the
AS190 FORTRAN algorithm of Lund and Lund (1983) and works from .5
<= p <= .999 (The AS190 only works from .9 <= p <= .99).
It is more efficient then the Copenhaver & Holland (1988) algorithm
(used by the _qtukey_ R function) although it requires storing the A
table in memory. (q distribution) approximations in Python.
see:
Gleason, J. R. (1999). An accurate, non-iterative approximation
for studentized range quantiles. Computational Statistics &
Data Analysis, (31), 147-158.
Gleason, J. R. (1998). A table of quantile points of the
Studentized range distribution.
http://www.stata.com/stb/stb46/dm64/sturng.pdf
"""
import math
import scipy.stats
import numpy as np
inf = float('inf')  # sentinel for infinite degrees of freedom (matches the `inf` keys of the A table)
__version__ = '0.2.1'
# changelog
# 0.1 - initial release
# 0.1.1 - vectorized
# 0.2 - psturng added
# 0.2.1 - T, R generation script relegated to make_tbls.py
# Gleason's table was derived using least square estimation on the tabled
# r values for combinations of p and v. In total there are 206
# estimates over p-values of .5, .75, .9, .95, .975, .99, .995,
# and .999, and over v (degrees of freedom) of (1) - 20, 24, 30, 40,
# 60, 120, and inf. combinations with p < .95 don't have coefficients
# for v = 1. Hence the parentheses. These coefficients allow us to
# form f-hat. f-hat with the inverse t transform of tinv(p,v) yields
# a fairly accurate estimate of the studentized range distribution
# across a wide range of values. According to Gleason this method
# should be more accurate than algorithm AS190 of Lund and Lund (1983)
# and work across a wider range of values (The AS190 only works
# from .9 <= p <= .99). R's qtukey algorithm was used to add tables
# at .675, .8, and .85. These aid approximations when p < .9.
#
# The code that generated this table is called make_tbls.py and is
# located in version control.
A = {(0.1, 2.0): [-2.2485085243379075, -1.5641014278923464, 0.55942294426816752, -0.060006608853883377],
(0.1, 3.0): [-2.2061105943901564, -1.8415406600571855, 0.61880788039834955, -0.062217093661209831],
(0.1, 4.0): [-2.1686691786678178, -2.008196172372553, 0.65010084431947401, -0.06289005500114471],
(0.1, 5.0): [-2.145077200277393, -2.112454843879346, 0.66701240582821342, -0.062993502233654797],
(0.1, 6.0): [-2.0896098049743155, -2.2400004934286497, 0.70088523391700142, -0.065907568563272748],
(0.1, 7.0): [-2.0689296655661584, -2.3078445479584873, 0.71577374609418909, -0.067081034249350552],
(0.1, 8.0): [-2.0064956480711262, -2.437400413087452, 0.76297532367415266, -0.072805518121505458],
(0.1, 9.0): [-2.3269477513436061, -2.0469494712773089, 0.60662518717720593, -0.054887108437009016],
(0.1, 10.0): [-2.514024350177229, -1.8261187841127482, 0.51674358077906746, -0.044590425150963633],
(0.1, 11.0): [-2.5130181309130828, -1.8371718595995694, 0.51336701694862252, -0.043761825829092445],
(0.1, 12.0): [-2.5203508109278823, -1.8355687130611862, 0.5063486549107169, -0.042646205063108261],
(0.1, 13.0): [-2.5142536438310477, -1.8496969402776282, 0.50616991367764153, -0.042378379905665363],
(0.1, 14.0): [-2.3924634153781352, -2.013859173066078, 0.56421893251638688, -0.048716888109540266],
(0.1, 15.0): [-2.3573552940582574, -2.0576676976224362, 0.57424068771143233, -0.049367487649225841],
(0.1, 16.0): [-2.3046427483044871, -2.1295959138627993, 0.59778272657680553, -0.051864829216301617],
(0.1, 17.0): [-2.2230551072316125, -2.2472837435427127, 0.64255758243215211, -0.057186665209197643],
(0.1, 18.0): [-2.3912859179716897, -2.0350604070641269, 0.55924788749333332, -0.047729331835226464],
(0.1, 19.0): [-2.4169773092220623, -2.0048217969339146, 0.54493039319748915, -0.045991241346224065],
(0.1, 20.0): [-2.4264087194660751, -1.9916614057049267, 0.53583555139648154, -0.04463049934517662],
(0.1, 24.0): [-2.3969903132061869, -2.0252941869225345, 0.53428382141200137, -0.043116495567779786],
(0.1, 30.0): [-2.2509922780354623, -2.2309248956124894, 0.60748041324937263, -0.051427415888817322],
(0.1, 40.0): [-2.1310090183854946, -2.3908466074610564, 0.65844375382323217, -0.05676653804036895],
(0.1, 60.0): [-1.9240060179027036, -2.6685751031012233, 0.75678826647453024, -0.067938584352398995],
(0.1, 120.0): [-1.9814895487030182, -2.5962051736978373, 0.71793969041292693, -0.063126863201511618],
(0.1, inf): [-1.913410267066703, -2.6947367328724732, 0.74742335122750592, -0.06660897234304515],
(0.5, 2.0): [-0.88295935738770648, -0.1083576698911433, 0.035214966839394388, -0.0028576288978276461],
(0.5, 3.0): [-0.89085829205846834, -0.10255696422201063, 0.033613638666631696, -0.0027101699918520737],
(0.5, 4.0): [-0.89627345339338116, -0.099072524607668286, 0.032657774808907684, -0.0026219007698204916],
(0.5, 5.0): [-0.89959145511941052, -0.097272836582026817, 0.032236187675182958, -0.0025911555217019663],
(0.5, 6.0): [-0.89959428735702474, -0.098176292411106647, 0.032590766960226995, -0.0026319890073613164],
(0.5, 7.0): [-0.90131491102863937, -0.097135907620296544, 0.032304124993269533, -0.0026057965808244125],
(0.5, 8.0): [-0.90292500599432901, -0.096047500971337962, 0.032030946615574568, -0.0025848748659053891],
(0.5, 9.0): [-0.90385598607803697, -0.095390771554571888, 0.031832651111105899, -0.0025656060219315991],
(0.5, 10.0): [-0.90562524936125388, -0.093954488089771915, 0.031414451048323286, -0.0025257834705432031],
(0.5, 11.0): [-0.90420347371173826, -0.095851656370277288, 0.0321150356209743, -0.0026055056400093451],
(0.5, 12.0): [-0.90585973471757664, -0.094449306296728028, 0.031705945923210958, -0.0025673330195780191],
(0.5, 13.0): [-0.90555437067293054, -0.094792991050780248, 0.031826594964571089, -0.0025807109129488545],
(0.5, 14.0): [-0.90652756604388762, -0.093792156994564738, 0.031468966328889042, -0.0025395175361083741],
(0.5, 15.0): [-0.90642323700400085, -0.094173017520487984, 0.031657517378893905, -0.0025659271829033877],
(0.5, 16.0): [-0.90716338636685234, -0.093785178083820434, 0.031630091949657997, -0.0025701459247416637],
(0.5, 17.0): [-0.90790133816769714, -0.093001147638638884, 0.031376863944487084, -0.002545143621663892],
(0.5, 18.0): [-0.9077432927051563, -0.093343516378180599, 0.031518139662395313, -0.0025613906133277178],
(0.5, 19.0): [-0.90789499456490286, -0.09316964789456067, 0.031440782366342901, -0.0025498353345867453],
(0.5, 20.0): [-0.90842707861030725, -0.092696016476608592, 0.031296040311388329, -0.0025346963982742186],
(0.5, 24.0): [-0.9083281347135469, -0.092959308144970776, 0.031464063190077093, -0.0025611384271086285],
(0.5, 30.0): [-0.90857624050016828, -0.093043139391980514, 0.031578791729341332, -0.0025766595412777147],
(0.5, 40.0): [-0.91034085045438684, -0.091978035738914568, 0.031451631000052639, -0.0025791418103733297],
(0.5, 60.0): [-0.91084356681030032, -0.091452675572423425, 0.031333147984820044, -0.0025669786958144843],
(0.5, 120.0): [-0.90963649561463833, -0.093414563261352349, 0.032215602703677425, -0.0026704024780441257],
(0.5, inf): [-0.91077157500981665, -0.092899220350334571, 0.032230422399363315, -0.0026696941964372916],
(0.675, 2.0): [-0.67231521026565144, -0.097083624030663451, 0.027991378901661649, -0.0021425184069845558],
(0.675, 3.0): [-0.65661724764645824, -0.08147195494632696, 0.02345732427073333, -0.0017448570400999351],
(0.675, 4.0): [-0.65045677697461124, -0.071419073399450431, 0.020741962576852499, -0.0015171262565892491],
(0.675, 5.0): [-0.64718875357808325, -0.064720611425218344, 0.019053450246546449, -0.0013836232986228711],
(0.675, 6.0): [-0.64523003702018655, -0.059926313672731824, 0.017918997181483924, -0.0012992250285556828],
(0.675, 7.0): [-0.64403313148478836, -0.056248191513784476, 0.017091446791293721, -0.0012406558789511822],
(0.675, 8.0): [-0.64325095865764359, -0.053352543126426684, 0.016471879286491072, -0.0011991839050964099],
(0.675, 9.0): [-0.64271152754911653, -0.051023769620449078, 0.01599799600547195, -0.0011693637984597086],
(0.675, 10.0): [-0.64232244408502626, -0.049118327462884373, 0.015629704966568955, -0.0011477775513952285],
(0.675, 11.0): [-0.64203897854353564, -0.047524627960277892, 0.015334801262767227, -0.0011315057284007177],
(0.675, 12.0): [-0.64180344973512771, -0.046205907576003291, 0.015108290595438166, -0.0011207364514518488],
(0.675, 13.0): [-0.64162086456823342, -0.045076099336874231, 0.0149226565346125, -0.0011126140690497352],
(0.675, 14.0): [-0.64146906480198984, -0.044108523550512715, 0.014772954218646743, -0.0011069708562369386],
(0.675, 15.0): [-0.64133915151966603, -0.043273370927039825, 0.014651691599222836, -0.0011032216539514398],
(0.675, 16.0): [-0.64123237842752079, -0.042538925012463868, 0.014549992487506169, -0.0011005633864334021],
(0.675, 17.0): [-0.64113034037536609, -0.041905699463005854, 0.014470805560767184, -0.0010995286436738471],
(0.675, 18.0): [-0.64104137391561256, -0.041343885546229336, 0.014404563657113593, -0.0010991304223377683],
(0.675, 19.0): [-0.64096064882827297, -0.04084569291139839, 0.014350159655133801, -0.0010993656711121901],
(0.675, 20.0): [-0.64088647405089572, -0.040402175957178085, 0.014305769823654429, -0.0011001304776712105],
(0.675, 24.0): [-0.64063763965937837, -0.039034716348048545, 0.014196703837251648, -0.0011061961945598175],
(0.675, 30.0): [-0.64034987716294889, -0.037749651156941719, 0.014147040999127263, -0.0011188251352919833],
(0.675, 40.0): [-0.6399990514713938, -0.036583307574857803, 0.014172070700846548, -0.0011391004138624943],
(0.675, 60.0): [-0.63955586202430248, -0.035576938958184395, 0.014287299153378865, -0.0011675811805794236],
(0.675, 120.0): [-0.63899242674778622, -0.034763757512388853, 0.014500726912982405, -0.0012028491454427466],
(0.675, inf): [-0.63832682579247613, -0.034101476695520404, 0.014780921043580184, -0.0012366204114216408],
(0.75, 2.0): [-0.60684073638504454, -0.096375192078057031, 0.026567529471304554, -0.0019963228971914488],
(0.75, 3.0): [-0.57986144519102656, -0.078570292718034881, 0.021280637925009449, -0.0015329306898533772],
(0.75, 4.0): [-0.56820771686193594, -0.0668113563896649, 0.018065284051059189, -0.0012641485481533648],
(0.75, 5.0): [-0.56175292435740221, -0.058864526929603825, 0.016046735025708799, -0.0011052560286524044],
(0.75, 6.0): [-0.55773449282066356, -0.053136923269827351, 0.014684258167069347, -0.0010042826823561605],
(0.75, 7.0): [-0.55509524598867332, -0.048752649191139405, 0.013696566605823626, -0.00093482210003133898],
(0.75, 8.0): [-0.55324993686191515, -0.045305558708724644, 0.012959681992062138, -0.00088583541601696021],
(0.75, 9.0): [-0.55189259054026196, -0.042539819902381634, 0.012398791106424769, -0.00085083962241435827],
(0.75, 10.0): [-0.55085384656956893, -0.040281425755686585, 0.01196442242722482, -0.00082560322161492677],
(0.75, 11.0): [-0.55003198103541273, -0.038410176100193948, 0.011623294239447784, -0.00080732975034320073],
(0.75, 12.0): [-0.54936541596319177, -0.036838543267887103, 0.011351822637895701, -0.0007940703654926442],
(0.75, 13.0): [-0.54881015972753833, -0.035506710625568455, 0.011134691307865171, -0.0007846360016355809],
(0.75, 14.0): [-0.54834094346071949, -0.034364790609906569, 0.010958873929274728, -0.00077796645357008291],
(0.75, 15.0): [-0.54793602418304255, -0.033379237455748029, 0.010816140998057593, -0.00077344175064785099],
(0.75, 16.0): [-0.54758347689728037, -0.032520569145898917, 0.010699240399358219, -0.00077050847328596678],
(0.75, 17.0): [-0.54727115963795303, -0.031769277192927527, 0.010603749751170481, -0.0007688642392748113],
(0.75, 18.0): [-0.54699351808826535, -0.031105476267880995, 0.010524669113016114, -0.00076810656837464093],
(0.75, 19.0): [-0.54674357626419079, -0.030516967201954001, 0.010459478822937069, -0.00076808652582440037],
(0.75, 20.0): [-0.54651728378950126, -0.029992319199769232, 0.010405694998386575, -0.0007686417223966138],
(0.75, 24.0): [-0.54578309546828363, -0.028372628574010936, 0.010269939602271542, -0.00077427370647261838],
(0.75, 30.0): [-0.54501246434397554, -0.026834887880579802, 0.010195603314317611, -0.00078648615954105515],
(0.75, 40.0): [-0.54418127442022624, -0.025413224488871379, 0.010196455193836855, -0.00080610785749523739],
(0.75, 60.0): [-0.543265189207915, -0.024141961069146383, 0.010285001019536088, -0.00083332193364294587],
(0.75, 120.0): [-0.54224757817994806, -0.023039071833948214, 0.010463365295636302, -0.00086612828539477918],
(0.75, inf): [-0.54114579815367159, -0.02206592527426093, 0.01070374099737127, -0.00089726564005122183],
(0.8, 2.0): [-0.56895274046831146, -0.096326255190541957, 0.025815915364208686, -0.0019136561019354845],
(0.8, 3.0): [-0.5336038380862278, -0.077585191014876181, 0.020184759265389905, -0.0014242746007323785],
(0.8, 4.0): [-0.51780274285934258, -0.064987738443608709, 0.016713309796866204, -0.001135379856633562],
(0.8, 5.0): [-0.50894361222268403, -0.056379186603362705, 0.014511270339773345, -0.00096225604117493205],
(0.8, 6.0): [-0.50335153028630408, -0.050168860294790812, 0.01302807093593626, -0.00085269812692536306],
(0.8, 7.0): [-0.49960934380896432, -0.045417333787806033, 0.011955593330247398, -0.00077759605604250882],
(0.8, 8.0): [-0.49694518248979763, -0.041689151516021969, 0.011158986677273709, -0.00072497430103953366],
(0.8, 9.0): [-0.4949559974898507, -0.038702217132906024, 0.010554360004521268, -0.0006875213117164109],
(0.8, 10.0): [-0.49341407910162483, -0.036266788741325398, 0.010087354421936092, -0.00066060835062865602],
(0.8, 11.0): [-0.49218129312493897, -0.034252403643273498, 0.0097218584838579536, -0.00064123459335201907],
(0.8, 12.0): [-0.49117223957112183, -0.032563269730499021, 0.0094318583096021404, -0.00062725253852419032],
(0.8, 13.0): [-0.49032781145131277, -0.031132495018324432, 0.0091999762562792898, -0.0006172944366003854],
(0.8, 14.0): [-0.48961049628464259, -0.029906921170494854, 0.009012451847823854, -0.00061026211968669543],
(0.8, 15.0): [-0.48899069793054922, -0.028849609914548158, 0.0088602820002619594, -0.00060548991575179055],
(0.8, 16.0): [-0.48844921216636505, -0.027929790075266154, 0.00873599263877896, -0.00060242119796859379],
(0.8, 17.0): [-0.48797119683309537, -0.027123634910159868, 0.0086338139869481887, -0.00060061821593399998],
(0.8, 18.0): [-0.48754596864745836, -0.026411968723496961, 0.0085493196604705755, -0.00059977083160833624],
(0.8, 19.0): [-0.48716341805691843, -0.025781422230819986, 0.0084796655915025769, -0.00059970031758323466],
(0.8, 20.0): [-0.48681739197185547, -0.025219629852198749, 0.0084221844254287765, -0.00060023212822886711],
(0.8, 24.0): [-0.48570639629281365, -0.023480608772518948, 0.008274490561114187, -0.000605681105792215],
(0.8, 30.0): [-0.48455867067770253, -0.021824655071720423, 0.0081888502974720567, -0.00061762126933785633],
(0.8, 40.0): [-0.48335478729267423, -0.020279958998363389, 0.0081765095914194709, -0.00063657117129829635],
(0.8, 60.0): [-0.48207351944996679, -0.018875344346672228, 0.0082473997191472338, -0.00066242478479277243],
(0.8, 120.0): [-0.48070356185330182, -0.017621686995755746, 0.0084009638803223801, -0.00069300383808949318],
(0.8, inf): [-0.47926687718713606, -0.016476575352367202, 0.0086097059646591811, -0.00072160843492730911],
(0.85, 2.0): [-0.53366806986381743, -0.098288178252723263, 0.026002333446289064, -0.0019567144268844896],
(0.85, 3.0): [-0.48995919239619989, -0.077312722648418056, 0.019368984865418108, -0.0013449670192265796],
(0.85, 4.0): [-0.46956079162382858, -0.063818518513946695, 0.015581608910696544, -0.0010264315084377606],
(0.85, 5.0): [-0.45790853796153624, -0.054680511194530226, 0.013229852432203093, -0.00084248430847535898],
(0.85, 6.0): [-0.4505070841695738, -0.048050936682873302, 0.011636407582714191, -0.00072491480033529815],
(0.85, 7.0): [-0.44548337477336181, -0.042996612516383016, 0.010493052959891263, -0.00064528784792153239],
(0.85, 8.0): [-0.44186624932664148, -0.039040005821657585, 0.0096479530794160544, -0.00058990874360967567],
(0.85, 9.0): [-0.43914118689812259, -0.035875693030752713, 0.0090088804130628187, -0.00055071480339399694],
(0.85, 10.0): [-0.43701255390953769, -0.033300997407157376, 0.0085172159355344848, -0.00052272770799695464],
(0.85, 11.0): [-0.43530109064899053, -0.031174742038490313, 0.0081335619868386066, -0.00050268353809787927],
(0.85, 12.0): [-0.43389220376610071, -0.02939618314990838, 0.007830626267772851, -0.00048836431712678222],
(0.85, 13.0): [-0.43271026958463166, -0.027890759135246888, 0.0075886916668632936, -0.00047819339710596971],
(0.85, 14.0): [-0.43170230265007209, -0.026604156062396189, 0.0073939099688705547, -0.00047109996854335419],
(0.85, 15.0): [-0.43083160459377423, -0.025494228911600785, 0.0072358738657550868, -0.00046630677052262481],
(0.85, 16.0): [-0.4300699280587239, -0.024529612608808794, 0.0071069227026219683, -0.00046323869860941791],
(0.85, 17.0): [-0.42939734931902857, -0.023685025616054269, 0.0070011541609695891, -0.00046147954942994158],
(0.85, 18.0): [-0.42879829041505324, -0.022940655682782165, 0.006914006369119409, -0.00046070877994711774],
(0.85, 19.0): [-0.42826119448419875, -0.022280181781634649, 0.0068417746905826433, -0.00046066841214091982],
(0.85, 20.0): [-0.42777654887094479, -0.021690909076747832, 0.0067817408643717969, -0.00046118620289068032],
(0.85, 24.0): [-0.42622450033640852, -0.019869646711890065, 0.0066276799593494029, -0.00046668820637553747],
(0.85, 30.0): [-0.42463810443233418, -0.018130114737381745, 0.0065344613060499164, -0.00047835583417510423],
(0.85, 40.0): [-0.42299917804589382, -0.016498222901308417, 0.0065120558343578407, -0.00049656043685325469],
(0.85, 60.0): [-0.42129387265810464, -0.014992121475265813, 0.0065657795990087635, -0.00052069705640687698],
(0.85, 120.0): [-0.41951580476366368, -0.013615722489371183, 0.0066923911275726814, -0.00054846911649167492],
(0.85, inf): [-0.41768751825428968, -0.012327525092266726, 0.0068664920569562592, -0.00057403720261753539],
(0.9, 1.0): [-0.65851063279096722, -0.126716242078905, 0.036318801917603061, -0.002901283222928193],
(0.9, 2.0): [-0.50391945369829139, -0.096996108021146235, 0.024726437623473398, -0.0017901399938303017],
(0.9, 3.0): [-0.44799791843058734, -0.077180370333307199, 0.018584042055594469, -0.0012647038118363408],
(0.9, 4.0): [-0.42164091756145167, -0.063427071006287514, 0.014732203755741392, -0.00094904174117957688],
(0.9, 5.0): [-0.40686856251221754, -0.053361940054842398, 0.012041802076025801, -0.00072960198292410612],
(0.9, 6.0): [-0.39669926026535285, -0.046951517438004242, 0.010546647213094956, -0.00062621198002366064],
(0.9, 7.0): [-0.39006553675807426, -0.04169480606532109, 0.0093687546601737195, -0.00054648695713273862],
(0.9, 8.0): [-0.38570205067061908, -0.037083910859179794, 0.0083233218526375836, -0.00047177586974035451],
(0.9, 9.0): [-0.38190737267892938, -0.034004585655388865, 0.0077531991574119183, -0.00044306547308527872],
(0.9, 10.0): [-0.37893272918125737, -0.031394677600916979, 0.0072596802503533536, -0.0004160518834299966],
(0.9, 11.0): [-0.37692512492705132, -0.028780793403136471, 0.0066937909049060379, -0.00037420010136784526],
(0.9, 12.0): [-0.37506345200129187, -0.026956483290567372, 0.0064147730707776523, -0.00036595383207062906],
(0.9, 13.0): [-0.37339516122383209, -0.02543949524844704, 0.0061760656530197187, -0.00035678737379179527],
(0.9, 14.0): [-0.37216979891087842, -0.02396347606956644, 0.0059263234465969641, -0.0003439784452550796],
(0.9, 15.0): [-0.371209456600122, -0.022696132732654414, 0.0057521677184623147, -0.00033961108561770848],
(0.9, 16.0): [-0.36958924377983338, -0.022227885445863002, 0.0057691706799383926, -0.00035042762538099682],
(0.9, 17.0): [-0.36884224719083203, -0.021146977888668726, 0.0055957928269732716, -0.00034283810412697531],
(0.9, 18.0): [-0.36803087186793326, -0.020337731477576542, 0.0054655378095212759, -0.00033452966946535248],
(0.9, 19.0): [-0.3676700404163355, -0.019370115848857467, 0.0053249296207149655, -0.00032975528909580403],
(0.9, 20.0): [-0.36642276267188811, -0.019344251412284838, 0.0054454968582897528, -0.00034868111677540948],
(0.9, 24.0): [-0.36450650753755193, -0.017284255499990679, 0.0052337500059176749, -0.00034898202845747288],
(0.9, 30.0): [-0.36251868940168608, -0.015358560437631397, 0.0050914299956134786, -0.00035574528891633978],
(0.9, 40.0): [-0.36008886676510943, -0.014016835682905486, 0.0051930835959111514, -0.00038798316011984165],
(0.9, 60.0): [-0.35825590690268061, -0.011991568926537646, 0.0050632208542414191, -0.00039090198974493085],
(0.9, 120.0): [-0.35543612237284411, -0.011074403997811812, 0.0053504570752765162, -0.00043647137428074178],
(0.9, inf): [-0.35311806343057167, -0.0096254020092145353, 0.0054548591208177181, -0.00045343916634968493],
(0.95, 1.0): [-0.65330318136020071, -0.12638310760474375, 0.035987535130769424, -0.0028562665467665315],
(0.95, 2.0): [-0.47225160417826934, -0.10182570362271424, 0.025846563499059158, -0.0019096769058043243],
(0.95, 3.0): [-0.4056635555586528, -0.077067172693350297, 0.017789909647225533, -0.001182961668735774],
(0.95, 4.0): [-0.37041675177340955, -0.063815687118939465, 0.014115210247737845, -0.00089996098435117598],
(0.95, 5.0): [-0.35152398291152309, -0.052156502640669317, 0.010753738086401853, -0.0005986841939451575],
(0.95, 6.0): [-0.33806730015201264, -0.045668399809578597, 0.0093168898952878162, -0.00051369719615782102],
(0.95, 7.0): [-0.32924041072104465, -0.040019601775490091, 0.0080051199552865163, -0.00042054536135868043],
(0.95, 8.0): [-0.32289030266989077, -0.035575345931670443, 0.0070509089344694669, -0.00035980773304803576],
(0.95, 9.0): [-0.31767304201477375, -0.032464945930165703, 0.0064755950437272143, -0.0003316676253661824],
(0.95, 10.0): [-0.31424318064708656, -0.029133461621153, 0.0057437449431074795, -0.00027894252261209191],
(0.95, 11.0): [-0.31113589620384974, -0.02685115250591049, 0.0053517905282942889, -0.00026155954116874666],
(0.95, 12.0): [-0.30848983612414582, -0.025043238019239168, 0.0050661675913488829, -0.00025017202909614005],
(0.95, 13.0): [-0.3059212907410393, -0.023863874699213077, 0.0049618051135807322, -0.00025665425781125703],
(0.95, 14.0): [-0.30449676902720035, -0.021983976741572344, 0.0045740513735751968, -0.00022881166323945914],
(0.95, 15.0): [-0.30264908294481396, -0.02104880307520084, 0.0044866571614804382, -0.00023187587597844057],
(0.95, 16.0): [-0.30118294463097917, -0.020160231061926728, 0.0044170780759056859, -0.00023733502359045826],
(0.95, 17.0): [-0.30020013353427744, -0.018959271614471574, 0.0041925333038202285, -0.00022274025630789767],
(0.95, 18.0): [-0.29857886556874402, -0.018664437456802001, 0.0042557787632833697, -0.00023758868868853716],
(0.95, 19.0): [-0.29796289236978263, -0.017632218552317589, 0.0040792779937959866, -0.00022753271474613109],
(0.95, 20.0): [-0.29681506554838077, -0.017302563243037392, 0.0041188426221428964, -0.00023913038468772782],
(0.95, 24.0): [-0.29403146911167666, -0.015332330986025032, 0.0039292170319163728, -0.00024003445648641732],
(0.95, 30.0): [-0.29080775563775879, -0.013844059210779323, 0.0039279165616059892, -0.00026085104496801666],
(0.95, 40.0): [-0.28821583032805109, -0.011894686715666892, 0.0038202623278839982, -0.00026933325102031252],
(0.95, 60.0): [-0.28525636737751447, -0.010235910558409797, 0.0038147029777580001, -0.00028598362144178959],
(0.95, 120.0): [-0.28241065885026539, -0.0086103836327305026, 0.0038450612886908714, -0.00030206053671559411],
(0.95, inf): [-0.27885570064169296, -0.0078122455524849222, 0.0041798538053623453, -0.0003469494881774609],
(0.975, 1.0): [-0.65203598304297983, -0.12608944279227957, 0.035710038757117347, -0.0028116024425349053],
(0.975, 2.0): [-0.46371891130382281, -0.096954458319996509, 0.023958312519912289, -0.0017124565391080503],
(0.975, 3.0): [-0.38265282195259875, -0.076782539231612282, 0.017405078796142955, -0.0011610853687902553],
(0.975, 4.0): [-0.34051193158878401, -0.063652342734671602, 0.013528310336964293, -0.00083644708934990761],
(0.975, 5.0): [-0.31777655705536484, -0.051694686914334619, 0.010115807205265859, -0.00054517465344192009],
(0.975, 6.0): [-0.30177149019958716, -0.044806697631189059, 0.008483551848413786, -0.00042827853925009264],
(0.975, 7.0): [-0.29046972313293562, -0.039732822689098744, 0.007435356037378946, -0.00037562928283350671],
(0.975, 8.0): [-0.28309484007368141, -0.034764904940713388, 0.0062932513694928518, -0.00029339243611357956],
(0.975, 9.0): [-0.27711707948119785, -0.031210465194810709, 0.0055576244284178435, -0.00024663798208895803],
(0.975, 10.0): [-0.27249203448553611, -0.028259756468251584, 0.00499112012528406, -0.00021535380417035389],
(0.975, 11.0): [-0.26848515860011007, -0.026146703336893323, 0.0046557767110634073, -0.00020400628148271448],
(0.975, 12.0): [-0.26499921540008192, -0.024522931106167097, 0.0044259624958665278, -0.00019855685376441687],
(0.975, 13.0): [-0.2625023751891592, -0.022785875653297854, 0.004150277321193792, -0.00018801223218078264],
(0.975, 14.0): [-0.26038552414321758, -0.021303509859738341, 0.0039195608280464681, -0.00017826200169385824],
(0.975, 15.0): [-0.25801244886414665, -0.020505508012402567, 0.0038754868932712929, -0.00018588907991739744],
(0.975, 16.0): [-0.25685316062360508, -0.018888418269740373, 0.0035453092842317293, -0.00016235770674204116],
(0.975, 17.0): [-0.25501132271353549, -0.018362951972357794, 0.0035653933105288631, -0.00017470353354992729],
(0.975, 18.0): [-0.25325045404452656, -0.017993537285026156, 0.0036035867405376691, -0.00018635492166426884],
(0.975, 19.0): [-0.25236899494677928, -0.016948921372207198, 0.0034138931781330802, -0.00017462253414687881],
(0.975, 20.0): [-0.25134498025027691, -0.016249564498874988, 0.0033197284005334333, -0.00017098091103245596],
(0.975, 24.0): [-0.24768690797476625, -0.014668160763513996, 0.0032850791186852558, -0.00019013480716844995],
(0.975, 30.0): [-0.24420834707522676, -0.012911171716272752, 0.0031977676700968051, -0.00020114907914487053],
(0.975, 40.0): [-0.24105725356215926, -0.010836526056169627, 0.0030231303550754159, -0.00020128696343148667],
(0.975, 60.0): [-0.23732082703955223, -0.0095442727157385391, 0.0031432904473555259, -0.00023062224109383941],
(0.975, 120.0): [-0.23358581879594578, -0.0081281259918709343, 0.0031877298679120094, -0.00024496230446851501],
(0.975, inf): [-0.23004105093119268, -0.0067112585174133573, 0.0032760251638919435, -0.00026244001319462992],
(0.99, 1.0): [-0.65154119422706203, -0.1266603927572312, 0.03607480609672048, -0.0028668112687608113],
(0.99, 2.0): [-0.45463403324378804, -0.098701236234527367, 0.024412715761684689, -0.0017613772919362193],
(0.99, 3.0): [-0.36402060051035778, -0.079244959193729148, 0.017838124021360584, -0.00119080116484847],
(0.99, 4.0): [-0.31903506063953818, -0.061060740682445241, 0.012093154962939612, -0.00067268347188443093],
(0.99, 5.0): [-0.28917014580689182, -0.052940780099313689, 0.010231009146279354, -0.00057178339184615239],
(0.99, 6.0): [-0.27283240161179012, -0.042505435573209085, 0.0072753401118264534, -0.00031314034710725922],
(0.99, 7.0): [-0.25773968720546719, -0.039384214480463406, 0.0069120882597286867, -0.00032994068754356204],
(0.99, 8.0): [-0.24913629282433833, -0.033831567178432859, 0.0055516244725724185, -0.00022570786249671376],
(0.99, 9.0): [-0.24252380896373404, -0.029488280751457097, 0.0045215453527922998, -0.00014424552929022646],
(0.99, 10.0): [-0.23654349556639986, -0.02705600214566789, 0.0041627255469343632, -0.00013804427029504753],
(0.99, 11.0): [-0.23187404969432468, -0.024803662094970855, 0.0037885852786822475, -0.00012334999287725012],
(0.99, 12.0): [-0.22749929386320905, -0.023655085290534145, 0.0037845051889055896, -0.00014785715789924055],
(0.99, 13.0): [-0.22458989143485605, -0.021688394892771506, 0.0034075294601425251, -0.00012436961982044268],
(0.99, 14.0): [-0.22197623872225777, -0.020188830700102918, 0.0031648685865587473, -0.00011320740119998819],
(0.99, 15.0): [-0.2193924323730066, -0.019327469111698265, 0.0031295453754886576, -0.00012373072900083014],
(0.99, 16.0): [-0.21739436875855705, -0.018215854969324128, 0.0029638341057222645, -0.00011714667871412003],
(0.99, 17.0): [-0.21548926805467686, -0.017447822179412719, 0.0028994805120482812, -0.00012001887015183794],
(0.99, 18.0): [-0.21365014687077843, -0.01688869353338961, 0.0028778031289216546, -0.00012591199104792711],
(0.99, 19.0): [-0.21236653761262406, -0.016057151563612645, 0.0027571468998022017, -0.00012049196593780046],
(0.99, 20.0): [-0.21092693178421842, -0.015641706950956638, 0.0027765989877361293, -0.00013084915163086915],
(0.99, 24.0): [-0.20681960327410207, -0.013804298040271909, 0.0026308276736585674, -0.0001355061502101814],
(0.99, 30.0): [-0.20271691131071576, -0.01206095288359876, 0.0025426138004198909, -0.00014589047959047533],
(0.99, 40.0): [-0.19833098054449289, -0.010714533963740719, 0.0025985992420317597, -0.0001688279944262007],
(0.99, 60.0): [-0.19406768821236584, -0.0093297106482013985, 0.0026521518387539584, -0.00018884874193665104],
(0.99, 120.0): [-0.19010213174677365, -0.0075958207221300924, 0.0025660823297025633, -0.00018906475172834352],
(0.99, inf): [-0.18602070255787137, -0.0062121155165363188, 0.0026328293420766593, -0.00020453366529867131],
(0.995, 1.0): [-0.65135583544951825, -0.1266868999507193, 0.036067522182457165, -0.0028654516958844922],
(0.995, 2.0): [-0.45229774013072793, -0.09869462954369547, 0.024381858599368908, -0.0017594734553033394],
(0.995, 3.0): [-0.35935765236429706, -0.076650408326671915, 0.016823026893528978, -0.0010835134496404637],
(0.995, 4.0): [-0.30704474720931169, -0.063093047731613019, 0.012771683306774929, -0.00075852491621809955],
(0.995, 5.0): [-0.27582551740863454, -0.052533353137885791, 0.0097776009845174372, -0.00051338031756399129],
(0.995, 6.0): [-0.25657971464398704, -0.043424914996692286, 0.0074324147435969991, -0.00034105188850494067],
(0.995, 7.0): [-0.24090407819707738, -0.039591604712200287, 0.0068848429451020387, -0.00034737131709273414],
(0.995, 8.0): [-0.23089540800827862, -0.034353305816361958, 0.0056009527629820111, -0.00024389336976992433],
(0.995, 9.0): [-0.22322694848310584, -0.030294770709722547, 0.0046751239747245543, -0.00017437479314218922],
(0.995, 10.0): [-0.21722684126671632, -0.026993563560163809, 0.0039811592710905491, -0.00013135281785826703],
(0.995, 11.0): [-0.21171635822852911, -0.025156193618212551, 0.0037507759652964205, -0.00012959836685175671],
(0.995, 12.0): [-0.20745332165849167, -0.023318819535607219, 0.0034935020002058903, -0.00012642826898405916],
(0.995, 13.0): [-0.20426054591612508, -0.021189796175249527, 0.003031472176128759, -9.0497733877531618e-05],
(0.995, 14.0): [-0.20113536905578902, -0.020011536696623061, 0.0029215880889956729, -9.571527213951222e-05],
(0.995, 15.0): [-0.19855601561006403, -0.018808533734002542, 0.0027608859956002344, -9.2472995256929217e-05],
(0.995, 16.0): [-0.19619157579534008, -0.017970461530551096, 0.0027113719105000371, -9.9864874982890861e-05],
(0.995, 17.0): [-0.19428015140726104, -0.017009762497670704, 0.0025833389598201345, -9.6137545738061124e-05],
(0.995, 18.0): [-0.19243180236773033, -0.01631617252107519, 0.0025227443561618621, -9.8067580523432881e-05],
(0.995, 19.0): [-0.19061294393069844, -0.01586226613672222, 0.0025207005902641781, -0.00010466151274918466],
(0.995, 20.0): [-0.18946302696580328, -0.014975796567260896, 0.0023700506576419867, -9.5507779057884629e-05],
(0.995, 24.0): [-0.18444251428695257, -0.013770955893918012, 0.0024579445553339903, -0.00012688402863358003],
(0.995, 30.0): [-0.18009742499570078, -0.011831341846559026, 0.0022801125189390046, -0.00012536249967254906],
(0.995, 40.0): [-0.17562721880943261, -0.010157142650455463, 0.0022121943861923474, -0.000134542652873434],
(0.995, 60.0): [-0.17084630673594547, -0.0090224965852754805, 0.0023435529965815565, -0.00016240306777440115],
(0.995, 120.0): [-0.16648414081054147, -0.0074792163241677225, 0.0023284585524533607, -0.00017116464012147041],
(0.995, inf): [-0.16213921875452461, -0.0058985998630496144, 0.0022605819363689093, -0.00016896211491119114],
(0.999, 1.0): [-0.65233994072089363, -0.12579427445444219, 0.035830577995679271, -0.0028470555202945564],
(0.999, 2.0): [-0.45050164311326341, -0.098294804380698292, 0.024134463919493736, -0.0017269603956852841],
(0.999, 3.0): [-0.35161741499307819, -0.076801152272374273, 0.016695693063138672, -0.0010661121974071864],
(0.999, 4.0): [-0.29398448788574133, -0.06277319725219685, 0.012454220010543127, -0.00072644165723402445],
(0.999, 5.0): [-0.25725364564365477, -0.053463787584337355, 0.0099664236557431545, -0.00054866039388980659],
(0.999, 6.0): [-0.23674225795168574, -0.040973155890031254, 0.0062599481191736696, -0.00021565734226586692],
(0.999, 7.0): [-0.21840108878983297, -0.037037020271877719, 0.0055908063671900703, -0.00020238790479809623],
(0.999, 8.0): [-0.2057964743918449, -0.032500885103194356, 0.0046441644585661756, -0.00014769592268680274],
(0.999, 9.0): [-0.19604592954882674, -0.029166922919677936, 0.0040644333111949814, -0.00012854052861297006],
(0.999, 10.0): [-0.18857328935948367, -0.026316705703161091, 0.0035897350868809275, -0.00011572282691335702],
(0.999, 11.0): [-0.18207431428535406, -0.024201081944369412, 0.0031647372098056077, -8.1145935982296439e-05],
(0.999, 12.0): [-0.17796358148991101, -0.021054306118620879, 0.0023968085939602055, -1.5907156771296993e-05],
(0.999, 13.0): [-0.17371965962745489, -0.019577162950177709, 0.0022391783473999739, -2.0613023472812558e-05],
(0.999, 14.0): [-0.16905298116759873, -0.01967115985443986, 0.0026495208325889269, -9.1074275220634073e-05],
(0.999, 15.0): [-0.16635662558214312, -0.017903767183469876, 0.0022301322677100496, -5.1956773935885426e-05],
(0.999, 16.0): [-0.16388776549525449, -0.016671918839902419, 0.0020365289602744382, -4.3592447599724942e-05],
(0.999, 17.0): [-0.16131934177990759, -0.015998918405126326, 0.0019990454743285904, -4.8176277491327653e-05],
(0.999, 18.0): [-0.15880633110376571, -0.015830715141055916, 0.0021688405343832091, -8.061825248932771e-05],
(0.999, 19.0): [-0.15644841913314136, -0.015729364721105681, 0.0022981443610378136, -0.00010093672643417343],
(0.999, 20.0): [-0.15516596606222705, -0.014725095968258637, 0.0021117117014292155, -8.8806880297328484e-05],
(0.999, 24.0): [-0.14997437768645827, -0.012755323295476786, 0.0018871651510496939, -8.0896370662414938e-05],
(0.999, 30.0): [-0.14459974882323703, -0.011247323832877647, 0.0018637400643826279, -9.6415323191606741e-05],
(0.999, 40.0): [-0.13933285919392555, -0.0097151769692496587, 0.0018131251876208683, -0.00010452598991994023],
(0.999, 60.0): [-0.13424555343804143, -0.0082163027951669444, 0.0017883427892173382, -0.00011415865110808405],
(0.999, 120.0): [-0.12896119523040372, -0.0070426701112581112, 0.0018472364154226955, -0.00012862202979478294],
(0.999, inf): [-0.12397213562666673, -0.0056901201604149998, 0.0018260689406957129, -0.00013263452567995485]}
# p values (cumulative probabilities) that are defined in the A table
p_keys = [.1, .5, .675, .75, .8, .85, .9, .95, .975, .99, .995, .999]

# v values (degrees of freedom) that are defined in the A table.
# list(range(...)) instead of `range(...) + [...]` so this works on
# both Python 2 and Python 3 (range concatenation is Python-2-only).
v_keys = list(range(2, 21)) + [24, 30, 40, 60, 120, float('inf')]
def _isfloat(x):
"""
returns True if x is a float,
returns False otherwise
"""
try:
float(x)
except:
return False
return True
def _phi(p):
    """Return scipy.stats.norm.isf(p), i.e. the standard normal value z
    with upper-tail probability p (equivalently the (1-p)th quantile)."""
    return scipy.stats.norm.isf(p)
def _ptransform(p):
    """function for p-value abcissa transformation used by the p > .85
    interpolation branch: -1 / (1 + 1.5 * _phi((1 + p) / 2))"""
    return -1. / (1. + 1.5 * _phi((1. + p)/2.))
def _select_points(a, list_like):
"""
returns one above a, one below a, and the third
closest point to a sorted in ascending order
for quadratic interpolation. Assumes that points
above and below a exist.
"""
foo = [x for x in list(list_like) if x-a <= 0]
z = [min(foo, key=lambda x : abs(x-a))]
foo = [x for x in list(list_like) if x-a > 0]
z.append(min(foo, key=lambda x : abs(x-a)))
foo = [x for x in list(list_like) if x not in z]
z.append(min(foo, key=lambda x : abs(x-a)))
return sorted(z)
def _func(a, p, r, v):
    """
    Evaluate f-hat (eq. 2.3) for coefficient vector *a*, probability *p*,
    sample mean difference *r*, and degrees of freedom *v*, including the
    small-sample corrections of eq. 2.7 and 2.8.  Returns -f-hat.
    """
    # eq. 2.3: quartic polynomial in log(r - 1)
    log_rm1 = math.log(r - 1.)
    fhat = (a[0] * log_rm1 +
            a[1] * log_rm1 ** 2 +
            a[2] * log_rm1 ** 3 +
            a[3] * log_rm1 ** 4)

    # eq. 2.7 and 2.8 corrections (only applied when r == 3);
    # infinite degrees of freedom are capped at 1e38
    if r == 3:
        fhat += -0.002 / (1. + 12. * _phi(p) ** 2)
        dof = 1e38 if v == inf else v
        if v <= 4.364:
            fhat += 1. / 517. - 1. / (312. * dof)
        else:
            fhat += 1. / (191. * dof)

    return -fhat
def _interpolate_p(p, r, v):
    """
    interpolates p based on the values in the A table for the
    scalar value of r and the scalar value of v (v must be a
    tabulated degrees-of-freedom key)
    """
    # interpolate p (v should be in table)
    # if .5 < p < .75 use linear interpolation in q
    # if p > .75 use quadratic interpolation in log(y + r/v)
    # by -1. / (1. + 1.5 * _phi((1. + p)/2.))

    # find the 3 closest p values (note: p values, not v values)
    p0, p1, p2 = _select_points(p, p_keys)

    # y = f-hat + 1 at each bracketing p (see _func)
    y0 = _func(A[(p0, v)], p0, r, v) + 1.
    y1 = _func(A[(p1, v)], p1, r, v) + 1.
    y2 = _func(A[(p2, v)], p2, r, v) + 1.

    # ordinate transformation: log(y + r/v)
    y_log0 = math.log(y0 + float(r)/float(v))
    y_log1 = math.log(y1 + float(r)/float(v))
    y_log2 = math.log(y2 + float(r)/float(v))

    # If p < .85 apply only the ordinate transformation
    # if p > .85 apply the ordinate and the abcissa transformation
    # In both cases apply quadratic interpolation
    if p > .85:
        p_t  = _ptransform(p)
        p0_t = _ptransform(p0)
        p1_t = _ptransform(p1)
        p2_t = _ptransform(p2)

        # calculate derivatives for quadratic interpolation
        d2 = 2*((y_log2-y_log1)/(p2_t-p1_t) - \
                (y_log1-y_log0)/(p1_t-p0_t))/(p2_t-p0_t)
        # pick the one-sided difference on the wider interval
        if (p2+p0)>=(p1+p1):
            d1 = (y_log2-y_log1)/(p2_t-p1_t) - 0.5*d2*(p2_t-p1_t)
        else:
            d1 = (y_log1-y_log0)/(p1_t-p0_t) + 0.5*d2*(p1_t-p0_t)
        d0 = y_log1

        # interpolate value
        y_log = (d2/2.) * (p_t-p1_t)**2. + d1 * (p_t-p1_t) + d0

        # transform back to y
        y = math.exp(y_log) - float(r)/float(v)

    elif p > .5:
        # calculate derivatives for quadratic interpolation
        d2 = 2*((y_log2-y_log1)/(p2-p1) - \
                (y_log1-y_log0)/(p1-p0))/(p2-p0)
        if (p2+p0)>=(p1+p1):
            d1 = (y_log2-y_log1)/(p2-p1) - 0.5*d2*(p2-p1)
        else:
            d1 = (y_log1-y_log0)/(p1-p0) + 0.5*d2*(p1-p0)
        d0 = y_log1

        # interpolate values
        y_log = (d2/2.) * (p-p1)**2. + d1 * (p-p1) + d0

        # transform back to y
        y = math.exp(y_log) - float(r)/float(v)

    else:
        # linear interpolation in q and p
        # ((v,1e38)[v>1e38] caps infinite dof so scipy's t accepts it)
        q0 = math.sqrt(2) * -y0 * \
             scipy.stats.t.isf((1.+p0)/2., (v,1e38)[v>1e38])
        q1 = math.sqrt(2) * -y1 * \
             scipy.stats.t.isf((1.+p1)/2., (v,1e38)[v>1e38])

        d1 = (q1-q0)/(p1-p0)
        d0 = q0

        # interpolate values
        q = d1 * (p-p0) + d0

        # transform back to y
        y = -q / (math.sqrt(2) * \
                  scipy.stats.t.isf((1.+p)/2., (v,1e38)[v>1e38]))

    return y
def _interpolate_v(p, r, v):
    """
    interpolates v based on the values in the A table for the
    scalar value of r and the scalar value of p (p must be a
    tabulated probability key)
    """
    # interpolate v (p should be in table)
    # ordinate: y**2
    # abcissa:  1./v

    # find the 3 closest v values
    # (v == 1 is only tabulated for p >= .90, hence the conditional key)
    v0, v1, v2 = _select_points(v, v_keys+([],[1])[p>=.90])

    # y = f - 1.
    y0 = _func(A[(p,v0)], p, r, v0) + 1.
    y1 = _func(A[(p,v1)], p, r, v1) + 1.
    y2 = _func(A[(p,v2)], p, r, v2) + 1.

    # if v2 is inf set to a big number so interpolation
    # calculations will work
    if v2 > 1e38: v2 = 1e38

    # calculate derivatives for quadratic interpolation
    d2 = 2.*((y2**2-y1**2)/(1./v2-1./v1) - \
             (y0**2-y1**2)/(1./v0-1./v1)) / (1./v2-1./v0)
    # pick the one-sided difference on the wider interval in 1/v
    if (1./v2 + 1./v0) >= (1./v1+1./v1):
        d1 = (y2**2-y1**2) / (1./v2-1./v1) - 0.5*d2*(1./v2-1./v1)
    else:
        d1 = (y1**2-y0**2) / (1./v1-1./v0) + 0.5*d2*(1./v1-1./v0)
    d0 = y1**2

    # calculate y
    y = math.sqrt((d2/2.)*(1./v-1./v1)**2. + d1*(1./v-1./v1)+ d0)
    return y
def _qsturng(p, r, v):
    """Scalar kernel of qsturng.

    Returns the studentized range q-value for probability *p*, number of
    sample means *r*, and degrees of freedom *v*, interpolating in the A
    table when (p, v) is not tabulated directly (r is handled
    continuously by _func).
    """
    # r is interpolated through the q to y here we only need to
    # account for when p and/or v are not found in the table.
    global A, p_keys, v_keys

    if p < .1 or p > .999:
        raise ValueError('p must be between .1 and .999')

    if p < .9:
        if v < 2:
            raise ValueError('v must be > 2 when p < .9')
    else:
        if v < 1:
            raise ValueError('v must be > 1 when p >= .9')

    # Bug fix: dict.has_key() was removed in Python 3; the "in" operator
    # is equivalent and also works on Python 2.
    if (p, v) in A:
        # (p, v) is tabulated directly -- no interpolation needed
        f = _func(A[(p, v)], p, r, v)
        y = f + 1.

    elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]:
        # apply bilinear (quadratic) interpolation
        #
        #   p0,v2 +        o         + p1,v2     + p2,v2
        #                    r2
        #
        # 1
        # -                 (p,v)
        # v                x
        #
        #                    r1
        #   p0,v1 +        o         + p1,v1     + p2,v1
        #
        #
        #   p0,v0 +        o r0      + p1,v0     + p2,v0
        #
        #                _ptransform(p)
        #
        # (p1 and v1 may be below or above (p,v). The algorithm
        #  works in both cases. For diagramatic simplicity it is
        #  shown as above)
        #
        # 1. at v0, v1, and v2 use quadratic interpolation
        #    to find r0, r1, r2
        #
        # 2. use r0, r1, r2 and quadratic interpolaiton
        #    to find y and (p,v)

        # find the 3 closest v values
        v0, v1, v2 = _select_points(v, v_keys+([],[1])[p>=.90])

        # interpolate over p at each bracketing v (the original also
        # selected p0, p1, p2 here but never used them; that unused
        # _select_points call has been removed)
        r0 = _interpolate_p(p, r, v0)
        r1 = _interpolate_p(p, r, v1)
        r2 = _interpolate_p(p, r, v2)

        # calculate derivatives for quadratic interpolation in 1/v
        d2 = 2.*((r2**2-r1**2)/(1./v2-1./v1) - \
                 (r0**2-r1**2)/(1./v0-1./v1)) / (1./v2-1./v0)
        if (1./v2 + 1./v0) >= (1./v1+1./v1):
            d1 = (r2**2-r1**2) / (1./v2-1./v1) - 0.5*d2*(1./v2-1./v1)
        else:
            d1 = (r1**2-r0**2) / (1./v1-1./v0) + 0.5*d2*(1./v1-1./v0)
        d0 = r1**2

        # calculate y
        y = math.sqrt((d2/2.)*(1./v-1./v1)**2. + d1*(1./v-1./v1)+ d0)

    elif v not in v_keys+([],[1])[p>=.90]:
        y = _interpolate_v(p, r, v)

    elif p not in p_keys:
        y = _interpolate_p(p, r, v)

    # convert y back to a q value via the t distribution; infinite
    # degrees of freedom are capped at 1e38 for scipy's benefit
    return math.sqrt(2) * -y * \
        scipy.stats.t.isf((1.+p)/2., (v,1e38)[v>1e38])
# make a qsturng function that will accept list-like objects
_vqsturng = np.vectorize(_qsturng)
def qsturng(p, r, v):
    """
    returns the q-value of the Studentized Range q-distribution as a
    function of the probability (p), number of sample means (r), and
    the degrees of freedom (v).  Scalar arguments are dispatched to the
    scalar kernel; array-likes go through the vectorized version.
    """
    all_scalars = all(_isfloat(arg) for arg in (p, r, v))
    if all_scalars:
        return _qsturng(p, r, v)
    return _vqsturng(p, r, v)
import scipy.optimize
def _psturng(q, r, v):
    """Scalar probability for the Studentized Range distribution.

    Numerically inverts _qsturng over p in [.1, .999] so that
    qsturng(1 - p, r, v) == q, and returns that p.
    """
    # PEP 8: a named inner function instead of a lambda bound to a name
    def opt_func(p, r, v):
        return abs(_qsturng(p, r, v) - q)

    return 1. - scipy.optimize.fminbound(opt_func, .1, .999, args=(r, v))
# vectorized version of _psturng for array-like q, r, v
_vpsturng = np.vectorize(_psturng)
def psturng(q, r, v):
    """
    returns the probability for the Studentized q-distribution where
    the value q cooresponds to qsturng(1 - p, r, v)

    The optimizer is bounded to [.001, .9]:

        If .001 is returned the probability should be interpreted
        as p <= .001; likewise a returned .9 means p >= .9.
    """
    all_scalars = all(_isfloat(arg) for arg in (q, r, v))
    if all_scalars:
        return _psturng(q, r, v)
    return _vpsturng(q, r, v)
##p, r, v = .9, 10, 20
##print
##print 'p and v interpolation'
##print '\t20\t22\t24'
##print '.75',qsturng(.75, r, 20),qsturng(.75, r, 22),qsturng(.75, r, 24)
##print '.85',qsturng(.85, r, 20),qsturng(.85, r, 22),qsturng(.85, r, 24)
##print '.90',qsturng(.90, r, 20),qsturng(.90, r, 22),qsturng(.90, r, 24)
##print
##print 'p and v interpolation'
##print '\t120\t500\tinf'
##print '.950',qsturng(.95, r, 120),qsturng(.95, r, 500),qsturng(.95, r, inf)
##print '.960',qsturng(.96, r, 120),qsturng(.96, r, 500),qsturng(.96, r, inf)
##print '.975',qsturng(.975, r, 120),qsturng(.975, r, 500),qsturng(.975, r, inf)
##print
##print 'p and v interpolation'
##print '\t40\t50\t60'
##print '.950',qsturng(.95, r, 40),qsturng(.95, r, 50),qsturng(.95, r, 60)
##print '.960',qsturng(.96, r, 40),qsturng(.96, r, 50),qsturng(.96, r, 60)
##print '.975',qsturng(.975, r, 40),qsturng(.975, r, 50),qsturng(.975, r, 60)
##print
##print 'p and v interpolation'
##print '\t20\t22\t24'
##print '.50',qsturng(.5, r, 20),qsturng(.5, r, 22),qsturng(.5, r, 24)
##print '.60',qsturng(.6, r, 20),qsturng(.6, r, 22),qsturng(.6, r, 24)
##print '.75',qsturng(.75, r, 20),qsturng(.75, r, 22),qsturng(.75, r, 24)
| [
"dominic@nursix.org"
] | dominic@nursix.org |
2b342274bc429eacca9e41da3bbf739877dd0db6 | a1ff3f4a3909b672a31338aa7a1e25d7953111fb | /twitter_app/ttc_debug.py | a9c691ffbfd6b244b1ba4828a633dfd53ce6f660 | [] | no_license | wayne927/RPi-stuff | 0301831576bbc0d3e9a480d57d04279452dd2d84 | cae95547e96e0b1e80f2dc3b6de9b7dfeef777b9 | refs/heads/master | 2020-05-21T04:36:02.799577 | 2018-12-11T11:00:02 | 2018-12-11T11:00:02 | 58,220,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | #!/usr/bin/python
import twitter
import datetime
from datetime import timedelta
import calendar
import os
import sys
from readkeys import *
from send_gmail import *
def getLocalTime(date_in):
    """Parse a Twitter UTC timestamp string and shift it to local time (UTC-5)."""
    parsed_utc = datetime.datetime.strptime(date_in, '%a %b %d %H:%M:%S +0000 %Y')
    # TODO: deal with daylight saving time...
    utc_offset_hours = -5
    return parsed_utc + timedelta(hours=utc_offset_hours)
def shouldPrintTweet(status):
    """Return True if this TTC status is a weekday subway-line notice
    (mentions line 1/2/4 and is not an elevator announcement)."""
    # Don't care if it's weekend (note: weekday() is 0 for Monday)
    if getLocalTime(status.created_at).weekday() >= 5:
        return False

    # lower-case and strip all whitespace so keyword matching is robust
    normalized = ''.join(status.text.lower().split())

    include_any = ('line1', 'line2', 'line4')
    exclude_all = ('elevator',)

    if not any(keyword in normalized for keyword in include_any):
        # no relevant subway line mentioned
        return False
    # drop tweets that mention any excluded keyword
    return not any(keyword in normalized for keyword in exclude_all)
def printTweet(status, output):
    """Append a formatted line (weekday, local time, tweet text) to *output*."""
    local_time = getLocalTime(status.created_at)
    #out = out + str(status.id) + '\n'
    formatted = "".join([
        calendar.day_name[local_time.weekday()], " ",
        str(local_time), '\n',
        status.text, '\n\n',
    ])
    return output + formatted
# build an authenticated Twitter API client from the stored credentials
keys = readKeys('binkeys.apikey')
api = twitter.Api(consumer_key=keys[0],
                  consumer_secret=keys[1],
                  access_token_key=keys[2],
                  access_token_secret=keys[3])

# read the most recent status (MRS) id that we got last time
# try :
#     fileMRS = open('mrs.txt', 'r')
#     MRS_id = int(fileMRS.readline())
#     fileMRS.close()
#     print('Most recent status ID read from file = ' + str(MRS_id))
# except :
#     # File not found? Bad id? Meh
#     MRS_id = 0

# NOTE(review): hard-coded MRS id for debugging; the file-based lookup
# above is disabled in this debug variant.
MRS_id = 1014528457500385280

if MRS_id == 0 :
    print('MRS ID invalid. Just read the last 100.')
    statuses = api.GetUserTimeline(screen_name='TTCnotices', count=100)
    MRS_id = statuses[0].id
else :
    statuses = api.GetUserTimeline(screen_name='TTCnotices', since_id=MRS_id, count=1000)

print('Number of statuses since last MRS = ' + str(len(statuses)))

if len(statuses) == 0 :
    sys.exit()
else :
    # remember the newest status id (persistence disabled below)
    MRS_id = statuses[0].id
#     fileMRS = open('mrs.txt', 'w')
#     fileMRS.write(str(MRS_id))
#     fileMRS.close()

# accumulate the formatted text of every tweet that passes the filter
output = ""
for s in statuses :
    tweet = ''.join(s.text.lower().split())
    if shouldPrintTweet(s) :
        output = printTweet(s, output)

if not output :
    sys.exit()

# email delivery disabled in this debug variant -- print instead
#timenow = datetime.datetime.now()
#email_subject = 'TTC Update: ' + timenow.strftime('%a %b %d %H:%M:%S')
#send_gmail(email_subject, output)
print(output)
| [
"wayne.ngan@gmail.com"
] | wayne.ngan@gmail.com |
75ad2856c5ff6b4ca39329ba99881252b8c694e8 | 6140bcfbdfad6fef444acfd8d40a776f7f467598 | /sbmOpenMM/datasets/__init__.py | 1820e9b8e9b585d1513c82d6822d7f0a0098220f | [
"MIT"
] | permissive | CompBiochBiophLab/sbm-openmm | bfa799f305d32b3b9de147d99bf8f2354867ee06 | 113b04eb2c78664a48600a05ff6778898fbb2fd9 | refs/heads/master | 2022-08-10T00:49:30.874501 | 2022-07-24T11:12:31 | 2022-07-24T11:12:31 | 219,484,731 | 18 | 7 | MIT | 2019-11-26T12:59:44 | 2019-11-04T11:23:38 | Jupyter Notebook | UTF-8 | Python | false | false | 28 | py | from . import foxp1_folding
| [
"martin.floor@uvic.cat"
] | martin.floor@uvic.cat |
11b9bf5a469cbefb5d55ecbc166fdf0b95d5e6a5 | d2bb13cec7faf28e3d268312298f03c99806bd8b | /IPTS-16891-Dy2Ti2O7/norm_mesh_symm_All_rwp_100mK_7.py | d66eaa8f6dc140ec0ed3f53c2db9c0369b379c0f | [] | no_license | rosswhitfield/corelli | 06a91c26556ea788f20f973a1018a56e82a8c09a | d9e47107e3272c4457aa0d2e0732fc0446f54279 | refs/heads/master | 2021-08-07T14:04:24.426151 | 2021-08-03T19:19:05 | 2021-08-03T19:19:05 | 51,771,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,488 | py | from mantid.simpleapi import *
from mantid.geometry import SymmetryOperationFactory
import numpy as np
# about information on where the data are and where to save
iptsfolder= "/SNS/CORELLI/IPTS-16891/"
outputdir="/SNS/users/rwp/corelli/IPTS-16891-Dy2Ti2O7/"
nxfiledir=iptsfolder + "nexus/"
ccfiledir = iptsfolder +"shared/autoreduce/"
UBfile = iptsfolder+"shared/DTO_UB_111Vertical.mat"
reducedfile_prefix = "DTO_cc"
LoadNexus(Filename='/SNS/CORELLI/shared/Vanadium/2016B/SolidAngle20160720NoCC.nxs', OutputWorkspace='sa')
LoadNexus(Filename='/SNS/CORELLI/shared/Vanadium/2016B/Spectrum20160720NoCC.nxs', OutputWorkspace='flux')
MaskBTP(Workspace='sa',Bank="1-30,62-91")
MaskBTP(workspace='sa',Pixel='1-16,200-256') #Mask the magnet
MaskBTP(Workspace='sa',Bank="49",Tube="1")
MaskBTP(Workspace='sa',Bank="54",Tube="1")
MaskBTP(Workspace='sa',Bank="58",Tube="13-16",Pixel="80-130")
MaskBTP(Workspace='sa',Bank="59",Tube="1-4",Pixel="80-130")
# Get UBs: load the nominal UB matrix from file, then generate one UB per
# symmetry operation so equivalent reflections are folded together.
LoadEmptyInstrument(Filename='/SNS/CORELLI/shared/Calibration/CORELLI_Definition_cal_20160310.xml', OutputWorkspace='ub')
LoadIsawUB(InputWorkspace='ub', Filename=UBfile)
ub=mtd['ub'].sample().getOrientedLattice().getUB()
print "Starting UB :"
print ub

#DTO Fd-3m (227) general position has 192 symmety operations.
# (the 48 point-group operations of m-3m are listed here)
symOps = SymmetryOperationFactory.createSymOps(\
"x,y,z; -x,-y,z; -x,y,-z; x,-y,-z;\
z,x,y; z,-x,-y; -z,-x,y; -z,x,-y;\
y,z,x; -y,z,-x; y,-z,-x; -y,-z,x;\
y,x,-z; -y,-x,-z; y,-x,z; -y,x,z;\
x,z,-y; -x,z,y; -x,-z,-y; x,-z,y;\
z,y,-x; z,-y,x; -z,y,x; -z,-y,-x;\
-x,-y,-z; x,y,-z; x,-y,z; -x,y,z;\
-z,-x,-y; -z,x,y; z,x,-y; z,-x,y;\
-y,-z,-x; y,-z,x; -y,z,x; y,z,-x;\
-y,-x,z; y,x,z; -y,x,-z; y,-x,-z;\
-x,-z,y; x,-z,-y; x,z,y; -x,z,-y;\
-z,-y,x; -z,y,-x; z,-y,-x; z,y,x")

# one transformed UB per symmetry operation
ub_list=[]
for sym in symOps:
    # build the HKL transformation matrix row by row from the basis vectors
    UBtrans = np.zeros((3,3))
    UBtrans[0] = sym.transformHKL([1,0,0])
    UBtrans[1] = sym.transformHKL([0,1,0])
    UBtrans[2] = sym.transformHKL([0,0,1])
    UBtrans=np.matrix(UBtrans.T)
    new_ub = ub*UBtrans
    print "Symmetry transform for "+sym.getIdentifier()
    print UBtrans
    print "New UB:"
    print new_ub
    ub_list.append(new_ub)
#load in background
#bkg=LoadEventNexus('/SNS/CORELLI/IPTS-15796/nexus/CORELLI_28124.nxs.h5')
#bkg=LoadNexus('/SNS/CORELLI/IPTS-15796/shared/autoreduce/CORELLI_28124_elastic.nxs')
#MaskDetectors(Workspace=bkg,MaskedWorkspace='sa')
#pc_bkg=sum(bkg.getRun()['proton_charge'].value)
#print 'pc_bkg=:'+str(pc_bkg)

#T=1.8 K
# NOTE(review): the next assignment is immediately overwritten -- only the
# 100 mK run range below is actually processed.
runs = range(34599,34635,1)
#T=100 mK
runs = range(34635,34653,1)
totalrun = len(runs)
print "Total number of runs %d" %totalrun

# start from clean accumulation workspaces
if mtd.doesExist('normMD'):
    DeleteWorkspace('normMD')
if mtd.doesExist('dataMD'):
    DeleteWorkspace('dataMD')

#for r in runs:
for index, r in enumerate(runs):
    print index, ' Processing run : %s' %r
    num=0
    print 'Loading run number:'+ str(r)
    #filename='/SNS/CORELLI/IPTS-15526/nexus/CORELLI_'+str(r)+'.nxs.h5'
    #dataR=LoadEventNexus(Filename=filename)
    filename=ccfiledir+'CORELLI_'+str(r)+'_elastic.nxs'
    dataR=LoadNexus(Filename=filename)
    LoadInstrument(Workspace= dataR, Filename='/SNS/CORELLI/shared/Calibration/CORELLI_Definition_cal_20160310.xml',RewriteSpectraMap=False)
    MaskDetectors(Workspace=dataR,MaskedWorkspace='sa')
    pc_data=sum(dataR.getRun()['proton_charge'].value)
    print 'pc_data=:'+str(pc_data)
    #dataR=dataR - bkg*pc_data/pc_bkg
    # subtract the background if a background file was provided. Please make sure that the data were treated in the same way in terms of proton charge.
    if mtd.doesExist('Bkg'):
        bkg = mtd['Bkg']
        ratio = pc_data/pc_bkg
        bkg_c = bkg*ratio
        Minus(LHSWorkspace=dataR, RHSWorkspace=bkg_c, OutputWorkspace=dataR)
    # convert to momentum, crop, and set the goniometer/UB for this run
    dataR=ConvertUnits(dataR,Target="Momentum",EMode="Elastic")
    dataR=CropWorkspace(dataR,XMin=2.5,XMax=10)
    SetGoniometer(dataR,Axis0="BL9:Mot:Sample:Axis2,0,1,0,1")
    LoadIsawUB(InputWorkspace=dataR,Filename=UBfile)
    # accumulate data/normalization for every symmetry-equivalent UB
    for ub in ub_list:
    #for index, ub in enumerate(ub_list):
        #print "index, using UB ", (index+1), ":"
        num += 1
        print "Run number"+str(r)+" Using UB:"+str(num)
        print ub
        SetUB(dataR, UB=ub)
        md=ConvertToMD(InputWorkspace=dataR,QDimensions='Q3D',dEAnalysisMode='Elastic', Q3DFrames='HKL',
            QConversionScales='HKL',MinValues='-7.1,-7.1,-7.1',MaxValues='7.1,7.1,7.1')
        a1,b1=MDNormSCD(InputWorkspace='md',FluxWorkspace='flux',SolidAngleWorkspace='sa',
                        AlignedDim0="[H,0,0],-7.01,7.01,701",
                        AlignedDim1="[0,K,0],-7.01,7.01,701",
                        AlignedDim2="[0,0,L],-7.01,7.01,701")
        if mtd.doesExist('dataMD'):
            dataMD=dataMD+a1
        else:
            dataMD=CloneMDWorkspace(a1)
        if mtd.doesExist('normMD'):
            normMD=normMD+b1
        else:
            normMD=CloneMDWorkspace(b1)

# normalized data = accumulated data / accumulated normalization
normData_CC=dataMD/normMD
SaveMD('dataMD',Filename=outputdir+'DTO_datacc_48sym_Temp100mK_7.nxs')
SaveMD('normMD',Filename=outputdir+'DTO_normcc_48sym_Temp100mK_7.nxs')
SaveMD('normData_CC',Filename=outputdir+'DTO_normdatacc_48sym_Temp100mK_7.nxs')
# group the data
#data6K=GroupWorkspaces(datatoMerge)
#md6K=GroupWorkspaces(mdtoMerge)
| [
"whitfieldre@ornl.gov"
] | whitfieldre@ornl.gov |
5e070c29c4a7e73ae079e481606dc53c90ecce4e | 32a60828c347544bffad94bf9716078c5717d760 | /landing/apps/customuser/models.py | 13cfc581f89337a9e3ba60cff0cd353c1ef9af35 | [] | no_license | flikn/lazonaflikn | 6c0b9c6eee85a0f23d1443af358ec8077b07b4f4 | c8bfc2f50a03fc224f734ee2143280ea87a0964e | refs/heads/master | 2020-08-11T17:19:49.092468 | 2015-02-16T22:35:41 | 2015-02-16T22:35:41 | 29,598,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,727 | py | from django.db import models
from django.core import validators
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from apps.subscribe.models import Subscriptor
class MyUserManager(BaseUserManager):
    """Manager providing user/superuser creation for the custom MyUser model."""

    def create_user(self, email, password, username="",
                    first_name="", last_name="", *args, **kwargs):
        """
        Creates and saves a User with the given email and password.

        Raises ValueError when no email is supplied.
        """
        if not email:
            raise ValueError("Users must have an email address")
        user = self.model(
            email=self.normalize_email(email),
            username=username,
            first_name=first_name,
            last_name=last_name,
            # SECURITY(review): this stores the password in PLAINTEXT in the
            # raw_password model field, alongside the proper hash set below.
            # Confirm whether this is intentional; it is a serious liability.
            raw_password=password,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """
        Creates and saves a superuser with the given email and password.
        """
        user = self.create_user(
            email,
            password=password,
        )
        user.is_admin = True
        user.save(using=self._db)
        return user
class MyUser(AbstractBaseUser):
    """Custom user model authenticated by email (USERNAME_FIELD)."""

    # optional link to a newsletter subscriber record
    subscriptor = models.ForeignKey(
        Subscriptor,
        blank=True,
        null=True,
    )
    # login identifier
    email = models.EmailField(
        verbose_name="email address",
        max_length=50,
        unique=True,
    )
    username = models.CharField(
        max_length=50,
        blank=True,
    )
    # NOTE(review): the regex only constrains the FIRST character; the rest
    # of the name is unvalidated -- confirm whether that is intended.
    first_name = models.CharField(
        max_length=50,
        blank=True,
        validators=[
            validators.RegexValidator(
                r"^[a-zA-Z ]",
                "Invalid name."
            ),
        ],
    )
    last_name = models.CharField(
        max_length=255,
        blank=True,
        validators=[
            validators.RegexValidator(
                r"^[a-zA-Z ]",
                "Invalid name."
            ),
        ],
    )
    is_admin = models.BooleanField(
        default=False,
    )
    was_registered = models.BooleanField(
        default=False,
    )
    was_subscribed = models.BooleanField(
        default=False,
    )
    # SECURITY(review): plaintext password column populated by
    # MyUserManager.create_user -- confirm and preferably remove.
    raw_password = models.CharField(
        max_length=100,
        blank=True,
        null=True,
    )

    objects = MyUserManager()

    USERNAME_FIELD = "email"

    def get_full_name(self):
        """Return "first_name last_name"."""
        return " ".join([self.first_name, self.last_name])

    def get_short_name(self):
        """Return the email address as the short display name."""
        return self.email

    def __str__(self):
        return self.email

    def has_perm(self, perm, obj=None):
        # every user is granted every permission
        return True

    def has_module_perms(self, app_label):
        # every user may access every app's models
        return True

    @property
    def is_staff(self):
        # Django admin access mirrors the is_admin flag
        return self.is_admin
from .signals import update_user
| [
"oscar.gi.cast@gmail.com"
] | oscar.gi.cast@gmail.com |
d341c94a528f020251ed8186095bde571f3fc3e8 | e98d4ac448b838712616f8fe408a4073b799a478 | /currency_converter_api.py | e1a8943aeb23ec5613a3b4583276540f55642fdc | [] | no_license | KachusovStepan/CurrencyConverterApi | 2a15bbe0e5376480b4c752d4585611647d9f0f8d | 485460ee9a0c04c29f26c4ca6ac87406e8a39932 | refs/heads/master | 2023-04-12T10:10:25.928466 | 2021-05-10T17:55:12 | 2021-05-10T17:55:12 | 366,129,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,041 | py | import asyncio
from aiohttp import web
import json
import os
from services.exchange_rate_repository import ExchangeRateRepository
routes = web.RouteTableDef()
# exchange-rate store; "redis" is presumably the docker-compose service
# hostname -- confirm against the deployment configuration
repo = ExchangeRateRepository(host="redis", port=6379)
loop = asyncio.get_event_loop()
@routes.get('/convert')
async def handle_converion(request: web.Request):
    """Convert a monetary amount between two currencies.

    Query parameters: ``from`` and ``to`` (currency codes) and ``amount``
    (a number).  Responds with JSON: ``{'status': 'success', 'amount': N}``
    on success, or ``{'status': 'failed', 'message': ...}`` with status
    400/404 on error.
    """
    # (the original pre-built an unused failure Response here; removed)
    params = ["from", "to", "amount"]
    if not all(param in request.query for param in params):
        return web.Response(
            status=400,
            content_type='application/json',
            text=json.dumps({
                'status': 'failed',
                'message': 'not all parameters specified'})
        )

    qfrom = request.query['from']
    qto = request.query['to']
    qamount = num(request.query['amount'])
    if qamount is None:
        return web.Response(
            status=400,
            content_type='application/json',
            text=json.dumps({
                'status': 'failed',
                # Bug fix: the message wrongly said 'must be 0 or 1'
                # (copy-pasted from the 'merge' parameter validation)
                'message': 'parameter "amount" must be a number'})
        )

    factor = repo.get_rates(qfrom, qto)
    if factor is None:
        return web.Response(
            status=404,
            content_type='application/json',
            text=json.dumps({
                'status': 'failed',
                'message': 'unknown currency names'})
        )

    # Bug fix: the success response previously omitted the JSON content type
    return web.Response(
        status=200,
        content_type='application/json',
        text=json.dumps({
            'status': 'success',
            'amount': qamount * factor})
    )
@routes.post('/database')
async def handle_converion(request: web.Request):
    """Replace or merge the exchange-rate database.

    Expects a JSON body of rates and a required ``merge`` query parameter
    (0 = replace, 1 = merge).  Responds with a JSON status object.
    """
    # Bug fix: the original evaluated num(request.query['merge'] is None),
    # passing a bool into num(), so the validation could never trigger.
    # The intended check is that the parameter exists and parses as a number.
    if 'merge' not in request.query or num(request.query['merge']) is None:
        return web.Response(
            status=400,
            text=json.dumps({
                'status': 'failed',
                'message': 'parameter "merge" is not present'}
            )
        )
    merge = int(request.query['merge'])
    body = await request.content.read()
    try:
        new_rates = json.loads(body)
    except Exception:
        return web.Response(
            status=400,
            text=json.dumps({
                'status': 'failed',
                'message': 'data must be in json format'}
            )
        )
    repo.merge_rates(new_rates, merge)
    # NOTE: 'successfuly' typo preserved for API compatibility
    return web.Response(
        status=200,
        text=json.dumps({
            'status': 'success',
            'message': 'successfuly updated'}
        )
    )
def num(s):
    """Parse *s* as an int if possible, else as a float; None on failure.

    Bug fix: the original tested the parsed int for truthiness, so "0"
    (parsed to the falsy int 0) fell through to the float branch and was
    returned as 0.0 instead of 0.  Test for None explicitly instead.
    """
    parsed = try_parse_int(s)
    if parsed is not None:
        return parsed
    try:
        return float(s)
    # TypeError added so non-string input returns None instead of raising
    except (TypeError, ValueError):
        return None


def try_parse_int(s):
    """Return int(s), or None when *s* is not a valid integer literal."""
    try:
        return int(s)
    except (TypeError, ValueError):
        return None
def main():
    """Start the aiohttp web server on the port given by $APP_PORT."""
    listen_port = int(os.environ['APP_PORT'])
    application = web.Application(loop=loop)
    application.add_routes(routes)
    web.run_app(application, port=listen_port)
if __name__ == "__main__":
main()
| [
"stepanurfu@gmail.com"
] | stepanurfu@gmail.com |
2da22543b0df8381720ed441872ed8dab2d2f559 | 4948250bd7ca92e47742bb40e89dfc74c3f50463 | /projectwork/testapp/migrations/0007_auto_20210201_1657.py | 60b45419969915a73692c4cfa10d98d26bb73a53 | [] | no_license | sumitnicmar/PetCover | e145c3b5dddb946aa05feb23f4ad8e0c17c54707 | a4d478d3e141b9c4e5c0050800aa37e90580fad9 | refs/heads/main | 2023-03-10T16:04:40.744012 | 2021-02-15T18:58:02 | 2021-02-15T18:58:02 | 339,175,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # Generated by Django 3.1.3 on 2021-02-01 11:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint Appointment.user at Customer with CASCADE deletion."""

    dependencies = [
        ('testapp', '0006_auto_20210201_1633'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='user',
            # deleting a Customer now also deletes their appointments
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.customer'),
        ),
    ]
| [
"sumitsoft1993@gmail.com"
] | sumitsoft1993@gmail.com |
dc10e73e97008209d65dd1570fac21edd864e507 | ce7776a87f57454a70e1d2bde351fda20b43d7ef | /postgres-appliance/scripts/spilo_commons.py | 7742e8c2657ce5997bfff42fc04608f923549a45 | [
"Apache-2.0"
] | permissive | munjalpatel/spilo | 6387640dfe46b3b80076acd7dd3cf59fae2f57eb | f8179d9a5de5e9a78e5a447130f759f97811879b | refs/heads/master | 2023-09-04T19:06:47.323964 | 2021-11-11T16:04:17 | 2021-11-11T16:04:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,646 | py | import logging
import os
import subprocess
import re
import yaml
# Bug fix: getLogger was passed the literal string '__name__' instead of the
# module's __name__ variable, naming the logger "__name__".
logger = logging.getLogger(__name__)
# writable runtime directory (defaults to /run)
RW_DIR = os.environ.get('RW_DIR', '/run')
# location of the generated Patroni configuration
PATRONI_CONFIG_FILE = os.path.join(RW_DIR, 'postgres.yml')
# root of the per-version PostgreSQL installations
LIB_DIR = '/usr/lib/postgresql'

# (min_version, max_version, shared_preload_libraries, extwlist.extensions)
# extension name -> (min_version, max_version, shared_preload_libraries,
# extwlist.extensions) as used by adjust_extensions/append_extentions
extensions = {
    'timescaledb':    (9.6, 13, True, True),
    'pg_cron':        (9.5, 14, True, False),
    'pg_stat_kcache': (9.4, 14, True, False),
    'pg_partman':     (9.4, 14, False, True)
}
if os.environ.get('ENABLE_PG_MON') == 'true':
    extensions['pg_mon'] = (11, 14, True, False)


def adjust_extensions(old, version, extwlist=False):
    """Filter a comma-separated extension list against *version*.

    Keeps names that are unknown to the ``extensions`` table, and known
    names whose version range covers *version* (additionally requiring the
    extwlist flag when *extwlist* is True).  Duplicates are dropped.
    """
    ret = []
    for name in old.split(','):
        name = name.strip()
        value = extensions.get(name)
        # Bug fix: the original condition lacked parentheses, so the
        # "name not in ret" de-duplication applied only to unknown names,
        # and a duplicate unknown name crashed on value[0] (None
        # subscripted).  The sibling append_extentions already uses the
        # parenthesized form.
        if name not in ret and (value is None or
                                (value[0] <= version <= value[1] and
                                 (not extwlist or value[3]))):
            ret.append(name)
    return ','.join(ret)
def append_extentions(old, version, extwlist=False):
    """Return *old* extended with every supported extension for *version*.

    Existing names come first (unknown ones are kept as-is); every known
    extension whose version range covers *version* and whose relevant flag
    (extwlist.extensions when *extwlist*, shared_preload_libraries
    otherwise) is set is appended.  Duplicates are dropped.
    """
    flag_index = 3 if extwlist else 2
    result = []

    def consider(name):
        value = extensions.get(name)
        supported = value is None or (value[0] <= version <= value[1] and
                                      value[flag_index])
        if name not in result and supported:
            result.append(name)

    for raw_name in old.split(','):
        consider(raw_name.strip())
    for known_name in extensions:
        consider(known_name)
    return ','.join(result)
def get_binary_version(bin_dir):
    """Return the PostgreSQL major version string of the postgres binary
    in *bin_dir* (e.g. '9.6' for pre-10 releases, '13' afterwards)."""
    postgres = os.path.join(bin_dir or '', 'postgres')
    # "postgres (PostgreSQL) 13.4" -> groups: major, ".minor", minor
    version = subprocess.check_output([postgres, '--version']).decode()
    version = re.match(r'^[^\s]+ [^\s]+ (\d+)(\.(\d+))?', version)
    # before version 10 the major version is "X.Y"; from 10 on it is "X"
    return '.'.join([version.group(1), version.group(3)]) if int(version.group(1)) < 10 else version.group(1)
def get_bin_dir(version):
    """Return the bin directory of the PostgreSQL installation *version*."""
    return LIB_DIR + '/' + str(version) + '/bin'
def is_valid_pg_version(version):
    """Return True when *version* names an installed PostgreSQL release,
    i.e. its bin directory contains an executable postgres binary."""
    postgres_binary = os.path.join(get_bin_dir(version), 'postgres')
    return os.path.isfile(postgres_binary) and os.access(postgres_binary, os.X_OK)
def write_file(config, filename, overwrite):
    """Write *config* to *filename*; skip (with a warning) when the file
    already exists and *overwrite* is false."""
    if os.path.exists(filename) and not overwrite:
        logger.warning('File %s already exists, not overwriting. (Use option --force if necessary)', filename)
        return
    logger.info('Writing to file %s', filename)
    with open(filename, 'w') as out:
        out.write(config)
def get_patroni_config():
    """Load and return the Patroni configuration from PATRONI_CONFIG_FILE."""
    with open(PATRONI_CONFIG_FILE) as config_file:
        return yaml.safe_load(config_file)
def write_patroni_config(config, force):
    """Serialize *config* as YAML and write it to PATRONI_CONFIG_FILE."""
    serialized = yaml.dump(config, default_flow_style=False, width=120)
    write_file(serialized, PATRONI_CONFIG_FILE, force)
| [
"noreply@github.com"
] | munjalpatel.noreply@github.com |
3fc7db5e967c82b33be3e2e96772d7a1ad400835 | 2c3e61c057d00399e94985914556224fff6cc5be | /marketingFirm.py | 077936d4cf5899dbe6303192c805fd836d3831aa | [] | no_license | wrightzachary/sweepstakes | 7b09a7b41017cd870402bb639a6ad047ca314478 | b5cc71a533d270b8cc9b6b23059a45dc798a9f18 | refs/heads/main | 2023-06-25T20:37:58.659130 | 2021-07-08T19:31:14 | 2021-07-08T19:31:14 | 379,325,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,524 | py | from sweepstakes import Sweepstakes
from userInterface import UserInterface
class MarketingFirm:
    """Console-driven marketing firm that creates/selects sweepstakes.

    NOTE(review): the menu methods call each other recursively instead of
    looping, so a long session grows the call stack without bound.
    """

    def __init__(self):
        # display name of the firm; empty until the user sets one
        self.marketing_firm_name = ""
        # names of existing sweepstakes (strings, not Sweepstakes objects)
        self.sweepstakes_storage = ["Boat", "Weekend getaway"]

    # create sweepstakes
    def create_sweepstakes(self):
        """Prompt for a name, record it, and return to the menu."""
        chosen_name = UserInterface.get_user_input_string("\tEnter new sweepstakes name")
        UserInterface.display_message('')
        # NOTE(review): the Sweepstakes instance is created but discarded;
        # only the name string is stored -- confirm this is intended.
        sweepstakes = Sweepstakes()
        self.sweepstakes_storage.append(chosen_name)
        print(f"\tYou created {chosen_name} ")
        UserInterface.display_message('')
        MarketingFirm.menu(self)

    # change firm name
    def change_marketing_firm_name(self):
        """Prompt for and apply a new firm name, then return to the menu."""
        change_name = UserInterface.get_user_input_string("\tChange firm name:")
        new_name = change_name
        print(f'\tMarketing firm name changed to {new_name}')
        UserInterface.display_message('')
        self.marketing_firm_name = f'{new_name}'
        UserInterface.display_message('')
        # UserInterface.display_marketing_firm_menu_options(self.marketing_firm_name)
        MarketingFirm.menu(self)

    # select a sweepstake
    def select_sweepstakes(self):
        """Prompt the user to pick a sweepstake and show its info menu.

        NOTE(review): the selection is stored into marketing_firm_name,
        overwriting the firm's name -- confirm this is intended.
        """
        self.marketing_firm_name = UserInterface.get_user_input_string("\tSelect your desired sweepstake")
        UserInterface.display_message('')
        UserInterface.display_message(f'\tYou Selected {self.marketing_firm_name}')
        UserInterface.display_message('')
        UserInterface.display_sweepstakes_info('')
        response = int(input("\tPlease enter your selection: "))
        if response == 1:
            UserInterface.display_marketing_firm_menu_options(self.marketing_firm_name)
        else:
            UserInterface.display_message("Not a valid selection")
            MarketingFirm.select_sweepstakes(self)

    # menu
    def menu(self):
        """Show the main menu and dispatch on the user's numeric choice."""
        UserInterface.display_marketing_firm_menu_options(self.marketing_firm_name)
        response = int(input("\tPlease enter your selection: "))
        UserInterface.display_message('')
        if response == 1:
            self.create_sweepstakes()
        elif response == 2:
            self.change_marketing_firm_name()
        elif response == 3:
            self.select_sweepstakes()
        elif response == 4:
            UserInterface.display_sweepstakes_menu_options(self.sweepstakes_storage)
        else:
            UserInterface.display_message("\tNot a valid selection")
            UserInterface.display_message('')
            MarketingFirm.menu(self)
| [
"taylorzw96@gmail.com"
] | taylorzw96@gmail.com |
abd7adc1822c7a3ded2bfbb351e303bc38039614 | 99a310f6bb6c7a6c728f1b3ae78054487372042d | /aoc2019/intcode/state_machine.py | b68372737c28f105cbb818391176e19138743da5 | [] | no_license | jepebe/aoc2018 | 46ce6b46479a0faf2c2970413af14a071dcfdb79 | 4bf91b99bec4b59529533ef70f24bf6496bada99 | refs/heads/master | 2023-01-11T16:44:42.125394 | 2023-01-06T06:27:14 | 2023-01-06T06:27:14 | 159,912,721 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,926 | py | from collections import defaultdict
def get_address(state_machine, parameter, write_mode=False):
    """Resolve the effective address of *parameter* for the instruction at
    the current position, honoring the intcode addressing mode:

        0 -- position mode: the operand is the address
        1 -- immediate mode: the operand itself is the value (address = pos)
        2 -- relative mode: operand + current relative base
    """
    mode = state_machine['parameter_modes'][parameter]
    pos = state_machine['pos']
    if mode == 0:
        addr = state_machine['instructions'][pos]
    elif mode == 1:
        if write_mode:
            print('Writing in immediate mode?')
        addr = pos
    elif mode == 2:
        addr = state_machine['instructions'][pos]
        relative_pos = state_machine['relative_pos']
        addr = addr + relative_pos
    else:
        # Bug fix: the original raised a plain string, which is a
        # TypeError on Python 3; raise a proper exception type.
        raise ValueError('Unknown addressing mode %i for read' % mode)
    return addr
def read(state_machine, parameter):
    """Read the value of *parameter*, advancing the instruction pointer.

    Addresses past the end of the program fall through to the sparse
    'memory' dict (which defaults to 0)."""
    addr = get_address(state_machine, parameter)
    state_machine['pos'] += 1
    program = state_machine['instructions']
    if addr >= len(program):
        return state_machine['memory'][addr]
    return program[addr]
def write(state_machine, parameter, value):
    """Store *value* at the address of *parameter*, advancing the pointer.

    Addresses past the end of the program go to the sparse 'memory' dict."""
    addr = get_address(state_machine, parameter, write_mode=True)
    state_machine['pos'] += 1
    program = state_machine['instructions']
    if addr >= len(program):
        state_machine['memory'][addr] = value
    else:
        program[addr] = value
def add(state_machine):
    """Opcode 1: parameter 2 <- parameter 0 + parameter 1."""
    total = read(state_machine, 0) + read(state_machine, 1)
    write(state_machine, 2, total)
def multiply(state_machine):
    """Opcode 2: parameter 2 <- parameter 0 * parameter 1."""
    product = read(state_machine, 0) * read(state_machine, 1)
    write(state_machine, 2, product)
def get_input(state_machine):
    """Opcode 3: pop the next queued input value into parameter 0.

    When the input queue is empty the machine blocks: the wait flag is
    set and the instruction pointer / counter are rewound so the same
    instruction re-executes once input arrives."""
    pending = state_machine['input']
    if not pending:
        state_machine['wait'] = True
        state_machine['pos'] -= 1
        state_machine['instruction_count'] -= 1
        return
    write(state_machine, 0, pending.pop(0))
def output(state_machine):
    """Opcode 4: append the value of parameter 0 to the output queue."""
    result = read(state_machine, 0)
    state_machine['output'].append(result)
    if state_machine['output_enabled']:
        print('Output from state machine %s' % result)
def jump_if_true(state_machine):
    """Opcode 5: jump to parameter 1 when parameter 0 is non-zero."""
    condition = read(state_machine, 0)
    target = read(state_machine, 1)
    if condition != 0:
        state_machine['pos'] = target
def jump_if_false(state_machine):
    """Opcode 6: jump to parameter 1 when parameter 0 is zero."""
    condition = read(state_machine, 0)
    target = read(state_machine, 1)
    if condition == 0:
        state_machine['pos'] = target
def less_than(state_machine):
    """Opcode 7: parameter 2 <- 1 if parameter 0 < parameter 1 else 0."""
    lhs = read(state_machine, 0)
    rhs = read(state_machine, 1)
    write(state_machine, 2, int(lhs < rhs))
def equals(state_machine):
    """Opcode 8: parameter 2 <- 1 if parameter 0 == parameter 1 else 0."""
    lhs = read(state_machine, 0)
    rhs = read(state_machine, 1)
    write(state_machine, 2, int(lhs == rhs))
def adjust_relative(state_machine):
    """Opcode 9: shift the relative addressing base by parameter 0."""
    state_machine['relative_pos'] += read(state_machine, 0)
def halt(state_machine):
    """Opcode 99: flag the machine as halted; run_state_machine stops."""
    state_machine['halt'] = True
def create_state_machine(instructions):
    """Build a fresh machine state for *instructions*.

    The program is copied twice: a working image and a pristine backup that
    reset_state_machine() restores from.
    """
    machine = {
        'instructions': list(instructions),
        'backup_instructions': list(instructions),
        'memory': defaultdict(int),
        'operation': 0,
        'parameter_modes': [0],
        'pos': 0,
        'relative_pos': 0,
        'instruction_count': 0,
        'input': [],
        'output': [],
        'last_output': None,
        'output_enabled': False,
        'halt': False,
        'wait': False,
    }
    # Opcode number -> handler function.
    machine['opcodes'] = {
        1: add,
        2: multiply,
        3: get_input,
        4: output,
        5: jump_if_true,
        6: jump_if_false,
        7: less_than,
        8: equals,
        9: adjust_relative,
        99: halt,
    }
    return machine
def reset_state_machine(state_machine):
    """Restore *state_machine* to the state it had right after creation,
    reloading the program from 'backup_instructions'."""
    state_machine.update(
        instructions=list(state_machine['backup_instructions']),
        memory=defaultdict(int),
        operation=0,
        parameter_modes=[0],
        pos=0,
        relative_pos=0,
        instruction_count=0,
        input=[],
        output=[],
        last_output=None,
        output_enabled=False,
        halt=False,
        wait=False,
    )
def parse(state_machine):
    """Decode the instruction at the current position.

    An encoded opcode looks like ABCDE: DE is the operation number and
    C, B, A are the addressing modes of parameters 0, 1 and 2.  Sets
    'operation' and 'parameter_modes', then advances past the opcode.
    """
    position = state_machine['pos']
    encoded = state_machine['instructions'][position]
    operation_number = encoded % 100
    modes = encoded // 100
    state_machine['operation'] = state_machine['opcodes'][operation_number]
    state_machine['parameter_modes'] = [modes % 10,
                                        (modes // 10) % 10,
                                        (modes // 100) % 10]
    state_machine['pos'] = position + 1
def run_state_machine(state_machine):
    """Execute instructions until the machine halts or blocks waiting for
    input (opcode 3 on an empty queue sets 'wait')."""
    while not (state_machine['halt'] or state_machine['wait']):
        parse(state_machine)
        state_machine['operation'](state_machine)
        state_machine['instruction_count'] += 1
def add_input(state_machine, data):
    """Queue *data* for opcode 3 and wake the machine if it was blocked."""
    state_machine['input'].append(data)
    # Harmless when already running; clears a pending opcode-3 block.
    state_machine['wait'] = False
def get_output(state_machine):
    """Pop and return the oldest output value, remembering it in
    'last_output'.  Raises UserWarning when the queue is empty."""
    if not has_output(state_machine):
        raise UserWarning('No output available!')
    value = state_machine['output'].pop(0)
    state_machine['last_output'] = value
    return value
def has_output(state_machine):
    """True when at least one output value is waiting to be fetched."""
    return bool(state_machine['output'])
def get_last_output(state_machine):
    """Return the most recently fetched output value (None before any fetch)."""
    return state_machine['last_output']
def flush_output(state_machine):
    """Discard every pending output value ('last_output' tracks the drain)."""
    while has_output(state_machine):
        get_output(state_machine)
def load_instructions(filename):
    """Read a comma-separated Intcode program from the first line of
    *filename* and return it as a list of ints."""
    with open(filename) as handle:
        # int() tolerates the trailing newline on the last field.
        return [int(field) for field in handle.readline().split(',')]
def load_state_machine(filename):
    """Convenience: load a program file and build a machine around it."""
    instructions = load_instructions(filename)
    return create_state_machine(instructions)
def is_running(state_machine):
    """True while the machine has not executed a halt (opcode 99).
    Note: a machine blocked on input is still 'running'."""
    return not state_machine['halt']
def print_output(state_machine):
    """Drain the output queue to stdout, printing values <= 255 as ASCII
    characters and larger values as decimal numbers."""
    import sys
    while has_output(state_machine):
        v = get_output(state_machine)
        sys.stdout.write(str(v) if v > 255 else chr(v))
| [
"jepebe@users.noreply.github.com"
] | jepebe@users.noreply.github.com |
9ca0eebcc0fb73790d6d40f0aa355c00af6bea88 | 77f5208e62d4c222702184c920323029c15d37fe | /ml_project/work/features/nope_transformer.py | 6efb522acc035a8202f9f84e79f6d47d4e768f6c | [] | no_license | made-ml-in-prod-2021/korowood | 1f89a58b9126458d7f0d43068a0d28e6f230d11f | 591cfd3341aae10618075123757011cf9465b751 | refs/heads/main | 2023-06-06T15:07:20.537003 | 2021-06-17T08:16:09 | 2021-06-17T08:16:09 | 354,288,201 | 0 | 0 | null | 2021-06-17T08:16:10 | 2021-04-03T12:37:46 | Jupyter Notebook | UTF-8 | Python | false | false | 555 | py | import numpy as np
import pandas as pd
from typing import List
from sklearn.base import BaseEstimator, TransformerMixin
class NopeTransformer(BaseEstimator, TransformerMixin):
    """Dummy class to do pass-through dataset as is.

    Fills a pipeline slot without altering the data.  Both constructor
    arguments exist only for pipeline-parameter compatibility; neither is
    used by fit() or transform().
    """
    def __init__(self, cols: List[str] = None, quantile: float = 0.05):
        self.cols = cols
        self.quantile = quantile

    def fit(self, X: pd.DataFrame, y: np.ndarray = None) -> 'NopeTransformer':
        # Nothing to learn.
        return self

    def transform(self, X: pd.DataFrame, y: np.ndarray = None) -> pd.DataFrame:
        # Pass-through: return the input frame unchanged.
        return X
| [
"yakorovka@gmail.com"
] | yakorovka@gmail.com |
d96d507a3a852249066e493e07e09227d669afb0 | cd8ea41cd625440bfacbee84e7a7632e1243c407 | /code/recipe/tests/test_ingredients_api.py | e22fc54e3b2e1e1bab6a44b16230688e9876e504 | [] | no_license | DamianCzajkowski/recipe-app | 826c332f5b6699cb3f63c5005dda953928467e20 | a593acbfedc6b0142f3e51289cc4a4dbd19bf793 | refs/heads/main | 2023-07-18T06:16:28.872407 | 2021-08-15T19:12:49 | 2021-08-15T19:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,346 | py | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
# Router-generated list/create endpoint for ingredients.
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
    """Test the publicly available ingredients API."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """Anonymous requests to the endpoint must be rejected."""
        res = self.client.post(INGREDIENTS_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
    """Test the private ingredients API (authenticated access)."""

    def setUp(self):
        # Authenticated client shared by every test in this class.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('test@test.com', 'passwd123')
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredients_list(self):
        """Test retrieving a list of ingredients"""
        Ingredient.objects.create(user=self.user, name="Kale")
        Ingredient.objects.create(user=self.user, name="Salt")
        response = self.client.get(INGREDIENTS_URL)
        # The view is expected to return ingredients ordered by name,
        # descending — compare against the serializer of that queryset.
        ingredients = Ingredient.objects.all().order_by("-name")
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """Test that ingredients for the authenticated user are returned"""
        # A second user's ingredient must not leak into the response.
        user2 = get_user_model().objects.create_user('test2@test.com', 'passwd123')
        Ingredient.objects.create(user=user2, name='Vinegar')
        ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
        response = self.client.get(INGREDIENTS_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], ingredient.name)

    def test_create_ingredient_successful(self):
        """Test create a new ingredient"""
        payload = {"name": "Cabbage"}
        self.client.post(INGREDIENTS_URL, payload)
        exists = Ingredient.objects.filter(
            user=self.user, name=payload["name"]).exists()
        self.assertTrue(exists)

    def test_create_ingredient_invalid(self):
        """Test creating ingredients invalid"""
        # An empty name must be rejected by validation.
        payload = {"name": ""}
        response = self.client.post(INGREDIENTS_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_ingredients_assigned_to_recipes(self):
        """Test filtering ingredients by those assigned to recipes"""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Turkey')
        recipe = Recipe.objects.create(
            title='Apple crumble',
            time_minutes=5,
            price=10.00,
            user=self.user
        )
        recipe.ingredients.add(ingredient1)
        # assigned_only=1 should exclude ingredient2 (not used by any recipe).
        response = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        self.assertIn(serializer1.data, response.data)
        self.assertNotIn(serializer2.data, response.data)

    def test_retrieve_ingredients_assigned_unique(self):
        """Test filtering ingredients by assigned returns unique items"""
        # The same ingredient used by two recipes must appear only once.
        ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
        Ingredient.objects.create(user=self.user, name='Cheese')
        recipe1 = Recipe.objects.create(
            title='Eggs benedict',
            time_minutes=30,
            price=12.00,
            user=self.user
        )
        recipe1.ingredients.add(ingredient)
        recipe2 = Recipe.objects.create(
            title='Coriander eggs on toast',
            time_minutes=20,
            price=5.00,
            user=self.user
        )
        recipe2.ingredients.add(ingredient)
        response = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
        self.assertEqual(len(response.data), 1)
| [
"damianczajkowski@bluerider.software"
] | damianczajkowski@bluerider.software |
5959f52e07a61191622f22f7491aa2229167184c | ba104a6a6c3f84d2e96fb3c304af781d881bed8e | /bbs/dbproxy.py | 1d37004c38872fff079b3090d675d41028b73a59 | [] | no_license | jonny290/yos-x84 | 463a7f166ee59f2dd2a7a197352db1341dad8b66 | 7b03a35f12d2b7a10fa4709b09107935c6f14000 | refs/heads/master | 2021-01-20T06:59:22.869289 | 2015-01-17T06:13:32 | 2015-01-17T06:13:32 | 24,814,644 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,902 | py | """
Database proxy helper for X/84.
"""
import time
class DBProxy(object):
    """
    Provide dictionary-like object interface to a database. a database call,
    such as __len__() or keys() is issued as a command to the main engine,
    which spawns a thread to acquire a lock on the database and return the
    results via IPC pipe transfer.
    """

    def __init__(self, schema, table='unnamed'):
        """
        Arguments:
            schema: database key, to become basename of .sqlite3 files.
            table: table name within that database (default 'unnamed').
        """
        self.schema = schema
        self.table = table

    def proxy_iter(self, method, *args):
        """
        Iterable proxy for dictionary methods called over IPC pipe.
        """
        import x84.bbs.session
        # NOTE(review): this uses 'db=%s' while proxy_method uses 'db-%s';
        # confirm the engine really listens on both spellings.
        event = 'db=%s' % (self.schema,)
        session = x84.bbs.session.getsession()
        session.flush_event(event)
        session.send_event(event, (self.table, method, args))
        data = session.read_event(event)
        # The engine announces iteration with the *string* 'StartIteration'
        # but terminates with the StopIteration *class* — asymmetric on
        # purpose? NOTE(review): confirm against the engine side.
        assert data == (None, 'StartIteration'), (
            'iterable proxy used on non-iterable, %r' % (data,))
        data = session.read_event(event)
        while data != (None, StopIteration):
            yield data
            data = session.read_event(event)
        session.flush_event(event)

    def proxy_method(self, method, *args):
        """
        Proxy for dictionary methods called over IPC pipe.
        """
        import x84.bbs.session
        event = 'db-%s' % (self.schema,)
        session = x84.bbs.session.getsession()
        session.send_event(event, (self.table, method, args))
        return session.read_event(event)

    def acquire(self, blocking=True, stale=2.0):
        """
        Acquire a fine-grained BBS-global lock, blocking or non-blocking.

        When invoked with the blocking argument set to True (the default),
        block until the lock is acquired, and return True.

        When invoked with the blocking argument set to False, do not block.
        Returns False if lock is not acquired.

        If the engine has held the lock longer than ``stale`` seconds, the
        lock is granted anyway.
        """
        import x84.bbs.session
        event = 'lock-%s/%s' % (self.schema, self.table)
        session = x84.bbs.session.getsession()
        while True:
            session.send_event(event, ('acquire', stale))
            data = session.read_event(event)
            if data is True or not blocking:
                return data
            # Lock busy and we must block: poll again after a short pause.
            time.sleep(0.1)

    def release(self):
        """
        Release bbs-global lock on database.
        """
        import x84.bbs.session
        event = 'lock-%s/%s' % (self.schema, self.table)
        session = x84.bbs.session.getsession()
        return session.send_event(event, ('release', None))

    # The remaining methods mirror the dict API one-for-one; each borrows
    # the corresponding dict docstring after its definition.
    # pylint: disable=C0111
    #         Missing docstring
    def __contains__(self, key):
        return self.proxy_method('__contains__', key)
    __contains__.__doc__ = dict.__contains__.__doc__

    def __getitem__(self, key):
        return self.proxy_method('__getitem__', key)
    __getitem__.__doc__ = dict.__getitem__.__doc__

    def __setitem__(self, key, value):
        return self.proxy_method('__setitem__', key, value)
    __setitem__.__doc__ = dict.__setitem__.__doc__

    def __delitem__(self, key):
        return self.proxy_method('__delitem__', key)
    __delitem__.__doc__ = dict.__delitem__.__doc__

    def get(self, key, default=None):
        return self.proxy_method('get', key, default)
    get.__doc__ = dict.get.__doc__

    def has_key(self, key):
        return self.proxy_method('has_key', key)
    has_key.__doc__ = dict.has_key.__doc__

    def setdefault(self, key, value):
        return self.proxy_method('setdefault', key, value)
    setdefault.__doc__ = dict.setdefault.__doc__

    def update(self, *args):
        return self.proxy_method('update', *args)
    update.__doc__ = dict.update.__doc__

    def __len__(self):
        return self.proxy_method('__len__')
    __len__.__doc__ = dict.__len__.__doc__

    def values(self):
        return self.proxy_method('values')
    values.__doc__ = dict.values.__doc__

    def items(self):
        return self.proxy_method('items')
    items.__doc__ = dict.items.__doc__

    def iteritems(self):
        return self.proxy_iter('iteritems')
    iteritems.__doc__ = dict.iteritems.__doc__

    def iterkeys(self):
        return self.proxy_iter('iterkeys')
    iterkeys.__doc__ = dict.iterkeys.__doc__

    def itervalues(self):
        return self.proxy_iter('itervalues')
    itervalues.__doc__ = dict.itervalues.__doc__

    def keys(self):
        return self.proxy_method('keys')
    keys.__doc__ = dict.keys.__doc__

    def pop(self):
        return self.proxy_method('pop')
    pop.__doc__ = dict.pop.__doc__

    def popitem(self):
        return self.proxy_method('popitem')
    popitem.__doc__ = dict.popitem.__doc__
| [
"root@5c2973472a80.(none)"
] | root@5c2973472a80.(none) |
234c003b3b7516f29ad666cdb2a012b2e5e63f70 | a90ce0aaa95b712c6b1a7c4fa584254f93d80451 | /1-6-8.py | 491c4a751d6c8962c237e878bf408bad937b28d0 | [] | no_license | vvFell/stepic_course | a82743356144899d6e04ca51df67bbbaf501c91f | e8c662437b7ffb1abb7be4a8e08903184710b59f | refs/heads/master | 2020-09-17T00:04:55.346737 | 2019-11-25T11:04:48 | 2019-11-25T11:04:48 | 223,927,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | from selenium import webdriver
import time
link = "http://suninjuly.github.io/find_xpath_form"
try:
browser = webdriver.Chrome()
browser.get(link)
input1 = browser.find_element_by_tag_name('input')
input1.send_keys("Ivan")
input2 = browser.find_element_by_name('last_name')
input2.send_keys("Petrov")
input3 = browser.find_element_by_class_name('form-control.city')
input3.send_keys("Smolensk")
input4 = browser.find_element_by_id('country')
input4.send_keys("Russia")
button = browser.find_element_by_xpath("//button[@type='submit']")
button.click()
finally:
# успеваем скопировать код за 30 секунд
time.sleep(30)
# закрываем браузер после всех манипуляций
browser.quit()
# не забываем оставить пустую строку в конце файла
| [
"VSerov@profix.com"
] | VSerov@profix.com |
355606e8e2db99254d740fee5aa6d2a68fae9623 | 08c4c7ed66ba9f2312b914b639c8e63d6a094552 | /shoppingX/asgi.py | 45071adf66eacba11b97fbf427fd7cbef0540f2e | [] | no_license | aman-ash/E---Commerce_Website_Project | c93ab4c314de1de87e98d693515999f5dc8fc969 | 4e5fd2e982b96d9bc111027a87b15381ecc7b687 | refs/heads/master | 2023-07-15T06:33:53.290197 | 2021-08-24T06:44:24 | 2021-08-24T06:44:24 | 382,203,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for shoppingX project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before building the application
# (setdefault keeps an externally provided value intact).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shoppingX.settings')

# Module-level ASGI callable discovered by the ASGI server.
application = get_asgi_application()
| [
"amanmahore.ash@gmail.com"
] | amanmahore.ash@gmail.com |
2a4fbb6eebb89b4e2f3e140179c0dd9d6b1d9665 | b9ab57c699ab64c349098bba8ae3f954cd08b152 | /quora-question-pairs/Model.py | 0616eae7ed7f71ac72faf6cf5a75f06b53036b5d | [] | no_license | tyxr/my_tensorflow_code | c1e24eb3dc628b26ed7aeed92dcc3e0f8143fb0f | 860acaee74a8ca6edcaf0ad3121399a94b79643e | refs/heads/master | 2022-01-10T17:45:30.339149 | 2019-05-08T03:18:24 | 2019-05-08T03:18:24 | 106,514,006 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,393 | py | import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, DropoutWrapper
from tensorflow.keras.layers import Embedding,TimeDistributed,Dense,Lambda
from Process_data import print_shape
import numpy as np
class SimpleNetwork():
    """TF1 graph for question-pair similarity (binary classification).

    Two token-index sequences are embedded with a frozen pretrained matrix,
    projected per-timestep, max-pooled, concatenated and fed through a
    two-layer feed-forward head with a sigmoid output.

    Fix vs. original: _training_op constructed ``ValueError(...)`` for an
    unknown optimizer name without raising it, which then crashed with an
    unrelated NameError on ``optimizer``; it now raises.
    """

    def __init__(self, num_words, embedding_dim, sequence_length, n_classes,
                 L2, hidden_size, optimizer, learning_rate, clip_value):
        # --- hyper-parameters ---
        self.num_words = num_words          # vocabulary size (without padding id)
        self.embedding_dim = embedding_dim
        self.sequence_length = sequence_length
        self.n_classes = n_classes          # kept for interface; head outputs 1 unit
        self.L2 = L2                        # L2 regularisation coefficient
        self.hidden_size = hidden_size
        self.optimizer = optimizer          # optimizer name, e.g. 'adam'
        self.learning_rate = learning_rate
        self.clip_value = clip_value        # global-norm gradient clip (None = off)
        self._placeholder_init()
        # --- model operations (graph construction order matters) ---
        self.logits = self._logits_op()
        self.loss = self._loss_op()
        self.acc = self._acc_op()
        self.train = self._training_op()
        tf.add_to_collection('train_mini', self.train)

    def _placeholder_init(self):
        """Create input placeholders and load the frozen embedding matrix."""
        # NOTE(review): q1/q2 hold token indices yet are declared float32;
        # int32 would be customary for Embedding lookups — confirm.
        self.q1 = tf.placeholder(tf.float32, [None, self.sequence_length], 'q1')
        self.q2 = tf.placeholder(tf.float32, [None, self.sequence_length], 'q2')
        # A single output suffices for binary classification, so the label
        # placeholder is scalar-per-sample rather than n_classes wide.
        self.y = tf.placeholder(tf.float32, None, 'y_true')
        # Pretrained embeddings loaded from a hard-coded path at graph build.
        self.embed_matrix = np.load('./data/word_embedding_matrix.npy')
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    def _logits_op(self):
        """Embed, project, max-pool each question, then score the pair."""
        print(self.q1)
        print(self.embed_matrix)
        print(self.num_words + 1)
        # +1 accounts for the padding index 0.
        q1 = Embedding(self.num_words + 1, self.embedding_dim,
                       weights=[self.embed_matrix],
                       input_length=self.sequence_length,
                       trainable=False)(self.q1)
        q1 = TimeDistributed(Dense(self.embedding_dim, activation='relu'))(q1)
        # Max over the time axis -> fixed-size sentence vector.
        q1 = Lambda(lambda x: tf.reduce_max(x, axis=1),
                    output_shape=(self.embedding_dim,))(q1)

        q2 = Embedding(self.num_words + 1, self.embedding_dim,
                       weights=[self.embed_matrix],
                       input_length=self.sequence_length,
                       trainable=False)(self.q2)
        q2 = TimeDistributed(Dense(self.embedding_dim, activation='relu'))(q2)
        q2 = Lambda(lambda x: tf.reduce_max(x, axis=1),
                    output_shape=(self.embedding_dim,))(q2)

        features = tf.concat([q1, q2], axis=1)
        logits = self._feedForwardBlock(features, self.hidden_size,
                                        self.n_classes, 'feed_forward')
        return logits

    def _loss_op(self):
        """Sigmoid cross-entropy plus L2 penalty on weight kernels."""
        with tf.name_scope('cost'):
            losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y,
                                                             logits=self.logits)
            loss = tf.reduce_mean(losses, name='loss_val')
            # Regularise only weight variables, not biases.
            weights = [v for v in tf.trainable_variables()
                       if ('w' in v.name) or ('kernel' in v.name)]
            l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in weights]) * self.L2
            loss += l2_loss
        return loss

    def _acc_op(self):
        """Accuracy: rounded sigmoid output vs. rounded label."""
        with tf.name_scope('acc'):
            label_pred = self.logits
            label_true = self.y
            correct_pred = tf.equal(tf.round(label_pred), tf.round(label_true))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32),
                                      name='Accuracy')
        return accuracy

    def _feedForwardBlock(self, inputs, hidden_dims, num_units, scope):
        """
        :param inputs: tensor with shape (batch_size, 2 * embedding_dim)
        :param hidden_dims: width of the hidden layer
        :param num_units: unused — the output layer is fixed to width 1
                          (binary head); kept for interface compatibility
        :param scope: scope name
        :return: tensor with shape (batch_size, 1), sigmoid activated
        """
        with tf.variable_scope(scope):
            initializer = tf.random_normal_initializer(0.0, 0.1)
            with tf.variable_scope('feed_foward_layer1'):
                inputs = tf.nn.dropout(inputs, self.dropout_keep_prob)
                outputs = tf.layers.dense(inputs, hidden_dims, tf.nn.relu,
                                          kernel_initializer=initializer)
            with tf.variable_scope('feed_foward_layer2'):
                outputs = tf.nn.dropout(outputs, self.dropout_keep_prob)
                results = tf.layers.dense(outputs, 1, tf.nn.sigmoid,
                                          kernel_initializer=initializer)
            return results

    def _training_op(self):
        """Build the optimizer and (optionally clipped) apply-gradients op."""
        with tf.name_scope('training'):
            if self.optimizer == 'adam':
                optimizer = tf.train.AdamOptimizer(self.learning_rate)
            elif self.optimizer == 'rmsprop':
                optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
            elif self.optimizer == 'momentum':
                optimizer = tf.train.MomentumOptimizer(self.learning_rate, momentum=0.9)
            elif self.optimizer == 'sgd':
                optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
            elif self.optimizer == 'adadelta':
                optimizer = tf.train.AdadeltaOptimizer(self.learning_rate)
            elif self.optimizer == 'adagrad':
                optimizer = tf.train.AdagradOptimizer(self.learning_rate)
            else:
                # Bug fix: the original built this exception without raising it.
                raise ValueError('Unknown optimizer : {0}'.format(self.optimizer))
            gradients, v = zip(*optimizer.compute_gradients(self.loss))
            if self.clip_value is not None:
                gradients, _ = tf.clip_by_global_norm(gradients, self.clip_value)
            train_op = optimizer.apply_gradients(zip(gradients, v))
        return train_op
return train_op | [
"xingkai@icarbonx.com"
] | xingkai@icarbonx.com |
b02ec88d3c3a9fda595bfaded4054a2a7b2b905d | a1ff3c1e1f633f8c793c69214bf70ed6321891f2 | /frequencyAnalyzer.py | b163360268f399de2fb79d30583ca65f3292d0bb | [] | no_license | yanone/frequencyAnalyzer | 6f857a72a5e3a2b64b01a99816dd6ac33c3580be | 0312f21a0662db3524382f80b88dea28d437cd65 | refs/heads/master | 2021-09-23T03:26:46.349196 | 2018-09-20T08:39:43 | 2018-09-20T08:39:43 | 115,872,586 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,520 | py |
import plistlib
import threading
import pyaudio
import numpy as np
import wave
import audioop
import time, os
import math
import json
import wx
from ynlib.maths import Interpolate
#from pysine import sine
###############################################################
frequencies = []
interpolatedFrequencies = []
volumes = {}
clipping = {}
intermediateSteps = 3
minimumVolume = None
maximumVolume = None
averageVolume = 0
currentVolume = 0
peakVolume = 80
volumeScope = 120.0
###############################################################
# output
out = pyaudio.PyAudio()
_volume = 1.0 # range [0.0, 1.0]
fs = 44100 # sampling rate, Hz, must be integer
duration = 0.10 # in seconds, may be float
# for paFloat32 sample values must be in range [-1.0, 1.0]
# input
CHUNK = 4096
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
###############################################################
def CleanFloat(number, locale = 'en'):
"""\
Return number without decimal points if .0, otherwise with .x)
"""
try:
if number % 1 == 0:
return str(int(number))
else:
return str(float(number))
except:
pass
###############################################################
# Shared blocking output stream; play() writes raw float32 sine samples here.
outputStream = out.open(format=pyaudio.paFloat32,
                        channels=CHANNELS,
                        rate=RATE,
                        output=True,
                        frames_per_buffer=2048,
                        )
class AppKitNSUserDefaults(object):
    """Small JSON-serialising wrapper around macOS NSUserDefaults.

    Values are stored as JSON strings so arbitrary Python structures
    survive the round trip through the defaults database.
    """

    def __init__(self, name=None):
        # Imported lazily so the class is importable off-macOS.
        from AppKit import NSUserDefaults
        if name:
            self.defaults = NSUserDefaults.alloc().initWithSuiteName_(name)
        else:
            self.defaults = NSUserDefaults.standardUserDefaults()

    def get(self, key):
        """Return the decoded value for *key*, or None when absent."""
        if self.defaults.objectForKey_(key):
            return json.loads(self.defaults.objectForKey_(key))

    def set(self, key, value):
        """Store *value* under *key*, JSON-encoded."""
        self.defaults.setObject_forKey_(json.dumps(value), key)

    def remove(self, key):
        """Delete *key* from the defaults domain."""
        self.defaults.removeObjectForKey_(key)
def play(f, thread):
    """Synthesize *duration* seconds of a sine wave at *f* Hz and write it
    to the shared blocking output stream.

    *thread* is unused; the parameter is kept for signature parity with
    volume(), since both are launched the same way from Record.run().

    Cleanup vs. original: removed a dead inner ``sine_wave`` helper that
    would have raised TypeError if ever called (it "called" a list), plus
    several blocks of stale commented-out experiments.
    """
    # paFloat32 samples must lie in [-1.0, 1.0]; a unit-amplitude sine does.
    samples = (np.sin(2 * np.pi * np.arange(fs * duration) * f / fs)) \
        .astype(np.float32).tobytes()
    # Blocking write: returns once the buffer has been consumed.
    outputStream.write(samples)
def volume(f, thread):
    """Record a short window from the default input device and store the
    measured level of frequency *f* in the module-level ``volumes`` dict.

    Runs concurrently with play(); also updates the module globals
    ``averageVolume``, ``currentVolume`` and ``clipping[f]``, and raises
    ``thread.frame._max`` when a new maximum is seen.
    """
    global averageVolume, clipping, currentVolume
    # Let playback settle before sampling.
    time.sleep(max(0, duration / 2.0))
    input = pyaudio.PyAudio()  # NOTE: shadows the builtin input()
    inputStream = input.open(format=FORMAT,
                             channels=CHANNELS,
                             rate=RATE,
                             input=True,
                             frames_per_buffer=CHUNK)
    _max = 0
    values = []
    # Collect one RMS value per chunk over ~20% of the playback duration
    # (at least 0.1 s worth of chunks).
    for i in range(0, int(RATE / CHUNK * max(.1, duration * .2))):
        data = inputStream.read(CHUNK)
        rms = audioop.rms(data, 2)  # here's where you calculate the volume
        values.append(rms)
        _max = max(_max, rms)
    value = sum(values) / float(len(values))
    # Convert mean RMS to a dB(A)-style figure.
    # NOTE(review): math.log10 fails on 0 (pure silence) — confirm the
    # capture path can never return all-zero samples.
    value = 20 * math.log10(value) + 2.0
    if value > peakVolume:
        clipping[f] = True
    else:
        clipping[f] = False
    currentVolume = value
    volumes[f] = value
    averageVolume = sum(volumes.values()) / float(len(list(volumes.values())))
    thread.frame._max = max(thread.frame._max, value)
    inputStream.stop_stream()
    inputStream.close()
    input.terminate()
class Record(threading.Thread):
    """Background sweep driver: while the window is alive and 'playing',
    walks the interpolated frequency list, launching a play() thread and a
    volume() thread per frequency, paced by ``duration``."""

    def __init__(self, frame):
        threading.Thread.__init__(self)
        self.frame = frame  # the Example window this sweep reports into

    def run(self):
        while self.frame.alive:
            for f in interpolatedFrequencies:
                if self.frame.playing:
                    self.frame.currentFrequency = f
                    self.frame.Refresh()
                    # play and measure concurrently; volume() itself delays
                    # half a duration so the tone is already sounding.
                    p = threading.Thread(target=play, args=(f, self,))
                    p.start()
                    v = threading.Thread(target=volume, args=(f, self,))
                    v.start()
                    time.sleep(duration)
            # Idle pacing between sweeps (and while stopped).
            time.sleep(duration)
class Example(wx.Frame):
    """Main window: drives the frequency sweep and draws the measured
    frequency-response curve, a level meter and the control buttons."""

    def __init__(self, parent, title):
        super(Example, self).__init__(parent, title=title,
                                      size=(1000, 600))
        self.preferences = AppKitNSUserDefaults('de.yanone.frequencyAnalyzer')
        self.alive = True            # cleared on close; stops the Record thread
        self._max = 0                # loudest level seen so far
        self.currentFrequency = None
        self.playing = False
        self.recorder = Record(self)
        self.recorder.start()
        self.deviceButton = wx.Button(self, -1, "Device")
        self.deviceButton.Bind(wx.EVT_BUTTON, self.OnDevice)
        self.playButton = wx.Button(self, -1, "Play")
        self.playButton.Bind(wx.EVT_BUTTON, self.OnPlay)
        self.stopButton = wx.Button(self, -1, "Stop")
        self.stopButton.Bind(wx.EVT_BUTTON, self.OnStop)
        self.Centre()
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        dc = wx.ClientDC(self)
        dc.DrawLine(50, 60, 190, 60)
        # Reopen the device file used in the previous session, if any.
        if self.preferences.get('deviceFile'):
            self.openDeviceFile(self.preferences.get('deviceFile'))

    def OnPaint(self, event=None):
        """Redraw the whole window: axes, level meter, response curve."""
        dc = wx.PaintDC(self)
        dc.Clear()
        dc.SetPen(wx.Pen(wx.BLACK, 4))
        size = dc.GetSize()
        dc.SetBackground(wx.Brush(wx.Colour(30,30,30)))
        dc.Clear()
        # dc.DrawRectangle(0, 0, size[0], size[1])
        # Plot geometry, scaled to the window with fixed minimum margins.
        marginHorizontal = max(size[0] * .05, 100)
        marginTop = max(size[1] * .1, 100)
        marginBottom = max(size[1] * .2, 200)
        left = marginHorizontal
        right = size[0] - marginHorizontal - 100
        top = marginTop
        bottom = size[1] - marginBottom
        height = max(1, (bottom - top))
        width = max(1, (right - left))
        colour = wx.Colour(223,219,0)
        colour = wx.Colour(223,219,0)
        activeColour = wx.Colour(229,53,45)
        fontSize = max(width / 80.0, 10)

        # dB(A) label
        font = wx.Font(fontSize, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
        dc.SetTextForeground(colour)
        dc.SetFont(font)
        dc.DrawLabel('dB(A)', wx.Rect(left - 50, top + height / 2.0, 40, 20), wx.ALIGN_RIGHT)

        # Volume: live level meter on the right edge, with a frame and a
        # marker at the clipping threshold.
        pen=wx.Pen(colour ,4)
        dc.SetPen(pen)
        y = bottom - height * currentVolume / volumeScope
        dc.DrawLine(right + 85, bottom, right + 85, y)
        pen=wx.Pen(activeColour ,4)
        dc.SetPen(pen)
        dc.DrawLine(right + 70, top, right + 100, top)
        dc.DrawLine(right + 100, top, right + 100, bottom)
        dc.DrawLine(right + 70, bottom, right + 100, bottom)
        y = bottom - height * peakVolume / volumeScope
        dc.DrawLine(right + 70, y, right + 100, y)

        if frequencies:
            _min = min(volumes.values())
            _max = max(volumes.values())
            if self._max:
                factor = float(height) / float(self._max)
            else:
                factor = 1
            # Fixed scale overrides the adaptive factor computed above.
            factor = height / volumeScope
            # Peak
            y = bottom - peakVolume * factor
            pen=wx.Pen(activeColour ,4)
            dc.SetPen(pen)
            dc.DrawLine(left, y, right, y)
            for i, f in enumerate(interpolatedFrequencies):
                x = left + i * (right - left) / float(len(interpolatedFrequencies) - 1)
                # Average (with low-frequency weighting offsets)
                x2 = left + (i+1) * (right - left) / float(len(interpolatedFrequencies) - 1)
                if f <= 80:
                    volumeAdjust = 12
                elif f <= 100:
                    volumeAdjust = 9
                elif f <= 125:
                    volumeAdjust = 6
                elif f <= 160:
                    volumeAdjust = 3
                else:
                    volumeAdjust = 0
                y = bottom - (averageVolume + volumeAdjust) * factor
                pen=wx.Pen(wx.Colour(125,125,125) ,4)
                dc.SetPen(pen)
                dc.DrawLine(x, y, x2, y)
                # Grid: vertical line per listed frequency; yellow when it
                # clipped, red otherwise.
                if f in frequencies:
                    if f in clipping and clipping[f] == True:
                        pen=wx.Pen(colour ,4)
                    else:
                        pen=wx.Pen(activeColour ,4)
                    dc.SetPen(pen)
                    dc.DrawLine(x, top, x, bottom)
                # connecting lines
                pointPosition = (x, bottom - volumes[f] * factor)
                if i > 0:
                    previousPointPosition = (left + (i-1) * (right - left) / float(len(interpolatedFrequencies) - 1), bottom - volumes[interpolatedFrequencies[i-1]] * factor)
                    pen=wx.Pen(colour ,4)
                    dc.SetPen(pen)
                    dc.DrawLine(pointPosition[0], pointPosition[1], previousPointPosition[0], previousPointPosition[1])
                if f in frequencies:
                    # Frequency label under the axis ('1k5' style above 1 kHz).
                    font = wx.Font(fontSize, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
                    dc.SetTextForeground(colour)
                    dc.SetFont(font)
                    text = str(f)
                    if f >= 1000:
                        text = '%sk' % (f // 1000)
                        if f % 1000:
                            text += CleanFloat(f % 1000 / 100)
                    dc.DrawLabel(text, wx.Rect(x - 20, bottom + max(20, size[0] / 75.0), 40, 20), wx.ALIGN_CENTER)
                if f in frequencies:
                    # Measurement dot on the curve.
                    dc.SetPen(wx.Pen(colour ,0))
                    dc.SetBrush(wx.Brush(colour))
                    pointSize = float(width) / float(len(frequencies) - 1) * .2
                    dc.DrawCircle(pointPosition[0], pointPosition[1], pointSize)

        # Reposition controls relative to the current window size.
        self.deviceButton.SetPosition((marginHorizontal, size[1] - marginBottom + max(100, size[0] / 15.0)))
        self.playButton.SetPosition((marginHorizontal + 100, size[1] - marginBottom + max(100, size[0] / 15.0)))
        self.stopButton.SetPosition((marginHorizontal + 200, size[1] - marginBottom + max(100, size[0] / 15.0)))
        font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
        dc.SetTextForeground(colour)
        dc.SetFont(font)
        dc.DrawLabel('Input: %s, Output: %s' % (out.get_default_input_device_info()['name'], out.get_default_output_device_info()['name']), wx.Rect(marginHorizontal + 310, size[1] - marginBottom + 2 + max(100, size[0] / 15.0), 200, 100))

    def OnClose(self, event):
        """Tear down audio, stop the sweep thread and destroy the window."""
        outputStream.stop_stream()
        outputStream.close()
        out.terminate()
        self.playing = False
        self.alive = False
        # self.recorder.join(1)
        self.Destroy()
        # exit()

    def OnDevice(self, event):
        """Ask the user for an EQ .plist device file and load it."""
        # otherwise ask the user what new file to open
        with wx.FileDialog(self, "Open EQ .plist file", wildcard="plist files (*.plist)|*.plist",
                           style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
            if fileDialog.ShowModal() == wx.ID_CANCEL:
                return     # the user changed their mind
            # Proceed loading the file chosen by the user
            pathname = fileDialog.GetPath()
            self.openDeviceFile(pathname)

    def openDeviceFile(self, pathname):
        """Load frequencies from *pathname*, rebuild the interpolated list,
        reset all measurements and remember the file in preferences."""
        global frequencies, interpolatedFrequencies, volumes, averageVolume, clipping
        try:
            frequencies = plistlib.readPlist(pathname)
            self.SetTitle(os.path.basename(os.path.splitext(pathname)[0]))
            interpolatedFrequencies = []
            for i, f in enumerate(frequencies):
                # Insert intermediate points between consecutive listed
                # frequencies, then the listed frequency itself.
                if i > 0 and intermediateSteps > 0:
                    for s in range(intermediateSteps):
                        interpolatedFrequencies.append(Interpolate(frequencies[i-1], f, (s + 1)/float(intermediateSteps + 1)))
                interpolatedFrequencies.append(f)
            volumes = {}
            clipping = {}
            averageVolume = 0
            for f in interpolatedFrequencies:
                volumes[f] = 0
            self.Refresh()
            self.preferences.set('deviceFile', pathname)
        except IOError:
            wx.LogError("Cannot open file '%s'." % pathname)

    def OnPlay(self, event):
        """Start the sweep (no-op until a device file is loaded)."""
        if frequencies:
            self.playing = True

    def OnStop(self, event):
        """Stop the sweep and clear the current-frequency indicator."""
        if frequencies:
            self.currentFrequency = None
            self.Refresh()
            self.playing = False

    def DrawLine(self):
        """Draw a fixed decorative line directly onto the client area."""
        dc = wx.ClientDC(self)
        dc.DrawLine(50, 60, 190, 60)
if __name__ == '__main__':
    # Build the wx application, show the main window and enter the GUI loop.
    app = wx.App()
    e = Example(None, 'Frequency Response')
    e.DrawLine()
    e.Show()
    # e.play()
    e.DrawLine()
    app.MainLoop()
| [
"post@yanone.de"
] | post@yanone.de |
646357d0513992ba17b94e2a0ec1f976826b4817 | f16a17a6faa673007f69b9892c4558e11958c55d | /cmddocs/articles.py | 477cd909cf357279d062fd6df3261881a72a0240 | [
"MIT"
] | permissive | TaurusOlson/cmddocs | 6b707110774d9a7f7449e08c32846e8b263d4b48 | 7c84a7ba32b21c7a3d15304c4590f6eab56c572a | refs/heads/master | 2021-01-20T22:50:51.512550 | 2015-01-08T11:33:26 | 2015-01-08T11:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,952 | py | import os
import re
import tempfile
import subprocess
# Function definitions
def list_articles(dir):
    """List all articles in *dir* and below via the external ``tree`` tool."""
    command = ["tree", dir]
    subprocess.call(command)
def list_directories(dir):
    """List only the directories in *dir* and below via ``tree -d``."""
    command = ["tree", "-d", dir]
    subprocess.call(command)
def change_directory(dir, datadir):
    """Change the working directory, restricted to *datadir*.

    An empty *dir*, or any path that resolves outside *datadir*, falls
    back to *datadir* itself.  Returns the directory switched to, or
    None when the target does not exist.
    """
    # Resolve relative to the current directory and normalise so that
    # ".." segments cannot escape the data directory.
    d = os.path.normpath(os.path.join(os.getcwd(), dir))
    # Previously a plain substring test (``datadir in d``), which both
    # missed ".." escapes and matched sibling dirs such as "<datadir>2";
    # compare path components instead.
    norm_datadir = os.path.normpath(datadir)
    if d != norm_datadir and not d.startswith(norm_datadir + os.sep):
        d = datadir
    # An empty argument always means "go back to the data directory".
    if not dir:
        d = datadir
    try:
        os.chdir(d)
        return d
    except OSError:
        print("Directory %s not found" % dir)
def edit_article(article, directory, editor, repo, default_commit_msg):
    """edit an article within your docs

    Opens *article* (relative to *directory*) in *editor*, creating any
    missing parent directories first, then stages and commits the change
    in *repo*.  Note: this module is Python 2 (print statement, raw_input).
    """
    # set paths
    a = os.path.join(directory, article)
    d = os.path.dirname(a)
    # create dir(s)
    if not os.path.isdir(d):
        os.makedirs(d)
    # start editor
    try:
        subprocess.call([editor, a])
    except OSError:
        print "'%s' No such file or directory" % editor
    # commit into git
    try:
        repo.git.add(a)
        if repo.is_dirty():
            # Ask for a commit message; fall back to the configured default.
            msg = raw_input("Commit message: ")
            if not msg:
                msg = default_commit_msg
            repo.git.commit(m=msg)
        else:
            print "Nothing to commit"
    except:
        # NOTE(review): bare except silently swallows every git error
        # (and KeyboardInterrupt) — consider narrowing to Exception.
        pass
def view_article(article, dir, pager):
    """view an article within your docs

    Reads the markdown file, converts headings/code blocks to ANSI colour
    escapes, writes the result to a temp file and shows it in *pager*.
    """
    a = os.path.join(dir, article)
    # read original file
    try:
        article = open(a, "r")
    except IOError:
        print "Error: Could not find %s" % article
        return
    content = article.read()
    article.close()
    # create tmp file and convert markdown to ansi
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        # h3-h5 headings -> bold white
        h = re.compile('^#{3,5}\s*(.*)\ *$', re.MULTILINE)
        content = h.sub('\033[1m\033[37m\\1\033[0m', content)
        # h1-h2 headings -> underlined bold white
        h = re.compile('^#{1,2}\s*(.*)\ *$', re.MULTILINE)
        content = h.sub('\033[4m\033[1m\033[37m\\1\033[0m', content)
        # 4-space indented code blocks -> green
        h = re.compile('^\ {4}(.*)', re.MULTILINE)
        content = h.sub('\033[92m\\1\033[0m', content)
        # fenced ~~~ blocks -> green
        h = re.compile('~~~\s*([^~]*)~~~[^\n]*\n', re.DOTALL)
        content = h.sub('\033[92m\\1\033[0m', content)
        tmp.write(content)
    # start pager and cleanup tmp file afterwards
    # -fr is needed for showing binary+ansi colored files to
    # be properly displayed
    try:
        subprocess.call([pager, "-fr", tmp.name])
    except OSError:
        print "'%s' No such file or directory" % pager
    try:
        os.remove(tmp.name)
    except OSError:
        print "Error: Could not remove %s" % tmp.name
def delete_article(article, dir, repo):
    """Delete *article*: via git when tracked, otherwise from the filesystem.

    Falls back to plain directory/file removal when the git operations
    fail (e.g. the path was never committed).
    """
    a = os.path.join(dir, article)
    try:
        repo.git.rm(a)
        repo.git.commit(m="%s deleted" % article)
        print("%s deleted" % article)
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # are no longer swallowed; the best-effort fallback is preserved.
    except Exception:
        if os.path.isdir(a):
            try:
                os.rmdir(a)
                print("Removed directory %s which was not under version control" % a)
            except OSError:
                print("Could not remove %s - its maybe not empty" % a)
        else:
            try:
                os.remove(a)
                print("Removed file %s which was not under version control" % a)
            except OSError:
                print("File %s could not be removed" % a)
    return
def move_article(dir, args, repo):
    """Move an article from source to destination via ``git mv`` + commit.

    *args* is a single string "source dest"; both paths are taken
    relative to *dir*.  Prints usage and returns on bad input.
    """
    args = args.split()
    if len(args) != 2:
        # print() call instead of the Python 2 print statement, matching
        # the rest of this module's parenthesised prints (same output).
        print("Invalid usage\nUse: mv source dest")
        return
    a = os.path.join(dir, args[0])
    e = os.path.join(dir, args[1])
    d = os.path.dirname(e)
    # Create intermediate directories for the destination if needed.
    if not os.path.isdir(d):
        os.makedirs(d)
    # Move the file in git and record the change.
    repo.git.mv(a, e)
    repo.git.commit(m="Moved %s to %s" % (a, e))
    print("Moved %s to %s" % (a, e))
def search_article(keyword, directory, datadir, exclude):
    """
    Search for a keyword (regex) in every article within *directory* and
    below, much like recursive grep.  Directories named in *exclude* are
    skipped.  Returns a summary string "Results: <count>".
    """
    c = 0
    r = re.compile(keyword)
    for dirpath, dirs, files in os.walk(directory):
        # Prune excluded directories in place so os.walk skips them.
        dirs[:] = [d for d in dirs if d not in exclude]
        for fname in files:
            path = os.path.join(dirpath, fname)
            # ``with`` closes each file deterministically — the original
            # opened files without ever closing them.
            with open(path, "rt") as f:
                for i, line in enumerate(f):
                    if r.search(line):
                        c = c + 1
                        print("* \033[92m%s\033[39m: %s" % (os.path.relpath(path, datadir),
                                                            line.rstrip('\n')))
    return "Results: %s" % c
def show_log(args, repo):
    """
    Show latest git logs with specified number of entries and maybe for a
    specific file.

    *args* is a whitespace-separated string: "<file> [count]" when the
    first token names an existing file, otherwise "[count] [file]".
    With no args the last 10 commits are shown.
    """
    args = args.split()
    # NOTE: ``format`` shadows the builtin within this function.
    format="format:%C(blue)%h %Cgreen%C(bold)%ad %Creset%s"
    dateformat="short"
    if len(args) >= 1:
        if os.path.isfile(os.path.join(os.getcwd(), args[0])):
            # First argument is a file; the optional second is a count.
            file = args[0]
            try:
                count = args[1]
                print "Last %s commits for %s" % (count, file)
                print repo.git.log(file, pretty=format, n=count, date=dateformat)
            except IndexError:
                count = 10
                print "Last %s commits for %s" % (count, file)
                print repo.git.log(file, pretty=format, n=count, date=dateformat)
        else:
            # First argument is a count; the optional second is a file.
            count = args[0]
            try:
                file = args[1]
                print "Last %s commits for %s" % (count, file)
                print repo.git.log(file, pretty=format, n=count, date=dateformat)
            except IndexError:
                print "Last %s commits" % count
                print repo.git.log(pretty=format, n=count, date=dateformat)
    elif len(args) == 0:
        count = 10
        print "Last %s commits" % count
        print repo.git.log(pretty=format, n=count,date=dateformat)
| [
"flo@noqqe.de"
] | flo@noqqe.de |
c8d1f101c814d254035989eb2539d01b9c470f46 | 5b7413d0de8f2dc34df372489d233f59b12a76c6 | /2.3.4.py | e4a37510affe30a6cde8198dd5b085c0fdf4c18e | [] | no_license | vekhnyk/stepik---auto-tests-course | cd1e9ce54f242a49b1b4a7b6fe9a25c99866dd07 | 3508794aa955c58c79fd31d1412faec5dbd2a2cc | refs/heads/master | 2022-08-21T20:39:31.324410 | 2020-05-11T07:46:17 | 2020-05-11T07:46:17 | 262,959,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | from selenium import webdriver
from time import sleep
import math
# Stepik exercise 2.3.4: accept a JS alert, then submit a computed answer.
link = 'http://suninjuly.github.io/alert_accept.html'
try:
    browser = webdriver.Chrome()
    browser.get(link)
    # Trigger the confirmation dialog and accept it.
    browser.find_element_by_class_name('btn-primary').click()
    confirm = browser.switch_to.alert
    confirm.accept()
    # Read x from the page and submit log(|12*sin(x)|) as the answer.
    x = browser.find_element_by_id('input_value').text
    rez = str(math.log(abs(12*math.sin(int(x)))))
    browser.find_element_by_id('answer').send_keys(rez)
    browser.find_element_by_class_name('btn-primary').click()
finally:
    # Keep the window open briefly so the result is visible, then quit.
    sleep(15)
    browser.quit()
| [
"vekhnyk@gmail.com"
] | vekhnyk@gmail.com |
26f2e95b46c06356ab3fa1f80b77aab1536140f8 | 67e1ccd069d6194158d32961c6a1f0fa49a0681f | /01_intro/recombination.py | 6413cf48e0cd0ee4a97fad5584378a820d17259f | [
"Unlicense"
] | permissive | arantzardzm/software-art-text | a9a5e8e601876334049d484b2ef238ae233d47c5 | 2af98427892126ee00b1de12f7fd60b061386247 | refs/heads/master | 2021-07-21T16:17:12.191445 | 2017-10-31T09:20:59 | 2017-10-31T09:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | import sys
import random
from time import sleep
actor_adjectives = ['imperceptible', 'defiant', 'howling', 'subtle', 'eerie', 'cute']
actor = ['feline', 'cat', 'purr', 'claws', 'friend', 'crawler']
act = ['jumps', 'caresses', 'envelops', 'touches', 'reaches into', 'pets']
victim_adjectives = ['frightened', 'foolish', 'eager', 'loud', 'quiet', 'unconscious', 'wide-eyed']
victim = ['shadow', 'shiver', 'glimpse', 'crawl', 'sinner', 'saviour', 'hand', 'smile', 'cry']
while True:
print "the %s %s %s the %s." % (random.choice(actor_adjectives), random.choice(actor), random.choice(act), random.choice(victim_adjectives)),
conclusion = random.random()
if conclusion > 0.8:
print "there is a %s." % random.choice(victim)
exit()
elif conclusion > 0.6:
print "there is no %s." % random.choice(victim)
exit()
else:
print ""
sys.stdout.flush()
sleep(3)
| [
"pierre.depaz@gmail.com"
] | pierre.depaz@gmail.com |
c895843d8e49416fd67fe7412ef83108b18815d3 | 1e12dc07222de77f9344b8c260d7b848a76d8d5e | /word_cloud.py | 70771c0f40ffd11a3a814b39c076130f1220d916 | [] | no_license | ljyw17/douban_movie_analysis | 4dd61f3a090189ccd411844cae929f12e81b005c | 8d520f582b19f8d85add8ad72540a27db98bba60 | refs/heads/master | 2020-07-11T03:38:22.936473 | 2019-08-26T12:42:42 | 2019-08-26T12:42:42 | 204,436,858 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | from wordcloud import WordCloud
import PIL .Image as image
import numpy as np
import jieba, pandas
def trans_CN(text):
    """Segment Chinese *text* with jieba and join the tokens with spaces."""
    # WordCloud expects space-separated tokens; Chinese has no word breaks.
    return " ".join(jieba.cut(text))
if __name__=="__main__":
    file_path = r"Pure Hearts Into Chinese Showbiz.xlsx"
    # Read the spreadsheet and concatenate every "short" comment.
    df = pandas.read_excel(file_path, header=0)
    text = ""
    for i in range(len(df)):
        text += df["short"][i]
    # Tokenise so WordCloud can count Chinese words.
    text = trans_CN(text)
    wordcloud = WordCloud(
        font_path = r"C:\Windows\Fonts\msyh.ttc"  # font with CJK glyphs
    ).generate(text)
    image_produce = wordcloud.to_image()
    image_produce.show()
    # Save next to the source, named after the spreadsheet.
    image_produce.save("{}.jpg".format(file_path.split(".")[0]))
"noreply@github.com"
] | ljyw17.noreply@github.com |
df931b896dc092c4cf234d2b30a65601baa984cb | 06d42cdf96dc12d3582ee2b107c481800eea95ae | /exercises/ex8_8.py | b57df9a6d5477ca300240ac35589a9f63c1a0c20 | [] | no_license | GiaDieu/Python-exercises | 236773137b1156dd9e5c2aef91b8e0ab4ecdb6fb | 81ae81fc91d27c64f4f70922fb4c09cf8d73902d | refs/heads/master | 2020-09-11T18:06:47.769509 | 2019-11-16T19:20:38 | 2019-11-16T19:20:38 | 222,147,357 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | #!/usr/bin/env python3
__doc__ = '''
Viết script get_version nhận vào ngày ở format <month>/<day>/<year>.
VD: 03/28/16 làm parameter và in ra một version được tính theo quy luật sau:
- Version ở dạng format: <MAJOR>.<MINOR>.<PATCH>, vd: "6.9.2"
- Từ ngày 09 tháng 02 năm 2016, phiên bản bắt đầu là "1.0.0"
- Mỗi 28 ngày, MAJOR lại tăng thêm 1, MINOR và PATCH set về 0
- Mỗi tuần, MINOR tăng thêm 1 và PATCH sẽ set về 0
- Cứ mỗi ngày, PATCH lại tăng thêm 1.
Yêu cầu:
- Kiểm tra version thu được với lần lượt các input là "02/03/16", "09/06/16"
với thởi điểm cuối là "06/23/17"
Gợi ý: học viên sử dụng `sys.argv` hoặc module `argparse`
'''
import sys
import datetime
def get_version(input_data):
    """Return the version string for a date given as ``<month>/<day>/<year>``.

    Counting from 02/09/16 ("1.0.0"): MAJOR advances every 28 days,
    MINOR every 7 days within a 28-day cycle, PATCH once per day.

    :param input_data: date string such as "02/03/16"
    :rtype str:
    """
    epoch = datetime.datetime.strptime("02/09/16", "%m/%d/%y")
    elapsed = (datetime.datetime.strptime(input_data, "%m/%d/%y") - epoch).days
    major, remainder = divmod(elapsed, 28)
    minor, patch = divmod(remainder, 7)
    return "{}.{}.{}".format(major + 1, minor, patch)
def solve(input_data):
    """Grading harness entry point: delegates to :func:`get_version`.

    :rtype str:
    """
    return get_version(input_data)
def main():
    """Read the date from the command line and print the version string."""
    date_arg = sys.argv[1]
    print(solve(date_arg))
if __name__ == "__main__":
main()
| [
"giadieuly@gmail.com"
] | giadieuly@gmail.com |
64cddf5250ac60f94ef5c62aedfa3eb120d3e5f8 | 8ca70628ca811e08fb77b8e251fc8e5049486a65 | /airbyte-integrations/bases/base-python/base_python/cdk/streams/exceptions.py | 6727216dd5dd50496241a0890070cb87439e8f82 | [
"MIT"
] | permissive | Tana8M/airbyte | a19544d2f7997ec7551793f7077d3e02bfe6ac84 | 49296ef657be272684c7259ed0d6be06e574dbe1 | refs/heads/master | 2023-04-15T15:04:22.849307 | 2021-04-23T23:12:55 | 2021-04-23T23:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | """
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Union
import requests
class BaseBackoffException(requests.exceptions.HTTPError):
    """Base class for HTTP errors that signal the client should back off."""
    pass
class UserDefinedBackoffException(BaseBackoffException):
    """
    An exception that exposes how long it attempted to backoff
    """

    def __init__(self, backoff: Union[int, float], request: requests.PreparedRequest, response: requests.Response):
        """
        :param backoff: how long to backoff in seconds
        :param request: the request that triggered this backoff exception
        :param response: the response that triggered the backoff exception
        """
        # Stored so retry logic can honour the user-specified wait time.
        self.backoff = backoff
        super().__init__(request=request, response=response)
class DefaultBackoffException(BaseBackoffException):
    """Backoff error raised when no user-defined backoff time applies."""
    pass
| [
"noreply@github.com"
] | Tana8M.noreply@github.com |
d600fb5bea2fd528892cf24f66259ab17cd92938 | 98dbbcd09777a3071c4decefff8b19d477f225a1 | /plot_functions.py | e532e7d0e6c7f802fb7e4a06c37470423110bd63 | [] | no_license | ChristianMorup/RL_Pong | bbcc54f20f44be68ffb80b8257af067755a87476 | 8304df69ef0cabbb4b12b65a1b547fc32eb49b8d | refs/heads/main | 2023-01-23T10:03:09.283135 | 2020-12-01T18:01:38 | 2020-12-01T18:01:38 | 313,404,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,335 | py | import numpy as np
import matplotlib.pyplot as plt
from file_util import FileUtil
from modes_and_types import *
apply_filters = True
#TODO:
## Proportional to the distance
## Font and width of figures is too small
## Put labels on top of the figure
## Number of missed balls
## Invert the scores
#reward_types = all_rewards
#modes = all_modes
modes = [Mode.BOTH]
reward_types = [RewardType.TRACKING_PROPORTIONAL_UNIDIRECTIONAL, RewardType.TRACKING_PROPORTIONAL_UNIDIRECTIONAL_WEIGHTED]
def moving_average(a, n=3):
    """Return the simple moving average of *a* over windows of length *n*."""
    csum = np.cumsum(a, dtype=float)
    # Subtract the shifted cumulative sum so each entry holds a window total.
    csum[n:] -= csum[:-n]
    return csum[n - 1:] / n
def map_reward_type_to_title(reward):
    """Return (cumulative-reward plot title, score plot title) for *reward*."""
    titles = {
        RewardType.TRACKING: 'Simple tracking ',
        RewardType.HITTING: 'Hitting ',
        RewardType.TRACKING_AND_HITTING: 'Simple tracking + Hitting ',
        RewardType.TRACKING_PROPORTIONAL:
            'Tracking \n(Proportional to the distance - Unidirectional) ',
        RewardType.TRACKING_PROPORTIONAL_UNIDIRECTIONAL:
            'Tracking \n(Proportional to the distance - Both directions) ',
        RewardType.TRACKING_PROPORTIONAL_UNIDIRECTIONAL_WEIGHTED:
            'Tracking \n(Weighted and proportional to the distance - Unidirectional)',
        RewardType.TRACKING_STEADY_NERVES: 'Steady nerves',
    }
    # Unknown reward types fall back to an empty title, as before.
    title = titles.get(reward, '')
    return 'Cumulative reward - ' + title, 'Score - ' + title
def filter_by_picking_less_values(data):
    """Subsample *data*, keeping every 5th element.

    Returns ``(values, indices)``: the kept values and the positions
    they were taken from, both as float arrays.  Vectorised replacement
    for the original np.append loop, which was O(n^2).
    """
    arr = np.asarray(data, dtype=float)
    # Preserve the original's empty-input result (two plain lists).
    if len(arr) == 0:
        return [], []
    # dtype=float matches np.append([], ...) in the original, which
    # always produced float64 output regardless of the input dtype.
    return arr[::5].copy(), np.arange(0, len(arr), 5, dtype=float)
def summarize_simulations(score_data, reward_data, n_simulations=15):
    """Average scores and rewards over *n_simulations* runs of 1000 episodes.

    Scores are inverted (100 - raw) so that higher means better, then
    both series are smoothed/subsampled via the module-level ``filter``.
    """
    score_sum = []
    reward_sum = []
    # lowest_reward = find_lowest_first_reward_score(reward_data, n_simulations)
    for column in range(1000):  # episodes per run are hard-coded to 1000
        score_total = 0
        reward_total = 0
        for row in range(n_simulations):
            # Invert: raw score counts misses, the plots show hits.
            score_total += (100 - score_data[row][column])
            reward_total += reward_data[row][column]
            #reward_total += normalize_reward_score(lowest_reward, reward_data[row][column])
        new_score_value = score_total // n_simulations  # integer average
        new_reward_value = reward_total // n_simulations
        score_sum = np.append(score_sum, new_score_value)
        reward_sum = np.append(reward_sum, new_reward_value)
    # ``filter`` is this module's smoothing helper, not the builtin.
    return filter(score_sum, reward_sum)
def summarize_reward_simulations(reward_data, n_simulations=15):
    """Average cumulative rewards across runs, then smooth and subsample."""
    reward_sum = []
    # lowest_reward = find_lowest_first_reward_score(reward_data, n_simulations)
    for episode in range(1000):
        total = sum(reward_data[run][episode] for run in range(n_simulations))
        # reward_total += normalize_reward_score(lowest_reward, ...)
        reward_sum = np.append(reward_sum, total // n_simulations)
    smoothed = moving_average(reward_sum, 3)
    reward_sum, reward_episodes = filter_by_picking_less_values(smoothed)
    return reward_sum, reward_episodes
def find_lowest_first_reward_score(data, n_simulations=15):
    """Return the smallest first-episode reward across runs, capped at 0."""
    first_rewards = [data[run][0] for run in range(n_simulations)]
    return min([0] + first_rewards)
def normalize_reward_score(lowest_reward, value):
    """Shift *value* so that *lowest_reward* maps to zero."""
    shifted = value - lowest_reward
    return shifted
def filter(score_sum, reward_sum):
    """Optionally smooth (window 3) and subsample both series.

    Intentionally shadows the builtin ``filter``; existing callers rely
    on this module-level name.
    """
    if not apply_filters:
        score_episodes = range(1000)
        reward_episodes = range(1000)
        return score_sum, score_episodes, reward_sum, reward_episodes
    score_sum, score_episodes = filter_by_picking_less_values(moving_average(score_sum, 3))
    reward_sum, reward_episodes = filter_by_picking_less_values(moving_average(reward_sum, 3))
    return score_sum, score_episodes, reward_sum, reward_episodes
def plot_simulations(folder_path, n_modes=3, n_simulations=10):
    """Plot score and reward curves for every configured reward type.

    For each reward type, data for each mode is loaded from *folder_path*,
    averaged over *n_simulations* runs, reshaped to (n_modes, episodes)
    and plotted: one score figure, one cumulative-reward figure, and —
    when present — separate positive/negative reward figures.
    """
    for reward_type in reward_types:
        scores = []
        rewards = []
        pos_rewards = []
        neg_rewards = []
        episodes = []
        for mode in modes:
            file_util = FileUtil(folder_path)
            s_data, r_data, r_pos_data, r_neg_data = file_util.read_files(reward_type, mode)
            s_sum, episodes, r_sum, r_episodes = summarize_simulations(s_data, r_data, n_simulations=n_simulations)
            # Curves for all modes are concatenated flat, then reshaped below.
            scores = np.append(scores, s_sum)
            rewards = np.append(rewards, r_sum)
            if len(r_pos_data) > 0:
                r_pos_sum, pos_episodes = summarize_reward_simulations(r_pos_data, n_simulations=n_simulations)
                pos_rewards = np.append(pos_rewards, r_pos_sum)
            if len(r_neg_data) > 0:
                r_neg_sum, neg_episodes = summarize_reward_simulations(r_neg_data, n_simulations=n_simulations)
                neg_rewards = np.append(neg_rewards, r_neg_sum)
        # One row per mode; pos/neg arrays have one mode fewer.
        if n_modes > 0:
            shape = (n_modes, len(episodes))
            scores = scores.reshape(shape)
            rewards = rewards.reshape(shape)
        if len(pos_rewards) > 0:
            shape = (n_modes-1, len(episodes))
            pos_rewards = pos_rewards.reshape(shape)
        if len(neg_rewards) > 0:
            shape = (n_modes-1, len(episodes))
            neg_rewards = neg_rewards.reshape(shape)
        reward_title, score_title = map_reward_type_to_title(reward_type)  # create_title(reward_type, mode)
        # --- score figure ---
        plt.clf()
        # plt.plot(episodes, scores, label="Both")
        plt.plot(episodes, scores[0], label="Both")
        if n_modes >= 2:
            plt.plot(episodes, scores[1], label="Negative")
        if n_modes >= 3:
            plt.plot(episodes, scores[2], label="Positive")
        plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.0), ncol=3, fancybox=True, shadow=False)
        plt.xlim(xmin=0, xmax=1000)
        plt.ylim(ymin=0, ymax=100)
        plt.ylabel('n Hits (max=100)')
        plt.xlabel('Episodes')
        plt.title(score_title)
        plt.show()
        # --- cumulative reward figure ---
        plt.clf()
        # plt.plot(episodes, rewards, label="Both")
        plt.plot(episodes, rewards[0], label="Both")
        if n_modes >= 2:
            plt.plot(episodes, rewards[1], label="Negative")
        if n_modes >= 3:
            plt.plot(episodes, rewards[2], label="Positive")
        plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.0), ncol=3, fancybox=True, shadow=False)
        plt.xlim(xmin=0, xmax=1000)
        plt.ylabel('Cumulative reward')
        plt.xlabel('Episodes')
        plt.title(reward_title)
        plt.show()
        # --- positive-only rewards, when recorded ---
        if len(pos_rewards) > 0:
            plt.clf()
            # plt.plot(episodes, rewards, label="Both")
            if n_modes - 1 >= 1:
                plt.plot(episodes, pos_rewards[0], label="Both")
            if n_modes - 1 >= 2:
                plt.plot(episodes, pos_rewards[1], label="Positive")
            plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.0), ncol=3, fancybox=True, shadow=False)
            plt.xlim(xmin=0, xmax=1000)
            plt.ylabel('Cumulative positive reward')
            plt.xlabel('Episodes')
            plt.title(reward_title)
            plt.show()
        # --- negative-only rewards, when recorded ---
        if len(neg_rewards) > 0:
            plt.clf()
            # plt.plot(episodes, rewards, label="Both")
            if n_modes - 1 >= 1:
                plt.plot(episodes, neg_rewards[0], label="Both")
            if n_modes - 1 >= 2:
                plt.plot(episodes, neg_rewards[1].flatten(), label="Negative")
            plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.0), ncol=3, fancybox=True, shadow=False)
            plt.xlim(xmin=0, xmax=1000)
            plt.ylabel('Cumulative negative reward')
            plt.xlabel('Episodes')
            plt.title(reward_title)
            plt.show()
def do_simple_plot(score):
    """Quick standalone plot of a single score series (debug helper)."""
    plt.clf()
    plt.plot(range(len(score)), score, label="0.1")
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.xlim(xmin=0, xmax=400)
    plt.ylim(ymin=0)
    plt.ylabel('Number of \'back wall\'-hits')
    plt.xlabel('Episodes')
    plt.title('Test')
    plt.show()
plot_simulations('sim_data/test/', n_modes=1, n_simulations=5)
| [
"chrmorup@gmail.com"
] | chrmorup@gmail.com |
be9bfbbae588fe4bff415889569809c937fc371a | c6ac57175975de3353faee0c2f1f796e2cd16a16 | /bot1.py | 4ee830465388df215de35511828cc2758ed29857 | [] | no_license | 3rdmonth/env_pizzabot | 9c37062567dbdd010b40d1eb19de91f96cdd9b9a | ebe8bdad23206514a5f7ac3c6a3fca3b302bde78 | refs/heads/master | 2022-12-14T14:23:47.930610 | 2018-09-29T18:22:03 | 2018-09-29T18:22:03 | 150,890,812 | 0 | 0 | null | 2021-06-01T22:42:00 | 2018-09-29T18:12:21 | Python | UTF-8 | Python | false | false | 689 | py | import requests
url = "https://api.telegram.org/bot645781983:AAHslgjt-LRnVX57f-40hlQwQMBC-oYoQtc/"
def get_updates_json(request):
response = requests.get(request + 'getUpdates')
return response.json()
def last_update(data):
results = data['result']
total_updates = len(results) - 1
return results[total_updates]
def get_chat_id(update):
chat_id = update['message']['chat']['id']
return chat_id
def send_mess(chat, text):
params = {'chat_id': chat, 'text': text}
response = requests.post(url + 'sendMessage', data=params)
return response
chat_id = get_chat_id(last_update(get_updates_json(url)))
send_mess(chat_id, 'Your message goes here')
| [
"vas.viktorov@ya.ru"
] | vas.viktorov@ya.ru |
f73086d0105b3dbd3762dcdd037934533567ee3a | 3f8ef6ce1c878e6aa358714ea3c85a988de8948f | /Evolution Software Engineer/tenth_year.py | c97bbc78b102ba361ad8c494668ff7c884f5cee6 | [] | no_license | gchacaltana/PythonToolkit | 1ae320bb091996a5214fc446345f115cdcb28202 | 05d683fb5170294e95f4d42ed5bbe96135e9538e | refs/heads/master | 2022-03-08T02:47:04.823608 | 2022-02-11T23:11:22 | 2022-02-11T23:11:22 | 24,420,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Programa que imprime en pantalla el mensaje Hola Mundo"""
__author__ = 'Gonzalo Chacaltana'
__email__ = "gchacaltanab@outlook.com"
class HelloWorld(object):
    """Prints the greeting as a side effect of construction."""

    def __init__(self):
        print("Hola Mundo")
# Instantiating once prints the greeting when the module runs.
obj = HelloWorld()
"gchacaltanab@gmail.com"
] | gchacaltanab@gmail.com |
62be2d8d4dbc67fc085acf0750f18d8471c8142f | f008fe1057b00d19d7812e0373669763c9992f42 | /python/utils/hpatch.py | 1047e32c3a097bb0f224c9141aa398b918144cee | [
"BSD-2-Clause"
] | permissive | baiyancheng20/hpatches-benchmark | 68a265f5e23f7ceabce06635818b8c6a73c3e578 | 23c3a6dcf92bff7bda492363908cbc52b2533472 | refs/heads/master | 2021-01-20T00:52:08.069991 | 2017-04-22T19:02:21 | 2017-04-22T19:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,448 | py | import cv2
import numpy as np
from glob import glob
from joblib import Parallel, delayed
import multiprocessing
import pandas as pd
import json
import os
import time
import scipy
import copy
# all types of patches
tps = ['ref','e1','e2','e3','e4','e5','h1','h2','h3','h4','h5',\
't1','t2','t3','t4','t5']
def vis_patches(seq, tp, ids):
    """Visualises a set of types and indices for a sequence.

    Builds a mosaic: a 35px header row of patch-type labels followed by
    one 65px row per patch id, with one 65px column per type in *tp*.
    """
    w = len(tp)*65
    vis = np.empty((0, w))
    # add the first line with the patch type names
    vis_tmp = np.empty((35, 0))
    for t in tp:
        tp_patch = 255*np.ones((35,65))  # white label cell
        cv2.putText(tp_patch,t,(5,25),cv2.FONT_HERSHEY_DUPLEX , 1,0,1)
        vis_tmp = np.hstack((vis_tmp,tp_patch))
    vis = np.vstack((vis,vis_tmp))
    # add the actual patches
    for idx in ids:
        vis_tmp = np.empty((65, 0))
        for t in tp:
            vis_tmp = np.hstack((vis_tmp,get_patch(seq,t,idx)))
        vis = np.vstack((vis,vis_tmp))
    return vis
def get_patch(seq, t, idx):
    """Return patch number *idx* of type *t* from sequence *seq*."""
    patches = getattr(seq, t)
    return patches[idx]
def get_im(seq, t):
    """Return the whole attribute *t* of *seq* (e.g. a stacked patch image)."""
    return getattr(seq, t)
def load_splits(f_splits):
    """Load the JSON-encoded split definitions from *f_splits*."""
    with open(f_splits) as fh:
        return json.load(fh)
def load_descrs(path, dist='L2', descr_type='', sep=','):
    """Loads *all* saved patch descriptors from a root folder.

    Returns a dict mapping sequence name -> hpatch_descr, plus the
    'distance' and 'dim' metadata keys.
    """
    print('>> Please wait, loading the descriptor files...')
    # all folders in the descriptor root folder, except the 1st which is '.'
    t = [x[0] for x in os.walk(path)][1::]
    # The original wrapped a bare ``len(t) == 116`` comparison in
    # try/except — a comparison never raises, so the warning was dead
    # code.  Check explicitly instead.
    if len(t) != 116:
        print("%r does not seem like a valid HPatches descriptor root folder." % (path))
    # ``name`` was an undefined global in the original call; hpatch_descr
    # ignores that argument and derives the name from the folder path.
    seqs_l = Parallel(n_jobs=multiprocessing.cpu_count())\
        (delayed(hpatch_descr)(f, None, descr_type, sep) for f in t)
    seqs = dict((l.name, l) for l in seqs_l)
    seqs['distance'] = dist
    seqs['dim'] = seqs_l[0].dim
    print('>> Descriptor files loaded.')
    return seqs
###############
# PCA Methods #
###############
# TODO add error if no training set - cant do PCA on test. (e.g. the
# full/view/illum split)
def compute_pcapl(descr, split):
    """PCA-whiten and power-law normalise descriptors in-place.

    The whitening transform is learned on the 'ref' patches of the
    train sequences and then applied to every patch type of the test
    sequences, overwriting the stored descriptor arrays.
    """
    X = np.empty((0,descr['dim']))
    for seq in split['train']:
        X = np.vstack((X,get_im(descr[seq],'ref')))
    X -= np.mean(X, axis=0)
    Xcov = np.dot(X.T,X)
    # Symmetrise and scale the covariance for numerical stability.
    Xcov = (Xcov + Xcov.T) / (2 * X.shape[0]);
    d, V = np.linalg.eigh(Xcov)
    # Clip the smallest 60% of eigenvalues to avoid amplifying noisy
    # directions during whitening.
    vv = np.sort(d)
    cl = vv[int(0.6*len(vv))]
    d[d<=cl]=cl
    D = np.diag(1. / np.sqrt(d))
    W = np.dot(np.dot(V, D), V.T)  # ZCA-style whitening matrix
    for seq in split['test']:
        print(seq)
        for t in tps:
            X = get_im(descr[seq],t)
            X -= np.mean(X, axis=0)
            X_pca = np.dot(X,W)
            # Signed square-root (power-law), then L2 row normalisation.
            X_pcapl = np.sign(X_pca) * np.power(np.abs(X_pca),0.5)
            norms = np.linalg.norm(X_pcapl,axis=1)
            X_proj = (X_pcapl.T / norms).T
            X_proj = np.nan_to_num(X_proj)  # zero-norm rows become zeros
            setattr(descr[seq], t, X_proj)
################################
# Patch and descriptor classes #
################################
class hpatch_descr:
    """Class for loading an HPatches descriptor result .csv file"""
    itr = tps

    def __init__(self, base, name, descr_type='', sep=','):
        # ``name`` is accepted for call compatibility but the sequence
        # name is always derived from the folder path.
        self.base = base
        self.name = base.split("/")[-1]
        for t in self.itr:
            descr_path = os.path.join(base, t + '.csv')
            # ``.values`` replaces DataFrame.as_matrix(), which was
            # deprecated in pandas 0.23 and removed in pandas 1.0.
            df = pd.read_csv(descr_path, header=None, sep=sep).values
            df = df.astype(np.float32)
            if descr_type == "bin_packed":
                # Packed binary descriptors: expand each byte to 8 bits.
                df = df.astype(np.uint8)
                df = np.unpackbits(df, axis=1)
            setattr(self, t, df)
            self.N = df.shape[0]
            self.dim = df.shape[1]
        assert self.dim != 1, \
            "Problem loading the .csv files. Please check the delimiter."
class hpatch_sequence:
    """Class for loading an HPatches sequence from a sequence folder"""
    itr = tps

    def __init__(self, base):
        name = base.split('/')
        self.name = name[-1]
        self.base = base
        for t in self.itr:
            im_path = os.path.join(base, t + '.png')
            im = cv2.imread(im_path, 0)
            # Patches are stacked vertically in 65px strips.  Integer
            # division is required: on Python 3 ``/`` yields a float,
            # which np.split rejects as a section count.
            self.N = im.shape[0] // 65
            setattr(self, t, np.split(im, self.N))
| [
"v.balntas@imperial.ac.uk"
] | v.balntas@imperial.ac.uk |
072124f7bf3fc900a33e0ea67e58898b947b9d5c | 87998e751e753dd4d75848ee292631d96b71d608 | /aoc/year2020/day2.py | c37501a97f7ff8492b062238bfec7e3aaeb5d164 | [
"Unlicense"
] | permissive | ongyx/aoc.py | 7da09592985c6688fb766f6c4b80526f51d2c290 | 48d23b2611384cf87b2722d6fd65728bb950c3f9 | refs/heads/main | 2023-01-31T07:50:44.349391 | 2020-12-16T03:43:06 | 2020-12-16T03:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | # coding: utf8
import re
from aoc import aoc
# We use a regex to parse each line, in the format:
# (min)-(max) (letter): (password)
# For part 2, we need to fidde with the Boolean logic abit.
RE_POLICY = re.compile(r"(\d+)-(\d+) (\w): (\w+)")
class Problem(aoc.Problem):
    """Day 2: count the passwords that satisfy their policy line."""

    def solve(self, part):
        valid = 0
        for line in self.dataset_lines:
            low, high, letter, password = RE_POLICY.findall(line)[0]
            low, high = int(low), int(high)
            if part == 1:
                # Policy 1: the letter occurs between low and high times.
                if low <= password.count(letter) <= high:
                    valid += 1
            elif part == 2:
                # Policy 2: exactly one of the two 1-based positions
                # holds the letter (exclusive or).
                if (password[low - 1] == letter) != (password[high - 1] == letter):
                    valid += 1
        return valid
"ongyongxin2020@gmail.com"
] | ongyongxin2020@gmail.com |
80d37d02b7af083195de59d9af45ee86907bdc6d | 06651ac98fd2e89fa9a6b48fe536e5b93172909d | /object_decorator.py | e6dbf69910053c38780b152039108f16845707d1 | [] | no_license | stephclleung/Worksheet-Generator | 3c8e5d8d4e022c30073ea2d67074732074c3afc2 | e3c5226db1ce949f1a2f22448b2bdadd493786f3 | refs/heads/master | 2020-04-12T10:14:38.948411 | 2019-02-25T07:34:43 | 2019-02-25T07:34:43 | 162,423,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | # Objects - Decorator Pattern
#
# Stephanie Leung (2019)
from random import randint
from numpy.random import choice
import nouns_gen as nouns_gen
import adjective_gen as adj_gen
DEBUG = False
class AbstractObject:
    """Marker base class for generated objects and their decorators."""
    pass #Simplifying for now but may need to add in more later.
class Noun(AbstractObject):
    """A noun generated for one item category (e.g. "food")."""

    def __init__(self, item_type):
        self.item_type = item_type
        self.noun = nouns_gen.noun_generate(item_type)

    def get_noun(self):
        """Return the generated noun string."""
        return self.noun
class AdjectiveDecorator(AbstractObject):
    """Base decorator holding a generated adjective for a decorate type."""

    def __init__(self, decorate_type):
        self.decorate_type = decorate_type

    def get_adj(self):
        """Return the generated adjective, or None when it is unset/empty."""
        if not self.adj:
            return None
        return self.adj
class State(AdjectiveDecorator):
    """Adjective decorator backed by ``adj_gen.state_adj_generator``."""
    def __init__(self, decorate_type):
        AdjectiveDecorator.__init__(self, decorate_type)
        self.adj = adj_gen.state_adj_generator(decorate_type)
class Quality(AdjectiveDecorator):
    """Adjective decorator backed by ``adj_gen.quality_adj_generator``."""
    def __init__(self, decorate_type):
        AdjectiveDecorator.__init__(self, decorate_type)
        self.adj = adj_gen.quality_adj_generator(decorate_type)
class Size(AdjectiveDecorator):
    """Adjective decorator backed by ``adj_gen.size_adj_generator``."""
    def __init__(self, decorate_type):
        AdjectiveDecorator.__init__(self, decorate_type)
        self.adj = adj_gen.size_adj_generator(decorate_type)
class Color(AdjectiveDecorator):
    """Adjective decorator backed by ``adj_gen.color_adj_generator``."""
    def __init__(self, decorate_type):
        AdjectiveDecorator.__init__(self, decorate_type)
        self.adj = adj_gen.color_adj_generator(decorate_type)
def get_adjective(decorate_type):
    """Pick a weighted random adjective category for *decorate_type* and
    generate one adjective from it.
    """
    #adj_type = randint(1,3)
    # Color = 1
    # Quality = 2
    # Size = 3
    # State = 4
    if decorate_type in ["food", "drink", "appliance", "clothes"]:
        # all adjectives are fine
        adj_type = choice([1,2,3,4], p=[0.1, 0.2, 0.35, 0.35])
    elif decorate_type == "activities":
        # quality and size only
        adj_type = choice([2,3], p=[0.65, 0.35])
    elif decorate_type == "raw_mats":
        #color, quality only
        # NOTE(review): the comment says colour/quality but the codes
        # picked are Size(3)/Color(1) — confirm the intent.
        adj_type = choice([3,1], p=[0.35, 0.65])
    # NOTE(review): an unknown decorate_type leaves adj_type unbound and
    # raises NameError below; consider a default branch.
    if DEBUG:
        print(f"{decorate_type} {adj_type}")
    if adj_type == 1:
        return Color(decorate_type).get_adj()
    elif adj_type == 2:
        return Quality(decorate_type).get_adj()
    elif adj_type == 3:
        return Size(decorate_type).get_adj()
    elif adj_type == 4:
        return State(decorate_type).get_adj()
def get_quantity_countable(decorate_type):
    """Random quantity 1-8 for countable nouns (*decorate_type* is unused)."""
    return randint(1, 8)
# 1/19 : Note on uncountable nouns, may need to come back and fix.
# 1/19: Implementing temp fix on uncountable nouns (drinks, raw mats)
def get_quantity_uncountable(decorate_type):
    """Quantity phrase for uncountable nouns (drinks, raw materials)."""
    # NOTE(review): ``Quantity`` is not defined anywhere in this module —
    # calling this raises NameError.  It is probably meant to be one of
    # the AdjectiveDecorator subclasses; confirm and fix.
    return Quantity(decorate_type).get_adj()
def get_noun(decorate_type):
    """Generate a random noun of the given category."""
    return Noun(decorate_type).get_noun()
# test1 = Noun("food")
# print(test1.noun)
# print(set_adjective("food")) | [
"iLoveSalmon!1"
] | iLoveSalmon!1 |
78e5bb79876dae249b0a9fd6b8d331c441ff4a64 | f00f97b6450a39ad148eec02609b059c244a2e9c | /meaning of life.py | 26a471413d33aac06d6f1109d17895d5bc102dcc | [] | no_license | BridgeFour4/pythonguiexamples | 0eaae8fe71268161995af8269f40eab39cc719ed | 79b7304ab2d207b215eb48064aa1b309ec169b6a | refs/heads/master | 2020-04-27T14:43:32.889899 | 2019-03-11T20:11:17 | 2019-03-11T20:11:17 | 174,418,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | from tkinter import *
class Application(Frame):
    """Password demo frame: reveals the 'secret' (42) on the right password."""
    def __init__(self,master):
        super(Application,self).__init__(master)
        self.grid()
        self.create_widgets()
    def create_widgets(self):
        """Build and lay out the label/entry/button/text widgets."""
        self.configure(bg="silver")
        self.labelone = Label(self,text= "Enter password for the secret life")
        self.labelone.grid(row=0, column=0,columnspan=2,sticky=EW)
        self.labeltwo= Label(self, text="password:")
        self.labeltwo.grid(row=1, column=0, sticky=EW)
        self.pw_ent =Entry(self,bg="black")
        self.pw_ent.grid(row=2, column=0, columnspan=2, sticky=W)
        self.buttonsubmit = Button(self, text="Submit", command=self.reveal)
        self.buttonsubmit.grid(row=2, column=1,columnspan=2, sticky=EW)
        self.secret_txt = Text(self, width=35, height=5, wrap=WORD,bg='orange',fg="white")
        self.secret_txt.grid(row=4, column=0, columnspan=3, sticky=E)
    def reveal(self):
        """Check the entered password and display the secret or an error."""
        contents = self.pw_ent.get()
        # NOTE(review): the password is hard-coded in plain text.
        if contents =="secret":
            message="42"
        else:
            message="That's not the correct password so I can't share the secret with you"
        self.secret_txt.delete(0.0, END)
        self.secret_txt.insert(0.0, message)
# Build the root window and start the Tk event loop.
root=Tk()
# NOTE(review): the window title says "Click Counter" but the app is a
# password prompt -- looks like a leftover from another exercise; confirm.
root.title("Click Counter")
root.geometry("300x200")
root.configure(bg="black")
app=Application(root)
root.mainloop()
"nathan.broadbent@tooeleschools.org"
] | nathan.broadbent@tooeleschools.org |
5b17fbdae955c7e13c8c9c2642f0d53af96add6b | 03f00d55672f0cd1edf2a5e5aacb4896aee624a9 | /company/models.py | d28c471e7823cccb51433b71f60d03287a7ee2c9 | [] | no_license | progiri/aiu_site | fe34cb884f01010e522855ade73c0a3be6476f02 | 17e81163c12d4861500d8d7d8e0207ded144f305 | refs/heads/main | 2023-06-11T15:04:50.658387 | 2021-06-29T04:17:17 | 2021-06-29T04:17:17 | 378,497,650 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | from django.db import models
class Location(models.Model):
    """City where a company is located (verbose names are in Russian)."""
    title = models.CharField(
        max_length=255,
        verbose_name='Название'
    )
    class Meta:
        verbose_name = 'Местоположение (город)'
        verbose_name_plural = 'Местоположения (город)'
    def __str__(self) -> str:
        # Admin/display representation is just the city name.
        return self.title
class Position(models.Model):
    """Job position/title associated with a company."""
    title = models.CharField(
        max_length=255,
        verbose_name='Название'
    )
    class Meta:
        verbose_name = 'Должность'
        verbose_name_plural = 'Должности'
    def __str__(self) -> str:
        # Admin/display representation is just the position name.
        return self.title
class Company(models.Model):
    """Company with an optional location (city) and an optional position."""
    title = models.CharField(
        max_length=255,
        verbose_name='Название'
    )
    description = models.CharField(
        max_length=2555,
        verbose_name='Описание'
    )
    # SET_NULL keeps the company record alive if its location is deleted.
    location = models.ForeignKey(
        to=Location,
        on_delete=models.SET_NULL,
        related_name='companies',
        null=True, blank=True,
        verbose_name='Местоположение (город)'
    )
    # SET_NULL keeps the company record alive if the position is deleted.
    position = models.ForeignKey(
        to=Position,
        on_delete=models.SET_NULL,
        related_name='companies',
        null=True, blank=True,
        verbose_name='Должность'
    )
    class Meta:
        verbose_name = 'Компания'
        verbose_name_plural = 'Компании'
    def __str__(self) -> str:
        return self.title
| [
"yerassyl.ak@mail.ru"
] | yerassyl.ak@mail.ru |
adfd6bfcac5e5eed8bffd7e1f79d115c31bb2607 | da04aa71c2802fa6f9c1fd463f31ad4e4e514df9 | /heartbeat plot.py | dba47c3bd4c508f9c1ad21e9e5c136d7508d4c77 | [] | no_license | JayDosunmu/shield | e698dde7de57277d361e212c41ea986c0c7f21ad | 89f9444f6f98f06db0c04858f1b2bee1ce01ca43 | refs/heads/master | 2021-01-17T09:46:29.085320 | 2017-03-05T18:52:47 | 2017-03-05T18:52:47 | 83,992,746 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import plotly.plotly as py
import plotly.graph_objs as go
import pyrebase
import time
# Firebase connection settings for the hackathon project.
# NOTE(review): the apiKey starts with "AlzaSy" -- Google API keys normally
# start with "AIzaSy"; this may be a deliberately broken placeholder. Confirm.
config = {
    "apiKey":"AlzaSyBcPkjp50hVFQj3jL7NdCMul0Cw9jP5gkc",
    "authDomain":"hacktech-12dad.firebaseapp.com",
    "databaseURL":"https://hacktech-12dad.firebaseio.com",
    "storageBucket":"",
}
fb = pyrebase.initialize_app(config)
db = fb.database()
# Fetch all snapshot records from the realtime database.
data = db.child('snapshots').get()
# .each() yields the individual child records.
# NOTE(review): pyrebase's .each() can return None when the node is empty,
# which would make the loop below raise -- confirm the node always has data.
data = data.each()
print(data)
# Extract the heartbeat change value from every snapshot.
# assumes each record has a 'heartbeat' dict with a 'change' key -- TODO confirm
y = [change.get('heartbeat').get('change') for change in data]
#print(y)
| [
"tjdosunmu@gmail.com"
] | tjdosunmu@gmail.com |
f67485b6750b8322f509252eb0862cc7b39aa952 | 38c4f683d6fe10d671834549fe80406b1de75ced | /api/users/migrations/0001_initial.py | 830889b99ddfc74ce22d6d468ae8a4c0f8853664 | [
"BSD-2-Clause"
] | permissive | charles-vdulac/django-rest-skeleton | 1a3b05e349eaad82e0e99477aedc8dad4ff1698a | 2e621c030046f317b01359b716dae2120e1f7525 | refs/heads/master | 2020-12-24T10:53:55.305379 | 2014-06-12T13:09:12 | 2014-06-12T13:09:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,961 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the custom ``users.User`` model and its
    group/permission many-to-many join tables.

    NOTE: migrations are a historical record -- the schema operations and the
    frozen ``models`` dict below must not be edited after being applied.
    """
    # This migration must run before authtoken's initial migration, which
    # has a foreign key to the user table.
    needed_by = (
        ("authtoken", "0001_initial"),
    )
    def forwards(self, orm):
        """Apply the migration: create users_user and its M2M tables."""
        # Adding model 'User'
        db.create_table(u'users_user', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('uid', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=254)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal(u'users', ['User'])
        # Adding M2M table for field groups on 'User'
        m2m_table_name = db.shorten_name(u'users_user_groups')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'users.user'], null=False)),
            ('group', models.ForeignKey(orm[u'auth.group'], null=False))
        ))
        db.create_unique(m2m_table_name, ['user_id', 'group_id'])
        # Adding M2M table for field user_permissions on 'User'
        m2m_table_name = db.shorten_name(u'users_user_user_permissions')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'users.user'], null=False)),
            ('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
        ))
        db.create_unique(m2m_table_name, ['user_id', 'permission_id'])
    def backwards(self, orm):
        """Revert the migration: drop users_user and its M2M tables."""
        # Deleting model 'User'
        db.delete_table(u'users_user')
        # Removing M2M table for field groups on 'User'
        db.delete_table(db.shorten_name(u'users_user_groups'))
        # Removing M2M table for field user_permissions on 'User'
        db.delete_table(db.shorten_name(u'users_user_user_permissions'))
    # Frozen ORM snapshot used by South to reconstruct model state.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'users.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        }
    }
    complete_apps = ['users']
| [
"sebastibe@gmail.com"
] | sebastibe@gmail.com |
7d718cf991da1edadb32ecc20b535482d1412c2f | 9c6e055fc8c15d5a2e1dbec40cf2ab6534b49793 | /nourisher/settings.py | 264a24d78a7ca4f660a040e60b4b486ec63c91ab | [] | no_license | hnykda/nourisher | 0d70bac6a19ed95656e7ef51a4bb18e8b4aae9d2 | 6049d5b47c9004b3badfee89bcf7c259c84c4442 | refs/heads/master | 2021-01-20T16:12:04.940861 | 2015-07-27T16:42:04 | 2015-07-27T16:42:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | import logging
log = logging.getLogger(__name__)
"""
Created on Dec 22, 2014
@author: dan
"""
"""
Here is possible to set customizables - global variables
"""
### DATABASE ###
DB_IP = "localhost" # IP where MongoDB is running
# NOTE(review): 5432 is the PostgreSQL default port; MongoDB normally uses
# 27017 -- confirm which database is actually in use.
DB_PORT = 5432 # port where the database is running
DB_NAME = "testdb" # name of the default database
DB_COLLECTION = "feeds" # name of the collection in the database
### MATERNALSITE ###
# which selenium.webdriver and settings
# should be used for scrapping. Possible values in maternaSite.Scraper
DEFAULT_DRIVER = "phantomjs"
ARTICLES_LIMIT = 25 # presumably caps articles scraped per feed -- TODO confirm
### VERBOSITY ###
#VERBOSITY = 1 # Verbosity of std output (currently implemented 0, 1, 2)
def get_setings():
    """Log the current module-level settings at DEBUG level."""
    parts = (DB_COLLECTION, str(DB_PORT), DB_IP, DB_NAME, DEFAULT_DRIVER)
    log.debug(" ".join(parts))
# class SETTER:
# ### DATABASE ###
# DB_IP = "localhost" # IP where is MongoDB running
# DB_PORT = 5432 # port where is database running
# DB_NAME = "testdb" # Name of default database
# DB_COLLECTION = "feeds" # Name of collection in database
#
#
# ### MATERNALSITE ###
# # which selenium.webdriver and settings
# # should be used for scrapping. Possible values in maternaSite.Scraper
# DEFAULT_DRIVER = "phantomjs"
#
# ### VERBOSITY ###
# VERBOSITY = 1 # Verbosity of std output (currently implemented 0, 1, 2)
#
# def set_db_collection( self, name ):
# self.DB_COLLECTION = name
#
# def get_db_collection( self ):
# return( self.DB_COLLECTION )
| [
"kotrfa@gmail.com"
] | kotrfa@gmail.com |
284cf89f5485e9276d982fb582c9377e25489e30 | 920a5a8df0a6afbdcc1363fb74292074dec3b516 | /tf2onnx/rewriter/random_uniform.py | 23c450964d2db499b4dcc134d45f931bca7fd276 | [
"MIT"
] | permissive | anttisaukko/tensorflow-onnx | a674a6ce0002912c42914a69e8333b379391af46 | 1341bdf476df6023b75bc6b3c6e4cda00cc58a29 | refs/heads/master | 2020-04-08T21:17:35.616038 | 2018-11-28T14:47:27 | 2018-11-28T14:47:27 | 159,737,860 | 0 | 0 | MIT | 2018-11-29T22:54:42 | 2018-11-29T22:54:41 | null | UTF-8 | Python | false | false | 3,044 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewrite - rewrite tensorflow subgraph to onnx random_uniform op
"""
from onnx import helper
from tf2onnx.graph import Node
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx import utils
from tf2onnx.utils import port_name
# pylint: disable=missing-docstring
def rewrite_random_uniform(g, ops):
pattern = \
OpTypePattern('Add', name='output', inputs=[
OpTypePattern('Mul', inputs=[
OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
OpTypePattern('Sub', name='input2', inputs=["*", "*"]),
]), None
])
matcher = GraphMatcher(pattern)
match_results = list(matcher.match_ops(ops))
for match in match_results:
input2 = match.get_op('input2')
output = match.get_op('output')
ru_op = match.get_op('input1')
# max is on input 0
tmax = input2.inputs[0].get_tensor_value()[0]
tmin = input2.inputs[1].get_tensor_value()[0]
new_node = create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output)
ops = g.replace_subgraph(ops, match, [], [output], [], [new_node])
return ops
# rewriter function when fold_const is enabled
def rewrite_random_uniform_fold_const(g, ops):
pattern = \
OpTypePattern('Add', name='output', inputs=[
OpTypePattern('Mul', name='mul', inputs=[
OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
None,
]),
None,
])
matcher = GraphMatcher(pattern)
match_results = list(matcher.match_ops(ops))
for match in match_results:
output = match.get_op('output')
mul = match.get_op('mul')
ru_op = match.get_op('input1')
tmax_minus_tmin = mul.inputs[1].get_tensor_value()[0]
tmin = output.inputs[1].get_tensor_value()[0]
tmax = tmin + tmax_minus_tmin
new_node = create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output)
ops = g.replace_subgraph(ops, match, [], [output], [], [new_node])
return ops
def create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output):
dtype = output.dtype
op_name = utils.make_name("RandomUniform")
out_name = port_name(op_name)
if ru_op.inputs[0].type == "Shape":
shape_node = ru_op.inputs[0]
new_node = Node(helper.make_node("RandomUniformLike",
[shape_node.input[0]], [out_name], name=op_name,
low=tmin, high=tmax, dtype=dtype), g)
else:
shape = g.get_shape(output.output[0])
new_node = Node(helper.make_node("RandomUniform",
[], [out_name], name=op_name,
low=tmin, high=tmax, dtype=dtype, shape=shape), g)
return new_node
| [
"pengwa@microsoft.com"
] | pengwa@microsoft.com |
ddc6ecb74c82d9731a0332ca8a4960eb0ace0285 | b78b3cb27b84b79246dd24b88108a50d7e52edb6 | /pprzrest/mSimProcMon.py | 8c89289003c555db1ba0caef5ff130560f99494f | [] | no_license | savass/paparazzi_box_frontend | 93012f2f0919785c41b5a14509c1a6103f1605ce | 1b91659b890d16f897b63dc224836af29d0d2f06 | refs/heads/master | 2021-01-25T07:34:31.278819 | 2015-04-22T13:43:44 | 2015-04-22T13:43:44 | 34,392,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,442 | py | #Simple monitoring tool to watch agents
import time
from threading import Thread
import fcntl
import os
from mProcessHolder import ProcessHolder
class SimProcMon:
    """Supervises external agent processes (link/server/app_server):
    starts and stops them, restarts crashed ones from a daemon thread, and
    pushes their stdout plus status flags to web clients over Socket.IO.

    NOTE(review): the attributes below are class-level mutables, so all
    SimProcMon instances share one process list and output buffer --
    confirm only a single instance is ever created.
    """
    ProcessDemonThread = None   # background Thread running process_deamon()
    RunProcessDeamon = True     # daemon loop keeps running while True
    ProcessList = []            # ProcessHolder objects under supervision
    ProcessOutput = ""          # accumulated HTML log shown to clients
    def __init__(self, Logger, SocketIo=None):
        # Logger is required; SocketIo may be attached later by the caller.
        self.mLog = Logger
        self.SocketIo = SocketIo
        self.mLog.debug("New SimProcMon class created.")
    def inform_clients(self):
        """Broadcast the current log and per-agent status/args to all clients."""
        self.mLog.debug("Informing clients..")
        self.SocketIo.emit('StatusMsg',{'data': self.ProcessOutput,
                                        'link_sts': self.get_status("link"),
                                        'server_sts': self.get_status("server"),
                                        'app_server_sts': self.get_status("app_server"),
                                        'link_arg': self.get_process_arg("link"),
                                        'server_arg':self.get_process_arg("server"),
                                        'app_server_arg': self.get_process_arg("app_server")},namespace='/PprzOnWeb')
    def get_status(self, ProcessName):
        """Return the running flag of the named process, False if unknown."""
        for mProcess in self.ProcessList:
            if mProcess.ProcName == ProcessName:
                return mProcess.ProcessRunning
        return False
    def stop_process(self, ProcessName):
        """Kill the named process, drop it from the list and notify clients."""
        for mProcess in self.ProcessList:
            if mProcess.ProcName == ProcessName:
                # Clear the flag first so the daemon does not restart it.
                mProcess.RunProcess = False
                if not mProcess.ProcessAgent is None:
                    mProcess.ProcessAgent.kill()
                    mProcess.ProcessAgent = None
                    self.mLog.info("%s agent terminated.", mProcess.ProcName)
                    #remove from ProcessList
                    self.ProcessList.remove(mProcess)
                    self.ProcessOutput = self.ProcessOutput + mProcess.ProcName + " agent terminated."+ "<br />"
                    self.inform_clients()
                    return
                else:
                    self.mLog.error("%s process agent is None.", mProcess.ProcName)
    def start_process(self, ProcessName, RunStr):
        """Launch RunStr as a new supervised process named ProcessName."""
        #need to check if process already started.
        if self.get_status(ProcessName):
            self.mLog.error("%s is already started.", ProcessName)
            return
        nPH = ProcessHolder(ProcessName, RunStr, True)
        nPH.run()
        #append the process to process list
        self.ProcessList.append(nPH)
        self.mLog.info("Trying to start %s agent.", ProcessName)
    def process_deamon(self):
        """Daemon loop: every 0.5 s drain each process's stdout, detect
        exits via poll(), restart crashed agents and report to clients."""
        self.mLog.info("Process monitor started.")
        while self.RunProcessDeamon:
            time.sleep(0.5)
            for mProcess in self.ProcessList:
                #first need to check if process is running or not.
                if mProcess.RunProcess and not mProcess.ProcessAgent is None:
                    #print "mProcess ", mProcess.ProcessRunning
                    # Make the pipe non-blocking so read() never stalls the loop.
                    fd = mProcess.ProcessAgent.stdout.fileno()
                    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
                    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
                    try:
                        AppOut = mProcess.ProcessAgent.stdout.read()
                        if AppOut:
                            self.ProcessOutput = self.ProcessOutput + "<br />".join(AppOut.split("\n"))
                            self.inform_clients()
                    # NOTE(review): bare except silently swallows everything,
                    # not just the EAGAIN expected from a non-blocking read --
                    # consider narrowing to IOError/OSError.
                    except:
                        pass
                    if not mProcess.ProcessRunning:
                        #give some time to process
                        self.mLog.info("%s seems to be fresh started. Waiting..", mProcess.ProcName)
                        time.sleep(1.5)
                    # poll() returns the exit code once the process has died.
                    if not mProcess.ProcessAgent.poll() is None:
                        #process error <span class="myClass">test</span>
                        self.ProcessOutput = self.ProcessOutput + '<span class="MonError">' + mProcess.ProcName + ' stopped.</span> <br />'
                        if mProcess.RunProcess and mProcess.ProcessRunning:
                            # It had been running before: attempt a restart.
                            mProcess.run()
                            self.ProcessOutput = self.ProcessOutput + '<span class="MonOutput">' + mProcess.ProcName + ' restarted. </span> <br />'
                            self.mLog.info("%s.ProcessAgent.poll() is None > ProcessRunning.", mProcess.ProcName)
                            self.inform_clients()
                        else:
                            # Died before ever being marked running: give up on it.
                            self.mLog.info("%s.ProcessAgent.poll() is None > startup failed!", mProcess.ProcName)
                            self.ProcessList.remove(mProcess)
                            self.inform_clients()
                        continue
                    #seems like process started successfully
                    if not mProcess.ProcessRunning:
                        mProcess.ProcessRunning = True
                        self.ProcessOutput = self.ProcessOutput + '<span class="MonOutput">' + mProcess.ProcName + ' agent started.. </span> <br />'
                        self.mLog.info("%s agent started.", mProcess.ProcName)
                        self.inform_clients()
            #print len(ProcessList)
        self.mLog.info("Process monitor stopped.")
    def get_process_arg(self,ProcessName):
        """Return the launch arguments of the named process, or None."""
        for mProcess in self.ProcessList:
            if mProcess.ProcName == ProcessName:
                return mProcess.ProcArg
        return None
    def run_process_mon_deamon(self):
        """Start process_deamon() on a daemon thread (at most once)."""
        if self.ProcessDemonThread is None:
            self.ProcessDemonThread = Thread(target=self.process_deamon)
            self.ProcessDemonThread.daemon = True
            self.ProcessDemonThread.start()
        else:
            self.mLog.error("Process monitor deamon already stared!")
"sen.savas@gmail.com"
] | sen.savas@gmail.com |
cdede1a5eecbcfedafb5177344e00ab2dc3e9821 | a65e5dc54092a318fc469543c3b96f6699d0c60b | /Personel/Yash/Python/april5/prog5.py | a9186b792a3f46d892bdf941b3d00acb7fecf2ff | [] | no_license | shankar7791/MI-10-DevOps | e15bfda460ffd0afce63274f2f430445d04261fe | f0b9e8c5be7b28298eb6d3fb6badf11cd033881d | refs/heads/main | 2023-07-04T15:25:08.673757 | 2021-08-12T09:12:37 | 2021-08-12T09:12:37 | 339,016,230 | 1 | 0 | null | 2021-08-12T09:12:37 | 2021-02-15T08:50:08 | JavaScript | UTF-8 | Python | false | false | 281 | py | # To delete a file, you must import the OS module, and run its os.remove() function:
import os

# Remove the demo file when present; otherwise report that it is missing.
target = "demofile.txt"
if not os.path.exists(target):
    print("The file does not exist")
else:
    os.remove(target)
# A whole (empty) folder would be removed with os.rmdir():
# os.rmdir("myfolder")
"malavade47@gmail.com"
] | malavade47@gmail.com |
1aaceb54e5b237cd1f0061a9184ee10f4afd7a63 | cba5017525d30f84f4555bc0e10f1f83126f1d4a | /PowerClient/bin/main.py | 2b7af5a8bdb8d84c2687ddb6d2eed2ca8d1375e7 | [
"Apache-2.0"
] | permissive | cycmay/SolarS | 66e97a0de6b459f8bb05b03c2690d9852d92209a | 284bcafa5da210e5c4200d19e46b3fa6bb5acb20 | refs/heads/master | 2020-05-23T18:09:09.760666 | 2019-05-24T15:42:25 | 2019-05-24T15:42:25 | 186,882,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
完全可以把信息收集客户端做成Windows和Linux两个不同版本。
"""
import os
import sys
BASE_DIR = os.path.dirname(os.getcwd())
# 设置工作目录,使得包和模块能够正常导入
sys.path.append(BASE_DIR)
from core import handler
if __name__ == '__main__':
handler.ArgvHandler(sys.argv)
| [
"1769614470@qq.com"
] | 1769614470@qq.com |
56255c3a178b2823db3fdb697e972ac01d1e2f03 | 3a5dab20e575c9d6d1f671faba7da8f6085d9ed8 | /RepositorioInfo/chacoferia/apps/inicio/views.py | eab1b18c67ddcafd6cb4c9da16ca566ff43dd7c6 | [] | no_license | Diego-Caza/ProyectoFinal | 77cb45310375e31c2924b10ecd2dfad9533f2982 | 47ce682027d4230a494e4b875367a65e386a0746 | refs/heads/master | 2022-12-19T12:35:49.020185 | 2020-10-01T16:37:31 | 2020-10-01T16:37:31 | 300,335,385 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from django.shortcuts import render
from apps.publicaciones.models import Producto
from django.views.generic import ListView, DetailView, CreateView
# Create your views here.
class Populares(ListView):
    """Landing page listing the four most recently added products."""
    model = Producto
    template_name = 'inicio/inicio.html'
    # Newest first by primary key; the slice keeps only the top four.
    queryset = Producto.objects.order_by('-id')[:4]
class PopularesUser(ListView):
    """Same four-newest-products listing, rendered with the logged-in
    user's template variant."""
    model = Producto
    template_name = 'inicio/usuario/inicio.html'
    # Newest first by primary key; the slice keeps only the top four.
    queryset = Producto.objects.order_by('-id')[:4]
| [
"cazadiegoy@gmail.com"
] | cazadiegoy@gmail.com |
38840ebcb8cb679446cceb6b8beace276bf9b1a7 | 415e9349304a1fc2555a55ff0e3caff3fce26e68 | /hydrogen_atom.py | 214b1a0a8884d76d855e9fc4a7fc2faa8d7bf7eb | [] | no_license | einathauser/tau-dmc-2017 | 9a3ba554e5565628c227c8ba04167ef8790f6571 | 154a953b5905545739908f4216e33b2204c151ff | refs/heads/master | 2021-01-23T09:39:49.761259 | 2017-09-13T13:12:54 | 2017-09-13T13:12:54 | 102,589,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.mlab as mlab
MAX_X_SIZE = 1e6
def sample_bins(x_max, x, n_boxes):
r_max = np.sqrt(float(3*x_max**2))
radius = np.array(np.sqrt(x[:, 0]**2 + x[:, 1]**2 + x[:, 2]**2)).astype(float)
bins, _ = np.histogram(radius, bins=np.linspace(0.0, r_max, n_boxes + 1))
return bins
def wave_function(density, x_max, n_boxes, bin_size):
sum_of_square = 0
delta_r = np.sqrt(3*x_max**2).astype(float)/n_boxes
new_matrix = (density**2)
for item in new_matrix:
sum_of_square += item
psi_x = density.astype(float) / ((bin_size)*float(np.sqrt(delta_r*(sum_of_square))))
return psi_x
def m(W_x):
return np.minimum(np.trunc(W_x + np.random.uniform(0.0, 1.0, W_x.size)), 3).astype(int)
def V(e, x):
distance = np.array(np.sqrt(x[:, 0]**2 + x[:, 1]**2 + x[:, 2]**2)).astype(float)
return ((-e**2) / distance)
def W(V_x, e_r, dt):
return np.exp(-(V_x - e_r) * dt)
def particle_locations(x, dt):
sigma = np.sqrt(dt)
row = np.random.randn(len(x), 3)
location = x + sigma*row
return location
def energy(v_x, n_pre, n_0, dt):
return np.average(v_x) + (1.0 - float(n_pre) / n_0) / dt
# if (previous * 100 / avg_e_r) <= 5:
# time_interval.append(time)
def run_dmc(dt, n_times, n_0, x_min, x_max, n_boxes, sample_from_iteration, e):
x = np.array([[0, 0, 1]]*n_0).astype(float)
V_x = V(e, x)
e_r = np.average(V_x)
e_rs = [e_r]
bins = np.zeros(n_boxes)
psi = 0
a = np.arange(n_boxes)
bin_size = (np.sqrt(3*x_max**2).astype(float)/n_boxes)*(a + 0.5)
matrix_of_volumes = (0+(4*np.pi*(float(np.sqrt(3*(x_max**2)))**3) / (3*n_boxes**3)))*(a + 0.5)
for i in range(n_times):
# creates a vector of number with step dt. i gives the items in the list
x = particle_locations(x, dt)
V_x = V(e,x)
W_x = W(V_x, e_r, dt)
m_x = m(W_x)
x = np.repeat(x, m_x, axis=0)
n_previous = len(x)
# print('Round %d m_x: %s' % (i, np.mean(m_x)))
e_r = energy(V_x, n_previous, n_0, dt)
print e_r
# previous_avg = np.average(e_rs)
e_rs.append(e_r)
if len(x) > MAX_X_SIZE:
raise Exception('x is too big, aborting!')
if i > sample_from_iteration:
bins += sample_bins(x_max, x, n_boxes)
density = bins/matrix_of_volumes
psi = wave_function(density, x_max, n_boxes, bin_size)
avg_e_r = np.average(e_rs[sample_from_iteration:])
standard_dev = np.std(e_rs[sample_from_iteration:])
r_psi = psi*bin_size
plt.xlim(0.0, 7.5)
plt.ylim(0.0, 2.2)
plt.title("DMC Hydrogen atom")
psi_analytic = 2*np.exp(-bin_size)
r_psi_analytic = bin_size*psi_analytic
plt.plot(bin_size, r_psi, color = 'green')
# plt.plot(e_rs)
plt.plot(bin_size, psi_analytic, color = 'blue')
plt.plot(bin_size, psi, color = 'red')
plt.plot(bin_size, r_psi_analytic, color='yellow')
plt.show()
return standard_dev, avg_e_r
if __name__ == "__main__":
# execute only if run as a script
n_0 = 5000
x_min = 0.0
x_max = 10.0
# x_max=y_max=z_max. these are the values which indicate location that you can enter the matrix
# the values can be different but their range can't because it's a sphere.
n_boxes = 2000
dt = 0.1
n_times = 2000
sample_from_iteration = 100
e = 1
print run_dmc(dt, n_times, n_0, x_min, x_max, n_boxes, sample_from_iteration, e) | [
"ehauzer11@gmail.com"
] | ehauzer11@gmail.com |
d3d12734a381e7185f0368cdd0b926088c0b7116 | 648f5819d70686852720d6b598b66ea37cc49b71 | /setup.py | 1a9e1c6cd8a870c6d9ed3796176d7514507caf98 | [
"MIT"
] | permissive | itoldman/chinese_province_city_area_mapper | 88981f1e74507ac3cdd8ff0b60a7b5716bb701f4 | 7d7537a412dc1c543f3481f5520b3c75cf49ebb0 | refs/heads/master | 2020-03-30T12:46:11.678976 | 2018-09-26T17:59:50 | 2018-09-26T17:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,033 | py | # -*- coding: utf-8 -*-
from setuptools import setup
LONGDOC = """
chinese_province_city_area_mapper
==================================
chinese_province_city_area_mapper:一个用于识别简体中文字符串中省,市和区并能够进行映射,检验和简单绘图的python模块
举个例子::
["徐汇区虹漕路461号58号楼5楼", "泉州市洛江区万安塘西工业区"]
↓ 转换
|省 |市 |区 |地址 |
|上海市|上海市|徐汇区|虹漕路461号58号楼5楼 |
|福建省|泉州市|洛江区|万安塘西工业区 |
chinese_province_city_area_mapper: built to be recognize Chinese province,city and area in simplified Chinese string, it can automaticall map area to city
and map city to province.
for example::
["徐汇区虹漕路461号58号楼5楼", "泉州市洛江区万安塘西工业区"]
↓ transform
|省 |市 |区 |地址 |
|上海市|上海市|徐汇区|虹漕路461号58号楼5楼 |
|福建省|泉州市|洛江区|万安塘西工业区 |
完整文档见该模块的Github,
GitHub: `https://github.com/DQinYuan/chinese_province_city_area_mapper <https://github.com/DQinYuan/chinese_province_city_area_mapper>`_
特点
====
- 基于jieba分词进行匹配,同时加入了一些额外的校验匹配逻辑保证了准确率
- 因为jieba分词本身只有80%的准确率,经常会分错,所以引入了全文匹配的模式,这种模式下能够提高准确率,但会导致性能降低,关于如何开启这个模式见Github上的使用文档
- 如果地址数据比较脏的,不能指望依靠这个模块达到100%的准确,本模块只能保证尽可能地提取信息,如果想要达到100%准确率的话,最好在匹配完后再人工核验一下
- 自带完整的省,市,区三级地名及其经纬度的数据
- 支持自定义省,市,区映射
- 输出的是基于pandas的DataFrame类型的表结构,易于理解和使用
- 封装了简单的绘图功能,可以很方便地进行简单的数据可视化
- MIT 授权协议
安装说明
========
代码目前仅仅支持python3
pip install cpca
Get Started
============
本模块中最主要的方法是cpca.transform,
该方法可以输入任意的可迭代类型(如list,pandas的Series类型等),
然后将其转换为一个DataFrame,下面演示一个最为简单的使用方法::
location_str = ["徐汇区虹漕路461号58号楼5楼", "泉州市洛江区万安塘西工业区", "朝阳区北苑华贸城"]
from cpca import *
df = transform(location_str)
df
输出的结果为::
区 市 省 地址
0 徐汇区 上海市 上海市 虹漕路461号58号楼5楼
1 洛江区 泉州市 福建省 万安塘西工业区
2 朝阳区 北京市 北京市 北苑华贸城
**全文模式**:
jieba分词并不能百分之百保证分词的正确性,在分词错误的情况下会造成奇怪的结果,比如下面::
location_str = ["浙江省杭州市下城区青云街40号3楼","广东省东莞市莞城区东莞大道海雅百货"]
from cpca import *
df = transform(location_str)
df
输出的结果为::
区 市 省 地址
城区 东莞市 广东省 莞大道海雅百货自然堂专柜
城区 杭州市 浙江省 下青云街40号3楼
这种诡异的结果因为jieba本身就将词给分错了,所以我们引入了全文模式,不进行分词,直接全文匹配,使用方法如下::
location_str = ["浙江省杭州市下城区青云街40号3楼","广东省东莞市莞城区东莞大道海雅百货"]
from cpca import *
df = transform(location_str, cut=False)
df
输出结果::
区 市 省 地址
下城区 杭州市 浙江省 青云街40号3楼
莞城区 东莞市 广东省 大道海雅百货
这些就完全正确了,不过全文匹配模式会造成效率低下,我默认会向前看8个字符(对应transform中的lookahead参数默认值为8),这个是比较保守的,因为有的地名会比较长(比如“新疆维吾尔族自治区”),如果你的地址库中都是些短小的省市区名的话,可以选择将lookahead设置得小一点,比如::
location_str = ["浙江省杭州市下城区青云街40号3楼","广东省东莞市莞城区东莞大道海雅百货"]
from cpca import *
df = transform(location_str, cut=False, lookahead=3)
df
输出结果与上面一样。
如果还想知道更多的细节,请访问该
模块的github地址 `https://github.com/DQinYuan/chinese_province_city_area_mapper <https://github.com/DQinYuan/chinese_province_city_area_mapper>`_,
在那里我写了更多的细节.
"""
# Runtime dependencies, using setuptools' parenthesised version-pin syntax.
requires = ['pandas(>=0.20.0)',
            'jieba(>=0.39)',
            ]
# Package metadata for the cpca (Chinese province/city/area) library.
setup(name='cpca',
      version='0.3.4',
      description='Chinese Province, City and Area Recognition Utilities',
      long_description=LONGDOC,
      author='DQinYuan',
      author_email='sa517067@mail.ustc.edu.cn',
      url='https://github.com/DQinYuan/chinese_province_city_area_mapper',
      license="MIT",
      classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Natural Language :: Chinese (Simplified)',
        'Programming Language :: Python :: 3.6',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: Indexing',
      ],
      keywords='Simplified Chinese,Chinese geographic information,Chinese province city and area recognition and map',
      packages=['chinese_province_city_area_mapper', ''],
      package_dir={'chinese_province_city_area_mapper':'chinese_province_city_area_mapper',
                   '':'.',}, # must be written as '.', not './'
      install_requires = requires,
      )
"932087612@qq.com"
] | 932087612@qq.com |
e9a4c0ee8774a16092863b3972e7e903593cac32 | 492cb86b533bc74962a0e25ad190dab131f7cb09 | /humanScape/urls.py | d66fdef307d8290976f7ee67668986092280f3c9 | [] | no_license | acdacd66/humanscape | 75f27815f6c1ac5975b3822e5abc5738aa9b3118 | 6fbeeca3346569c7f861bbffcbec731a6a9d6e51 | refs/heads/main | 2023-09-02T01:55:49.806746 | 2021-11-16T17:29:36 | 2021-11-16T17:29:36 | 428,570,173 | 0 | 1 | null | 2021-11-16T11:22:32 | 2021-11-16T08:10:30 | Python | UTF-8 | Python | false | false | 820 | py | """humanScape URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    path('admin/', admin.site.urls),
    # All clinical-information endpoints live under /clinical/.
    path("clinical/", include("clinicalInformation.urls")),
]
| [
"you@example.com"
] | you@example.com |
868e5aecbeb632ef54b45e30b589b06ef555afab | 8a8dc992231e30f70e2eb645a2d869a730d3c052 | /backendvfinal4.py | c2f42ef346ee5a8a29839e1e19c4f4175b16f97a | [] | no_license | severlove/backend_project | 111c3f34fb9891f6a1e0f02cab6530e099215dca | 48c3f2c0d2c7ff6bd5d520303eda4deff229611e | refs/heads/master | 2020-04-27T07:53:17.313968 | 2019-03-07T16:37:09 | 2019-03-07T16:37:09 | 174,151,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | #!/usr/bin/env python3
import psycopg2
def top_3_articles():
    """Print the three most-viewed articles of all time.

    Joins articles to log entries by slug (the path minus its
    '/article/' prefix) in the local 'news' database.

    Fix: the original body used a Python 2 print statement, which is a
    syntax error under this file's python3 shebang; it is now a print()
    call, and the results are iterated directly instead of by index.
    """
    db = psycopg2.connect("dbname=news")
    c = db.cursor()
    query = """SELECT articles.title, count(articles.title) as num
        FROM articles,log
        WHERE articles.slug = SUBSTRING(log.path, 10)
        GROUP BY title
        ORDER BY num DESC
        LIMIT 3;"""
    c.execute(query)
    results = c.fetchall()
    db.close()
    print("\nTop 3 Articles:\n")
    for title, views in results:
        print("\"" + title + "\" - " + str(views) + " views")
def popular_authors():
    """Print every author with their total article views, most viewed first.

    Fix: the original body used a Python 2 print statement, which is a
    syntax error under this file's python3 shebang; it is now a print()
    call, and the results are iterated directly instead of by index.
    """
    db = psycopg2.connect("dbname=news")
    c = db.cursor()
    query = """SELECT authors.name, count(articles.author) as num
        FROM articles, log, authors
        WHERE articles.slug = SUBSTRING(log.path, 10) and
        articles.author = authors.id
        GROUP BY authors.name
        ORDER BY num DESC"""
    c.execute(query)
    results = c.fetchall()
    db.close()
    print("\nPopular Authors:\n")
    for name, views in results:
        print(name + " - " + str(views) + " views")
def errors():
    """Print each day on which more than 1% of requests returned an error."""
    db = psycopg2.connect("dbname=news")
    c = db.cursor()
    query = """SELECT Date, Total, Error,
        (Error::float*100)/Total::float as Percent
        FROM (
        SELECT to_char(time::timestamp::date, 'Month DD, YYYY') as Date,
        count(status) as Total, sum(case when not status =
        '200 OK' then 1 else 0 end) as Error
        FROM log
        GROUP BY time::timestamp::date) as result
        WHERE(Error::float*100)/Total::float > 1.0;"""
    c.execute(query)
    rows = c.fetchall()
    db.close()
    print("\nDays where more than 1% of requests lead to errors:\n")
    # Unpack each row directly instead of indexing by position.
    for day, total, error_count, percent in rows:
        print(str(day) +
              " - " + str(round(percent, 1)) + "% errors")
    print("\n")
if __name__ == '__main__':
    # Run all three reports in sequence when executed as a script.
    top_3_articles()
    popular_authors()
    errors()
| [
"noreply@github.com"
] | severlove.noreply@github.com |
3daab6c956e8d126316ecdb6ef6e71d8af6a258d | 1c8a1b7cfb5c78fe94c4cc62a78dbfff96161924 | /day05/test04.py | 7715b05a49b005d9cad71dc19124fa6797945c72 | [] | no_license | WHUTyuen/PIL_opencv | d264858f0eaa4ecc555747efd5f277f48a432b91 | 3ae6e7d878215866c304e64eac05bf1011ecb428 | refs/heads/main | 2023-01-01T14:00:33.331676 | 2020-11-01T11:35:18 | 2020-11-01T11:35:18 | 309,072,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | import cv2
import numpy as np
A = cv2.imread('3.jpg')
B = cv2.imread('4.jpg')
G = A.copy()
gpA = [G]
for i in range(6):
G = cv2.pyrDown(G)
gpA.append(G)
G = B.copy()
gpB = [G]
for i in range(6):
G = cv2.pyrDown(G)
gpB.append(G)
# generate Laplacian Pyramid for A
lpA = [gpA[5]]
for i in range(5, 0, -1):
GE = cv2.pyrUp(gpA[i])
L = cv2.subtract(gpA[i - 1], GE)
lpA.append(L)
# generate Laplacian Pyramid for B
lpB = [gpB[5]]
for i in range(5, 0, -1):
GE = cv2.pyrUp(gpB[i])
L = cv2.subtract(gpB[i - 1], GE)
lpB.append(L)
# Now add left and right halves of images in each level
LS = []
for la, lb in zip(lpA, lpB):
rows, cols, dpt = la.shape
ls = np.hstack((la[:, 0:cols // 2], lb[:, cols // 2:]))
LS.append(ls)
# now reconstruct
ls_ = LS[0]
for i in range(1, 6):
ls_ = cv2.pyrUp(ls_)
ls_ = cv2.add(ls_, LS[i])
# image with direct connecting each half
real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))
cv2.imshow('Pyramid_blending.jpg', ls_)
cv2.imshow('Direct_blending.jpg', real)
cv2.waitKey(0)
| [
"noreply@github.com"
] | WHUTyuen.noreply@github.com |
27c38c01ec059532373e8cd03289ccde4ded2e1d | f0f3f8731145e236e8e08dafb4201108d35af488 | /wish_list_items/migrations/0007_auto_20160414_1317.py | 8478f0d9cfbb5235617279dac1587637337832db | [] | no_license | AaronScruggs/wish_list_project | 49fdfc9c3a9e72470084bbf283085c15aa659a3e | a2a741823e0a570390ce344f3407f6f3b57f2590 | refs/heads/master | 2021-01-01T05:18:10.817456 | 2016-04-19T00:36:24 | 2016-04-19T00:36:24 | 56,259,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 20:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wish_list_items', '0006_auto_20160414_1312'),
]
operations = [
migrations.AlterField(
model_name='wishitem',
name='item_url',
field=models.URLField(default=True, null=True),
),
]
| [
"aarondscruggs@gmail.com"
] | aarondscruggs@gmail.com |
98872d68f0a8214b87de5d704fb379cb99c72671 | 6f76052e2a1161b8a728dd6fff6dfafd107c6c57 | /Kuznechik.py | 58c46f3b1ea4e3f98d1464f74da175e13cfeeead | [] | no_license | mratnikov/Rain | e33ed9bf51a0eebd8a4cd1e8c6f039fc511658bf | a11c662a87647078141fb213379c544b258881fc | refs/heads/master | 2020-03-24T11:59:53.650407 | 2019-03-14T17:38:23 | 2019-03-14T17:38:23 | 142,700,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | F = [0] * (n + 1)
F[0] = 1
F[1] = 1
for i in range(2, n + 1):
F[i] = F[i - 2] + F[i — 1]
| [
"mratnikov@gmail.com"
] | mratnikov@gmail.com |
4cf93fc1cb9c2a0a2877f9616693f8488607baa0 | 10e8c5f2f917fd091b0bab7aed18f4130039fa86 | /day_7_2.py | 71299c297d0081f51e139cc688aec2245d0f6f76 | [] | no_license | DannyBarbaro/AOC_2019 | 9383a2f14c50cbed9c359fff5f62edd291e1da77 | e1b7948ebc76af9e5bab076c18a6140e54e54f64 | refs/heads/master | 2020-12-03T17:56:23.228622 | 2020-01-02T16:30:14 | 2020-01-02T16:30:14 | 231,418,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,312 | py | datastream = []
def get_value(val, mode, data):
return data[val] if mode == 0 else val
def oppcode_1(val_1, mode_1, val_2, mode_2, result, data):
data[result] = get_value(val_1, mode_1, data) + get_value(val_2, mode_2, data)
return data
def oppcode_2(val_1, mode_1, val_2, mode_2, result, data):
data[result] = get_value(val_1, mode_1, data) * get_value(val_2, mode_2, data)
return data
def oppcode_3(val, data):
data[val] = datastream.pop(0)
return data
def oppcode_4(val, mode, data):
return get_value(val, mode, data)
def oppcode_5(val_1, mode_1, data):
return True if get_value(val_1, mode_1, data) != 0 else False
def oppcode_6(val_1, mode_1, data):
return True if get_value(val_1, mode_1, data) == 0 else False
def oppcode_7(val_1, mode_1, val_2, mode_2, val_3, data):
num = 1 if get_value(val_1, mode_1, data) < get_value(val_2, mode_2, data) else 0
data[val_3] = num
return data
def oppcode_8(val_1, mode_1, val_2, mode_2, val_3, data):
num = 1 if get_value(val_1, mode_1, data) == get_value(val_2, mode_2, data) else 0
data[val_3] = num
return data
def run_intcode(ints, index):
while True:
code = ints[index]
op = code % 100
mode_1 = code % 1000 - code % 100
mode_2 = code % 10000 - code % 1000
mode_3 = code % 100000 - code % 10000
if op == 1:
ints = oppcode_1(ints[index+1], mode_1, ints[index+2], mode_2, ints[index+3], ints)
index += 4
elif op == 2:
ints = oppcode_2(ints[index+1], mode_1, ints[index+2], mode_2, ints[index+3], ints)
index += 4
elif op == 3:
ints = oppcode_3(ints[index+1], ints)
index += 2
elif op == 4:
return [oppcode_4(ints[index+1], mode_1, ints), ints, index+2]
elif op == 5:
if oppcode_5(ints[index+1], mode_1, ints):
index = get_value(ints[index+2], mode_2, ints)
else:
index += 3
elif op == 6:
if oppcode_6(ints[index+1], mode_1, ints):
index = get_value(ints[index+2], mode_2, ints)
else:
index += 3
elif op == 7:
ints = oppcode_7(ints[index+1], mode_1, ints[index+2], mode_2, ints[index+3], ints)
index += 4
elif op == 8:
ints = oppcode_8(ints[index+1], mode_1, ints[index+2], mode_2, ints[index+3], ints)
index += 4
elif op == 99:
return
def new_state():
ints = []
with open('inputs/input7.txt') as f:
return [int(numeric_string) for numeric_string in f.readline().split(',')]
def calc_for_perm(combo):
datastream.clear()
states = [[],[],[],[],[]]
for i in range(len(states)):
states[i] = new_state()
pointers = [0,0,0,0,0]
val = 0
isFirst = True
running = True
while running:
for i in range(len(combo)):
if isFirst:
datastream.append(combo[i])
datastream.append(val)
output = run_intcode(states[i], pointers[i])
if output == None:
running = False
else:
val = output[0]
states[i] = output[1]
pointers[i] = output[2]
isFirst = False
return val
# Part 2
import itertools
import time
perms = list(itertools.permutations([5,6,7,8,9]))
maximum = 0
best = []
for combo in perms:
combo = list(combo)
val = calc_for_perm(combo)
if val > maximum:
maximum = val
best = combo
print(maximum)
print(combo)
# expected = 139629729 | [
"dxb486@case.edu"
] | dxb486@case.edu |
276ee2323e5bd2b36459c096005c556b3732ee1f | 6832a7fa70fc98f62b777421fca13989b8e2e1e9 | /greeting_decorator.py | 6254f95fa5c91f95649dcbfae0570d84bbd5cc75 | [] | no_license | serdarayalp/my-python | f553df88b284528dddd245557935f349b34df963 | 1e23e6b9cccdeb96f3bfd9a0812638dd8c02c189 | refs/heads/main | 2023-02-28T00:39:24.691917 | 2021-02-06T08:53:30 | 2021-02-06T08:53:30 | 330,440,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from functools import wraps
def greeting(func):
@wraps(func)
def function_wrapper(x):
""" function_wrapper of greeting """
print("Hi, " + func.__name__ + " returns:")
return func(x)
return function_wrapper
| [
"serdarayalp@googlemail.com"
] | serdarayalp@googlemail.com |
3f17973dce3bd98e0003d899e9f727b81dc0574e | 27767ea4ec9cb4b3d8a3ba31c9e668957d3f900c | /tools/statistic.py | 4606f091b46a67087b07c966d225389bbf7ff6f9 | [] | no_license | 01ghost13/smartyPantsBot | 8609dd71bfaf0ace1d137cc7bbe4cf87e6b9f1b6 | bc8943b416e42be263cc617c17e702b978aeab8b | refs/heads/master | 2020-04-10T07:27:03.989421 | 2019-05-12T12:55:38 | 2019-05-12T13:04:55 | 160,880,698 | 0 | 0 | null | 2018-12-16T14:52:15 | 2018-12-07T22:12:11 | Python | UTF-8 | Python | false | false | 666 | py | from tools import Database
class Statistic:
smile_count_sql = """
select e.name, e.discord_id, count(e_m.id) as smile_count
from emojies as e
join emoji_messages as e_m
on e_m.emoji_id = e.id
join messages as m
on m.id = e_m.message_id
where m.channel_id = '%s'
group by e.name, e.discord_id
order by smile_count desc
"""
@staticmethod
def smile_count(channel_id):
with Database() as db:
conn = db.connect()
cursor = conn.cursor()
cursor.execute(Statistic.smile_count_sql % channel_id)
return cursor.fetchall()
| [
"orkich@gmail.com"
] | orkich@gmail.com |
79feeaca9b02f648fa48495c3481b1b1940c3057 | 406abf223c6eaca3ec4741e7136cfc2fe819022b | /api/utils.py | d778f462a585c6d1f73e13507c6ed37c3e75ed4a | [] | no_license | WarrentLee/ImgChangeServer | e068132e2430863698870038d2627a79e2bb51b3 | 14e9658e75ab2eebe0cbafa119410ec2c6200321 | refs/heads/master | 2022-10-07T15:36:03.136973 | 2020-06-10T01:41:50 | 2020-06-10T01:41:50 | 269,850,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from flask import Flask
from database.model import db
import os, json
from config import Config
def create_app(name):
app = Flask(name, template_folder='../templates')
# app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', 'sqlite:///' + os.path.join(app.root_path, 'data.db'))
print("Database Directory:", os.getenv('DATABASE_URL'))
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', 'sqlite:///' + Config.DATABASE_DIRECTORY)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = "asfnsdjnsdlflasdkc553d3s1"
db.app = app
db.init_app(app)
return app
| [
"1065791740@qq.com"
] | 1065791740@qq.com |
a19dea9f4e498c18ae262efb60f03b1b275e63ba | f43df719ae5cc238c450f01959e4cb3625ae1c82 | /config_make.py | d96d27bb8e73dd8fb5491ba43c7d0623f1a1fee9 | [] | no_license | kenshow8/eth_erc20_token_python_api | 8f10e5aaa503730b4abc66ebd8f1779b97e021f0 | 4390d47bcb574205230fefb8cb9b2e4b238ac27e | refs/heads/master | 2022-09-29T14:09:10.058167 | 2020-05-31T08:08:14 | 2020-05-31T08:08:14 | 268,100,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # coding:utf-8
import os
import configparser
from pathlib import Path
file_dir = Path(__file__).parent
file_name = 'config/setting.ini'
file_path = file_dir.joinpath(file_name)
config = configparser.ConfigParser()
# コントラクト設定
config_web3 = 'web3_config'
config.add_section(config_web3)
## web3 設定 ##
config.set(config_web3, 'web3_url', 'http://127.0.0.1:7545') # contractのデプロイ先に応じて変更してください。
config.set(config_web3, 'contract_address', '0x238f011262D73a07c0bfACdf5f851CE467bc94ee') # contract addressを入力してください。
config.set(config_web3, 'owner_address', '0xB66d64EF0fACCebFd6F5E10Ece3dcBd3a65B82F1') # contractのowner addressを入力してください。
with open(file_path, 'w') as file:
config.write(file)
| [
"wmapp.kj@gmail.com"
] | wmapp.kj@gmail.com |
2f4d530d9958802e44c5a71b92bc8a6bf4fd5a84 | 0149c07b7c1bdd2d56aecf9934e16ec9c5b163c7 | /oc_ocdm/test/storer/test_storer.py | 5be49fe468ed9e80b576ea191f16708cec2154aa | [
"ISC"
] | permissive | opencitations/oc_ocdm | fac6903403b4149cafd080a88d761ebb44fb91cf | 26298e81fd4903ad60edb1d824f2c7abbdc88656 | refs/heads/master | 2023-07-07T23:53:38.396919 | 2023-06-22T15:20:16 | 2023-06-22T15:20:16 | 322,327,342 | 2 | 6 | ISC | 2023-03-14T08:41:54 | 2020-12-17T14:56:50 | Python | UTF-8 | Python | false | false | 11,370 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
import json
import os
import unittest
from platform import system
from shutil import rmtree
from zipfile import ZipFile
from rdflib import ConjunctiveGraph, URIRef, compare
from oc_ocdm.graph.graph_set import GraphSet
from oc_ocdm.prov.prov_set import ProvSet
from oc_ocdm.storer import Storer
class TestStorer(unittest.TestCase):
def setUp(self):
self.resp_agent = "http://resp_agent.test/"
self.base_iri = "http://test/"
self.graph_set = GraphSet(self.base_iri, "", "060", False)
self.prov_set = ProvSet(self.graph_set, self.base_iri, "", False)
self.br = self.graph_set.add_br(self.resp_agent)
def tearDown(self):
rmtree(os.path.join("oc_ocdm", "test", "storer", "data"))
def test_store_graphs_in_file(self):
base_dir = os.path.join("oc_ocdm", "test", "storer", "data", "rdf") + os.sep
is_unix = system() != "Windows"
with self.subTest("output_format=json-ld, zip_output=True"):
modified_entities = self.prov_set.generate_provenance()
prov_storer = Storer(self.prov_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='json-ld', zip_output=True)
storer = Storer(self.graph_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='json-ld', zip_output=True, modified_entities=modified_entities)
storer.store_all(base_dir, self.base_iri)
prov_storer.store_all(base_dir, self.base_iri)
self.graph_set.commit_changes()
with ZipFile(os.path.join(base_dir, "br", "060", "10000", "1000.zip"), mode="r") as archive:
with archive.open("1000.json") as f:
data = json.load(f)
self.assertEqual(data, [{'@graph': [{'@id': 'http://test/br/0601', '@type': ['http://purl.org/spar/fabio/Expression']}], '@id': 'http://test/br/'}])
with ZipFile(os.path.join(base_dir, "br", "060", "10000", "1000", "prov", "se.zip"), mode="r") as archive:
with archive.open("se.json") as f:
data = [{g:[{k:v for k,v in datum.items() if k != "http://www.w3.org/ns/prov#generatedAtTime"} for datum in data] if g == "@graph" else data for g, data in graph.items()} for graph in json.load(f)]
self.assertEqual(data, [{'@graph': [{
'@id': 'http://test/br/0601/prov/se/1',
'@type': ['http://www.w3.org/ns/prov#Entity'],
'http://purl.org/dc/terms/description': [{'@value': "The entity 'http://test/br/0601' has been created."}],
'http://www.w3.org/ns/prov#specializationOf': [{'@id': 'http://test/br/0601'}],
'http://www.w3.org/ns/prov#wasAttributedTo': [{'@id': 'http://resp_agent.test/'}]}], '@id': 'http://test/br/0601/prov/'}])
if is_unix:
self.assertTrue(os.path.exists(os.path.join(base_dir, "br", "060", "10000", "1000.zip.lock")))
self.assertTrue(os.path.exists(os.path.join(base_dir, "br", "060", "10000", "1000", "prov", "se.zip.lock")))
with self.subTest("output_format=json-ld, zip_output=False"):
base_dir_1 = os.path.join("oc_ocdm", "test", "storer", "data", "rdf_1") + os.sep
storer = Storer(self.graph_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='json-ld', zip_output=False)
self.prov_set.generate_provenance()
prov_storer = Storer(self.prov_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='json-ld', zip_output=False)
storer.store_all(base_dir_1, self.base_iri)
prov_storer.store_all(base_dir_1, self.base_iri)
self.graph_set.commit_changes()
with open(os.path.join(base_dir_1, "br", "060", "10000", "1000.json")) as f:
data = json.load(f)
self.assertEqual(data, [{'@graph': [{'@id': 'http://test/br/0601', '@type': ['http://purl.org/spar/fabio/Expression']}], '@id': 'http://test/br/'}])
with open(os.path.join(base_dir_1, "br", "060", "10000", "1000", "prov", "se.json")) as f:
data = [{g:[{k:v for k,v in datum.items() if k != "http://www.w3.org/ns/prov#generatedAtTime"} for datum in data] if g == "@graph" else data for g, data in graph.items()} for graph in json.load(f)]
self.assertEqual(data, [{'@graph': [{
'@id': 'http://test/br/0601/prov/se/1',
'@type': ['http://www.w3.org/ns/prov#Entity'],
'http://purl.org/dc/terms/description': [{'@value': "The entity 'http://test/br/0601' has been created."}],
'http://www.w3.org/ns/prov#specializationOf': [{'@id': 'http://test/br/0601'}],
'http://www.w3.org/ns/prov#wasAttributedTo': [{'@id': 'http://resp_agent.test/'}]}], '@id': 'http://test/br/0601/prov/'}])
if is_unix:
self.assertTrue(os.path.exists(os.path.join(base_dir_1, "br", "060", "10000", "1000.json.lock")))
self.assertTrue(os.path.exists(os.path.join(base_dir_1, "br", "060", "10000", "1000", "prov", "se.json.lock")))
with self.subTest("output_format=nquads, zip_output=True"):
base_dir_2 = os.path.join("oc_ocdm", "test", "storer", "data", "rdf_2") + os.sep
storer = Storer(self.graph_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='nquads', zip_output=True)
self.prov_set.generate_provenance()
prov_storer = Storer(self.prov_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='nquads', zip_output=True)
storer.store_all(base_dir_2, self.base_iri)
prov_storer.store_all(base_dir_2, self.base_iri)
self.graph_set.commit_changes()
with ZipFile(os.path.join(base_dir_2, "br", "060", "10000", "1000.zip"), mode="r") as archive:
with archive.open("1000.nt") as f:
data = f.read().decode("utf-8")
self.assertEqual(data, "<http://test/br/0601> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://purl.org/spar/fabio/Expression> <http://test/br/> .\n\n")
with ZipFile(os.path.join(base_dir_2, "br", "060", "10000", "1000", "prov", "se.zip"), mode="r") as archive:
with archive.open("se.nq") as f:
data = f.read().decode("utf-8")
data_g = ConjunctiveGraph()
expected_data_g = ConjunctiveGraph()
data_g.parse(data=data, format="nquads")
expected_data_g.parse(data="""
<http://test/br/0601/prov/se/1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/prov#Entity> <http://test/br/0601/prov/> .
<http://test/br/0601/prov/se/1> <http://www.w3.org/ns/prov#specializationOf> <http://test/br/0601> <http://test/br/0601/prov/> .
<http://test/br/0601/prov/se/1> <http://www.w3.org/ns/prov#wasAttributedTo> <http://resp_agent.test/> <http://test/br/0601/prov/> .
<http://test/br/0601/prov/se/1> <http://purl.org/dc/terms/description> "The entity 'http://test/br/0601' has been created." <http://test/br/0601/prov/> .
""", format="nquads")
for s, p, o, c in data_g.quads():
if p == URIRef("http://www.w3.org/ns/prov#generatedAtTime"):
data_g.remove((s, p, o, c))
self.assertTrue(compare.isomorphic(data_g, expected_data_g))
if is_unix:
self.assertTrue(os.path.exists(os.path.join(base_dir_2, "br", "060", "10000", "1000.zip.lock")))
self.assertTrue(os.path.exists(os.path.join(base_dir_2, "br", "060", "10000", "1000", "prov", "se.zip.lock")))
with self.subTest("output_format=nquads, zip_output=False"):
base_dir_3 = os.path.join("oc_ocdm", "test", "storer", "data", "rdf_3") + os.sep
storer = Storer(self.graph_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='nquads', zip_output=False)
self.prov_set.generate_provenance()
prov_storer = Storer(self.prov_set, context_map={}, dir_split=10000, n_file_item=1000, default_dir="_", output_format='nquads', zip_output=False)
storer.store_all(base_dir_3, self.base_iri)
prov_storer.store_all(base_dir_3, self.base_iri)
self.graph_set.commit_changes()
prov_unzipped = ConjunctiveGraph()
expected_prov_unzipped = ConjunctiveGraph()
with open(os.path.join(base_dir_3, "br", "060", "10000", "1000.nt"), "r", encoding="utf-8") as f:
data_unzipped = f.read()
prov_unzipped.parse(source=os.path.join(base_dir_3, "br", "060", "10000", "1000", "prov", "se.nq"), format="nquads")
expected_prov_unzipped.parse(data="""
<http://test/br/0601/prov/se/1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/prov#Entity> <http://test/br/0601/prov/> .
<http://test/br/0601/prov/se/1> <http://www.w3.org/ns/prov#specializationOf> <http://test/br/0601> <http://test/br/0601/prov/> .
<http://test/br/0601/prov/se/1> <http://www.w3.org/ns/prov#wasAttributedTo> <http://resp_agent.test/> <http://test/br/0601/prov/> .
<http://test/br/0601/prov/se/1> <http://purl.org/dc/terms/description> "The entity 'http://test/br/0601' has been created." <http://test/br/0601/prov/> .
""", format="nquads")
for s, p, o, c in prov_unzipped.quads():
if p == URIRef("http://www.w3.org/ns/prov#generatedAtTime"):
prov_unzipped.remove((s, p, o, c))
self.assertEqual(data_unzipped, "<http://test/br/0601> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://purl.org/spar/fabio/Expression> <http://test/br/> .\n\n")
self.assertTrue(compare.isomorphic(prov_unzipped, expected_prov_unzipped))
if is_unix:
self.assertTrue(os.path.exists(os.path.join(base_dir_3, "br", "060", "10000", "1000.nt.lock")))
self.assertTrue(os.path.exists(os.path.join(base_dir_3, "br", "060", "10000", "1000", "prov", "se.nq.lock")))
if __name__ == '__main__':
unittest.main() | [
"arcangelomas@gmail.com"
] | arcangelomas@gmail.com |
8424d48df45764507a31d8a1ab65bd44b3544d9b | aa2210cbc53e2892f2d7192085433950879b3e93 | /file_summary_1.py | a2aa31b77215d790056587eed7df6145cb8b9d87 | [] | no_license | 6188506/LearnPythonHardWay | dddc9f076902e94ba1cbff24142237ea7cdeeb28 | bd071a6602a1cde297209fa2339ebe3d96da6441 | refs/heads/master | 2021-01-20T04:11:36.536522 | 2018-05-03T09:01:38 | 2018-05-03T09:01:38 | 89,654,909 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | #from sys import argv
#script, destination_file = argv
#print script
#print "read file from ex1 to ex41,and write to a argv file"
x = 6
print open('e:/learnpythonhardway/ex'+str(x)+'.py','r').read()
| [
"6188506@qq.com"
] | 6188506@qq.com |
f25d084d56d88bcd33d43a1a0ea324541f6d799f | ae9ff58a96682e57da9b6a1376db7ece5d84baa2 | /contrib/bulk_operations/bulk_operations.py | 005659e1475c3241682c87ea583fa35188da1cdc | [
"MIT"
] | permissive | simozhan/product-definition-center | 0ca1fde76c9c80368e3116dd68465fe91a290c7f | 55c2de179cf4169213c8a8bd81977e0cdc8ee350 | refs/heads/master | 2021-01-22T16:05:30.731848 | 2015-07-24T04:46:16 | 2015-07-24T04:46:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,001 | py | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
"""
This module provides BulkRouter that extends the registered ViewSets with bulk
operations if they are not provided yet.
To display documentation in the browsable API, it is necessary to provide a
method `bulk_op` (where `op` is any of `update`, `partial_update`, `destroy`)
on the viewset which calls `pdc.apps.common.bulk.bulk_op_impl` instead of a
parent class. Bulk create does not get its own tab in browsable API. If the
docstrings for these methods are not provided, they come with some generic
ones.
"""
from functools import wraps
from collections import OrderedDict
from rest_framework.settings import api_settings
from rest_framework import routers, status
from rest_framework.response import Response
from django.conf import settings
def _failure_response(ident, response, data=None):
"""
Given an identifier, a response from a view and optional data, return a
Response object that describes the error.
"""
result = {
'invalid_data_id': ident,
'detail': response.data.get('detail', response.data),
}
if data:
result['invalid_data'] = data
response = Response(result, status=response.status_code)
# This tells ChangesetMiddleware to abort the transaction.
response.exception = True
return response
def _safe_run(func, *args, **kwargs):
"""
Try to run a function with given arguments. If it raises an exception, try
to convert it to response with the exception handler. If that fails, the
exception is re-raised.
"""
try:
return func(*args, **kwargs)
except Exception, exc:
response = api_settings.EXCEPTION_HANDLER(exc, context=kwargs)
if response is not None:
return response
raise
def bulk_create_wrapper(func):
@wraps(func)
def wrapper(self, request, *args, **kwargs):
data = request.data
if not isinstance(data, list):
return func(self, request, *args, **kwargs)
result = []
for idx, obj in enumerate(data):
request._full_data = obj
response = _safe_run(func, self, request, *args, **kwargs)
if not status.is_success(response.status_code):
return _failure_response(idx, response, data=obj)
# Reset object in view set.
setattr(self, 'object', None)
result.append(response.data)
return Response(result, status=status.HTTP_201_CREATED)
return wrapper
def bulk_destroy_impl(self, request, **kwargs):
"""
It is possible to delete multiple items in one request. Use the `DELETE`
method with the same url as for listing/creating objects. The request body
should contain a list with identifiers for objects to be deleted. The
identifier is usually the last part of the URL for deleting a single
object.
"""
if not isinstance(request.data, list):
return Response(status=status.HTTP_400_BAD_REQUEST,
data={'detail': 'Bulk delete needs a list of identifiers.'})
self.kwargs.update(kwargs)
for ident in OrderedDict.fromkeys(request.data):
self.kwargs[self.lookup_field] = unicode(ident)
response = _safe_run(self.destroy, request, **self.kwargs)
if not status.is_success(response.status_code):
return _failure_response(ident, response)
return Response(status=status.HTTP_204_NO_CONTENT)
def bulk_update_impl(self, request, **kwargs):
"""
It is possible to update multiple objects in one request. Use the `PUT` or
`PATCH` method with the same url as for listing/creating objects. The
request body should contain an object, where keys are identifiers of
objects to be modified and their values use the same format as normal
*update*.
"""
if not isinstance(request.data, dict):
return Response(status=status.HTTP_400_BAD_REQUEST,
data={'detail': 'Bulk update needs a mapping.'})
result = {}
self.kwargs.update(kwargs)
orig_data = request.data
for ident, data in orig_data.iteritems():
self.kwargs[self.lookup_field] = unicode(ident)
request._full_data = data
response = _safe_run(self.update, request, **self.kwargs)
if not status.is_success(response.status_code):
return _failure_response(ident, response, data=data)
result[ident] = response.data
return Response(status=status.HTTP_200_OK, data=result)
def bulk_partial_update_impl(self, request, **kwargs):
if not request.data:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data=settings.EMPTY_PATCH_ERROR_RESPONSE
)
self.kwargs['partial'] = True
return self.bulk_update(request, **kwargs)
def bulk_create_dummy_impl():
"""
It is possible to create this resource in bulk. To do so, use the same
procedure as when creating a single instance, only the request body should
contain a list of JSON objects. The response you get back will also contain
a list of values which you would obtain by submitting the request data
separately.
"""
assert False, ('This method should never be called, it is here just so '
'that there is a method to attach a docstring to.')
class BulkRouter(routers.DefaultRouter):
"""
This router provides the standard set of resources (the same as
`DefaultRouter`). In addition to that, it allows for bulk operations on the
collection as a whole. These are performed as a PUT/PATCH/DELETE request on
the `{basename}-list` url. These requests are dispatched to the
`bulk_update`, `bulk_partial_update` and `bulk_destroy` methods
respectively.
The bulk create does not have a dedicated method (because the URL and
method are the same as for regular create). Currently, there is no way to
opt-out from having bulk create added. It is however possible to define a
method named `bulk_create` which will provide docstring to be rendered in
browsable API. This method will never be called. If the method is missing,
a generic documentation will be added.
"""
def get_routes(self, viewset):
for route in self.routes:
if isinstance(route, routers.Route) and route.name.endswith('-list'):
route.mapping.update({'delete': 'bulk_destroy',
'put': 'bulk_update',
'patch': 'bulk_partial_update'})
return super(BulkRouter, self).get_routes(viewset)
def register(self, prefix, viewset, base_name=None):
if hasattr(viewset, 'create'):
viewset.create = bulk_create_wrapper(viewset.create)
if not hasattr(viewset, 'bulk_create'):
viewset.bulk_create = bulk_create_dummy_impl
if hasattr(viewset, 'destroy') and not hasattr(viewset, 'bulk_destroy'):
viewset.bulk_destroy = bulk_destroy_impl
if hasattr(viewset, 'update') and not hasattr(viewset, 'bulk_update'):
viewset.bulk_update = bulk_update_impl
if hasattr(viewset, 'partial_update') and not hasattr(viewset, 'bulk_partial_update'):
viewset.bulk_partial_update = bulk_partial_update_impl
super(BulkRouter, self).register(prefix, viewset, base_name)
def get_lookup_regex(self, viewset, lookup_prefix=''):
"""
For viewsets using the MultiLookupFieldMixin, it is necessary to
construct the lookup_value_regex attribute here.
"""
if hasattr(viewset, 'lookup_fields'):
regexes = []
for field_name, field_regex in viewset.lookup_fields:
regexes.append('(?P<%s>%s)' % (field_name, field_regex))
viewset.lookup_value_regex = '/'.join(regexes)
return super(BulkRouter, self).get_lookup_regex(viewset, lookup_prefix)
| [
"lsedlar@redhat.com"
] | lsedlar@redhat.com |
3d4eeac4dc2bacb190a2ec396de0cfce8de00b15 | 902215df8f073b83b5815657d7b69fd6d67c8b5f | /kkd/views.py | 00c6807b8480442a788fd0a2ca628a9bf47f4ca8 | [] | no_license | k821209/KKD_GO_DB | e7643209da7d33cc1eee1395430046fd41199af7 | 9745dcb2c2a62db08b239716dfc4f3581200bb8c | refs/heads/master | 2020-04-07T05:48:01.054782 | 2018-05-02T05:48:47 | 2018-05-02T06:33:00 | 124,186,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from django.shortcuts import render
def show_go_list(request):
return render(request, 'kkd/go_index.html',{})
# Create your views here.
| [
"kangyangjae@gmail.com"
] | kangyangjae@gmail.com |
8104871522b317d80e9b8bcd58d1d04195096050 | 57385c8ae2b10d01c2dede1d69f1887d8a8a7ac0 | /manage.py | 001c29815eafae252644ec0a25538483b622b402 | [
"MIT"
] | permissive | bruno-dash/teste-5 | 151efb6558168c20d3320c510893a922e2d68ee1 | 484cc065673fe859cda571b208be79d82368eeae | refs/heads/main | 2023-08-19T01:37:10.334089 | 2021-10-16T23:27:45 | 2021-10-16T23:27:45 | 417,916,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teste5.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"bonofre36@gmail.com"
] | bonofre36@gmail.com |
d874e4f137ec4de13520364ebc883e120ec966a1 | 7f19038537cfafb2c725973d36ae0d1ae767995f | /Labs/lab10/lab10.py | 378f26a0b28368e93b796d5f88913dbfcea81dc5 | [] | no_license | aaeissa/UMBC-CS-201 | f532684734543dd7a16698b61f1824443dfeb23e | c07a97469fc9c3a3b91e5228880abeb49918691f | refs/heads/master | 2021-01-13T12:49:50.373730 | 2016-10-29T18:30:52 | 2016-10-29T18:30:52 | 72,298,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py |
def convertToDict(info):
stateFile = info.readlines()
myDict = {}
states = ""
for line in stateFile:
states = line.strip()
key, value = states.split(",")
myDict[key] = value
return(myDict)
def main():
print("Welcomet to the State Abbreviator.")
info = open("states.txt", "r")
myDict = convertToDict(info)
userInput = ""
while userInput != "exit" and userInput != "Exit":
userInput = input("Please enter the state to abbreviate (list to get list and exit to exit): ").strip()
if userInput == "list":
for key in sorted(myDict):
print(key)
else:
fullName = myDict.keys()
abbreviation = myDict.values()
if userInput in fullName:
print("The abbreviation of the state: ", userInput, "is", myDict[userInput])
else:
print("Sorry. That is not a state.")
print("Thank you for using the State Abbreviator.")
info.close()
main()
| [
"ahmedeissa33@gmail.com"
] | ahmedeissa33@gmail.com |
587c0100e02d4abc95b9fb79c169f06d3ed8b47b | 891c267df1228d42153ff1a36d6b92b58a32f435 | /schemes.py | ad91237c7020256b802593dc48fb3b750e821042 | [] | no_license | ellawang44/numerical-advection-eq-MATH2306- | eaa6be239e1d27f1aed2cb29bba4fdfde305a081 | de410631ec9d2fcb68c7aa2405f2cd785cb1c604 | refs/heads/master | 2021-07-21T12:17:50.392999 | 2017-10-25T02:15:15 | 2017-10-25T02:15:15 | 106,899,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | def ftbs(jn1, j, C_r):
"""Forward Time Backwards Space Scheme"""
return j - C_r*(j - jn1)
def ftcs(jn1, j, j1, C_r):
"""Forward Time Centered Space Scheme"""
return j - C_r/2*(j1 - jn1)
def ftfs(j, j1, C_r):
"""Forwards Time Forwards Space Scheme"""
return j - C_r*(j1 - j)
def lax_wendroff(jn1, j, j1, C_r):
"""Lax Wendroff Scheme"""
return j - C_r/2*(j1 - jn1) + C_r**2/2*(j1 - 2*j + jn1)
def beam_warming(jn2, jn1, j, C_r):
"""Beam Warming Scheme"""
return j - C_r/2*(3*j - 4*jn1 + jn2) + C_r**2/2*(j - 2*jn1 + jn2)
| [
"u6083620@anu.edu.au"
] | u6083620@anu.edu.au |
9872a4700e6e9c1bd2a6de233fa4c578321b2559 | ac6fdcfe85b72bcf1b719bdea3ce7f9954217fd3 | /zap_transformers.py | 99dcb9b97c33bf38a861744f3fe4f97081fc6b5d | [] | no_license | giuice/ds-zap-challenge | 084510963ee413b8d62aafcb600cfebd1b86d6e6 | 34eb56c1ee74d9af02a668522bb97fea1ac95c9e | refs/heads/master | 2020-06-25T23:08:32.085580 | 2019-08-16T21:07:48 | 2019-08-16T21:07:48 | 199,449,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,621 | py | # Reference
#
import numpy as np
import pandas as pd
import warnings
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from base_transformers import BaseCategoricalEncoder,BaseCategoricalTransformer, _define_variables, BaseNumericalTransformer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import FunctionTransformer, StandardScaler, RobustScaler
from sklearn.preprocessing import Imputer, MultiLabelBinarizer
class DFStandardScaler(TransformerMixin):
    # StandardScaler but for pandas DataFrames
    # Wraps sklearn's StandardScaler so that transform() returns a DataFrame
    # carrying the original index and column labels instead of a bare array.
    def __init__(self):
        self.ss = None      # fitted sklearn StandardScaler
        self.mean_ = None   # per-column means, as a labelled Series
        self.scale_ = None  # per-column scales, as a labelled Series
    def fit(self, X, y=None):
        """Fit a StandardScaler on DataFrame X; y is accepted but ignored."""
        self.ss = StandardScaler()
        self.ss.fit(X)
        self.mean_ = pd.Series(self.ss.mean_, index=X.columns)
        self.scale_ = pd.Series(self.ss.scale_, index=X.columns)
        return self
    def transform(self, X):
        """Scale X and re-wrap the result as a DataFrame."""
        # assumes X is a DataFrame
        Xss = self.ss.transform(X)
        Xscaled = pd.DataFrame(Xss, index=X.index, columns=X.columns)
        return Xscaled
class DFRobustScaler(BaseNumericalTransformer):
    # RobustScaler but for pandas DataFrames
    # Scales only the columns named in `variables`, leaving the rest intact.
    def __init__(self, variables=None):
        self.variables = _define_variables(variables)
        self.rs = None       # fitted sklearn RobustScaler
        self.center_ = None  # per-column medians, as a labelled Series
        self.scale_ = None   # per-column IQR-based scales, as a labelled Series
    def fit(self, X, y=None):
        """Fit a RobustScaler on the selected columns of X; y is ignored."""
        self.rs = RobustScaler()
        var = self.variables
        self.rs.fit(X[var])
        self.center_ = pd.Series(self.rs.center_, index=X[var].columns)
        self.scale_ = pd.Series(self.rs.scale_, index=X[var].columns)
        return self
    def transform(self, X):
        """Scale the selected columns of X.

        NOTE(review): unlike DFStandardScaler, this writes into the frame it
        was given (no copy), so the caller's DataFrame is mutated -- verify
        that is intended.
        """
        # assumes X is a DataFrame
        var = self.variables
        X[var] = self.rs.transform(X[var])
        return X
class ColumnExtractor(TransformerMixin):
    """Pipeline step that selects a fixed subset of DataFrame columns."""
    def __init__(self, cols):
        self.cols = cols  # column labels to keep
    def fit(self, X, y=None):
        # stateless transformer
        return self
    def transform(self, X):
        """Return only the configured columns of X."""
        # assumes X is a DataFrame
        Xcols = X[self.cols]
        return Xcols
class ZeroFillTransformer(TransformerMixin):
    """Pipeline step that replaces every missing value in X with 0."""
    def fit(self, X, y=None):
        # stateless transformer
        return self
    def transform(self, X):
        """Return a copy of X with NaNs filled with 0 (fillna returns new frame)."""
        # assumes X is a DataFrame
        Xz = X.fillna(value=0)
        return Xz
class CountFrequencyCategoricalEncoder(BaseCategoricalEncoder):
    """
    The CountFrequencyCategoricalEncoder() replaces categories by the count of
    observations per category or by the percentage of observations per category.
    For example in the variable colour, if 10 observations are blue, blue will
    be replaced by 10. Alternatively, if 10% of the observations are blue, blue
    will be replaced by 0.1.
    The CountFrequencyCategoricalEncoder() will encode only categorical variables
    (type 'object'). A list of variables can be passed as an argument. If no
    variables are passed as argument, the encoder will only encode categorical
    variables (object type) and ignore the rest.
    The encoder first maps the categories to the numbers for each variable (fit).
    The encoder then transforms the categories to those mapped numbers (transform).
    Parameters
    ----------
    encoding_method : str, default='count'
        Desired method of encoding.
        'count': number of observations per category
        'frequency' : percentage of observations per category
    variables : list
        The list of categorical variables that will be encoded. If None, the
        encoder will find and transform all object type variables.
    Attributes
    ----------
    encoder_dict_: dictionary
        The dictionary containing the {count / frequency: category} pairs used
        to replace categories for every variable.
    """
    def __init__(self, encoding_method = 'count', variables = None):
        # Validate eagerly so bad configuration fails at construction time.
        if encoding_method not in ['count', 'frequency']:
            raise ValueError("encoding_method takes only values 'count' and 'frequency'")
        self.encoding_method = encoding_method
        self.variables = _define_variables(variables)
    def fit(self, X, y = None):
        """
        Learns the numbers that should be used to replace the categories in
        each variable.
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.
            Can be the entire dataframe, not just seleted variables.
        y : None
            y is not needed in this encoder, yet the sklearn pipeline API
            requires this parameter for checking. You can either leave it as None
            or pass y.
        """
        # brings the variables from the BaseEncoder
        super().fit(X, y)
        self.encoder_dict_ = {}
        for var in self.variables:
            if self.encoding_method == 'count':
                self.encoder_dict_[var] = X[var].value_counts().to_dict()
            elif self.encoding_method == 'frequency':
                # NOTE(review): np.float is removed in NumPy >= 1.24; plain
                # float() is equivalent here -- verify the NumPy version pin.
                n_obs = np.float(len(X))
                self.encoder_dict_[var] = (X[var].value_counts() / n_obs).to_dict()
        if len(self.encoder_dict_)==0:
            raise ValueError('Encoder could not be fitted. Check that correct parameters and dataframe were passed during training')
        self.input_shape_ = X.shape
        return self
class OrdinalCategoricalEncoder(BaseCategoricalEncoder):
    """
    The OrdinalCategoricalEncoder() replaces categories by ordinal numbers
    (0, 1, 2, 3, etc). The numbers can be ordered based on the mean of the target
    per category, or assigned arbitrarily.
    For the ordered ordinal encoding for example in the variable colour, if the
    mean of the target for blue, red and grey is 0.5, 0.8 and 0.1 respectively,
    blue is replaced by 1, red by 2 and grey by 0.
    For the arbitrary ordinal encoding the numbers will be assigned arbitrarily
    to the categories, on a first seen first served basis.
    The Encoder will encode only categorical variables (type 'object'). A list
    of variables can be passed as an argument. If no variables are passed as
    argument, the encoder will only encode categorical variables (object type)
    and ignore the rest.
    The encoder first maps the categories to the numbers for each variable (fit).
    The encoder then transforms the categories to the mapped numbers (transform).
    Parameters
    ----------
    encoding_method : str, default='ordered'
        Desired method of encoding.
        'ordered': the categories are numbered in ascending order according to
        the target mean per category.
        'arbitrary' : categories are numbered arbitrarily.
    variables : list, default=None
        The list of categorical variables that will be encoded. If None, the
        encoder will find and select all object type variables.
    Attributes
    ----------
    encoder_dict_: dictionary
        The dictionary containing the {ordinal number: category} pairs used
        to replace categories for every variable.
    """
    def __init__(self, encoding_method = 'ordered', variables = None):
        # Validate eagerly so bad configuration fails at construction time.
        if encoding_method not in ['ordered', 'arbitrary']:
            raise ValueError("encoding_method takes only values 'ordered' and 'arbitrary'")
        self.encoding_method = encoding_method
        self.variables = _define_variables(variables)
    def fit(self, X, y=None):
        """ Learns the numbers that should be used to replace the labels in each
        variable.
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.
            Can be the entire dataframe, not just seleted variables.
        y : Target. Can be None if selecting encoding_method = 'arbitrary'.
            Otherwise, needs to be passed when fitting the transformer.
        """
        # brings the variables from the BaseEncoder
        super().fit(X, y)
        if self.encoding_method == 'ordered':
            if y is None:
                raise ValueError('Please provide a target (y) for this encoding method')
            temp = pd.concat([X, y], axis=1)
            temp.columns = list(X.columns)+['target']
        self.encoder_dict_ = {}
        for var in self.variables:
            if self.encoding_method == 'ordered':
                # Categories sorted by ascending target mean; their position
                # in this index becomes their ordinal code below.
                t = temp.groupby([var])['target'].mean().sort_values(ascending=True).index
            elif self.encoding_method == 'arbitrary':
                t = X[var].unique()
            self.encoder_dict_[var] = {k:i for i, k in enumerate(t, 0)}
        if len(self.encoder_dict_)==0:
            raise ValueError('Encoder could not be fitted. Check that correct parameters and dataframe were passed during training')
        self.input_shape_ = X.shape
        return self
class MeanCategoricalEncoder(BaseCategoricalTransformer):
    """
    The MeanCategoricalEncoder() replaces categories by the mean of the target.
    For example in the variable colour, if the mean of the target for blue, red
    and grey is 0.5, 0.8 and 0.1 respectively, blue is replaced by 0.5, red by 0.8
    and grey by 0.1.
    Note: transform() does not overwrite the original column; it adds a new
    '<variable>_mean' column alongside it (see transform below).
    The Encoder will encode only categorical variables (type 'object'). A list
    of variables can be passed as an argument. If no variables are passed as
    argument, the encoder will only encode categorical variables (object type)
    and ignore the rest.
    The encoder first maps the categories to the numbers for each variable (fit).
    The encoder then transforms the categories to the mapped numbers (transform).
    Parameters
    ----------
    variables : list, default=None
        The list of categorical variables that will be encoded. If None, the
        encoder will find and select all object type variables.
    Attributes
    ----------
    encoder_dict_: dictionary
        The dictionary containing the {target mean: category} pairs used
        to replace categories for every variable
    """
    def __init__(self, variables = None):
        self.variables = _define_variables(variables)
    def fit(self, X, y):
        """
        Learns the numbers that should be used to replace the labels in each variable.
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.
            Can be the entire dataframe, not just seleted variables.
        y : Target
        """
        # brings the variables from the BaseEncoder
        super().fit(X, y)
        if y is None:
            raise ValueError('Please provide a target (y) for this encoding method')
        temp = pd.concat([X, y], axis=1)
        temp.columns = list(X.columns)+['target']
        self.encoder_dict_ = {}
        for var in self.variables:
            self.encoder_dict_[var] = temp.groupby(var)['target'].mean().to_dict()
        if len(self.encoder_dict_)==0:
            raise ValueError('Encoder could not be fitted. Check that correct parameters and dataframe were passed during training')
        self.input_shape_ = X.shape
        return self
    def transform(self, X):
        """ Adds a '<feature>_mean' column holding each category's target mean.
        Unseen categories map to NaN and are then filled with 0 (with a warning).
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features].
            The input samples.
        Returns
        -------
        X_transformed : pandas dataframe of shape = [n_samples, n_features].
            The dataframe containing categories replaced by numbers.
        """
        # Check that the method fit has been called
        check_is_fitted(self, ['encoder_dict_'])
        # Check that the input is of the same shape as the training set passed
        # during fit.
        if X.shape[1] != self.input_shape_[1]:
            raise ValueError('Number of columns in dataset is different from train set used to fit the encoder')
        # encode labels
        X = X.copy()
        for feature in self.variables:
            X[feature+'_mean'] = X[feature].map(self.encoder_dict_[feature], na_action='ignore')
            if X[feature+'_mean'].isnull().sum() > 0:
                X[feature+'_mean'] = X[feature+'_mean'].fillna(0)
                warnings.warn("NaN values were introduced by the encoder due to labels in variable {} not present in the training set. Try using the RareLabelCategoricalEncoder.".format(feature) )
        return X
class MedianCategoricalEncoder(BaseCategoricalTransformer):
    """
    The MedianCategoricalEncoder() replaces categories by the median of the target.
    For example in the variable colour, if the median of the target for blue, red
    and grey is 0.5, 0.8 and 0.1 respectively, blue is replaced by 0.5, red by 0.8
    and grey by 0.1.
    Note: transform() does not overwrite the original column; it adds a new
    '<variable>_median' column alongside it (see transform below).
    The Encoder will encode only categorical variables (type 'object'). A list
    of variables can be passed as an argument. If no variables are passed as
    argument, the encoder will only encode categorical variables (object type)
    and ignore the rest.
    The encoder first maps the categories to the numbers for each variable (fit).
    The encoder then transforms the categories to the mapped numbers (transform).
    Parameters
    ----------
    variables : list, default=None
        The list of categorical variables that will be encoded. If None, the
        encoder will find and select all object type variables.
    Attributes
    ----------
    encoder_dict_: dictionary
        The dictionary containing the {target median: category} pairs used
        to replace categories for every variable
    """
    def __init__(self, variables = None):
        self.variables = _define_variables(variables)
    def fit(self, X, y):
        """
        Learns the numbers that should be used to replace the labels in each variable.
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.
            Can be the entire dataframe, not just seleted variables.
        y : Target
        """
        # brings the variables from the BaseEncoder
        super().fit(X, y)
        if y is None:
            raise ValueError('Please provide a target (y) for this encoding method')
        temp = pd.concat([X, y], axis=1)
        temp.columns = list(X.columns)+['target']
        self.encoder_dict_ = {}
        for var in self.variables:
            self.encoder_dict_[var] = temp.groupby(var)['target'].median().to_dict()
        if len(self.encoder_dict_)==0:
            raise ValueError('Encoder could not be fitted. Check that correct parameters and dataframe were passed during training')
        self.input_shape_ = X.shape
        return self
    def transform(self, X):
        """ Adds a '<feature>_median' column holding each category's target median.
        Unseen categories map to NaN and are then filled with 0 (with a warning).
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features].
            The input samples.
        Returns
        -------
        X_transformed : pandas dataframe of shape = [n_samples, n_features].
            The dataframe containing categories replaced by numbers.
        """
        # Check that the method fit has been called
        check_is_fitted(self, ['encoder_dict_'])
        # Check that the input is of the same shape as the training set passed
        # during fit.
        if X.shape[1] != self.input_shape_[1]:
            raise ValueError('Number of columns in dataset is different from train set used to fit the encoder')
        # encode labels
        X = X.copy()
        for feature in self.variables:
            X[feature+'_median'] = X[feature].map(self.encoder_dict_[feature], na_action='ignore')
            if X[feature+'_median'].isnull().sum() > 0:
                X[feature+'_median'] = X[feature+'_median'].fillna(0)
                warnings.warn("NaN values were introduced by the encoder due to labels in variable {} not present in the training set. Try using the RareLabelCategoricalEncoder.".format(feature) )
        return X
class RareLabelCategoricalEncoder(BaseCategoricalEncoder):
    """
    The RareLabelCategoricalEncoder() groups rare / infrequent categories in
    a new category called "Rare".
    For example in the variable colour, if the percentage of observations
    for the categories magenta, cyan and burgundy are < 5 %, all those
    categories will be replaced by the new label "Rare".
    The Encoder will encode only categorical variables (type 'object'). A list
    of variables can be passed as an argument. If no variables are passed as
    argument, the encoder will only encode categorical variables (object type)
    and ignore the rest.
    The encoder first finds the frequent labels for each variable (fit).
    The encoder then groups the infrequent labels under the new label 'Rare'
    (transform).
    Parameters
    ----------
    tol: float, default=0.05
        the minimum frequency a label should have to be considered frequent
        and not be removed.
    n_categories: int, default=10
        the minimum number of categories a variable should have in order for
        the encoder to find frequent labels. If the variable contains
        less categories, all of them will be considered frequent.
    variables : list, default=None
        The list of categorical variables that will be encoded. If None, the
        encoder will find and select all object type variables.
    Attributes
    ----------
    encoder_dict_: dictionary
        The dictionary containg the frequent categories (that will be kept)
        for each variable. Categories not present in this list will be replaced
        by 'Rare'.
    """
    def __init__(self, tol = 0.05, n_categories = 10, variables = None):
        # Validate eagerly so bad configuration fails at construction time.
        if tol <0 or tol >1 :
            raise ValueError("tol takes values between 0 and 1")
        if n_categories < 0 or not isinstance(n_categories, int):
            raise ValueError("n_categories takes only positive integer numbers")
        self.tol = tol
        self.n_categories = n_categories
        self.variables = _define_variables(variables)
    def fit(self, X, y = None):
        """
        Learns the frequent categories for each variable.
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.
            Can be the entire dataframe, not just seleted variables
        y : None
            There is no need of a target in a transformer, yet the pipeline API
            requires this parameter. You can leave y as None, or pass it as an
            argument.
        """
        # brings the variables from the BaseEncoder
        super().fit(X, y)
        self.encoder_dict_ = {}
        for var in self.variables:
            if len(X[var].unique()) > self.n_categories:
                # if the variable has more than the indicated number of categories
                # the encoder will learn the most frequent categories
                # NOTE(review): np.float is removed in NumPy >= 1.24; plain
                # float() is equivalent here -- verify the NumPy version pin.
                t = pd.Series(X[var].value_counts() / np.float(len(X)))
                # non-rare labels:
                self.encoder_dict_[var] = t[t>=self.tol].index
            else:
                # if the total number of categories is smaller than the indicated
                # the encoder will consider all categories as frequent.
                self.encoder_dict_[var]= X[var].unique()
        self.input_shape_ = X.shape
        return self
    def transform(self, X):
        """
        Groups rare labels under separate group 'Rare'.
        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        X_transformed : pandas dataframe of shape = [n_samples, n_features]
            The dataframe where rare categories have been grouped.
        """
        # Check is fit had been called
        check_is_fitted(self, ['encoder_dict_'])
        # Check that the input is of the same shape as the one passed
        # during fit.
        if X.shape[1] != self.input_shape_[1]:
            raise ValueError('Number of columns in dataset is different from training set used to fit the encoder')
        X = X.copy()
        for feature in self.variables:
            # Keep frequent labels as-is; everything else collapses to 'Rare'.
            X[feature] = np.where(X[feature].isin(self.encoder_dict_[feature]), X[feature], 'Rare')
        return X
"giuice@gmail.com"
] | giuice@gmail.com |
371f634ce0aaff313e172b3b8cd67e6b98119edc | cdcaf074da4a38c2d653705225e8578e5c8a1372 | /爬取天气.py | 03c682424390894b35734bd1d8e09a7017f5d30c | [] | no_license | 475271359/TST | f71b8632ae06f3963e9b440d8c26885fe6b9be58 | d9cbd24bab66f4f92718f424a40345ae3944695c | refs/heads/master | 2020-11-26T04:25:04.989105 | 2020-03-10T03:37:27 | 2020-03-10T03:37:27 | 228,964,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,268 | py | """
目标网站: 2345天气网站
总目标:爬取北方大区所有城市的历史天气。
阶段性目标:爬取山东济南过去一年的天气数据。
输入内容:
分析网站:
待爬取页面:http://tianqi.2345.com/wea_history/54511.htm
待爬取数据:数据在js里面,http://tianqi.2345.com/t/wea_history/js/202001/54511_202001.js
构建循环,用request库,批量下载。
处理内容: 不是标准的json,
实现返回的javascript解析,得到目标数据。
对于javascript的json如何解析? 使用tqInfos = demjson.decode(data)["tqInfo"]进行解析。
输出内容:
保存为csv
http://tianqi.2345.com/t/wea_history/js/202001/54511_202001.js
"""
#构造2019年全年的月份列表
import requests as re
import demjson
import numpy as np
import pandas as pd
# NOTE(review): "global" at module top level is a no-op; `city` is simply a
# module variable assigned inside the scraping loop below.
global city
# Full candidate list of northern-region station ids, kept for reference.
#list = [54237, 54161, 54347, 54453, 54471, 54433, 54353, 54497, 53892, 54449, 53698, 54534, 54602, 53982, 60259,
#        53986,57091, 58005, 57195, 60255, 57186, 53978, 57073, 71361, 57051, 54857, 54765, 54843, 54830, 54915,
#        54827,58024,54945, 54938, 54828, 54823, 54714]
# NOTE(review): the name "list" shadows the builtin list type for the rest
# of this module -- consider renaming to station_ids.
list = [54347,54161]
def getHTMLText(url):
    """Fetch *url* and return the decoded response body as text.

    On any request failure the literal string "产生异常" ("an exception
    occurred") is returned, which downstream code treats as the payload.
    Note the module imports requests as `re`, shadowing the regex module.
    """
    try:
        r = re.get(url, timeout=30)
        r.raise_for_status()  # raise HTTPError when the status is not 200
        r.encoding = r.apparent_encoding  # fix encoding guessed from content
        return r.text
    except Exception:
        # Narrowed from a bare "except:", which would also swallow
        # KeyboardInterrupt / SystemExit; fallback behaviour is unchanged.
        return "产生异常"
# For each station id, download twelve monthly weather JS blobs for 2019,
# parse them, and write one Excel workbook per city.
for x in list:
    year = 2019
    ymd = []
    bWendu = []
    yWendu = []
    tianqi = []
    fengxiang = []
    fengli = []
    aqi = []
    aqiInfo = []
    aqiLevel = []
    city_list = []
    all_datas = []
    datas = []
    months = ["{:d}{:0>2d}".format(year, month + 1) for month in range(12)]  # "201901" .. "201912"
    urls = ["http://tianqi.2345.com/t/wea_history/js/{}/".format(month) +str(x)+"_{}.js".format(month) for month in months]  # one monthly JS url per month
    for url in urls:
        # NOTE(review): lstrip strips a *character set*, not a prefix; it works
        # here only because '{' is not in "var weather_str=" -- fragile.
        data = getHTMLText(url).lstrip("var weather_str=").rsplit(";")
        datas.append(data[0])
    for data in datas:
        # demjson tolerates the non-standard (unquoted-key) JS object literal.
        tqInfos = demjson.decode(data)["tqInfo"]
        city = demjson.decode(data)["city"]
        all_datas.extend(x for x in tqInfos if len(x)>0)
    for y in range(len(all_datas)):
        ymd.append(all_datas[y].get('ymd'))
        bWendu.append(all_datas[y].get('bWendu'))
        yWendu.append(all_datas[y].get('yWendu'))
        tianqi.append(all_datas[y].get('tianqi'))
        fengxiang.append(all_datas[y].get('fengxiang'))
        fengli.append(all_datas[y].get('fengli'))
        aqi.append(all_datas[y].get('aqi'))
        aqiInfo.append(all_datas[y].get('aqiInfo'))
        aqiLevel.append(all_datas[y].get('aqiLevel'))
        city_list.append(city)
    Tianqi_np=np.array([ymd,bWendu,yWendu,tianqi,fengxiang,fengli,aqi,aqiInfo,aqiLevel,city_list])
    Tianqi_df = pd.DataFrame(Tianqi_np,index=["ymd","bWendu","yWendu","tianqi","fengxiang","fengli","aqi","aqiInfo","aqiLevel","city_list"])
    # Rows were built as one array per field; transpose so fields become columns.
    Tianqi_df=pd.DataFrame(Tianqi_df.values.T, index=Tianqi_df.columns, columns=Tianqi_df.index)
    # NOTE(review): hard-coded per-user desktop path -- parameterise for reuse.
    Tianqi_df.to_excel("C:/Users/86132/Desktop/"+city+".xlsx")
"noreply@github.com"
] | 475271359.noreply@github.com |
4816b6ce56b6ba10760fc6ec50b511666a0ef942 | c0f5d309576f791f8cc062e2d0cad340eec41d7d | /3.py | 846552142673f67774ae9cc5803b41248ec09248 | [] | no_license | mjjin1214/algorithm | fa91455ab792c38d01fd210c12e53e50f516eb55 | 423119406061443939b4b966c7d9f1513544dd03 | refs/heads/master | 2020-04-22T19:31:23.981387 | 2019-04-05T07:58:10 | 2019-04-05T07:58:10 | 170,610,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import sys
sys.stdin = open('input2.txt')
def subset(n, su):
global visit, count
if n == len(score):
if not visit & (1<<su):
visit ^= (1<<su)
count += 1
return
subset(n+1, su+score[n])
subset(n+1, su)
T = int(input())
for t in range(T):
    N = int(input())
    # set() drops duplicate scores so identical branches are not re-explored.
    score = list(set(map(int, input().split())))
    visit = count = 0
    subset(0, 0)
    # NOTE(review): N - len(score) presumably compensates for the duplicates
    # removed above -- confirm against the problem statement.
    print('#{} {}'.format(t+1, count+N-len(score)))
"moc0etan@gmail.com"
] | moc0etan@gmail.com |
ae9c47cbad8ea97eded1a2947f4a8a01472b1951 | be2fe3d75f5d08d44d207de42309aefac90f8c64 | /O Big.py | 02c8fc7d2f7e6ff0e6cb59e6ce33be3bbad2de42 | [] | no_license | lucasilvabc/CS241 | be18016862b420da0a7d44d9d6ab49aa76111e81 | e94487a7d4b1e4b282de8df419fa2abad61f2087 | refs/heads/main | 2023-04-06T20:11:01.283402 | 2021-04-19T23:42:58 | 2021-04-19T23:42:58 | 344,626,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | def contains(haystack, needle):
# Does the haystack contain the needle?
for item in haystack:
if item == needle:
return True
return False | [
"noreply@github.com"
] | lucasilvabc.noreply@github.com |
cb1e1c4fd0adabebcd87bc33eefe453ec2df48fa | 942ee5e8d54e8ebe9c5c841fbfdd1da652946944 | /1001-1500/1443.Minimum Time to Collect All Apples in a Tree.py | e8ae7ff0deadce1de133f2d3d5feb31d43fde59a | [] | no_license | kaiwensun/leetcode | 0129c174457f32887fbca078fb448adce46dd89d | 6b607f4aae3a4603e61f2e2b7480fdfba1d9b947 | refs/heads/master | 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 | Python | UTF-8 | Python | false | false | 728 | py | from collections import defaultdict
class Solution(object):
def minTime(self, n, edges, hasApple):
"""
:type n: int
:type edges: List[List[int]]
:type hasApple: List[bool]
:rtype: int
"""
graph = defaultdict(list)
for edge in edges:
graph[edge[0]].append(edge[1])
graph[edge[1]].append(edge[0])
visited = set()
def dfs(root):
res = 0
if root not in visited:
visited.add(root)
for nbr in graph[root]:
res += dfs(nbr)
if res or hasApple[root]:
res += 2
return res
return max(0, dfs(0) - 2)
| [
"noreply@github.com"
] | kaiwensun.noreply@github.com |
6c1a65c48e423d27b57f75c12495f1387113b2df | 3a4ecc7f7db23e92b7f8e406d3a73a77c53e1472 | /src/chapter02/c2_df1.py | 6026dc4be928099e2aae64f9481d1a1313b6da19 | [] | no_license | Parthi3610/pandas_workbook | b89b768f2d99cb5f042accbaebc1470bc350565e | 6e61f67db42ff708b6f40a9fb96b755085fe6587 | refs/heads/main | 2023-01-19T08:52:37.000264 | 2020-11-06T03:58:38 | 2020-11-06T03:58:38 | 303,878,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | import pandas as pd
import numpy as np
movies = pd.read_csv("C:/Users/preet/PycharmProjects/pandas_workbook/src/data/movie.csv")
movie_act_dir = movies[
[
"actor_1_name",
"actor_2_name",
"actor_3_name",
"director_name"
]
]
print(movie_act_dir.head())
print(type(movies[["director_name"]]))
print(type(movies["director_name"]))
print(type(movies.loc[:,["director_name"]]))
print(type(movies.loc[:,"director_name"]))
def shorten(col):
    """Abbreviate a column name: 'facebook_likes' -> 'fb', drop '_for_reviews'."""
    name = str(col)
    name = name.replace("facebook_likes", "fb")
    return name.replace("_for_reviews", "")
# Shorten verbose column names, then explore dtypes and column selection.
movies = movies.rename(columns = shorten)
print (movies.dtypes.value_counts())
print(movies.select_dtypes(include = "int64").head())
print(movies.select_dtypes(include = "float64").head())
print(movies.select_dtypes(include = "number").head())
print(movies.select_dtypes(include = ["int64","object"]).head())
print(movies.filter(like = 'fb').head())
print(movies.columns)
# Manually grouped column lists used to build a tidier column order.
cat_core = [
    "movie_title",
    "title_year",
    "content_rating",
    "genres",
]
cat_people = [
    "director_name",
    "actor_1_name",
    "actor_2_name",
    "actor_3_name",
]
cat_other = [
    "color",
    "country",
    "language",
    "plot_keywords",
    "movie_imdb_link",
]
cont_fb = [
    "director_fb",
    "actor_1_fb",
    "actor_2_fb",
    "actor_3_fb",
    "cast_total_fb",
    "movie_fb",
]
cont_finance = ["budget", "gross"]
cont_num_reviews = [
    "num_voted_users",
    "num_user",
    "num_critic",
]
cont_other = [
    "imdb_score",
    "duration",
    "aspect_ratio",
    "facenumber_in_poster",
]
new_col_order = (cat_core+cat_people+cat_other+cont_fb+cont_finance+cont_num_reviews+cont_other)
# Sanity check: the manual grouping must cover exactly the existing columns.
print(set(movies.columns) == set(new_col_order))
print(movies[new_col_order].head())
print(movies.describe())
print(movies.describe().T)
# percentiles are fractions: 1%, 30% and 100% (the max).
print(movies.describe(percentiles=[0.01,0.3,1]).T)
# skipna=False propagates NaN instead of ignoring it in the aggregation.
print(movies.min(skipna=False))
print(movies.isnull().head())
print(movies.isnull().sum().head())
print(movies.isnull().sum().sum())
# fillna("") makes max() well defined for object columns containing NaN.
print(movies.select_dtypes(["object"]).fillna("").max())
"parthi_3610@yahoo.co.in"
] | parthi_3610@yahoo.co.in |
8130572a3731398674da676762ba7c5a6251ef9f | 79a23f55af0e8d4898714b7e210cadc88033a390 | /website/views.py | eae6187e75e7acb17fd508e9063dcc70f47de1c6 | [] | no_license | luoyueyaoqing/forum-website | be9bc7802d83a58abc3f802469287b3c2e11f44b | d5fbb14adf3a3e75f70df8f6eed397665f9c2d84 | refs/heads/master | 2020-03-25T10:01:16.533106 | 2018-08-15T08:48:55 | 2018-08-15T08:48:55 | 143,681,923 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,756 | py | from django.shortcuts import render, HttpResponseRedirect, redirect, HttpResponse
from .models import User, Article, Plate, Comment
from django.contrib import messages
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
def index_register(request):
    """Register a new account; on success redirect to the login page."""
    if request.method == "POST":
        username = request.POST.get('username')
        password1 = request.POST.get('password1')
        password2 = request.POST.get('password2')
        if not User.objects.filter(username=username).exists():
            if password1 == password2:
                # create_user hashes the password before storing it.
                User.objects.create_user(username=username, password=password1)
                messages.success(request, '注册成功')
                return redirect(to='login')
            else:
                messages.warning(request, '两次密码输入不一致')
        else:
            messages.warning(request, "账号已存在")
    # GET, or a failed POST, falls through to re-render the form.
    return render(request, 'register.html')
def index_login(request):
    """Log a user in via AuthenticationForm, honouring an optional ?next= URL."""
    next_url = request.GET.get('next')
    if request.method == "POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            if next_url:
                # NOTE(review): next_url is redirected to unvalidated -- an
                # open-redirect risk; consider url_has_allowed_host_and_scheme.
                return redirect(next_url)
            return redirect('index')
        # Invalid credentials: reload the same URL (form errors are dropped).
        return HttpResponseRedirect(request.get_full_path())
    return render(request, 'login.html', {'next_url': next_url})
def index_logout(request):
    """End the current session and return to the home page."""
    logout(request)
    return redirect(to=index)
def index(request):
    """Home page: list every discussion plate (board)."""
    plates = Plate.objects.all()
    return render(request, 'index.html', {'plates': plates})
def articles(request, id):
    """List articles for plate *id*."""
    plate = Plate.objects.get(id=id)
    # NOTE(review): this fetches every Article, not just this plate's --
    # presumably the template filters via `plate`; otherwise use
    # Article.objects.filter(column=plate). Verify.
    articles = Article.objects.all()
    return render(request, 'articles.html', locals())
def detail(request, id):
    """Show a single article and its comments."""
    article = Article.objects.get(id=id)
    return render(request, 'detail.html', {'article': article})
@login_required
def add_article(request, id):
    """Create a new article in plate *id* (POST) or show the form (GET)."""
    if request.method == "POST":
        plate = Plate.objects.get(id=id)
        title = request.POST.get('title')
        content = request.POST.get('content')
        article = Article.objects.create(title=title, content=content, author=request.user, column=plate)
        return redirect(to='articles', id=id)
    else:
        return render(request, 'add_article.html')
@login_required
def comment(request, id):
    """Attach the posted comment to article *id* via the model helper."""
    article = Article.objects.get(id=id)
    content = request.POST.get('content')
    # comment_this encapsulates Comment creation on the Article model.
    article.comment_this(user=request.user, content=content)
    messages.success(request, '评论成功')
    return redirect(to='detail', id=article.id)
@login_required
def edit(request, id):
    """Edit article *id*: save on POST, show the pre-filled form on GET."""
    # NOTE(review): any logged-in user can edit any article -- no ownership
    # check against request.user; verify that is intended.
    article = Article.objects.get(id=id)
    if request.method == "POST":
        article.title = request.POST.get('title')
        article.content = request.POST.get('content')
        article.save()
        return redirect(to=detail, id=id)
    return render(request, 'edit.html', {'article': article})
@login_required
def del_article(request, id):
    """Delete article *id* and return to its plate's article list."""
    # NOTE(review): a state-changing delete on GET has no CSRF protection and
    # no ownership check -- consider requiring POST and a permission test.
    if request.method == "GET":
        article = Article.objects.get(id=id)
        column = article.column  # remember the plate before deleting
        article.delete()
        return redirect(to=articles, id=column.id)
@login_required
def del_comment(request, id):
    """Delete comment *id* and return to its article's detail page.

    NOTE(review): like del_article, this mutates state on GET with no CSRF
    protection or ownership check -- consider requiring POST.
    """
    if request.method == "GET":
        comment = Comment.objects.get(id=id)
        # Fix: resolve the related article BEFORE deleting; previously the
        # code read comment.article after delete(), relying on the stale
        # in-memory instance still carrying its foreign-key id.
        article = comment.article
        comment.delete()
        messages.success(request, "评论已删除")
        return redirect(to=detail, id=article.id)
"shmilyfae@163.com"
] | shmilyfae@163.com |
f5a8345f6e04ba694a886993167e2ae60ef1dcd4 | 8a6a96d487a0933293a563b04c24d08907608920 | /app/auth.py | 85c5eecc234752549804a0b229487c55dc6041ed | [] | no_license | shuxue051/ninja | 21ec07318f8c868e61659b977edce1ebdefc8e97 | ccb9724ff6329b82e18f839bb76246cae899fe99 | refs/heads/master | 2021-01-10T21:04:20.004674 | 2014-09-24T02:50:25 | 2014-09-24T02:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,941 | py | import os, datetime
from flask import current_app, Blueprint, render_template, abort, request, flash, redirect, url_for
from jinja2 import TemplateNotFound
from app import login_manager, flask_bcrypt
from flask.ext.login import (current_user, login_required, login_user, logout_user, confirm_login, fresh_login_required)
import forms
from User import User
# Blueprint holding all authentication routes; templates resolve from ./templates.
auth_flask_login = Blueprint('auth_flask_login', __name__, template_folder='templates')
@auth_flask_login.route("/login", methods=["GET", "POST"])
def login():
    """Authenticate against the stored bcrypt hash and start a session."""
    if request.method == "POST" and "email" in request.form:
        email = request.form["email"]
        userObj = User()
        user = userObj.get_by_email_w_password(email)
        # Require: user exists, password matches its bcrypt hash, account active.
        if user and flask_bcrypt.check_password_hash(user.password,request.form["password"]) and user.is_active():
            remember = request.form.get("remember", "no") == "yes"
            if login_user(user, remember=remember):
                flash("Logged in!")
                return redirect('/notes/create')
            else:
                flash("unable to log you in")
    # GET, or any failed attempt, re-renders the login form.
    return render_template("/auth/login.html")
#
# Route disabled - enable route to allow user registration.
#
@auth_flask_login.route("/register", methods=["GET","POST"])
def register():
    """Create a new account from the signup form, then log the user in.

    On a validation failure returns a terse error string; on a save failure
    (e.g. duplicate email) flashes a warning and re-renders the form.
    """
    registerForm = forms.SignupForm(request.form)
    current_app.logger.info(request.form)

    if request.method == 'POST' and not registerForm.validate():
        current_app.logger.info(registerForm.errors)
        return "uhoh registration error"

    elif request.method == 'POST' and registerForm.validate():
        email = request.form['email']
        # generate password hash
        password_hash = flask_bcrypt.generate_password_hash(request.form['password'])
        # prepare User (removed a leftover Python-2 debug `print user`, which
        # would have logged the user object, hash included)
        user = User(email, password_hash)
        try:
            user.save()
            if login_user(user, remember="no"):
                flash("Logged in!")
                return redirect('/')
            else:
                flash("unable to log you in")
        except Exception:
            # Narrowed from a bare "except:", which would also swallow
            # SystemExit / KeyboardInterrupt.
            flash("unable to register with that email address")
            current_app.logger.error("Error on registration - possible duplicate emails")

    # prepare registration form
    templateData = {
        'form' : registerForm
    }
    return render_template("/auth/register.html", **templateData)
@auth_flask_login.route("/reauth", methods=["GET", "POST"])
@login_required
def reauth():
if request.method == "POST":
confirm_login()
flash(u"Reauthenticated.")
return redirect(request.args.get("next") or '/admin')
templateData = {}
return render_template("/auth/reauth.html", **templateData)
@auth_flask_login.route("/logout")
@login_required
def logout():
logout_user()
flash("Logged out.")
return redirect('/login')
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login')
@login_manager.user_loader
def load_user(id):
if id is None:
redirect('/login')
user = User()
user.get_by_id(id)
if user.is_active():
return user
else:
return None | [
"shuxue051@gmail.com"
] | shuxue051@gmail.com |
7afc92885276c205066adf4255d7d46f5f5e60fb | 30f9e6a5f649b52b5d1a0ae606f44764038f6028 | /pai-fs/fsimpl/Errors.py | 0ad8850b971ccf904dbdae10d1b94181ddf962f7 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | unixcrh/pai | 97d6ffe6ff16eecd91a8e5222c7f4b33efddb1aa | 5c1836edc1db6e7d3b23ad2416fec309d48fc677 | refs/heads/master | 2021-04-15T09:12:25.198703 | 2018-03-26T09:40:39 | 2018-03-26T09:40:39 | 126,845,731 | 1 | 0 | MIT | 2018-03-26T14:59:16 | 2018-03-26T14:59:15 | null | UTF-8 | Python | false | false | 1,449 | py | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
class FsException(Exception):
def __init__(self, msg=str()):
self.msg = msg
super(FsException, self).__init__(self.msg)
class BadConnection(FsException):
pass
class Unauthorized(FsException):
pass
class FileNotFound(FsException):
pass
class PathNotEmpty(FsException):
pass | [
"v-yifxio@microsoft.com"
] | v-yifxio@microsoft.com |
f442cd7e453fbda6168780d39defdc6005872da8 | 576e36669d9012c244fe2f777314ba1a6d903277 | /ChefAndPizza_problem_PCJ18G.py | fe82a5fe7766e9a759c1bf080cb7195945214b8b | [] | no_license | OlhaKuzaka/algorithms_python | cf26658e1f70491eda4cb636269c0373aed9f76d | 14edacc8f0c0040fc95e9cae0e88515ac5293985 | refs/heads/master | 2020-03-26T10:37:50.018355 | 2018-08-15T05:03:23 | 2018-08-15T05:03:23 | 144,808,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | # All submissions for this problem are available.
# Chef's pizza is the tastiest pizza to exist, and the reason for that is his special, juicy homegrown tomatoes.
#
# Tomatoes can be grown in rectangular patches of any side lengths. However, Chef only has a limited amount of land.
#
# Consider the entire town of Chefville to be consisting of cells in a rectangular grid of positive coordinates.
#
# Chef own all cells (x,y)
# that satisfy x∗y≤N
# As an example if N=4
# , Chef owns the following cells:
#
# (1,1),(1,2),(1,3),(1,4),(2,1),(2,2),(3,1),(4,1)
# Chef can only grow tomatoes in rectangular patches consisting only of cells which belong to him.
# Also, if he uses a cell, he must use it entirely. He cannot use only a portion of it.
#
# Help Chef find the number of unique patches of rectangular land that he can grow tomatoes in!
# Since this number can be very large, output it modulo 1000000007
# .
#
# Input:
# The first line of the input contains T
# , the number of test cases.
# The next T
# lines of input contains one integer N
# .
def num_rectangles(x, y):
return x*(x+1) * y*(y+1)/4
if __name__ == '__main__':
n = 10000000000
total = 0
for i in range(1, n+1):
edge = int(n/i)
total += num_rectangles(i, edge)
if i-1 != 0:
total -= num_rectangles(i-1, edge)
print(total % 1000000007)
| [
"o.kuzka@gmail.com"
] | o.kuzka@gmail.com |
ed321ee3351dd686798f469165a774a3b522d685 | 8541f96e5f48780050a7995ed735c70d4f3c2642 | /test/test_validation.py | 5fb04f8372aea80988cd6820c8839dc4e3d5c76a | [
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | influence-usa/lobbying_federal_domestic | ede64752f81b6a8496a9f6b31b3f2a6c3a5f485e | 117a1991e01a203c2c181c1c6163b044c4d5c56f | refs/heads/master | 2021-01-02T23:02:57.140303 | 2016-11-30T22:47:46 | 2016-11-30T22:47:46 | 17,500,504 | 7 | 2 | null | 2014-05-08T21:32:29 | 2014-03-07T02:39:15 | Python | UTF-8 | Python | false | false | 3,903 | py | import json
import pytest
import validictory
from utils.validate import validate_uuid, validate_url, validate_email
@pytest.fixture
def uuid_fixture():
data = json.loads(''' {
"uuidInt": 117574695023396164616661330147169357159,
"uuidHex": "054a4828074e45f293a3a7ffbcd43bfb",
"uuidCanon": "054a4828-074e-45f2-93a3-a7ffbcd43bfb"
}''')
schema = {
"title": "My test schema",
"properties": {
"uuidHex": {
"format": "uuid_hex"
},
"uuidInt": {
"format": "uuid_int"
},
"uuidCanon": {
"format": "uuid_hex"
}
}
}
return {'data': data, 'schema': schema}
def test_validate_uuid(uuid_fixture):
uuid_data = uuid_fixture['data']
uuid_schema = uuid_fixture['schema']
formatdict = {"uuid_hex": validate_uuid, "uuid_int": validate_uuid}
# Make sure good data validates
validictory.validate(uuid_data, uuid_schema, format_validators=formatdict)
# Make sure bad data doesn't
with pytest.raises(validictory.ValidationError):
bad_data = uuid_data.copy()
bad_data['uuidHex'] = 'not_a_uuid'
validictory.validate(bad_data, uuid_schema,
format_validators=formatdict)
@pytest.fixture
def url_fixture():
data = json.loads(''' {
"test_http": "http://sunlightfoundation.com/api",
"test_https": "https://www.aal-usa.com",
"test_ftp": "ftp://ftp.fec.gov/FEC/"
}''')
schema = {
"title": "Url test schema",
"properties": {
"test_http": {
"format": "url_http"
},
"test_https": {
"format": "url_http"
},
"test_ftp": {
"format": "url_ftp"
},
}
}
return {'data': data, 'schema': schema}
def test_validate_url(url_fixture):
url_data = url_fixture['data']
url_schema = url_fixture['schema']
formatdict = {"url_http": validate_url, "url_ftp": validate_url}
# Make sure good data validates
validictory.validate(url_data, url_schema, format_validators=formatdict)
# Make sure bad data doesn't
bad_egs = zip(['test_http', 'test_https', 'test_ftp'],
['sunlightfoundation.com', 'https:/www.aal-usa.com',
'ftp:://ftp.fec.fgov/FEC/'])
print bad_egs
for field, bad_eg in bad_egs:
with pytest.raises(validictory.ValidationError):
bad_data = url_data.copy()
bad_data[field] = bad_eg
print bad_eg
validictory.validate(bad_data, url_schema,
format_validators=formatdict)
@pytest.fixture
def email_fixture():
data = json.loads(''' {
"email": "blannon@sunlightfoundation.com"
}''')
schema = {
"title": "Email test schema",
"properties": {
"email": {
"format": "email"
}
}
}
return {'data': data, 'schema': schema}
def test_validate_email(email_fixture):
email_data = email_fixture['data']
email_schema = email_fixture['schema']
formatdict = {"email": validate_email}
# Make sure good data validates
validictory.validate(email_data, email_schema,
format_validators=formatdict)
# Make sure bad data doesn't
with pytest.raises(validictory.FieldValidationError):
bad_data = email_data.copy()
bad_data['email'] = 'bobby bear at gmail.com'
validictory.validate(bad_data, email_schema,
format_validators=formatdict)
| [
"blannon@gmail.com"
] | blannon@gmail.com |
0866ea3ae0e864e61eef207ffc5ef2936d10b99e | 9e6bab4664f9275896b510209a83d5917b4746e1 | /apps/moca/migrations/0003_merchantprofile.py | 7d57bdec7486b538d9b0c963255d7ab013ee3543 | [] | no_license | manticarodrigo/moca | 4a25215715a11b0a85d0ce65388b35df8c799eed | d8cce8dc5c88e8dc944bb7c5c89e11a86e009e03 | refs/heads/master | 2023-08-03T08:25:52.856943 | 2019-12-04T22:20:00 | 2019-12-04T22:20:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # Generated by Django 2.1.10 on 2019-12-04 18:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('moca', '0002_device_auth_token'),
]
operations = [
migrations.CreateModel(
name='MerchantProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stripe_user_id', models.CharField(max_length=30)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"manticarodrigo@gmail.com"
] | manticarodrigo@gmail.com |
59a23d259eca385a7a14f82cc06da91203ce8f7c | 9e30d4e6e14c5a57d99f80b28b59ee42f23fe4eb | /Listas.py | c1caff2066d60c415e7c5c3aedf552bd7ca108f7 | [] | no_license | lukeencinas/EjerciciosExtraPython | 408b1208f526a11a4ae214fd8c15b3fd31216626 | e75378d8b0fb12684c5cfeba396a50d3ec030c1d | refs/heads/master | 2020-05-09T18:25:33.295651 | 2019-04-14T17:04:31 | 2019-04-14T17:04:31 | 181,341,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | #1) e1 = ["F. de Computadores", "Visualizacion","Fisica","Quimica", "Historia", "Lengua"]
# print(e1)
#2) e2 = ["F. de Computadores", "Visualizacion","Fisica","Quimica", "Historia", "Lengua"]
# for i in e2:
# print("Yo estudio: "+i)
#3) e3 = ["F. de Computadores", "Visualizacion","Fisica"]
# nota = []
# for i in e3:
# nota.append(input('¿Que has sacado en '+ i + '? '))
# for i in range(len(e3)):
# print ("En la asignatura ",e3[i]," Has sacado ",nota[i])
e4 = []
for i in range(6):
e4.append(int(input("¿Cuales son los numeros de la loteria? ")))
e4.sort()
print("Los numeros en orden son: "+ str(e4)) | [
"noreply@github.com"
] | lukeencinas.noreply@github.com |
c2f2d9873572b84a36f2345329ebd77f92a88cbe | 98e1716c1c3d071b2fedef0ac029eb410f55762c | /part15-statistical-thinking-1/No04-Bee-swarm-plot.py | 0b503f7631dcaaedd5a7afe2edbda8d651de8a7c | [] | no_license | iamashu/Data-Camp-exercise-PythonTrack | 564531bcf1dff119949cbb75e1fd63d89cb2779f | c72a4e806494f0e263ced9594597dc8882c2131c | refs/heads/master | 2020-07-22T00:23:12.024386 | 2019-04-12T09:24:42 | 2019-04-12T09:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | #Bee swarm plot
'''
Make a bee swarm plot of the iris petal lengths. Your x-axis should contain each of the three species, and the y-axis the petal lengths. A data frame containing the data is in your namespace as df.
For your reference, the code Justin used to create the bee swarm plot in the video is provided below:
_ = sns.swarmplot(x='state', y='dem_share', data=df_swing)
_ = plt.xlabel('state')
_ = plt.ylabel('percent of vote for Obama')
plt.show()
In the IPython Shell, you can use sns.swarmplot? or help(sns.swarmplot) for more details on how to make bee swarm plots using seaborn.
Instructions
In the IPython Shell, inspect the DataFrame df using df.head(). This will let you identify which column names you need to pass as the x and y keyword arguments in your call to sns.swarmplot().
Use sns.swarmplot() to make a bee swarm plot from the DataFrame containing the Fisher iris data set, df. The x-axis should contain each of the three species, and the y-axis should contain the petal lengths.
Label the axes.
Show your plot.
'''
# code
sns.swarmplot(x='species', y='petal length (cm)', data=df)
# Label the axes
plt.xlabel('species')
plt.ylabel('petal length (cm)')
# Show the plot
plt.show() | [
"beiran@hotmail.com"
] | beiran@hotmail.com |
f7bb5b008461cd4f51770163a3cf7e600d784405 | 81c5c07e1144747dc0e98f8dffb287a69be1eba7 | /score_mcc_bin.py | 686c4e86fcab42e4f12a69f6f893e59e1cfe31ee | [] | no_license | twistedmove/e2e_antispoofing | acbb9ec5bc4454c1698fc355d0c0fee3bf70006e | 686dfb515b2c568a1006136f56bbaad0419f0787 | refs/heads/master | 2020-09-07T10:41:12.024794 | 2019-10-06T19:28:19 | 2019-10-06T19:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,454 | py | import argparse
import numpy as np
import glob
import torch
import torch.nn.functional as F
import os
from kaldi_io import read_mat_scp
import model as model_
import scipy.io as sio
from utils import compute_eer_labels, set_device, read_trials, get_freer_gpu
def prep_feats(data_):
#data_ = ( data_ - data_.mean(0) ) / data_.std(0)
features = data_.T
if features.shape[1]<50:
mul = int(np.ceil(50/features.shape[1]))
features = np.tile(features, (1, mul))
features = features[:, :50]
return torch.from_numpy(features[np.newaxis, np.newaxis, :, :]).float()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute scores for mcc model')
parser.add_argument('--path-to-data', type=str, default='./data/feats.scp', metavar='Path', help='Path to input data')
parser.add_argument('--trials-path', type=str, default='./data/trials', metavar='Path', help='Path to trials file')
parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for file containing model')
parser.add_argument('--out-path', type=str, default='./out.txt', metavar='Path', help='Path to output hdf file')
parser.add_argument('--model', choices=['lstm', 'resnet', 'resnet_pca', 'lcnn_9', 'lcnn_29', 'lcnn_9_pca', 'lcnn_29_pca', 'lcnn_9_prodspec', 'lcnn_9_icqspec', 'lcnn_9_CC', 'lcnn_29_CC', 'resnet_CC'], default='lcnn_9', help='Model arch')
parser.add_argument('--n-classes', type=int, default=-1, metavar='N', help='Number of classes for the mcc case (default: binary classification)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--no-output-file', action='store_true', default=False, help='Disables writing scores into out file')
parser.add_argument('--no-eer', action='store_true', default=False, help='Disables computation of EER')
parser.add_argument('--eval', action='store_true', default=False, help='Enables eval trials reading')
parser.add_argument('--ncoef', type=int, default=90, metavar='N', help='Number of cepstral coefs (default: 90)')
parser.add_argument('--init-coef', type=int, default=0, metavar='N', help='First cepstral coefs (default: 0)')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
if args.cp_path is None:
raise ValueError('There is no checkpoint/model path. Use arg --cp-path to indicate the path!')
if os.path.isfile(args.out_path):
os.remove(args.out_path)
print(args.out_path + ' Removed')
print('Cuda Mode is: {}'.format(args.cuda))
print('Selected model is: {}'.format(args.model))
if args.cuda:
device = get_freer_gpu()
if args.model == 'lstm':
model = model_.cnn_lstm(nclasses=args.n_classes)
elif args.model == 'resnet':
model = model_.ResNet(nclasses=args.n_classes)
elif args.model == 'resnet_pca':
model = model_.ResNet_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_9':
model = model_.lcnn_9layers(nclasses=args.n_classes)
elif args.model == 'lcnn_29':
model = model_.lcnn_29layers_v2(nclasses=args.n_classes)
elif args.model == 'lcnn_9_pca':
model = model_.lcnn_9layers_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_29_pca':
model = model_.lcnn_29layers_v2_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_9_icqspec':
model = model_.lcnn_9layers_icqspec(nclasses=args.n_classes)
elif args.model == 'lcnn_9_prodspec':
model = model_.lcnn_9layers_prodspec(nclasses=args.n_classes)
elif args.model == 'lcnn_9_CC':
model = model_.lcnn_9layers_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
elif args.model == 'lcnn_29_CC':
model = model_.lcnn_29layers_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
elif args.model == 'resnet_CC':
model = model_.ResNet_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
print('Loading model')
ckpt = torch.load(args.cp_path, map_location = lambda storage, loc: storage)
model.load_state_dict(ckpt['model_state'], strict=False)
model.eval()
print('Model loaded')
print('Loading data')
if args.eval:
test_utts = read_trials(args.trials_path, eval_=args.eval)
else:
test_utts, attack_type_list, label_list = read_trials(args.trials_path, eval_=args.eval)
data = { k:m for k,m in read_mat_scp(args.path_to_data) }
print('Data loaded')
print('Start of scores computation')
score_list = []
with torch.no_grad():
for i, utt in enumerate(test_utts):
print('Computing score for utterance '+ utt)
feats = prep_feats(data[utt])
try:
if args.cuda:
feats = feats.to(device)
model = model.to(device)
score = 1.-F.softmax(model.forward(feats), dim=1)[:,1:].sum().item()
except:
feats = feats.cpu()
model = model.cpu()
score = 1.-F.softmax(model.forward(feats), dim=1)[:,1:].sum().item()
score_list.append(score)
print('Score: {}'.format(score_list[-1]))
if not args.no_output_file:
print('Storing scores in output file:')
print(args.out_path)
with open(args.out_path, 'w') as f:
if args.eval:
for i, utt in enumerate(test_utts):
f.write("%s" % ' '.join([utt, str(score_list[i])+'\n']))
else:
for i, utt in enumerate(test_utts):
f.write("%s" % ' '.join([utt, attack_type_list[i], label_list[i], str(score_list[i])+'\n']))
if not args.no_eer and not args.eval:
print('EER: {}'.format(compute_eer_labels(label_list, score_list)))
print('All done!!')
| [
"joaomonteirof@gmail.com"
] | joaomonteirof@gmail.com |
158c8395e7b37a739bbe7438d2a3fb3853747fb2 | 0b20f4ce14b9ff77c84cedbecbaa29831335920d | /tests/cloudformation/file_formats/test_yaml.py | 76149f86216a57acc3de965d65a22daae34bad5a | [
"Apache-2.0"
] | permissive | sergesec488/checkov | 219c1b3864ab4f70b39a4cd79b041e98f3145364 | 56008e1c531b3626f14716067731be6e673040bc | refs/heads/master | 2023-04-10T12:26:49.749864 | 2021-02-26T18:36:52 | 2021-02-26T18:40:58 | 342,883,133 | 0 | 1 | Apache-2.0 | 2023-03-30T13:31:25 | 2021-02-27T15:01:08 | null | UTF-8 | Python | false | false | 681 | py | import os
import unittest
from checkov.cloudformation.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestYamlFileFormat(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/yaml"
report = runner.run(root_folder=test_files_dir)
summary = report.get_summary()
self.assertEqual(summary['passed'], 1)
self.assertEqual(summary['failed'], 0)
self.assertEqual(summary['skipped'], 0)
self.assertEqual(summary['parsing_errors'], 0)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | sergesec488.noreply@github.com |
1161c6ec01e8bf8124199a123fc850feb16f7924 | 27c94d7e040902d3cdadd5862b15e67ec2ee4b6e | /exps/NAS-Bench-201-algos/DARTS-V1.py | 67441af82a7bc2f760fa028163eb4ca9c8887773 | [
"MIT"
] | permissive | D-X-Y/AutoDL-Projects | 8a0779a7710d809af2b052787928d8d34c14d0d9 | f46486e21b71ae6459a700be720d7648b5429569 | refs/heads/main | 2023-08-13T10:53:49.550889 | 2022-04-24T22:18:16 | 2022-04-24T22:18:16 | 168,538,768 | 989 | 197 | MIT | 2022-04-24T22:16:21 | 2019-01-31T14:30:50 | Python | UTF-8 | Python | false | false | 15,785 | py | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #
########################################################
# DARTS: Differentiable Architecture Search, ICLR 2019 #
########################################################
import sys, time, random, argparse
from copy import deepcopy
import torch
from pathlib import Path
from xautodl.config_utils import load_config, dict2config, configure2str
from xautodl.datasets import get_datasets, get_nas_search_loaders
from xautodl.procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from xautodl.utils import get_model_infos, obtain_accuracy
from xautodl.log_utils import AverageMeter, time_string, convert_secs2time
from xautodl.models import get_cell_based_tiny_net, get_search_spaces
from nas_201_api import NASBench201API as API
def search_func(
xloader,
network,
criterion,
scheduler,
w_optimizer,
a_optimizer,
epoch_str,
print_freq,
logger,
gradient_clip,
):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.train()
end = time.time()
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
xloader
):
scheduler.update(None, 1.0 * step / len(xloader))
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# update the weights
w_optimizer.zero_grad()
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets)
base_loss.backward()
if gradient_clip > 0:
torch.nn.utils.clip_grad_norm_(network.parameters(), gradient_clip)
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(
logits.data, base_targets.data, topk=(1, 5)
)
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update(base_prec1.item(), base_inputs.size(0))
base_top5.update(base_prec5.item(), base_inputs.size(0))
# update the architecture-weight
a_optimizer.zero_grad()
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
a_optimizer.step()
# record
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = (
"*SEARCH* "
+ time_string()
+ " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
)
Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
batch_time=batch_time, data_time=data_time
)
Wstr = "Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
loss=base_losses, top1=base_top1, top5=base_top5
)
Astr = "Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
loss=arch_losses, top1=arch_top1, top5=arch_top5
)
logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
return base_losses.avg, base_top1.avg, base_top5.avg
def valid_func(xloader, network, criterion):
data_time, batch_time = AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.eval()
end = time.time()
with torch.no_grad():
for step, (arch_inputs, arch_targets) in enumerate(xloader):
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# prediction
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
# record
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
return arch_losses.avg, arch_top1.avg, arch_top5.avg
def main(xargs):
assert torch.cuda.is_available(), "CUDA is not available."
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(xargs.workers)
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
# config_path = 'configs/nas-benchmark/algos/DARTS.config'
config = load_config(
xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
)
search_loader, _, valid_loader = get_nas_search_loaders(
train_data,
valid_data,
xargs.dataset,
"configs/nas-benchmark/",
config.batch_size,
xargs.workers,
)
logger.log(
"||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
)
)
logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
search_space = get_search_spaces("cell", xargs.search_space_name)
if xargs.model_config is None:
model_config = dict2config(
{
"name": "DARTS-V1",
"C": xargs.channel,
"N": xargs.num_cells,
"max_nodes": xargs.max_nodes,
"num_classes": class_num,
"space": search_space,
"affine": False,
"track_running_stats": bool(xargs.track_running_stats),
},
None,
)
else:
model_config = load_config(
xargs.model_config,
{
"num_classes": class_num,
"space": search_space,
"affine": False,
"track_running_stats": bool(xargs.track_running_stats),
},
None,
)
search_model = get_cell_based_tiny_net(model_config)
logger.log("search-model :\n{:}".format(search_model))
w_optimizer, w_scheduler, criterion = get_optim_scheduler(
search_model.get_weights(), config
)
a_optimizer = torch.optim.Adam(
search_model.get_alphas(),
lr=xargs.arch_learning_rate,
betas=(0.5, 0.999),
weight_decay=xargs.arch_weight_decay,
)
logger.log("w-optimizer : {:}".format(w_optimizer))
logger.log("a-optimizer : {:}".format(a_optimizer))
logger.log("w-scheduler : {:}".format(w_scheduler))
logger.log("criterion : {:}".format(criterion))
flop, param = get_model_infos(search_model, xshape)
# logger.log('{:}'.format(search_model))
logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
if xargs.arch_nas_dataset is None:
api = None
else:
api = API(xargs.arch_nas_dataset)
logger.log("{:} create API = {:} done".format(time_string(), api))
last_info, model_base_path, model_best_path = (
logger.path("info"),
logger.path("model"),
logger.path("best"),
)
network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
if last_info.exists(): # automatically resume from previous checkpoint
logger.log(
"=> loading checkpoint of the last-info '{:}' start".format(last_info)
)
last_info = torch.load(last_info)
start_epoch = last_info["epoch"]
checkpoint = torch.load(last_info["last_checkpoint"])
genotypes = checkpoint["genotypes"]
valid_accuracies = checkpoint["valid_accuracies"]
search_model.load_state_dict(checkpoint["search_model"])
w_scheduler.load_state_dict(checkpoint["w_scheduler"])
w_optimizer.load_state_dict(checkpoint["w_optimizer"])
a_optimizer.load_state_dict(checkpoint["a_optimizer"])
logger.log(
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
last_info, start_epoch
)
)
else:
logger.log("=> do not find the last-info file : {:}".format(last_info))
start_epoch, valid_accuracies, genotypes = (
0,
{"best": -1},
{-1: search_model.genotype()},
)
# start training
start_time, search_time, epoch_time, total_epoch = (
time.time(),
AverageMeter(),
AverageMeter(),
config.epochs + config.warmup,
)
for epoch in range(start_epoch, total_epoch):
w_scheduler.update(epoch, 0.0)
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
)
epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
logger.log(
"\n[Search the {:}-th epoch] {:}, LR={:}".format(
epoch_str, need_time, min(w_scheduler.get_lr())
)
)
search_w_loss, search_w_top1, search_w_top5 = search_func(
search_loader,
network,
criterion,
w_scheduler,
w_optimizer,
a_optimizer,
epoch_str,
xargs.print_freq,
logger,
xargs.gradient_clip,
)
search_time.update(time.time() - start_time)
logger.log(
"[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
)
)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
valid_loader, network, criterion
)
logger.log(
"[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
)
)
# check the best accuracy
valid_accuracies[epoch] = valid_a_top1
if valid_a_top1 > valid_accuracies["best"]:
valid_accuracies["best"] = valid_a_top1
genotypes["best"] = search_model.genotype()
find_best = True
else:
find_best = False
genotypes[epoch] = search_model.genotype()
logger.log(
"<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
)
# save checkpoint
save_path = save_checkpoint(
{
"epoch": epoch + 1,
"args": deepcopy(xargs),
"search_model": search_model.state_dict(),
"w_optimizer": w_optimizer.state_dict(),
"a_optimizer": a_optimizer.state_dict(),
"w_scheduler": w_scheduler.state_dict(),
"genotypes": genotypes,
"valid_accuracies": valid_accuracies,
},
model_base_path,
logger,
)
last_info = save_checkpoint(
{
"epoch": epoch + 1,
"args": deepcopy(args),
"last_checkpoint": save_path,
},
logger.path("info"),
logger,
)
if find_best:
logger.log(
"<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
epoch_str, valid_a_top1
)
)
copy_checkpoint(model_base_path, model_best_path, logger)
with torch.no_grad():
# logger.log('arch-parameters :\n{:}'.format( nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu() ))
logger.log("{:}".format(search_model.show_alphas()))
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
logger.log("\n" + "-" * 100)
logger.log(
"DARTS-V1 : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
total_epoch, search_time.sum, genotypes[total_epoch - 1]
)
)
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotypes[total_epoch - 1], "200")))
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("DARTS first order")
parser.add_argument("--data_path", type=str, help="Path to dataset")
parser.add_argument(
"--dataset",
type=str,
choices=["cifar10", "cifar100", "ImageNet16-120"],
help="Choose between Cifar10/100 and ImageNet-16.",
)
# channels and number-of-cells
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument(
"--track_running_stats",
type=int,
choices=[0, 1],
help="Whether use track_running_stats or not in the BN layer.",
)
parser.add_argument("--config_path", type=str, help="The config path.")
parser.add_argument(
"--model_config",
type=str,
help="The path of the model configuration. When this arg is set, it will cover max_nodes / channels / num_cells.",
)
parser.add_argument("--gradient_clip", type=float, default=5, help="")
# architecture leraning rate
parser.add_argument(
"--arch_learning_rate",
type=float,
default=3e-4,
help="learning rate for arch encoding",
)
parser.add_argument(
"--arch_weight_decay",
type=float,
default=1e-3,
help="weight decay for arch encoding",
)
# log
parser.add_argument(
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, help="manual seed")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = random.randint(1, 100000)
main(args)
| [
"280835372@qq.com"
] | 280835372@qq.com |
aaab34fc3d46736a9c6abdb2195fc43a0ba0f6f4 | 75f48cb18eb83fd4f9fd1c7dd0543fcaaf781342 | /members/migrations/0029_profile.py | e7b48cc47f392bbf73ff5bb36412d5885b6a87dc | [] | no_license | trouvaay/trouvaay | f28a879e60de6212cbf30904c4471158c3907ebb | 5440928238428c9e60be61e75910ad8d78866426 | refs/heads/master | 2021-03-27T10:11:09.602842 | 2015-06-05T22:06:28 | 2015-06-05T22:06:28 | 27,945,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import localflavor.us.models
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the ``Profile`` model.

    Profile is a one-to-one extension of the auth user model carrying a
    US-formatted phone number (localflavor ``PhoneNumberField``); the
    reverse accessor on the user is ``profile``.
    """

    dependencies = [
        ('members', '0028_auto_20150220_1922'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('phone', localflavor.us.models.PhoneNumberField(default=b'', max_length=20, blank=True)),
                ('authuser', models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"sergey@lanasoft.net"
] | sergey@lanasoft.net |
15a3b3cd280903839a7706abb9e05c8e6621ad2a | 837877c72a803fc62eea396e466dff36cffa5028 | /dnnlib/submission/internal/__init__.py | 52a7fce687d3906d061a6a76834e222143555b9b | [] | no_license | tantantetetao/StyleGANv2-Re-Implementation | 55d6c9e98e2d98450734937133348acb197bf5d8 | bfa0c410073863ab199cc5e5fbafb3dd2da35ea9 | refs/heads/master | 2022-12-12T23:32:53.423432 | 2020-09-14T07:26:27 | 2020-09-14T07:26:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
# Cheng-Bin Jin re-implementation.
from . import local | [
"jinchengbin@huya.com"
] | jinchengbin@huya.com |
c03967857b3abb3a4db4df537c2c4342ac393b68 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scatter/marker/line/_width.py | 108770c589b4e4605b6ff605e20647ef337325b7 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 465 | py | import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scatter.marker.line.width`` property.

    Accepts non-negative numbers (``min=0``) or arrays of them
    (``array_ok=True``); edits are applied with the ``'style'`` edit type.
    """

    def __init__(
        self, plotly_name='width', parent_name='scatter.marker.line', **kwargs
    ):
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=True,
            edit_type='style',
            min=0,
            role='style',
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
e6ce7ed7be75cb36861a415589dd9916147d2c3b | 17ed1b63dcc519c1ddf91d500e2b1e80c8cb338f | /Fixed_Income_Project/pricing_2.py | 82ee24b37e9daa928d8f3fa6f694ec9230ef7465 | [] | no_license | khorwei01/reinforcement | 6834187814901b216317c2e2e29eed873a43b242 | cfd2a151d1ecfae9f00ed658b45304ef0ff43046 | refs/heads/master | 2021-11-01T00:15:05.047005 | 2021-10-18T07:43:16 | 2021-10-18T07:43:16 | 238,147,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,703 | py | # -*- coding: utf-8 -*-
"""
FI part 4
@author: Archer
"""
import numpy as np
import pandas as pd
from enum import Enum
from scipy.stats import norm
from scipy.integrate import quad
from scipy.misc import derivative
from math import log
from scipy.optimize import fsolve
# discounting methods
from part1 import OISdiscountFactorOutput, Fswapr
from part2 import getSABR
def sigmanN(n,N,K,m):
    """SABR implied vol for strike K of a swaption expiring at year n on
    the swap running from year n to year N (m payments per year).

    NOTE(review): after ``N = N-n`` below, N holds the swap *tenor*.  That
    makes ``ParSwapRate(n, N, m)`` receive the tenor where other call
    sites pass an absolute end year, ``T = N`` use the tenor (not the
    expiry n) as the option maturity, and ``getSABR(n, N-n)`` receive
    ``tenor - n`` — confirm all three against part1/part2 conventions.
    """
    N = N-n
    S = ParSwapRate(n,N,m)
    T = N
    alpha, rho, nu = getSABR(n, N-n)
    # beta is pinned to 0.9 here, overriding the `beta` slot in SABR()
    sigma_sabr = SABR(S, K, T, alpha, 0.9, rho, nu)
    return sigma_sabr
def SABR(F, K, T, alpha, beta, rho, nu):
    """Hagan's lognormal SABR implied-volatility approximation.

    F is the forward, K the strike, T the option expiry in years; alpha,
    beta, rho and nu are the SABR parameters.  Returns the Black implied
    volatility for the (F, K, T) option.
    """
    X = K
    # below this gap the option is treated as at-the-money-forward, where
    # the general formula is 0/0 and the dedicated ATM expansion applies
    if abs(F - K) < 1e-12:
        a1 = (((1 - beta)**2)/24)*alpha*alpha/(F**(2 - 2*beta))
        a2 = 0.25*rho*beta*nu*alpha/(F**(1 - beta))
        a3 = ((2 - 3*rho*rho)/24)*nu*nu
        return alpha*(1 + (a1 + a2 + a3)*T)/(F**(1-beta))
    # away from the money: full Hagan expansion with the z / x(z) correction
    z = (nu/alpha)*((F*X)**(0.5*(1-beta)))*log(F/X)
    zhi = log((((1 - 2*rho*z + z*z)**0.5) + z - rho)/(1 - rho))
    a1 = (((1 - beta)**2)/24)*((alpha*alpha)/((F*X)**(1 - beta)))
    a2 = 0.25*rho*beta*nu*alpha/((F*X)**((1 - beta)/2))
    a3 = ((2 - 3*rho*rho)/24)*nu*nu
    top = alpha*(1 + (a1 + a2 + a3)*T)*z
    b1 = ((1 - beta)**2/24)*(log(F/X))**2
    b2 = (((1 - beta)**4)/1920)*((log(F/X))**4)
    bottom = ((F*X)**((1 - beta)/2))*(1 + b1 + b2)*zhi
    return top/bottom
# import from part1 for reference
def OISref(n):
    """OIS discount factor for year n; thin alias for part1's curve output."""
    return OISdiscountFactorOutput(n)
#OISref(3)
def ParSwapRate(n,N,m=2):
    '''Forward par swap rate at time 0 for a (collateralized LIBOR) swap
    running from year n to year N with m payments per year.

    Delegates to part1's Fswapr(start, tenor); the `m` argument is
    accepted for interface symmetry but not used by Fswapr here.
    '''
    return Fswapr(n,N-n)
#ParSwapRate(5,15)
class PayoffType(Enum):
    # Option payoff direction: Call = payer-style, Put = receiver-style.
    Call = 0
    Put = 1
def IRR(S,Tenor,m=2):
    '''Annuity (IRR) factor: sum of cash-flow discount factors at flat rate S.

    Discounts Tenor*m payments of size 1/m at the per-period rate S/m,
    i.e. sum_{i=1}^{Tenor*m} (1/m) / (1 + S/m)**i.  By convention the
    underlying swap's first payment falls one period after the start.
    '''
    per_period = 1 + S/m
    total = 0.0
    for i in range(1, Tenor*m + 1):
        total += 1/m/per_period**i
    return total
def IRR_1d(S,Tenor,m=2):
    '''Analytic first derivative of IRR(S, Tenor, m) with respect to S.'''
    per_period = 1 + S/m
    total = 0.0
    for i in range(1, Tenor*m + 1):
        # d/dS of (1/m)(1+S/m)^-i  =  -i/m**2 * (1+S/m)^-(i+1)
        total -= i*(1/m**2)/per_period**(i + 1)
    return total
def IRR_2d(S,Tenor,m=2):
    '''Analytic second derivative of IRR(S, Tenor, m) with respect to S.'''
    per_period = 1 + S/m
    total = 0.0
    for i in range(1, Tenor*m + 1):
        # d2/dS2 of (1/m)(1+S/m)^-i  =  i(i+1)/m**3 * (1+S/m)^-(i+2)
        total += (i*(i + 1))*(1/m**3)/per_period**(i + 2)
    return total
def Black76(S, K, r, sigma, T, PayoffType=PayoffType.Call):
    '''Black-Scholes style option price.

    NOTE: a true Black-76 formulation would use F = S*np.exp(r*T); here
    the spot form is used directly (call sites pass r = 0, so they agree).
    The parameter name shadows the PayoffType enum; member-to-member
    attribute access (``PayoffType.Call`` on a member) relies on legacy
    Enum behavior.
    '''
    vol_sqrt_t = sigma*np.sqrt(T)
    d1 = (np.log(S/K) + (r + sigma**2/2)*T)/vol_sqrt_t
    d2 = d1 - vol_sqrt_t
    call = S*norm.cdf(d1) - K*np.exp(-r*T)*norm.cdf(d2)
    if PayoffType == PayoffType.Put:
        # put via put-call parity, exactly as in the original dict form
        return call - S + K*np.exp(-r*T)
    return call
def SwaptionPrice(Df, S0, K, swapTenor, n, PayoffType=PayoffType.Call, m=2):
    '''European swaption price: Df * annuity(S0) * Black option value.

    Df discounts from Tn (swap start) back to T0; S0 is the forward par
    swap rate observed at T0 for the swap paying from Tn to Tn+swapTenor;
    n is the option expiry in years; sigmanN supplies the SABR vol.
    '''
    annuity = IRR(S0, swapTenor, m)
    vol = sigmanN(n=n, N=n + swapTenor, K=K, m=m)
    option_value = Black76(S0, K, 0, vol, n, PayoffType=PayoffType)  # r taken as 0
    return Df * annuity * option_value
def testSwaptionPricer():
    '''Smoke test: price a 5y-into-10y payer and receiver swaption.'''
    Df = 1        # discount factor
    K = 0.1       # strike
    expiry = 5
    tenor = 10
    S0 = ParSwapRate(expiry, expiry + tenor)  # forward par swap rate at T0
    for label, ptype in (('Call', PayoffType.Call), ('Put', PayoffType.Put)):
        price = SwaptionPrice(Df, S0, K, tenor, expiry, PayoffType=ptype)
        print('Swaption price test-%s:' % label, price)
def CMSPrice(payoff, g_d1, g_d2, swapTenor, n, m=2):
    '''PV of a CMS-style payoff by static replication with swaptions.

    payoff (g) is a function of K (the par swap rate at fixing); g_d1 and
    g_d2 are its first and second derivatives in K.  swapTenor is the
    underlying swap tenor in years, n the fixing time, m the payment
    frequency.  Returns the PV discounted to time 0.

    The price is decomposed as Df*g(S0), plus an h'(S0) delta term, plus
    integrals of h''(K) against receiver/payer swaption prices, where
    h = g / IRR (the annuity-deflated payoff).
    '''
    # only integer expiries / tenors are supported by the curve helpers
    if (n%1 != 0) or (swapTenor%1 != 0):
        print('Do not support float numbers for now.')
        return 0
    S0 = ParSwapRate(n,n+swapTenor,m)
    Df = OISref(n)
    # analytic annuity (IRR) and its K-derivatives — avoids numerical
    # differentiation inside the replication integrals
    IRR_cms = lambda K: IRR(K,swapTenor)
    IRR_d1_cms = lambda K: IRR_1d(K,swapTenor)
    IRR_d2_cms = lambda K: IRR_2d(K,swapTenor)
    # h = payoff / IRR; only its derivatives are needed below
    # h = lambda K: payoff(K)/IRR_cms(K) # not used
    h_d1 = lambda K: ( g_d1(K) / IRR_cms(K)
                      - IRR_d1_cms(K) * payoff(K) / IRR_cms(K)**2 )
    h_d2 = lambda K: ( g_d2(K) / IRR_cms(K)
                      - IRR_d2_cms(K) * payoff(K) / (IRR_cms(K)**2)
                      - 2 * IRR_d1_cms(K) * g_d1(K) / (IRR_cms(K)**2)
                      + 2 * IRR_d1_cms(K)**2 * payoff(K)/(IRR_cms(K)**3) )
    # replication instruments struck at K (Df discounts from Tn to T0);
    # in the payoff functions K plays the role of the par swap rate
    swaption_payer = lambda K : SwaptionPrice(Df, S0, K, swapTenor, n, PayoffType=PayoffType.Call, m=m)
    swaption_receiver = lambda K : SwaptionPrice(Df, S0, K, swapTenor, n, PayoffType=PayoffType.Put, m=m)
    # integrands of the two replication integrals
    quad1 = lambda K : h_d2(K)*swaption_receiver(K)
    quad2 = lambda K : h_d2(K)*swaption_payer(K)
    # sum parts
    # p3 and p4 are integrals, which can have divergent results
    p1 = Df * payoff(S0)
    p2 = h_d1(S0)*(swaption_payer(S0)-swaption_receiver(S0))
    p3 = quad(quad1, 0, S0)[0]
    p4 = quad(quad2, S0, np.inf)[0]
    return p1 + p2 + p3 + p4
def CMSCapletPrice(payoff, g_d1, g_d2, swapTenor, n, capletstrike, m=2):
    '''PV of a caplet on a CMS-style payoff (payoff floored at zero above
    ``capletstrike``) by static replication with swaptions.

    Same replication machinery as CMSPrice, but the integration region and
    the delta term are truncated at ``capletstrike``; the two branches
    below handle the strike sitting below vs. above the forward S0.
    '''
    # only integer expiries / tenors are supported by the curve helpers
    if (n%1 != 0) or (swapTenor%1 != 0):
        print('Do not support float numbers for now.')
        return 0
    S0 = ParSwapRate(n,n+swapTenor,m)
    Df = OISref(n)
    # analytic annuity (IRR) and its K-derivatives
    IRR_cms = lambda K: IRR(K,swapTenor )
    IRR_d1_cms = lambda K: IRR_1d(K,swapTenor )
    IRR_d2_cms = lambda K: IRR_2d(K,swapTenor )
    # h = payoff / IRR; only its derivatives are needed below
    # h = lambda K: payoff(K)/IRR_cms(K) # not used
    h_d1 = lambda K: ( g_d1(K) / IRR_cms(K)
                      - IRR_d1_cms(K) * payoff(K) / IRR_cms(K)**2 )
    h_d2 = lambda K: ( g_d2(K) / IRR_cms(K)
                      - IRR_d2_cms(K) * payoff(K) / (IRR_cms(K)**2)
                      - 2 * IRR_d1_cms(K) * g_d1(K) / (IRR_cms(K)**2)
                      + 2 * IRR_d1_cms(K)**2 * payoff(K)/(IRR_cms(K)**3) )
    # replication instruments struck at K (Df discounts from Tn to T0)
    swaption_payer = lambda K : SwaptionPrice(Df, S0, K, swapTenor, n, PayoffType=PayoffType.Call, m=m)
    swaption_receiver = lambda K : SwaptionPrice(Df, S0, K, swapTenor, n, PayoffType=PayoffType.Put, m=m)
    # integrands of the two replication integrals
    quad1 = lambda K : h_d2(K)*swaption_receiver(K)
    quad2 = lambda K : h_d2(K)*swaption_payer(K)
    # sum parts; p3 and p4 are integrals, which can have divergent results
    if capletstrike < S0:
        # strike below forward: intrinsic part plus both integral wings
        p1 = Df * payoff(S0)
        p2 = h_d1(capletstrike)*swaption_receiver(capletstrike)
        p3 = quad(quad1, capletstrike, S0)[0]
        p4 = quad(quad2, S0, np.inf)[0]
    else:
        # strike above forward: only the payer wing above the strike remains
        p1 = 0
        p2 = h_d1(capletstrike)*swaption_payer(capletstrike)
        p3 = 0
        p4 = quad(quad2, capletstrike, np.inf)[0]
    # debug diagnostics left in by the author (pcheck is not part of the PV)
    pcheck = quad(quad1, 0, capletstrike)[0]
    print('Caplet part2 and 3', [p2,pcheck])
    print('h1d',h_d1(capletstrike),'\nh2d',h_d2(capletstrike))
    print('rec',round(swaption_receiver(capletstrike),8))
    return p1 + p2 + p3 + p4
class backup():
    """Dead scratch code kept for reference — never instantiated or used.

    NOTE(review): the lambdas reference names (IRR_cms, derivative) that
    are not bound in this class scope; calling them would raise NameError.
    The class body itself executes without error because the lambdas are
    never invoked.
    """
    # derivative by scipy's numerical `derivative` (vs the analytic IRR_1d/2d)
    IRR_d1_cms = lambda K: derivative(IRR_cms, K, dx=0.001 ,n=1)
    IRR_d2_cms = lambda K: derivative(IRR_cms, K, dx=0.001 ,n=2)
    # identity payoff for a plain CMS leg paying K
    payoff = lambda K: K
    g_d1 = lambda K: 1
    g_d2 = lambda K: 0
def peng():
    '''Price a plain CMS leg (5y fixing on a 10y swap) paying the swap rate.'''
    freq = 2
    start, end = 5, 15
    tenor = end - start
    # identity payoff g(K) = K and its analytic derivatives
    payoff = lambda K: K
    g_d1 = lambda K: 1
    g_d2 = lambda K: 0
    pv = CMSPrice(payoff, g_d1, g_d2, tenor, start, freq)
    print('Q1: CMS PV:', pv)
    df = OISref(start)
    print('CMS rate:', pv / df)
def part4(n=5, N=15):
    '''Part 4: price the decompounded CMS payoff and the matching caplet.

    Payoff at fixing: S**(1/4) - 0.04**(1/2), priced by static replication
    (CMSPrice), then the caplet version (payoff floored at zero) for
    comparison.  n is the fixing year, N the swap end year.
    '''
    root_p = 4
    root_q = 2
    tenor = N - n
    freq = 2
    # payoff g(K) = K**(1/p) - 0.04**(1/q) and its analytic K-derivatives
    payoff = lambda K: K**(1/root_p) - 0.04**(1/root_q)
    g_d1 = lambda K: (1/root_p)*K**(1/root_p - 1)
    g_d2 = lambda K: (1/root_p)*(1/root_p - 1)*K**(1/root_p - 2)
    PV = CMSPrice(payoff, g_d1, g_d2, tenor, n, freq)
    print('Q1: CMS PV:',round(PV,8))
    Df = OISref(n)
    print('CMS rate:',round(PV/Df,8))
    S0 = ParSwapRate(n, n + tenor)
    print('S0',S0)
    # the caplet strike solves g(K) = 0
    capletstrike = fsolve(payoff, 0)[0]
    print('caplet strike', capletstrike)
    PVop = CMSCapletPrice(payoff, g_d1, g_d2, tenor, n, capletstrike, freq)
    print('Q2: CMS Caplet PV:',round(PVop,8))
    print('difference between Option - CMS', PVop-PV)
print('difference between Option - CMS', PVop-PV)
# Script entry point: run the part-4 CMS / caplet pricing example.
if __name__ == '__main__':
    # so far cannot support n and N flt numbers
    part4(n=5, N=15)
| [
"noreply@github.com"
] | khorwei01.noreply@github.com |
7556cf1918258fe40c2aabedcaf970d5535e3a6b | 7c926109cda8e59cef6abf097774c1d64c7f77af | /app.py | 18bb8ed0dbd02ac640d073f871277c93e192c80d | [] | no_license | zahraEskandari/corona_dashboard | f27ee1091a24cf622d1f631712036387760e6327 | 4c881800369a9c3be390409e4d9d146fbd5858c2 | refs/heads/master | 2021-05-20T04:15:44.021841 | 2020-04-01T13:24:15 | 2020-04-01T13:24:15 | 252,181,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,609 | py | #from flask import Flask
import flask
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
import random
import math
import dash_table as dst
from dash.dependencies import Input, Output
import datetime
import plotly
# storing and anaysis
import numpy as np
# visualization
#import matplotlib.pyplot as plt
#import seaborn as sns
import plotly.express as px
import folium
#from time import strftime
from flask_caching import Cache
import json
# Path to the local Johns Hopkins-derived CSV snapshot (see footer links).
data_path = 'path to .. \\data\\corona_data.csv'
# External view-counter script injected into every served page.
external_scripts = [
    {'src': 'my view counter script'}
]
# Underlying WSGI app (exposed for deployment) wrapped by the Dash app.
server = flask.Flask(__name__)
app = dash.Dash(__name__, server=server, external_scripts=external_scripts)
# show data
def serve_layout():
    """Build the whole dashboard layout (figures + stat cards) per request.

    Assigned to ``app.layout`` as a callable, so the CSV is re-read and all
    figures are rebuilt on every page load instead of once at import time.
    """
    # re-read the dataset on each page load so the dashboard stays current
    full_table = pd.read_csv(data_path ,
                         parse_dates=['Date'])
    # unify alternative spellings of country names used across data drops
    full_table['Country/Region'] = full_table['Country/Region'].replace('Mainland China', 'China')
    full_table['Country/Region'] = full_table['Country/Region'].replace('Iran (Islamic Republic of)', 'Iran')
    full_table['Country/Region'] = full_table['Country/Region'].replace('Republic of Korea', 'South Korea')
    full_table['Country/Region'] = full_table['Country/Region'].replace('Korea, South', 'South Korea')
    # filling missing values with NA
    full_table[['Province/State']] = full_table[['Province/State']].fillna('NA')
    Iran = full_table[full_table['Country/Region']=='Iran']
    full_latest = full_table[full_table['Date'] == max(full_table['Date'])].reset_index()
    # global daily totals, newest first — used for today/yesterday cards
    temp = full_table.groupby('Date')['Confirmed', 'Deaths', 'Recovered'].sum()
    temp = temp.reset_index()
    temp = temp.sort_values('Date', ascending=False)
    today = temp.iloc[0]["Date"]
    t = str(today)#today.strftime("%d-%b-%Y")
    # strip the time component from the ISO timestamp string
    today = t[0:t.find('T')]
    total_cases_today = temp.iloc[0]["Confirmed"]
    death_cases_today = temp.iloc[0]["Deaths"]
    recovered_cases_today = temp.iloc[0]["Recovered"]
    print(temp.head())
    yesterday = temp.iloc[1]["Date"]
    total_cases_yesterday = temp.iloc[1]["Confirmed"]
    death_cases_yesterday = temp.iloc[1]["Deaths"]
    recovered_cases_yesterday = temp.iloc[1]["Recovered"]
    # same today/yesterday aggregation for Iran only
    temp_Iran = Iran.groupby('Date')['Confirmed', 'Deaths', 'Recovered'].sum()
    temp_Iran = temp_Iran.reset_index()
    temp_Iran = temp_Iran.sort_values('Date', ascending=False)
    iran_today = temp_Iran.iloc[0]["Date"]
    iran_total_cases_today = temp_Iran.iloc[0]["Confirmed"]
    iran_death_cases_today = temp_Iran.iloc[0]["Deaths"]
    iran_recovered_cases_today = temp_Iran.iloc[0]["Recovered"]
    iran_yesterday = temp_Iran.iloc[1]["Date"]
    iran_total_cases_yesterday = temp_Iran.iloc[1]["Confirmed"]
    iran_death_cases_yesterday = temp_Iran.iloc[1]["Deaths"]
    iran_recovered_cases_yesterday = temp_Iran.iloc[1]["Recovered"]
    # latest-day totals per country; keep the top 8 by confirmed cases
    # (NOTE: full_latest is recomputed here, duplicating the line above)
    full_latest = full_table[full_table['Date'] == max(full_table['Date'])].reset_index()
    full_latest_grouped = full_latest.groupby('Country/Region')['Confirmed', 'Deaths', 'Recovered'].sum().reset_index()
    full_latest_grouped_confirmed = full_latest_grouped[['Country/Region', 'Confirmed']]
    result = full_latest_grouped_confirmed.nlargest(8, columns='Confirmed')
    print(int(full_latest_grouped_confirmed.loc[full_latest_grouped_confirmed['Country/Region']=='Iran'] ["Confirmed"]))
    # always include Iran, then bucket everything else as "Other Countries"
    if 'Iran' not in result['Country/Region'].values :
        result.loc[len(result)] = ['Iran', int(full_latest_grouped_confirmed.loc[full_latest_grouped_confirmed['Country/Region']=='Iran'] ["Confirmed"])]
    result.loc[len(result)] = ['Other Countries', full_latest_grouped_confirmed.loc[~full_latest_grouped_confirmed['Country/Region'].isin(result['Country/Region']), 'Confirmed'].sum()]
    #full_latest_grouped_confirmed
    result
    # full time series restricted to the selected countries + "Other"
    temp_full = full_table.groupby(['Country/Region', 'Date'])['Confirmed', 'Deaths', 'Recovered'].sum()
    temp_full = temp_full.reset_index()
    #temp_full['Country/Region'].isin(result['Country/Region'])
    temp = temp_full.loc[temp_full['Country/Region'].isin(result['Country/Region']) ]
    temp2 = temp_full.loc[~temp_full['Country/Region'].isin(result['Country/Region']) ].groupby(['Date'])['Confirmed', 'Deaths', 'Recovered'].sum()
    temp2 = temp2.reset_index()
    temp2['Country/Region'] = 'Other Countries'
    temp = temp.append(temp2, ignore_index=True)
    temp
    # stacked bars: cumulative confirmed / deaths worldwide by country
    fig1 = px.bar(temp, x="Date", y="Confirmed", color='Country/Region', orientation='v',width= 600 , height=600,
                 title='مجموع موارد تایید شده در دنیا', color_discrete_sequence = px.colors.cyclical.HSV)
    fig1.update_layout(legend_orientation='h') #fig.show()
    fig2 = px.bar(temp, x="Date", y="Deaths", color='Country/Region', orientation='v', width= 600 , height=600,
                 title='مجموع موارد فوت شده در دنیا', color_discrete_sequence = px.colors.cyclical.HSV)
    fig2.update_layout(legend_orientation='h') #fig.show()
    # line charts of the same series per country
    fig3= px.line(temp, x='Date', y='Confirmed', color='Country/Region', width= 600 , height=600,
                  title=' موارد تایید شده به تفکیک کشور', color_discrete_sequence = px.colors.cyclical.HSV )
    fig3.update_layout(legend_orientation='h')#fig.show()
    fig4= px.line(temp, x='Date', y='Deaths', color='Country/Region' , width= 600 , height=600,
                  title=' موارد فوت شده به تفکیک کشور', color_discrete_sequence = px.colors.cyclical.HSV )
    fig4.update_layout(legend_orientation='h')#fig.show()
    # Iran cumulative series melted to long form for faceted bars
    gdf = gdf = full_table.groupby(['Date', 'Country/Region'])['Confirmed', 'Deaths', 'Recovered'].max()
    gdf = gdf.reset_index()
    temp_iran = gdf[gdf['Country/Region']=='Iran'].groupby('Date').sum().reset_index()
    temp_iran = temp_iran.melt(id_vars='Date', value_vars=['Confirmed', 'Deaths', 'Recovered'],
                     var_name='Case', value_name='Count')
    fig5 = px.bar(temp_iran, x="Date", y="Count", color='Case', facet_col="Case",
                 title='مجموع موارد تایید شده، فوت شده و بهبود یافته ایران' , width=1000)
    # day-over-day differences for Iran (.diff() of the cumulative series)
    temp_iran2 = Iran.groupby('Date')['Confirmed', 'Deaths', 'Recovered'].sum().diff()
    print(temp_iran2)
    temp_iran2 = temp_iran2.reset_index()
    temp_iran2 = temp_iran2.melt(id_vars="Date",
                     value_vars=['Confirmed', 'Deaths', 'Recovered'])
    fig9 = px.bar(temp_iran2, x="Date", y="value", color='variable',
                 title='تعداد موارد تایید شده، فوت شده و بهبود یافته ایران در هر روز' , width=1000)
    fig9.update_layout(barmode='group')
    # mortality / recovery rates as a percentage of confirmed cases
    temp['Mortality Rate'] = round(1.0 * temp['Deaths']/
                                     temp['Confirmed'], 3)*100
    temp['Recovery Rate'] = round(1.0 * temp['Recovered']/
                                     temp['Confirmed'], 3)*100
    #fig5.show()
    fig6 = px.line(temp, x="Date", y='Mortality Rate', color='Country/Region',
                  facet_col='Country/Region',hover_name='Country/Region' , facet_col_wrap = 3 , render_mode = 'webgl'
                   , title='نرخ موارد منجر به مرگ نسبت به موارد تایید شده در طول زمان'  , width=1000 , height=700)
    fig6.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    fig7 = px.line(temp, x="Date", y='Recovery Rate', color='Country/Region',
                  facet_col='Country/Region',hover_name='Country/Region' , facet_col_wrap = 3 , render_mode = 'webgl'
                   , title='نرخ موارد بهبود یافته نسبت به موارد تایید شده در طول زمان'  , width=1000 , height=700)
    fig7.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    fig8 = px.line(temp, x="Date", y='Confirmed', color='Country/Region',
                  facet_col='Country/Region',hover_name='Country/Region' , facet_col_wrap = 3 , render_mode = 'auto'
                   , title='تعداد موارد تایید شده در کشورهای مختلف'  , width=1000 , height=700)
    fig8.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    # dark theme palette shared by all wrapper Divs
    colors = {
        'background': '#222222',
        'text': '#7FDBFF'
    }
    # stat cards: worldwide confirmed / deaths / recovered with day deltas
    d1 = html.Div( id = 'd1' , className = 'class list emergency' , style={'color':'white' , 'border':'solid 1px white' , 'direction':'rtl' , 'padding':'20px' , 'margin':'8px' }
                    , children=[
                        html.Div( className = 'content'
                                 , children=[
                                         html.Div( className = 'content' , children = html.Span( style={'font-size': '24px' } ,children='موارد تایید شده در دنیا' ))
                                         ,html.Div( id ='confirmedCases_count' , style= {'color':'red' , 'font-size': '48px' } , className = 'heading1' , children = "{:,d}".format(int(total_cases_today)) ) #
                                         ,html.Div( className = 'content' , children = "{:,d}".format(int(total_cases_today - total_cases_yesterday)) + ' مورد بیشتر از روز قبل' ) #"{:,d}".format(total_cases_today - total_cases_yesterday) + ' مورد بیشتر از روز قبل'
                                         ]
                                )
                        , html.Div( className = 'metadata' , children='تاریخ به روز رسانی:' + str(today)   ) #'تاریخ به روز رسانی:' + today.strftime("%b %d %Y")
                        ]
            )
    d2 = html.Div( id = 'd2' , className = 'class list emergency' , style={'color':'white' , 'border':'solid 1px white' , 'direction':'rtl' , 'padding':'20px' , 'margin':'8px'}
                    , children=[
                        html.Div( className = 'content'
                                 , children=[
                                         html.Div( className = 'content' , children = html.Span( style={'font-size': '24px' } ,children='موارد فوت شده در دنیا' ))
                                         ,html.Div( id ='DeathsCases_count' , style= {'color':'red' , 'font-size': '48px' } , className = 'heading1' , children = "{:,d}".format(int(death_cases_today)) ) #str(total_cases_today)
                                         ,html.Div( className = 'content' , children = "{:,d}".format(int(death_cases_today - death_cases_yesterday) ) + ' مورد بیشتر از روز قبل' )
                                         ]
                                )
                        , html.Div( className = 'metadata' , children= 'تاریخ به روز رسانی:' + str(today) )
                        ]
            )
    d3 = html.Div( id = 'd3' , className = 'class list emergency' , style={'color':'white' , 'border':'solid 1px white' , 'direction':'rtl' , 'padding':'20px' , 'margin':'8px'}
                    , children=[
                        html.Div( className = 'content'
                                 , children=[
                                         html.Div( className = 'content' , children = html.Span( style={'font-size': '24px' } ,children='موارد بهبود یافته در دنیا' ))
                                         ,html.Div( id ='RecoveredCases_count' , style= {'color':'red' , 'font-size': '48px' } , className = 'heading1' , children = "{:,d}".format(int(recovered_cases_today)) ) #str(total_cases_today)
                                         ,html.Div( className = 'content' , children = "{:,d}".format(int(recovered_cases_today - recovered_cases_yesterday)) + ' مورد بیشتر از دیروز' )
                                         ]
                                )
                        , html.Div( className = 'metadata' , children= 'تاریخ به روز رسانی:' + str(today) )#today.strftime("%b %d %Y")
                        ]
            )
    # same three stat cards restricted to Iran
    d1_iran = html.Div( id = 'd1_iran' , className = 'class list emergency' , style={'color':'white' , 'border':'solid 1px white' , 'direction':'rtl' , 'padding':'20px' , 'margin':'8px' }
                    , children=[
                        html.Div( className = 'content'
                                 , children=[
                                         html.Div( className = 'content' , children = html.Span( style={'font-size': '24px' } ,children='موارد تایید شده در ایران' ))
                                         ,html.Div( id ='iran_confirmedCases_count' , style= {'color':'red' , 'font-size': '48px' } , className = 'heading1' , children = "{:,d}".format( int(iran_total_cases_today)) ) #str(total_cases_today)
                                         ,html.Div( className = 'content' , children = "{:,d}".format( int(iran_total_cases_today - iran_total_cases_yesterday)) + ' مورد بیشتر از روز قبل' )
                                         ]
                                )
                        , html.Div( className = 'metadata' , children= 'تاریخ به روز رسانی:' + str(today) )
                        ]
            )
    d2_iran = html.Div( id = 'd2_iran' , className = 'class list emergency' , style={'color':'white' , 'border':'solid 1px white' , 'direction':'rtl' , 'padding':'20px' , 'margin':'8px'}
                    , children=[
                        html.Div( className = 'content'
                                 , children=[
                                         html.Div( className = 'content' , children = html.Span( style={'font-size': '24px' } ,children='موارد فوت شده در ایران' ))
                                         ,html.Div( id ='iran_DeathsCases_count' , style= {'color':'red' , 'font-size': '48px' } , className = 'heading1' , children = "{:,d}".format( int(iran_death_cases_today)) ) #str(total_cases_today)
                                         ,html.Div( className = 'content' , children = "{:,d}".format(int(iran_death_cases_today - iran_death_cases_yesterday) ) + ' مورد بیشتر از روز قبل' )
                                         ]
                                )
                        , html.Div( className = 'metadata' , children= 'تاریخ به روز رسانی:' + str(today) )
                        ]
            )
    d3_iran = html.Div( id = 'd3_iran' , className = 'class list emergency' , style={'color':'white' , 'border':'solid 1px white' , 'direction':'rtl' , 'padding':'20px' , 'margin':'8px'}
                    , children=[
                        html.Div( className = 'content'
                                 , children=[
                                         html.Div( className = 'content' , children = html.Span( style={'font-size': '24px' } ,children='موارد بهبود یافته در ایران' ))
                                         ,html.Div( id ='iran_RecoveredCases_count' , style= {'color':'red' , 'font-size': '48px' } , className = 'heading1' , children = "{:,d}".format(int(iran_recovered_cases_today)) ) #str(total_cases_today)
                                         ,html.Div( className = 'content' , children = "{:,d}".format(int(iran_recovered_cases_today - iran_recovered_cases_yesterday)) + ' مورد بیشتر از روز قبل' )
                                         ]
                                )
                        , html.Div( className = 'metadata' , children= 'تاریخ به روز رسانی:' + str(today) )
                        ]
            )
    # wrapper Divs embedding each figure in the dark-themed flex layout
    confirmed_cases_fig_div = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                       , children=[html.Div(
                                           style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                           , children = dcc.Graph( id='life-exp-vs-gdp6', figure = fig1)
                                           )
                                       ]
                                       )
    confirmed_cases_fig_div_2 = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                       , children=[html.Div(
                                           style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                           , children = dcc.Graph( id='confirmed_cases_fig_div_2', figure = fig3)
                                           )
                                       ]
                                       )
    deaths_cases_fig_div = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                    , children=[ html.Div(
                                        style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                        , children = dcc.Graph( id='life-exp-vs-gdp7', figure = fig2)
                                        )
                                    ]
                                    )
    deaths_cases_fig_div_2 = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                    , children=[ html.Div(
                                        style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                        , children = dcc.Graph( id='deaths_cases_fig_div_2', figure = fig4)
                                        )
                                    ]
                                    )
    iran_status = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                    , children=[ html.Div(
                                        style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                        , children = dcc.Graph( id='iran_status', figure = fig5)
                                        )
                                    ]
                                    )
    iran_daily_status = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                    , children=[ html.Div(
                                        style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                        , children = dcc.Graph( id='iran_daily_status', figure = fig9)
                                        )
                                    ]
                                    )
    mortality_rate = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                    , children=[ html.Div(
                                        style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                        , children = dcc.Graph( id='mortality_rate', figure = fig6)
                                        )
                                    ]
                                    )
    recovery_rate = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                    , children=[ html.Div(
                                        style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                        , children = dcc.Graph( id='reconvery_rate', figure = fig7)
                                        )
                                    ]
                                    )
    confirmed_cases_faceted = html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center' , 'backgroundColor': colors['background']}
                                    , children=[ html.Div(
                                        style={'display': 'flex' , 'flex-direction': 'row' , 'margin-left' :'10px', 'margin-right' :'10px'}
                                        , children = dcc.Graph( id='confirmed_cases_faceted', figure = fig8)
                                        )
                                    ]
                                    )
    # server-side render timestamp shown in the page header
    date_string = f'{datetime.datetime.now():%Y-%m-%d %H:%M:%S%z}'
    # assemble the page: header, stat cards, figure rows, footer links
    return html.Div( style={ 'display': 'flex' , 'flex-direction': 'column' , 'justify-content': 'center'  , 'backgroundColor': colors['background']}
                        ,children=[
                        html.Div(
                            children=[
                                html.H1(style={'color':'white' , 'text-align':'center'} , children='وضعیت انتشار کرونا در ایران و دنیا')
                                , html.H5(style={'color':'white' , 'text-align':'center'} , children='Loaded at: ' +date_string)
                                ]
                            )
                        , html.Div( id="global_stats" , children = [ d1 , d2 , d3]
                                   , style= { 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center'
                                             , 'margin-bottom' :'20px' , 'backgroundColor': colors['background'] , 'direction':'rtl'}
                                   )
                        , html.Div( id="iran_stats" , children = [ d1_iran , d2_iran , d3_iran]
                                   , style= { 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center'
                                             , 'margin-bottom' :'30px' , 'backgroundColor': colors['background'] , 'direction':'rtl'}
                                   )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children=[
                                confirmed_cases_fig_div  , deaths_cases_fig_div
                                ]
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children=[
                                confirmed_cases_fig_div_2  , deaths_cases_fig_div_2
                                ]
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = iran_status
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = iran_daily_status
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = confirmed_cases_faceted
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = mortality_rate
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = recovery_rate
                            )
                        #, html.A(href="http://www.webgozar.com/counter/stats.aspx?code=3745510" , target="_blank" , children='آمار')
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = [
                                html.Span( style= {'color':'white'} , children=' این داشبورد بر اساس داده‌های آماری دانشگاه جان هاپکینز آمریکا تهیه شده است. از طریق لینک زیر می توانید به این داده ها دسترسی پیدا کنید.  ')
                                ]
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = [
                                html.A(style= {'color':'white'} , href="https://github.com/CSSEGISandData/COVID-19" , target="_blank" , children='https://github.com/CSSEGISandData/COVID-19')
                                ,html.Br()
                                ]
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = [
                                html.Span( style= {'color':'white'} , children='برای ارتباط با من می توانید روی وبلاگ دیتا اینسایتز به آدرس زیر پیام بگذارید : ')
                                #, html.A(style= {'color':'white'} , href="https://datainsights.blogsky.com/" , target="_blank" , children=' دیتا اینسایتز ')
                                ]
                            )
                        , html.Div(
                            style={ 'display': 'flex' , 'flex-direction': 'row' , 'justify-content': 'center' , 'margin-bottom' :'10px' , 'backgroundColor': colors['background'] , 'direction' : 'rtl'}
                            , children = [
                                html.A(style= {'color':'white'} , href="https://datainsights.blogsky.com/" , target="_blank" , children='https://datainsights.blogsky.com/ ')
                                ]
                            )
                        ])
# Assigning the function itself (not its result) makes Dash rebuild the
# layout — and re-read the CSV — on every page load.
app.layout = serve_layout
# Legacy webgozar hit-counter script.
app.scripts.append_script({"external_url": "http://www.webgozar.ir/c.aspx?Code=3570732&t=counter"})
if __name__ == '__main__':
    #app.run_server(debug=True)
    # hot reload disabled: serve_layout already refreshes data per request
    app.run_server(debug=False , dev_tools_hot_reload=False)
"zahra.eskandari@gmail.com"
] | zahra.eskandari@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.