| column | dtype | lengths / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes, nullable |
| gha_event_created_at | timestamp[us] | nullable |
| gha_created_at | timestamp[us] | nullable |
| gha_language | string | 213 classes, nullable |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
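The records below are individual rows under this schema. As a minimal sketch of how such a split can be inspected with the Hugging Face `datasets` library (the dataset path `org/python-code-corpus` and the `train` split name are placeholders, substitute the real repository), streaming avoids downloading the full corpus before reading a row:

```python
from datasets import load_dataset

# Placeholder dataset path; replace with the actual repository name.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

# Pull one record and look at a few of the columns described above.
row = next(iter(ds))
print(row["repo_name"], row["path"], row["license_type"])
print(row["content"][:200])  # first 200 characters of the source file
```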
2df59917675f17b1ef4c2e128cb4b35161ef9123
|
c578281dc76364763c4e633c6a2ce984464a6600
|
/scarlette/constants.py
|
adcea08899bc71f26bee492da282d9deabbaa0b4
|
[
"MIT"
] |
permissive
|
frankhart2018/scarlette
|
faef851ab720ac789d730ea63851dcb3f26b3745
|
de472fbd03f27ebc9056bd40b2d818fbef493ee6
|
refs/heads/master
| 2023-06-09T09:21:06.753448
| 2021-06-22T14:08:32
| 2021-06-22T14:08:32
| 379,288,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
from pathlib import Path
import os
INSTALLATION_DIR: str = os.path.join(str(Path.home()), ".scarlette")
GITHUB_CREDS_FILE: str = os.path.join(INSTALLATION_DIR, "github")
OPTIONS = """1. Github Repo Creator"""
|
[
"siddharthadhar.soumen@srmuniv.edu.in"
] |
siddharthadhar.soumen@srmuniv.edu.in
|
53b343e14ec050c62cbaff4ace4811ff12ea876e
|
52af41612bdff8ca2348197b7a80dc365cac0664
|
/test/functional/feature_filelock.py
|
e80a2271b135cb890b8fd8a96fbe3d4b02898bee
|
[
"MIT"
] |
permissive
|
pill-pals/pillcoin
|
263617fa9787d2700b74c50c6bd06cd4ff300f71
|
ddab177dd6973982975890831a56ad6f1fd96448
|
refs/heads/master
| 2023-05-04T03:14:18.131963
| 2021-05-23T22:12:17
| 2021-05-23T22:12:17
| 369,870,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check that it's not possible to start a second pillcoind instance using the same datadir or wallet."""
import os

from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch


class FilelockTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def setup_network(self):
        self.add_nodes(self.num_nodes, extra_args=None)
        self.nodes[0].start([])
        self.nodes[0].wait_for_rpc_connection()

    def run_test(self):
        datadir = os.path.join(self.nodes[0].datadir, 'regtest')
        self.log.info("Using datadir {}".format(datadir))

        self.log.info("Check that we can't start a second pillcoind instance using the same datadir")
        expected_msg = "Error: Cannot obtain a lock on data directory {}. Pillcoin Core is probably already running.".format(datadir)
        self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)

        if self.is_wallet_compiled():
            wallet_dir = os.path.join(datadir, 'wallets')
            self.log.info("Check that we can't start a second pillcoind instance using the same wallet")
            expected_msg = "Error: Error initializing wallet database environment"
            self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)


if __name__ == '__main__':
    FilelockTest().main()
|
[
"williamnharvey@gmail.com"
] |
williamnharvey@gmail.com
|
1dfbbe2ea61180cb081caccafe1da9c037398c25
|
bb32aa23f6c87dba0e5fc2afa49a71f48c82bf54
|
/toutiao/牌积分.py
|
5e90da5b3ce87bc6752caef2ef330a3e51b216fc
|
[] |
no_license
|
jwf-ai/algorithm
|
fecae169f0efb96ae70d0591ef0a981d130c1df1
|
7d5521472a9536a04ae1827ae67e6067c8809538
|
refs/heads/master
| 2020-03-26T03:06:51.694616
| 2018-09-18T06:55:46
| 2018-09-18T06:55:46
| 144,440,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# encoding: utf-8
scores = [
    (9,7),
    (5,3),
    (5,8),
    (3,7),
    (1,4),
    (8,5),
    (4,2),
    (1,3)]
scores = sorted(scores, key=lambda i: i[0], reverse=True)
print(scores)

a = 0
b = 0
sum_score = 0
temp = 0
for i in range(0, len(scores)):
    if a > b:
        b += scores[i][0]
        temp += scores[i][1]
    elif a < b:
        a += scores[i][0]
        temp += scores[i][1]
    else:
        if temp > sum_score:
            sum_score = temp
        a += scores[i][0]
        temp += scores[i][1]

if a == b:
    if temp > sum_score:
        sum_score = temp

if sum_score == 0:
    print("None")
else:
    print(sum_score)
|
[
"jiawenfu@foxmail.com"
] |
jiawenfu@foxmail.com
|
3d4e242013f1f59e23cd6283e76aeefb57384ae8
|
88a67b8291d6aec658fb2a8ca39f9ca49c6912e4
|
/regression/adaboost/imp_z99_z0.py
|
6f77e0ed985dd518862cb46ca53a6721c1b400dd
|
[] |
no_license
|
lluciesmith/mlhalos_code
|
fc08929181020bf98a801d6d4c1bcde178b33035
|
d17502f8d1d633ba1f9cfdc44e3706b26a081f02
|
refs/heads/master
| 2023-02-05T15:35:30.957324
| 2020-12-30T13:44:17
| 2020-12-30T13:44:17
| 186,514,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,287
|
py
|
"""
Get errorbars for importances on z=0 and z=99 training features
from different training sets.
"""
import sys
sys.path.append("/home/lls/mlhalos_code")
import numpy as np
from regression.adaboost import gbm_04_only as gbm_fun
from multiprocessing.pool import Pool
saving_path_z0 = "/share/data2/lls/regression/gradboost/z0_den/imp_nest600/"
saving_path_z99 = "/share/data2/lls/regression/gradboost/ic_traj/imp_nest600/"
features_path = "/share/data2/lls/features_w_periodicity_fix/"
def get_random_training_ids(halo_mass):
radii_path = "/home/lls/stored_files/radii_stuff/"
halo_mass_in_ids = halo_mass[halo_mass > 0]
# sort ids in halos and corresponding r/r_vir value
radii_properties_in = np.load(radii_path + "radii_properties_in_ids.npy")
radii_properties_out = np.load(radii_path + "radii_properties_out_ids.npy")
fraction = np.concatenate((radii_properties_in[:, 2], radii_properties_out[:, 2]))
ids_in_halo = np.concatenate((radii_properties_in[:, 0], radii_properties_out[:, 0]))
ind_sorted = np.argsort(ids_in_halo)
ids_in_halo_mass = ids_in_halo[ind_sorted].astype("int")
r_fraction = fraction[ind_sorted]
del fraction
del ids_in_halo
# Select a balanced training set
# Take particle ids in each halo mass bin
n, log_bins = np.histogram(np.log10(halo_mass_in_ids), bins=50)
bins = 10 ** log_bins
training_ind = []
for i in range(len(bins) - 1):
ind_bin = np.where((halo_mass_in_ids >= bins[i]) & (halo_mass_in_ids < bins[i + 1]))[0]
ids_in_mass_bin = ids_in_halo_mass[ind_bin]
if ids_in_mass_bin.size == 0:
print("Pass")
pass
else:
if i == 49:
num_p = 2000
else:
num_p = 1000
radii_in_mass_bin = r_fraction[ind_bin]
np.random.seed()
ids_03 = np.random.choice(ids_in_mass_bin[radii_in_mass_bin < 0.3], num_p, replace=False)
ids_06 = np.random.choice(ids_in_mass_bin[(radii_in_mass_bin >= 0.3) & (radii_in_mass_bin < 0.6)], num_p,
replace=False)
ids_1 = np.random.choice(ids_in_mass_bin[(radii_in_mass_bin >= 0.6) & (radii_in_mass_bin < 1)], num_p,
replace=False)
ids_outer = np.random.choice(ids_in_mass_bin[radii_in_mass_bin >= 1], num_p, replace=False)
training_ids_in_bin = np.concatenate((ids_03, ids_06, ids_1, ids_outer))
training_ind.append(training_ids_in_bin)
training_ind = np.concatenate(training_ind)
remaining_ids = ids_in_halo_mass[~np.in1d(ids_in_halo_mass, training_ind)]
np.random.seed()
random_sample = np.random.choice(remaining_ids, 50000, replace=False)
training_ind = np.concatenate((training_ind, random_sample))
return training_ind
# data
z0_den_features = np.load(features_path + "z0l_density_contrasts.npy")
traj = np.load("/share/data2/lls/features_w_periodicity_fix/ics_density_contrasts.npy")
def train_and_get_imp_GBT(num):
print("Loop " + str(num))
halo_mass = np.load("/home/lls/stored_files/halo_mass_particles.npy")
tr_ids = get_random_training_ids(halo_mass)
training_features_z0 = np.column_stack((z0_den_features[tr_ids], np.log10(halo_mass[tr_ids])))
training_features_z99 = np.column_stack((traj[tr_ids], np.log10(halo_mass[tr_ids])))
param_grid = {"loss": "lad", "learning_rate": 0.01, "n_estimators": 600, "max_depth": 5, "max_features": 15}
clf_z0 = gbm_fun.train_gbm(training_features_z0, param_grid=param_grid, cv=False, save=False)
imp_z0 = clf_z0.feature_importances_
np.save(saving_path_z0 + "imp_" + str(num) + ".npy", imp_z0)
clf_z99 = gbm_fun.train_gbm(training_features_z99, param_grid=param_grid, cv=False, save=False)
imp_z99 = clf_z99.feature_importances_
np.save(saving_path_z99 + "imp_" + str(num) + ".npy", imp_z99)
return imp_z0, imp_z99
pool = Pool(processes=12)
imps = pool.map(train_and_get_imp_GBT, np.arange(12))
pool.close()
pool.join()
imps_0 = np.array([imps[i][0] for i in range(12)])
imps_99 = np.array([imps[i][1] for i in range(12)])
np.save(saving_path_z0 + "all_imps_z0.npy", imps_0)
np.save(saving_path_z0 + "all_imps_z99.npy", imps_99)
|
[
"ucapllu@ucl.ac.uk"
] |
ucapllu@ucl.ac.uk
|
51c46c7b731fd93f82f800a5134109dc0fd3e110
|
cc37173708f802eb6d095708524a8b808833f102
|
/scratch/test/pyflow_unit_tests.py
|
fa260a733bb425419c0d0368ac68bec4efc6a471
|
[
"BSD-2-Clause"
] |
permissive
|
abladon/pyflow
|
d6b2b0b9d4c069179a6a1fca504aadcff4a96edb
|
bbce40b983e90a40255ecd38112cf8301d2b6994
|
refs/heads/master
| 2021-01-16T20:32:15.685045
| 2016-02-21T20:15:58
| 2016-02-21T20:15:58
| 51,919,180
| 0
| 0
| null | 2016-02-17T11:55:10
| 2016-02-17T11:55:10
| null |
UTF-8
|
Python
| false
| false
| 13,495
|
py
|
#!/usr/bin/env python
import unittest
import os
import sys

scriptDir=os.path.abspath(os.path.dirname(__file__))

def pyflow_lib_dir() :
    return os.path.abspath(os.path.join(scriptDir,os.pardir,os.pardir,"pyflow","src"))

try :
    # if pyflow is in PYTHONPATH already then use the specified copy:
    from pyflow import isWindows,WorkflowRunner
except :
    # otherwise use the relative path within this repo:
    sys.path.append(pyflow_lib_dir())
    from pyflow import isWindows,WorkflowRunner

def getRmCmd() :
    if isWindows():
        return ["del","/f"]
    else:
        return ["rm","-f"]

def getSleepCmd() :
    if isWindows():
        return ["timeout"]
    else:
        return ["sleep"]

def getCatCmd() :
    if isWindows():
        return ["type"]
    else:
        return ["cat"]

def getCmdString(cmdList) :
    return " ".join(cmdList)

class NullWorkflow(WorkflowRunner) :
    pass

class TestWorkflowRunner(unittest.TestCase) :

    def __init__(self, *args, **kw) :
        unittest.TestCase.__init__(self, *args, **kw)
        self.testPath="testDataRoot"

    def setUp(self) :
        self.clearTestPath()

    def tearDown(self) :
        self.clearTestPath()

    def clearTestPath(self) :
        import shutil
        if os.path.isdir(self.testPath) :
            shutil.rmtree(self.testPath)

    def test_createDataDir(self) :
        w=NullWorkflow()
        w.run("local",self.testPath,isQuiet=True)
        self.assertTrue(os.path.isdir(self.testPath))

    def test_badMode(self) :
        w=NullWorkflow()
        try:
            w.run("foomode",self.testPath,isQuiet=True)
            self.fail("Didn't raise Exception")
        except KeyError:
            self.assertTrue(sys.exc_info()[1].args[0].find("foomode") != -1)

    def test_errorLogPositive(self) :
        """
        Test that errors are written to separate log when requested
        """
        os.mkdir(self.testPath)
        logFile=os.path.join(self.testPath,"error.log")
        w=NullWorkflow()
        try:
            w.run("foomode",self.testPath,errorLogFile=logFile,isQuiet=True)
            self.fail("Didn't raise Exception")
        except KeyError:
            self.assertTrue(sys.exc_info()[1].args[0].find("foomode") != -1)
        self.assertTrue((os.path.getsize(logFile) > 0))

    def test_errorLogNegative(self) :
        """
        Test that no errors are written to separate error log when none occur
        """
        os.mkdir(self.testPath)
        logFile=os.path.join(self.testPath,"error.log")
        w=NullWorkflow()
        w.run("local",self.testPath,errorLogFile=logFile,isQuiet=True)
        self.assertTrue((os.path.getsize(logFile) == 0))

    def test_dataDirCollision(self) :
        """
        Test that when two pyflow jobs are launched with the same dataDir, the second will fail.
        """
        import threading,time

        class StallWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("sleeper",getSleepCmd()+["5"])

        class runner(threading.Thread) :
            def __init__(self2) :
                threading.Thread.__init__(self2)
                self2.retval1=1
            def run(self2) :
                w=StallWorkflow()
                self2.retval1=w.run("local",self.testPath,isQuiet=True)

        w2=StallWorkflow()

        r1=runner()
        r1.start()
        time.sleep(1)
        retval2=w2.run("local",self.testPath,isQuiet=True)
        self.assertTrue(retval2==1)
        r1.join()
        self.assertTrue(r1.retval1==0)

    def test_forceContinue(self) :
        class TestWorkflow(WorkflowRunner) :
            color="red"
            def setColor(self2,color) :
                self2.color=color
            def workflow(self2) :
                self2.addTask("A","echo "+self2.color)

        w=TestWorkflow()
        retval=w.run("local",self.testPath,isQuiet=True)
        self.assertTrue(retval==0)
        retval=w.run("local",self.testPath,isContinue=True,isQuiet=True)
        self.assertTrue(retval==0)
        w.setColor("green")
        retval=w.run("local",self.testPath,isContinue=True,isQuiet=True)
        self.assertTrue(retval==1)
        retval=w.run("local",self.testPath,isContinue=True,isForceContinue=True,isQuiet=True)
        self.assertTrue(retval==0)

    def test_badContinue(self) :
        w=NullWorkflow()
        try:
            w.run("local",self.testPath,isContinue=True,isQuiet=True)
            self.fail("Didn't raise Exception")
        except Exception:
            self.assertTrue(sys.exc_info()[1].args[0].find("Cannot continue run") != -1)

    def test_goodContinue(self) :
        w=NullWorkflow()
        retval1=w.run("local",self.testPath,isQuiet=True)
        retval2=w.run("local",self.testPath,isContinue=True,isQuiet=True)
        self.assertTrue((retval1==0) and (retval2==0))

    def test_autoContinue(self) :
        w=NullWorkflow()
        retval1=w.run("local",self.testPath,isContinue="Auto",isQuiet=True)
        retval2=w.run("local",self.testPath,isContinue="Auto",isQuiet=True)
        self.assertTrue((retval1==0) and (retval2==0))

    def test_simpleDependency(self) :
        "make sure B waits for A"
        class TestWorkflow(WorkflowRunner) :
            def workflow(self2) :
                filePath=os.path.join(self.testPath,"tmp.txt")
                self2.addTask("A","echo foo > " +filePath)
                self2.addTask("B",getCmdString(getCatCmd()) + " " + filePath + " && " + getCmdString(getRmCmd())+ " " + filePath,dependencies="A")

        w=TestWorkflow()
        self.assertTrue((0==w.run("local",self.testPath,isQuiet=True)))

    def test_waitDependency(self) :
        "make sure waitForTasks waits for A on the workflow thread"
        class TestWorkflow(WorkflowRunner) :
            def workflow(self2) :
                filePath=os.path.join(self.testPath,"tmp.txt")
                if os.path.isfile(filePath) : os.remove(filePath)
                self2.addTask("A",getCmdString(getSleepCmd()) + " 5 && echo foo > %s" % (filePath))
                self2.waitForTasks("A")
                assert(os.path.isfile(filePath))
                self2.addTask("B",getCmdString(getCatCmd()) + " " + filePath +" && " + getCmdString(getRmCmd())+ " " + filePath)

        w=TestWorkflow()
        self.assertTrue(0==w.run("local",self.testPath,isQuiet=True))

    def test_flowLog(self) :
        "make sure flowLog doesn't throw -- but this does not check if the log is updated"
        class TestWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.flowLog("My Message")

        w=TestWorkflow()
        self.assertTrue(0==w.run("local",self.testPath,isQuiet=True))

    def test_deadSibling(self) :
        """
        Tests that when a task error occurs in one sub-workflow, its
        sibling workflows exit correctly (instead of hanging forever).
        This test is an early library error case.
        """
        class SubWorkflow1(WorkflowRunner) :
            "this one fails"
            def workflow(self2) :
                self2.addTask("A",getSleepCmd()+["5"])
                self2.addTask("B","boogyman!",dependencies="A")

        class SubWorkflow2(WorkflowRunner) :
            "this one doesn't fail"
            def workflow(self2) :
                self2.addTask("A",getSleepCmd()+["5"])
                self2.addTask("B",getSleepCmd()+["5"],dependencies="A")

        class MasterWorkflow(WorkflowRunner) :
            def workflow(self2) :
                wflow1=SubWorkflow1()
                wflow2=SubWorkflow2()
                self2.addWorkflowTask("wf1",wflow1)
                self2.addWorkflowTask("wf2",wflow2)

        w=MasterWorkflow()
        self.assertTrue(1==w.run("local",self.testPath,nCores=2,isQuiet=True))

    def test_selfDependency1(self) :
        """
        """
        class SelfWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("A",getSleepCmd()+["5"],dependencies="A")

        w=SelfWorkflow()
        self.assertTrue(1==w.run("local",self.testPath,isQuiet=True))

    def test_expGraphScaling(self) :
        """
        This tests that pyflow does not scale poorly with highly connected subgraphs.
        When the error occurs, it locks the primary thread, so we put the test workflow
        on its own thread so that we can time it and issue an error.
        Issue reported by R Kelley and A Halpern
        """
        import threading

        class ScalingWorkflow(WorkflowRunner) :
            def workflow(self2) :
                tasks = set()
                for idx in xrange(60) :
                    sidx = str(idx)
                    tasks.add(self2.addTask("task_" + sidx, "echo " + sidx, dependencies = tasks))
                self2.waitForTasks("task_50")
                tasks.add(self2.addTask("task_1000", "echo 1000", dependencies = tasks))

        class runner(threading.Thread) :
            def __init__(self2) :
                threading.Thread.__init__(self2)
                self2.setDaemon(True)
            def run(self2) :
                w=ScalingWorkflow()
                w.run("local",self.testPath,isQuiet=True)

        r1=runner()
        r1.start()
        r1.join(30)
        self.assertTrue(not r1.isAlive())

    def test_startFromTasks(self) :
        """
        run() option to ignore all tasks before a specified task node
        """
        filePath=os.path.join(self.testPath,"tmp.txt")

        class SelfWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("A","echo foo > "+filePath)
                self2.addTask("B",getSleepCmd()+["1"],dependencies="A")
                self2.addTask("C",getSleepCmd()+["1"],dependencies=("A","B"))

        w=SelfWorkflow()
        self.assertTrue(0==w.run("local",self.testPath,isQuiet=True,startFromTasks="B"))
        self.assertTrue(not os.path.exists(filePath))

    def test_startFromTasksSubWflow(self) :
        """
        run() option to ignore all tasks before a specified task node
        """
        filePath=os.path.join(self.testPath,"tmp.txt")

        class SubWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("D","echo foo > "+filePath)

        class SelfWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("A",getSleepCmd()+["1"])
                self2.addWorkflowTask("B",SubWorkflow(),dependencies="A")
                self2.addTask("C",getSleepCmd()+["1"],dependencies=("A","B"))

        w=SelfWorkflow()
        self.assertTrue(0==w.run("local",self.testPath,isQuiet=True,startFromTasks="B"))
        self.assertTrue(os.path.exists(filePath))

    def test_startFromTasksSubWflow2(self) :
        """
        run() option to ignore all tasks before a specified task node
        """
        filePath=os.path.join(self.testPath,"tmp.txt")

        class SubWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("D","echo foo > "+filePath)

        class SelfWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("A",getSleepCmd()+["1"])
                self2.addWorkflowTask("B",SubWorkflow(),dependencies="A")
                self2.addTask("C",getSleepCmd()+["1"],dependencies=("A","B"))

        w=SelfWorkflow()
        self.assertTrue(0==w.run("local",self.testPath,isQuiet=True,startFromTasks="C"))
        self.assertTrue(not os.path.exists(filePath))

    def test_ignoreTasksAfter(self) :
        """
        run() option to ignore all tasks below a specified task node
        """
        class SelfWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("A",getSleepCmd()+["1"])
                self2.addTask("B",getSleepCmd()+["1"],dependencies="A")
                self2.addTask("C",getSleepCmd()+["1"],dependencies=("A","B"))

        w=SelfWorkflow()
        self.assertTrue(0==w.run("local",self.testPath,isQuiet=True,ignoreTasksAfter="B"))
        self.assertTrue(not w.isTaskComplete("C"))

    def test_addTaskOutsideWorkflow(self) :
        """
        test that calling addTask() outside of a workflow() method
        raises an exception
        """
        class SelfWorkflow(WorkflowRunner) :
            def __init__(self2) :
                self2.addTask("A",getSleepCmd()+["1"])

        try :
            w=SelfWorkflow()
            self.fail("Didn't raise Exception")
        except :
            pass

    def test_runModeInSubWorkflow(self) :
        """
        test that calling getRunMode() in a sub-workflow() method
        does not raise an exception (github issue #5)
        """
        class SubWorkflow(WorkflowRunner) :
            def workflow(self2) :
                if self2.getRunMode() == "local" :
                    self2.addTask("D",getSleepCmd()+["1"])

        class SelfWorkflow(WorkflowRunner) :
            def workflow(self2) :
                self2.addTask("A",getSleepCmd()+["1"])
                self2.addWorkflowTask("B",SubWorkflow(),dependencies="A")
                self2.addTask("C",getSleepCmd()+["1"],dependencies=("A","B"))

        try :
            w=SelfWorkflow()
            self.assertTrue(0==w.run("local",self.testPath,isQuiet=True))
        except :
            self.fail("Should not raise Exception")

if __name__ == '__main__' :
    unittest.main()
|
[
"csaunders@illumina.com"
] |
csaunders@illumina.com
|
cb4f44a116888a1d380e4583492b90293e51c33b
|
0d01cff14457e2d2feb111f74c07f3b8113de98e
|
/Scrapy1/shiyanlou_courses_spider.py
|
92cd6bc2ac4795b1186c86c8e58d16ae921635a9
|
[] |
no_license
|
njqijie/shiyanlou-001
|
1824726efe3acf4d31852e00a6803c1528f3e4a2
|
55985974cb659cfec345b271685a4e911d66a1a7
|
refs/heads/master
| 2020-03-25T01:58:57.439549
| 2018-12-31T02:31:54
| 2018-12-31T02:31:54
| 143,267,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
import scrapy


class ShiyanlouCourseSpider(scrapy.Spider):
    name = 'shiyanlou-courses'

    def start_requests(self):
        url_temp = 'https://www.shiyanlou.com/courses/?category=all&course_type=all&fee=all&tag=all&page={}'
        urls = (url_temp.format(i) for i in range(1,23))
        for url in urls:
            yield scrapy.Request(url=url,callback=self.parse)

    def parse(self,response):
        for course in response.css('div.course-body'):
            yield {
                'name':course.css('div.course-name::text').extract(),
                'description':course.css('div.course-desc::text').extract_first(),
                'type':course.css('div.course-footer span.pull-right::text').extract_first(default='Free'),
                'students':course.xpath('.//span[contains(@class,"pull-left")]/text()[2]').re_first('[^\d]*(\d+)[^\d]*')
            }
|
[
"909967625@qq.com"
] |
909967625@qq.com
|
65332f10fc0988bb35231e7f78e2f5138eccf221
|
a94711afdd30bcf2e8ddb7d7fb7100b808b32a8c
|
/events/scaffolding.py
|
1584737e3698498bbcc4eca150d6cac948bf88ca
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
shawnwanderson/campfire-empire
|
7dda251912d6e823864cbeb7c03200082c0c3863
|
6492d9fd938b3e567e61257bd94848b0dd8a7e1f
|
refs/heads/master
| 2021-01-12T09:12:25.879677
| 2016-12-23T22:56:11
| 2016-12-23T22:56:11
| 76,797,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
from generic_scaffold import CrudManager
from events import models


class EventCrudManager(CrudManager):
    model = models.Event
    prefix = 'events/'
|
[
"swanders@ualberta.ca"
] |
swanders@ualberta.ca
|
01f4fe39300dabb28ebd9429d9b72f65a855968c
|
ae400fd04ee0bb85f96e044c78bf492be027b2df
|
/ML/sample/knn/knntest1.py
|
172e92305bba32bab538f1ab0a95a0970d1ca765
|
[] |
no_license
|
allragedbody/stocknew
|
34df913c22e0174d1176262044f8413e83e64899
|
62a5bbf17e3780a52b51b262e0d70bfe4cd8ca4e
|
refs/heads/master
| 2022-12-21T11:40:28.695153
| 2018-05-25T20:12:23
| 2018-05-25T20:12:23
| 114,553,952
| 1
| 1
| null | 2022-12-15T19:16:03
| 2017-12-17T17:06:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
# -*- coding: utf-8 -*-
from numpy import *
import operator


def createDataSet():
    # four 2-D training samples and their class labels
    group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
    labels = ["A","A","B","B"]
    return group,labels
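# Hypothetical usage sketch (not part of the original file): a kNN
# classifier built on this module would consume the dataset like
#   group, labels = createDataSet()
# and compare a query point against the four labelled samples in group.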
|
[
"dengyunfei@360buyAD.local"
] |
dengyunfei@360buyAD.local
|
3ac346f5f0d1b99994cb17d2dd5b8ece984c5a40
|
239efcf3d973038c30ea9babc701563d65871df7
|
/app/app-mon.py
|
bb72073d12af98f4ff9219fc6dfb540fed0c94e4
|
[] |
no_license
|
Shaverdoff/k8s-conf-demo
|
c36df1b71957748841ee5385edb005eddd8f2d2f
|
6f00715be73c5ad328b3d493ce514836759a66d4
|
refs/heads/master
| 2023-05-31T06:35:17.677962
| 2019-11-28T22:45:13
| 2019-11-29T08:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
import logging
import os
import time

from flask import Flask, request
from prometheus_flask_exporter import PrometheusMetrics

app = Flask(__name__)
metrics = PrometheusMetrics(app)

metrics.register_default(
    metrics.counter(
        'by_path_counter', 'Request count by request paths',
        labels={'path': lambda: request.path}
    )
)


@app.route('/handler')
def handler():
    return 'OK'


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
    app.run(host='0.0.0.0', port=os.getenv("HTTP_PORT", 8081))
|
[
"s.filatov@corp.mail.ru"
] |
s.filatov@corp.mail.ru
|
c96da4d6adf70412a46950a957b42ea1338f0606
|
b2ba670818623f8ab18162382f7394baed97b7cb
|
/test-data/AndroidSlicer/BankDroid/DD/37.py
|
af170605ae9b42ea75c17acd7f27e44ef9df9f88
|
[
"MIT"
] |
permissive
|
hsumyatwin/ESDroid-artifact
|
012c26c40537a79b255da033e7b36d78086b743a
|
bff082c4daeeed62ceda3d715c07643203a0b44b
|
refs/heads/main
| 2023-04-11T19:17:33.711133
| 2022-09-30T13:40:23
| 2022-09-30T13:40:23
| 303,378,286
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
#start monkey test seedNo 0
import os
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
device = MonkeyRunner.waitForConnection()
package = 'com.liato.bankdroid'
activity ='com.liato.bankdroid.MainActivity'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.6)
MonkeyRunner.sleep(0.6)
device.touch(779,119, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(165,232, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(1020,132, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(416,1792, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(950,1716, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(297,296, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(158,111, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(144,216, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(1014,127, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(967,159, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(980,331, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(586,581, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(511,1086, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(479,732, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(308,1794, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(705,1723, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(220,399, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(860,1710, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(520,1695, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(534,1373, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.6)
device.touch(936,959, 'DOWN_AND_UP')
|
[
"hsumyatwin@gmail.com"
] |
hsumyatwin@gmail.com
|
3f1f86b62b84192ceadb64c5442dcff6c7359b7b
|
0ae8c7592a4e0b5c47f79a333e442db5f0c2b147
|
/amstr.py
|
677f4adda054e711de41b1d871fd3022febee8f9
|
[] |
no_license
|
Rihanashariff/pythonscript
|
3ff350495f28533f65d209e3e2f2548f94828477
|
d0417d214e5f527e2a07895c5cf8e93715177034
|
refs/heads/master
| 2020-06-12T07:41:58.571425
| 2019-08-28T09:41:55
| 2019-08-28T09:41:55
| 194,235,397
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
# checks whether the input is an Armstrong number, i.e. equal to the
# sum of the cubes of its digits (e.g. 153 = 1**3 + 5**3 + 3**3)
a=int(input())
xy=a
sum=0
while a>0:
    y=a%10
    sum=sum+y*y*y
    a=a//10
if xy==sum:
    print("yes")
else:
    print("no")
|
[
"noreply@github.com"
] |
Rihanashariff.noreply@github.com
|
320d1ef55d49843b686a7ad0ff8487e2fcbb025f
|
ef7ed04e1fa70d13cdbdb2155f7381fdf5f79be7
|
/src/reporter/core/migrations/0002_auto_20200617_1718.py
|
846345d10ccd73093fd48ae793f2cdd0cc458834
|
[] |
no_license
|
seisplot-coder-s/reporter
|
8b23ee0e3ac691849d612750092fe2d532f39eb1
|
1a05ea28d8720b28d779b199a00340bf40569a47
|
refs/heads/master
| 2022-11-13T23:47:59.659270
| 2020-07-03T11:38:55
| 2020-07-03T11:38:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# Generated by Django 3.0.7 on 2020-06-17 17:18

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='report',
            name='ciurl',
            field=models.URLField(blank=True, null=True, verbose_name='Continuous Integration URL'),
        ),
    ]
|
[
"barsch@egu.eu"
] |
barsch@egu.eu
|
8ab62bba43b349293d0d75cf4f8bbc37c78f4b0e
|
59841c08b203d8471842deb3aaff100f1c75b4ee
|
/ResonanceCheck.py
|
5af950b3287fe5ad24bcf09b7a6bf7809a3f43cd
|
[] |
no_license
|
Rabaa-basha/PlanetaryResonance
|
5307e981d165ddf218102e80e8a6716d3107cf6f
|
731dc4b2757771cf7692add4e5918643db1c0aee
|
refs/heads/master
| 2023-01-22T00:27:33.363204
| 2020-12-01T12:31:39
| 2020-12-01T12:31:39
| 299,296,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,435
|
py
|
__author__ = "Samantha Lawler"
__copyright__ = "Copyright 2020"
__version__ = "1.0.1"
__maintainer__ = "Rabaa"
__email__ = "beborabaa@gmail.com"

import numpy as np
import sys


class TestParticle:
    def __init__(self):  # Attributes defined
        self.Resonant = False
        self.ResonanceType = 'n:n'
        self.Name = 'N/A'
        self.ResonanceCenter = -999
        self.ResonanceAmplitude = -999
        self.AverageSMA = -999  # Average semimajor axis
        self.AverageEccentricity = -999
        self.AverageInclination = -999
        self.Kozai = False
        self.SMAamplitude = -999
        self.SMACenter = -999
        self.Index = -1

    ############################################ FUNCTIONS #################################################
    ############################################ DATA DISSECTION #################################################
    def DataDissection(self, typeOfData, IndexCount):
        self.Index = IndexCount
        TestParticleSample = sys.argv[1]  # User to choose a test sample using terminal
        with open('tp' + TestParticleSample + ".out") as f:  # Counting number of lines
            for line, l in enumerate(f):
                pass
        NumberOfLines = line

        # Taking the test point's data from the .out file sequentially
        TestParticleTime, Index, SemiMajorAxis, Eccentricity, Inclination, Omega, omega, AngularPosition, LongitudeTP = np.genfromtxt('tp' + TestParticleSample + ".out", unpack=True)
        Longitude = np.genfromtxt("LN.out", usecols=8, unpack=True)
        NumberOfLines = (NumberOfLines / (max(Index)+1)) - 1
        TestParticleTime = TestParticleTime[Index == IndexCount]
        SemiMajorAxis = SemiMajorAxis[Index == IndexCount]
        Eccentricity = Eccentricity[Index == IndexCount]
        Inclination = Inclination[Index == IndexCount]
        Omega = Omega[Index == IndexCount]
        omega = omega[Index == IndexCount]
        AngularPosition = AngularPosition[Index == IndexCount]
        Lambda = (Omega + omega + AngularPosition) % 360  # The Lambda for test particles
        Pomega = (Omega + omega) % 360  # The longitude of pericenter in degrees

        # Flags "Specific ones"
        IsItResonant = False  # Is it in resonance?
        ResonanceAmplitude = -999  # The Resonance Amplitude
        ResonanceCenter = -999  # The Resonance Center
        ResonanceName = -999  # The Resonance name "Ratio"
        IsItKozai = False  # Is it Kozai resonance?
        SMAAmplitude = -999  # Semimajor amplitude
        SMACenter = -999  # Semimajor center
        # Flags "General ones"
        IsIt = False  # Resonance / Kozai ?
        Amplitude = -999  # Phi / SMA
        Center = -999  # Phi / SMA
        Name = -999  # Name of the test particle

        # list of resonances to check: pp and qq for pp:qq resonance
        pp = [2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 7, 7, 7, 7, 8, 8, 9, 9, 9, 10]
        qq = [1, 1, 2, 1, 3, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 3, 1, 2, 4, 1]
        for jj in np.arange(0, len(pp)):  # First Loop
            # Kepler's Third Law to calculate semimajor axis of the resonance
            ResSemiMajorAxis = 30.1 * (float(pp[jj]) / float(qq[jj])) ** (2. / 3.)
            # Searching within 2 AUs from the resonance center
            if IsIt == 0 and (ResSemiMajorAxis + 2) > np.average(SemiMajorAxis) > (ResSemiMajorAxis - 2):
                phi = (float(pp[jj]) * Lambda - float(qq[jj]) * Longitude - (float(pp[jj]) - float(qq[jj])) * Pomega) % 360
                AngleRange = np.arange(0, 360, 15)  # Array of angles, 15 degrees increment each step
                Window = int(0)
                Loop = 0
                if typeOfData == 0:
                    # Dividing the timeline to 10 separate windows; detecting resonance on smaller scales
                    WindowStep = int(NumberOfLines / 10)
                    # Arrays of 10 elements to check for resonance / the res angle each step '10%', set to zero
                    IsItArray = np.zeros(int(len(phi) / WindowStep))
                    CenterArray = np.zeros(int(len(phi) / WindowStep))
                    while Window + WindowStep < len(phi):
                        # Average of the semi-major axis from Current Window -> Next Window
                        WindowAverage = np.average(SemiMajorAxis[Window:Window + WindowStep])
                        if (ResSemiMajorAxis + 2) > WindowAverage > (ResSemiMajorAxis - 2):  # Within 2 AUs of Window Average
                            WindowPhi = phi[Window:Window + WindowStep]  # Phi of next window
                            AnglePresent = np.zeros(len(AngleRange)) + 1
                            # find out where the res angle doesn't go for 15 degrees, proxy for AnglePresent
                            for step in np.arange(0, len(AngleRange) - 1):
                                if len(WindowPhi[(WindowPhi > AngleRange[step]) * (WindowPhi < (AngleRange[step + 1]))]) == 0:
                                    AnglePresent[step] = 0
                            IsItArray[Loop] = np.average(AnglePresent) * 180.
                            CenterArray[Loop] = np.average(AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])
                        else:
                            IsItArray[Loop] = 180.
                        Window += WindowStep  # Increment Window
                        Loop += 1  # Increment Loop
                    if len(IsItArray[IsItArray < 180.]) > 8:  # If 8 out of 10 Windows classified as Resonant
                        IsIt = True
                        Amplitude = np.average(IsItArray)
                        Center = np.average(CenterArray)
                        Name = str(pp[jj]) + ':' + str(qq[jj])
                        MaxCenter = max(CenterArray)
                        MinCenter = min(CenterArray)
                        if (MaxCenter - MinCenter) > 210:
                            IsIt = False
                            Amplitude = -999
                            Center = -999
                        break
                    else:
                        Amplitude = -999
                        Center = -999
                else:
                    # If checking for Kozai, we only want one window
                    WindowStep = int(NumberOfLines)
                    IsItArray = np.zeros(int(len(omega) / WindowStep))  # For Kozai we check SMA
                    CenterArray = np.zeros(int(len(omega) / WindowStep))
                    while Window + WindowStep < len(SemiMajorAxis):
                        # WindowSMA = SemiMajorAxis[Window:Window + WindowStep] # SMA of next window
                        AnglePresent = np.zeros(len(AngleRange)) + 1
                        # find out where the res angle doesn't go for 15 degrees, proxy for AnglePresent
                        for step in np.arange(0, len(AngleRange) - 1):
                            if len(omega[(omega > AngleRange[step]) * (omega < (AngleRange[step + 1]))]) == 0:
                                AnglePresent[step] = 0
                        IsItArray[Loop] = np.average(AnglePresent) * 180.
                        CenterArray[Loop] = np.average(AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])
                        Window += WindowStep  # Increment Window
                        Loop += 1  # Increment Loop
                    if len(IsItArray[IsItArray < 180.]) == 1:  # If the Window classified as Kozai
                        IsIt = True
                        Amplitude = np.average(IsItArray)
                        Center = np.average(CenterArray)
                        Name = str(pp[jj]) + ':' + str(qq[jj])
                    else:
                        Amplitude = -999
                        Center = -999

        if typeOfData == 0:
            IsItResonant = IsIt
            ResonanceAmplitude = Amplitude
            ResonanceCenter = Center
            ResonanceName = Name
            self.Resonant = IsItResonant
            self.ResonanceAmplitude = ResonanceAmplitude
            self.ResonanceCenter = ResonanceCenter
            self.ResonanceType = ResonanceName
        else:
            IsItKozai = IsIt
            SMAAmplitude = Amplitude
            SMACenter = Center
            self.Kozai = IsItKozai
            self.SMAamplitude = SMAAmplitude
            self.SMACenter = SMACenter

        self.Name = TestParticleSample
        self.AverageEccentricity = np.average(Eccentricity)
        self.AverageInclination = np.average(Inclination)
        self.AverageSMA = np.average(SemiMajorAxis)
        #sns.set_style('dark')
        #palette = sns.color_palette("mako", as_cmap=True)
        #sns.set_palette("dark", 10)
        #sns.relplot(x=TestParticleTime, y=omega)
        #plt.show()
        return

    ############################################ IDENTIFY RESONANCE ##############################################
    def IdentifyResonance(self, IndexCount):
        type = 0  # Indicates that the variable Resonant is what we want from the DataDissection function
        self.DataDissection(type, IndexCount)
        if self.Resonant == True:
            type = 1  # Indicates that the variable Kozai is what we want from the DataDissection function
            self.DataDissection(type, IndexCount)

    ############################################## PRINT DATA ##############################################
    def PrintData(self, IndexCount):  # To be changed to SetData at the end of the project
        TestParticleSample = sys.argv[1]
        TestParticleTime, Index, SemiMajorAxis, Eccentricity, Inclination, Omega, omega, AngularPosition, Longitude = np.genfromtxt("tp" + TestParticleSample + ".out", unpack=True)
        TextFile.write((str(self.Index) + " " + str(SemiMajorAxis[IndexCount]) + " " + str(Eccentricity[IndexCount]) + " " + str(Inclination[IndexCount]) + " " + str(Omega[IndexCount]) + " " + str(omega[IndexCount]) + " " + str(AngularPosition[IndexCount]) + " " + str(self.Name) + " " + str(self.AverageSMA) + " " + str(self.AverageEccentricity) + " " + str(self.AverageInclination) + " " + str(self.ResonanceCenter) + " " + str(self.ResonanceAmplitude) + " " + str(self.SMACenter) + " " + str(self.SMAamplitude) + " " + '\n'))


if __name__ == '__main__':
    TestParticleSample = sys.argv[1]
    Index = np.genfromtxt('tp' + TestParticleSample + ".out", usecols=1, unpack=True)
    NumberOfTPs = max(Index)
    TextFile = open("TestParticleResonance" + TestParticleSample + ".out", "a+")
    TextFile.write("# SMA0 Ecc0 Inc0 Node0 ArgPeri0 MeanAnom0 Name AverageSMA AverageEcc AverageInc LibrationCenter LibrationAmp KozaiCenter KozaiAmp" + '\n')
    IndexCount = 0
    for IndexCount in range(0, int(NumberOfTPs)+1):
        Tp = TestParticle()
        Tp.IdentifyResonance(IndexCount)
        Tp.PrintData(IndexCount)
    print(TestParticleSample)
|
[
"noreply@github.com"
] |
Rabaa-basha.noreply@github.com
|
1c5180b83cdc8885c48ae3fdcdcde1daed57ea2f
|
7e7a00b2c845be85f243666cc01751de0a367e14
|
/Session4/intro_con.py
|
806e471fb8c3bd17349c7fa7c0e2ce05ee01cc3c
|
[] |
no_license
|
MeinSpaghett/C4T16-1
|
f5de3886fccbd08b2f989620dbdad1b9abd8190a
|
d5b4f62f0a2d1c77fd2ff5571f6b1fe5e1c7ae12
|
refs/heads/master
| 2020-06-24T12:58:29.263176
| 2019-07-26T06:25:32
| 2019-07-26T06:25:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
month=int(input("Month: "))
if month<1 or month>12:
    print("Invalid")
elif month<=3:
    print("Spring")
elif month<=6:
    print("Summer")
elif month<=9:
    print("Autumn")
else:
    print("Winter")
print("Bye")
|
[
"minhtri9201@gmail.com"
] |
minhtri9201@gmail.com
|
d3f3c8bf9d854803f2851bd69700d221860bc46b
|
f717be1c7d0455a726c9d87f8560662e9d505c60
|
/cfmtx.py
|
cd4c3021a0963e179549ba62a828835239e8cee9
|
[
"MIT"
] |
permissive
|
yodacatmeow/VGG16-SNU-B36-50
|
c56f5976b0b3a5fc73f0f349159ff8c2f2661aa4
|
e583eccfc9775288f457bfef4655a813e827eb03
|
refs/heads/master
| 2020-03-17T09:04:21.775732
| 2019-03-11T04:17:29
| 2019-03-11T04:17:29
| 133,459,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,433
|
py
|
"""
"cfmtx.py"
References
# Plot a confusion matrix
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
# Public python modules
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
# Custom python module
from snub36_50_category import class_names # categories of dataset "SNU-B36-50"
# If categories of a validation set is subset of categories of a training set
def cfmtx(label, prediction, dim, batch_size):
c = np.zeros([dim,dim]) # Initialize c confusion matrix
l = label # Label
p = prediction # Prediction
for i in range(batch_size):
c[l[i], p[i]] += 1
return c
# If categories of test set is not a subset of categories of training set
def cfmtx2(label, prediction, shape):
c = np.zeros([shape[0], shape[1]]) # Initialize confusion matrix
l = label # Label
p = prediction # Prediction
c[l, p] += 1
return c
# Merge k confusion matrices in .csv
def merge(folder, dim, keyword, n_data_per_category, normalize = True):
dir = os.listdir(folder)
filenames = []
for file in dir:
if file.find(keyword) == -1:
pass
else:
filenames.append(os.path.join(folder,file))
c = np.zeros([dim, dim])
for file in filenames:
# Read csv file
df = pd.read_csv(file, sep=',')
# Drop the first col.
df = df.drop(df.columns[0], axis=1)
c = c + df
c = np.array(c) # numpy array
if normalize:
c = c / n_data_per_category
else:
pass
return c
# If categories of validation set is subset of categories of training set
def draw(cfmtx, normalize = True):
# Fill
plt.imshow(cfmtx, interpolation='nearest', cmap=plt.cm.Blues)
plt.colorbar()
# Ticks
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=90)
plt.yticks(tick_marks, class_names)
# Text
fmt = '.2f' if normalize else 'd'
thres = cfmtx.max() / 2.
for i, j in itertools.product(range(cfmtx.shape[0]), range(cfmtx.shape[1])):
#print(i, j)
plt.text(j, i, int(cfmtx[i, j]), horizontalalignment='center', verticalalignment='center', color='white' if cfmtx[i, j] > thres else 'black')
plt.tight_layout()
# Label
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# If categories of test set is not a subset of categories of training set
def draw2(file, normalize = True, xticks_ref=None, yticks_ref=None):
# Read csv file
df = pd.read_csv(file, sep=',')
# Drop the first col.
df = df.drop(df.columns[0], axis=1)
cfmtx = np.array(df) # numpy array
# Fill
plt.imshow(cfmtx, interpolation='nearest', cmap=plt.cm.Blues)
plt.colorbar()
# Ticks
tick_marks_x = np.arange(len(xticks_ref))
tick_marks_y = np.arange(len(yticks_ref))
plt.xticks(tick_marks_x, xticks_ref, rotation=90)
plt.yticks(tick_marks_y, yticks_ref)
# Text
fmt = '.2f' if normalize else 'd'
thres = cfmtx.max() / 2.
for i, j in itertools.product(range(cfmtx.shape[0]), range(cfmtx.shape[1])):
#print(i, j)
plt.text(j, i, int(cfmtx[i, j]), horizontalalignment='center', verticalalignment='center', color='white' if cfmtx[i, j] > thres else 'black')
plt.tight_layout()
# Label
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
if __name__ == "__main__":
#1. A simple test
# (predicted classes)
# (Actual | 0 1 2
# classes) |----------------
# 0 | 0 0 0
# 1 | 0 2 0
# 2 | 0 1 0
#cf = cfmtx([1, 1, 2], [1, 1, 1], 3, 3)
#plt.imshow(cf)
#plt.show()
#2.
# Plot a confusion matrix from multiple files in .csv
#c1 = merge(folder='result/tran_nfrz_mspecdb15', dim=39 , keyword='cfm', n_data_per_category=50, normalize=False)
c2 = merge(folder='result/tr_nf_mspdb_2048_2048_592_ep50', dim=39, keyword='cfm', n_data_per_category=50, normalize=False)
draw(cfmtx=c2, normalize=False)
#3.
# Draw a confusion matrix whose label ~= prediction
#draw2(file = 'result/test1.csv', normalize=True, xticks_ref=class_names, yticks_ref=class_names2)
|
[
"its_me_chy@snu.ac.kr"
] |
its_me_chy@snu.ac.kr
|
74995875dc35115994335d1dceeec3fc4b65b3de
|
9eef6a845e03c20f940536f0d6331ba0dce2019f
|
/ShinaTimeSeries.py
|
50197f0df3e4f3a0da74727edc5b0b81ce8d7142
|
[] |
no_license
|
sPaMFouR/PythonJunk
|
6523985766c057dded4c412c9944753c68184391
|
a234e6c35a0663b15757c55f499db9ef7572721f
|
refs/heads/master
| 2021-05-05T06:11:15.910934
| 2018-01-24T16:49:15
| 2018-01-24T16:49:15
| 118,786,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
import pandas as pd


def user_input(message, default):
    while True:
        try:
            user_value = int(raw_input("{0}(Default = {1}): ".format(message, default)) or str(default))
        except ValueError:
            print("\n*ERROR: The User Input Is Not A Number*\n")
        else:
            break
    return user_value


file_name = raw_input("Enter The Filename to Generate Time Series Data: ") or "uv_lightcurve.dat"
data_file = pd.read_csv(filepath_or_buffer=file_name, names=['Time', 'Mag1', 'Err1', 'Mag2', 'Err2'], sep="\s+", engine='python')
rows = len(data_file.index.values)

delete_rows = user_input(message="Number Of Rows To Be Deleted?", default=10)
generate_files = user_input(message="Number Of Realisations To Be Implemented?", default=50)

while True:
    if delete_rows > rows:
        print("Failure To Perform Operation : You're Trying To Delete More Rows Than What The File Contains")
        delete_rows = user_input(message="Number Of Rows To Be Deleted?", default=10)
    else:
        break

format_mapping = {'Time': '{:.5f}', 'Mag1': '{:.5f}', 'Err1': '{:.5f}', 'Mag2': '{:.5f}', 'Err2': '{:.5f}'}

for index in range(1, generate_files + 1):
    new_df = data_file.sample(n=rows - delete_rows)
    new_df = new_df.sort_values(by='Time').sort_index(kind='mergesort')
    for key, value in format_mapping.items():
        new_df[key] = new_df[key].apply(value.format)
    new_df1 = new_df[['Time', 'Mag1', 'Err1']]
    new_df2 = new_df[['Time', 'Mag2', 'Err2']]
    new_df1.to_csv('file_{0}a.dat'.format(index), sep=" ", index=False, header=False)
    new_df2.to_csv('file_{0}b.dat'.format(index), sep=" ", index=False, header=False)
|
[
"noreply@github.com"
] |
sPaMFouR.noreply@github.com
|
5a1c12b74b4fea0c91d86dada50adabd244268ef
|
0c436d9c5eefa59aaee1a46d0c4bdf6701014803
|
/model.py
|
96988e29e97fa1c769e616f4bdefedc42a197bea
|
[] |
no_license
|
soham96/grab_safety
|
ab7e41340c5416c1fb37d44925ef4aba48e23eed
|
838e1585f3b17fe701862fb32fd44345bce0afb7
|
refs/heads/master
| 2023-04-19T02:43:43.433453
| 2019-06-17T16:20:28
| 2019-06-17T16:20:28
| 192,161,022
| 0
| 0
| null | 2023-03-24T23:02:47
| 2019-06-16T07:08:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,810
|
py
|
import argparse
import os

import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Conv1D, Dense, TimeDistributed, ConvLSTM2D, Dropout, LSTM, MaxPooling1D
from keras.utils import to_categorical
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score, roc_auc_score, roc_curve

from utils.helpers import read_data
from preprocess import *


def get_results(model, model_type):
    '''
    Function to get the result from testing the model.

    Args:
        model: The trained model
        model_type: The type of model. For instance 'LSTM'

    Returns:
        NA. Prints the statistics of the results of training the model
    '''
    x, y = get_test_batch(batches=1, model=model_type)
    predicted_class = model.predict(x, verbose=1)
    predicted_class = [np.argmax(r) for r in predicted_class]
    test_y = [np.argmax(r) for r in y]

    print('Confusion matrix is \n', confusion_matrix(test_y, predicted_class))
    print('tn, fp, fn, tp =')
    print(confusion_matrix(test_y, predicted_class).ravel())
    # Precision
    print('Precision = ', precision_score(test_y, predicted_class))
    # Recall
    print('Recall = ', recall_score(test_y, predicted_class))
    # f1_score
    print('f1_score = ', f1_score(test_y, predicted_class))
    # cohen_kappa_score
    print('cohen_kappa_score = ', cohen_kappa_score(test_y, predicted_class))
    # roc_auc_score
    print('roc_auc_score = ', roc_auc_score(test_y, predicted_class))


def LSTM_train(args):
    '''
    Function to train a Vanilla LSTM model
    '''
    data_df, label_df = read_data()

    model = Sequential()
    model.add(LSTM(512, input_shape=(1200,11)))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    # fit network
    model.fit_generator(prepare_data_for_training(data_df, label_df, seq_len=1200), steps_per_epoch=1000, epochs=int(args.epochs), verbose=1)
    get_results(model=model, model_type='LSTM')

    if args.save_to:
        print("Saving Model")
        model.save(args.save_to)


def CNN_LSTM(args):
    '''
    Function to train a CNN LSTM
    '''
    data_df, label_df = read_data()

    model = Sequential()
    model.add(TimeDistributed(Conv1D(filters=128, kernel_size=3, activation='relu'), input_shape=(None,120,11)))
    model.add(TimeDistributed(Conv1D(filters=128, kernel_size=3, activation='relu')))
    model.add(TimeDistributed(Dropout(0.5)))
    model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(128))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.fit_generator(prepare_data_for_training(data_df, label_df, seq_len=1200, model_type='CNN_LSTM', batch_size=1), steps_per_epoch=1000, epochs=int(args.epochs), verbose=1)
    get_results(model=model, model_type='CNN_LSTM')

    if args.save_to:
        print("Saving Model")
        model.save(args.save_to)


def Conv_LSTM(args):
    '''
    Function to train a Convolutional LSTM
    '''
    data_df, label_df = read_data()

    model = Sequential()
    model.add(ConvLSTM2D(filters=64, kernel_size=(1,3), activation='relu', input_shape=(10, 1, 120, 11)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.fit_generator(prepare_data_for_training(data_df, label_df, seq_len=1200, model_type='CONV_LSTM', batch_size=1), steps_per_epoch=1000, epochs=int(args.epochs), verbose=1)
    get_results(model=model, model_type='CONV_LSTM')

    if args.save_to:
        print("Saving Model")
        model.save(args.save_to)


if __name__ == '__main__':
    a = argparse.ArgumentParser()
    a.add_argument('--model', default='LSTM', help='There are three models you can train: LSTM, CNN_LSTM and CONV_LSTM. Choose one of those values. Default: LSTM')
    a.add_argument('--save_to', help='Location to save your model to.')
    a.add_argument('--epochs', help='Number of epochs to train your model for. Default = 1', default=1)
    args = a.parse_args()

    if args.model == 'LSTM':
        LSTM_train(args)
    elif args.model == 'CNN_LSTM':
        CNN_LSTM(args)
    elif args.model == 'CONV_LSTM':
        Conv_LSTM(args)
    else:
        raise ValueError("Please specify model using the --model tag. Use one of: LSTM, CNN_LSTM and CONV_LSTM. See --help for more info.")
|
[
"96soham96@gmail.com"
] |
96soham96@gmail.com
|
734f55e09030ebb7d9fbf0535d07c75dbd730d08
|
e9f4bf4aed7c68b0cde2e2cd02f7bc8f06978a24
|
/lib/python3.8/site-packages/multidict-5.0.2-py3.8-linux-x86_64.egg/multidict/_multidict.py
|
ff0d136d32c09a7af0956b7e197199b94ea33f81
|
[] |
no_license
|
mihalko711/bot
|
d0abdcbafafef71df584187d2fd3dbdada99cedf
|
c08d34e46d8b218d8b68ab19e8948286523e9138
|
refs/heads/master
| 2023-01-18T20:50:23.351484
| 2020-11-15T00:09:24
| 2020-11-15T00:09:24
| 312,925,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
def __bootstrap__():
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    __file__ = pkg_resources.resource_filename(__name__, '_multidict.cpython-38-x86_64-linux-gnu.so')
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"“mihchirkov711@gmail.com”"
] |
“mihchirkov711@gmail.com”
|
9bdf567e333e76234a4dfe0b334b47cf62c82dfd
|
41b6b1ea3692e6f8f2f05b99c6a5bbf4b8bc1007
|
/task12.py
|
79c8a8f8151eb7e0058e3e8473e411e54cf7c4ba
|
[] |
no_license
|
komap2017/euler
|
8d96fe1f2fece8b74a57dc99f4e1afa8c09c47c0
|
b0738248089108ac104760f621c870760ec98d90
|
refs/heads/master
| 2020-03-26T11:38:49.696716
| 2018-08-15T21:19:19
| 2018-08-15T21:19:19
| 144,852,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
import numpy as np
import num
import seq


def triangular(n):
    return np.sum(range(1, n + 1))


def main():
    arr = []
    start = 12
    a = seq.Array([1, 2, 3])
    print(a)
    print(a[-1])
    while len(arr) == 0:
        arr = seq.Array(np.arange(start * 1000, (start + 1) * 1000))
        print(arr)
        arr.map(triangular)
        print(arr)
        arr.filter(lambda x: len(tuple(num.dividers(x))) > 500)
        print(arr)
        start += 1
    print(arr[0])


if __name__ == '__main__':
    main()
|
[
"nikitos-grigorenko@yandex.ru"
] |
nikitos-grigorenko@yandex.ru
|
9fe54c21a865df6d0240e60ece7685cca2636c37
|
a2d364202982b14b1c3c5a12956306f9276a1071
|
/live_coins_with_selenium/live_coins_with_selenium/middlewares.py
|
c7c35921cdba839aa6b69bbe53316d1a4ee535dc
|
[] |
no_license
|
izharabbasi/Scraping-with-scrapy
|
2fa2571339894128d958f64c94168de9eaaa0965
|
9fb8dca2b048191f11e101664e7ec8a072192dc2
|
refs/heads/master
| 2022-12-11T09:02:14.574777
| 2020-09-05T14:08:07
| 2020-09-05T14:08:07
| 281,634,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class LiveCoinsWithSeleniumSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class LiveCoinsWithSeleniumDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"izharabbasi103@gmail.com"
] |
izharabbasi103@gmail.com
|
48524d5ca62173a782642eea459cb9ee3a61d5a8
|
dc950817c283a31ad4c3ee047d1bc3b11b20b435
|
/ronaldo.py
|
b5f0ad288f2d0c8fe98586b653726d8332963d18
|
[] |
no_license
|
rajatkeshri/ZS-HACK-predict-ronaldo-goal
|
319c3e5faba5eb4a2b1c073afa21a2a2f2ba1394
|
8da586712e08aa7bdca326816ff82126bd6a7e7b
|
refs/heads/master
| 2020-07-16T02:27:31.448438
| 2020-05-31T13:14:03
| 2020-05-31T13:14:03
| 205,700,116
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,777
|
py
|
import pandas as pd
import numpy as np
import math
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
from sklearn.metrics import mean_squared_error
import scipy
from scipy.stats import spearmanr
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
datasets = pd.read_csv('data.csv')
output=pd.DataFrame(datasets)
cols = [17,10]
output = output[output.columns[cols]]
testdata=pd.DataFrame(datasets)
cols = [2,3,5,9,10]
testdata = testdata[testdata.columns[cols]]
df = pd.DataFrame(datasets)
cols = [2,3,5,9,10]
df = df[df.columns[cols]]
###########################################################################################
# Train DATA: drop every row with a missing value in any selected column
df = df.dropna(subset=["is_goal", "distance_of_shot", "power_of_shot",
                       "location_x", "location_y"])
#print(df)
X = df.iloc[:, :-1].values
Y = df.iloc[:, 4].values
#print(X,Y)
###################################################################################################
#Test DATA: keep only the rows still missing is_goal (the rows to predict)
testdata = testdata[testdata["is_goal"].isna()].copy()
values = {'distance_of_shot': 0.0, 'power_of_shot': 0.0, 'location_x': 0.0, 'location_y': 0.0}
testdata.fillna(value=values,inplace=True)
#print(testdata)
X_TEST=testdata.iloc[:,:-1].values
Y_TEST=testdata.iloc[:,4].values
#print(X_TEST,Y_TEST)
output = output[output["is_goal"].isna()].copy()
values ={"shot_id_number":0.0}
output.fillna(value=values,inplace=True)
#print(output)
###################################################################################################
(X_train, X_test, Y_train, Y_test) = train_test_split(X, Y, random_state=0)
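# NOTE: the split above is computed but never used; every model below is fit on the full X, Y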
Lr=LogisticRegression(random_state=0, solver='lbfgs', multi_class='ovr')
LRRR=LinearRegression()
SVM = svm.LinearSVC()
RF = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
NN = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
Lr.fit(X,Y)
LRRR.fit(X,Y)
SVM.fit(X, Y)
RF.fit(X, Y)
NN.fit(X, Y)
print(Lr.score(X,Y))
print(LRRR.score(X,Y))
print(SVM.score(X,Y))
print(RF.score(X,Y))
print(NN.score(X,Y))
l=[[10,12,3,32]]
print(Lr.predict(X_TEST[0:10]))
print(LRRR.predict(X_TEST[0:10]))
print(SVM.predict(X_TEST[0:10]))
print(RF.predict(X_TEST[0:10]))
print(NN.predict(X_TEST[0:10]))
Y_TEST=Lr.predict(X_TEST)
print(Y_TEST)
output["is_goal"]= Y_TEST
output["shot_id_number"]=output.index+1
print(output)
output.to_csv('file1.csv',index = False)
|
[
"noreply@github.com"
] |
rajatkeshri.noreply@github.com
|
52abb923e5ea3868b73c18035ecd3ed6b254bee3
|
004f8f49e335f8e41462eae512bbe20d750fce8c
|
/rldb/db/paper__ddqn/algo__random/__init__.py
|
1dc5a63582d0a276e767fc89de8e1692297ca2ae
|
[
"MIT"
] |
permissive
|
seungjaeryanlee/rldb
|
3ab4317e26a39e6fa9b6e7ed9063907dc6a60070
|
8c471c4666d6210c68f3cb468e439a2b168c785d
|
refs/heads/master
| 2020-04-29T15:50:26.890588
| 2019-12-19T08:31:12
| 2019-12-19T08:31:12
| 176,241,314
| 52
| 2
|
MIT
| 2019-05-16T06:33:17
| 2019-03-18T08:54:24
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
"""
Random scores from DDQN paper.
106 entries
- 49 no-op entries from DQN
- 49 human entries from Gorila DQN
------------------------------------------------------------------------
8 unique entries
"""
from .entries import entries
# Specify ALGORITHM
algo = {
# ALGORITHM
"algo-title": "Random",
"algo-nickname": "Random",
}
# Populate entries
entries = [{**entry, **algo} for entry in entries]
assert len(entries) == 8
|
[
"noreply@github.com"
] |
seungjaeryanlee.noreply@github.com
|
e86dc53a1ad4fd2dc2052d8609c6dedf4840b3ac
|
79c0c60dc5e380ae30d0cd8ea08fa9b81247047e
|
/scripts/Jesutofunmi-Adewole.py
|
b7e587fb0dfb08e8dd6ddd9622b62034877b6f45
|
[] |
no_license
|
ahmed2929/Team-Incredible
|
440e35077de4953926bc26ef968313a9d0f31cac
|
6e2e312232c929d373cfa969e04456020ec2667f
|
refs/heads/master
| 2022-09-30T02:24:46.414439
| 2020-06-05T17:53:33
| 2020-06-05T17:53:33
| 269,724,967
| 3
| 0
| null | 2020-06-05T17:46:43
| 2020-06-05T17:46:43
| null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
#!/usr/bin/env python3
import json
my_data = "Hello World, this is Jesutofunmi Adewole with HNGi7 ID HNG-03321 and email tofdebby@gmail.com using Python for stage 2 task."
def print_my_data():
print(my_data)
print_my_data()
|
[
"tofdebby@gmail.com"
] |
tofdebby@gmail.com
|
9ffcac02920e318763ea537522fab3362b56979c
|
49a130c36c2db4007a7ace6bcab2b2644092cd7a
|
/blackjackenv.py
|
35735727424acbd216bbcd644e5256cfb76dbae1
|
[] |
no_license
|
nbeguier/Q_Blackjack
|
91bcdb0c3ca5b3a193bd0773d495b5d5485f5b4e
|
8771cef05f7b718cc6c96638dd1517c358d5cdce
|
refs/heads/master
| 2021-06-14T05:33:45.609365
| 2017-04-30T17:40:08
| 2017-04-30T17:40:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" BlackJack Environment """
from random import randint
from pybrain.rl.environments.environment import Environment
class BlackjackEnv(Environment):
""" A (terribly simplified) Blackjack game implementation of an environment. """
# the number of action values the environment accepts
indim = 1
# the number of sensor values the environment produces
outdim = 20
hand_value = 0
def getSensors(self):
"""
The currently visible state of the world
The observation may be stochastic - repeated calls returning different values
:rtype: by default, this is assumed to be a numpy array of doubles
"""
if self.hand_value == 0:
self.hand_value = randint(self.indim, self.outdim)
else:
self.hand_value += randint(self.indim, 10)
if self.hand_value > self.outdim:
self.hand_value = 0
return [float(self.hand_value),]
def performAction(self, action):
"""
        Perform an action on the world that changes its internal state (maybe stochastically).
:key action: an action that should be executed in the Environment.
:type action: by default, this is assumed to be a numpy array of doubles
"""
# The environment can't affect the action
return action
def reset(self):
"""
Most environments will implement this optional method that allows for reinitialization.
"""
self.hand_value = 0
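# Minimal interaction sketch (illustrative only; a pybrain Task/Experiment
# would normally drive this loop):
if __name__ == '__main__':
    env = BlackjackEnv()
    for _ in range(5):
        print(env.getSensors())
    env.reset()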
|
[
"nicolas_beguier@hotmail.com"
] |
nicolas_beguier@hotmail.com
|
cea6eac4eae56b5cb5246f602da345bb5071700f
|
b2ead300f2f62de62d1d10e3678c89fc4ff3ce19
|
/quicksort.py
|
233ab0eace544643f04caaf09ecb8f682d6a8f9f
|
[
"MIT"
] |
permissive
|
shaneslone/cs_week4_day4
|
0fa2e263c5342c16c58e1e715b57414abe4acc1c
|
7504e70d637d1dbe3e4b0530b1d7fbbb1005e4d8
|
refs/heads/main
| 2023-02-25T09:33:57.949844
| 2021-02-05T03:19:04
| 2021-02-05T03:19:04
| 336,150,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
def quicksort(a):
if len(a) <= 1:
return a
pivot = a[0]
left = []
right = []
for i in range(1, len(a)):
if a[i] > pivot:
right.append(a[i])
else:
left.append(a[i])
left = quicksort(left)
right = quicksort(right)
return left + [pivot] + right
print(quicksort([4, 3, 5, 1, 2]))
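# Note: elements equal to the pivot land in `left`, so duplicates sort correctly;
# taking a[0] as the pivot degrades to O(n^2) on already-sorted input.
print(quicksort([3, 1, 3, 2, 3]))  # -> [1, 2, 3, 3, 3]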
|
[
"slone.shane@gmail.com"
] |
slone.shane@gmail.com
|
df58907e1b6de8efe4fd2f7e41c0775d357df081
|
df1a0bcb185a6ef6ec67bdf06b6846680d14784d
|
/load_table.py
|
6d520e69885fa757e5057adce937583d332a7fe0
|
[] |
no_license
|
Ruidi/Select-Optimal-Decisoins-via-DRO-KNN
|
c82153c8a4e23bd51cbb38eb9e4e9edfbbd7d9e5
|
e6e52585fd95aab8812d0c0148c9ac6bbad83454
|
refs/heads/master
| 2020-08-24T00:48:35.645889
| 2019-10-22T06:20:18
| 2019-10-22T06:20:18
| 216,734,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,866
|
py
|
import sys
sys.path.append('/home/rchen15/prescription/run_pres')
import pandas as pd
import numpy as np
from sklearn.model_selection import ShuffleSplit
from collections import Counter
from util import get_base_path
def load_diabetes_final_table_for_prescription(trial_id, test_ratio=0.2):
"""
load preprocess diabetes data
:param trial_id: trial id
:param test_ratio: ratio of test data
:return: train_all_x, train_all_y, train_all_z, train_all_u, test_x, test_y, test_z, test_u
"""
df = pd.read_pickle('/home/rchen15/prescription/presription_shared/diabetes.p')
prescription_columns = ['prescription_oral', 'prescription_injectable']
hist_pres_columns = ['hist_prescription_oral', 'hist_prescription_injectable']
useful_feature = [item for item in df.columns.tolist() if
item not in prescription_columns and item != 'future_a1c']
X = np.array(df[useful_feature].values, dtype=np.float32)
y = np.array(df['future_a1c'].values, dtype=np.float32)
z = np.array(df[prescription_columns].values, dtype=int)
u = np.array(df[hist_pres_columns].values, dtype=int)
z = np.array(z[:, 0] + 2 * z[:, 1], dtype=int)
u = np.array(u[:, 0] + 2 * u[:, 1], dtype=int)
train_all_x = []
train_all_y = []
train_all_z = []
train_all_u = []
test_x = []
test_y = []
test_z = []
test_u = []
for pres_id in range(4):
valid_id = z == pres_id
this_X = X[valid_id]
this_y = y[valid_id]
this_z = z[valid_id]
this_u = u[valid_id]
rs = ShuffleSplit(n_splits=1, test_size=test_ratio, random_state=trial_id)
train_index, test_index = rs.split(this_X).__next__()
X_train_all, X_test = this_X[train_index], this_X[test_index]
y_train_all, y_test = this_y[train_index], this_y[test_index]
z_train_all, z_test = this_z[train_index], this_z[test_index]
u_train_all, u_test = this_u[train_index], this_u[test_index]
train_all_x.append(X_train_all)
train_all_y.append(y_train_all)
train_all_z.append(z_train_all)
train_all_u.append(u_train_all)
test_x.append(X_test)
test_y.append(y_test)
test_z.append(z_test)
test_u.append(u_test)
return train_all_x, train_all_y, train_all_z, train_all_u, test_x, test_y, test_z, test_u
def load_hypertension_final_table_for_prescription(trial_id, test_ratio=0.2):
"""
load preprocess hypertension data
:param trial_id: trial id
:param test_ratio: ratio of test data
:return: train_all_x, train_all_y, train_all_z, train_all_u, test_x, test_y, test_z, test_u
"""
df = pd.read_pickle('/home/rchen15/prescription/presription_shared/hypertension.p')
not_use_columns = ['measure_systolic_future', 'visits_in_regimen', 'measure_height',
'measure_o2_saturation', 'measure_respiratory_rate', 'measure_temperature',
'measure_weight', 'diag_042', 'diag_070', 'diag_110', 'diag_174',
'diag_185', 'hist_prescription_ACEI', 'hist_prescription_ARB',
'hist_prescription_AlphaBlocker', 'hist_prescription_BetaBlocker',
'hist_prescription_CCB', 'hist_prescription_Diuretics']
prescription_columns = ['prescription_ACEI', 'prescription_ARB', 'prescription_AlphaBlocker',
'prescription_BetaBlocker', 'prescription_CCB', 'prescription_Diuretics']
hist_pres_columns = ['hist_prescription_ACEI', 'hist_prescription_ARB', 'hist_prescription_AlphaBlocker',
'hist_prescription_BetaBlocker', 'hist_prescription_CCB', 'hist_prescription_Diuretics']
useful_feature = [item for item in df.columns.tolist()
if item not in not_use_columns and item not in prescription_columns]
X = np.array(df[useful_feature].values, dtype=np.float32)
y = np.array(df['measure_systolic_future'].values, dtype=np.float32)
z = np.array(df[prescription_columns].values, dtype=int)
u = np.array(df[hist_pres_columns].values, dtype=int)
z_c = z[:, 0] + 2 * z[:, 1] + 4 * z[:, 2] + 8 * z[:, 3] + 16 * z[:, 4] + 32 * z[:, 5]
z_c = np.asanyarray(z_c, dtype=int)
u_c = u[:, 0] + 2 * u[:, 1] + 4 * u[:, 2] + 8 * u[:, 3] + 16 * u[:, 4] + 32 * u[:, 5]
u_c = np.asanyarray(u_c, dtype=int)
    common_19 = [item[0] for item in Counter(z_c).most_common(19)]
    new_id = {item: item_id for item_id, item in enumerate(common_19)}
for i in range(64):
if i not in new_id.keys():
new_id[i] = 19
z = np.array([new_id[item] for item in z_c], dtype=int)
u = np.array([new_id[item] for item in u_c], dtype=int)
train_all_x = []
train_all_y = []
train_all_z = []
train_all_u = []
test_x = []
test_y = []
test_z = []
test_u = []
for pres_id in range(20):
valid_id = z == pres_id
this_X = X[valid_id]
this_y = y[valid_id]
this_z = z[valid_id]
this_u = u[valid_id]
rs = ShuffleSplit(n_splits=1, test_size=test_ratio, random_state=trial_id)
train_index, test_index = rs.split(this_X).__next__()
X_train_all, X_test = this_X[train_index], this_X[test_index]
y_train_all, y_test = this_y[train_index], this_y[test_index]
z_train_all, z_test = this_z[train_index], this_z[test_index]
u_train_all, u_test = this_u[train_index], this_u[test_index]
train_all_x.append(X_train_all)
train_all_y.append(y_train_all)
train_all_z.append(z_train_all)
train_all_u.append(u_train_all)
test_x.append(X_test)
test_y.append(y_test)
test_z.append(z_test)
test_u.append(u_test)
return train_all_x, train_all_y, train_all_z, train_all_u, test_x, test_y, test_z, test_u
|
[
"noreply@github.com"
] |
Ruidi.noreply@github.com
|
70a5d7670fe8a0d65eb10fa79e5d71bc62ccad8a
|
86cba2a1bc203454d4b4c4400fd2f6acfa5671ff
|
/ProjectFiles/Project/API/datafinderTest.py
|
805dca72e3021a7275b61ffd9b6f77537dfd14ed
|
[] |
no_license
|
AndresMtzP/Globetrotters
|
ce7b5a17dcfd29cfc607ddeb03f937ec99768684
|
dc8731a825dabc574e26bfaa5bd0d4d7630159e3
|
refs/heads/master
| 2023-02-07T09:27:56.868605
| 2017-05-24T23:54:45
| 2017-05-24T23:54:45
| 80,865,053
| 1
| 4
| null | 2020-12-30T09:17:46
| 2017-02-03T20:07:41
|
C
|
UTF-8
|
Python
| false
| false
| 682
|
py
|
import urllib
import requests
import wptools
from datafinder import getGeneral, getImage
loc = raw_input("Enter a location: ")
# tries to get population information for location
getGeneral(loc)
# saves image in python folder as loc.jpg
getImage(loc)
# OUTPUT FOR loc=Toronto AFTER RUNNING PROGRAM
# Enter a location: Toronto
# Location Name: Toronto
# Total Area (km2): 630.21
# Region: CA-ON
# Population: 2731571
# OUTPUT FOR loc=San Diego AFTER RUNNING PROGRAM
# Enter a location: San Diego
# Water area (km2): 122.27
# Location Name: San Diego, California
# Total Area (km2): 964.51
# Region: US-CA
# Population: 1394928
# Land Area (km2): 842.23
|
[
"andresmartinez@Andres-Martinezs-MacBook-Pro.local"
] |
andresmartinez@Andres-Martinezs-MacBook-Pro.local
|
6790a614d2c9581ab252bca4167e963b1e55845f
|
249782453476c5cd2937522806cbb0f15d7ab52b
|
/bot.py
|
ef5bcb2c0f34c6a1a3bd335ba90e819c9f78300e
|
[] |
no_license
|
ComedicChimera/Null-Discord-Bot
|
9247382f8a675fee6385af16400aba608e41b9ce
|
3c0376500695abf920dd404c092d70d1d500c149
|
refs/heads/master
| 2021-03-31T01:58:54.912752
| 2018-06-17T19:37:51
| 2018-06-17T19:37:51
| 124,813,710
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,439
|
py
|
import util
from command import process_command
import session as discord_session
import filters
from discord import Embed
from logger import log
# for the memes
from random import choice
@util.client.event
async def on_message(message):
if message.author.bot:
# check to make sure it didn't send message
if message.author == util.client.user:
return
# brandon clauses
if message.content.startswith('What do you call a'):
await insult(message)
elif message.content.startswith('How do you'):
await insult(message)
return
try:
await handle_message(message)
except Exception as e:
log(message.server.id, str(e))
await util.client.send_message(message.channel, embed=Embed(color=util.error_color, description='An unknown error occurred.'))
async def insult(message):
messages = [
'That joke wasn\'t funny and it never will be.',
'You are not funny.',
'Please be quiet for all of our sakes.',
'You make me want to die.',
'You don\'t need to tell jokes; you are one.',
'Inferior...',
':joy: :gun:',
'```Roses are red,\nViolets are blue,\nThat bot is garbage,\nand Brandon is too.```'
]
await util.client.send_message(message.channel, choice(messages))
@util.client.event
async def on_server_join(server):
embed = Embed(color=util.theme_color, title='Null Bot')
    embed.description = 'Null is a fully featured, powerful, multipurpose Discord bot for easy use on any server.'
    embed.add_field(name='Get Started', value='Use `!help` to get a list of commands and how to use them.', inline=False)
embed.add_field(name='Support', value='**Donate** donate_url\n**Source** github_url\n**Upvote** upvote_url', inline=False)
embed.add_field(name='Join our Discord', value='discord_url', inline=False)
icon_url = 'https://cdn.discordapp.com/avatars/226732838181928970/19562db0c14f445ac5a0bf8f605989c1.png?size=128'
embed.set_footer(text='Developed by ComedicChimera#3451', icon_url=icon_url)
await util.client.send_message(server.default_channel, embed=embed)
@util.client.event
async def on_member_join(member):
if member.id in util.servers[member.server].hack_bans:
await util.client.kick(member)
else:
await util.client.send_message(member.server.default_channel, 'Welcome `%s`!' % member.name)
async def handle_message(message):
prefix = util.get_server_prefix(message.server)
# use custom input handler if specified
if discord_session.has_session(message.server, message.channel, message.author):
await util.client.send_typing(message.channel)
s = discord_session.get_session(message.server, message.channel, message.author)
await s.handler(message, s.session_id)
# else pass to command handler
elif message.content.startswith(prefix):
await process_command(message, prefix)
# apply filters
elif filters.match_filters(message):
await util.client.delete_message(message)
if __name__ == '__main__':
# imported command sets
import modules.general.commands
import modules.music.commands
import modules.math.commands
import modules.internet.commands
import modules.money.commands
import modules.games.commands
import modules.admin.commands
# start the bot
util.client.run(util.token)
|
[
"forlornsisu@gmail.com"
] |
forlornsisu@gmail.com
|
9ed19fde50f5ddaf080e4ab38c3515b6a0952675
|
6250bcc03d18734672c200edad776b9407b2d1b7
|
/libra/spectra/__init__.py
|
92a00dd59216796be75917777bb1ca8871c95afa
|
[
"MIT"
] |
permissive
|
bmorris3/libra
|
f7835110d288e4765e89d78d9022a6e49e65604f
|
2a6faa54c72e1c077f521260dfb0eda744f6afc7
|
refs/heads/master
| 2023-03-19T08:50:10.178743
| 2018-11-08T15:53:26
| 2018-11-08T15:53:26
| 117,314,484
| 0
| 2
|
MIT
| 2018-02-21T16:31:58
| 2018-01-13T04:40:01
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 125
|
py
|
from .phoenix import *
from .spectrum import *
from .irtf import *
from .spectral_types import *
from .transmission import *
|
[
"brettmorris21@gmail.com"
] |
brettmorris21@gmail.com
|
4655af3f1bdada7560da13b76b6b2a81021a9c4a
|
9de3976bb633fd4418193221a4e09aa229cab827
|
/adminEnviron/views.py
|
91ba616aefaff5224fac868543a217b2e36928de
|
[] |
no_license
|
hbbuildbot/adminPromax
|
6f17d5238bea4a9add60fc49699d328b89106e2b
|
3548b0ff0be5b97d1fe79b6536b3673fc9b4e5fd
|
refs/heads/master
| 2021-06-30T20:57:07.000206
| 2017-09-20T17:24:11
| 2017-09-20T17:24:11
| 104,246,649
| 0
| 0
| null | 2017-09-20T17:27:39
| 2017-09-20T17:27:39
| null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
from django.shortcuts import get_object_or_404, render
from datetime import datetime
from django.views import generic
from django.http import HttpRequest, HttpResponse
# Create your views here.
def index(request):
"""Renders the 'index' page."""
assert isinstance(request, HttpRequest)
return render(
request,
'adminEnviron/index.html',
{
'menu':'adminEnviron',
'appname':'adminPromax',
'title':'adminEnviron/Index',
'year':datetime.now().year,
'request':request,
}
)
|
[
"nunix@hbsis.com.br"
] |
nunix@hbsis.com.br
|
317686d0f4e788f51f6ea851decc5e1779293a01
|
25944ee62c9bf6e7b8aacaecdc6072bc1d9c93bc
|
/paprika_sync/core/utils.py
|
0fb7d43871f36d8ea5ab6995c34a08cd602e2563
|
[
"MIT"
] |
permissive
|
grschafer/paprika-sync
|
b6c797858a99618b80aa289860c2598f31c34db1
|
8b6fcd6246557bb79009fa9355fd4d588fb8ed90
|
refs/heads/master
| 2021-09-02T09:10:26.848750
| 2021-08-15T20:13:04
| 2021-08-15T20:13:04
| 171,336,526
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
import functools
import re
# Logs start and end of the wrapped function
# Used for extra logging verbosity on cronjobs (management commands)
def log_start_end(logger):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
logger.info('Start')
val = func(*args, **kwargs)
logger.info('End')
return val
return wrapper
return decorator
def strip_query_params(url):
return url.partition('?')[0]
PAPRIKA_S3_KEY_REGEX = re.compile(r'http://uploads.paprikaapp.com.s3.amazonaws.com/(?P<key>.*)')
def make_s3_url_https(url):
match = PAPRIKA_S3_KEY_REGEX.match(url)
if match:
key = match.group("key")
return f'https://s3.amazonaws.com/uploads.paprikaapp.com/{key}'
return url
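# Usage sketch for log_start_end (`sync_recipes` is a hypothetical stand-in
# for a management command body):
import logging

example_logger = logging.getLogger(__name__)

@log_start_end(example_logger)
def sync_recipes():
    url = 'http://uploads.paprikaapp.com.s3.amazonaws.com/photo.jpg?sig=abc'
    return make_s3_url_https(strip_query_params(url))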
|
[
"greg@occipital.com"
] |
greg@occipital.com
|
5f9a64bbd544a9c6fa5ebb1d8ddc4a03a1a6da60
|
a2312fe43596c308627982719537b6c552ad912d
|
/p7.py
|
5f78b8893576ed928368bc3953efe270d122e061
|
[] |
no_license
|
aasa11/pychallenge
|
19417e58a3420755174917826b51b433c37ebaa7
|
3a490c05826f8a387f05067b28662c5e042df72f
|
refs/heads/master
| 2021-01-15T14:46:37.302484
| 2013-08-21T07:39:54
| 2013-08-21T07:39:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
#!/usr/bin/
#coding=gbk
'''
Created on 2013/08/06
@summary:
@author: huxiufeng
'''
import Image
def isalpha(ch):
if ord(ch) <= ord('z') and ord(ch) >= ord('a') :
return True
elif ord(ch) <= ord('Z') and ord(ch) >= ord('A') :
return True
elif ord(ch) <= ord('9') and ord(ch) >= ord('0') :
return True
    elif ch == ' ' or ch == ',' or ch == '.' or ch == ':':
return True
return False
def getdata(lst, i):
ch = chr(lst[i])
if isalpha(ch):
return ch
return None
def openimg(imgfile):
im = Image.open(imgfile,'r')
for j in xrange(im.size[1]):
des = ''
for i in xrange(im.size[0]):
ch = getdata(im.getpixel((i,j)), 0)
if ch is not None:
des += ch
print des
#----------------------It is a split line--------------------------------------
def main():
imgfile = r'G:\down\ChrDw\oxygen.png'
openimg(imgfile)
des = ''
for i in [105, 110, 116,101, 103,114,105, 116, 121] :
des +=chr(i)
print des
#----------------------It is a split line--------------------------------------
if __name__ == "__main__":
main()
print "It's ok"
|
[
"anferneextt@gmail.com"
] |
anferneextt@gmail.com
|
fffedbf33f2b5efc781108075f4a6faaec096934
|
dcc16942a331f7a48df7271c158153bb621ff96d
|
/dynamic_programming/unique_paths_in_a_grid.py
|
06fd2ec9308c2ff58dab8e2e7b867c952d093379
|
[] |
no_license
|
muzavan/py-interviewbit
|
eb639ecad7fc35e8963cb9c2df244e9269537804
|
a66acf22f1f7fee04af63864051b8a93428b669f
|
refs/heads/master
| 2023-01-06T06:20:49.741860
| 2020-10-25T10:25:14
| 2020-10-25T10:25:14
| 306,753,027
| 5
| 1
| null | 2020-10-24T16:59:10
| 2020-10-23T21:43:06
|
Python
|
UTF-8
|
Python
| false
| false
| 931
|
py
|
class Solution:
# @param A : list of list of integers
# @return an integer
def uniquePathsWithObstacles(self, A):
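        # Rolling-row DP: ways(r, c) = ways(r-1, c) + ways(r, c-1), with 0 ways
        # through an obstacle cell (A[r][c] == 1). `prev` holds the finished
        # previous row; `curr` is the row being built.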
prev = []
R = len(A)
C = len(A[0])
for r in range(R):
curr = []
for c in range(C):
a = A[r][c]
if a == 1:
curr.append(0)
continue
# a == 0
if r == 0 and c == 0:
curr.append(1)
continue
poss = 0
if c != 0:
poss += curr[-1]
if r != 0:
poss += prev[c]
curr.append(poss)
prev = curr
return prev[-1]
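# e.g. Solution().uniquePathsWithObstacles([[0, 0], [1, 0]]) == 1
# (the obstacle at row 1, col 0 leaves only the right-then-down route)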
|
[
"muzavan@gmail.com"
] |
muzavan@gmail.com
|
9015233865c959df846691b9d184dbb18c449eeb
|
dd48c9a21aa25742cf2b2c5140849e8cd38afbc2
|
/lunch_and_learn/examples.py
|
0529d4d304c4bcbdad10214ed794582071aa2a0e
|
[] |
no_license
|
jayfry1077/dynamo_db_examples
|
f8177373c61479ab7ca0c89da466a1de5a57f74f
|
fe4c6cbef557ab4d3d618a0f6bfb2168756e9a62
|
refs/heads/master
| 2022-11-22T19:41:33.094284
| 2020-07-18T21:48:38
| 2020-07-18T21:48:38
| 277,045,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,418
|
py
|
import datetime
import boto3
from botocore.exceptions import ClientError
from botocore.exceptions import ValidationError
import uuid
import json
dynamodb = boto3.client('dynamodb', 'us-east-1')
TABLE_NAME = 'lunch_and_learn'
def put_owner(owner_name, franchise_name, owner_phone, owner_email, PPP):
created_at = datetime.datetime.now()
try:
result = dynamodb.put_item(
TableName=TABLE_NAME,
Item={
'PK': {'S': 'OWNER#{}'.format(owner_name)},
'SK': {'S': 'OWNER#{}'.format(owner_name)},
"FranchiseName": {"S": franchise_name},
"OwnerPhone": {"S": owner_phone},
"OwnerEmail": {"S": owner_email},
"PPP": {"S": PPP},
"Created_At": {"S": created_at.isoformat()},
}
)
print(result)
except ClientError as e:
print(e)
def get_owner(owner_name):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
KeyConditionExpression="#pk = :pk AND #sk = :sk",
ExpressionAttributeNames={
"#pk": "PK",
"#sk": "SK"},
ExpressionAttributeValues={
":pk": {'S': 'OWNER#{}'.format(owner_name)},
":sk": {'S': 'OWNER#{}'.format(owner_name)}
},
)
print(result)
return result
except ClientError as e:
print(e)
def get_owner_and_stores(owner_name):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
KeyConditionExpression="#pk = :pk",
ExpressionAttributeNames={
"#pk": "PK", },
ExpressionAttributeValues={
":pk": {'S': 'OWNER#{}'.format(owner_name)},
},
)
print(result)
return result
except ClientError as e:
print(e)
def add_store_to_owner(owner_name, store_number, store_phone, store_email, store_address, status, territory, region, market, area):
created_at = datetime.datetime.now()
try:
result = dynamodb.put_item(
TableName=TABLE_NAME,
Item={
'PK': {'S': 'OWNER#{}'.format(owner_name)},
'SK': {'S': 'STORE#{}'.format(store_number.zfill(6))},
"StorePhone": {"S": store_phone},
"StoreEmail": {"S": store_email},
"StoreAddress": {"S": store_address},
"Status": {"S": status},
"Territory": {"S": territory},
"Region": {"S": region},
"Market": {"S": market},
"Area": {"S": area},
"GSI1": {"S": 'STORE#{}'.format(store_number.zfill(6))},
"Created_At": {"S": created_at.isoformat()},
}
)
print(result)
except ClientError as e:
print(e)
def add_store(store_number):
created_at = datetime.datetime.now()
try:
result = dynamodb.put_item(
TableName=TABLE_NAME,
Item={
'PK': {'S': 'STORE#{}'.format(store_number)},
'SK': {'S': 'STORE#{}'.format(store_number.zfill(6))},
"Created_At": {"S": created_at.isoformat()},
}
)
print(result)
except ClientError as e:
print(e)
def get_store_with_filter(owner_name, territory, region, market, area):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
KeyConditionExpression="#pk = :pk",
FilterExpression="#terr = :terr AND #market = :market AND #region = :region AND #area = :area",
ExpressionAttributeNames={
"#pk": "PK",
"#terr": "Territory",
"#market": "Market",
"#region": "Region",
"#area": "Area"
},
ExpressionAttributeValues={
":pk": {'S': 'OWNER#{}'.format(owner_name)},
":terr": {"S": territory},
":region": {"S": region},
":market": {"S": market},
":area": {"S": area},
}
)
print(result)
return result
except ClientError as e:
print(e)
def get_owner_info_by_store(store, owner_name):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
IndexName='GSI1-index',
KeyConditionExpression="GSI1 = :GSI1",
            ExpressionAttributeValues={
                # key the GSI on the store number; the matching item's PK identifies the owner
                ":GSI1": {'S': 'STORE#{}'.format(store)}
            }
)
print(result)
return result
except ClientError as e:
print(e)
def get_owner_info_by_store_bad(store_number, owner_name):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
KeyConditionExpression="#pk = :pk",
FilterExpression="#store = :store_number",
ExpressionAttributeNames={
"#pk": "PK",
"#store": "GSI1"
},
ExpressionAttributeValues={
":pk": {'S': 'OWNER#{}'.format(owner_name)},
":store_number": {'S': 'STORE#{}'.format(store_number)}
}
)
print(result)
return result
except ClientError as e:
print(e)
def add_employee_to_store(store_number, employee_id, employee_name, employee_age, employee_role):
created_at = datetime.datetime.now()
try:
result = dynamodb.put_item(
TableName=TABLE_NAME,
Item={
'PK': {'S': 'STORE#{}'.format(store_number.zfill(6))},
'SK': {'S': 'EMPLOYEE#{}'.format(employee_id)},
'GSI1': {'S': 'STORE#{}'.format(store_number.zfill(6))},
'Name': {'S': employee_name},
'Age': {'N': employee_age},
'Role': {'S': employee_role},
'GSI2': {'S': 'EMPLOYEE#{}'.format(employee_id)},
"Created_At": {"S": created_at.isoformat()},
}
)
print(result)
except ClientError as e:
print(e)
def get_employees_by_store(store_number):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
KeyConditionExpression="#pk = :pk AND begins_with(#sk, :sk)",
ExpressionAttributeNames={
"#pk": "PK",
"#sk": "SK"
},
ExpressionAttributeValues={
":pk": {'S': 'STORE#{}'.format(store_number)},
":sk": {'S': 'EMPLOYEE#'}
}
)
print(result)
return result
except ClientError as e:
print(e)
def get_store_by_employeeID(employee_id):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
IndexName='GSI2-index',
KeyConditionExpression="GSI2 = :GSI2",
ExpressionAttributeValues={
":GSI2": {'S': 'EMPLOYEE#{}'.format(employee_id)}
}
)
print(result)
return result
except ClientError as e:
print(e)
def add_menu_items_to_store(store_number, menu_item_id, price, tax, description):
created_at = datetime.datetime.now()
try:
result = dynamodb.put_item(
TableName=TABLE_NAME,
Item={
'PK': {'S': 'STORE#{}'.format(store_number.zfill(6))},
'SK': {'S': 'ITEM#{}'.format(menu_item_id)},
'ItemID': {'S': menu_item_id},
'Price': {'S': price},
'Tax': {'S': tax},
'description': {'S': description},
"Created_At": {"S": created_at.isoformat()},
}
)
print(result)
except ClientError as e:
print(e)
def get_items_by_store(store_number):
try:
result = dynamodb.query(
TableName=TABLE_NAME,
KeyConditionExpression="#pk = :pk AND begins_with(#sk, :sk)",
ExpressionAttributeNames={
"#pk": "PK",
"#sk": "SK"
},
ExpressionAttributeValues={
":pk": {'S': 'STORE#{}'.format(store_number)},
":sk": {'S': 'ITEM#'}
}
)
print(result)
return result
except ClientError as e:
print(e)
# put_owner('Jonathan_Bradbury', "Taco Hut",
# '714-394-5161', 'jonathan.bradbury@yum.com', 'PPP1234567')
# get_owner('Jonathan_Bradbury')
# get_owner_and_stores('Jonathan_Bradbury')
# add_store_to_owner('Jonathan_Bradbury', '000017', '555-555-1514', '000017@tacobell.com',
# '17 Burrito road, Mexican Pizza City, CA, 92617', 'OPEN', 'Territory1', 'Region1', 'Market1', 'Area2')
# get_store_with_filter('Jonathan_Bradbury', 'Territory1',
# 'Region1', 'Market1', 'Area1')
# add_store('000017')
# get_owner_info_by_store('000017', 'Jonathan_Bradbury')
'''This shows that using filter expressions scans more data and cost more money'''
# get_owner_info_by_store_bad('000017', 'Jonathan_Bradbury')
# add_employee_to_store('000015', 'jxb7210',
# 'Jonathan Bradbury', '32', 'Service Champion')
# get_employees_by_store('000015')
# get_store_by_employeeID('jxb7210')
# add_menu_items_to_store('000017', '00004', '3.50', '.27', 'Baja Blast')
# get_items_by_store('000017')
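# Single-table access patterns demonstrated above:
#   owner item:           PK = SK = OWNER#<name>
#   owner -> stores:      PK = OWNER#<name>, SK begins_with STORE#
#   store -> employees:   PK = STORE#<num>,  SK begins_with EMPLOYEE#
#   store -> menu items:  PK = STORE#<num>,  SK begins_with ITEM#
#   store -> owner:       query the GSI1-index by STORE#<num>
#   employee -> store:    query the GSI2-index by EMPLOYEE#<id>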
|
[
"53982942+jayfry1077@users.noreply.github.com"
] |
53982942+jayfry1077@users.noreply.github.com
|
93f6b405d9e674595aa73ac2ae21af8d1576e29a
|
251f0caf2b9bbb2ff2f469e37cd56280184d2f15
|
/src/classify_proj/urls.py
|
994d44104d13b0704139dc6d647fcce6ec677d4f
|
[] |
no_license
|
boluwatifeajayi/draw-app-react-django-cnn
|
ccc190a135d5ee729b9c29903f939b0ccb63fac6
|
ee4a3168b924f4478706a1d8931ea4f05e72a636
|
refs/heads/main
| 2023-02-08T11:51:08.013229
| 2021-01-02T12:51:36
| 2021-01-02T12:51:36
| 326,181,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
"""classify_proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('digits.api.urls')),
path('api-auth/', include('rest_framework.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"bolu4good@gmail.com"
] |
bolu4good@gmail.com
|
80c71a359a9e7ac7377d8ce5d1c8d082f91f6b72
|
6ee49a8922c052703c5281b2ee9fce1cd9c14a9b
|
/datamidplatform/synchronizationTeacher_hotvalue.py
|
36f452d008fc60379c6aed43a73581f1de3a9986
|
[] |
no_license
|
XingshengLiu/PythonTools
|
a5f50388bd598be39a2d93138b0d6031c372d36c
|
701ce95fe6e0682685115d032cbdd3287bcbc187
|
refs/heads/master
| 2023-08-17T05:51:39.012439
| 2023-07-21T02:07:20
| 2023-07-21T02:07:20
| 136,017,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
# @File : synchronizationTeacher_hotvalue.py
# @Author: LiuXingsheng
# @Date : 2020/7/19
# @Desc : Sync famous-teacher popularity (hot value)
import os
import csv
import collections
import xlsxwriter
DirPath= r'H:\大数据中台项目\标签测试报告\同步名师热度'
def readSourceData():
teachercollectionlist = collections.defaultdict(dict)
datacollectionlist = collections.defaultdict(dict)
hotvaluecollectionlist = collections.defaultdict(dict)
datadic = {}
with open(os.path.join(DirPath, 't_teacher.csv'), 'r', encoding='utf-8') as f:
reader = csv.reader(f)
teacherlist = [row for row in reader]
for teacher in teacherlist[1:]:
teachercollectionlist[teacher[1]] = teacher[0]
print(teachercollectionlist)
with open(os.path.join(DirPath, '同步课名师点播数据已筛选.csv'), 'r', encoding='utf-8') as f:
reader = csv.reader(f)
datalist = [row for row in reader]
for teacher in datalist[1:]:
datacollectionlist[str(teacher[0].split('_')[0])] = teacher[1:]
print(datacollectionlist)
with open(os.path.join(DirPath, '同步名师结果数据.csv'), 'r', encoding='utf-8') as f:
reader = csv.reader(f)
hotlist = [row for row in reader]
for hotvalue in hotlist[1:]:
hotvaluecollectionlist[hotvalue[1]] = hotvalue[3]
print(hotvaluecollectionlist)
for key in datacollectionlist.keys():
print('*******',str(teachercollectionlist[key]),'++++++++++++++++',hotvaluecollectionlist[teachercollectionlist[key]])
datadic.update({str(teachercollectionlist[key]) : datacollectionlist[key] + [hotvaluecollectionlist[teachercollectionlist[key]]]})
print(len(datadic))
print(datadic)
return datadic
def writeContent(datadic):
workbook = xlsxwriter.Workbook(os.path.join(DirPath, '同步课名师热度测试结果_all.xlsx'))
ws = workbook.add_worksheet(u'sheet1')
i= 0
for k,v in datadic.items():
ws.write(i,0,k)
for inneritem in range(len(v)):
ws.write(i,inneritem + 1,str(v[inneritem]))
i += 1
workbook.close()
if __name__ == '__main__':
datadic = readSourceData()
writeContent(datadic)
|
[
"liuxingsheng@oaserver.dw.gdbbk.com"
] |
liuxingsheng@oaserver.dw.gdbbk.com
|
b9f8ac63e8bd57e0dde32e47e098edd7df71f28d
|
0ae526b50510a4c7ccccfe81929029316bfcd452
|
/test/dataset/test_common.py
|
e3b9fd3f3edba167eb39f8f5560102aabc075737
|
[
"Apache-2.0"
] |
permissive
|
yejiachen/gluon-ts
|
6ecf837743fc5b05909c606184a243164009a411
|
ed3fa81cef12a1852398fac1410e00524cb99b46
|
refs/heads/master
| 2020-07-13T13:35:54.717207
| 2019-08-28T15:37:15
| 2019-08-28T15:37:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
import pandas as pd
from gluonts.dataset.common import ProcessStartField
@pytest.mark.parametrize(
"freq, expected",
[
("B", "2019-11-01"),
("W", "2019-10-27"),
("M", "2019-10-31"),
("Y", "2019"),
],
)
def test_process_start_field(freq, expected):
process = ProcessStartField.process
given = "2019-11-01 12:34:56"
assert process(given, freq) == pd.Timestamp(expected)
|
[
"noreply@github.com"
] |
yejiachen.noreply@github.com
|
62640cef3eac0c7d5abd2ea298c00f5fad2944e7
|
5908f3b7e5f074f7acd88e43458ab6f4fb8bbd1e
|
/graphic/colorscheme.py
|
7c840734b4dd3c5751f46545a143738d9753bfab
|
[] |
no_license
|
S-W-R/game-points
|
c53206bedde818a66929113bd8e9e4d9be0f1ea7
|
4b8eb6db4a0c6b37b4188528ac08466ebe0c88ef
|
refs/heads/master
| 2023-02-08T22:32:22.551774
| 2020-12-23T23:59:09
| 2020-12-23T23:59:09
| 304,621,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
from __future__ import annotations
from typing import Dict
from entities.celltype import CellType
from graphic.cellgraphic import CellGraphic
class ColorScheme:
def __init__(self, name: str, border_color: str,
scheme: Dict[CellType, CellGraphic]):
self._name = name
self._border_color = border_color
for i in CellType:
            if i not in scheme.keys():
                raise AttributeError('scheme is missing a CellGraphic for %s' % i)
self._scheme = scheme
@property
def name(self) -> str:
return self._name
@property
def border_color(self) -> str:
return self._border_color
def __getitem__(self, item: CellType) -> CellGraphic:
return self._scheme[item]
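# Usage sketch (the arguments shown are assumptions for illustration;
# CellGraphic's constructor signature is not shown here):
#   scheme = ColorScheme('classic', '#000000',
#                        {cell: some_graphic for cell in CellType})
#   scheme[CellType.<member>]  # -> the CellGraphic registered for that cell type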
|
[
"sn_swr1@mail.ru"
] |
sn_swr1@mail.ru
|
57c357522868b421355e456829230f9d6e918e51
|
7efd687ae84f7680e7af7f103f2756a814f48194
|
/最新socket网络编程/socket通信介绍 2.py
|
71451e10de4d10c72b3561aa9105bb8165892d63
|
[
"Apache-2.0"
] |
permissive
|
xiaoyongaa/ALL
|
1746e43c241cdb58f2f2e39ed49f0aa9ba57f099
|
76acb00db7d3a7126efea74750a33ec0c5ea0a62
|
refs/heads/master
| 2021-01-11T03:05:33.405232
| 2016-12-21T07:15:34
| 2016-12-21T07:15:34
| 71,097,431
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
#!/application/python3.5/bin/python3.5
# -*- coding:utf-8 -*-
# Author:xiaoyong
import socket
|
[
"845219159@qq.com"
] |
845219159@qq.com
|
c1327c26686ac918eb23487df03337cc8e9209f4
|
cb14c47a41204f4179f097295ec05309da8406e5
|
/binaryExploitation/playground/1/exploit.py
|
3d33c9da63e8bc040a2d167a949b1bc81cb97a88
|
[
"MIT"
] |
permissive
|
mapa17/pwn
|
301a15908e73368aab650d3f9b0154a644a99e97
|
c863fde63fc8698403726bcf6bac18c1433fc42c
|
refs/heads/main
| 2023-02-13T07:27:24.632432
| 2021-01-13T17:25:43
| 2021-01-13T17:25:43
| 329,381,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from xUtils import hexAddrInverted
buffer = b'B'*24
jmpAddr = hexAddrInverted(0x004001ebd)
print(buffer + jmpAddr)
|
[
"manuel.pasieka@protonmail.ch"
] |
manuel.pasieka@protonmail.ch
|
0815a63e25929d456a187a3afc2d91881b57b98f
|
0daaeb23beccecb4302c3c504fa904d038d4c1eb
|
/app/tests/test_posts.py
|
c8797c66db4fc8309c171a9a1f90c878d5610b27
|
[
"MIT"
] |
permissive
|
vaultah/L
|
80e2d566a99753f427bf7354477f3cd5391746fc
|
6e3eb6b0832ae6ccaea23abb0258b5abe0956901
|
refs/heads/master
| 2021-01-13T05:33:34.977484
| 2016-10-08T11:05:45
| 2016-10-08T11:05:45
| 42,345,740
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,235
|
py
|
import pytest
from app.el import posts
from app.el.accounts.records import Record
from app.el.images import Image
def test_create():
acct = Record.new()
objects = next(Image.new()),
post = posts.Post.new(acct, content='<string>', images=objects)
assert post.good()
@pytest.mark.parametrize('base_type',
[Image, posts.Post],
ids=['base image', 'base posts']
)
def test_create_base(base_type):
base = base_type.new()
# Won't fail if base_type is Image
try:
base = next(base)
except TypeError:
pass
reply = posts.Post.new(base.owner, content='<string>', ext=base)
assert reply.good()
assert reply.is_reply()
shared = posts.Post.new(base.owner, content=None, ext=base)
assert shared.good()
assert not shared.is_reply()
def test_delete():
obj = posts.Post.new()
owner = obj.owner
posts.Post.delete(owner, [obj])
assert not posts.Post(obj.id).good()
def test_class():
instance = posts.Post.new()
new = posts.Post(instance.id)
assert new.good()
assert new == instance
def test_load_plain():
objects = [posts.Post.new()]
owner = objects[0].owner
loaded = list(posts.page(owner))
assert len(loaded)
assert all(isinstance(x, posts.Post) for x in loaded)
assert all(x.owner == owner for x in loaded)
assert all(x.good() for x in loaded)
assert set(objects) == set(loaded)
# Testing iter_info here
res = posts.iter_info(objects)
assert res
def test_load_parents():
bases = [next(Image.new())]
owner = bases[-1].owner
for _ in range(5):
# Create a reply and make it a new base
reply = posts.Post.new(owner, content='<string>', ext=bases[-1])
bases.append(reply)
for x in range(1, 5):
parents = list(posts.parents(bases[x - 1]))
assert len(parents) == x
assert isinstance(parents[0], Image)
assert all(isinstance(x, posts.Post) for x in parents[1:])
# Everything but image
assert all(x.is_reply() for x in parents[1:])
def test_load_derived():
bases, derived = [next(Image.new())], []
owner = bases[-1].owner
for _ in range(5):
# 'Share' base
shared = posts.Post.new(owner, content=None, ext=bases[-1])
# Create a reply and make it a new base
reply = posts.Post.new(owner, content='<string>', ext=bases[-1])
bases.append(reply)
        # There are 2 items on each level
derived.append([shared, reply])
# Without shared
levels = list(posts.derived(bases[0]))
assert len(levels) - 1 == len(derived)
assert all(len(level) == 1 for level in levels[1:])
# With shared
levels = list(posts.derived(bases[0], reflections=True))
assert all(len(level) == 2 for level in levels[1:])
assert levels[1:] == derived
# Testing level_info here
res = list(posts.level_info(levels))
assert res
def test_delete_derived():
bases = [posts.Post.new()]
owner = bases[-1].owner
for _ in range(5):
reply = posts.Post.new(owner, content='<string>', ext=bases[-1])
bases.append(reply)
posts.delete_derived(bases[0])
assert all(not posts.Post(x.id).good() for x in bases)
|
[
"waultah@gmail.com"
] |
waultah@gmail.com
|
42c5d0fbbf756c31a7e1d7d1ea47df337f3661a6
|
1ac689147abb4d86885b249dce35ba6c20f7b8b6
|
/python/nehe/opengl_app.py
|
fec65990bbfa43a99b7a3e4db5fddd0b4bb2612c
|
[
"Apache-2.0"
] |
permissive
|
acs/opengl-samples
|
0ec81b9794ebca493985881a2a8af8dfefccf198
|
ea2cfee663781e7eaf56e001e4ea7c992c58722e
|
refs/heads/master
| 2022-11-24T18:32:04.781573
| 2020-07-27T17:02:32
| 2020-07-27T17:02:32
| 280,253,985
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,284
|
py
|
#!/usr/bin/env python
# Base class App used for the OpenGL tutorials http://nehe.gamedev.net
import numpy
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from PIL import Image
import sys
class OpenGLApp:
# Keyboard
ESCAPE_KEY = b'\x1b'
CTRLC_KEY = b'\x03'
# GL Window
window = 0
width = 640
height = 480
window_active = True
full_screen = False
z_deep = -5.0
# Rotation
rotation_triangle = 0
rotation_square = 0
x_rot = y_rot = 0
x_rot_speed = y_rot_speed = 1
# Blending
blend = False
# Lighting
lights = False # lights on/off
# Ambient light is light that doesn't come from any particular direction.
# All the objects in your scene will be lit up by the ambient light
light_ambient = (0.5, 0.5, 0.5, 1.0)
# Diffuse light is created by your light source and is reflected off the surface of an object in your scene.
# Any surface of an object that the light hits directly will be very bright,
# and areas the light barely gets to will be darker. This creates a nice shading effect on the sides
light_diffuse = (1.0, 1.0, 1.0, 1.0)
# Light in front of the screen because of 2.0 z
light_position = (0.0, 0.0, 2.0, 1.0)
# Textures
texture_id = 0
texture_ids = []
# Nice place to get textures: https://www.texturex.com/
textures = [("data/voxelers.bmp", GL_NEAREST),
("data/NeHe.bmp", GL_LINEAR_MIPMAP_NEAREST),
("data/glass.bmp", GL_LINEAR)]
def load_gl_texture(self, image_path, filtering=GL_NEAREST):
"""
Load a texture
:param image_path: path with the image path
:param filtering: GL_NEAREST (no filtering), GL_LINEAR (texture look smooth CPU/GPU intensive),
GL_LINEAR_MIPMAP_NEAREST (tries different texture resolutions)
:return:
"""
image = Image.open(image_path)
image_data = numpy.array(list(image.getdata()), numpy.uint8)
# Create three Textures
texture_id = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texture_id)
glPixelStorei(GL_UNPACK_ALIGNMENT, 4)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0)
# filtering to use when the image is larger (GL_TEXTURE_MAG_FILTER)
# or stretched on the screen than the original texture,
# or when it's smaller (GL_TEXTURE_MIN_FILTER) on the screen than the actual texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
# Generate the texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.size[0], image.size[1],
0, GL_RGB, GL_UNSIGNED_BYTE, image_data)
image.close()
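        # NOTE: the `filtering` argument is currently unused; the MIN/MAG
        # filters above are hard-coded to GL_LINEAR.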
return texture_id
def load_textures(self, textures):
for texture in textures:
self.texture_ids.append(self.load_gl_texture(texture[0], texture[1]))
# A general OpenGL initialization function. Sets all of the initial parameters.
# We call this right after our OpenGL window is created.
def init_gl(self):
self.load_textures(self.textures)
glEnable(GL_TEXTURE_2D)
glClearColor(0.0, 0.0, 0.0, 0.0) # This Will Clear The Background Color To Black
glClearDepth(1.0) # Enables Clearing Of The Depth Buffer
glDepthFunc(GL_LEQUAL) # The Type Of Depth Test To Do
glEnable(GL_DEPTH_TEST) # Enables Depth Testing
glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # Really Nice Perspective
# Setup lighting
glLightfv(GL_LIGHT1, GL_AMBIENT, self.light_ambient)
glLightfv(GL_LIGHT1, GL_DIFFUSE, self.light_diffuse)
glLightfv(GL_LIGHT1, GL_POSITION, self.light_position)
glEnable(GL_LIGHT1) # Lights won't show until GL_LIGHTING is enabled
glMatrixMode(GL_PROJECTION)
glLoadIdentity() # Reset The Projection Matrix
# Calculate The Aspect Ratio Of The Window
gluPerspective(45.0, float(self.width) / float(self.height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# Blending
glColor4f(1.0,1.0,1.0,0.5)
glBlendFunc(GL_SRC_ALPHA,GL_ONE)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def resize_gl_scene(self, width, height):
self.width = width
self.height = height
if height == 0: # Prevent A Divide By Zero If The Window Is Too Small
height = 1
glViewport(0, 0, width, height) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(self.width) / float(self.height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# The main drawing function.
def draw_gl_scene(self):
# Clear The Screen And The Depth Buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity() # Reset The View to the center
# since this is double buffered, swap the buffers to display what just got drawn.
glutSwapBuffers()
# The function called whenever a key is pressed
def key_pressed(self, *args):
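        # Key map: ESC / Ctrl-C quit, f toggle fullscreen, l toggle lighting,
        # w/s move the scene along z (z_deep), t cycle textures, b toggle blending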
# If escape is pressed, kill everything.
if args[0] in [self.ESCAPE_KEY, self.CTRLC_KEY]:
glutDestroyWindow(self.window)
sys.exit()
if args[0] == b'f':
glutFullScreenToggle()
if args[0] == b'l':
self.lights = not self.lights
if self.lights:
glEnable(GL_LIGHTING)
else:
glDisable(GL_LIGHTING)
if args[0] == b'w':
self.z_deep += 0.1
if args[0] == b's':
self.z_deep -= 0.1
if args[0] == b't':
self.texture_id +=1
self.texture_id = self.texture_id % len(self.texture_ids)
if args[0] == b'b':
self.blend = not self.blend
if self.blend:
glEnable(GL_BLEND)
glDisable(GL_DEPTH_TEST)
else:
glDisable(GL_BLEND)
glEnable(GL_DEPTH_TEST)
def main(self):
# For now we just pass glutInit one empty argument. I wasn't sure what should or could be passed in (tuple, list, ...)
# Once I find out the right stuff based on reading the PyOpenGL source, I'll address this.
glutInit(())
# Select type of Display mode:
# Double buffer
# RGBA color
# Alpha components supported
# Depth buffer
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
# get a 640 x 480 window
glutInitWindowSize(self.width, self.height)
# the window starts at the upper left corner of the screen
glutInitWindowPosition(0, 0)
# Okay, like the C version we retain the window id to use when closing, but for those of you new
# to Python (like myself), remember this assignment would make the variable local and not global
# if it weren't for the global declaration at the start of main.
window = glutCreateWindow("GL Code Tutorial based on NeHe '99")
# Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
# set the function pointer and invoke a function to actually register the callback, otherwise it
# would be very much like the C version of the code.
glutDisplayFunc(self.draw_gl_scene)
# Uncomment this line to get full screen.
# glutFullScreen()
# When we are doing nothing, redraw the scene.
glutIdleFunc(self.draw_gl_scene)
# Register the function called when our window is resized.
glutReshapeFunc(self.resize_gl_scene)
# Register the function called when the keyboard is pressed.
glutKeyboardFunc(self.key_pressed)
# Initialize our window.
self.init_gl()
# Start Event Processing Engine
glutMainLoop()
|
[
"adelcastillo@thingso2.com"
] |
adelcastillo@thingso2.com
|
73a45cae89d010094ec8cfb94f87ce9f8b6ed4e7
|
2d11051177f7b2666d58c2e4bf1af1cba36a5b95
|
/python/cuboid.py
|
6254317e3e640b6f3e896ccba9d02e3f930d7f1e
|
[] |
no_license
|
cpausmit/JLabTools
|
cb7c097628125baf65035d8c5df93e2357344abe
|
926bd32dec43f6679b687e769cf8f32869b60d57
|
refs/heads/master
| 2021-07-24T01:50:19.820985
| 2020-06-10T19:49:28
| 2020-06-10T19:49:28
| 30,076,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,432
|
py
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mlp
from scipy.optimize import curve_fit
from optparse import OptionParser
# initial settings
mlp.rcParams['axes.linewidth'] = 2
# give tick labels a little more space from axis
mlp.rcParams['xtick.major.pad'] = 12
mlp.rcParams['ytick.major.pad'] = 12
#---------------------------------------------------------------------------------------------------
def gaussian(x, mean, amplitude, standard_deviation):
return amplitude * np.exp( - ((x - mean) / standard_deviation) ** 2)
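# Note (illustration): the exponent lacks the usual factor of 1/2, so the fitted
# 'standard_deviation' equals sqrt(2) times the conventional Gaussian sigma.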
#---------------------------------------------------------------------------------------------------
def readDataFromFile(name,i_column,min,max):
with open(name+".dat","r") as file:
data = file.read()
values = []
for line in data.split("\n"):
f = line.split(' ')
# do we have a valid column?
if len(f)>i_column:
value = float(f[i_column])
if len(f)>i_column and value>min and value<max:
values.append(float(f[i_column]))
return values
#---------------------------------------------------------------------------------------------------
# define and get all command line arguments
parser = OptionParser()
parser.add_option("-n", "--name", dest="name", default='temp', help="name of plot")
parser.add_option("-x", "--xtitle",dest="xtitle",default='Time [milli seconds]',help="x axis title")
parser.add_option("-y", "--ytitle",dest="ytitle",default='analog values', help="y axis title")
(options, args) = parser.parse_args()
# get my data
values = readDataFromFile(options.name,8,3000,4000)
# bins
xmin = int(0.8*min(values))
xmax = int(1.2*max(values))
nbins = 25
print " min=%d, max=%d, nb=%d"%(xmin,xmax,nbins)
# make the histogram plot
n, bins, patches = plt.hist(values,bins=nbins,facecolor="lightblue",ec="blue")
plt.ylim(ymax=n.max()*1.1)
# prepare the fit
bin_centers = bins[:-1] + np.diff(bins) / 2
popt, covar = curve_fit(gaussian, bin_centers, n,
p0 = [sum(values)/len(values), sum(values), sum(values)/len(values)])
print popt
print covar
x_interval_for_fit = np.linspace(bins[0], bins[-1], 10000)
plt.plot(x_interval_for_fit, gaussian(x_interval_for_fit, *popt), linewidth=4)
# titles on the x and y axes
plt.xlabel('Volume [mm$^3$]', fontsize=26)
plt.ylabel('Number of Measurements', fontsize=26)
# tick marker sizes
ax = plt.gca()
ax.xaxis.set_tick_params(labelsize=20)
ax.yaxis.set_tick_params(labelsize=20)
ymin,ymax = ax.get_ylim()
dy = ymax-ymin
xmin,xmax = ax.get_xlim()
dx = xmax-xmin
plt.text(xmin+0.02*dx,ymax-0.06*dy, r'Mean: %.0f$\pm$%.0f'%(popt[0],covar[0][0]**0.5), fontsize=20)
plt.text(xmin+0.02*dx,ymax-0.12*dy, r'Width: %.0f$\pm$%.0f'%(popt[2],covar[2][2]**0.5), fontsize=20)
plt.text(xmin+0.02*dx,ymax-0.18*dy, r'Amplitude: %.0f$\pm$%.0f'%(popt[1],covar[1][1]**0.5), fontsize=20)
#plt.text(xmin+0.02*dx,ymax-0.08*dy, r'Volume [TB]: %5.2f'%(totalSizeTb), fontsize=20)
#plt.text(xmin+0.02*dx,ymax-0.12*dy, r'Exitcode: %4d'%(ecode), fontsize=20)
#plt.text(xmin+0.02*dx,ymax-0.16*dy, r'Past: %s'%(options.window), fontsize=20)
#
#plt.text(xmin-0.06*dx,ymin-0.10*dy, r'EC: %d'%(ecode), fontsize=34,color=color)
# save plot for later viewing
plt.subplots_adjust(top=0.99, right=0.99, bottom=0.2, left=0.07)
plt.savefig(options.name+".png",bbox_inches='tight',dpi=400)
|
[
"paus@mit.edu"
] |
paus@mit.edu
|
7db725538e29a6206585a021132a0022eaf7d0c1
|
100954953c3c94ff357500a14c3c57ba52fc20b1
|
/eventFinderApp/admin.py
|
31c0c12e965e315e9fc549202c1d61be6bb46022
|
[] |
no_license
|
shae1811/event-finda
|
de1b152eb5453949200d7c2ee7ddadf09817a5b6
|
4d4622a4d21918854c0885957127195a02eb03ab
|
refs/heads/master
| 2022-12-15T09:43:19.493439
| 2019-10-05T05:30:37
| 2019-10-05T05:30:37
| 206,282,071
| 0
| 0
| null | 2022-12-08T06:40:38
| 2019-09-04T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 166
|
py
|
from django.contrib import admin
from .models import Event, Category, Account
admin.site.register(Event)
# admin.site.register(Category)
admin.site.register(Account)
|
[
"shae1811@gmail.com"
] |
shae1811@gmail.com
|
2ae16f046b84775e7191a18977aebee595321a24
|
b1262dcefffb4e73ee183fd6ca8329d1a7a46d93
|
/back/common/helpers/passwordHelper.py
|
055fde6bba2663c749add2ddf15a6236f4244c3a
|
[] |
no_license
|
lahirusamaraweera/otmp-python
|
b247a979469f6f5c6e0a508b4afeee55aed55086
|
2f25bd52575a6ad88a32f7a1e486b3768dd4c258
|
refs/heads/develop
| 2023-02-22T19:59:21.246375
| 2023-02-03T16:48:41
| 2023-02-03T16:48:41
| 238,250,169
| 7
| 1
| null | 2023-01-07T15:07:40
| 2020-02-04T16:21:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 873
|
py
|
import hashlib, binascii, os
def hash_password(password):
"""Hash a password for storing."""
salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),
salt, 100000)
pwdhash = binascii.hexlify(pwdhash)
return (salt + pwdhash).decode('ascii')
def verify_password(stored_password, provided_password):
"""Verify a stored password against one provided by user"""
salt = stored_password[:64]
stored_password = stored_password[64:]
pwdhash = hashlib.pbkdf2_hmac('sha512',
provided_password.encode('utf-8'),
salt.encode('ascii'),
100000)
pwdhash = binascii.hexlify(pwdhash).decode('ascii')
return pwdhash == stored_password
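# Round-trip sketch (illustrative only):
if __name__ == '__main__':
    stored = hash_password('correct horse battery staple')
    assert verify_password(stored, 'correct horse battery staple')
    assert not verify_password(stored, 'wrong password')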
|
[
"lahiru.studixeon@gmail.com"
] |
lahiru.studixeon@gmail.com
|
6ce92864a9897b5b66aca9f7a38a6c671609e383
|
a987aba8d1c52a41577a8ba9f242870b4a66140a
|
/assignment9/q1_g.py
|
3d189a3380c0be1af4027da5db1880258c2913fc
|
[] |
no_license
|
valbartlett/PHY3090_MaterialScience
|
0e3b27df087888b5a7e1c307b937a17586484c65
|
42ff382bf387d9aa9d1df0d5eaac5ff401467ae8
|
refs/heads/master
| 2016-09-10T20:18:10.393806
| 2015-02-24T17:14:02
| 2015-02-24T17:14:02
| 28,978,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
#!/usr/bin/env python
dt = 0.0001
maxRun = 1000000
k = 1
m = 3.0
t = 0.
v = 0.
x = 2.
i = 0
xprevious = x
timeThroughZero = []
while i < maxRun:
a = -(k/m)**.5*x
v = v + a*dt
x = x + v*dt
if ( xprevious > 0) and ( x < 0):
timeThroughZero.append(t)
xprevious = x
t = t + dt
i=i+1
#print t, x, v, a
i = 1
while (i < len(timeThroughZero) - 1):
    period = timeThroughZero[i+1] - timeThroughZero[i]
    freq = 1/period
    i = i + 1
print 'Freq is: ', freq, ' Hz'
|
[
"val.bartlett@live.com"
] |
val.bartlett@live.com
|
6449302ae6905785e53e6f2f4c4f919e2da37df6
|
8b2af3cff75ba2a6f8557cdea0d852b9076ff6a3
|
/day005/2-week-work.py
|
772acc3130683d348f3bce8ecc511eb80bc54760
|
[] |
no_license
|
AlexYangLong/Foundations-of-Python
|
98e5eaf7e7348120049f1ff4bb3d31393ad05592
|
bcf3a1fe526140fd2b05283c104488698ebc99fd
|
refs/heads/master
| 2020-03-16T21:45:34.232670
| 2018-05-11T10:19:21
| 2018-05-11T10:19:21
| 133,013,526
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
'''
title: print every three-digit narcissistic (Armstrong) number
time: 2018.03.31 08:50
author: Yang Long (Alex)
'''
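# A three-digit narcissistic number equals the sum of the cubes of its digits,
# e.g. 153 = 1**3 + 5**3 + 3**3; bw/sw/gw below are the hundreds/tens/units digits.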
start = 100
while start < 1000:
bw = start // 100
sw = start // 10 % 10
gw = start % 10
if bw ** 3 + sw ** 3 + gw ** 3 == start:
print(start)
start += 1
|
[
"alex@alex.com"
] |
alex@alex.com
|
38e9f9b4869c72739f6b918c741836dd0e083902
|
5f747b3d7935b566fbe12d96c0f9a283a03dc049
|
/new_search/search.py
|
af79f0d9cfdb8e0e9fd18b57bf4f402ee477becc
|
[] |
no_license
|
zhangqian12/interface_dx2
|
378c244c0714485ddb5f668b8ba25c398a68f42a
|
f59235eecafcc292b19acc93e43c6c0dcf28ce84
|
refs/heads/master
| 2021-05-14T19:06:40.412751
| 2018-01-03T06:41:28
| 2018-01-03T06:41:28
| 116,099,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,382
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'zhangqian'
import unittest
import requests
import json
import random
class InterfaceTestCase(unittest.TestCase):
def setUp(self):
        self.domain = 'http://search.diaox2.com' # test environment address
        # self.domain = 'https://api.diaox2.com' # production address
self.json_headers = {'content-type':'application/json','Authorization':'diaodiao eyJhbGciOiJIUzI1NiIsImV4cCI6MTUxNDM2NjIyMywiaWF0IjoxNTE0MzYyNjIzfQ.eyJ1c2VybmFtZSI6InJkIn0.e1Io6QDbSKOOKBhTK3CbeuZ7tsJS2SrJa2JJ8gOt0Yg'}
# self.json_headers= {'content-type':'application/json','Authorization':self.test_login()["Authorization"]}
self.username = "rd"
self.password = "rd"
self.device = {"version":"3.9.15","did":"F7A1DB2A-5F5E-48D2-8655-658B22474530","screen":"375,812","client":"ios","idfa":"213A0FFB-7679-40E1-A0E1-A793A86E1345","device":"iPhone10,3","net":"WIFI","os":"iOS 11.2.1"}
def tearDown(self):
print("test end")
    # /v4_search/login GET,POST: login
def test_login(self):
paras = {"username":self.username,"passwd":self.password}
path = '/v4_search/login'
self.post_json_paras(paras,path)
    # /v4_search/hot_queries GET,POST: list of hot search queries
def test_hot_queries(self):
paras = {}
path = '/v4_search/hot_queries'
self.post_json_paras(paras, path)
    # /v4_search/normal POST: normal search
def test_normal(self):
paras = {"s_type":"normal","query":"手机壳","uid":"115722","device_info":self.device,"origin":"mainFeed","page_num":1,"page_size":20}
path = '/v4_search/normal'
self.post_json_paras(paras,path)
    # /v4_search/index GET,POST: initialize the index (run via a py script)
def test_index(self):
paras = {}
path = '/v4_search/index'
self.post_json_paras(paras,path)
    # /v4_search/config/index GET,POST: initialize the config index (run via a py script)
def test_config_index(self):
paras = {}
path = '/v4_search/config/index'
self.post_json_paras(paras,path)
    # /v4_search/config/insert POST: insert one config item
def test_config_insert(self):
paras = {"configs":{"sensitive_word":{"intro":"query敏感词屏蔽","config":["习近平","张倩"]}}}
path = '/v4_search/config/insert'
self.post_json_paras(paras,path)
    # /v4_search/config/delete POST: delete a config item
def test_config_del(self):
paras = {"configs":["k_category_coeff"]}
path = '/v4_search/config/delete'
self.post_json_paras(paras,path)
    # /v4_search/config/all GET,POST: view all current config
def test_config_all(self):
paras = {}
path = '/v4_search/config/all'
self.post_json_paras(paras,path)
    # /v4_search/config/getconfig POST: view a single config item
def test_getconfig(self):
paras={"config_name":"title_coeff"}
path = '/v4_search/config/getconfig'
self.post_json_paras(paras,path)
    # /v4_search/updates_needed POST: notification that article/sku data changed (pushed to us from upstream)
    # /v4_search/special/all GET,POST: view all current special-type article data
def test_special_all(self):
paras = {}
path = '/v4_search/special/all'
self.post_json_paras(paras,path)
    # /v4_search/special/insert POST: insert one special-type article record
def test_special_insert(self):
"""插入文章类型是article的文章"""
paras = {"special_metas":[{"associated_query":"洗面奶","head_image":"https://c.diaox2.com/cms/diaodiao/people/yangjie.png","special_id":"899999","thumb_image":"https://content.image.alimmdn.com/cms/sites/default/files/20150721/zk/sl.jpg","author":"gc","act_type":"article","interact":"https://c.diaox2.com/view/app/?m=show&id=2423","body":"哈哈哈,请你给我增加一个小接口,让我可以测试","title":"[日常]洗漱","status":1,"timestamp":"1514261497","up_time":"","down_time":""}]}
path = '/v4_search/special/insert'
self.post_json_paras(paras,path)
    # /v4_search/special/insert POST: insert one special-type article record (act_type 'link')
    def test_special_insert_link(self):
        """Insert a special article whose act_type is 'link'."""
paras = {"special_metas": [{"associated_query": "牙膏挑选器", "head_image": "https://c.diaox2.com/cms/diaodiao/people/yangjie.png","special_id": "900000","thumb_image": "https://content.image.alimmdn.com/cms/sites/default/files/20150721/zk/sl.jpg","author": "gc", "act_type": "link", "interact": "https://c.diaox2.com/view/app/?m=show&id=2423","body": "哈哈哈,请你给我增加一个小接口,让我可以测试", "title": "[日常]洗漱", "status": 1, "timestamp": "1514261497","up_time": "", "down_time": ""}]}
path = '/v4_search/special/insert'
self.post_json_paras(paras, path)
    # /v4_search/special/delete POST: delete one special-type article record
def test_special_del(self):
paras = {"ids":["900000"]}
path = '/v4_search/special/delete'
self.post_json_paras(paras,path)
    # /v4_search/gift_showtext GET,POST: gift-filter filter options
def test_gift_showtext(self):
paras = {}
path = '/v4_search/gift_showtext'
self.post_json_paras(paras,path)
    # /v4_search/gift_search POST: gift search
def test_gift_search(self):
paras = {"query":"鼠标","filter":{"category":"科技数码","scene":"过年回家","relation":"父母","price":[200,5000]},"uid":"115722","device_info":self.device,"origin":"mainFeed","order_type":"normal","page_num":1,"page_size":20}
path = '/v4_search/gift_search'
self.post_json_paras(paras,path)
    # /v4_search/gift_search_wechat POST: gift search for the WeChat mini program
def test_gift_search_wechat(self):
paras = {"category":"科技数码","scene":"生日","relation":"爸爸","price":[500,1000]}
path = '/v4_search/gift_search_wechat'
self.post_json_paras(paras,path)
    # /v4_search/clicked_log POST: logs (three kinds: search log, click log, gift-filter click log)
def test_clicked_log(self):
paras = {"origin":"mainFeed","search_type":"normal","device_info":self.device,"uid":"115722","timestamp":1510308704,"query":"手机壳","log_type":"clicked","clicked":{"id":900000,"type":"article"},"clicked_order":1}
path = '/v4_search/clicked_log'
self.post_json_paras(paras,path)
    # /v4_search/clicked_log POST: log endpoint, gift-filter click variant
    def test_clicked_log_gift_filter(self):
paras = {"origin": "mainFeed", "search_type": "gift", "device_info": self.device, "uid": "115722",
"timestamp": 1510308704, "query": "手机壳", "filter_clicked": "过年回家",}
path = '/v4_search/clicked_log'
self.post_json_paras(paras, path)
    # /v4_search/debug/normal POST: inspect normal-search scoring
def test_debug_normal(self):
paras = {"s_type":"normal","query":"口红","uid":"115722","device_info":self.device,"origin":"mainFeed","page_num":1,"page_size":"20","id":900000}
path = '/v4_search/debug/normal'
self.post_json_paras(paras,path)
    # /v4_search/debug/gift POST: inspect gift-search scoring
def test_debug_gift(self):
paras = {"origin":"mainFeed","uid":115722,"query":"键盘鼠标","order_type":"normal","filter":{"category":"","price":[100,500],"relation":"父母","scene":""},"device_info":self.device,"page_num":2,"page_size":40}
path = '/v4_search/debug/gift'
self.post_json_paras(paras,path)
    # /v4_search/debug/article?id=123 GET: view article index data
    def test_debug_article(self):
        paras = {"id": 900000}
        path = '/v4_search/debug/article?id=%s' % paras["id"]
        self.get_test_interface(path)
    # /v4_search/debug/sku?id=123 GET: view sku index data
def test_debug_sku(self):
path = '/v4_search/debug/sku?id=123'
self.get_test_interface(path)
    # /v4_search/debug/es_status GET: search-engine status, info and config
def test_debug_es_status(self):
path = '/v4_search/debug/es_status'
self.get_test_interface(path)
    ######################################################################################## shared helpers below
def post_json_paras(self,paras,path):
        '''POST JSON-encoded params to path and assert a SUCCESS response.'''
json_data = json.dumps(paras)
r = requests.post(self.url(path), data=json_data, headers=self.json_headers).json()
print self.get_json(r)
ass = self.assertEqual(r['state'],"SUCCESS")
return self.get_json(r),ass
def post_no_json_paras(self,paras,path):
        '''POST the params without JSON-encoding them first.'''
r = requests.post(self.url(path), json=paras).json()
print self.get_json(r)
ass = self.assertEqual(r['state'],"SUCCESS")
return self.get_json(r),ass
    # GET endpoint helper
def get_test_interface(self,path):
print("test get ")
result = requests.get(self.url(path)).json()
print self.get_json(result)
return result
    # URL helper
    def url(self,path):
        '''Join the domain and path into a full URL.'''
return self.domain + path
    # JSON helper
    def get_json(self,json_type):
        '''Serialize to a JSON string.'''
json_data = json.dumps(json_type)
return json_data
if __name__ == '__main__':
unittest.main()
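# To run a single case from the command line (the file lives at
# new_search/search.py, so the module name is assumed to be search):
#   python -m unittest search.InterfaceTestCase.test_login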
|
[
"451677236@qq.com"
] |
451677236@qq.com
|
408ffc3f89cd3fe9bc8896486cb00bdecadcaee0
|
9377fafd93ef351497336d0cc8b37f730265ba3e
|
/aes/key_expander.py
|
bbb2e775c1e17e87cc60eb8644cafee0c6ce0559
|
[] |
no_license
|
FennaHD/cs465
|
02bcb6b8523311bf348cbba80aaa97d00335b852
|
62b6e2f88fbed7c0a54e2c6f0d948cfdf745a0d2
|
refs/heads/main
| 2023-08-02T17:02:54.781855
| 2021-10-09T00:50:38
| 2021-10-09T00:50:38
| 403,775,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
from ff_math import FFMath as ffm
from r_con import Rcon as rc
from s_box import SBox
class KeyExpander:
"""
Class in charge of performing all operations involving key expansion.
Is also in charge of transforming input into the format we want to work with.
So far we are inputting a string of space separated bytes, and we convert the
substring bytes into hex integers.
"""
@staticmethod
def get_initial_words(cipher_key):
"""
Splits key into an array of words, while also reformatting them to be hex integers.
cipher_key is expected to be a single-space separated string of bytes. E.g.
"2b 7e 15 16 28 ae d2 a6 ab f7 15 88 09 cf 4f 3c". We can't work with this key, so
we must first convert it to a format we can use. We decided on a 2D array so that
with the input from above:
2b 28 ab 09 [[0x2b, 0x7e, 0x15, 0x16],
7e ae f7 cf --> [0x28, 0xae, 0xd2, 0xa6],
15 d2 15 4f [0xab, 0xf7, 0x15, 0x88],
16 a6 88 3c [0x09, 0xcf, 0x4f, 0x3c]]
"""
# turn "<nibble><nibble>" into int 0x<nibble><nibble>
byte_array = list(map(lambda w: int(w, 16), cipher_key.key.split(" ")))
i = 0
w = []
while i < cipher_key.nk:
i4 = 4 * i
w.append(byte_array[i4:i4 + 4])
i += 1
return w
@staticmethod
def get_all_words(cipher_key):
"""
Implements algorithm as shown in 5.2, figure 11.
@:param cipher_key is a string of space separated hex bytes.
E.g. "2b 7e 15 16 28 ae d2 a6 ab f7 15 88 09 cf 4f 3c".
"""
w = KeyExpander.get_initial_words(cipher_key)
i = cipher_key.nk
while i < (cipher_key.nb * (cipher_key.nr + 1)):
temp = w[i-1]
if i % cipher_key.nk == 0:
temp = ffm.add_words(KeyExpander.sub_word(KeyExpander.rot_word(temp)), rc.get(int(i / cipher_key.nk)))
elif cipher_key.nk > 6 and i % cipher_key.nk == 4:
temp = KeyExpander.sub_word(temp)
w.append(ffm.add_words(w[i-cipher_key.nk], temp))
i += 1
return w
@staticmethod
def get_schedule(cipher_key):
"""
Returns array of states. Each state is a 4x4 byte 2D array.
"""
num_bytes_in_word = 4
w = KeyExpander.get_all_words(cipher_key)
schedule = []
for i in range(int(len(w)/num_bytes_in_word)):
word_first_index = i * num_bytes_in_word
schedule.append(w[word_first_index : word_first_index + num_bytes_in_word])
return schedule
@staticmethod
def sub_word(word):
"""
Takes a four-byte input word and substitutes each byte in that word
with its appropriate value from the S-Box.
"""
return list(map(lambda w: SBox().transformation(w), word))
@staticmethod
def rot_word(word):
"""
Performs a cyclic permutation on its input word.
[A, B, C, D] becomes [B, C, D, A]
"""
return word[1:] + [word[0]]
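# A minimal usage sketch. The CipherKey shape below is an assumption inferred
# from the attributes this class reads (key, nk, nb, nr); the real project
# defines its own key object.
# class CipherKey:
#     def __init__(self, key, nk=4, nb=4, nr=10):  # AES-128 parameters
#         self.key, self.nk, self.nb, self.nr = key, nk, nb, nr
# key = CipherKey("2b 7e 15 16 28 ae d2 a6 ab f7 15 88 09 cf 4f 3c")
# schedule = KeyExpander.get_schedule(key)
# assert len(schedule) == key.nr + 1  # one 4x4 round key per round, plus the original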
|
[
"fmaluendap@gmail.com"
] |
fmaluendap@gmail.com
|
074860fc83c9d423f7bb879580455ccb728ed393
|
6ee76c78547d36f3b349da8e1503f690067729dc
|
/list_account_messages.py
|
91dba919945a7b5bb536eb0729156b05e3d1cba9
|
[] |
no_license
|
ATOIMIO/iota_iot_hub
|
3bf8ae2e71b292141c3c209359d9da0aa799a069
|
ab1a5b94412457dbfb344c882e775e6d68e36b23
|
refs/heads/master
| 2023-04-27T08:27:46.502585
| 2021-05-26T10:58:28
| 2021-05-26T10:58:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
# Copyright 2020 IOTA Stiftung
# SPDX-License-Identifier: Apache-2.0
import iota_wallet as iw
import os
from dotenv import load_dotenv
from app.wallet_interact import get_account
account = get_account()
# Load the env variables
#load_dotenv()
# Get the stronghold password
#STRONGHOLD_PASSWORD = os.getenv('STRONGHOLD_PASSWORD')
# This example lists the messages of the selected account.
#account_manager = iw.AccountManager(
# storage_path='./alice-database'
#)
#account_manager.set_stronghold_password(STRONGHOLD_PASSWORD)
#account = account_manager.get_account('Alice')
print(f'Account: {account.alias()} selected')
# Always sync before doing anything with the account
print('Syncing...')
synced = account.sync().execute()
for ac in account.list_messages():
print(ac)
#print(f"message {ac['id']}; confirmation status = {ac['confirmed']}'")
|
[
"hgreg@online.no"
] |
hgreg@online.no
|
7cbe60fb521bbc9077104c6d20aa3297d1e98f3b
|
023e3b1e887edc237bdc0734d842f34a6d9d3a9f
|
/method/gui/main_panel.py
|
a327118b8678b1f06a0d1eb23c3b8ce9a837c521
|
[] |
no_license
|
mokusen/deresute-fans
|
64b0e4d53fc48f54f816439d48cd357a1b29d2f7
|
f383c4b0daf6d53568d9043f6ec20f47b398c827
|
refs/heads/master
| 2022-12-16T03:38:29.976073
| 2021-02-13T07:11:09
| 2021-02-13T07:11:09
| 188,259,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,893
|
py
|
import wx
import datetime
from method.sql import connect_mysql
from method.utils import handle_yaml
from pathlib import Path
class MainGui(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, size=(330, 330))
panel = MainPanel(self)
self.Center()
self.Show()
class MainPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent=parent)
self.frame = parent
self.input_number = 5
self.defalut_size = (120, 28)
self.idol_size = (160, 28)
self.time_txt_size = (90, 28)
self.time_size = (190, 28)
self.defalut_font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, 'Meiryo UI')
self.idol_list = connect_mysql.select_idol_base()
self.before_date_info_yaml_path = Path(__file__).parents[2].joinpath('before_date_info.yml')
self.__myinit()
def __myinit(self):
idol_panel = self.__idot_panel()
fans_panel = self.__fans_panel()
time_panel = self.__time_panel()
regi_panel = self.__regi_panel()
restore_panel = self.__restore_panel()
base_layout = wx.GridBagSizer(0, 0)
base_layout.Add(idol_panel, (0, 0), (1, 1), flag=wx.EXPAND | wx.LEFT, border=20)
base_layout.Add(fans_panel, (0, 1), (1, 1), flag=wx.EXPAND)
base_layout.Add(time_panel, (1, 0), (1, 2), flag=wx.EXPAND | wx.LEFT, border=20)
base_layout.Add(regi_panel, (2, 0), (1, 2), flag=wx.EXPAND | wx.LEFT, border=20)
base_layout.Add(restore_panel, (3, 0), (1, 2), flag=wx.EXPAND | wx.LEFT, border=20)
layout = wx.BoxSizer(wx.HORIZONTAL)
layout.Add(base_layout, flag=wx.EXPAND | wx.TOP | wx.BOTTOM, border=10)
self.SetSizer(layout)
def __idot_panel(self):
        idol_name = wx.StaticText(self, wx.ID_ANY, 'アイドル名', size=self.defalut_size, style=wx.TE_CENTER)  # 'idol name'
        self.idol_input_list = [wx.ComboBox(self, wx.ID_ANY, '', choices=self.idol_list, size=self.idol_size, style=wx.CB_DROPDOWN) for _ in range(self.input_number)]
        # font settings
        idol_name.SetFont(self.defalut_font)
        for idol_input in self.idol_input_list:
            idol_input.SetFont(self.defalut_font)
            print(idol_input.GetBestSize())
        # layout
base_layout = wx.GridBagSizer(0, 0)
base_layout.Add(idol_name, (0, 0), (1, 1), flag=wx.EXPAND)
for idol_index in range(self.input_number):
base_layout.Add(self.idol_input_list[idol_index], (idol_index + 1, 0), (1, 1), flag=wx.EXPAND)
return base_layout
def __fans_panel(self):
        fans = wx.StaticText(self, wx.ID_ANY, 'ファン人数', size=self.defalut_size, style=wx.TE_CENTER)  # 'fan count'
        self.fans_input_list = [wx.TextCtrl(self, wx.ID_ANY, '') for _ in range(self.input_number)]
        # font settings
        fans.SetFont(self.defalut_font)
        for fans_input in self.fans_input_list:
            fans_input.SetFont(self.defalut_font)
        # layout
base_layout = wx.GridBagSizer(0, 0)
base_layout.Add(fans, (0, 0), (1, 1), flag=wx.EXPAND)
for fan_index in range(self.input_number):
base_layout.Add(self.fans_input_list[fan_index], (fan_index + 1, 0), (1, 1), flag=wx.EXPAND)
return base_layout
def __time_panel(self):
        time_text = wx.StaticText(self, wx.ID_ANY, '登録時間', size=self.time_txt_size, style=wx.TE_CENTER)  # 'registration time'
        self.time_input = wx.TextCtrl(self, wx.ID_ANY, '', size=self.time_size)
        # font settings
        time_text.SetFont(self.defalut_font)
        self.time_input.SetFont(self.defalut_font)
        # layout
base_layout = wx.GridBagSizer(0, 0)
base_layout.Add(time_text, (0, 0), (1, 1), flag=wx.EXPAND)
base_layout.Add(self.time_input, (0, 1), (1, 1), flag=wx.EXPAND)
return base_layout
def __regi_panel(self):
        register_button = wx.Button(self, wx.ID_ANY, '登 録')  # 'Register'
        # font settings
        register_button.SetFont(self.defalut_font)
        # bind the register handler
register_button.Bind(wx.EVT_BUTTON, self.regi_date)
return register_button
def __restore_panel(self):
        restore_button = wx.Button(self, wx.ID_ANY, '前回入力を復元する')  # 'restore the previous input'
        # font settings
        restore_button.SetFont(self.defalut_font)
        # bind the restore handler
restore_button.Bind(wx.EVT_BUTTON, self.restore_date)
return restore_button
def regi_date(self, event):
message, idol_list, fans_list = self.__check_input_date()
if message != '':
            return wx.MessageBox(message, "入力エラー", wx.ICON_ERROR)  # 'input error' dialog
create_ts = self.time_input.GetValue()
        dlg = wx.MessageDialog(None, "登録を開始して良いですか?", ' 登録内容確認', wx.YES_NO | wx.ICON_INFORMATION)  # 'OK to start registering?' / 'confirm registration'
result = dlg.ShowModal()
if result == wx.ID_YES:
# if create_ts != '':
# for index in range(len(idol_list)):
# connect_mysql.insert_idol_fans(idol_list[index], fans_list[index], create_ts)
# else:
# for index in range(len(idol_list)):
# connect_mysql.insert_idol_fans(idol_list[index], fans_list[index], datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
wx.MessageBox("登録完了しました。", "登録完了", wx.ICON_INFORMATION)
# 登録情報を一時保存する
print(self.time_input.GetValue())
before_date_yaml = {'idol_list': {index: idol_id for index, idol_id in enumerate(idol_list)},
'fans_list': {index: fans for index, fans in enumerate(fans_list)},
'time': self.time_input.GetValue()}
handle_yaml.output_yaml(self.before_date_info_yaml_path, before_date_yaml)
dlg.Destroy()
def __check_input_date(self):
return_message = ''
idol_list = [idol_input.GetSelection()+1 for idol_input in self.idol_input_list if idol_input.GetSelection() != -1]
fans_list = [fans_input.GetValue() for fans_input in self.fans_input_list if fans_input.GetValue() != '']
        # treat empty input as an error
        if len(fans_list) == 0 and len(idol_list) == 0:
            return_message += "登録するアイドルとファン人数を入力してください。\n"  # 'enter the idols and fan counts to register'
        # idol and fan-count entries must pair up
        if len(fans_list) != len(idol_list):
            return_message += "アイドルとファン人数は対になるように入力してください\n"  # 'enter idols and fan counts as pairs'
        # integer check: int() raises ValueError on non-numeric input
        try:
            fans_list = [int(fans_input) for fans_input in fans_list]
        except ValueError:
            return_message += "ファン人数は整数で入力してください。\n"  # 'fan counts must be integers'
return return_message[:-1], idol_list, fans_list
def restore_date(self, event):
before_date = handle_yaml.get_yaml(self.before_date_info_yaml_path)
        # restore the previous inputs into the fields
for index in range(len(before_date["idol_list"])):
self.idol_input_list[index].SetSelection(before_date["idol_list"][index]-1)
self.fans_input_list[index].SetValue(str(before_date["fans_list"][index]))
self.time_input.SetValue(before_date["time"])
def callMainGui():
app = wx.App(False)
    MainGui(None, wx.ID_ANY, title=u'アイドルファンカウンター')  # 'Idol Fan Counter'
app.MainLoop()
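# Entry-point sketch (an assumption; the module may instead be imported and
# callMainGui() invoked from elsewhere):
if __name__ == '__main__':
    callMainGui()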
|
[
"kikawa.tech@gmail.com"
] |
kikawa.tech@gmail.com
|
9d3ac3a02335392317299ac89caa4c3082512770
|
7ac43f018756573e12eb49ec55ed854fb5f9d2b1
|
/TEST_1/Analysis/orderparameter.py
|
9beb11b21cb83557ae0e3ed135f5e760df09fd6c
|
[
"MIT"
] |
permissive
|
dmalagarriga/PLoS_2015_segregation
|
68d92965c1db5f58a2e6b913e630d788bffad65f
|
949afedf96945c11ee84b1a6c9842e5257fb5be8
|
refs/heads/main
| 2023-01-03T09:18:10.985981
| 2020-10-28T07:58:25
| 2020-10-28T07:58:25
| 307,944,255
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
#!/usr/bin/python
from scipy import *
from pylab import *
from numpy import *
from matplotlib.collections import LineCollection
import glob
import os
from os.path import join as pjoin
def comparacio(a, b):
    (Sepa, numa) = a.split('_')
    (pa, exta) = numa.split('.')
    (Sepb, numb) = b.split('_')
    (pb, extb) = numb.split('.')
    value = float(pa) - float(pb)
    return int(value/abs(value))
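# comparacio orders the data files by the numeric part of the name, so e.g.
# 'Data_2.dat' sorts before 'Data_10.dat' (plain string order would reverse them).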
Kv = os.getenv('K_Conn')
nNodes = int(os.getenv('Nodes')) # Column 0 also counts
Trans=1
path = os.getenv('P_Dir')
infiles = sorted(glob.glob( '%s/Data_*.dat' %path), cmp=comparacio)
RsynN=[]
rN=[]
for infile in infiles:
#Load data sets
s = loadtxt(infile, unpack=True) # Voxels 1,2,3,...
for i in range(1,nNodes):
r=0
rN.append(var(s[i][Trans:]))
R=mean(rN)
#r+=(s[i][Trans:].std())**2
Rsyn= var(s[nNodes][Trans:])/R
#RsynN.append(Rsyn)
print Rsyn
|
[
"Dani@My-MacBook-Pro.lan"
] |
Dani@My-MacBook-Pro.lan
|
f33938d14928c678f1d821d2abc65fe23f9ed2b2
|
92f16bbef4597221059cc66c33087f54f72a7519
|
/auto_exp/items.py
|
6382133a5c63f3cba0dc8d511e17e192b2b4b045
|
[] |
no_license
|
zeratul0097/auto_exp
|
9d2537810d8062d577c00cc07f93acd7f95104d6
|
cb8be78c93080d6c0c95d068c0b1ea36f09a7f9c
|
refs/heads/master
| 2022-11-25T20:22:43.122996
| 2020-07-19T02:09:09
| 2020-07-19T02:09:09
| 276,880,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
from scrapy.loader.processors import Join, MapCompose
from auto_exp.utilities.yy_utilities import get_last_chapter
class BookInfo(Item):
last_chapter = Field(
input_processor=MapCompose(get_last_chapter),
output_processor=Join()
)
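# A usage sketch with ItemLoader (assumed spider context; the CSS selector is
# illustrative, not the project's real one):
# from scrapy.loader import ItemLoader
# loader = ItemLoader(item=BookInfo(), response=response)
# loader.add_css('last_chapter', '.chapter-list a::text')
# book = loader.load_item()  # get_last_chapter runs per value, Join() on output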
|
[
"daoducminh1997@gmail.com"
] |
daoducminh1997@gmail.com
|
a325d935d28e2e5df75b636247f66d4eb34ef0dd
|
f45fab2976ad78f259741e8a77c4b1101dee5792
|
/venv/bin/easy_install
|
4026dadf436804590ba625cba3c7f31d19a7a2d9
|
[] |
no_license
|
hoin14/python_game
|
bc13a733aa13e0bff5cc6997ef5017a24856db06
|
009692079a2311ed5e0484fc729d07ef252088f6
|
refs/heads/master
| 2023-01-06T21:58:28.298680
| 2020-11-09T02:54:09
| 2020-11-09T02:54:09
| 311,207,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
#!/Users/hoins/PycharmProjects/pythonGame/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"hoins@hoin-ui-MacBookPro.local"
] |
hoins@hoin-ui-MacBookPro.local
|
|
e8acac5e7e384d71ed93af5458c5f553acef9d13
|
1e1f7d3687b71e69efa958d5bbda2573178f2acd
|
/srm/doctype/qa_inspection_report/qa_inspection_report.py
|
58ad248c87e4412395558022a598565562eaeb1b
|
[] |
no_license
|
ravidey/erpnext
|
680a31e2a6b957fd3f3ddc5fd6b383d8ea50f515
|
bb4b9bfa1551226a1d58fcef0cfe8150c423f49d
|
refs/heads/master
| 2021-01-17T22:07:36.049581
| 2011-06-10T07:32:01
| 2011-06-10T07:32:01
| 1,869,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
# Please edit this list and import only required elements
import webnotes
from webnotes.utils import add_days, add_months, add_years, cint, cstr, date_diff, default_fields, flt, fmt_money, formatdate, generate_hash, getTraceback, get_defaults, get_first_day, get_last_day, getdate, has_common, month_name, now, nowdate, replace_newlines, sendmail, set_default, str_esc_quote, user_format, validate_email_add
from webnotes.model import db_exists
from webnotes.model.doc import Document, addchild, removechild, getchildren, make_autoname, SuperDocType
from webnotes.model.doclist import getlist, copy_doclist
from webnotes.model.code import get_obj, get_server_obj, run_server_obj, updatedb, check_syntax
from webnotes import session, form, is_testing, msgprint, errprint
set = webnotes.conn.set
sql = webnotes.conn.sql
get_value = webnotes.conn.get_value
in_transaction = webnotes.conn.in_transaction
convert_to_lists = webnotes.conn.convert_to_lists
# -----------------------------------------------------------------------------------------
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
# Autoname
# ---------
def autoname(self):
self.doc.name = make_autoname(self.doc.naming_series+'.#####')
def get_item_specification_details(self):
self.doc.clear_table(self.doclist, 'qa_specification_details')
specification = sql("select specification, value from `tabItem Specification Detail` where parent = '%s' order by idx" % (self.doc.item_code))
for d in specification:
child = addchild(self.doc, 'qa_specification_details', 'QA Specification Detail', 1, self.doclist)
child.specification = d[0]
child.value = d[1]
child.status = 'Accepted'
def on_submit(self):
if self.doc.purchase_receipt_no:
sql("update `tabPurchase Receipt Detail` set qa_no = '%s' where parent = '%s' and item_code = '%s'" % (self.doc.name, self.doc.purchase_receipt_no, self.doc.item_code))
def on_cancel(self):
if self.doc.purchase_receipt_no:
sql("update `tabPurchase Receipt Detail` set qa_no = '' where parent = '%s' and item_code = '%s'" % (self.doc.purchase_receipt_no, self.doc.item_code))
|
[
"pdvyas@erpnext.com"
] |
pdvyas@erpnext.com
|
46f0cc2808d25eef61326b4ee6646188640eff4b
|
3a5ea75a5039207104fd478fb69ac4664c3c3a46
|
/vega/common/__init__.py
|
cba02abb9bbbc58fa821ba5f13084f865b0d986d
|
[
"MIT"
] |
permissive
|
fmsnew/vega
|
e3df25efa6af46073c441f41da4f2fdc4929fec5
|
8e0af84a57eca5745fe2db3d13075393838036bb
|
refs/heads/master
| 2023-06-10T04:47:11.661814
| 2021-06-26T07:45:30
| 2021-06-26T07:45:30
| 285,174,199
| 0
| 0
|
MIT
| 2020-08-11T14:19:09
| 2020-08-05T03:59:49
|
Python
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
from .utils import init_log, module_existed, update_dict, copy_search_file
from .utils import update_dict_with_flatten_keys, switch_directory
from .config import Config
from .file_ops import FileOps
from .task_ops import TaskOps
from .user_config import UserConfig
from .config_serializable import ConfigSerializable
from .class_factory import ClassType, ClassFactory, SearchSpaceType
from .json_coder import JsonEncoder
from .consts import Status, DatatimeFormatString
from .general import General
from .message_server import MessageServer
from .message_client import MessageClient
from .arg_parser import argment_parser
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
d258b4757fa8e7ec847e8658053ba8ec722fe9f3
|
57a775c5a896d0f9e821552f69f4ecc12e53b823
|
/ems_server/ems_server/settings.py
|
86dbaf6b011ba3a32335663edbe126c2efc15944
|
[] |
no_license
|
hrynbnbyo/day6
|
ef0f630b14267121cf1bdd77f37b501b83e06355
|
63cda889e6375898b39a034a5addb243f0acdcfa
|
refs/heads/master
| 2023-03-30T05:49:48.543874
| 2021-04-06T18:23:44
| 2021-04-06T18:23:44
| 355,290,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,597
|
py
|
"""
Django settings for ems_server project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*sftwqk32!$@ufvf%z*=eo^9ms05*5rsrc1*_5@i1f+oi$1a-4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'user.apps.UserConfig',
'emp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ems_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ems_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'hry',
'USER':'root',
'PASSWORD':'root',
'HOST':'localhost',
        'PORT': 3306,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
# language code (switch here for Chinese localization)
LANGUAGE_CODE = 'en-us'
# time zone
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media/")
MEDIA_URL = "/media/"
# allow cross-origin requests
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
    # global exception handling
"EXCEPTION_HANDLER": "utils.exceptions.exception_handler"
}
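# Sketch of the handler that EXCEPTION_HANDLER points at. utils/exceptions.py
# is not shown here, so this shape is an assumption based on DRF's documented
# custom-exception-handler hook:
# from rest_framework.views import exception_handler as drf_exception_handler
# def exception_handler(exc, context):
#     response = drf_exception_handler(exc, context)  # DRF's default handling first
#     if response is not None:
#         response.data = {'error': response.data}  # uniform error envelope
#     return response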
|
[
"3202448109@qq.com"
] |
3202448109@qq.com
|
34040193b78929d5295fbdf8d8da64db038005e7
|
385408aa4b4b63212ed7fc53e8549cb61466d0ef
|
/backend/manage.py
|
9dc57b80d448ecacdd9d878b0359ed725232809a
|
[] |
no_license
|
crowdbotics-apps/lithium-18155
|
a0a671a9c56ee0371ba11a225567e717bb545b6b
|
d44c419789c633b6d6f08b51f2b4288bbf4fa3ac
|
refs/heads/master
| 2022-10-18T06:29:10.028600
| 2020-06-16T23:53:19
| 2020-06-16T23:53:19
| 272,836,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lithium_18155.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
35f3e209cb9a1c50f11c2104cc1650ae5831e986
|
e617a2a6ba47348a496f38fb8781cced92bbe74d
|
/test.py
|
ac4a7ed3bce3c02dcdaa1f256e9403c97a4f0c91
|
[
"Apache-2.0"
] |
permissive
|
YuehChuan/Keras_2_CMSIS
|
f87864db66d7e2595e5a7727aaf49fec0a906aa9
|
f4ca1a126e61e923488a56d6832b178871b27711
|
refs/heads/master
| 2022-01-11T17:16:29.505725
| 2019-05-12T20:20:59
| 2019-05-12T20:20:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 15:29:18 2019
@author: quinn
"""
import keras.backend as K
from keras.models import Sequential
from keras.layers import Conv1D, MaxPool1D, AvgPool1D, UpSampling1D
#from numpy.random import normal
from main import convert_model
def gen_test_model_1d(input_shape=(None,5)):
m = Sequential()
m.add(Conv1D(4, 8, padding='same', input_shape=input_shape, activation='relu'))
m.add(MaxPool1D(2, padding='same'))
m.add(Conv1D(4, 8, padding='same', activation='tanh'))
m.add(UpSampling1D(2))
m.add(Conv1D(4, 8, padding='same', activation='sigmoid'))
m.compile('SGD','mse')
return m
if __name__=='__main__':
    import os
    import tempfile
shape = (100,4)
batch_shape = (100,)+shape
m = gen_test_model_1d(shape)
# a, b = normal(0, 1, batch_shape), normal(0, 1, batch_shape)
# path = tempfile.gettempdir()
    path = './tmp/'
    os.makedirs(path, exist_ok=True)  # the save below fails if ./tmp is missing
m.save(path+'/__test_1d.h5')
convert_model(path+'/__test_1d.h5',
name='__test_1d_model',
path=path,
verbose=False)
|
[
"quinna@wearstrive.com"
] |
quinna@wearstrive.com
|
a743110325998544601a239a68b21f038d064b34
|
3d2b991bf016ee8bb67944e2e78c27ffdfadf4d1
|
/tests/challenges/test_get_edge.py
|
075a5f39dddc8d34f81e4c630ffaad63e550d5e1
|
[] |
no_license
|
Hadeaji/data-structures-and-algorithms
|
748b5fdb31621b6a5e1c09cd71e60ba1b956fb98
|
cb9a2d756bf0a495fef5f927d7b84b52d7b9545c
|
refs/heads/master
| 2023-02-20T17:42:18.444686
| 2021-01-26T23:14:55
| 2021-01-26T23:14:55
| 316,959,706
| 0
| 1
| null | 2021-01-26T23:14:56
| 2020-11-29T13:41:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,815
|
py
|
from data_structures_and_algorithms.challenges.get_edge.get_edge import *
import pytest
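# Contract exercised below: get_edge(graph, [city, ...]) returns a string such
# as 'True, $82' (the summed fares) when each consecutive pair of cities is
# directly connected, and False otherwise.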
def test_get_edge_1(graph_test):
actual = get_edge(graph_test,['Metroville', 'Pandora',])
expect = 'True, $82'
assert expect == actual
def test_get_edge_2(graph_test):
actual = get_edge(graph_test,['Arendelle', 'New Monstropolis', 'Naboo'])
expect = 'True, $115'
assert expect == actual
def test_get_edge_3(graph_test):
actual = get_edge(graph_test,['Naboo','Pandora'])
expect = False
assert expect == actual
def test_get_edge_4(graph_test):
actual = get_edge(graph_test,['Narnia', 'Arendelle', 'Naboo'])
expect = False
assert expect == actual
@pytest.fixture
def graph_test():
test1 = Graph()
test1.add_node('Metroville')
test1.add_node('Pandora')
test1.add_node('Arendelle')
test1.add_node('New Monstropolis')
test1.add_node('Naboo')
test1.add_node('Narnia')
test1.add_edge('Pandora','Arendelle',150)
test1.add_edge('Pandora','Metroville',82)
test1.add_edge('Metroville','Pandora',82)
test1.add_edge('Metroville','Arendelle',99)
test1.add_edge('Metroville','New Monstropolis',105)
test1.add_edge('Metroville','Naboo',26)
test1.add_edge('Metroville','Narnia',37)
test1.add_edge('Arendelle','New Monstropolis',42)
test1.add_edge('Arendelle','Metroville',99)
test1.add_edge('Arendelle','Pandora',150)
test1.add_edge('New Monstropolis','Arendelle',42)
test1.add_edge('New Monstropolis','Metroville',105)
test1.add_edge('New Monstropolis','Naboo',73)
test1.add_edge('Naboo','New Monstropolis',73)
test1.add_edge('Naboo','Metroville',26)
test1.add_edge('Naboo','Narnia',250)
test1.add_edge('Narnia','Metroville',37)
test1.add_edge('Narnia','Naboo',250)
return test1
|
[
"hadeaji@gmail.com"
] |
hadeaji@gmail.com
|
f0286c737255c570c63958d337554514988f27f5
|
c99d76d2cf0b4c0e6760959c9e986bd6d7e62e95
|
/09_TOW/PV_Lib Toolbox/Python/pvlib-python-master/docs/sphinx/source/conf.py
|
24d41deb6ad723f6288044a70fedcf9b1fb37703
|
[
"BSD-3-Clause"
] |
permissive
|
maxin8852/CAPER
|
812cb1fbcf3daca0a2d30ebe351330503a8db177
|
dab894567ba1cfb1062881aa0ad2d3518ffff0cc
|
refs/heads/master
| 2023-02-27T14:07:06.141965
| 2017-04-24T14:04:38
| 2017-04-24T14:04:38
| 89,206,279
| 1
| 0
| null | 2017-04-24T06:41:28
| 2017-04-24T06:41:28
| null |
UTF-8
|
Python
| false
| false
| 9,505
|
py
|
# -*- coding: utf-8 -*-
#
# PVLIB_Python documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 7 15:56:33 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Mock modules so RTD works
try:
from mock import Mock as MagicMock
except ImportError:
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = []
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'numpydoc',
'sphinx.ext.autosummary',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pvlib-python'
copyright = u'2015, Sandia National Labs, Rob Andrews, University of Arizona, github contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Get the version from the version file
version_file = os.path.join(os.path.dirname(__file__),
'../../../pvlib/version.py')
with open(version_file, 'r') as f:
exec(f.read())
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PVLIB_Pythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PVLIB_Python.tex', u'PVLIB\\_Python Documentation',
u'Sandia National Labs, Rob Andrews, University of Arizona, github contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# extlinks alias
extlinks = {'issue': ('https://github.com/pvlib/pvlib-python/issues/%s',
'GH'),
'wiki': ('https://github.com/pvlib/pvlib-python/wiki/%s',
'wiki ')}
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pvlib_python', u'PVLIB_Python Documentation',
[u'Sandia National Labs, Rob Andrews, University of Arizona, github contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PVLIB_Python', u'PVLIB_Python Documentation',
u'Sandia National Labs, Rob Andrews, University of Arizona, github contributors', 'PVLIB_Python', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[
"atow@g.clemson.edu"
] |
atow@g.clemson.edu
|
765d7b8c972737e542068be83e7155ef12408ca6
|
b26bf2fc0a1cd2dbefd550d6a5518107bd3b9124
|
/examples/easy_wav_player.py
|
92b7a13de9b55cd27006bae54a77ce71bbb104f1
|
[] |
no_license
|
TGiles1998/micropython-i2s-examples
|
06234a48e3df69fe45c2a8f7e365f557035957ac
|
237efced6df21653843a1291e5d2c32558e863f2
|
refs/heads/master
| 2023-08-24T09:36:31.452437
| 2021-09-30T17:33:15
| 2021-09-30T17:33:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
# The MIT License (MIT)
# Copyright (c) 2021 Mike Teachman
# https://opensource.org/licenses/MIT
#
# Purpose: Play a WAV audio file out of a speaker or headphones
#
import os
import time
from machine import Pin
from wavplayer import WavPlayer
if os.uname().machine.find("PYBv1") == 0:
# ======= I2S CONFIGURATION =======
SCK_PIN = 'Y6'
WS_PIN = 'Y5'
SD_PIN = 'Y8'
I2S_ID = 2
BUFFER_LENGTH_IN_BYTES = 40000
# ======= I2S CONFIGURATION =======
elif os.uname().machine.find("PYBD") == 0:
import pyb
pyb.Pin("EN_3V3").on() # provide 3.3V on 3V3 output pin
# ======= SD CARD CONFIGURATION =======
os.mount(pyb.SDCard(), "/sd")
# ======= SD CARD CONFIGURATION =======
# ======= I2S CONFIGURATION =======
SCK_PIN = 'Y6'
WS_PIN = 'Y5'
SD_PIN = 'Y8'
I2S_ID = 2
BUFFER_LENGTH_IN_BYTES = 40000
# ======= I2S CONFIGURATION =======
elif os.uname().machine.find("ESP32") == 0:
from machine import SDCard
# ======= SD CARD CONFIGURATION =======
sd = SDCard(slot=2) # sck=18, mosi=23, miso=19, cs=5
os.mount(sd, "/sd")
# ======= SD CARD CONFIGURATION =======
# ======= I2S CONFIGURATION =======
SCK_PIN = 32
WS_PIN = 25
SD_PIN = 33
I2S_ID = 0
BUFFER_LENGTH_IN_BYTES = 40000
# ======= I2S CONFIGURATION =======
else:
raise NotImplementedError("I2S protocol not supported on this board ")
wp = WavPlayer(id=I2S_ID,
sck_pin=Pin(SCK_PIN),
ws_pin=Pin(WS_PIN),
sd_pin=Pin(SD_PIN),
ibuf=BUFFER_LENGTH_IN_BYTES)
wp.play("music-16k-16bits-stereo.wav", loop=False)
# wait until the entire WAV file has been played
while wp.isplaying():
# other actions can be done inside this loop during playback
pass
wp.play("music-16k-16bits-mono.wav", loop=False)
time.sleep(10) # play for 10 seconds
wp.pause()
time.sleep(5) # pause playback for 5 seconds
wp.resume() # continue playing to the end of the WAV file
|
[
"mike.teachman@gmail.com"
] |
mike.teachman@gmail.com
|
e2a0f2fce01349f1c3eaec839b214cbafdf62cbd
|
5723347dc2bcd20c7993ab99aa9758a0365b1707
|
/line-notify.py
|
0100b89a993390ba73d4d62c5b6635386ffaca0c
|
[] |
no_license
|
NPEMasa/Line-notify
|
a089115b15d26af0631010621a8272b678416605
|
1412f91e8671265980cf0680bc012ede01e151ce
|
refs/heads/master
| 2021-01-01T20:47:31.273116
| 2017-11-19T06:54:34
| 2017-11-19T06:54:34
| 98,935,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
import requests
line_token = "<Your API Key>"
url = "https://notify-api.line.me/api/notify"
msg = "こんにちは"
payload = {'message': msg}
headers = {'Authorization': 'Bearer ' + line_token}
line_notify = requests.post(url, data=payload, headers=headers)
res = line_notify.text
print(res)
|
[
"masanobu.miyagi@lac.co.jp"
] |
masanobu.miyagi@lac.co.jp
|
4a03c0d099001823b6ed2dabd024c042aacdbbae
|
d2b4f08e8feaab12251f5840da7df08fd87b5b8f
|
/RunCarAll.py
|
9ccbec526d789a407e8c40ae62d9484816e60e99
|
[] |
no_license
|
zzhang1987/HyperGraphMatchingBP
|
821fd78f869327969cd8d771ee53909f284ec275
|
2ce1a3651ad82a20681a63d4c190709e25c97ace
|
refs/heads/master
| 2021-01-20T01:43:41.475232
| 2018-09-26T07:15:52
| 2018-09-26T07:15:52
| 89,321,207
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,800
|
py
|
import matlab.engine
eng = matlab.engine.start_matlab()
#import matlab.engine
import numpy as np
import FactorBP as FB
import drawMatches as dm
import RunAlgorithm as RA
from Utils import LoadCar
import cPickle as pickle
def ComputeAccuracyPas(decode, gTruth, NofInliers):
Ccnt = 0
for i in range(len(gTruth)):
if((decode[i] == gTruth[i]) and (gTruth[i] < NofInliers)):
Ccnt += 1
return 1.0 * Ccnt / NofInliers
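# e.g. ComputeAccuracyPas([0, 1, 2, 5], [0, 1, 2, 3], 3) == 1.0: the three
# inlier assignments match and the outlier position is ignored.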
# eng = matlab.engine.start_matlab()
CarData = LoadCar()
AlgorithmNames=['Ours', 'OursBca', 'BCA', 'BCA-MP', 'BCA-IPFP', 'HGM', 'RRWHM', 'TM', 'OursPW', 'FGM']
SecondOrderMethods = ('Ours', 'FGM')
ThirdOrderMethods = ('Ours', 'OursBca', 'BCA', 'BCA-MP', 'BCA-IPFP', 'HGM', 'RRWHM', 'TM')
MaxNofOus = 20
NofInstances = 30
reload(RA)
AllAcc = dict()
AllRtime = dict()
AllObj = dict()
for NofOus in range(0,MaxNofOus+1):
Accuracy = dict()
Rtime = dict()
Obj = dict()
AllAcc[NofOus] = dict()
AllRtime[NofOus] = dict()
AllObj[NofOus] = dict()
for idx in range(1, NofInstances + 1):
car1 = CarData[idx]
LocalFeature1 = car1['features1']
LocalFeature2 = car1['features2']
PT1 = LocalFeature1[:, 0:2]
PT2 = LocalFeature2[:, 0:2]
orientation1 = LocalFeature1[:, 8]
orientation2 = LocalFeature2[:, 8]
GT = car1['gTruth'][0]
NofInliers = len(GT)
CMaxNofOus = np.min([LocalFeature1.shape[0], LocalFeature2.shape[0]]) - NofInliers
CNofOus = NofOus
if(CNofOus > CMaxNofOus):
CNofOus = CMaxNofOus
NofNodes = CNofOus + NofInliers
gTruth = np.random.permutation(NofNodes)
PT1 = PT1[gTruth, :]
orientation1 = orientation1[gTruth]
MG1 = FB.MatchingGraph(PT1[0:NofNodes], orientation1[0:NofNodes])
MG2 = FB.MatchingGraph(PT2[0:NofNodes], orientation2[0:NofNodes])
for Type in ('pas', 'pasDisOnly'):
for WithEdge in (True,False):
if(WithEdge == True):
for methods in SecondOrderMethods:
FullMethods = (methods + 'WithEdge' + str(WithEdge) +
'WithTriplet' + str(False) + Type)
print('Run %s' % FullMethods)
if(idx == 1):
Accuracy[FullMethods] = dict()
Rtime[FullMethods] = dict()
Obj[FullMethods] = dict()
decode,rtime,obj = RA.RunAlgorithm(MG1, MG2, WithEdge,
False, Type, methods,
eng)
Accuracy[FullMethods][idx] = ComputeAccuracyPas(decode, gTruth, NofInliers)
Rtime[FullMethods][idx] = rtime
Obj[FullMethods][idx] = obj
Fname = ('Res/Car%d_Nous%d_' + FullMethods + '.pdf') % (idx, NofOus)
dm.drawMatchesWithOutlier(car1['I1'],car1['I2'],PT1[0:NofNodes],PT2[0:NofNodes],decode, gTruth, NofInliers, Fname)
for methods in ThirdOrderMethods:
FullMethods = (methods + 'WithEdge' + str(WithEdge) +
'WithTriplet' + str(True) + Type)
print('Run %s' % FullMethods)
if(idx == 1):
Accuracy[FullMethods] = dict()
Rtime[FullMethods] = dict()
Obj[FullMethods] = dict()
decode, rtime, obj = RA.RunAlgorithm(MG1, MG2, WithEdge,
True, Type, methods,
eng)
Accuracy[FullMethods][idx] = ComputeAccuracyPas(decode,
gTruth,
NofInliers)
Rtime[FullMethods][idx] = rtime
Obj[FullMethods][idx] = obj
Fname = ('Res/Car%d_Nous%d_' + FullMethods
+ '.pdf') % (idx, NofOus)
dm.drawMatchesWithOutlier(car1['I1'], car1['I2'],
PT1[0:NofNodes], PT2[0:NofNodes],
decode, gTruth,
NofInliers, Fname)
AllAcc[NofOus] = Accuracy
AllRtime[NofOus] = Rtime
AllObj[NofOus] = Obj
f = open('CarRes.pkl', "w")
pickle.dump(AllAcc, f)
pickle.dump(AllRtime, f)
pickle.dump(AllObj, f)
f.close()
|
[
"zhen@zzhang.org"
] |
zhen@zzhang.org
|
fcfe654d69b045cb2e2d84dfc858c98db2b70af4
|
3daf74bdadb46f4aa18918f1b6938c714b331723
|
/poco/drivers/unity3d/test/test.py
|
18262e6b341cf52a9d321d00496b0e905a6c9128
|
[
"Apache-2.0"
] |
permissive
|
AirtestProject/Poco
|
d173b465edefbae72f02bb11d60edfa5af8d4ec4
|
65c2c5be0c0c1de680eedf34ac18ae065c5408ee
|
refs/heads/master
| 2023-08-15T23:00:11.805669
| 2023-03-29T08:58:41
| 2023-03-29T08:58:41
| 118,706,014
| 1,703
| 312
|
Apache-2.0
| 2023-08-08T10:30:21
| 2018-01-24T03:24:01
|
Python
|
UTF-8
|
Python
| false
| false
| 608
|
py
|
# coding=utf-8
import base64
import json
import traceback
import time
import unittest
from poco.drivers.std.test.simple import TestStandardFunction
from poco.drivers.unity3d.unity3d_poco import UnityPoco
from airtest.core.api import connect_device
class TestU3dDriverAndroid(TestStandardFunction):
@classmethod
def setUpClass(cls):
connect_device('Android:///')
cls.poco = UnityPoco()
class TestU3dDriverUnityEditor(TestStandardFunction):
@classmethod
def setUpClass(cls):
cls.poco = UnityPoco(unity_editor=True)
if __name__ == '__main__':
unittest.main()
|
[
"lxn3032@corp.netease.com"
] |
lxn3032@corp.netease.com
|
52cd062f35c2f6ce934437db2c865a5be97238d7
|
2620b8104daf0cf6b306496d22f443f662b9d503
|
/rsmtool/test_utils.py
|
d97ed64b7e2ac9958462ec59632eefdabeb70ef9
|
[
"Apache-2.0"
] |
permissive
|
jkahn/rsmtool
|
b4477cbd176350f9bc31e9848d1b99aca9ee3192
|
53d90ab8442776ff212eaeb78d8f7a677f2941fd
|
refs/heads/master
| 2021-01-15T17:07:56.026008
| 2016-06-28T16:57:02
| 2016-06-28T16:57:02
| 62,756,214
| 0
| 0
| null | 2016-07-06T22:02:22
| 2016-07-06T22:02:22
| null |
UTF-8
|
Python
| false
| false
| 12,097
|
py
|
import re
import numpy as np
import pandas as pd
from glob import glob
from os import remove
from os.path import basename, join
from nose.tools import assert_equal, ok_
from pandas.util.testing import assert_frame_equal
from rsmtool import run_experiment
from rsmtool.model import create_fake_skll_learner
from rsmtool.predict import predict_with_model
from rsmtool.rsmcompare import run_comparison
from rsmtool.rsmeval import run_evaluation
from rsmtool.rsmpredict import compute_and_save_predictions
html_error_regexp = re.compile(r'Traceback \(most recent call last\)')
section_regexp = re.compile(r'<h2>(.*?)</h2>')
def do_run_experiment(source, experiment_id, config_file):
"""
Run RSMTool experiment using the given experiment
configuration file located in the given source directory
and using the given experiment ID.
Parameters
----------
source : str
Path to where the test is located on disk.
experiment_id : str
Experiment ID to use when running.
config_file : str
Path to the experiment configuration file.
"""
source_output_dir = 'test_outputs'
experiment_dir = join(source_output_dir, source)
# remove all previously created files
for output_subdir in ['output', 'figure', 'report']:
files = glob(join(source_output_dir, source, output_subdir, '*'))
for f in files:
remove(f)
run_experiment(config_file, experiment_dir)
def do_run_evaluation(source, experiment_id, config_file):
"""
Run RSMEval experiment using the given experiment
configuration file located in the given source directory
and using the given experiment ID.
Parameters
----------
source : str
Path to where the test is located on disk.
experiment_id : str
Experiment ID to use when running.
config_file : str
Path to the experiment configuration file.
"""
source_output_dir = 'test_outputs'
experiment_dir = join(source_output_dir, source)
# remove all previously created files
for output_subdir in ['output', 'figure', 'report']:
files = glob(join(source_output_dir, source, output_subdir, '*'))
for f in files:
remove(f)
run_evaluation(config_file, experiment_dir)
def do_run_prediction(source, config_file):
"""
Run RSMPredict experiment using the given experiment
configuration file located in the given source directory.
Parameters
----------
source : str
Path to where the test is located on disk.
config_file : str
Path to the experiment configuration file.
"""
source_output_dir = 'test_outputs'
output_file = join(source_output_dir, source, 'output', 'predictions.csv')
feats_file = join(source_output_dir, source, 'output', 'preprocessed_features.csv')
# remove all previously created files
files = glob(join(source_output_dir, 'output', '*'))
for f in files:
remove(f)
compute_and_save_predictions(config_file, output_file, feats_file)
def do_run_comparison(source, config_file):
"""
Run RSMCompare experiment using the given experiment
configuration file located in the given source directory.
Parameters
----------
source : str
Path to where the test is located on disk.
config_file : str
Path to the experiment configuration file.
"""
source_output_dir = 'test_outputs'
experiment_dir = join(source_output_dir, source)
run_comparison(config_file, experiment_dir)
def check_csv_output(csv1, csv2):
"""
Check if two experiment CSV files have values that are
the same to within three decimal places. Raises an
AssertionError if they are not.
Parameters
----------
csv1 : str
Path to the first CSV file.
csv2 : str
Path to the second CSV files.
"""
df1 = pd.read_csv(csv1, index_col=0)
df2 = pd.read_csv(csv2, index_col=0)
# sort all the indices alphabetically
df1.sort_index(inplace=True)
df2.sort_index(inplace=True)
# convert any integer columns to floats in either data frame
for df in [df1, df2]:
for c in df.columns:
if df[c].dtype == np.int64:
df[c] = df[c].astype(np.float64)
# do the same for indices
for df in [df1, df2]:
if df.index.dtype == np.int64:
df.index = df.index.astype(np.float64)
# for pca and factor correlations convert all values to absolutes
# because the sign may not always be the same
if csv1.endswith('pca.csv') or csv1.endswith('factor_correlations.csv'):
for df in [df1, df2]:
msk = df.dtypes == np.float64
df.loc[:,msk] = df.loc[:,msk].abs()
assert_frame_equal(df1.sort_index(axis=1),
df2.sort_index(axis=1),
check_exact=False,
check_less_precise=True)
def check_report(html_file):
"""
Checks if the HTML report contains any errors.
Raises an AssertionError if it does.
Parameters
----------
html_file : str
Path the HTML report file on disk.
"""
report_errors = 0
with open(html_file, 'r') as htmlf:
for line in htmlf:
m = html_error_regexp.search(line)
if m:
report_errors += 1
assert_equal(report_errors, 0)
def check_scaled_coefficients(source, experiment_id):
"""
Check that the predictions generated using scaled
coefficients match the scaled scores. Raises an
AssertionError if they do not.
Parameters
----------
source : str
Path to the source directory on disk.
experiment_id : str
The experiment ID.
"""
preprocessed_test_file = join('test_outputs',
source,
'output',
'{}_test_preprocessed_features.csv'.format(experiment_id))
scaled_coefficients_file = join('test_outputs',
source,
'output',
'{}_coefficients_scaled.csv'.format(experiment_id))
predictions_file = join('test_outputs',
source,
'output',
'{}_pred_processed.csv'.format(experiment_id))
df_preprocessed_test_data = pd.read_csv(preprocessed_test_file)
df_old_predictions = pd.read_csv(predictions_file)
df_old_predictions = df_old_predictions[['spkitemid', 'sc1', 'scale']]
# create fake skll objects with new coefficients
df_coef = pd.read_csv(scaled_coefficients_file)
new_model = create_fake_skll_learner(df_coef)
# generate new predictions and rename the prediction column to 'scale'
df_new_predictions = predict_with_model(new_model, df_preprocessed_test_data)
df_new_predictions.rename(columns={'raw': 'scale'}, inplace=True)
# check that new predictions match the scaled old predictions
assert_frame_equal(df_new_predictions.sort_index(axis=1),
df_old_predictions.sort_index(axis=1),
check_exact=False,
check_less_precise=True)
def check_all_csv_exist(csv_files, experiment_id, model_source):
"""
Check that all crucial output files have been generated.
Raises an AssertionError if they have not.
Parameters
----------
csv_files : list of str
List of CSV files generated by a test.
experiment_id : str
The experiment ID.
model_source : str
'rsmtool' or 'skll'
"""
csv_must_have_both = ["_confMatrix.csv",
"_cors_orig.csv",
"_cors_processed.csv",
"_eval.csv",
"_eval_short.csv",
"_feature.csv",
"_feature_descriptives.csv",
"_feature_descriptivesExtra.csv",
"_feature_outliers.csv",
"_margcor_score_all_data.csv",
"_pca.csv",
"_pcavar.csv",
"_pcor_score_all_data.csv",
#"_pred.csv", check again
"_pred_processed.csv",
"_pred_train.csv",
"_score_dist.csv",
"_train_preprocessed_features.csv",
"_test_preprocessed_features.csv",
"_postprocessing_params.csv"
]
csv_must_have_rsmtool = ["_betas.csv",
"_coefficients.csv"]
if model_source == 'rsmtool':
csv_must_have = csv_must_have_both + csv_must_have_rsmtool
else:
csv_must_have = csv_must_have_both
csv_must_with_id = [experiment_id + file_name for file_name in csv_must_have]
csv_exist = [basename(file_name) for file_name in csv_files]
missing_csv = set(csv_must_with_id).difference(set(csv_exist))
assert_equal(len(missing_csv), 0, "Missing csv files: {}".format(','.join(missing_csv)))
def check_consistency_files_exist(csv_files, experiment_id):
"""
Check to make sure that the consistency files
were generated. Raises an AssertionError if
they were not.
Parameters
----------
csv_files : list of str
List of CSV files generated by a test.
experiment_id : str
The experiment ID.
"""
csv_must_have = ["_consistency.csv",
"_degradation.csv"]
csv_must_with_id = [experiment_id + file_name for file_name in csv_must_have]
csv_exist = [basename(file_name) for file_name in csv_files]
missing_csv = set(csv_must_with_id).difference(set(csv_exist))
assert_equal(len(missing_csv), 0, "Missing csv files: {}".format(','.join(missing_csv)))
def check_subgroup_outputs(output_dir, experiment_id, subgroups):
"""
Check to make sure that the subgroup outputs
look okay. Raise an AssertionError if they do not.
Parameters
----------
output_dir : str
Path to the `output` experiment output directory for a test.
experiment_id : str
The experiment ID.
subgroups : list of str
List of column names that contain grouping
information.
"""
train_preprocessed = pd.read_csv(join(output_dir,
'{}_{}'.format(experiment_id,
'train_metadata.csv')),
index_col=0)
test_preprocessed = pd.read_csv(join(output_dir,
'{}_{}'.format(experiment_id,
'test_metadata.csv')),
index_col=0)
for group in subgroups:
ok_(group in train_preprocessed.columns)
ok_(group in test_preprocessed.columns)
# check that the total sum of N per category matches the total N
# in data composition and the total N categories matches what is
# in overall data composition
df_data_composition_all = pd.read_csv(join(output_dir,
'{}_data_composition.csv'.format(experiment_id)))
for group in subgroups:
composition_by_group = pd.read_csv(join(output_dir,
'{}_data_composition_by_{}.csv'.format(experiment_id,
group)))
for partition in ['Training', 'Evaluation']:
            # .ix was removed from recent pandas; .loc does the same boolean selection
            partition_info = df_data_composition_all.loc[df_data_composition_all['partition'] == partition]
            ok_(sum(composition_by_group['{} set'.format(partition)]) == partition_info.iloc[0]['responses'])
            ok_(len(composition_by_group.loc[composition_by_group['{} set'.format(partition)] != 0]) == partition_info.iloc[0][group])
|
[
"nmadnani@ets.org"
] |
nmadnani@ets.org
|
6133f9bcf6b914d9ce3daf3ca186dda3e4a2b218
|
75e180f4adbf14d0c04e9cf28bdbdb9455f78c3c
|
/getfile.py
|
eced28617d97d54cd504aaf0d074bba428ab052f
|
[] |
no_license
|
Tyzex/Text-Recognition
|
73664b60c7378c5b96760e2c429016fc890a3eb8
|
76177cdd3920e5424ffa8fa35c2d34e62011c2eb
|
refs/heads/master
| 2023-07-16T09:43:49.384072
| 2021-08-17T15:17:27
| 2021-08-17T15:17:27
| 392,409,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 625
|
py
|
from tkinter import filedialog
from configfile import *
class GetFile(object):
def __init__(self, root):
self.root = root
        self.filepath = None
def getfilename(self):
        self.filepath = filedialog.askopenfilename(initialdir="/", title="Select Image",
                                                   filetypes=(("Image Files", "*.png"),
                                                              ("All Files", "*.*")))
        if self.filepath:  # askopenfilename returns '' when the dialog is cancelled
obj = Config(self.root, self.filepath)
obj.newgui()
def getfilepath(self):
return self.filepath
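# A minimal usage sketch (hypothetical wiring; assumes configfile.Config takes
# (root, filepath), as the call in getfilename suggests):
if __name__ == '__main__':
    import tkinter as tk
    root = tk.Tk()
    picker = GetFile(root)
    tk.Button(root, text="Open image", command=picker.getfilename).pack()
    root.mainloop()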
|
[
"deveshsingh45450@gmail.com"
] |
deveshsingh45450@gmail.com
|
c52843fca25bdd4385022115b3a2b45db15056c4
|
159a2e75ff6cc7c0b58741c25636b83410e42bc7
|
/模块openpyxl例子.py
|
ada0274c1493986d594c89335089c53e722a2fea
|
[] |
no_license
|
articuly/python_study
|
e32ba6827e649773e5ccd953e35635aec92c2c15
|
b7f23cdf3b74431f245fe30c9d73b4c6910b1067
|
refs/heads/master
| 2020-11-24T04:20:35.131925
| 2020-09-10T08:21:06
| 2020-09-10T08:21:06
| 227,961,859
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# coding:utf-8
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
ws.title = 'python'
ws1 = wb.create_sheet('PHP')
print(wb.sheetnames)
ws['C1'] = '123'
ws.cell(row=2, column=4, value=456)
wb.save('python sample.xlsx')
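# Hedged follow-up sketch: read the workbook back to verify the writes above.
from openpyxl import load_workbook
wb2 = load_workbook('python sample.xlsx')
print(wb2.sheetnames)  # ['python', 'PHP']
print(wb2['python']['C1'].value)  # '123'
print(wb2['python'].cell(row=2, column=4).value)  # 456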
|
[
"articuly@gmail.com"
] |
articuly@gmail.com
|
5d47d24e7688ed9ee77ee0c71cb829e8f7d1b023
|
4c7d9ea3edebf1c39f22ed3e040f5dc77ece6e25
|
/dharanipro20.py
|
6b091f576aafb495e293bd18a53f31018b82133a
|
[] |
no_license
|
muvendiran/dharania
|
d1ca74423dddb6ae5024dc84ff0a4a3226a516bf
|
d99f6bd86e47ea375d973687d0affab1f7c4a720
|
refs/heads/master
| 2020-07-04T13:39:59.284332
| 2019-08-14T09:20:12
| 2019-08-14T09:20:12
| 202,301,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#d
x,y=map(int,input().split())
arr=list(map(int,input().split()))
arr.sort(reverse=True)
a=0
total=y
for i in arr:
if total>=i:
rem=int(total/i)
a+=rem
total=total - (i*rem)
if total==0:
break
if total==0:
print(a)
else:
print("it's not possible to sum up coins in such a way that they um upto",y)
|
[
"noreply@github.com"
] |
muvendiran.noreply@github.com
|
e0611ecd79179045ed445f18ae54a628b75624bc
|
de1d66cb03d9e0a6891b74828a51f5ffdc06ce36
|
/Python_3/ex38.py
|
6323c00a30bf3302a2b6344659e939e8d4339808
|
[] |
no_license
|
almazkun/the_hard_way
|
6c029ab7ce26d78b312e1f231533bec2291828ae
|
be2ae13af3de8f6005f03eccae3f4e556257259e
|
refs/heads/master
| 2023-05-01T17:37:25.813050
| 2022-02-11T09:24:07
| 2022-02-11T09:24:07
| 203,321,326
| 0
| 0
| null | 2023-04-21T21:56:47
| 2019-08-20T07:16:57
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
ten_things = "Apples Oranges Cross Telephone Light Sugar"
print("Wait a minute, There shpuld be 10 items! Let's fix it!")
stuff = ten_things.split(" ")
more_stuff = ["Day", "Night", "Song", "Flisbee", "Corn", "Banana", "Girl", "Boy"]
while len(stuff) != 10:
next_one = more_stuff.pop()
print("Add: {}".format(next_one))
stuff.append(next_one)
print(
"This is what we have now: {}, and number of objects are {}".format(
stuff, len(stuff)
)
)
print("This is what we have in the end: {}".format(stuff))
print("Let's do something with them")
print(stuff[1])
print(stuff[-1])
print(stuff.pop())
print(" ".join(stuff))
print("#".join(stuff[3:5]))
|
[
"almaz.kun@gmail.com"
] |
almaz.kun@gmail.com
|
22cac2d85470799d1a79fcb952061ca24753cb6a
|
f862200d97a8b926b474a6d17afc3d5b7f96d6df
|
/python/card-games/lists.py
|
0190f48dd25721dcabe0c44a8d4e85063784d2f8
|
[
"MIT"
] |
permissive
|
res0nat0r/exercism
|
2f7e80505b924a7701d369f9d6afb68b0150d3ca
|
eaf6b3db18c1724e91d9a339758bdc5f3a0f156c
|
refs/heads/main
| 2023-08-16T17:56:30.084462
| 2023-08-16T05:38:40
| 2023-08-16T05:38:40
| 231,866,137
| 0
| 0
|
MIT
| 2023-09-14T18:19:54
| 2020-01-05T04:16:15
|
C++
|
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
"""Functions for tracking poker hands and assorted card tasks.
Python list documentation: https://docs.python.org/3/tutorial/datastructures.html
"""
def get_rounds(number):
"""Create a list containing the current and next two round numbers.
:param number: int - current round number.
:return: list - current round and the two that follow.
"""
rounds = [number]
rounds.append(number + 1)
rounds.append(number + 2)
return rounds
def concatenate_rounds(rounds_1, rounds_2):
"""Concatenate two lists of round numbers.
:param rounds_1: list - first rounds played.
:param rounds_2: list - second set of rounds played.
:return: list - all rounds played.
"""
return rounds_1 + rounds_2
def list_contains_round(rounds, number):
"""Check if the list of rounds contains the specified number.
:param rounds: list - rounds played.
:param number: int - round number.
:return: bool - was the round played?
"""
return number in rounds
def card_average(hand):
"""Calculate and returns the average card value from the list.
:param hand: list - cards in hand.
:return: float - average value of the cards in the hand.
"""
return sum(hand) / len(hand)
def approx_average_is_average(hand):
"""Return if an average is using (first + last index values )
OR ('middle' card) == calculated average.
:param hand: list - cards in hand.
:return: bool - does one of the approximate averages equal the `true average`?
"""
middle_index = len(hand) // 2
middle_num = hand[middle_index]
first_last = (hand[0] + hand[-1]) / 2
return card_average(hand) in [first_last, middle_num]
def average_even_is_average_odd(hand):
"""Return if the (average of even indexed card values) == (average of odd indexed card values).
:param hand: list - cards in hand.
:return: bool - are even and odd averages equal?
"""
even_sum, even_count, odd_sum, odd_count = (0, 0, 0, 0)
for iterator, card in enumerate(hand):
if iterator % 2 == 0:
even_sum += card
even_count += 1
for iterator, card in enumerate(hand):
if iterator % 2 != 0:
odd_sum += card
odd_count += 1
return (even_sum / even_count) == (odd_sum / odd_count)
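# An equivalent, more idiomatic sketch using slicing (an illustrative
# alternative, not used elsewhere): even indices are hand[::2], odd hand[1::2].
def average_even_is_average_odd_sliced(hand):
    """Slicing-based variant of average_even_is_average_odd."""
    return card_average(hand[::2]) == card_average(hand[1::2])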
def maybe_double_last(hand):
"""Multiply a Jack card value in the last index position by 2.
:param hand: list - cards in hand.
:return: list - hand with Jacks (if present) value doubled.
"""
    if hand[-1] == 11:
        hand[-1] *= 2
    return hand
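# Quick illustrative check:
#     maybe_double_last([5, 9, 11])  # -> [5, 9, 22]
#     maybe_double_last([5, 9, 10])  # -> [5, 9, 10]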
|
[
"noreply@github.com"
] |
res0nat0r.noreply@github.com
|
7f3b087d26ed86bc780046df17e65c01cb7e8c13
|
cdab54a34b4f954275ffd800eb80a58a300ff30f
|
/lsnn/my_spiking_models.py
|
6bd5bb0acfe7acecbe3a5b7f308dc9d75cabb2db
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
Quickblink/LSNN-official
|
085f4a6b2f8db6b638d3956d4a2ff45d67747173
|
8a423ccbfa4a8f16791affeabfef9f8e1b13c535
|
refs/heads/master
| 2022-11-16T14:09:28.192315
| 2020-07-13T22:24:01
| 2020-07-13T22:24:01
| 269,710,196
| 0
| 0
|
BSD-3-Clause-Clear
| 2020-06-05T16:54:12
| 2020-06-05T16:54:11
| null |
UTF-8
|
Python
| false
| false
| 43,931
|
py
|
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from distutils.version import LooseVersion
import datetime
from collections import OrderedDict
from collections import namedtuple
import numpy as np
import numpy.random as rd
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.framework.ops import Tensor
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
from tensorflow.python.ops.variables import Variable, RefVariable
else:
print("Using tensorflow version older then 1.11 -> skipping RefVariable storing")
from tensorflow.python.ops.variables import Variable
from lsnn.toolbox.rewiring_tools import weight_sampler
from lsnn.toolbox.tensorflow_einsums.einsum_re_written import einsum_bi_ijk_to_bjk
from lsnn.toolbox.tensorflow_utils import tf_roll
from time import time
Cell = tf.contrib.rnn.BasicRNNCell
def placeholder_container_for_rnn_state(cell_state_size, dtype, batch_size, name='TupleStateHolder'):
with tf.name_scope(name):
default_dict = cell_state_size._asdict()
placeholder_dict = OrderedDict({})
for k, v in default_dict.items():
if np.shape(v) == ():
v = [v]
shape = np.concatenate([[batch_size], v])
placeholder_dict[k] = tf.placeholder(shape=shape, dtype=dtype, name=k)
placeholder_tuple = cell_state_size.__class__(**placeholder_dict)
return placeholder_tuple
def feed_dict_with_placeholder_container(dict_to_update, state_holder, state_value, batch_selection=None):
if state_value is None:
return dict_to_update
assert state_holder.__class__ == state_value.__class__, 'Should have the same class, got {} and {}'.format(
state_holder.__class__, state_value.__class__)
for k, v in state_value._asdict().items():
if batch_selection is None:
dict_to_update.update({state_holder._asdict()[k]: v})
else:
dict_to_update.update({state_holder._asdict()[k]: v[batch_selection]})
return dict_to_update
#################################
# Spike function
#################################
@tf.custom_gradient
def SpikeFunction(v_scaled, dampening_factor):
z_ = tf.greater(v_scaled, 0.)
z_ = tf.cast(z_, dtype=tf.float32)
def grad(dy):
dE_dz = dy
dz_dv_scaled = tf.maximum(1 - tf.abs(v_scaled), 0)
dz_dv_scaled *= dampening_factor
dE_dv_scaled = dE_dz * dz_dv_scaled
return [dE_dv_scaled,
tf.zeros_like(dampening_factor)]
return tf.identity(z_, name="SpikeFunction"), grad
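# Hedged usage sketch (illustrative values, TF1 graph mode): the forward pass
# emits a binary spike, while the backward pass substitutes the triangular
# surrogate max(1 - |v_scaled|, 0) * dampening_factor for the true,
# almost-everywhere-zero derivative of the step function.
#     v_scaled = tf.constant([[-0.5, 0.2]])
#     z = SpikeFunction(v_scaled, 0.3)      # -> [[0., 1.]]
#     g = tf.gradients(z, v_scaled)[0]      # -> [[0.15, 0.24]]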
def weight_matrix_with_delay_dimension(w, d, n_delay):
"""
Generate the tensor of shape n_in x n_out x n_delay that represents the synaptic weights with the right delays.
:param w: synaptic weight value, float tensor of shape (n_in x n_out)
:param d: delay number, int tensor of shape (n_in x n_out)
:param n_delay: number of possible delays
:return:
"""
with tf.name_scope('WeightDelayer'):
w_d_list = []
for kd in range(n_delay):
mask = tf.equal(d, kd)
w_d = tf.where(condition=mask, x=w, y=tf.zeros_like(w))
w_d_list.append(w_d)
delay_axis = len(d.shape)
WD = tf.stack(w_d_list, axis=delay_axis)
return WD
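# Illustrative example: with w = [[0.5]], d = [[1]] and n_delay = 2 the result
# is [[[0., 0.5]]]: each weight lands in the delay slot selected by d, with
# zeros in the other delay slots.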
# PSP on output layer
def exp_convolve(tensor, decay): # tensor shape (trial, time, neuron)
with tf.name_scope('ExpConvolve'):
assert tensor.dtype in [tf.float16, tf.float32, tf.float64]
tensor_time_major = tf.transpose(tensor, perm=[1, 0, 2])
initializer = tf.zeros_like(tensor_time_major[0])
filtered_tensor = tf.scan(lambda a, x: a * decay + (1 - decay) * x, tensor_time_major, initializer=initializer)
filtered_tensor = tf.transpose(filtered_tensor, perm=[1, 0, 2])
return filtered_tensor
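# Illustrative note: per neuron this computes the recurrence
#     s_t = decay * s_{t-1} + (1 - decay) * x_t
# so a single input spike leaves the trace (1 - decay), (1 - decay) * decay, ...
# while a constant input of 1.0 converges to 1.0.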
LIFStateTuple = namedtuple('LIFStateTuple', ('v', 'z', 'i_future_buffer', 'z_buffer'))
def tf_cell_to_savable_dict(cell, sess, supplement={}):
"""
    Useful function to return a python/numpy dict from one of the tensorflow cell objects defined here.
    The idea is simply that Variables and Tensors given as attributes of the object will be replaced by their numpy values evaluated on the current tensorflow session.
    :param cell: tensorflow cell object
    :param sess: tensorflow session
    :param supplement: optional dict of additional entries to store
:return:
"""
dict_to_save = {}
dict_to_save['cell_type'] = str(cell.__class__)
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
dict_to_save['time_stamp'] = time_stamp
dict_to_save.update(supplement)
tftypes = [Variable, Tensor]
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
tftypes.append(RefVariable)
for k, v in cell.__dict__.items():
if k == 'self':
pass
elif type(v) in tftypes:
dict_to_save[k] = sess.run(v)
elif type(v) in [bool, int, float, np.int64, np.ndarray]:
dict_to_save[k] = v
else:
print('WARNING: attribute of key {} and value {} has type {}, recoding it as string.'.format(k, v, type(v)))
dict_to_save[k] = str(v)
return dict_to_save
class NoReset(Cell):
def __init__(self, n_in, n_rec, tau=20., thr=0.03,
dt=1., reset=0, dtype=tf.float32, n_delay=1, rewiring_connectivity=-1,
in_neuron_sign=None, rec_neuron_sign=None,
dampening_factor=0.3,
injected_noise_current=0.,
V0=1.):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
:param n_delay: number of synaptic delay, the delay range goes from 1 to n_delay time steps
        :param reset: method of resetting the membrane potential after a spike: 'thr' subtracts the threshold amount, 'zero' resets to zero
"""
self.reset_m = tf.Variable(reset, dtype=dtype, name="Reset", trainable=False)
#if np.isscalar(tau): tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
if np.isscalar(thr): thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)
#tau = tf.cast(tau, dtype=dtype)
#dt = tf.cast(dt, dtype=dtype)
self.dampening_factor = dampening_factor
# Parameters
#self.n_delay = n_delay
#self.n_refractory = n_refractory
#self.dt = dt
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self._num_units = self.n_rec
#self.tau = tf.Variable(tau, dtype=dtype, name="Tau", trainable=False)
self._decay = 0.95 #tf.exp(-dt / tau)
self.thr = tf.Variable(thr, dtype=dtype, name="Threshold", trainable=False)
#self.V0 = V0
#self.injected_noise_current = injected_noise_current
#self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
with tf.variable_scope('InputWeights'):
# Input weights
#if 0 < rewiring_connectivity < 1:
# self.w_in_val, self.w_in_sign, self.w_in_var, _ = weight_sampler(n_in, n_rec, rewiring_connectivity,
# neuron_sign=in_neuron_sign)
#else:
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputWeight")
self.w_in_val = self.w_in_var
#self.w_in_val = self.V0 * self.w_in_val
#self.w_in_delay = tf.Variable(rd.randint(self.n_delay, size=n_in * n_rec).reshape(n_in, n_rec),
# dtype=tf.int64, name="InDelays", trainable=False)
self.W_in = self.w_in_val #weight_matrix_with_delay_dimension(self.w_in_val, self.w_in_delay, self.n_delay)
with tf.variable_scope('RecWeights'):
#if 0 < rewiring_connectivity < 1:
# self.w_rec_val, self.w_rec_sign, self.w_rec_var, _ = weight_sampler(n_rec, n_rec,
# rewiring_connectivity,
# neuron_sign=rec_neuron_sign)
#else:
# if rec_neuron_sign is not None or in_neuron_sign is not None:
# raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype,
name='RecurrentWeight')
self.w_rec_val = self.w_rec_var
#recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
#self.w_rec_val = self.w_rec_val * self.V0
#self.w_rec_val = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.w_rec_val),
# self.w_rec_val) # Disconnect autotapse
#self.w_rec_delay = tf.Variable(rd.randint(self.n_delay, size=n_rec * n_rec).reshape(n_rec, n_rec),
# dtype=tf.int64, name="RecDelays", trainable=False)
self.W_rec = self.w_rec_val#weight_matrix_with_delay_dimension(self.w_rec_val, self.w_rec_delay, self.n_delay)
@property
def state_size(self):
return LIFStateTuple(v=self.n_rec,
z=self.n_rec,
i_future_buffer=1,
z_buffer=(self.n_rec, 1))
@property
def output_size(self):
return self.n_rec
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, 1), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, 1), dtype=dtype)
return LIFStateTuple(
v=v0,
z=z0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
#i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
# state.z, self.W_rec)
i = tf.matmul(inputs, self.W_in) + tf.matmul(state.z, self.W_rec)
i_future_buffer = tf.expand_dims(i, -1)
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = state.i_future_buffer #tf_roll(i_future_buffer, axis=2)
new_state = LIFStateTuple(v=new_v,
z=new_z,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return new_z, new_state
def LIF_dynamic(self, v, z, z_buffer, i_future_buffer, thr=None, decay=None, n_refractory=None, add_current=0.):
"""
        Generate the next spike and voltage tensors for a given cell state.
        :param v:
        :param z:
:param z_buffer:
:param i_future_buffer:
:param thr:
:param decay:
:param n_refractory:
:param add_current:
:return:
"""
#if self.injected_noise_current > 0:
# add_current = tf.random_normal(shape=z.shape, stddev=self.injected_noise_current)
with tf.name_scope('LIFdynamic'):
if thr is None: thr = self.thr
if decay is None: decay = self._decay
#if n_refractory is None: n_refractory = self.n_refractory
i_t = i_future_buffer[:, :, 0]# + add_current
I_reset = z * thr# * self.dt
new_v = decay * v + (1 - decay) * i_t - self.reset_m * I_reset
#TODO: reverse
# Spike generation
v_scaled = (v - thr) / thr
# new_z = differentiable_spikes(v_scaled=v_scaled)
new_z = SpikeFunction(v_scaled, self.dampening_factor)
#TODO: reverse
#if n_refractory > 0:
# is_ref = tf.greater(tf.reduce_max(z_buffer[:, :, -n_refractory:], axis=2), 0)
# new_z = tf.where(is_ref, tf.zeros_like(new_z), new_z)
#new_z = new_z * 1 / self.dt
return new_v, new_z
class TheirReset(Cell):
def __init__(self, n_in, n_rec, tau=20., thr=0.03,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1, rewiring_connectivity=-1, reset=0,
in_neuron_sign=None, rec_neuron_sign=None,
dampening_factor=0.3,
injected_noise_current=0.,
V0=1.):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
:param n_delay: number of synaptic delay, the delay range goes from 1 to n_delay time steps
        :param reset: method of resetting the membrane potential after a spike: 'thr' subtracts the threshold amount, 'zero' resets to zero
"""
if np.isscalar(tau): tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
if np.isscalar(thr): thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)
tau = tf.cast(tau, dtype=dtype)
dt = tf.cast(dt, dtype=dtype)
self.reset_m = tf.Variable(reset, dtype=dtype, name="Reset", trainable=False)
self.dampening_factor = dampening_factor
# Parameters
self.n_delay = n_delay
self.n_refractory = n_refractory
self.dt = dt
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self._num_units = self.n_rec
self.tau = tf.Variable(tau, dtype=dtype, name="Tau", trainable=False)
self._decay = tf.exp(-dt / tau)
self.thr = tf.Variable(thr, dtype=dtype, name="Threshold", trainable=False)
self.V0 = V0
self.injected_noise_current = injected_noise_current
self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
with tf.variable_scope('InputWeights'):
# Input weights
if 0 < rewiring_connectivity < 1:
self.w_in_val, self.w_in_sign, self.w_in_var, _ = weight_sampler(n_in, n_rec, rewiring_connectivity,
neuron_sign=in_neuron_sign)
else:
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputWeight")
self.w_in_val = self.w_in_var
self.w_in_val = self.V0 * self.w_in_val
self.w_in_delay = tf.Variable(rd.randint(self.n_delay, size=n_in * n_rec).reshape(n_in, n_rec),
dtype=tf.int64, name="InDelays", trainable=False)
self.W_in = weight_matrix_with_delay_dimension(self.w_in_val, self.w_in_delay, self.n_delay)
with tf.variable_scope('RecWeights'):
if 0 < rewiring_connectivity < 1:
self.w_rec_val, self.w_rec_sign, self.w_rec_var, _ = weight_sampler(n_rec, n_rec,
rewiring_connectivity,
neuron_sign=rec_neuron_sign)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype,
name='RecurrentWeight')
self.w_rec_val = self.w_rec_var
recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
self.w_rec_val = self.w_rec_val * self.V0
self.w_rec_val = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.w_rec_val),
self.w_rec_val) # Disconnect autotapse
self.w_rec_delay = tf.Variable(rd.randint(self.n_delay, size=n_rec * n_rec).reshape(n_rec, n_rec),
dtype=tf.int64, name="RecDelays", trainable=False)
self.W_rec = weight_matrix_with_delay_dimension(self.w_rec_val, self.w_rec_delay, self.n_delay)
@property
def state_size(self):
return LIFStateTuple(v=self.n_rec,
z=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
@property
def output_size(self):
return self.n_rec
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return LIFStateTuple(
v=v0,
z=z0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = LIFStateTuple(v=new_v,
z=new_z,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return new_z, new_state
def LIF_dynamic(self, v, z, z_buffer, i_future_buffer, thr=None, decay=None, n_refractory=None, add_current=0.):
"""
        Generate the next spike and voltage tensors for a given cell state.
        :param v:
        :param z:
:param z_buffer:
:param i_future_buffer:
:param thr:
:param decay:
:param n_refractory:
:param add_current:
:return:
"""
if self.injected_noise_current > 0:
add_current = tf.random_normal(shape=z.shape, stddev=self.injected_noise_current)
with tf.name_scope('LIFdynamic'):
if thr is None: thr = self.thr
if decay is None: decay = self._decay
if n_refractory is None: n_refractory = self.n_refractory
i_t = i_future_buffer[:, :, 0] + add_current
I_reset = z * thr * self.dt
#new_v = decay * v + (1 - decay) * i_t - I_reset
new_v = decay * v + (1 - decay) * i_t - self.reset_m * I_reset
#TODO: reverse
# Spike generation
v_scaled = v# (v - thr) / thr
# new_z = differentiable_spikes(v_scaled=v_scaled)
new_z = SpikeFunction(v_scaled, self.dampening_factor)
#TODO: reverse
#if n_refractory > 0:
# is_ref = tf.greater(tf.reduce_max(z_buffer[:, :, -n_refractory:], axis=2), 0)
# new_z = tf.where(is_ref, tf.zeros_like(new_z), new_z)
new_z = new_z * 1 / self.dt
return new_v, new_z
class MyLIF(Cell):
def __init__(self, n_in, n_rec, tau=20., thr=0.03,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1, rewiring_connectivity=-1,
in_neuron_sign=None, rec_neuron_sign=None,
dampening_factor=0.3,
injected_noise_current=0.,
V0=1.):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
:param n_delay: number of synaptic delay, the delay range goes from 1 to n_delay time steps
"""
#if np.isscalar(tau): tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
if np.isscalar(thr): thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)
#tau = tf.cast(tau, dtype=dtype)
#dt = tf.cast(dt, dtype=dtype)
self.dampening_factor = dampening_factor
# Parameters
#self.n_delay = n_delay
#self.n_refractory = n_refractory
#self.dt = dt
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self._num_units = self.n_rec
#self.tau = tf.Variable(tau, dtype=dtype, name="Tau", trainable=False)
self._decay = 0.95 #tf.exp(-dt / tau)
self.thr = tf.Variable(thr, dtype=dtype, name="Threshold", trainable=False)
#self.V0 = V0
#self.injected_noise_current = injected_noise_current
#self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
with tf.variable_scope('InputWeights'):
# Input weights
#if 0 < rewiring_connectivity < 1:
# self.w_in_val, self.w_in_sign, self.w_in_var, _ = weight_sampler(n_in, n_rec, rewiring_connectivity,
# neuron_sign=in_neuron_sign)
#else:
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputWeight")
self.w_in_val = self.w_in_var
#self.w_in_val = self.V0 * self.w_in_val
#self.w_in_delay = tf.Variable(rd.randint(self.n_delay, size=n_in * n_rec).reshape(n_in, n_rec),
# dtype=tf.int64, name="InDelays", trainable=False)
self.W_in = self.w_in_val #weight_matrix_with_delay_dimension(self.w_in_val, self.w_in_delay, self.n_delay)
with tf.variable_scope('RecWeights'):
#if 0 < rewiring_connectivity < 1:
# self.w_rec_val, self.w_rec_sign, self.w_rec_var, _ = weight_sampler(n_rec, n_rec,
# rewiring_connectivity,
# neuron_sign=rec_neuron_sign)
#else:
# if rec_neuron_sign is not None or in_neuron_sign is not None:
# raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype,
name='RecurrentWeight')
self.w_rec_val = self.w_rec_var
#recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
#self.w_rec_val = self.w_rec_val * self.V0
#self.w_rec_val = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.w_rec_val),
# self.w_rec_val) # Disconnect autotapse
#self.w_rec_delay = tf.Variable(rd.randint(self.n_delay, size=n_rec * n_rec).reshape(n_rec, n_rec),
# dtype=tf.int64, name="RecDelays", trainable=False)
self.W_rec = self.w_rec_val#weight_matrix_with_delay_dimension(self.w_rec_val, self.w_rec_delay, self.n_delay)
@property
def state_size(self):
return LIFStateTuple(v=self.n_rec,
z=self.n_rec,
i_future_buffer=1,
z_buffer=(self.n_rec, 1))
@property
def output_size(self):
return self.n_rec
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, 1), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, 1), dtype=dtype)
return LIFStateTuple(
v=v0,
z=z0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
#i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
# state.z, self.W_rec)
i = tf.matmul(inputs, self.W_in) + tf.matmul(state.z, self.W_rec)
i_future_buffer = tf.expand_dims(i, -1)
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = state.i_future_buffer #tf_roll(i_future_buffer, axis=2)
new_state = LIFStateTuple(v=new_v,
z=new_z,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return new_z, new_state
def LIF_dynamic(self, v, z, z_buffer, i_future_buffer, thr=None, decay=None, n_refractory=None, add_current=0.):
"""
        Generate the next spike and voltage tensors for a given cell state.
        :param v:
        :param z:
:param z_buffer:
:param i_future_buffer:
:param thr:
:param decay:
:param n_refractory:
:param add_current:
:return:
"""
#if self.injected_noise_current > 0:
# add_current = tf.random_normal(shape=z.shape, stddev=self.injected_noise_current)
with tf.name_scope('LIFdynamic'):
if thr is None: thr = self.thr
if decay is None: decay = self._decay
#if n_refractory is None: n_refractory = self.n_refractory
i_t = i_future_buffer[:, :, 0]# + add_current
I_reset = z * thr# * self.dt
new_v = decay * v + (1 - decay) * i_t - I_reset
#TODO: reverse
# Spike generation
v_scaled = v# (v - thr) / thr
# new_z = differentiable_spikes(v_scaled=v_scaled)
new_z = SpikeFunction(v_scaled, self.dampening_factor)
#TODO: reverse
#if n_refractory > 0:
# is_ref = tf.greater(tf.reduce_max(z_buffer[:, :, -n_refractory:], axis=2), 0)
# new_z = tf.where(is_ref, tf.zeros_like(new_z), new_z)
#new_z = new_z * 1 / self.dt
return new_v, new_z
class LIF(Cell):
def __init__(self, n_in, n_rec, tau=20., thr=0.03,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1, rewiring_connectivity=-1,
in_neuron_sign=None, rec_neuron_sign=None,
dampening_factor=0.3,
injected_noise_current=0.,
V0=1.):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
:param n_delay: number of synaptic delay, the delay range goes from 1 to n_delay time steps
"""
if np.isscalar(tau): tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
if np.isscalar(thr): thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)
tau = tf.cast(tau, dtype=dtype)
dt = tf.cast(dt, dtype=dtype)
self.dampening_factor = dampening_factor
# Parameters
self.n_delay = n_delay
self.n_refractory = n_refractory
self.dt = dt
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self._num_units = self.n_rec
self.tau = tf.Variable(tau, dtype=dtype, name="Tau", trainable=False)
self._decay = tf.exp(-dt / tau)
self.thr = tf.Variable(thr, dtype=dtype, name="Threshold", trainable=False)
self.V0 = V0
self.injected_noise_current = injected_noise_current
self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
with tf.variable_scope('InputWeights'):
# Input weights
if 0 < rewiring_connectivity < 1:
self.w_in_val, self.w_in_sign, self.w_in_var, _ = weight_sampler(n_in, n_rec, rewiring_connectivity,
neuron_sign=in_neuron_sign)
else:
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputWeight")
self.w_in_val = self.w_in_var
self.w_in_val = self.V0 * self.w_in_val
self.w_in_delay = tf.Variable(rd.randint(self.n_delay, size=n_in * n_rec).reshape(n_in, n_rec),
dtype=tf.int64, name="InDelays", trainable=False)
self.W_in = weight_matrix_with_delay_dimension(self.w_in_val, self.w_in_delay, self.n_delay)
with tf.variable_scope('RecWeights'):
if 0 < rewiring_connectivity < 1:
self.w_rec_val, self.w_rec_sign, self.w_rec_var, _ = weight_sampler(n_rec, n_rec,
rewiring_connectivity,
neuron_sign=rec_neuron_sign)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype,
name='RecurrentWeight')
self.w_rec_val = self.w_rec_var
recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
self.w_rec_val = self.w_rec_val * self.V0
self.w_rec_val = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.w_rec_val),
self.w_rec_val) # Disconnect autotapse
self.w_rec_delay = tf.Variable(rd.randint(self.n_delay, size=n_rec * n_rec).reshape(n_rec, n_rec),
dtype=tf.int64, name="RecDelays", trainable=False)
self.W_rec = weight_matrix_with_delay_dimension(self.w_rec_val, self.w_rec_delay, self.n_delay)
@property
def state_size(self):
return LIFStateTuple(v=self.n_rec,
z=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
@property
def output_size(self):
return self.n_rec
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return LIFStateTuple(
v=v0,
z=z0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = LIFStateTuple(v=new_v,
z=new_z,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return new_z, new_state
def LIF_dynamic(self, v, z, z_buffer, i_future_buffer, thr=None, decay=None, n_refractory=None, add_current=0.):
"""
        Generate the next spike and voltage tensors for a given cell state.
        :param v:
        :param z:
:param z_buffer:
:param i_future_buffer:
:param thr:
:param decay:
:param n_refractory:
:param add_current:
:return:
"""
if self.injected_noise_current > 0:
add_current = tf.random_normal(shape=z.shape, stddev=self.injected_noise_current)
with tf.name_scope('LIFdynamic'):
if thr is None: thr = self.thr
if decay is None: decay = self._decay
if n_refractory is None: n_refractory = self.n_refractory
i_t = i_future_buffer[:, :, 0] + add_current
I_reset = z * thr * self.dt
new_v = decay * v + (1 - decay) * i_t - I_reset
#TODO: reverse
# Spike generation
v_scaled = (v - thr) / thr
# new_z = differentiable_spikes(v_scaled=v_scaled)
new_z = SpikeFunction(v_scaled, self.dampening_factor)
#TODO: reverse
#if n_refractory > 0:
# is_ref = tf.greater(tf.reduce_max(z_buffer[:, :, -n_refractory:], axis=2), 0)
# new_z = tf.where(is_ref, tf.zeros_like(new_z), new_z)
new_z = new_z * 1 / self.dt
return new_v, new_z
ALIFStateTuple = namedtuple('ALIFState', (
'z',
'v',
'b',
'i_future_buffer',
'z_buffer'))
class ALIF(LIF):
def __init__(self, n_in, n_rec, tau=20, thr=0.01,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1,
tau_adaptation=200., beta=1.6,
rewiring_connectivity=-1, dampening_factor=0.3,
in_neuron_sign=None, rec_neuron_sign=None, injected_noise_current=0.,
V0=1.):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
:param n_delay: number of synaptic delay, the delay range goes from 1 to n_delay time steps
:param tau_adaptation: adaptation time constant for the threshold voltage
        :param beta: amplitude of adaptation
:param rewiring_connectivity: number of non-zero synapses in weight matrices (at initialization)
:param in_neuron_sign: vector of +1, -1 to specify input neuron signs
        :param rec_neuron_sign: same for recurrent neurons
:param injected_noise_current: amplitude of current noise
:param V0: to choose voltage unit, specify the value of V0=1 Volt in the desired unit (example V0=1000 to set voltage in millivolts)
"""
super(ALIF, self).__init__(n_in=n_in, n_rec=n_rec, tau=tau, thr=thr, dt=dt, n_refractory=n_refractory,
dtype=dtype, n_delay=n_delay,
rewiring_connectivity=rewiring_connectivity,
dampening_factor=dampening_factor, in_neuron_sign=in_neuron_sign,
rec_neuron_sign=rec_neuron_sign,
injected_noise_current=injected_noise_current,
V0=V0)
if tau_adaptation is None: raise ValueError("alpha parameter for adaptive bias must be set")
if beta is None: raise ValueError("beta parameter for adaptive bias must be set")
self.tau_adaptation = tf.Variable(tau_adaptation, dtype=dtype, name="TauAdaptation", trainable=False)
self.beta = tf.Variable(beta, dtype=dtype, name="Beta", trainable=False)
self.decay_b = np.exp(-dt / tau_adaptation)
@property
def output_size(self):
return [self.n_rec, self.n_rec, self.n_rec]
@property
def state_size(self):
return ALIFStateTuple(v=self.n_rec,
z=self.n_rec,
b=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
b0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return ALIFStateTuple(
v=v0,
z=z0,
b=b0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
with tf.name_scope('ALIFcall'):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
new_b = self.decay_b * state.b + (1. - self.decay_b) * state.z
thr = self.thr + new_b * self.beta * self.V0
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer,
decay=self._decay,
thr=thr)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = ALIFStateTuple(v=new_v,
z=new_z,
b=new_b,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return [new_z, new_v, thr], new_state
def static_rnn_with_gradient(cell, inputs, state, loss_function, T, verbose=True):
batch_size = tf.shape(inputs)[0]
thr_list = []
state_list = []
z_list = []
v_list = []
if verbose: print('Building forward Graph...', end=' ')
t0 = time()
for t in range(T):
outputs, state = cell(inputs[:, t, :], state)
z, v, thr = outputs
z_list.append(z)
v_list.append(v)
thr_list.append(thr)
state_list.append(state)
zs = tf.stack(z_list, axis=1)
vs = tf.stack(v_list, axis=1)
thrs = tf.stack(thr_list, axis=1)
loss = loss_function(zs)
de_dz_partial = tf.gradients(loss, zs)[0]
if de_dz_partial is None:
de_dz_partial = tf.zeros_like(zs)
print('Warning: Partial de_dz is None')
print('Done in {:.2f}s'.format(time() - t0))
def namedtuple_to_list(state):
return list(state._asdict().values())
zero_state_as_list = cell.zero_state(batch_size, tf.float32)
de_dstate = namedtuple_to_list(cell.zero_state(batch_size, dtype=tf.float32))
g_list = []
if verbose: print('Building backward Graph...', end=' ')
t0 = time()
for t in np.arange(T)[::-1]:
# gradient from next state
if t < T - 1:
state = namedtuple_to_list(state_list[t])
next_state = namedtuple_to_list(state_list[t + 1])
de_dstate = tf.gradients(ys=next_state, xs=state, grad_ys=de_dstate)
for k_var, de_dvar in enumerate(de_dstate):
if de_dvar is None:
de_dstate[k_var] = tf.zeros_like(zero_state_as_list[k_var])
print('Warning: var {} at time {} is None'.format(k_var, t))
# add the partial derivative due to current error
de_dstate[0] = de_dstate[0] + de_dz_partial[:, t]
g_list.append(de_dstate[0])
g_list = list(reversed(g_list))
gs = tf.stack(g_list, axis=1)
print('Done in {:.2f}s'.format(time() - t0))
return zs, vs, thrs, gs, state_list[-1]
|
[
"eric.koepke@tum.de"
] |
eric.koepke@tum.de
|
1f0159477038149cef6ee32026e10ec26cf0b494
|
564ccf876cd04d199dd2364e2e138989598be98d
|
/Stock/venv/Lib/site-packages/plotly/graph_objs/indicator/_gauge.py
|
5ffccfcbd6f25787dd47a74ef1d46be6634c81a8
|
[] |
no_license
|
hcz2000/pywork
|
7eedcc7d53d85036b823a2ed96a1bffe761a8aec
|
345820faa87de131203a98932a039725ff2bebda
|
refs/heads/master
| 2023-08-15T10:48:36.176886
| 2023-07-26T09:23:09
| 2023-07-26T09:23:09
| 134,735,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,017
|
py
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Gauge(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "indicator"
_path_str = "indicator.gauge"
_valid_props = {
"axis",
"bar",
"bgcolor",
"bordercolor",
"borderwidth",
"shape",
"stepdefaults",
"steps",
"threshold",
}
# axis
# ----
@property
def axis(self):
"""
The 'axis' property is an instance of Axis
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.gauge.Axis`
- A dict of string/value properties that will be passed
to the Axis constructor
Supported dict properties:
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
range
Sets the range of this axis.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.indicat
or.gauge.axis.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.indicator.gauge.axis.tickformatstopdefaults),
sets the default property values to use for
elements of
indicator.gauge.axis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as
compared to the spacing between ticks. A value
of 1 (default) means each tick gets a label. A
                value of 2 shows every 2nd label. A
larger value n means only every nth tick is
labeled. `tick0` determines which labels are
shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is
"array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for `ticktext`.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
visible
A single toggle to hide the axis while
preserving interaction like dragging. Default
is true when a cheater plot is present on the
axis, otherwise false
Returns
-------
plotly.graph_objs.indicator.gauge.Axis
"""
return self["axis"]
@axis.setter
def axis(self, val):
self["axis"] = val
# bar
# ---
@property
def bar(self):
"""
Set the appearance of the gauge's value
The 'bar' property is an instance of Bar
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.gauge.Bar`
- A dict of string/value properties that will be passed
to the Bar constructor
Supported dict properties:
color
Sets the background color of the arc.
line
:class:`plotly.graph_objects.indicator.gauge.ba
r.Line` instance or dict with compatible
properties
thickness
Sets the thickness of the bar as a fraction of
the total thickness of the gauge.
Returns
-------
plotly.graph_objs.indicator.gauge.Bar
"""
return self["bar"]
@bar.setter
def bar(self, val):
self["bar"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the gauge background color.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the color of the border enclosing the gauge.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) of the border enclosing the gauge.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# shape
# -----
@property
def shape(self):
"""
Set the shape of the gauge
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['angular', 'bullet']
Returns
-------
Any
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
# steps
# -----
@property
def steps(self):
"""
The 'steps' property is a tuple of instances of
Step that may be specified as:
- A list or tuple of instances of plotly.graph_objs.indicator.gauge.Step
- A list or tuple of dicts of string/value properties that
will be passed to the Step constructor
Supported dict properties:
color
Sets the background color of the arc.
line
:class:`plotly.graph_objects.indicator.gauge.st
ep.Line` instance or dict with compatible
properties
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
range
Sets the range of this axis.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
thickness
Sets the thickness of the bar as a fraction of
the total thickness of the gauge.
Returns
-------
tuple[plotly.graph_objs.indicator.gauge.Step]
"""
return self["steps"]
@steps.setter
def steps(self, val):
self["steps"] = val
# stepdefaults
# ------------
@property
def stepdefaults(self):
"""
When used in a template (as
layout.template.data.indicator.gauge.stepdefaults), sets the
default property values to use for elements of
indicator.gauge.steps
The 'stepdefaults' property is an instance of Step
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.gauge.Step`
- A dict of string/value properties that will be passed
to the Step constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.indicator.gauge.Step
"""
return self["stepdefaults"]
@stepdefaults.setter
def stepdefaults(self, val):
self["stepdefaults"] = val
# threshold
# ---------
@property
def threshold(self):
"""
The 'threshold' property is an instance of Threshold
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.gauge.Threshold`
- A dict of string/value properties that will be passed
to the Threshold constructor
Supported dict properties:
line
:class:`plotly.graph_objects.indicator.gauge.th
reshold.Line` instance or dict with compatible
properties
thickness
Sets the thickness of the threshold line as a
fraction of the thickness of the gauge.
value
                Sets a threshold value drawn as a line.
Returns
-------
plotly.graph_objs.indicator.gauge.Threshold
"""
return self["threshold"]
@threshold.setter
def threshold(self, val):
self["threshold"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
axis
:class:`plotly.graph_objects.indicator.gauge.Axis`
instance or dict with compatible properties
bar
Set the appearance of the gauge's value
bgcolor
Sets the gauge background color.
bordercolor
Sets the color of the border enclosing the gauge.
borderwidth
Sets the width (in px) of the border enclosing the
gauge.
shape
Set the shape of the gauge
steps
A tuple of
:class:`plotly.graph_objects.indicator.gauge.Step`
instances or dicts with compatible properties
stepdefaults
When used in a template (as
layout.template.data.indicator.gauge.stepdefaults),
sets the default property values to use for elements of
indicator.gauge.steps
threshold
:class:`plotly.graph_objects.indicator.gauge.Threshold`
instance or dict with compatible properties
"""
def __init__(
self,
arg=None,
axis=None,
bar=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
shape=None,
steps=None,
stepdefaults=None,
threshold=None,
**kwargs,
):
"""
Construct a new Gauge object
The gauge of the Indicator plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.Gauge`
axis
:class:`plotly.graph_objects.indicator.gauge.Axis`
instance or dict with compatible properties
bar
Set the appearance of the gauge's value
bgcolor
Sets the gauge background color.
bordercolor
Sets the color of the border enclosing the gauge.
borderwidth
Sets the width (in px) of the border enclosing the
gauge.
shape
Set the shape of the gauge
steps
A tuple of
:class:`plotly.graph_objects.indicator.gauge.Step`
instances or dicts with compatible properties
stepdefaults
When used in a template (as
layout.template.data.indicator.gauge.stepdefaults),
sets the default property values to use for elements of
indicator.gauge.steps
threshold
:class:`plotly.graph_objects.indicator.gauge.Threshold`
instance or dict with compatible properties
Returns
-------
Gauge
"""
super(Gauge, self).__init__("gauge")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.indicator.Gauge
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Gauge`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("axis", None)
_v = axis if axis is not None else _v
if _v is not None:
self["axis"] = _v
_v = arg.pop("bar", None)
_v = bar if bar is not None else _v
if _v is not None:
self["bar"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("shape", None)
_v = shape if shape is not None else _v
if _v is not None:
self["shape"] = _v
_v = arg.pop("steps", None)
_v = steps if steps is not None else _v
if _v is not None:
self["steps"] = _v
_v = arg.pop("stepdefaults", None)
_v = stepdefaults if stepdefaults is not None else _v
if _v is not None:
self["stepdefaults"] = _v
_v = arg.pop("threshold", None)
_v = threshold if threshold is not None else _v
if _v is not None:
self["threshold"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
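# --- Illustrative usage (a sketch; not part of the generated module) -------
# Maps the constructor parameters documented above onto a live figure via the
# public plotly.graph_objects entry point. Guarded so importing this module
# stays side-effect free.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Indicator(
            mode="gauge+number",
            value=72,
            gauge=dict(  # validated into an indicator.Gauge instance
                shape="angular",  # 'angular' or 'bullet'
                axis=dict(range=[0, 100]),
                bar=dict(color="darkblue"),
                bgcolor="white",
                bordercolor="gray",
                borderwidth=2,
                steps=[
                    dict(range=[0, 50], color="lightgray"),
                    dict(range=[50, 80], color="gray"),
                ],
                threshold=dict(
                    line=dict(color="red", width=4), thickness=0.75, value=90
                ),
            ),
        )
    )
    fig.show()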
|
[
"huangchangzhan@hotmail.com"
] |
huangchangzhan@hotmail.com
|
51a0194d0c81e8e32f497aeb6035955bfdb104a8
|
19fd5d0de9be45b3fd0c6c71e64f5b5887d42fa2
|
/mandatory_main.py
|
7d5ea2f2f58f4a130eb4309af3f39b9d01b05c28
|
[] |
no_license
|
Skydt90/Python_mandatory
|
8269ae8823cf8002e548a1a6624e4bc17688cd7b
|
ab800f7c013c83afda0b20f85bc66132a1464093
|
refs/heads/master
| 2020-04-28T00:32:14.056852
| 2019-06-13T15:23:55
| 2019-06-13T15:23:55
| 174,819,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
import time
from request_clone_and_pull import cloneAndPullRepos, getGithubCloneUrls
from md_create import pullAndPushToGit  # createMDFile and sys were imported but never used
def main():
cloneAndPullRepos(getGithubCloneUrls())
time.sleep(3)
pullAndPushToGit()
if __name__ == "__main__":
main()
|
[
"Christian@camillas-air.home"
] |
Christian@camillas-air.home
|
bbf34e777ab0d9cabee867c88b277fe54d361da6
|
5935e39dedac1479f52a715e5f40ddd5861b7598
|
/Generator/model/field.py
|
755798447a6833f846b0a447d3fc7501565f58bd
|
[] |
no_license
|
vkochano1/SchemaGen
|
a9d5e0e6805e942df1f6544f12214246886e1427
|
c971eb999db788af366677f703056b712976a602
|
refs/heads/master
| 2020-03-31T06:59:18.033077
| 2018-11-15T06:35:44
| 2018-11-15T06:35:44
| 152,002,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
import namespace
import logging
import utils
import copy
from common import *
class Field(ModelObject):
def __init__(self, name, tag, dataType, namespace, attrs = None, displayName = None):
super(Field, self).__init__(ObjectType.Field, namespace, name)
self.className = 'Field' + self.name
self.displayName = displayName
self.tag = tag
self.dataTypeName = dataType
self.dataType = None
self.attrs = attrs
self.logger.debug('Created field %s::%s ' % (namespace.fullName, self.name))
def __str__(self):
return "{{ Field:'{name}',Tag:'{tag}', Datatype: '{datatype}' }}".format(name=self.fullName, tag = self.tag, datatype = str(self.dataType))
def __repr__(self): return str(self)
def resolveLinks(self):
self.dataType = self.namespace().resolveDataTypeByName(self.dataTypeName)
if self.dataType == None:
self.dataType = self.namespace().resolveDataTypeByName("Lib::" + self.dataTypeName)
if self.dataType == None:
raise Exception('Failed to resolve datatype %s' % str(self.dataTypeName))
        if self.attrs is not None:
            # Need to create a new data type carrying the field attributes.
            # Set the decorated name on the clone, not on the shared original
            # (the original code mutated self.dataType before swapping it for
            # the clone, which left the clone with the undecorated name).
            cloned = copy.copy(self.dataType)
            cloned.fullName = cloned.namespace().fullName + "::" + cloned.name + "<" + ','.join(self.attrs) + ">"
            self.dataType = cloned
self.changePropDataCategory(self.dataType.propDataCategory())
|
[
"vladimir.kochanov.g@gmail.com"
] |
vladimir.kochanov.g@gmail.com
|
9324968def63f1bc2a815d281034d41cb14f2c18
|
823b828854e2a9a8e7585f5ac70ec02b36ddc320
|
/ERP Python/pb-music-library-pa-sample-pasternakewa/file_handling.py
|
82cec7a9b2f5dce10337b957541f62f3418037b4
|
[] |
no_license
|
Stachozaur/ERP
|
f57d4e865d40b97c76b8990569b4212fc3e32005
|
1d4e5340eadd1cfe2b56eeeb8e1750ea2c653b5f
|
refs/heads/master
| 2022-11-06T06:39:44.885635
| 2020-06-22T11:09:46
| 2020-06-22T11:09:46
| 274,113,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
def import_data(filename='albums_data.txt'):
"""
Import data from a file to a list. Expected returned data format:
["David Bowie", "Low", "1977", "rock", "38:26"],
["Britney Spears", "Baby One More Time", "1999", "pop", "42:20"],
...]
:param str filename: optional, name of the file to be imported
:returns: list of lists representing albums' data
:rtype: list
"""
def export_data(albums, filename='albums_data.txt', mode='a'):
"""
Export data from a list to file. If called with mode 'w' it should overwrite
data in file. If called with mode 'a' it should append data at the end.
:param list albums: albums' data
:param str filename: optional, name of file to export data to
:param str mode: optional, file open mode with the same meaning as\
file open modes used in Python. Possible values: only 'w' or 'a'
:raises ValueError: if mode other than 'w' or 'a' was given. Error message:
'Wrong write mode'
"""
|
[
"noreply@github.com"
] |
Stachozaur.noreply@github.com
|
eb1d2064715d7ec8f340bcb3bbec0f22274bcacf
|
da01dcf75e9674f2123fd1e548f117ab27394f0d
|
/export_spare_parts.py
|
d012fecf9eae598b97dbfd5783d3b0217934e719
|
[] |
no_license
|
lolbefree/export_spare_parts
|
778198b9b72c7d3159dc66ddfd8d08fc48d84235
|
2adfe0fe94b78666a3208a1854fb6c7056edc6e1
|
refs/heads/master
| 2023-03-05T09:21:37.090344
| 2021-02-23T11:44:40
| 2021-02-23T11:44:40
| 341,150,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,856
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, uic
import pyodbc
import openpyxl
from openpyxl import Workbook
import sys
from untitled import Ui_Export_spare_parts
import sql_querys
class SpareParts(QtWidgets.QDialog):
wb = Workbook()
server = ''
database = ''
username = ''
password = 'PW'
driver = '{SQL Server}' # Driver you need to connect to the database
port = '1433'
def __init__(self):
self.ui = Ui_Export_spare_parts()
super().__init__()
self.ui.setupUi(self)
self.provider_link_before_delete = ""
self.ui.open_excel_button.clicked.connect(lambda x: self.showDialog())
self.ui.commandLinkButton.clicked.connect(lambda x: self.check_in_base())
self.ui.pushButton.clicked.connect(lambda x: self.foresight())
self.ui.del_from_iprr.clicked.connect(lambda x: self.delete_from_iprr_and_iprh())
self.show() # Show the GUI
        self.cnn = pyodbc.connect(
            # The original string embedded the literal 'PORT=port' and then a
            # second, mistyped 'PORT=1443'; use the configured port once.
            'DRIVER=' + self.driver + ';SERVER=' + self.server + ';PORT=' + self.port +
            ';DATABASE=' + self.database + ';UID=' + self.username + ';PWD=' + self.password)
self.group_err = False
self.discount_err = False
self.code_list = list()
self.name_list = list()
self.group_list = list()
self.discount_list = list()
self.enter_price_list = list()
self.retail_list = list()
self.provider_list = list()
        self.original_code = list()  # list of original codes, special characters kept
        self.original_code_without_symbols = list()  # list of original codes with special characters stripped
self.sql_iprr = str()
self.sql_iprh = str()
self.main_dict = dict()
self.cursor = self.cnn.cursor()
self.cnt = 0
self.if_exist_in_base = str()
self.ui.pushButton_2.clicked.connect(lambda x: self.add_to_main_base())
def delete_from_iprr_and_iprh(self):
self.cursor.execute(sql_querys.delelet_from_iprr(self.provider_link_before_delete))
self.cnn.commit()
self.cursor.execute(sql_querys.delelet_from_iprh(self.provider_link_before_delete))
self.cnn.commit()
self.ui.print_res.setText("Данный каталог удален с IPRR")
self.ui.print_res.setStyleSheet("color: blue")
def clear_lists_of_data(self):
self.ui.print_res.setText("")
for data in [self.original_code_without_symbols, self.name_list, self.group_list, self.discount_list,
self.enter_price_list,
self.retail_list, self.provider_list, self.original_code]:
data.clear()
def check_float(self, potential_float):
try:
float(potential_float)
return True
except ValueError:
return False
def create_list(self, later, row_num, name_of_list):
# print(later, row_num, name_of_list)
self.wb = openpyxl.load_workbook(self.filename, data_only=True)
self.ws = self.wb[self.ui.later.text()]
        row_max = self.ws.max_row  # remember to subtract 1
if later == row_num:
if name_of_list == "group_" and self.check_float(self.ui.group_.text()):
self.group_list.append(self.ui.group_.text())
self.group_list = self.group_list * (row_max - 1)
self.group_err = False
if name_of_list == "group_" and not self.check_float(self.ui.group_.text()):
self.group_err = True
self.group_list.append(self.ui.group_.text())
self.group_list = self.group_list * (row_max - 1)
if name_of_list == "discount_" and self.check_float(self.ui.discount_.text()):
self.discount_list.append(self.ui.discount_.text())
self.discount_list = self.discount_list * (row_max - 1)
self.discount_err = False
if name_of_list == "discount_" and not self.check_float(self.ui.discount_.text()):
self.discount_err = True
self.discount_list.append(self.ui.discount_.text())
self.discount_list = self.discount_list * (row_max - 1)
if name_of_list == "provider_":
self.provider_list.append(self.ui.provider_.text())
self.provider_list = self.provider_list * (row_max - 1)
# print(f"later = {later}, row_max = {row_num}")
else:
if not self.ui.disccount_check.isChecked():
self.discount_err = False
if not self.ui.group_check.isChecked():
self.group_err = False
while int(row_num) <= row_max and self.ws[f"{later}{row_num}"].value is not None:
# print(self.ws[f"{later}{row_num}"].value)
string = ""
if name_of_list == "code_":
for item in str(self.ws[f"{later}{row_num}"].value):
if item.isalpha() or item.isdigit():
string += item
# print(string)
if int(row_num) <= row_max:
self.original_code.append(self.ws[f"{later}{row_num}"].value)
self.original_code_without_symbols.append(string)
elif name_of_list == "name_":
self.name_list.append(self.ws[f"{later}{row_num}"].value)
elif name_of_list == "group_" and not self.ui.group_check.isChecked():
self.group_list.append(self.ws[f"{later}{row_num}"].value)
elif name_of_list == "enter_price":
self.enter_price_list.append(self.ws[f"{later}{row_num}"].value)
elif name_of_list == "retail_":
self.retail_list.append(self.ws[f"{later}{row_num}"].value)
elif name_of_list == "discount_" and not self.ui.disccount_check.isChecked():
self.discount_list.append(self.ws[f"{later}{row_num}"].value)
elif name_of_list == "provider_" and not self.ui.provider_check.isChecked():
self.provider_list.append(self.ws[f"{later}{row_num}"].value)
row_num = int(row_num) + 1
def showDialog(self):
# self.clear_all_lists()
fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', '*.xlsx')[0]
name_index_ = fname.rfind("/")
self.filename = fname
self.ui.label_7.setText(fname[name_index_ + 1:])
self.main_dict.clear()
def start_main_work(self):
self.create_list(self.ui.code_.text()[:1], self.ui.code_.text()[1:], "code_")
self.create_list(self.ui.name_.text()[:1], self.ui.name_.text()[1:], "name_")
if len(self.ui.group_.text()) > 1 and not self.ui.group_check.isChecked():
self.create_list(self.ui.group_.text()[:1], self.ui.group_.text()[1:], "group_")
else:
self.create_list(self.ui.group_.text(), self.ui.group_.text(), "group_")
if len(self.ui.discount_.text()) > 1 and not self.ui.disccount_check.isChecked():
self.create_list(self.ui.discount_.text()[:1], self.ui.discount_.text()[1:], "discount_")
else:
self.create_list(self.ui.discount_.text(), self.ui.discount_.text(), "discount_")
self.create_list(self.ui.enter_price.text()[:1], self.ui.enter_price.text()[1:], "enter_price")
self.create_list(self.ui.retail_.text()[:1], self.ui.retail_.text()[1:], "retail_")
if len(self.ui.provider_.text()) > 1 and not self.ui.provider_check.isChecked():
self.create_list(self.ui.provider_.text()[:1], self.ui.provider_.text()[1:], "provider_")
else:
self.create_list(self.ui.provider_.text(), self.ui.provider_.text(), "provider_")
def closeEvent(self, event):
self.cnn.close()
def check_in_base(self):
if_exist_ = f"""
if exists
(select top 1 * from iprr where SUPLNO='{self.provider_link_before_delete}')
select 'true'
else
select 'false'
"""
res = self.cursor.execute(if_exist_)
for row in res:
if (row[0]) == "true":
self.if_exist_in_base = "true"
else:
self.if_exist_in_base = "false"
if self.if_exist_in_base == "false":
self.insert_in_database()
else:
self.ui.print_res.setText("Данный каталог уже есть в IPRR")
self.ui.print_res.setStyleSheet("color: red")
def insert_in_database(self):
self.ui.progressBar.setMaximum(len(self.main_dict))
# try:
res = self.cursor.execute(sql_querys.check_supl(self.provider_link_before_delete))
res = list(res)
for row in res:
if (row[0]) == "true":
self.cursor.execute(sql_querys.suplno_config(self.provider_link_before_delete))
self.cnn.commit()
self.sql_iprh = f"""
INSERT INTO iprh (created,CTYPE,PRSETDT,SUPLNO,USRSID,UPDPAC,UPDSAL,CRENEW,CHGLIS,NEWLIS,ONLYBPR,CURRCD,CHALIS,CHELIS,NOTE,BPRLIS,FLANG1)
values (getdate(),'f',convert(date,getdate()),'{self.provider_link_before_delete}','auto',1,0,1,0,0,0,'uah',0,0,'{"OK " + str(len(self.main_dict))}',0,'eng')"""
# print(self.sql_iprh)
self.cursor.execute(self.sql_iprh)
self.sql_iprr_key = f"""declare @key datetime
set @key=(select max(created) from iprh where SUPLNO='{self.provider_link_before_delete}'
group by SUPLNO)
select @key"""
for row in self.cursor.execute(self.sql_iprr_key):
self.key = row[0]
self.cnn.commit()
print("tyt1")
for ITEMNO in self.main_dict:
if "'" in self.main_dict[ITEMNO]["name"]:
self.main_dict[ITEMNO]["name"] = self.main_dict[ITEMNO]["name"].replace("'", "`")
group_id = f"""select CONVERT(varchar,convert(integer,IGROUPID), 100) from igrp
where SUPLNO='{self.main_dict[ITEMNO]["SUPLNO"]}' and igrpid='{self.main_dict[ITEMNO]["IGRPID"]}'"""
print(f"group_id: {group_id}")
print(list(self.cursor.execute(group_id)))
self.sql_iprr = f"""
insert into iprr (CREATED,SUPLNO,ITEMNO,skey,name,SWENAME,IGRPID,DDISCCD,svatcd,BUYPR,SELPR,CURRCD)
values (convert(datetime, '{self.key.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]}'),'{self.main_dict[ITEMNO]["SUPLNO"]}','{self.main_dict[ITEMNO]["ITEMNO"]}','{self.main_dict[ITEMNO]["skey"]}','{self.main_dict[ITEMNO]["name"]}','{self.main_dict[ITEMNO]["name"]}','{list(self.cursor.execute(group_id))[0][0]}','{self.main_dict[ITEMNO]["DDISCCD"]}','1' ,{self.main_dict[ITEMNO]["BUYPR"]},{self.main_dict[ITEMNO]["SELPR"]},'UAH')"""
# print(self.main_dict[ITEMNO]["skey"])
# print(self.sql_iprr)
self.cursor.execute(self.sql_iprr)
self.cnn.commit()
self.ui.label_itemno.setText(f"Код запчасти : {self.main_dict[ITEMNO]['ITEMNO']}")
self.cnt += 1
self.ui.progressBar.setValue(self.cnt)
self.ui.print_res.setText("Экспорт удачно выполнен!")
self.ui.print_res.setStyleSheet("color: green")
self.main_dict.clear()
else:
self.ui.print_res.setText("Сначала добавьте поставщика!")
self.ui.print_res.setStyleSheet("color: red")
# except Exception as err:
# print("tyt",err)
def foresight_clear(self):
for data in [self.code_list_foresigh, self.name_list_foresigh, self.group_list_foresigh,
self.discount_list_foresigh, self.enter_price_list_foresigh, self.retail_list_foresigh,
self.provider_list_foresigh]:
for item in data:
item.setText("")
def main_procedure(self):
self.start_main_work()
def clear_all_lists(self):
try:
self.provider_link_before_delete = self.provider_list[0]
except IndexError:
self.ui.print_res.setText("Проверьте правильность координат")
self.ui.print_res.setStyleSheet('color: red')
self.original_code_without_symbols.clear()
self.original_code.clear()
self.provider_list.clear()
self.group_list.clear()
self.discount_list.clear()
self.retail_list.clear()
self.enter_price_list.clear()
self.name_list.clear()
def foresight(self):
try:
self.code_list_foresigh = [self.ui.code, self.ui.code_2, self.ui.code_3, self.ui.code_4]
self.name_list_foresigh = [self.ui.name, self.ui.name_2, self.ui.name_3, self.ui.name_4]
self.group_list_foresigh = [self.ui.group, self.ui.group_2, self.ui.group_3, self.ui.group_4]
self.discount_list_foresigh = [self.ui.discount, self.ui.discount_2, self.ui.discount_3,
self.ui.discount_4]
self.enter_price_list_foresigh = [self.ui.enter_price_l, self.ui.enter_price_l_2, self.ui.enter_price_l_3,
self.ui.enter_price_l_4]
self.retail_list_foresigh = [self.ui.retail, self.ui.retail_2, self.ui.retail_3, self.ui.retail_4]
self.provider_list_foresigh = [self.ui.provider_1, self.ui.provider_2, self.ui.provider_3,
self.ui.provider_4, ]
self.foresight_clear()
self.main_procedure()
if len(self.original_code_without_symbols) == len(self.name_list) == len(self.group_list) == len(
self.discount_list) == len(
self.enter_price_list) == len(self.retail_list) == len(self.provider_list):
# print(len(set(self.provider_list)))
if len(set(self.provider_list)) > 1:
self.ui.print_res.setText("Проверьте колонку поставщика, далжно иметь уникальное значение.")
self.ui.print_res.setStyleSheet("color: red")
else:
if len(self.original_code_without_symbols) < 4:
len_foresight_print = len(self.original_code_without_symbols)
else:
len_foresight_print = 4
for item in range(len_foresight_print):
self.code_list_foresigh[item].setText(str(self.original_code_without_symbols[item]))
self.name_list_foresigh[item].setText(str(self.name_list[item]))
self.group_list_foresigh[item].setText(str(self.group_list[item]))
self.discount_list_foresigh[item].setText(str(self.discount_list[item]))
self.enter_price_list_foresigh[item].setText(str(self.enter_price_list[item]))
self.retail_list_foresigh[item].setText(str(self.retail_list[item]))
self.provider_list_foresigh[item].setText(str(self.provider_list[item]))
self.ui.print_res.setStyleSheet('color: green')
if self.group_err:
self.ui.print_res.setText("Группа должна быть целым числом")
self.ui.print_res.setStyleSheet('color: red')
elif self.discount_err:
self.ui.print_res.setText("Скидка должна быть вещественным или целым числом")
self.ui.print_res.setStyleSheet("color: red")
elif self.discount_err and self.group_err:
self.ui.print_res.setText("Скидка и группа должна быть вещественным или целым числом")
self.ui.print_res.setStyleSheet("color: red")
else:
self.ui.print_res.setText("Предосмотр сформирован")
print(self.original_code_without_symbols)
for ITEMNO in self.original_code_without_symbols:
d = {ITEMNO: {"ITEMNO": 0, "SUPLNO": 0, "name": 0, "IGRPID": 0, "BUYPR": 0, "SELPR": 0,
"skey": 0}}
self.main_dict.update(d)
for ITEMNO, SUPLNO, name, IGRPID, DDISCCD, BUYPR, SELPR, orig in zip(
self.original_code_without_symbols,
self.provider_list,
self.name_list,
self.group_list,
self.discount_list,
self.enter_price_list,
self.retail_list,
self.original_code):
if float(self.main_dict[ITEMNO]["SELPR"]) < float(SELPR):
self.main_dict[ITEMNO]["ITEMNO"] = ITEMNO
self.main_dict[ITEMNO]["SUPLNO"] = SUPLNO
self.main_dict[ITEMNO]["name"] = name
self.main_dict[ITEMNO]["IGRPID"] = IGRPID
self.main_dict[ITEMNO]["DDISCCD"] = DDISCCD
self.main_dict[ITEMNO]["BUYPR"] = BUYPR
self.main_dict[ITEMNO]["SELPR"] = SELPR
self.main_dict[ITEMNO]["skey"] = orig
if self.main_dict[ITEMNO]["ITEMNO"] in self.original_code_without_symbols:
ITEMNO_Index = self.original_code_without_symbols.index(
self.main_dict[ITEMNO]["ITEMNO"])
self.main_dict[ITEMNO]["skey"] = self.original_code[ITEMNO_Index]
elif self.main_dict[ITEMNO]["ITEMNO"] not in self.original_code_without_symbols:
self.main_dict[ITEMNO]["skey"] = ITEMNO
self.clear_all_lists()
except KeyError:
self.ui.print_res.setText("Проверте название листа")
self.ui.print_res.setStyleSheet("color: red")
except (AttributeError, openpyxl.utils.exceptions.InvalidFileException):
self.ui.print_res.setText("Выберите excel")
self.ui.print_res.setStyleSheet('color: red')
def add_to_main_base(self):
# print("add to main base")
try:
if self.ui.radioButton_current.isChecked():
self.cursor.execute(sql_querys.main_query(self.provider_link_before_delete))
self.cnn.commit()
self.ui.print_res.setText(f"Обновление каталога {self.provider_link_before_delete} успешен")
self.ui.print_res.setStyleSheet('color: Green')
elif self.ui.radioButton_vw.isChecked():
self.cursor.execute("dbo.vwpriceimport")
self.ui.print_res.setText(f"Обновление каталога VW успешен")
self.ui.print_res.setStyleSheet('color: Green')
elif self.ui.radioButton_dil.isChecked():
self.cursor.execute("dbo.dilpriceimport")
self.ui.print_res.setText(f"Обновление каталога Dil успешен")
self.ui.print_res.setStyleSheet('color: Green')
elif self.ui.radioButton_sk.isChecked():
self.cursor.execute("dbo.skpriceimport")
self.ui.print_res.setText(f"Обновление каталога sk успешен")
self.ui.print_res.setStyleSheet('color: Green')
            # The original repeated radioButton_sk here, which made this NZP
            # branch unreachable; radioButton_nzp is an assumed widget name.
            elif self.ui.radioButton_nzp.isChecked():
                self.cursor.execute("dbo.nzppriceimport")
                self.ui.print_res.setText(f"Обновление каталога NZP успешен")
                self.ui.print_res.setStyleSheet('color: Green')
except Exception as err:
self.ui.print_res.setText(f"{err}")
self.ui.print_res.setStyleSheet('color: red')
def main():
app = QtWidgets.QApplication(sys.argv)
SpareParts().exec_()
if __name__ == '__main__':
main()
|
[
"jioji1000@gmail.com"
] |
jioji1000@gmail.com
|
e6177a3a63b8598016491d7e2ef26786706b0d63
|
d443e632c3359c7888f7cccc17d3ada64759f6fd
|
/scripts/test.py
|
692cb0b983618581f62f2b3d6d7d190c700c31ee
|
[] |
no_license
|
andrewhalle/gomeetpeople
|
b2e123ede2170e01e3b133e652ab555617443f14
|
a9c52137f17e3c089ae7e55f86449546fbd613b5
|
refs/heads/master
| 2021-08-07T18:21:15.591804
| 2017-11-08T18:00:15
| 2017-11-08T18:00:15
| 108,918,032
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,510
|
py
|
import os
import sys
from pathlib import Path
# Detect running script from any directory other than /gomeetpeople
if Path(os.getcwd()).parts[-1] != "gomeetpeople":
print("Please run scripts from /gomeetpeople, the top-level directory of this project")
sys.exit()
import app
import unittest
class TestGetUsers(unittest.TestCase):
def log_in(self):
self.app.post("/login", data={"username": "andrew"})
def setUp(self):
app.app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
app.app.testing = True
self.app = app.app.test_client()
app.db.create_all()
app.db.session.add(app.User(username="andrew", latitude=10, longitude=20, active=True))
app.db.session.add(app.User(username="chris", latitude=20, longitude=10, active=True))
app.db.session.add(app.User(username="anna", latitude=100, longitude=100, active=True))
app.db.session.add(app.User(username="michelle", latitude=10.1, longitude=20.1, active=False))
app.db.session.commit()
def test_not_logged_in(self):
rv = self.app.get("/api/")
assert b'error' in rv.data
def test_get_users(self):
self.log_in()
rv = self.app.get("/api/")
print(rv.data)
assert b'andrew' not in rv.data
assert b'chris' in rv.data
assert b'anna' not in rv.data
assert b'michelle' not in rv.data
def tearDown(self):
app.db.session.remove()
app.db.drop_all()
class TestSetLocation(unittest.TestCase):
def log_in(self):
self.app.post("/login", data={"username": "andrew"})
def setUp(self):
app.app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
app.app.testing = True
self.app = app.app.test_client()
app.db.create_all()
app.db.session.add(app.User(username="andrew", latitude=10, longitude=20, active=True))
app.db.session.add(app.User(username="chris", latitude=20, longitude=10, active=True))
app.db.session.add(app.User(username="anna", latitude=100, longitude=100, active=True))
app.db.session.add(app.User(username="michelle", latitude=10.1, longitude=20.1, active=False))
app.db.session.commit()
def test_location_set(self):
# TODO
return
def test_matching(self):
# TODO
return
def tearDown(self):
app.db.session.remove()
app.db.drop_all()
if __name__ == "__main__":
unittest.main()
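# Run from the project root, as enforced by the working-directory check above:
#   cd gomeetpeople && python scripts/test.py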
|
[
"ahalle@berkeley.edu"
] |
ahalle@berkeley.edu
|
b79cb0b787dc77b2ddd4a7a402ddcff61edf845f
|
f154280f1e991a6db1c1ab01a56b70d73ed7b043
|
/PracticaWeb/flylo/migrations/0022_auto_20170427_0628.py
|
fe5f0adf2103a99ed7f5ca6eb868caba8cb26093
|
[] |
no_license
|
pausanchezv/Flylo
|
6b0dfa317102d97a83f2b7518d5cbe2efd5cb897
|
820a66e02c7ea66bbaab9f7a91b5e7f6824674bb
|
refs/heads/master
| 2020-12-10T03:25:12.384765
| 2017-06-27T05:54:54
| 2017-06-27T05:54:54
| 95,522,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-27 06:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flylo', '0021_clientflights_airline'),
]
operations = [
migrations.RemoveField(
model_name='clientflights',
name='airline',
),
migrations.AddField(
model_name='clientflights',
name='seats',
field=models.IntegerField(default=1, verbose_name='Number of seats'),
),
]
|
[
"pausanchez.admifin@gmail.com"
] |
pausanchez.admifin@gmail.com
|
b4e1a3269787ae9b42a3a530aa59faf672684c4b
|
d7c45c2dc0f4c76a49b850582ff4adcd0dae1cab
|
/client-python/pycti/entities/opencti_stix_observable_relation.py
|
a2446ee86629477674ea9b2afea26e45ff94b2d2
|
[
"Apache-2.0"
] |
permissive
|
0xmanhnv/OpenCTI-Platform
|
cc4fbc7bde5de0363f9ffbba177de0f304f223f6
|
19af1b904bd908501d7e6b700fa9b2ac210989ab
|
refs/heads/master
| 2022-07-01T06:03:05.183165
| 2020-05-11T17:18:28
| 2020-05-11T17:18:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,313
|
py
|
# coding: utf-8
import dateutil.parser
import datetime
class StixObservableRelation:
def __init__(self, opencti):
self.opencti = opencti
self.properties = """
id
stix_id_key
entity_type
relationship_type
description
weight
role_played
first_seen
last_seen
created
modified
created_at
updated_at
from {
id
stix_id_key
entity_type
observable_value
}
to {
id
stix_id_key
entity_type
observable_value
}
createdByRef {
node {
id
entity_type
stix_id_key
stix_label
name
alias
description
created
modified
}
relation {
id
}
}
markingDefinitions {
edges {
node {
id
entity_type
stix_id_key
definition_type
definition
level
color
created
modified
}
relation {
id
}
}
}
externalReferences {
edges {
node {
id
entity_type
stix_id_key
source_name
description
url
hash
external_id
created
modified
}
relation {
id
}
}
}
"""
"""
List stix_observable_relation objects
:param fromId: the id of the source entity of the relation
:param toId: the id of the target entity of the relation
:param relationType: the relation type
:param firstSeenStart: the first_seen date start filter
:param firstSeenStop: the first_seen date stop filter
:param lastSeenStart: the last_seen date start filter
:param lastSeenStop: the last_seen date stop filter
:param inferred: includes inferred relations
:param first: return the first n rows from the after ID (or the beginning if not set)
:param after: ID of the first row for pagination
:return List of stix_observable_relation objects
"""
def list(self, **kwargs):
from_id = kwargs.get("fromId", None)
from_types = kwargs.get("fromTypes", None)
to_id = kwargs.get("toId", None)
to_types = kwargs.get("toTypes", None)
relation_type = kwargs.get("relationType", None)
first_seen_start = kwargs.get("firstSeenStart", None)
first_seen_stop = kwargs.get("firstSeenStop", None)
last_seen_start = kwargs.get("lastSeenStart", None)
last_seen_stop = kwargs.get("lastSeenStop", None)
inferred = kwargs.get("inferred", None)
first = kwargs.get("first", 500)
after = kwargs.get("after", None)
order_by = kwargs.get("orderBy", None)
order_mode = kwargs.get("orderMode", None)
get_all = kwargs.get("getAll", False)
force_natural = kwargs.get("forceNatural", False)
if get_all:
first = 500
self.opencti.log(
"info",
"Listing stix_observable_relations with {type: "
+ str(relation_type)
+ ", from_id: "
+ str(from_id)
+ ", to_id: "
+ str(to_id)
+ "}",
)
query = (
"""
query StixObservableRelations($fromId: String, $fromTypes: [String], $toId: String, $toTypes: [String], $relationType: String, $firstSeenStart: DateTime, $firstSeenStop: DateTime, $lastSeenStart: DateTime, $lastSeenStop: DateTime, $inferred: Boolean, $first: Int, $after: ID, $orderBy: StixObservableRelationsOrdering, $orderMode: OrderingMode, $forceNatural: Boolean) {
stixObservableRelations(fromId: $fromId, fromTypes: $fromTypes, toId: $toId, toTypes: $toTypes, relationType: $relationType, firstSeenStart: $firstSeenStart, firstSeenStop: $firstSeenStop, lastSeenStart: $lastSeenStart, lastSeenStop: $lastSeenStop, inferred: $inferred, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode, forceNatural: $forceNatural) {
edges {
node {
"""
+ self.properties
+ """
}
}
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
globalCount
}
}
}
"""
)
result = self.opencti.query(
query,
{
"fromId": from_id,
"fromTypes": from_types,
"toId": to_id,
"toTypes": to_types,
"relationType": relation_type,
"firstSeenStart": first_seen_start,
"firstSeenStop": first_seen_stop,
"lastSeenStart": last_seen_start,
"lastSeenStop": last_seen_stop,
"inferred": inferred,
"first": first,
"after": after,
"orderBy": order_by,
"orderMode": order_mode,
"forceNatural": force_natural,
},
)
return self.opencti.process_multiple(result["data"]["stixObservableRelations"])
"""
Read a stix_observable_relation object
:param id: the id of the stix_observable_relation
:param stix_id_key: the STIX id of the stix_observable_relation
:param fromId: the id of the source entity of the relation
:param toId: the id of the target entity of the relation
:param relationType: the relation type
:param firstSeenStart: the first_seen date start filter
:param firstSeenStop: the first_seen date stop filter
:param lastSeenStart: the last_seen date start filter
:param lastSeenStop: the last_seen date stop filter
:param inferred: includes inferred relations
:return stix_observable_relation object
"""
def read(self, **kwargs):
id = kwargs.get("id", None)
from_id = kwargs.get("fromId", None)
to_id = kwargs.get("toId", None)
relation_type = kwargs.get("relationType", None)
first_seen_start = kwargs.get("firstSeenStart", None)
first_seen_stop = kwargs.get("firstSeenStop", None)
last_seen_start = kwargs.get("lastSeenStart", None)
last_seen_stop = kwargs.get("lastSeenStop", None)
inferred = kwargs.get("inferred", None)
custom_attributes = kwargs.get("customAttributes", None)
if id is not None:
self.opencti.log("info", "Reading stix_observable_relation {" + id + "}.")
query = (
"""
query StixObservableRelation($id: String!) {
stixObservableRelation(id: $id) {
"""
+ (
custom_attributes
if custom_attributes is not None
else self.properties
)
+ """
}
}
"""
)
result = self.opencti.query(query, {"id": id})
return self.opencti.process_multiple_fields(
result["data"]["stixObservableRelation"]
)
else:
result = self.list(
fromId=from_id,
toId=to_id,
relationType=relation_type,
firstSeenStart=first_seen_start,
firstSeenStop=first_seen_stop,
lastSeenStart=last_seen_start,
lastSeenStop=last_seen_stop,
inferred=inferred,
)
if len(result) > 0:
return result[0]
else:
return None
"""
Create a stix_observable_relation object
:param from_id: id of the source entity
:return stix_observable_relation object
"""
def create_raw(self, **kwargs):
from_id = kwargs.get("fromId", None)
from_role = kwargs.get("fromRole", None)
to_id = kwargs.get("toId", None)
to_role = kwargs.get("toRole", None)
relationship_type = kwargs.get("relationship_type", None)
description = kwargs.get("description", None)
role_played = kwargs.get("role_played", None)
first_seen = kwargs.get("first_seen", None)
last_seen = kwargs.get("last_seen", None)
weight = kwargs.get("weight", None)
id = kwargs.get("id", None)
stix_id_key = kwargs.get("stix_id_key", None)
created = kwargs.get("created", None)
modified = kwargs.get("modified", None)
created_by_ref = kwargs.get("createdByRef", None)
marking_definitions = kwargs.get("markingDefinitions", None)
self.opencti.log(
"info",
"Creating stix_observable_relation {"
+ from_role
+ ": "
+ from_id
+ ", "
+ to_role
+ ": "
+ to_id
+ "}.",
)
query = (
"""
mutation StixObservableRelationAdd($input: StixObservableRelationAddInput!) {
stixObservableRelationAdd(input: $input) {
"""
+ self.properties
+ """
}
}
"""
)
result = self.opencti.query(
query,
{
"input": {
"fromId": from_id,
"fromRole": from_role,
"toId": to_id,
"toRole": to_role,
"relationship_type": relationship_type,
"description": description,
"role_played": role_played,
"first_seen": first_seen,
"last_seen": last_seen,
"weight": weight,
"internal_id_key": id,
"stix_id_key": stix_id_key,
"created": created,
"modified": modified,
"createdByRef": created_by_ref,
"markingDefinitions": marking_definitions,
}
},
)
return self.opencti.process_multiple_fields(
result["data"]["stixObservableRelationAdd"]
)
"""
Create a stix_observable_relation object only if it not exists, update it on request
:param name: the name of the stix_observable_relation
:return stix_observable_relation object
"""
def create(self, **kwargs):
from_id = kwargs.get("fromId", None)
from_type = kwargs.get("fromType", None)
to_type = kwargs.get("toType", None)
to_id = kwargs.get("toId", None)
relationship_type = kwargs.get("relationship_type", None)
description = kwargs.get("description", None)
role_played = kwargs.get("role_played", None)
first_seen = kwargs.get("first_seen", None)
last_seen = kwargs.get("last_seen", None)
weight = kwargs.get("weight", None)
id = kwargs.get("id", None)
stix_id_key = kwargs.get("stix_id_key", None)
created = kwargs.get("created", None)
modified = kwargs.get("modified", None)
created_by_ref = kwargs.get("createdByRef", None)
marking_definitions = kwargs.get("markingDefinitions", None)
update = kwargs.get("update", False)
ignore_dates = kwargs.get("ignore_dates", False)
custom_attributes = """
id
entity_type
name
description
weight
first_seen
last_seen
"""
stix_relation_result = None
if stix_id_key is not None:
stix_relation_result = self.read(
id=stix_id_key, customAttributes=custom_attributes
)
if stix_relation_result is None:
if (
ignore_dates is False
and first_seen is not None
and last_seen is not None
):
first_seen = dateutil.parser.parse(first_seen)
first_seen_start = (first_seen + datetime.timedelta(days=-1)).strftime(
"%Y-%m-%dT%H:%M:%S+00:00"
)
first_seen_stop = (first_seen + datetime.timedelta(days=1)).strftime(
"%Y-%m-%dT%H:%M:%S+00:00"
)
last_seen = dateutil.parser.parse(last_seen)
last_seen_start = (last_seen + datetime.timedelta(days=-1)).strftime(
"%Y-%m-%dT%H:%M:%S+00:00"
)
last_seen_stop = (last_seen + datetime.timedelta(days=1)).strftime(
"%Y-%m-%dT%H:%M:%S+00:00"
)
else:
first_seen_start = None
first_seen_stop = None
last_seen_start = None
last_seen_stop = None
stix_relation_result = self.read(
fromId=from_id,
toId=to_id,
relationType=relationship_type,
firstSeenStart=first_seen_start,
firstSeenStop=first_seen_stop,
lastSeenStart=last_seen_start,
lastSeenStop=last_seen_stop,
customAttributes=custom_attributes,
)
if stix_relation_result is not None:
if update:
if description is not None:
self.update_field(
id=stix_relation_result["id"],
key="description",
value=description,
)
stix_relation_result["description"] = description
if weight is not None:
self.update_field(
id=stix_relation_result["id"], key="weight", value=str(weight)
)
stix_relation_result["weight"] = weight
if first_seen is not None:
new_first_seen = dateutil.parser.parse(first_seen)
old_first_seen = dateutil.parser.parse(
stix_relation_result["first_seen"]
)
if new_first_seen < old_first_seen:
self.update_field(
id=stix_relation_result["id"],
key="first_seen",
value=first_seen,
)
stix_relation_result["first_seen"] = first_seen
if last_seen is not None:
new_last_seen = dateutil.parser.parse(last_seen)
old_last_seen = dateutil.parser.parse(
stix_relation_result["last_seen"]
)
if new_last_seen > old_last_seen:
self.update_field(
id=stix_relation_result["id"],
key="last_seen",
value=last_seen,
)
stix_relation_result["last_seen"] = last_seen
return stix_relation_result
else:
roles = self.opencti.resolve_role(relationship_type, from_type, to_type)
if roles is not None:
final_from_id = from_id
final_to_id = to_id
else:
roles = self.opencti.resolve_role(relationship_type, to_type, from_type)
if roles is not None:
final_from_id = to_id
final_to_id = from_id
else:
self.opencti.log(
"error",
"Relation creation failed, cannot resolve roles: {"
+ relationship_type
+ ": "
+ from_type
+ ", "
+ to_type
+ "}",
)
return None
return self.create_raw(
fromId=final_from_id,
fromRole=roles["from_role"],
toId=final_to_id,
toRole=roles["to_role"],
relationship_type=relationship_type,
description=description,
first_seen=first_seen,
last_seen=last_seen,
weight=weight,
role_played=role_played,
id=id,
stix_id_key=stix_id_key,
created=created,
modified=modified,
createdByRef=created_by_ref,
markingDefinitions=marking_definitions,
)
"""
Update a stix_observable_relation object field
:param id: the stix_observable_relation id
:param key: the key of the field
:param value: the value of the field
:return The updated stix_observable_relation object
"""
def update_field(self, **kwargs):
id = kwargs.get("id", None)
key = kwargs.get("key", None)
value = kwargs.get("value", None)
if id is not None and key is not None and value is not None:
self.opencti.log(
"info",
"Updating stix_observable_relation {" + id + "} field {" + key + "}.",
)
query = (
"""
mutation StixObservableRelationEdit($id: ID!, $input: EditInput!) {
stixObservableRelationEdit(id: $id) {
fieldPatch(input: $input) {
"""
+ self.properties
+ """
}
}
}
"""
)
result = self.opencti.query(
query, {"id": id, "input": {"key": key, "value": value}}
)
return self.opencti.process_multiple_fields(
result["data"]["stixObservableRelationEdit"]["fieldPatch"]
)
else:
self.opencti.log("error", "Missing parameters: id and key and value")
return None
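# --- Illustrative usage (a sketch) ------------------------------------------
# Assumes an OpenCTIApiClient that exposes this helper as
# `client.stix_observable_relation` (attribute name inferred from the pycti
# naming convention); URL, token, ids and the relation type are placeholders.
if __name__ == "__main__":
    from pycti import OpenCTIApiClient

    client = OpenCTIApiClient("https://opencti.example.com", "<api-token>")
    relations = client.stix_observable_relation.list(
        fromId="<observable-id>",
        relationType="corresponds",  # example relation type
        first=50,
    )
    print(relations)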
|
[
"ngovantu1211@gmail.com"
] |
ngovantu1211@gmail.com
|
8b8dc423f44e89a08daa4f54c382b61ef245fdae
|
d3dd39f878c4dbe38f63c5760fd4dab5f5680ab5
|
/add_factions.py
|
a541cc5be72af6e13f991ea34f39f0d3a526c244
|
[] |
no_license
|
drvarner/infinity
|
2607d58795d395d90b5cb33c2798ef6422346593
|
13ebe05d8811b77e4603aff17e7d233bf09262e6
|
refs/heads/master
| 2021-01-21T18:57:43.670578
| 2017-05-27T23:28:43
| 2017-05-27T23:28:43
| 92,098,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
from app import db
from app.models import Faction
pano = Faction('PanOceania', 101)
yuji = Faction('Yu Jing', 201)
aria = Faction('Ariadna', 301)
haqq = Faction('Haqqislam', 401)
noma = Faction('Nomads', 501)
comb = Faction('Combined Army', 601)
alep = Faction('Aleph', 701)
toha = Faction('Tohaa', 801)
db.session.add(pano)
db.session.add(yuji)
db.session.add(aria)
db.session.add(haqq)
db.session.add(noma)
db.session.add(comb)
db.session.add(alep)
db.session.add(toha)
db.session.commit()
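# Equivalent, more compact form of the block above (a sketch; it would run
# INSTEAD of the lines above, not in addition, to avoid duplicate rows):
#
#   for name, code in [('PanOceania', 101), ('Yu Jing', 201), ('Ariadna', 301),
#                      ('Haqqislam', 401), ('Nomads', 501), ('Combined Army', 601),
#                      ('Aleph', 701), ('Tohaa', 801)]:
#       db.session.add(Faction(name, code))
#   db.session.commit()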
|
[
"david.r.varner@gmail.com"
] |
david.r.varner@gmail.com
|
f2379650e0dd6343b60ab59650eff837015f9c3e
|
91b80ef798cbcdaab7f6ae0be994f5a3b12f1515
|
/199_2.py
|
7fd6af39a342243b36e1fd985043c0e445de5aba
|
[] |
no_license
|
luckkyzhou/leetcode
|
13377565a1cc2c7861601ca5d55f6b83c63d490e
|
43bcf65d31f1b729ac8ca293635f46ffbe03c80b
|
refs/heads/master
| 2021-06-21T11:26:06.114096
| 2021-03-24T21:06:15
| 2021-03-24T21:06:15
| 205,568,339
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution(object):
def rightSideView(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
res = list()
if not root:
return res
queue = list()
queue.append(root)
while queue:
res.append(queue[-1].val)
for i in range(len(queue)):
tmp = queue.pop(0)
if tmp.left:
queue.append(tmp.left)
if tmp.right:
queue.append(tmp.right)
return res
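# Illustrative check (traceable through the level-order loop above):
# the tree [1, 2, 3, None, 5, None, 4] seen from the right is [1, 3, 4].
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.right, root.right.right = TreeNode(5), TreeNode(4)
    print(Solution().rightSideView(root))  # [1, 3, 4]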
|
[
"luckkyzhou@gmail.com"
] |
luckkyzhou@gmail.com
|
5155c3a801662511c62cdf2006a8d750af586233
|
b8f51945f532350bb5388490da0366bcecee042f
|
/model/disciplina_ofertada.py
|
a2854f41c8b0f7d1ffd2deef38b9c12f82cb8866
|
[] |
no_license
|
Flaks009/api-cadastros
|
2d0e86c29050b1137361cec35f8390bb8c177021
|
fb1e2a42fdc5769c705a12b03dc31a3aafaaef2b
|
refs/heads/master
| 2020-05-18T08:33:54.267170
| 2019-05-07T18:38:29
| 2019-05-07T18:38:29
| 184,298,198
| 0
| 0
| null | 2019-10-03T01:42:40
| 2019-04-30T16:44:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
# id: integer, id_disciplina: integer, id_professor: integer, ano: integer, semestre: integer, turma: text, id_curso: integer, data: date
class Disciplina_ofertada():
def __init__(self, id, id_disciplina, id_professor, id_curso, ano, semestre, turma, data):
self.__id = id
self.__id_disciplina = id_disciplina
self.__id_professor = id_professor
self.__id_curso = id_curso
self.__ano = ano
self.__semestre = semestre
self.__turma = turma
self.__data = data
def atualiza(self, id, id_disciplina, id_professor, id_curso, ano, semestre, turma, data):
self.__id = id
self.__id_disciplina = id_disciplina
self.__id_professor = id_professor
self.__id_curso = id_curso
self.__ano = ano
self.__semestre = semestre
self.__turma = turma
self.__data = data
return self
@property
def id(self):
return self.__id
@property
def id_disciplina(self):
return self.__id_disciplina
@id_disciplina.setter
def id_disciplina(self, id_disciplina):
self.__id_disciplina = id_disciplina
@property
def id_professor(self):
return self.__id_professor
@id_professor.setter
def id_professor(self, id_professor):
self.__id_professor = id_professor
@property
def id_curso(self):
return self.__id_curso
@id_curso.setter
def id_curso(self, id_curso):
self.__id_curso = id_curso
@property
def ano(self):
return self.__ano
@property
def semestre(self):
return self.__semestre
@property
def turma(self):
return self.__turma
@property
def data(self):
return self.__data
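# Illustrative construction (hypothetical values):
if __name__ == "__main__":
    oferta = Disciplina_ofertada(1, 10, 7, 3, 2019, 1, 'A', '2019-02-01')
    oferta.id_professor = 8            # fields with setters can be reassigned
    print(oferta.ano, oferta.turma)    # read-only fields expose getters only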
|
[
"bruno.flaks@aluno.faculdadeimpacta.com.br"
] |
bruno.flaks@aluno.faculdadeimpacta.com.br
|
c003668bad68a6261c5ef756832963ba513c2b68
|
05b859a82f8b634a760c5d3998ba2a0eb3ca08d8
|
/migrations/versions/145df4f73ca2_.py
|
39529c0f61589b3f091d7d7c56f3e129a7398867
|
[
"MIT"
] |
permissive
|
akelshareif/fiscally
|
e69b43ddde830881cbf99eef8cae976508451972
|
ca44ca00537d2b9ef1bca8a3a67b66427394dc72
|
refs/heads/master
| 2022-12-03T02:23:23.869998
| 2020-08-26T19:21:41
| 2020-08-26T19:21:41
| 285,939,808
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
"""empty message
Revision ID: 145df4f73ca2
Revises: 22527c758991
Create Date: 2020-08-17 19:12:58.148706
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '145df4f73ca2'
down_revision = '22527c758991'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('savings_goal', sa.Column('previous_amount', sa.Float(precision=2), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('savings_goal', 'previous_amount')
# ### end Alembic commands ###
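# Typically applied with `alembic upgrade head`, or with `flask db upgrade` if
# the project drives Alembic through Flask-Migrate (assumed from the
# migrations/versions directory layout).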
|
[
"abdelkareemelshareif@Abdelkareems-MacBook-Pro.local"
] |
abdelkareemelshareif@Abdelkareems-MacBook-Pro.local
|
a5a9181fee6a2e332de834a4584fec005a8df231
|
d83c981aab3c299e2a0e5c4329550acbaaa5e031
|
/Week 6/venv/Scripts/pip3-script.py
|
d9351c4280b2b2911207578f6d30495b0f34a140
|
[] |
no_license
|
Adit-COCO-Garg/Computer-Science-Intro-at-RIT
|
1570a24b12962bad50a436f5252b563173271fb7
|
04af43edd559163ac01e20f6b62a3c2711740acd
|
refs/heads/master
| 2020-04-16T22:36:45.146161
| 2020-01-02T07:08:46
| 2020-01-02T07:08:46
| 165,975,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#!"Z:\IGMProfile\Desktop\SEM 3\CSCI-141\Week 6\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"ag9126@ad.rit.edu"
] |
ag9126@ad.rit.edu
|
2f70eac9c2392ead56e7947b876359c87083acf0
|
fceef7219b16f067054a5d0350f503b48660d54a
|
/smartcity/profile/views.py
|
7f157f413188f59dcb46f8f73e091b810f7a62db
|
[] |
no_license
|
jiashengc/IFB299
|
e67a393df3aa9f2174a6bfcd1eb8183f69d44536
|
6bd7eea88700d9d715f6cec50e940babae5b9ef9
|
refs/heads/master
| 2021-03-24T12:38:14.931668
| 2017-11-01T11:37:39
| 2017-11-01T11:37:39
| 98,381,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseForbidden
from django.shortcuts import HttpResponseRedirect
from django.core import serializers
from splash import models
import json
def profile(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
    # serializers.serialize expects an iterable, so wrap the single profile
    profile = [request.user.profile]
return render(request, 'profiles/profile.html', context={
"profile": serializers.serialize('json', profile),
})
|
[
"n9483985@qut.edu.au"
] |
n9483985@qut.edu.au
|
24f127ca764cdb6bc48af66564549bb9072a956c
|
fba2f0cb205f3456f78e47db5470b5244e5fcfaf
|
/problem1.py
|
174f1ed4e6574b2d9f23edbb3b299c1fecae8f0f
|
[] |
no_license
|
Donkey1996/Perceptron
|
0d74698f818dd3a6b7b9da197a2bdfa3ef4d96de
|
e6b83db7d987938052eb923a7eff1efa29547933
|
refs/heads/master
| 2020-06-24T22:08:19.105738
| 2019-07-27T02:41:18
| 2019-07-27T02:41:18
| 199,105,996
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
import pandas as pd
from visualize import visualize_scatter
import sys
class Perceptron:
def __init__(self):
self.x1 = []
self.x2 = []
self.y = []
self.weights = [0, 0, 0]
self.fx = []
def read(self, data):
self.x1 = list(data['x1'])
self.x2 = list(data['x2'])
self.y = list(data['y'])
self.fx = [0]*len(self.x1)
def f_x(self):
w0, w1, w2 = self.weights[0], self.weights[1], self.weights[2]
for i in range(len(self.x1)):
if w0 + w1*self.x1[i] + w2*self.x2[i] > 0:
self.fx[i] = 1
else:
self.fx[i] = -1
return self.fx
    def is_convergent(self, fx):
        # converged when every prediction matches its label
        return fx == self.y
    def fit(self, output):
        # implement PLA
        file = open(output, 'w')
        # Write the initial weights in the same w1, w2, bias(w0) order used for
        # every later line (the original header line used a different order).
        file.write(str(self.weights[1])+","+str(self.weights[2])+","+str(self.weights[0])+"\n")
while not self.is_convergent(self.f_x()):
#update weights using all examples until converged
for i in range(len(self.x1)):
if not self.y[i]*self.fx[i] > 0:
self.weights[0] += self.y[i]
self.weights[1] += self.x1[i]*self.y[i]
self.weights[2] += self.x2[i]*self.y[i]
#if self.is_convergent(self.f_x()):
# break
file.write(str(self.weights[1])+","+str(self.weights[2])+","+str(self.weights[0])+"\n")
def main():
input, output = sys.argv[1], sys.argv[2]
data = pd.read_csv(input, names=['x1', 'x2', 'y'])
p = Perceptron()
p.read(data)
p.fit(output)
#visualize_scatter(data, feat1='x1', feat2='x2', labels='y', weights=[p.weights[1], p.weights[2], p.weights[0]])
if __name__ == '__main__':
main()
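# Run from the command line (per main() above):
#   python problem1.py input.csv output.csv
# where input.csv holds rows of "x1,x2,y" with y in {-1, 1}; each PLA pass
# appends the current weights to output.csv as "w1,w2,bias".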
|
[
"noreply@github.com"
] |
Donkey1996.noreply@github.com
|
c584c53cbfbda7c86cd1e5675fabd0dd424f48ed
|
f3198eaa900c8d994fdfcc590b29720c790ab262
|
/docker-compose/dags/data_piplines_book/chapter3/daily_scheduled.py
|
519cb1e8aa03a3d53abb958d804bde6229c3be9b
|
[] |
no_license
|
markday1962/airflow
|
dceef2ebf51584bea6a3070e43fdb3233cb82351
|
c20bb8c7cacd1061f6b8e3a335bbfa4cadb0dcd2
|
refs/heads/master
| 2023-01-03T14:53:15.488958
| 2020-10-16T12:39:03
| 2020-10-16T12:39:03
| 289,263,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
from datetime import datetime
from pathlib import Path
import pandas as pd
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
dag = DAG(
dag_id="daily_scheduled",
start_date=datetime(year=2020, month=10, day=16),
schedule_interval="@daily",
)
# First fetch and store the events from the API
fetch_events = BashOperator(
task_id="fetch_events",
bash_command=(
"mkdir -p /data/daily && "
"curl -o /data/daily/events.json http://10.39.0.245:5000/events"
),
dag=dag,
)
# Load the events, process, and write results to CSV
def _calculate_stats(input_path, output_path):
"""Calculates event statistics."""
events = pd.read_json(input_path)
stats = events.groupby(["date", "user"]).size().reset_index()
Path(output_path).parent.mkdir(exist_ok=True)
stats.to_csv(output_path, index=False)
# Calculate stats
calculate_stats = PythonOperator(
task_id="calculate_stats",
python_callable=_calculate_stats,
op_kwargs={"input_path": "/data/daily/events.json", "output_path": "/data/daily/stats.csv"},
dag=dag,
)
# Set order of execution
fetch_events >> calculate_stats
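# A quick way to exercise a single task outside the scheduler (Airflow 1.x
# CLI, matching the 1.x-style operator imports above; the date is an example):
#   airflow test daily_scheduled fetch_events 2020-10-16
#   airflow test daily_scheduled calculate_stats 2020-10-16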
|
[
"mark.day@aistemos.com"
] |
mark.day@aistemos.com
|
2abbbf73517dccdb9129cff5d91a75ec2f606013
|
648e0eef462faf933cde77f88869033722967ac3
|
/CodeSignal/stringsRearrangement.py
|
2a7ce6503626f432eb5e0f2a29f298eff32060a4
|
[
"MIT"
] |
permissive
|
andremichalowski/code-challenge
|
3ff8de2381de5e3ee3538a08cafb7172ded66766
|
9ea037cd4e3d6bf319b2ecab5badba94c2329528
|
refs/heads/main
| 2023-01-28T08:55:00.843177
| 2020-12-05T19:43:41
| 2020-12-05T19:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
# CodeSignal problem: https://app.codesignal.com/arcade/intro/level-7/PTWhv2oWqd6p4AHB9
# This solution on CodeSignal: https://app.codesignal.com/arcade/intro/level-7/PTWhv2oWqd6p4AHB9/solutions?solutionId=cNGxjcQ9Mti5fmTNe
# Worst case time complexity O(n!)
# Space complexity O(n^2)
# Commented solution: stringsRearrangement-commented.py
def stringsRearrangement(inputArray):
for this_word in inputArray:
remaining = inputArray[:]
remaining.remove(this_word)
if test_remaining(this_word, remaining):
return True
return False
def almost_same(this_word, next_word):
different = False
for i in range(len(this_word)):
if this_word[i] != next_word[i]:
if different:
return False
else:
different = True
return different
def test_remaining(this_word, remaining):
if len(remaining) == 1:
return almost_same(this_word, *remaining)
for next_word in remaining:
if almost_same(this_word, next_word):
rest = remaining[:]
rest.remove(next_word)
if test_remaining(next_word, rest):
return True
return False
# LocalWords: stringsRearrangement cNGxjcQ9Mti5fmTNe
# LocalWords: solutionId PTWhv2oWqd6p4AHB9
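# Quick sanity checks (inputs assumed from the CodeSignal examples linked above):
if __name__ == '__main__':
    assert stringsRearrangement(["aba", "bbb", "bab"]) is False
    assert stringsRearrangement(["ab", "bb", "aa"]) is True
    print("stringsRearrangement sanity checks passed")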
|
[
"harry@gebel.tech"
] |
harry@gebel.tech
|
14260221a37d4da5624aed84ef13269be8805d26
|
16c2077504a905be3f9db0c6e46d308880926dbb
|
/ChatBotWEB/QQChat/textsimilar/test.py
|
7ec1defdbbe2b574b9970d4485d97e4b2cd5c5b9
|
[] |
no_license
|
Tr0py/QQbot_Kia
|
a9f359d1b97a2b863a7bc9ffc46ea1ac67300b7c
|
bf0c0d807c930fad5f665ad281ad7ef415093515
|
refs/heads/master
| 2020-03-31T11:58:11.185969
| 2018-10-09T07:05:25
| 2018-10-09T07:05:25
| 152,198,136
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
#!/usr/bin/env python3
# coding: utf-8
# File: test.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-4-27
from sim_cilin import *
from sim_hownet import *
from sim_simhash import *
from sim_tokenvector import *
from sim_vsm import *
def test():
cilin = SimCilin()
hownet = SimHownet()
simhash = SimHaming()
simtoken = SimTokenVec()
simvsm = SimVsm()
while 1:
text1 = input('enter sent1:').strip()
text2 = input('enter sent2:').strip()
print('cilin', cilin.distance(text1, text2))
print('hownet', hownet.distance(text1, text2))
print('simhash', simhash.distance(text1, text2))
print('simtoken', simtoken.distance(text1, text2))
print('simvsm', simvsm.distance(text1, text2))
test()
|
[
"1610839@mail.nankai.edu.cn"
] |
1610839@mail.nankai.edu.cn
|
ab9afd063b8a0d3acec0c3d527357298316bc819
|
1b3700b7ad398eeba342383e8cb977216f69e906
|
/api/v1/utils/decorator.py
|
62e801125083fa69b22404925c4a4580114b9913
|
[
"MIT"
] |
permissive
|
jorgep0496/Backend_FinalProject4Geek
|
cbe024e1ca8e9eef69343083339482abd99e7153
|
d61f4702335d75577a7d0b19151fa6f4028654c5
|
refs/heads/main
| 2023-01-21T01:12:35.852012
| 2020-12-02T05:14:56
| 2020-12-02T05:14:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
from functools import wraps
from flask import request
from api.v1.services.auth_service import Auth
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
data, status = Auth.get_logged_in_user(request)
token = data.get('data')
if not token:
return data, status
return f(*args, **kwargs)
return decorated
def admin_token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
data, status = Auth.get_logged_in_user(request)
token = data.get('data')
if not token:
return data, status
admin = token.get('admin')
if not admin:
response_object = {
'status': 'fail',
'message': 'Token de administrador requerido'
}
return response_object, 401
return f(*args, **kwargs)
return decorated
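# A hypothetical usage sketch (route and handler names are illustrative; run
# this module directly to try it, assuming the api.v1 package is importable):
if __name__ == '__main__':
    from flask import Flask, jsonify

    app = Flask(__name__)

    @app.route('/api/v1/protected')
    @token_required
    def protected():
        # only reached when Auth.get_logged_in_user accepted the token
        return jsonify({'status': 'success'})

    app.run(debug=True)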
|
[
"lilianmonterolopez@gmail.com"
] |
lilianmonterolopez@gmail.com
|
2708743f94efe43f3b567232aa149f30939b96ae
|
b9ff578716936db04baa887c8e0be0dc43fda682
|
/thirdparty/node_and_linklist.py
|
87ddd96e5ea1645e431e7b9b489f714433e5f00b
|
[
"MIT"
] |
permissive
|
csyhping/Advent-of-Code-2019
|
a7791beaf33a5096e8cffedbaad8b788192523fa
|
86ea7fc4340619ddd732c070c4ad518133960fed
|
refs/heads/master
| 2020-09-23T23:25:38.032306
| 2019-12-20T09:38:38
| 2019-12-20T09:38:38
| 225,614,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,150
|
py
|
# apply Node and linked list in Python
# define Node class
class Node():
"""Create a new node, args1 = data, args2 = next"""
def __init__(self, data = None, next = None):
super(Node, self).__init__()
self.data = data
self.next = next
def __repr__(self):
# print the data of the node
return str(self.data)
# define Link list class:
class SingleLinkList():
"""Create a new node"""
def __init__(self):
super(SingleLinkList, self).__init__()
self.head = None
def __len__(self):
# the length of the list
count = 0
curr = self.head
while curr is not None:
count += 1
curr = curr.next
return count
def insertFront(self, insert_data):
# insert a node at the front of the list
# Return value: the new head node
if insert_data is None:
            print('===[NOTE]=== Cannot insert [None] data.')
return None
node = Node(insert_data, self.head)
self.head = node
return node
def append(self, append_data):
# append a node at the last of the list
# Return value: the new last node
if append_data is None:
return None
node = Node(append_data)
if self.head is None:
# if the list is empty
self.head = node
return node
curr = self.head
while curr.next is not None:
curr = curr.next
curr.next = node
return node
def is_empty(self):
        return self.head is None
def show(self):
print('--[NOTE]--list show start')
curr = self.head
while curr is not None:
print(curr.data)
curr = curr.next
print('--[NOTE]--list show end')
# # test single link list #
# l1 = SingleLinkList()
# l2 = SingleLinkList()
# l3 = SingleLinkList()
# print(len(l1))
# a = l1.insertFront('new')
# b = l1.insertFront('new2')
# print(a, b)
# print(len(l1))
# print(len(l2))
# print(l1.is_empty())
# c = l2.append('last')
# d = l2.append('last2')
# print(c, d)
# print(len(l2))
# for i in range(7):
# l3.append(i)
# print(len(l3))
# # test node class #
# n1 = Node('fyphia')
# print(n1)
# n2 = Node('loves')
# n3 = Node('placido')
# n1.next = n2
# n2.next = n3
# def printNodes(node):
# while node:
# print('current node is ', node)
# node = node.next
# printNodes(n1)
# # test node class #
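# A small runnable demo of the classes above (values are illustrative):
if __name__ == '__main__':
    lst = SingleLinkList()
    for value in ['a', 'b', 'c']:
        lst.append(value)
    lst.insertFront('head')
    lst.show()             # head, a, b, c
    print(len(lst))        # 4
    print(lst.is_empty())  # False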
|
[
"csyhping@connect.hku.hk"
] |
csyhping@connect.hku.hk
|
34d3a738bb8e035c680612508bb860ecf43b4724
|
7a6049d9d99b676bde93fc1564ab736eaa2d80e7
|
/WebServer/public/chatbot/utils/Preprocess.py
|
d4675d9a21781f6024355281c00ce00a8654bbe5
|
[] |
no_license
|
Aromdami/HybridAICharacter
|
1aff73ec7fecab523c14ee1bef9d999a67c4614e
|
91bf4e9824b8fa7c7a9ab5f99d2df19ecf8e49bf
|
refs/heads/main
| 2023-08-23T12:33:22.155801
| 2021-10-13T04:11:38
| 2021-10-13T04:11:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
from konlpy.tag import Komoran
import pickle
import jpype
class Preprocess:
def __init__(self, word2index_dic='', userdic=None):
        # load the word-index dictionary
if(word2index_dic != ''):
f = open(word2index_dic, "rb")
self.word_index = pickle.load(f)
f.close()
else:
self.word_index = None
        # initialize the morphological analyzer
self.komoran = Komoran(userdic=userdic)
        # POS tags to exclude
        # reference: https://docs.komoran.kr/firststep/postypes.html
        # remove particles and symbols,
        # verb/adjective endings,
        # and suffixes
self.exclusion_tags = [
'JKS', 'JKC', 'JKG', 'JKO', 'JKB', 'JKV', 'JKQ',
'JX', 'JC',
'SF', 'SP', 'SS', 'SE', 'SO',
'EP', 'EF', 'EC', 'ETN', 'ETM',
'XSN', 'XSV', 'XSA'
]
    # POS tagging with the morphological analyzer
def pos(self, sentence):
jpype.attachThreadToJVM()
return self.komoran.pos(sentence)
    # remove stop tags and keep only the needed POS entries
def get_keywords(self, pos, without_tag=False):
f = lambda x: x in self.exclusion_tags
word_list = []
for p in pos:
if f(p[1]) is False:
word_list.append(p if without_tag is False else p[0])
return word_list
    # convert keywords to a word-index sequence
def get_wordidx_sequence(self, keywords):
if self.word_index is None:
return []
w2i = []
for word in keywords:
try:
w2i.append(self.word_index[word])
except KeyError:
                # word not in the dictionary: handle as OOV
w2i.append(self.word_index['OOV'])
return w2i
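# A hypothetical usage sketch (assumes konlpy/Komoran is installed, a JVM is
# available, and a word-index pickle exists at the illustrative path below):
if __name__ == '__main__':
    p = Preprocess(word2index_dic='chatbot_dict.bin')  # illustrative path
    pos = p.pos('오늘 날씨는 어때요')  # sample sentence: "How is the weather today?"
    keywords = p.get_keywords(pos, without_tag=True)
    print(p.get_wordidx_sequence(keywords))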
|
[
"noreply@github.com"
] |
Aromdami.noreply@github.com
|
12b5d42a0a18f840c1a60bb40b91bd7aa8ce0507
|
6014ae7deb5066555acaa0881fb3d0a5debeefef
|
/week04/Film_News/manage.py
|
769b7945b53fe588a1b9c50b850568b434c9e29e
|
[] |
no_license
|
wdlcoke/Python006-006
|
6e1b9355efd429930b86240b9c838933516237d5
|
4d3be1ef4ca1e136a36d7937ae3c7f354673f5ec
|
refs/heads/main
| 2023-04-02T18:44:01.674768
| 2021-03-21T15:37:47
| 2021-03-21T15:37:47
| 323,491,211
| 0
| 0
| null | 2020-12-22T01:41:49
| 2020-12-22T01:41:48
| null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Film_News.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"saibeilingyuanfe@126.com"
] |
saibeilingyuanfe@126.com
|
37f6eea0ea5e08df396251ae139b189c804a84c1
|
989eba1d1a9bb60d21eba86f137171a3c453bee4
|
/vision/camera_tryhard.py
|
cfee5f701602f1309284f98bd95dbe9e94450e5b
|
[] |
no_license
|
herculanodavi/balizabot
|
b698c2aab54ce9504bebc09dc19eae7eb1c96bb5
|
9b606b7fc70575638760d668dfc30a7c1abe613c
|
refs/heads/master
| 2021-03-13T01:25:49.488962
| 2017-07-07T15:51:48
| 2017-07-07T15:51:48
| 91,476,584
| 0
| 1
| null | 2017-06-13T18:48:42
| 2017-05-16T15:50:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
import numpy as np
import cv2

# Camera properties to inspect (modern OpenCV exposes these as cv2.CAP_PROP_*).
properties = ["CAP_PROP_FRAME_WIDTH",   # Width of the frames in the video stream.
              "CAP_PROP_FRAME_HEIGHT",  # Height of the frames in the video stream.
              "CAP_PROP_BRIGHTNESS",    # Brightness of the image (only for cameras).
              "CAP_PROP_CONTRAST",      # Contrast of the image (only for cameras).
              "CAP_PROP_SATURATION",    # Saturation of the image (only for cameras).
              "CAP_PROP_GAIN"]

cap = cv2.VideoCapture(1)
for prop in properties:
    val = cap.get(getattr(cv2, prop))  # getattr avoids the old eval("cv2.cv."+prop)
    print(prop + ": " + str(val))

gain = 0
cap.set(cv2.CAP_PROP_GAIN, gain)
brightness = 60
cap.set(cv2.CAP_PROP_BRIGHTNESS, brightness)
contrast = 20
cap.set(cv2.CAP_PROP_CONTRAST, contrast)
saturation = 20
cap.set(cv2.CAP_PROP_SATURATION, saturation)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    # rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rgb = frame

    # Display the resulting frame
    cv2.imshow('frame', rgb)
    key = cv2.waitKey(4) & 0xFF  # mask off modifier bits for portability
    if key == ord('x'):
        break
    elif key == ord('w'):
        brightness += 0.1
        cap.set(cv2.CAP_PROP_BRIGHTNESS, brightness)
    elif key == ord('s'):
        brightness -= 0.1
        cap.set(cv2.CAP_PROP_BRIGHTNESS, brightness)
    elif key == ord('d'):  # was the raw keycode 1048676 (0x100064, i.e. 'd' plus modifier bits)
        contrast += 0.1
        cap.set(cv2.CAP_PROP_CONTRAST, contrast)
    elif key == ord('a'):
        contrast -= 0.1
        cap.set(cv2.CAP_PROP_CONTRAST, contrast)
    elif key == ord('e'):
        saturation += 0.1
        cap.set(cv2.CAP_PROP_SATURATION, saturation)
    elif key == ord('q'):
        saturation -= 0.1
        cap.set(cv2.CAP_PROP_SATURATION, saturation)
    else:
        continue
    print("\n\n")
    for prop in properties:
        val = cap.get(getattr(cv2, prop))
        print(prop + ": " + str(val))

# When everything done, release the capture
cap.release()
# cv2.destroyAllWindows()
|
[
"herculanodavi@gmail.com"
] |
herculanodavi@gmail.com
|
226eb526c9383b4780dc18c15674b5da25188b0e
|
17db1d93f22021392b834623390486bb47efa414
|
/meiduo_mall/meiduo_mall/apps/orders/urls.py
|
c02d3b0ab075c9a7d5cb8d280d96c0f1a7cef8b5
|
[] |
no_license
|
lisa530/meiduo
|
634f3bf3b0b7aac590e4ec28d7fcb08aa75d2bb8
|
9081dc0d16090f23c006727934880f0e79d1a7f7
|
refs/heads/master
| 2022-12-18T22:24:30.297694
| 2020-09-20T10:06:57
| 2020-09-20T10:06:57
| 292,215,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # order settlement
    url(r'^orders/settlement/$', views.OrderSettlementView.as_view(), name='settlement'),
    # submit an order
    url(r'^orders/commit/$', views.OrderCommitView.as_view()),
    # order submitted successfully
    url(r'^orders/success/$', views.OrderSuccessView.as_view()),
    # my orders
    url(r'^orders/info/(?P<page_num>\d+)/$', views.UserOrderInfoView.as_view(), name='info'),
    # order reviews
    url(r'^orders/comment/$', views.OrderCommentView.as_view()),
    # show product reviews
    url(r'^comments/(?P<sku_id>\d+)/$', views.GoodsCommentView.as_view()),
]
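# A hypothetical sketch of reversing the named routes above (assumes this app's
# URLs are included at the site root without a namespace):
#
#     from django.urls import reverse
#     reverse('settlement')                    # -> '/orders/settlement/'
#     reverse('info', kwargs={'page_num': 1})  # -> '/orders/info/1/'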
|
[
"lisa_283022177@126.com"
] |
lisa_283022177@126.com
|
fbce6c099d23663062ea7acbaffa0159634404e6
|
a968ccd89787540982de76a5bfc7c7efbf811189
|
/projects/breakout/analog_MCP3008/saatovastus.py
|
8db2a0358082c2557015d408c0d700466e6cdac9
|
[] |
no_license
|
Pohjois-Tapiolan-lukio/raspberry_pi-projects
|
43ce8e0da0e3a96fbb5c7520fc831efcdb4490d3
|
846b2625eb0bf7d87c84288d3ec1c25c146361d3
|
refs/heads/master
| 2021-05-06T11:42:41.269727
| 2019-06-06T05:29:59
| 2019-06-06T05:29:59
| 114,273,386
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
'''
Pin wiring between the Raspberry Pi and the A/D converter
Raspberry   --> MCP3008
Pin1 (3.3V) --> Pin16 (VDD)
Pin1        --> Pin15 (VREF)
Pin6 (GND)  --> Pin14 (AGND)
Pin23 (SCLK)--> Pin13 (CLK)
Pin21 (MISO)--> Pin12 (DOUT)
Pin19 (MOSI)--> Pin11 (DIN)
Pin24 (CE0) --> Pin10 (CS/SHDN)
Pin6 (GND)  --> Pin9 (DGND)
output is in the range 0...1
'''
from gpiozero import MCP3008
import time

saatovastus = MCP3008(channel=0)  # "saatovastus" = potentiometer input on channel 0
print("Ctrl+C stops the program")
try:
    while True:
        lukema = saatovastus.value  # "lukema" = reading, normalized to 0...1
        print("{:.2f}".format(lukema))
        # print(lukema)
        time.sleep(1)
except KeyboardInterrupt:
    print("Stopping the program")
|
[
"kahvikannu@gmail.com"
] |
kahvikannu@gmail.com
|
95143bdbaec49649b31fff9740d2c2f3502ea677
|
42fc3542747a8e74e8c0d1daeb087e33ccc2a97e
|
/backend/manage.py
|
317be14109aafa4915c3dcebd55f34d3e02e7493
|
[] |
no_license
|
crowdbotics-apps/nilai-sidang-22115
|
6b2eb34e38d1c1af6fcfbcccbac6f0980f5dc5c7
|
e81adc7606a200d5d298dc41a71bb7e1a95e4cc8
|
refs/heads/master
| 2023-01-09T00:43:22.798201
| 2020-10-30T08:45:35
| 2020-10-30T08:45:35
| 308,571,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nilai_sidang_22115.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2cd118a99c5e88d03b834efc5e89d827926f740a
|
eb8b5cde971573668800146b3632e43ed6e493d2
|
/python/oneflow/compatible/single_client/nn/modules/sparse.py
|
f1e7ac3286dbd95fb3eef79b3eb8899929a564d2
|
[
"Apache-2.0"
] |
permissive
|
big-data-ai/oneflow
|
16f167f7fb7fca2ce527d6e3383c577a90829e8a
|
b1c67df42fb9c5ab1335008441b0273272d7128d
|
refs/heads/master
| 2023-07-08T21:21:41.136387
| 2021-08-21T11:31:14
| 2021-08-21T11:31:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,346
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Optional, Tuple
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.framework.tensor import Tensor
from oneflow.compatible.single_client.nn.module import Module
class Embedding(Module):
"""A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding
word embeddings.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
i.e. it remains as a fixed "pad". For a newly constructed Embedding,
the embedding vector at :attr:`padding_idx` will default to all zeros,
but can be updated to another value to be used as the padding vector.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.compatible.single_client.experimental as flow
>>> flow.enable_eager_execution()
>>> indices = flow.Tensor([[1, 2, 4, 5], [4, 3, 2, 9]], dtype=flow.int)
>>> m = flow.nn.Embedding(10, 3)
>>> y = m(indices)
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: Optional[float] = None,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
):
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert (
padding_idx < self.num_embeddings
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert (
padding_idx >= -self.num_embeddings
), "Padding_idx must be within num_embeddings"
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
        assert max_norm is None, "max_norm is not supported yet!"
        assert norm_type is None, "norm_type is not supported yet!"
        assert scale_grad_by_freq is False, "scale_grad_by_freq=True is not supported yet!"
        assert sparse is False, "sparse=True is not supported yet!"
if _weight is None:
self.weight = flow.nn.Parameter(Tensor(num_embeddings, embedding_dim))
self.reset_parameters()
else:
assert list(_weight.shape) == [
num_embeddings,
embedding_dim,
], "Shape of weight does not match num_embeddings and embedding_dim"
self.weight = flow.nn.Parameter(_weight)
self.sparse = sparse
def reset_parameters(self) -> None:
flow.nn.init.normal_(self.weight)
self._fill_padding_idx_with_zero()
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with flow.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, indices):
res = flow.F.gather(self.weight, indices, axis=0)
return res
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
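# A hedged sketch of padding_idx behavior (same eager-execution setup as the
# docstring example; sizes and indices below are illustrative):
#
#     m = flow.nn.Embedding(10, 3, padding_idx=0)
#     out = m(flow.Tensor([[0, 2, 0, 5]], dtype=flow.int))
#     # rows for index 0 come from the all-zero padding vector
#     # (see _fill_padding_idx_with_zero above)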
|
[
"noreply@github.com"
] |
big-data-ai.noreply@github.com
|
a3a660f9f94b8e2cd9eced37e919540a58f771dc
|
263b1997190f39b4547530ce05e889699a77a922
|
/Problems/Patients/main.py
|
6e0b83d31abbdda1c3e89f82b1a09be7cb067872
|
[] |
no_license
|
IgnatIvanov/To-Do_List_JetBrainsAcademy
|
fa593a29143bf388f085d4ba95713540cd89eeca
|
2bc4ed360c41ece09634e72e705dbc257e686958
|
refs/heads/master
| 2023-03-08T08:25:11.022569
| 2021-02-20T19:28:47
| 2021-02-20T19:28:47
| 339,089,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
class Patient:
def __init__(self, name, last_name, age):
self.name = name
self.last_name = last_name
self.age = age
# create methods here
def __repr__(self):
return "Object of the class Patient. name: {}, last_name: {}, age: {}".format(self.name, self.last_name, self.age)
def __str__(self):
return "{} {}. {}".format(self.name, self.last_name, self.age)
|
[
"ignativanov1996@mail.ru"
] |
ignativanov1996@mail.ru
|