| column | dtype | observed range / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–288 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | lengths 128–8.19k |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
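The schema above is also what you would use to slice the rows programmatically. A minimal sketch, assuming the rows shown below have been exported to a local Parquet file (the `rows.parquet` filename is a placeholder, not part of this dump):

```python
import pandas as pd

# Hypothetical local export of the rows below; the path is a placeholder.
df = pd.read_parquet("rows.parquet")

# Keep only permissively licensed, non-vendored, non-generated files,
# using the license_type / is_vendor / is_generated columns from the schema.
subset = df[(df["license_type"] == "permissive") & ~df["is_vendor"] & ~df["is_generated"]]
print(subset[["repo_name", "path", "length_bytes"]].head())
```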
a76256e5c53a0f726234358d2eeec7cce0cde04f
|
06ab66fe85631fb8e0351245af483b3a8e98295b
|
/src/config/logger.py
|
a708dd302034317cdf2dbf836a63869ed4a63415
|
[] |
no_license
|
SeanCherngTW/toy-real-time-bidding-buyer
|
ed62d8e60f196bff06ad69765f7ae8e711b66ea1
|
82e09598649d2ffd4aecc6356257fa3c5a0504ea
|
refs/heads/main
| 2023-06-12T18:19:07.445796
| 2021-07-05T14:16:40
| 2021-07-05T14:16:40
| 383,154,896
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
import os
import logging
from os.path import exists
from logging import handlers
class DebugLog(object):
    def __init__(self, ad_path_config):
        self.model_name = ad_path_config['model_name']
        self.log_file_path = ad_path_config['log_file_path'] + self.model_name + ".log"
        self.dst_dir = ad_path_config['dst_dir']
        self.prepare_log_path()
        self.logger = self.logger_initialize()
        self.logger.propagate = False
    def prepare_log_path(self):
        if not os.path.exists(self.dst_dir):
            os.mkdir(self.dst_dir)
    def logger_initialize(self):
        logger = logging.getLogger(self.model_name)
        logger.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '[%(asctime)s] - [%(name)s] - [%(filename)s] - %(levelname)s - %(message)s'
        )
        fh = handlers.RotatingFileHandler(
            filename=self.log_file_path,
            backupCount=1,
            encoding="utf-8",
        )
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        return logger
|
[
"seancherng.tw@gmail.com"
] |
seancherng.tw@gmail.com
|
829a60803827790a24c17e21c99521fc7746dd54
|
bae7e3b6cdfd6f354b79dbc849c1969a46aed586
|
/hiAPP/plot_linkage_matrix.py
|
62439c042f1b38aa4eb1a36072056960d65d5d01
|
[
"MIT"
] |
permissive
|
jmborr/LDRDSANS
|
7f6b8ef44db3b93972ae9bff08a641067c19bae1
|
b8081ecb78da46a530d61efd3cb6764f3b17b567
|
refs/heads/master
| 2021-07-24T23:49:38.271100
| 2017-11-05T22:36:40
| 2017-11-05T22:36:40
| 71,494,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
# -*- coding: utf-8 -*-
"""
Matplotlib of the dendogram associated with the linkage matrix.
Thanks to Jorn's Blog
<https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/>
"""
# needed imports
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
import numpy as np
import argparse
import sys
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plots a dendogram from a scipy.cluster.hierarchy linkage matrix.")
    parser.add_argument("linkage", type=str, help="linkage matrix file, output from fpch2scph.py")
    parser.add_argument("--p", type=int, default=10, help="show only the last p merged clusters")
    args=parser.parse_args()
    Z=np.loadtxt(args.linkage)
    plt.title('Hierarchical Clustering Dendrogram (truncated)')
    plt.xlabel('sample index')
    plt.ylabel('RMSD (Angstroms)')
    dendrogram(
        Z,
        truncate_mode='lastp',  # show only the last p merged clusters
        p=args.p,  # show only the last p merged clusters
        show_leaf_counts=False,  # otherwise numbers in brackets are counts
        leaf_rotation=90.,
        leaf_font_size=12.,
        show_contracted=True,  # to get a distribution impression in truncated branches
    )
    plt.show()
    sys.exit(0)
|
[
"borreguero@gmail.com"
] |
borreguero@gmail.com
|
ce67d5e4cbc106774ba02c02cb38b2fa7b165403
|
b01eee55884e21412a1812593996a0d9156e20bc
|
/cipp/x64assembler/instructions/push_reg.py
|
d3c6d03e68af5bf12c7f9965096d230b1733a50b
|
[] |
no_license
|
JacquesLucke/cipp
|
46bdb7eebaeb863f424c92542ea56b49b5f0fe2e
|
d4f38fd1fc84aed9cbf49b85bf6c4b96f2561f71
|
refs/heads/master
| 2021-10-27T18:29:23.288884
| 2019-04-18T15:36:52
| 2019-04-18T15:36:52
| 123,611,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from .. bits import Bits
from .. block import Instruction
class PushRegInstr(Instruction):
    def __init__(self, reg):
        assert reg.size in (16, 64)
        self.reg = reg
    def toIntelSyntax(self):
        return f"push {self.reg.name}"
    def toMachineCode(self):
        if self.reg.size == 64:
            return self.toMachineCode_64()
        elif self.reg.size == 16:
            return self.toMachineCode_16()
        else:
            raise Exception()
    def toMachineCode_64(self):
        prefix = Bits.fromHex("" if self.reg.group == 0 else "41")
        opcode = Bits.fromHexAndOffset("50", self.reg.number)
        return prefix + opcode
    def toMachineCode_16(self):
        return Bits.fromHex("66") + self.toMachineCode_64()
|
[
"mail@jlucke.com"
] |
mail@jlucke.com
|
a19341832df5aa7bd0970ac6ef6b9c9a7279c21a
|
73b5d880fa06943c20ff0a9aee9d0c1d1eeebe10
|
/tinyos-1.x/contrib/ucb/apps/LandmarkRouting/lossy.py
|
404b3df55a95a17dbacc58e49ca3b896c54ce7b8
|
[
"Intel"
] |
permissive
|
x3ro/tinyos-legacy
|
101d19f9e639f5a9d59d3edd4ed04b1f53221e63
|
cdc0e7ba1cac505fcace33b974b2e0aca1ccc56a
|
refs/heads/master
| 2021-01-16T19:20:21.744228
| 2015-06-30T20:23:05
| 2015-06-30T20:23:05
| 38,358,728
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from simcore import *
if not sim.__driver.pluginManager.getPlugin("RadioModelGuiPlugin").isRegistered():
    print "Please create radio model first using the Radio Model Plugin."
else:
    pf = open('packet','w')
    space = ' '
    end = ' 0.0 0.0\n'
    for i in motes:
        for j in motes:
            s = str(i.getID()) + space + str(j.getID()) + space
            if i.getID() == j.getID():
                continue
            elif i.getID() == 1 or i.getID() == 0:
                continue
            elif j.getID() == 1 or j.getID() == 0:
                continue
            elif radio.getLossRate(i.getID(), j.getID()) < 1.0:
                s += str(radio.getLossRate(i.getID(),j.getID())) + end
                pf.write(s)
    pf.flush()
    pf.close()
|
[
"lucas@x3ro.de"
] |
lucas@x3ro.de
|
fc49994cbf7356c6fd241ebfa3d48ca03c7d5983
|
f0a5ad7b8aa39f51f233391fead0da3eabecc4ee
|
/.history/toolbox/tradaExtract_20191128085816.py
|
a1f0a049b0449f364b7c3a9c579677dbaf4a3ae4
|
[] |
no_license
|
OseiasBeu/webScrapping
|
e0a524847e55b24dbbd3d57bbe7fa43b4e101f48
|
1e72c7551aea355a891043baecfcbab8a89e719a
|
refs/heads/master
| 2022-10-25T18:12:50.858653
| 2020-06-18T01:29:24
| 2020-06-18T01:29:24
| 224,681,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
from bs4 import BeautifulSoup
arr = [['#', 'clienteEstado', 'warehouseId', 'Pendentes', 'de', 'integrao'], ['1', 'SP', '2404', '48'], ['2', 'SP', '2462', '10'], ['3', 'SP', '7100', '7'], ['4', 'MG', 'BR19_A002', '6'], ['5', 'SP', 'BR19_A002', '6'], ['6', 'PE', 'BR19_A002', '5'], ['7', 'SP', '2444', '3'], ['8', 'MG', '7100', '2'], ['9', 'RJ', 'BR19_A002', '2'], ['10', 'BA', 'BR19_A002', '2'], ['11', 'MG', '0', '1'], ['12', 'SP', '7134', '1'], ['13', 'SP', '7136', '1'], ['14', 'SP', 'BR1F_A002', '1']]
soup = BeautifulSoup(arr).encode("utf-8")
print(arr)
|
[
"oseiasbeu@outlook.com"
] |
oseiasbeu@outlook.com
|
6171b8e1aaffc27ebb5b2e594409e8ce47552e37
|
ae9d32213e4ab423965e4a7f3ba1e6abfea85817
|
/PreplotCalculator.py
|
93599d5d0fedb8bd01a8babfdb6fcdffc49ae537
|
[] |
no_license
|
syntaxnoob/SpawnerDistance
|
9e8d68123a8eb6835cff33f991b12bb153fb0858
|
a07767d5e9358bb2b1efde171ee4a5c297302933
|
refs/heads/master
| 2022-07-31T08:24:35.172896
| 2020-05-23T16:22:09
| 2020-05-23T16:22:09
| 263,573,361
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
#!/bin/python3
import math
import pandas as pd
### Variabel ###
# spawner coordinates (Xcoordinate, Ycoordinate, Zcoordinate)
Spawners = [(370, 28, 886), (365, 37, 945), (359, 39, 917), (381, 42, 917),
            (351, 44, 931), (362, 44, 891), (408, 44, 927), (429, 35, 897)]
Bigsum = 0
Distancelist = []  # List with Blockindex and Distances
Blocklist = []  # List with Blockindex and X/Y/Z coordinates
Sumlist = []  # List with Distances
Blockindex = -3  # Blockindex is the index for the searched block
maxdistance = 16  # Max distance from player to spawner
Xcoords = []
Ycoords = []
Zcoords = []
bestlist = []  # List of blockindexes
goedblok = []  # List of bestlist blocks
### Find Search area ###
for d in Spawners:
    Xcoords.append(d[0])
    Ycoords.append(d[1])
    Zcoords.append(d[2])
Xcoords.sort()
Ycoords.sort()
Zcoords.sort()
minX = Xcoords[0]
minY = Ycoords[0]
minZ = Zcoords[0]
maxX = Xcoords[-1]
maxY = Ycoords[-1]
maxZ = Zcoords[-1]
# Could be optimized
### Brute force the shortest distance ###
for i in range(minX, maxX):  # Xcoords Loop
    Blockindex = Blockindex + 1
    for j in range(minY, maxY):  # Ycoords Loop
        Blockindex = Blockindex + 1
        for k in range(minZ, maxZ):  # Zcoords Loop
            Blockindex = Blockindex + 1
            for l in range(0, 7):
                # Pythagorean.
                distance = math.sqrt(
                    math.pow((Spawners[l][0] - i), 2) + math.pow((Spawners[l][1] - j), 2) + math.pow((Spawners[l][2] - k), 2))
                if (distance > maxdistance):
                    # Later used to calculate the amount of spawners that will be activated.
                    Bigsum = 1000000 + Bigsum
                else:  # Distance is allways positive
                    Bigsum = distance + Bigsum
            Distancelist.append(Blockindex)
            Distancelist.append(Bigsum)
            Sumlist.append(Bigsum)
            Blocklist.append(Blockindex)
            Blocklist.append(i)
            Blocklist.append(j)
            Blocklist.append(k)
            Bigsum = 0
        Blockindex = Blockindex - 1
    Blockindex = Blockindex - 1
Sumlist.sort()
print(Sumlist[0])
ID = (Distancelist.index(Sumlist[0]))
DI = Blocklist.index(ID)
print ("The block that is closest to all spawners is:", Blocklist[DI + 1], ",",
       Blocklist[DI + 2], ",", Blocklist[DI + 3], ".", "And you activate:", round((7000000 - Distancelist[ID]) / 1000000), "Spawners.")
for i in range(len(Distancelist)):
    if (Distancelist[i] > 1000000):
        if (Distancelist[i] < 5000000):
            bestlist.append(Distancelist[(i - 1)])
        else:
            continue
    else:
        continue
### Bestlist is GOED, niet aankomen ###
for v in range(len(bestlist)):
    if(v == (len(bestlist) - 1)):
        break
    else:
        for w in range(len(Blocklist)):
            if (bestlist[v] == Blocklist[w]):
                goedblok.append(Blocklist[(w + 1):(w + 4)])
                break
            else:
                continue
print("blocks dat 3 spawners activeren: ", len(bestlist))
pd.DataFrame(goedblok).to_csv("3spawner.csv", index=False)
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
3b497b13bfb03c08d8605c64566caeff353afe1f
|
a1aadb13c35f2a3fb27078090e5a582a3ea462f1
|
/devel/py-pyobjc-core/patches/patch-setup.py
|
f046aa0efda0c7712c4171148edac369e6c807f7
|
[] |
no_license
|
fidelix-project/pkgsrc
|
702346ca3a74b3dced9de29b07d342154466d1bd
|
8a6673aa3e19b8604d2077015dc4673304399afc
|
refs/heads/master
| 2022-11-06T04:48:33.983672
| 2020-06-28T14:06:28
| 2020-06-28T14:06:28
| 273,759,036
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
$NetBSD: patch-setup.py,v 1.1 2019/11/18 17:05:06 adam Exp $
Do not add debug symbols.
Do not override compiler optimiztion flags.
--- setup.py.orig 2019-11-18 16:02:47.000000000 +0000
+++ setup.py
@@ -66,7 +66,6 @@ def get_sdk_level(sdk):
# CFLAGS for the objc._objc extension:
CFLAGS = [
- "-g",
"-fexceptions",
# Loads of warning flags
"-Wall",
@@ -137,7 +136,7 @@ if get_config_var("Py_DEBUG"):
elif isinstance(cfg_vars[k], str) and "-O3" in cfg_vars[k]:
cfg_vars[k] = cfg_vars[k].replace("-O3", "-O1 -g")
-else:
+elif False:
# Enable -O4, which enables link-time optimization with
# clang. This appears to have a positive effect on performance.
cfg_vars = get_config_vars()
|
[
"adam@pkgsrc.org"
] |
adam@pkgsrc.org
|
ca46bb856d561d725345a0a14058c5877a4cac0e
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/ashvin/icml2020/d4rl/test1.py
|
99515aca2a2dba3519cd10dc424cb31a7cf4af19
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,000
|
py
|
"""
AWR + SAC from demo experiment
"""
from rlkit.demos.source.hdf5_path_loader import HDF5PathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=101,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(2E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
# num_gaussians=1,
),
qf_kwargs=dict(
hidden_sizes=[256, 256, ],
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=HDF5PathLoader,
path_loader_kwargs=dict(),
add_env_demos=False,
add_env_offpolicy_data=False,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=False,
load_env_dataset_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
# 'env': ["pen-sparse-v0", "door-sparse-v0"],
'env': ["halfcheetah-mixed-v0", "walker2d-mixed-v0", "hopper-mixed-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, 0.3, 1.0, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.clip_score': [0.5, ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0, ],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
# 'qf_kwargs.output_activation': [Clamp(max=0)],
# 'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [1, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
5022b105c714e2dc4421650a004f69e753e7f87b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_unbarring.py
|
324d198051173b711ebc3f517ecffc2d0ffdcc48
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
#calss header
class _UNBARRING():
    def __init__(self,):
        self.name = "UNBARRING"
        self.definitions = unbar
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['unbar']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
2520af0b5128fb372cc2fef73350890249d44869
|
5396a46275e52bfc972f05097e925742d5bbf2d1
|
/_2016/eola/thumbnails.py
|
9bc1e91212c3f71dae4f75496806adaf7814e43c
|
[
"MIT"
] |
permissive
|
3b1b/videos
|
6ab0e4fe0fb07d15b5455f8726131a880437c42c
|
e841b1410fdda2d3bddb7cfa12ce070a3b66a026
|
refs/heads/master
| 2023-08-29T01:37:23.424512
| 2023-08-16T03:35:03
| 2023-08-16T03:35:03
| 325,873,493
| 4,601
| 1,868
| null | 2023-03-30T08:15:37
| 2020-12-31T21:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,029
|
py
|
from manim_imports_ext import *
from _2016.eola.chapter9 import Jennifer, You
class Chapter0(LinearTransformationScene):
    CONFIG = {
        "include_background_plane" : False,
        "t_matrix" : [[3, 1], [2, -1]]
    }
    def construct(self):
        self.setup()
        self.plane.fade()
        for mob in self.get_mobjects():
            mob.set_stroke(width = 6)
        self.apply_transposed_matrix(self.t_matrix, run_time = 0)
class Chapter1(Scene):
    def construct(self):
        arrow = Vector(2*UP+RIGHT)
        vs = OldTexText("vs.")
        array = Matrix([1, 2])
        array.set_color(TEAL)
        everyone = VMobject(arrow, vs, array)
        everyone.arrange(RIGHT, buff = 0.5)
        everyone.set_height(4)
        self.add(everyone)
class Chapter2(LinearTransformationScene):
    def construct(self):
        self.lock_in_faded_grid()
        vectors = VMobject(*[
            Vector([x, y])
            for x in np.arange(-int(FRAME_X_RADIUS)+0.5, int(FRAME_X_RADIUS)+0.5)
            for y in np.arange(-int(FRAME_Y_RADIUS)+0.5, int(FRAME_Y_RADIUS)+0.5)
        ])
        vectors.set_submobject_colors_by_gradient(PINK, BLUE_E)
        words = OldTexText("Span")
        words.scale(3)
        words.to_edge(UP)
        words.add_background_rectangle()
        self.add(vectors, words)
class Chapter3(Chapter0):
    CONFIG = {
        "t_matrix" : [[3, 0], [2, -1]]
    }
class Chapter4p1(Chapter0):
    CONFIG = {
        "t_matrix" : [[1, 0], [1, 1]]
    }
class Chapter4p2(Chapter0):
    CONFIG = {
        "t_matrix" : [[1, 2], [-1, 1]]
    }
class Chapter5(LinearTransformationScene):
    def construct(self):
        self.plane.fade()
        self.add_unit_square()
        self.plane.set_stroke(width = 6)
        VMobject(self.i_hat, self.j_hat).set_stroke(width = 10)
        self.square.set_fill(YELLOW, opacity = 0.7)
        self.square.set_stroke(width = 0)
        self.apply_transposed_matrix(self.t_matrix, run_time = 0)
class Chapter9(Scene):
    def construct(self):
        you = You()
        jenny = Jennifer()
        you.change_mode("erm")
        jenny.change_mode("speaking")
        you.shift(LEFT)
        jenny.shift(2*RIGHT)
        vector = Vector([3, 2])
        vector.center().shift(2*DOWN)
        vector.set_stroke(width = 8)
        vector.tip.scale(2)
        you.coords = Matrix([3, 2])
        jenny.coords = Matrix(["5/3", "1/3"])
        for pi in jenny, you:
            pi.bubble = pi.get_bubble(SpeechBubble, width = 3, height = 3)
            if pi is you:
                pi.bubble.shift(MED_SMALL_BUFF*RIGHT)
            else:
                pi.coords.scale(0.8)
                pi.bubble.shift(MED_SMALL_BUFF*LEFT)
            pi.bubble.add_content(pi.coords)
            pi.add(pi.bubble, pi.coords)
            pi.look_at(vector)
        self.add(you, jenny, vector)
class Chapter10(LinearTransformationScene):
    CONFIG = {
        "foreground_plane_kwargs" : {
            "x_radius" : FRAME_WIDTH,
            "y_radius" : FRAME_HEIGHT,
            "secondary_line_ratio" : 1
        },
        "include_background_plane" : False,
    }
    def construct(self):
        v_tex = "\\vec{\\textbf{v}}"
        eq = OldTex("A", v_tex, "=", "\\lambda", v_tex)
        eq.set_color_by_tex(v_tex, YELLOW)
        eq.set_color_by_tex("\\lambda", MAROON_B)
        eq.scale(3)
        eq.add_background_rectangle()
        eq.shift(2*DOWN)
        title = OldTexText(
            "Eigen", "vectors \\\\",
            "Eigen", "values"
            , arg_separator = "")
        title.scale(2.5)
        title.to_edge(UP)
        # title.set_color_by_tex("Eigen", MAROON_B)
        title[0].set_color(YELLOW)
        title[2].set_color(MAROON_B)
        title.add_background_rectangle()
        self.add_vector([-1, 1], color = YELLOW, animate = False)
        self.apply_transposed_matrix([[3, 0], [1, 2]])
        self.plane.fade()
        self.remove(self.j_hat)
        self.add(eq, title)
|
[
"grant@3blue1brown.com"
] |
grant@3blue1brown.com
|
5cc0139aa5321db4c991af5ca4902a1878f8d7f1
|
ec1deb682fb96a1f937f2fca5f161aa951462876
|
/unittestPython/part_1/name_function.py
|
61209de86dc7aec85c8f1a819784981abebebc0c
|
[] |
no_license
|
AnatoliKosarev/Python-beginner-course--Teclado-
|
31d82f5e9a1f39e2970323bed9de1fd539990565
|
fa91199938d6975b5874341585343566caaf3600
|
refs/heads/main
| 2023-06-30T12:14:33.779827
| 2021-07-24T11:16:19
| 2021-07-24T11:16:19
| 376,371,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
def get_formatted_name(first, last, middle=""):  # middle name is optional
    if middle:
        full_name = f"{first} {middle} {last}"
    else:
        full_name = f"{first} {last}"
    return full_name.title()
|
[
"anatoli.kosarev@gmail.com"
] |
anatoli.kosarev@gmail.com
|
db09f5e6aeb8defe8a7c9c365689f0ee46b07dc4
|
2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac
|
/Dsz/PyScripts/Lib/dsz/mca/network/cmd/banner/errors.py
|
3dffd24a2b423eab69b50b74ee3889931f22a361
|
[] |
no_license
|
FingerLeakers/DanderSpritz_docs
|
f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364
|
d96b6a71c039b329f9f81544f645857c75360e7f
|
refs/heads/master
| 2021-01-25T13:05:51.732149
| 2018-03-08T01:22:49
| 2018-03-08T01:22:49
| 123,527,268
| 2
| 0
| null | 2018-03-02T03:48:31
| 2018-03-02T03:48:30
| null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: errors.py
import mcl.status
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_CALLBACK_FAILED = mcl.status.framework.ERR_START + 1
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 2
ERR_SOCKET_INIT_FAILURE = mcl.status.framework.ERR_START + 3
ERR_SOCKET_BIND_FAILURE = mcl.status.framework.ERR_START + 4
ERR_SOCKET_OPTION_FAILURE = mcl.status.framework.ERR_START + 5
ERR_CONNECT_FAILURE = mcl.status.framework.ERR_START + 6
ERR_SEND_FAILURE = mcl.status.framework.ERR_START + 7
ERR_PACKET_TOO_LARGE = mcl.status.framework.ERR_START + 8
ERR_RECV_ERROR = mcl.status.framework.ERR_START + 9
ERR_RECV_TIMEOUT = mcl.status.framework.ERR_START + 10
ERR_NOT_IMPLEMENTED = mcl.status.framework.ERR_START + 11
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
                ERR_CALLBACK_FAILED: 'Error making callback',
                ERR_MARSHAL_FAILED: 'Marshaling data failed',
                ERR_SOCKET_INIT_FAILURE: 'Socket initialization failed',
                ERR_SOCKET_BIND_FAILURE: 'Failed to bind to given source port',
                ERR_SOCKET_OPTION_FAILURE: 'Failed to set socket option',
                ERR_CONNECT_FAILURE: 'Connect request failed',
                ERR_SEND_FAILURE: 'Send failed',
                ERR_PACKET_TOO_LARGE: 'The given packet is too large to send',
                ERR_RECV_ERROR: 'Error receiving data',
                ERR_RECV_TIMEOUT: 'Timeout waiting for data',
                ERR_NOT_IMPLEMENTED: 'Not implemented on this platform'
}
|
[
"francisck@protonmail.ch"
] |
francisck@protonmail.ch
|
447fc54eea01a339401254a7ab9eea6548c5d5d1
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/intentions/PyInvertIfConditionIntentionTest/generalNoElseTry.py
|
8071d065e802d90e83cc718813bbe0e7adcdde7c
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
def func():
    value = "not-none"
    <caret>if value is None:
        print("None")
        return
    try:
        return int(value)
    except ValueError:
        raise RuntimeError("Value is not int")
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
971d6c7a8b93db04103d5493b66aab379de626ae
|
2794764ddbe9daf666601014cb84e5ca7b6ca7c3
|
/Account/urls.py
|
d1d10c86cebf2fd2a839bfcf8f84f540ce97c97e
|
[] |
no_license
|
aydanaderi/goldoon
|
5b7341f1b94cb607bcc7b895fe22a6affb817cd7
|
3f4cc6a526eae70f55833d0b07d5209b243aff20
|
refs/heads/main
| 2023-01-19T16:12:22.837854
| 2020-11-26T15:46:24
| 2020-11-26T15:46:24
| 311,077,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
from django.urls import path
from knox import views as knox_views
from . import views
urlpatterns = [
    path('signup/', views.RegisterAPI.as_view(), name = 'register'),
    path('login/', views.LoginAPI.as_view(), name = 'login'),
    path('logout/', knox_views.LogoutView.as_view(), name = 'logout'),
    path('change_password/', views.ChangePasswordView.as_view(), name = 'change-password'),
    path('reset/', views.ResetPasswodView, name = 'Reset_Password'),
    path('<int:username_id>/reset/confirm/', views.ConfirmResetPasswodView , name = 'confirm_Reset_password'),
    path('profile/', views.ProfileView, name = 'profile'),
]
|
[
"ayda.f.naderi@gmail.com"
] |
ayda.f.naderi@gmail.com
|
1c57bba12ea1d28e3d22c8f069be2ea6fb0a8d9d
|
aca4f00c884e1d0e6b2978512e4e08e52eebd6e9
|
/2021/atcoder.jp/abc/196/prob.py
|
561d92c060025a984e9491c8ceafd39586a1b707
|
[] |
no_license
|
jki14/competitive-programming
|
2d28f1ac8c7de62e5e82105ae1eac2b62434e2a4
|
ba80bee7827521520eb16a2d151fc0c3ca1f7454
|
refs/heads/master
| 2023-08-07T19:07:22.894480
| 2023-07-30T12:18:36
| 2023-07-30T12:18:36
| 166,743,930
| 2
| 0
| null | 2021-09-04T09:25:40
| 2019-01-21T03:40:47
|
C++
|
UTF-8
|
Python
| false
| false
| 400
|
py
|
from math import floor
from sys import stderr, stdout
def solution(s):
    p = s.find('.')
    if p == -1:
        p = len(s)
    stdout.write('%d\n' % int(s[:p]))
def main():
    while True:
        try:
            s = raw_input().strip()
            solution(s)
        except EOFError:
            break
        except ValueError:
            continue
if __name__ == '__main__':
    main()
|
[
"jki14wz@gmail.com"
] |
jki14wz@gmail.com
|
4e4b7278b5d85aced09f29bfe8d49d79fc5fb567
|
c1ee8f22ece4fc39cb94fe19832fcba8e45cf5bc
|
/프로그래머스/문자열 내 마음대로 정렬하기.py
|
a45fa146443052022e2644fb242635aa218465d9
|
[] |
no_license
|
JeongHanJun/BOJ
|
ae6b1c64c5b3226deef2708ae447aa1225333a92
|
a865624fb0a9291b68f99af8535f708554fa0b41
|
refs/heads/master
| 2023-03-31T02:22:58.974437
| 2021-04-02T02:43:57
| 2021-04-02T02:43:57
| 258,809,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
# 문자열 내 마음대로 정렬하기
# 제목부터 sorted, key, lambda 가 떠오른다.
def solution(strings, n):
    answer = sorted(strings, key = lambda x : (x[n], x))
    return answer
s1 = ["sun", "bed", "car"]
n1 = 1
s2 = ["abce", "abcd", "cdx"]
n2 = 2
print(solution(s1, n1))
print(solution(s2, n2))
|
[
"noreply@github.com"
] |
JeongHanJun.noreply@github.com
|
26d13e4f0ecb08e455798eadfe6fa1e6ec855e3a
|
cd847bb6162a44945e7882992be6a8e99cd475b2
|
/venv/bin/venv/bin/wheel
|
a26c6118b433f85ea2d30bd0128814e1bbf1d383
|
[] |
no_license
|
jasvr/wags_to_wings
|
60e04375e3273e9db23f16d7f7d18263e5b14a93
|
d03edcdd0db27efadb5ec7e8321ae30f23f0216a
|
refs/heads/master
| 2020-05-04T23:42:55.924620
| 2019-04-04T22:40:55
| 2019-04-04T22:40:55
| 179,553,036
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
#!/Users/jasvrgs/wdi/projects/hackathon/venv/bin/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"jas.vrgs@gmail.com"
] |
jas.vrgs@gmail.com
|
|
61800f469bd79f4a99bbc707dbf177d0e80735dd
|
b45b8ad36b3cd9b625a16af014c4dd735602e97f
|
/Python语言程序设计/Week9面向对象/1、面向对象/jam_10_元类.py
|
03723960f8bc1205ce9539185bf1cac1f05827b0
|
[] |
no_license
|
YuanXianguo/Python-MOOC-2019
|
c4935cbbb7e86568aa7a25cb0bd867b3f5779130
|
f644380427b4a6b1959c49f134a1e27db4b72cc9
|
refs/heads/master
| 2020-11-26T18:38:22.530813
| 2019-12-20T02:51:52
| 2019-12-20T02:51:52
| 229,173,967
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
def run(self):
    print("{}会跑".format(self.name))
# type称为元类,可以创建类,参数:类名,父类(元组),属性(字典)
# 可以在用属性引用一个函数作为方法
Test = type("Test", (object,), {"name": "test", "run": run})
t = Test()
t.run()
|
[
"736913978@qq.com"
] |
736913978@qq.com
|
dedb5daeed1de9d8fb153d68ae4e7352469334d3
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/krmtsi001/question2.py
|
70123bdafa165e39c9cdea9ad141336d1a5e6aa8
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
def timer():
    hours=eval(input("Enter the hours:\n"))
    minutes=eval (input("Enter the minutes:\n"))
    seconds=eval(input("Enter the seconds:\n"))
    if(0<=hours<=23):
        if(0<=minutes<=59):
            if(0<=seconds<=59):
                print("Your time is valid.")
            else:
                print("Your time is invalid.")
        else:
            print("Your time is invalid.")
    else:
        print("Your time is invalid.")
timer()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
dbeeef05b86bdf486c9b96b36c84624c17e9f3b0
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/ipagnn/adapters/common_adapters_test.py
|
1a4022ad703be31751ce21dbd2b8f1c7fd8e4246
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068
| 2023-03-24T16:27:28
| 2023-03-24T16:32:17
| 282,682,170
| 1
| 0
|
Apache-2.0
| 2020-07-26T15:50:32
| 2020-07-26T15:50:31
| null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Learned Interpreters workflows."""
from absl.testing import absltest
import jax.numpy as jnp
from ipagnn.adapters import common_adapters
class CommonAdaptersTest(absltest.TestCase):
  def test_compute_weighted_cross_entropy(self):
    logits = jnp.array([
        [[.8, .2, -.5],
         [.2, .5, -.1]],
        [[.1, -.2, .2],
         [.4, -.5, .1]],
    ])
    labels = jnp.array([
        [0, 1],
        [2, 2],
    ])
    common_adapters.compute_weighted_cross_entropy(logits, labels)
if __name__ == '__main__':
  absltest.main()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
11a768e5cb050aff0f5193393950a1a5603947ed
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/EMRzdyj_20190422140749.py
|
7acb80b6448a09bdb1f2a5d6aac099e548cb7519
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
#-*- coding: UTF-8 -*-
#本文件用于提取目标目录中的所有txt,并提取关键词所在行到指定目录,并提取关键词新建文件
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHR')#txt目录提取
zljhs = []
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")#中文加入errors
    emrtxt = os.path.basename(emrtxt)
    emrtxt_str = re.findall(r'(^.+?)\_',emrtxt)#提取ID
    emrtxt = "".join(emrtxt_str)#转成str
    pattern = r',|.|,|。|;|;'#清除标点
    #txtp=txtp.decode('utf-8')
    temp_line = []
    for line in f.readlines():
        line = re.sub(' ','',line)#删除空格
        if line.find(u'程传输')<=-1:
            temp_line.append(line)
        else:
            break
    for line in temp_line:
        if line.find (u'诊断依据:',0,6) >-1:
            line = re.sub(r'h|H', '小时', line)#小时替换成中文
            line = re.sub(r'诊断依据:', '', line)#删除入院诊断字样
            line_deldl = re.split(r'。',line)#根据标点分行
            line_deld = '\n'.join(line_deldl) #转成str格式
            line_out = re.sub(r'\d+、|\d+)、|\d+\)、|\d+\)|\(+\d+\)|①|②|③|④|⑤|⑥|⑦','',line_deld) #删除序号
            line_output = re.split('\n',line_out)
            line = '\n'.join(line_output)
            a = re.sub(r'(.*)'+'“'|'”为主诉'+r'(.*)','',line)
            line = ''.join(a)
            EMRdef.text_create(r'D:\DeepLearning ER\EHRzdyj','.txt' ,emrtxt,line)#导出带有诊疗计划的文件和诊疗计划
            #zljhs.append(emrtxt+':'+line)
#EMRdef.text_save('D:\python\EMR\zljh.txt',zljhs)
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
647eb0c247d92d421b317ab1114d9bf82e66f4d5
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/autosar/models/bsw_internal_behavior_subtypes_enum.py
|
1c215010479de31b0fe70b14ce7782accfe53979
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 171
|
py
|
from enum import Enum
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
class BswInternalBehaviorSubtypesEnum(Enum):
    BSW_INTERNAL_BEHAVIOR = "BSW-INTERNAL-BEHAVIOR"
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
fa441165e9c8186f8a8823b6af81f6ead2fdf63e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/438/usersdata/309/98435/submittedfiles/pico.py
|
c35ed557c01838f28a74350f9a1bcb6cfd1089fd
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# -*- coding: utf-8 -*-
def pico(lista):
    x=[]
    n=len(lista)
    for i in range (0,n-1,1):
        if (lista[i] > lista[i+1]):
            x.append(1)
        elif (lista[i] < lista[i+1]):
            x.append(2)
        else :
            x.append(0)
    k= sorted(x)
    if (x==k):
        if (0 in x ):
            print("N")
        elif (1 in x and 2 in x):
            print ("S")
        else:
            print("N")
# PROGRAMA PRINCIPAL
n = int(input('Digite a quantidade de elementos da lista: '))
lista=[]
for i in range (0,n,1):
    lista.append(int(input("Digite um elemeto para o seu vetor:" )))
pico(lista)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
888c07976bb9ed42e9facf2f077f76c39b73cdb1
|
5080a829777b85f9f2618b398a8b7a2c34b8b83c
|
/pyvo/__init__.py
|
cc22ced09730551bbe38af2f7b01e2a5e90eb381
|
[] |
no_license
|
kernsuite-debian/pyvo
|
ab037461def921411515f4b690f319976970a7a1
|
ee85c50c5c520ac7bede2d6f18de225c57dedc33
|
refs/heads/master
| 2021-08-07T16:17:11.674702
| 2017-11-08T14:39:19
| 2017-11-08T14:39:19
| 107,262,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,639
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
PyVO is a package providing access to remote data and services of the
Virtual observatory (VO) using Python.
The pyvo module currently provides these main capabilities:
* find archives that provide particular data of a particular type and/or
relates to a particular topic
* regsearch()
* search an archive for datasets of a particular type
* imagesearch(), spectrumsearch()
* do simple searches on catalogs or databases
* conesearch(), linesearch(), tablesearch()
* get information about an object via its name
* resolve(), object2pos(), object2sexapos()
Submodules provide additional functions and classes for greater control over
access to these services.
This module also exposes the exception classes raised by the above functions,
of which DALAccessError is the root parent exception.
"""
#this indicates whether or not we are in the pyvo's setup.py
try:
    _ASTROPY_SETUP_
except NameError:
    from sys import version_info
    if version_info[0] >= 3:
        import builtins
    else:
        import __builtin__ as builtins
    builtins._ASTROPY_SETUP_ = False
    del version_info
try:
    from .version import version as __version__
except ImportError:
    __version__ = '0.0.dev'
try:
    from .version import githash as __githash__
except ImportError:
    __githash__ = ''
def _get_test_runner():
    from astropy.tests.helper import TestRunner
    return TestRunner(__path__[0])
def test(package=None, test_path=None, args=None, plugins=None,
         verbose=False, pastebin=None, remote_data=False, pep8=False,
         pdb=False, coverage=False, open_files=False, **kwargs):
    """
    Run the tests using py.test. A proper set of arguments is constructed and
    passed to `pytest.main`.
    Parameters
    ----------
    package : str, optional
        The name of a specific package to test, e.g. 'io.fits' or 'utils'.
        If nothing is specified all default tests are run.
    test_path : str, optional
        Specify location to test by path. May be a single file or
        directory. Must be specified absolutely or relative to the
        calling directory.
    args : str, optional
        Additional arguments to be passed to `pytest.main` in the `args`
        keyword argument.
    plugins : list, optional
        Plugins to be passed to `pytest.main` in the `plugins` keyword
        argument.
    verbose : bool, optional
        Convenience option to turn on verbose output from py.test. Passing
        True is the same as specifying `-v` in `args`.
    pastebin : {'failed','all',None}, optional
        Convenience option for turning on py.test pastebin output. Set to
        'failed' to upload info for failed tests, or 'all' to upload info
        for all tests.
    remote_data : bool, optional
        Controls whether to run tests marked with @remote_data. These
        tests use online data and are not run by default. Set to True to
        run these tests.
    pep8 : bool, optional
        Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
        tests. Same as specifying `--pep8 -k pep8` in `args`.
    pdb : bool, optional
        Turn on PDB post-mortem analysis for failing tests. Same as
        specifying `--pdb` in `args`.
    coverage : bool, optional
        Generate a test coverage report. The result will be placed in
        the directory htmlcov.
    open_files : bool, optional
        Fail when any tests leave files open. Off by default, because
        this adds extra run time to the test suite. Works only on
        platforms with a working `lsof` command.
    kwargs
        Any additional keywords passed into this function will be passed
        on to the astropy test runner. This allows use of test-related
        functionality implemented in later versions of astropy without
        explicitly updating the package template.
    See Also
    --------
    pytest.main : py.test function wrapped by `run_tests`.
    """
    test_runner = _get_test_runner()
    return test_runner.run_tests(
        package=package, test_path=test_path, args=args,
        plugins=plugins, verbose=verbose, pastebin=pastebin,
        remote_data=remote_data, pep8=pep8, pdb=pdb,
        coverage=coverage, open_files=open_files, **kwargs)
if not _ASTROPY_SETUP_:
    import os
    from warnings import warn
    from astropy import config
    # add these here so we only need to cleanup the namespace at the end
    config_dir = None
    if not os.environ.get('ASTROPY_SKIP_CONFIG_UPDATE', False):
        config_dir = os.path.dirname(__file__)
        try:
            config.configuration.update_default_config(__package__, config_dir)
        except config.configuration.ConfigurationDefaultMissingError as e:
            wmsg = (e.args[0] + " Cannot install default profile. If you are "
                    "importing from source, this is expected.")
            warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
            del e
    del os, warn, config_dir  # clean up namespace
    # make sure we have astropy
    import astropy.io.votable
    from . import registry
    from .dal import ssa, sia, sla, scs, tap
    from .registry import search as regsearch
    from .dal import (
        imagesearch, spectrumsearch, conesearch, linesearch, tablesearch,
        DALAccessError, DALProtocolError, DALFormatError, DALServiceError,
        DALQueryError)
    from .nameresolver import *
__all__ = [
    "imagesearch", "spectrumsearch", "conesearch", "linesearch", "tablesearch",
    "regsearch", "resolve", "object2pos", "object2sexapos" ]
|
[
"gijs@pythonic.nl"
] |
gijs@pythonic.nl
|
ee9fa1df4d941a31ed508d0034c5b7a6d87ed67d
|
c682e03a8394f0b6be4b309789209f7f5a67b878
|
/d12/d12p1.py
|
3a270b901acf7225aa4a4bce0c619c7cf39cf20e
|
[] |
no_license
|
filipmlynarski/Advent-of-Code-2016
|
e84c1d3aa702b5bd387b0aa06ac10a4196574e70
|
b62483971e3e1f79c1e7987374fc9f030f5a0338
|
refs/heads/master
| 2021-08-28T06:43:04.764495
| 2017-12-11T13:00:07
| 2017-12-11T13:00:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
puzle = open('puzzle')
puzzle = []
for i in puzle:
    puzzle.append(i.split('\n')[0])
def is_int(x):
    ints = '1,2,3,4,5,6,7,8,9,0'.split(',')
    for j in ints:
        if j == x:
            return True
a = 0
b = 0
c = 0
d = 0
count = 0
while count < len(puzzle):
    i = puzzle[count]
    if i.split(' ')[0] == 'cpy':
        if i.split(' ')[2] == 'a':
            if is_int(i.split(' ')[1][0]):
                a = int(i.split(' ')[1])
            else:
                if i.split(' ')[1] == 'b':
                    a = b
                elif i.split(' ')[1] == 'c':
                    a = c
                else:
                    a = d
        elif i.split(' ')[2] == 'b':
            if is_int(i.split(' ')[1][0]):
                b = int(i.split(' ')[1])
            else:
                if i.split(' ')[1] == 'a':
                    b = a
                elif i.split(' ')[1] == 'c':
                    b = c
                else:
                    b = d
        elif i.split(' ')[2] == 'c':
            if is_int(i.split(' ')[1][0]):
                c = int(i.split(' ')[1])
            else:
                if i.split(' ')[1] == 'b':
                    c = b
                elif i.split(' ')[1] == 'a':
                    c = a
                else:
                    c = d
        elif i.split(' ')[2] == 'd':
            if is_int(i.split(' ')[1][0]):
                d = int(i.split(' ')[1])
            else:
                if i.split(' ')[2] == 'b':
                    d = b
                elif i.split(' ')[2] == 'c':
                    d = c
                else:
                    d = a
    elif i.split(' ')[0] == 'inc':
        if i.split(' ')[1] == 'a':
            a += 1
        elif i.split(' ')[1] == 'b':
            b += 1
        elif i.split(' ')[1] == 'c':
            c += 1
        else:
            d += 1
    elif i.split(' ')[0] == 'dec':
        if i.split(' ')[1] == 'a':
            a -= 1
        elif i.split(' ')[1] == 'b':
            b -= 1
        elif i.split(' ')[1] == 'c':
            c -= 1
        else:
            d -= 1
    elif i.split(' ')[0] == 'jnz':
        if (is_int(i.split(' ')[1][0]) and i.split(' ')[1] != '0'):
            count += int(i.split(' ')[2]) - 1
        elif (i.split(' ')[1] == 'a' and a != 0):
            count += int(i.split(' ')[2]) - 1
        elif (i.split(' ')[1] == 'b' and b != 0):
            count += int(i.split(' ')[2]) - 1
        elif (i.split(' ')[1] == 'c' and c != 0):
            count += int(i.split(' ')[2]) - 1
        elif (i.split(' ')[1] == 'd' and d != 0):
            count += int(i.split(' ')[2]) - 1
    count += 1
print count
print a
|
[
"fmynarski@gmail.com"
] |
fmynarski@gmail.com
|
52797b9cba609b57070454e6614cc01f745736b8
|
0beb76303c915431ada62f2fbe9cf9f803667f2e
|
/questions/maximum-binary-tree/Solution.py
|
2509fd3a8872330c2c30d32a6b32a3c76748a6e5
|
[
"MIT"
] |
permissive
|
ShaoCorn/leetcode-solutions
|
ad6eaf93eadd9354fd51f5ae93c6b6115174f936
|
07ee14ba3d3ad7a9f5164ec72f253997c6de6fa5
|
refs/heads/master
| 2023-03-19T00:44:33.928623
| 2021-03-13T01:44:55
| 2021-03-13T01:44:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,878
|
py
|
"""
You are given an integer array nums with no duplicates. A maximum binary tree can be built recursively from nums using the following algorithm:
Create a root node whose value is the maximum value in nums.
Recursively build the left subtree on the subarray prefix to the left of the maximum value.
Recursively build the right subtree on the subarray suffix to the right of the maximum value.
Return the maximum binary tree built from nums.
Example 1:
Input: nums = [3,2,1,6,0,5]
Output: [6,3,5,null,2,0,null,null,1]
Explanation: The recursive calls are as follow:
- The largest value in [3,2,1,6,0,5] is 6. Left prefix is [3,2,1] and right suffix is [0,5].
- The largest value in [3,2,1] is 3. Left prefix is [] and right suffix is [2,1].
- Empty array, so no child.
- The largest value in [2,1] is 2. Left prefix is [] and right suffix is [1].
- Empty array, so no child.
- Only one element, so child is a node with value 1.
- The largest value in [0,5] is 5. Left prefix is [0] and right suffix is [].
- Only one element, so child is a node with value 0.
- Empty array, so no child.
Example 2:
Input: nums = [3,2,1]
Output: [3,null,2,null,1]
Constraints:
1 <= nums.length <= 1000
0 <= nums[i] <= 1000
All integers in nums are unique.
"""
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
        stk = []
        for n in nums:
            node = TreeNode(n)
            while stk and stk[-1].val < n:
                node.left = stk.pop()
            if stk:
                stk[-1].right = node
            stk.append(node)
        return stk[0]
|
[
"franklingujunchao@gmail.com"
] |
franklingujunchao@gmail.com
|
e407556606fcbe38ecf08e8a07f0d038a65c200f
|
ec53949dafa4b6ad675d679b05ed7c83fef2c69a
|
/DataStructuresAndAlgo/LinkedList/SingleCircular/searchSingleCircular.py
|
689ea541a0a5fb820a3180d868aef7d6eaf128b7
|
[] |
no_license
|
tpotjj/Python
|
9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a
|
ca73c116ada4d05c0c565508163557744c86fc76
|
refs/heads/master
| 2023-07-11T16:37:10.039522
| 2021-08-14T11:17:55
| 2021-08-14T11:17:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,431
|
py
|
class Node:
    def __init__(self, value=None):
        self.value = value
        self.next = None
class CircularSingleLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None
    def __iter__(self):
        node = self.head
        while node:
            yield node
            node = node.next
            if node == self.tail.next:
                break
    def createCSLL(self, nodeValue):
        node = Node(nodeValue)
        node.next = node
        self.head = node
        self.tail = node
        return "CSLL is created"
    def insertCSLL(self, value, location):
        if self.head is None:
            return "The linkedlist does not exist"
        else:
            newNode = Node(value)
            if location == 0:
                newNode.next = self.head
                self.head = newNode
                self.tail.next = newNode
            elif location == 1:
                newNode.next = self.tail.next
                self.tail.next = newNode
                self.tail = newNode
            else:
                tempNode = self.head
                index = 0
                while index < location -1:
                    tempNode = tempNode.next
                    index += 1
                nextNode = tempNode.next
                tempNode.next = newNode
                newNode.next = nextNode
        return "Insertion completed"
    def traverseCSLL(self):
        if self.head is None:
            return "The linked list does not contain any node"
        else:
            tempNode = self.head
            while tempNode:
                print(tempNode.value)
                tempNode = tempNode.next
                if tempNode == self.tail.next:
                    break
    def searchCSLL(self, nodeValue):
        if self.head is None:
            return "The linked list does not contain any node"
        else:
            tempNode = self.head
            while tempNode:
                if tempNode.value == nodeValue:
                    return tempNode.value
                tempNode = tempNode.next
                if tempNode == self.tail.next:
                    return "The node does noet exist in this CSLL"
csll = CircularSingleLinkedList()
csll.createCSLL(1)
csll.insertCSLL(0, 0)
csll.insertCSLL(2, 1)
csll.insertCSLL(3, 2)
csll.traverseCSLL()
print(csll.searchCSLL(4))
print([node.value for node in csll])
|
[
"joris97jansen@gmail.com"
] |
joris97jansen@gmail.com
|
6e53b1e376aede8e6976aa0651c0f0be160d2b0d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02675/s931606976.py
|
56113375870acd3ee74e593566c92faeeefac768
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
n = list(str(input()))
if n[-1] =='3':
    print('bon')
    exit()
if n[-1] =='0'or n[-1] =='1' or n[-1] =='6' or n[-1] == '8':
    print('pon')
    exit()
print('hon')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
892b5b9906e360a8169a5689ac2cb443c84abeef
|
4a0537a45c8aa1420d4686f7882ee741f32bbbf0
|
/servee_document/__init__.py
|
66abf3ed228dfb4724787c3a31194bc2dcd7e5f1
|
[
"BSD-3-Clause"
] |
permissive
|
servee/django-servee-document
|
b982204bc4d46d1f937da6ff47ff7b17b354f2b5
|
99d1a3970dbcb38d1b84ed6687bb709e89cc6a86
|
refs/heads/master
| 2021-01-19T10:29:04.427783
| 2017-01-18T22:16:43
| 2017-01-18T22:16:43
| 1,505,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
VERSION = (0, 1, 0, "a", 1) # following PEP 386
DEV_N = 1
def get_version():
    version = "%s.%s" % (VERSION[0], VERSION[1])
    if VERSION[2]:
        version = "%s.%s" % (version, VERSION[2])
    if VERSION[3] != "f":
        version = "%s%s%s" % (version, VERSION[3], VERSION[4])
        if DEV_N:
            version = "%s.dev%s" % (version, DEV_N)
    return version
__version__ = get_version()
|
[
"issac.kelly@gmail.com"
] |
issac.kelly@gmail.com
|
298a3fca6d8a1714293ac0664d61974996d18ffd
|
ed269e9a4d9d6bfbb833381b7aef65a23f391fe2
|
/比赛/1685. 有序数组中差绝对值之和.py
|
b9089483502c2c3b47da1d63fd0a60dc01a825b3
|
[] |
no_license
|
Comyn-Echo/leeCode
|
fcff0d4c4c10209a47bd7c3204e3f64565674c91
|
67e9daecb7ffd8f7bcb2f120ad892498b1219327
|
refs/heads/master
| 2023-04-28T17:35:52.963069
| 2021-05-19T01:52:16
| 2021-05-19T01:52:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
import math
class Solution(object):
    def getSumAbsoluteDifferences(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        length = len(nums)
        cur = 0
        preSum = [0 for i in range(length+1)]
        for i in range(length):
            cur += nums[i]
            preSum[i+1] = cur
        # print(preSum)
        ans = []
        for index, i in enumerate(nums):
            leftSum = preSum[index]
            left = index
            right = length - left -1
            rightSum = preSum[length] - preSum[index+1]
            # print(leftSum, rightSum)
            now = math.fabs(i* left - leftSum) + math.fabs(i* right - rightSum)
            print(now)
            ans.append(int(now))
        return ans
Solution.getSumAbsoluteDifferences(None,[2,3,5])
|
[
"2892211452aa@gmail.com"
] |
2892211452aa@gmail.com
|
419e0aa50ad504f2287e3a41edc23acac17a9c8f
|
bb613b9eb6f5279b25908515d1e17d4dff68186b
|
/tests/localization_tests/test_ko.py
|
207d9029c71b2219779ec2a0a2e139cdac9767c3
|
[
"MIT"
] |
permissive
|
mayfield/pendulum
|
eb0b9c66f89a5d164446e728b8f8bc8e8d7f47d9
|
bd7e9531bda35c45ddf794138c9967d9454209d4
|
refs/heads/master
| 2021-01-17T08:30:18.122524
| 2016-08-24T22:29:50
| 2016-08-24T22:29:50
| 66,504,189
| 0
| 0
| null | 2016-08-24T22:24:29
| 2016-08-24T22:24:28
| null |
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
# -*- coding: utf-8 -*-
from pendulum import Pendulum
from .. import AbstractTestCase
from . import AbstractLocalizationTestCase
class KoTest(AbstractLocalizationTestCase, AbstractTestCase):
    locale = 'ko'
    def diff_for_humans(self):
        with self.wrap_with_test_now():
            d = Pendulum.now().subtract(seconds=1)
            self.assertEqual('1 초 전', d.diff_for_humans())
            d = Pendulum.now().subtract(seconds=2)
            self.assertEqual('2 초 전', d.diff_for_humans())
            d = Pendulum.now().subtract(minutes=1)
            self.assertEqual('1 분 전', d.diff_for_humans())
            d = Pendulum.now().subtract(minutes=2)
            self.assertEqual('2 분 전', d.diff_for_humans())
            d = Pendulum.now().subtract(hours=1)
            self.assertEqual('1 시간 전', d.diff_for_humans())
            d = Pendulum.now().subtract(hours=2)
            self.assertEqual('2 시간 전', d.diff_for_humans())
            d = Pendulum.now().subtract(days=1)
            self.assertEqual('1 일 전', d.diff_for_humans())
            d = Pendulum.now().subtract(days=2)
            self.assertEqual('2 일 전', d.diff_for_humans())
            d = Pendulum.now().subtract(weeks=1)
            self.assertEqual('1 주일 전', d.diff_for_humans())
            d = Pendulum.now().subtract(weeks=2)
            self.assertEqual('2 주일 전', d.diff_for_humans())
            d = Pendulum.now().subtract(months=1)
            self.assertEqual('1 개월 전', d.diff_for_humans())
            d = Pendulum.now().subtract(months=2)
            self.assertEqual('2 개월 전', d.diff_for_humans())
            d = Pendulum.now().subtract(years=1)
            self.assertEqual('1 년 전', d.diff_for_humans())
            d = Pendulum.now().subtract(years=2)
            self.assertEqual('2 년 전', d.diff_for_humans())
            d = Pendulum.now().add(seconds=1)
            self.assertEqual('1 초 후', d.diff_for_humans())
            d = Pendulum.now().add(seconds=1)
            d2 = Pendulum.now()
            self.assertEqual('1 초 뒤', d.diff_for_humans(d2))
            self.assertEqual('1 초 앞', d2.diff_for_humans(d))
            self.assertEqual('1 초', d.diff_for_humans(d2, True))
            self.assertEqual('2 초', d2.diff_for_humans(d.add(seconds=1), True))
|
[
"sebastien.eustace@gmail.com"
] |
sebastien.eustace@gmail.com
|
7414fd98d42d6e768a6f1ee6d810c5b17e116d26
|
fce5eda4745578557f7120104188c2437529b98f
|
/loops/while/programa_enquete.py
|
2b558d6a9ae86ed8bf432898056552414884c0b8
|
[] |
no_license
|
weguri/python
|
70e61584e8072125a4b4c57e73284ee4eb10f33b
|
d5195f82428104d85b0e6215b75e31ee260e5370
|
refs/heads/master
| 2022-12-01T08:26:36.248787
| 2020-08-23T03:30:46
| 2020-08-23T03:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
respostas = dict()
# Define uma flag para indicar que a enquete está ativa
votacao_ativa = True
while votacao_ativa:
    # Pede o nome da pessoa e a resposta
    nome = input("\nQual é o seu nome? ")
    resposta = input("Qual é o seu animal favorito? ")
    if resposta == "":
        continue
    # Armazena a resposta no dicionario
    respostas[nome] = resposta
    # Perguntar se deseja continuar ou não
    repetir = input("Deseja incluir mais algum animal?(S/N) ")
    if repetir.lower() == 'n':
        votacao_ativa = False
# A enquete foi concluída. Mostra os resultados
print('\n', '-' * 12, 'Resultado', '-' * 12)
for nome, resposta in respostas.items():
    print("%s é o animal favorito do %s" % (resposta, nome))
|
[
"welguri@gmail.com"
] |
welguri@gmail.com
|
08ff099c5ed7b9163cedf928c7367bd903d5c48c
|
ea1af1a564f96fb36974aa094192877598b0c6bf
|
/Chapter7/Samples/even_or_odd.py
|
b9586fb2f08798bd5b1d1f990db9f1d19861d275
|
[] |
no_license
|
GSantos23/Crash_Course
|
63eecd13a60141e520b5ca4351341c21c4782801
|
4a5fc0cb9ce987948a728d43c4f266d34ba49a87
|
refs/heads/master
| 2020-03-20T23:20:43.201255
| 2018-08-21T01:13:06
| 2018-08-21T01:13:06
| 137,841,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
# Sample 7.4
number = input("Enter a number, and I'll tell you if it's even or odd: ")
number = int(number)
if number % 2 == 0:
    print("\nThe number " + str(number) + " is even.")
else:
    print("\nThe number " + str(number) + " is odd.")
|
[
"santosgerson64@gmail.com"
] |
santosgerson64@gmail.com
|
611c4a17b89df4081e481c466963b09963030328
|
deaf5d0574494c06c0244be4b4f93ffa9b4e9e00
|
/pandas_ml/skaccessors/covariance.py
|
aca597e1c5f6debb5f0994ee6c690562fff85bf8
|
[] |
no_license
|
Mars-Wei/pandas-ml
|
71db18a6f4e0c4fbe3ba8a5390d39ffb5ffd7db6
|
994197dfbf57e289e9f3fce2cb90d109b0afbbe3
|
refs/heads/master
| 2021-01-20T17:13:29.139122
| 2015-11-01T00:07:46
| 2015-11-01T00:07:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,618
|
py
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
from pandas_ml.core.accessor import _AccessorMethods
class CovarianceMethods(_AccessorMethods):
    """
    Accessor to ``sklearn.covariance``.
    """
    _module_name = 'sklearn.covariance'
    def empirical_covariance(self, *args, **kwargs):
        """
        Call ``sklearn.covariance.empirical_covariance`` using automatic mapping.
        - ``X``: ``ModelFrame.data``
        """
        func = self._module.empirical_covariance
        data = self._data
        covariance = func(data.values, *args, **kwargs)
        covariance = self._constructor(covariance, index=data.columns, columns=data.columns)
        return covariance
    def ledoit_wolf(self, *args, **kwargs):
        """
        Call ``sklearn.covariance.ledoit_wolf`` using automatic mapping.
        - ``X``: ``ModelFrame.data``
        """
        func = self._module.ledoit_wolf
        data = self._data
        shrunk_cov, shrinkage = func(data.values, *args, **kwargs)
        shrunk_cov = self._constructor(shrunk_cov, index=data.columns, columns=data.columns)
        return shrunk_cov, shrinkage
    def oas(self, *args, **kwargs):
        """
        Call ``sklearn.covariance.oas`` using automatic mapping.
        - ``X``: ``ModelFrame.data``
        """
        func = self._module.oas
        data = self._data
        shrunk_cov, shrinkage = func(data.values, *args, **kwargs)
        shrunk_cov = self._constructor(shrunk_cov, index=data.columns, columns=data.columns)
        return shrunk_cov, shrinkage
|
[
"sinhrks@gmail.com"
] |
sinhrks@gmail.com
|
56dbb2cdb998061e64756b96835adf91b0b9d505
|
8fce2bc291452d88f883616c6610d9e0cc6609f7
|
/util/label_map_util.py
|
916ee37f3a391c0be3e21d59a33c3be18deb5bfa
|
[
"ISC"
] |
permissive
|
BlueLens/bl-api-search
|
02830ef35d1e9dee659c6b8c1e36b0077c16fdc9
|
bf213776abb3e969cb63477a68f9f0a1c537eca2
|
refs/heads/master
| 2021-07-24T03:20:08.449203
| 2017-11-04T15:39:04
| 2017-11-04T15:39:04
| 105,105,987
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,367
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
import tensorflow as tf
from google.protobuf import text_format
from . import string_int_label_map_pb2
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Loads label map proto and returns categories list compatible with eval.
This function loads a label map and returns a list of dicts, each of which
has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False of if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info('Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
name = item.name
code = item.display_name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name, 'code': code})
return categories
# TODO: double check documentation.
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
return label_map
def get_label_map_dict(label_map_path):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path: path to label_map.
Returns:
A dictionary mapping label names to id.
"""
label_map = load_labelmap(label_map_path)
label_map_dict = {}
for item in label_map.item:
label_map_dict[item.name] = item.id
return label_map_dict
|
[
"master@bluehack.net"
] |
master@bluehack.net
|
5173c9eb2fab5fd8da633920ab0ff53a7ce5e390
|
e2a63481c05e08fdcd2243946f813c5f8d5c2e99
|
/update_features.py
|
a7a4c8427ec6e8855f0698066eed45b218e86bdc
|
[
"Apache-2.0"
] |
permissive
|
mapsme/cf_audit
|
3127bc1b36b5c080387766b85d808f5e16124895
|
1089ad5b6ee74ee2bf7953a972062068f3f3f8ab
|
refs/heads/master
| 2023-01-31T04:16:07.769088
| 2023-01-22T15:24:07
| 2023-01-22T15:24:07
| 111,695,225
| 6
| 9
|
Apache-2.0
| 2023-01-22T15:24:09
| 2017-11-22T14:36:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
#!/usr/bin/env python
import os
import sys
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, BASE_DIR)
PYTHON = 'python2.7'
VENV_DIR = os.path.join(BASE_DIR, 'venv', 'lib', PYTHON, 'site-packages')
if os.path.exists(VENV_DIR):
sys.path.insert(1, VENV_DIR)
import codecs
import datetime
import logging
import json
from www.db import Project, database
from www.util import update_features, update_features_cache
if len(sys.argv) < 3:
print "Usage: {} <project_id> <features.json> [<audit.json>]".format(sys.argv[0])
sys.exit(1)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%H:%M:%S')
logging.info('Reading JSON files')
if sys.argv[2] == '-':
features = []
else:
with codecs.open(sys.argv[2], 'r', 'utf-8') as f:
features = json.load(f)['features']
audit = None
if len(sys.argv) > 3:
with codecs.open(sys.argv[3], 'r', 'utf-8') as f:
audit = json.load(f)
if not features and not audit:
logging.error("No features read")
sys.exit(2)
try:
project = Project.get(Project.name == sys.argv[1])
except Project.DoesNotExist:
logging.error("No such project: %s", sys.argv[1])
sys.exit(2)
logging.info('Updating features')
proj_audit = json.loads(project.audit or '{}')
if audit:
proj_audit.update(audit)
project.audit = json.dumps(proj_audit, ensure_ascii=False)
project.updated = datetime.datetime.utcnow().date()
with database.atomic():
update_features(project, features, proj_audit)
logging.info('Updating the feature cache')
update_features_cache(project)
project.save()
|
[
"zverik@textual.ru"
] |
zverik@textual.ru
|
60e23adbd3b4692652d12167c566829b3c70cb6d
|
fce003f93476ec393e0fc2f7255e9e2367e8f07e
|
/generateParantheses.py
|
a8e6e57d647ae03f1b1157c48b176e1feb09c0c6
|
[] |
no_license
|
WillLuong97/Back-Tracking
|
f3f6cb9f31dd3e59ed3826cfbdfa5972d6277e01
|
54bfe83f4bd6c7fef23a2a15cffcaa40129250cb
|
refs/heads/master
| 2023-07-02T05:20:52.510639
| 2021-08-11T00:37:41
| 2021-08-11T00:37:41
| 287,618,480
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
# Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
# For example, given n = 3, a solution set is:
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
#first approach - Brute Force
def generateParenthesis(n):
def generate(A = []):
# print(A)
if len(A) == 2*n:
if valid(A):
ans.append("".join(A))
else:
A.append('(')
generate(A)
A.pop()
A.append(')')
generate(A)
A.pop()
def valid(A):
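        # A bracket string is valid iff its running balance never dips below zero and ends at zero.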
bal = 0
for c in A:
if c == '(': bal += 1
else: bal -= 1
if bal < 0: return False
return bal == 0
ans = []
generate()
return ans
#second approach - using back tracking:
def generateParenthesis_BackTracking(n):
retStr = []
#back tracking
def backTracking(parenString = "", opening_bracket_index = 0 , closing_bracket_index = 0):
        # if the string has reached the full length of n pairs, record it:
if(len(parenString) == 2 * n):
retStr.append(parenString)
        # add an opening parenthesis to the parentheses string:
if opening_bracket_index < n:
backTracking(parenString + '(', opening_bracket_index + 1, closing_bracket_index)
        # add a closing parenthesis to the string
if closing_bracket_index < opening_bracket_index:
backTracking(parenString + ')', opening_bracket_index, closing_bracket_index + 1)
backTracking()
return retStr
def main():
print(generateParenthesis(2))
print("")
print(generateParenthesis_BackTracking(2))
pass
main()
|
[
"tluong@stedwards.edu"
] |
tluong@stedwards.edu
|
d2d09e0416267edf3afd5d46e8489754f3ce3e27
|
611c184838b8c5cfafe61c9877a32606e2d435eb
|
/OtherScripts/Split.py
|
af567a81ee36eb111cae70b69564fdc920ed6100
|
[] |
no_license
|
minghao2016/protein_structure_clustering
|
c6ac06c15f5ca03d506ec6ced51bd70d4838eaa0
|
3e709bf370071d2bf16cb24b0d0d9779ca005c3e
|
refs/heads/master
| 2022-01-20T15:46:33.694778
| 2019-04-12T17:03:25
| 2019-04-12T17:03:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
x = 'C:/Users/pedro.arguelles/Desktop/Repos/oi-mua/src/OI.FFM.InboundServices/pas/'
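# Keep only the path components after the seventh one, then swap the forward slashes for backslashes.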
y = '/'.join(str(x).split('/')[7:])
y = str(y).replace('/','\\')
print(y)
|
[
"noreply@github.com"
] |
minghao2016.noreply@github.com
|
695541aeff8d2f878246fea73c798f9f927e6ce0
|
ed702dcb76a85d815d322c426d62f9f3f213b137
|
/light.py
|
dbe24f48bfdd429821444d2e6082eca1ae33dd1e
|
[] |
no_license
|
jack1806/Lamp
|
5f9d400eb34b224c96dcacec3834c901f4ad0a1a
|
9271bccecd47d4d3924fe311c0d8cff0e7e0d490
|
refs/heads/master
| 2020-03-26T20:21:38.078186
| 2018-08-19T16:26:07
| 2018-08-19T16:26:07
| 145,319,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
#!/usr/bin/python3
import argparse
import requests
LED_ON = "http://192.168.4.1/led/0"
LED_OFF = "http://192.168.4.1/led/1"
def req(url):
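    # Hit the lamp's LED endpoint; any non-empty response body counts as success.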
try:
request = requests.get(url)
response = request.text
if response:
return 0
else:
print("Something went wrong!")
    except (requests.ConnectionError, requests.ConnectTimeout):
print("Something went wrong!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="It works!")
parser.add_argument("mode", type=str, metavar="on/off")
args = parser.parse_args()
print(args.mode)
# parser.add_argument("-on", help="Turn on", action="store_true", default=False)
# parser.add_argument("-off", help="Turn off", action="store_true", default=False)
# args = parser.parse_args()
# if args.on:
# req(LED_ON)
# elif args.off:
# req(LED_OFF)
# else:
# parser.print_help()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
1fee2606104089bb18dc89e6b2349bdbb11e5e26
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/UnTagResourcesRequest.py
|
3b7a375a83fe401cbc85e1de3ee25a29e7df3a56
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,950
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class UnTagResourcesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'UnTagResources','hbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_All(self):
return self.get_query_params().get('All')
def set_All(self,All):
self.add_query_param('All',All)
def get_ResourceIds(self):
return self.get_query_params().get('ResourceId')
def set_ResourceIds(self, ResourceIds):
for depth1 in range(len(ResourceIds)):
if ResourceIds[depth1] is not None:
self.add_query_param('ResourceId.' + str(depth1 + 1) , ResourceIds[depth1])
def get_TagKeys(self):
return self.get_query_params().get('TagKey')
def set_TagKeys(self, TagKeys):
for depth1 in range(len(TagKeys)):
if TagKeys[depth1] is not None:
self.add_query_param('TagKey.' + str(depth1 + 1) , TagKeys[depth1])
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
39b9a6cb194a618d38d92f0d437d3b47363248c9
|
1a7e621312f88bc940e33ee5ff9ca5ac247f2bc9
|
/venv/bin/django-admin.py
|
e07af0189af5844d527efeef517bb577881fadd1
|
[] |
no_license
|
hirossan4049/ZisakuZitenAPI
|
9c2ef8de5c197353a33f58518d60aff304b8d2df
|
439f202b4939059b42c771960ad579048737f3d7
|
refs/heads/master
| 2022-05-04T12:08:39.670493
| 2020-01-11T06:23:41
| 2020-01-11T06:23:41
| 225,121,453
| 0
| 1
| null | 2022-04-22T22:50:05
| 2019-12-01T07:14:23
|
Python
|
UTF-8
|
Python
| false
| false
| 179
|
py
|
#!/Users/Linear/Desktop/pythonnnnn/ZisakuZitenRestServer/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"haruto405329@gmail.com"
] |
haruto405329@gmail.com
|
601bc5df1c1b8dc0775b683e62fc763c59b76786
|
afa2ebb439e6592caf42c507a789833b9fbf44b2
|
/supervised_learning/0x03-optimization/11-learning_rate_decay.py
|
040b4379fbcdd158d5e82d23cdbf111a9811b6bc
|
[] |
no_license
|
anaruzz/holbertonschool-machine_learning
|
64c66a0f1d489434dd0946193747ed296760e6c8
|
91300120d38acb6440a6dbb8c408b1193c07de88
|
refs/heads/master
| 2023-07-30T20:09:30.416167
| 2021-09-23T16:22:40
| 2021-09-23T16:22:40
| 279,293,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
#!/usr/bin/env python3
"""
Script that updates a variable in place using
inverse time decay in numpy
"""
import numpy as np
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
"""
returns the updated value for alpha
"""
alpha /= (1 + decay_rate * (global_step // decay_step))
return alpha
|
[
"laabidigh@gmail.com"
] |
laabidigh@gmail.com
|
a950e1fea6e22a293fa8d134164513e4fd5e63df
|
4ce94e6fdfb55a889a0e7c4788fa95d2649f7bca
|
/User/apps/logreg/views.py
|
26ada8889c8fa75821a4cceb627c5948d6d94bde
|
[] |
no_license
|
HaochengYang/Django-class-assignment
|
4018d8eb0619a99ebe8c3e47346d29934aafc66b
|
cb8f920f432209f88c810407ca646ee7dec82e22
|
refs/heads/master
| 2021-06-08T20:05:22.876794
| 2016-12-19T23:39:22
| 2016-12-19T23:39:22
| 75,032,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
from django.shortcuts import render, redirect
from .models import User
from django.contrib import messages
# Create your views here.
def index(request):
return render(request, 'logreg/index.html')
def register(request):
response = User.objects.add_user(request.POST)
if response['status']:
        # successfully added a new user here
request.session['user_id'] = response['new_user'].id
request.session['user_first_name'] = response['new_user'].first_name
request.session['user_last_name'] = response['new_user'].last_name
return redirect('logreg:main')
else:
for error in response['errors']:
messages.error(request, error)
return redirect('logreg:index')
def login(request):
response = User.objects.check_user(request.POST)
if response['status']:
        # successfully logged the user in here
request.session['user_id'] = response['login_user'].id
request.session['user_first_name'] = response['login_user'].first_name
request.session['user_last_name'] = response['login_user'].last_name
return redirect('logreg:main')
else:
        # failed to validate
for error in response['errors']:
messages.error(request, error)
return redirect('logreg:index')
def main(request):
return render(request, 'logreg/success.html')
def logout(request):
request.session.clear()
return redirect('logreg:index')
|
[
"haocheng0906@gmail.com"
] |
haocheng0906@gmail.com
|
8a7bc189c27f77d9317613f60f7e3bc016ff5c8e
|
2ed0ab730b62665b3a36841ab006eea961116f87
|
/Hash/ValidSoduko.py
|
ef9721fb40020f4c7aa19f5c56366347684f6f3b
|
[] |
no_license
|
scarlettlite/hackathon
|
0f0a345d867b9e52823f10fe67c6ec210a40945f
|
179ba9038bbed4d48cb2f044fd8430cf2be2bab3
|
refs/heads/master
| 2021-07-04T00:55:17.665292
| 2019-03-04T09:10:59
| 2019-03-04T09:10:59
| 141,269,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
from collections import defaultdict
class Solution:
def __init__(self):
arr = [(0,2), (3,5), (6,8)]
self.sq = [(a,b,c,d) for a,b in arr for c,d in arr]
def getsqr(self, ir, ic):
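        # Return the (row_start, row_end, col_start, col_end) bounds of the 3x3 box containing cell (ir, ic).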
for a,b,c,d in self.sq:
if a <= ir <= b and c <= ic <= d:
return a,b,c,d
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
rows = defaultdict(set)
cols = defaultdict(set)
sqrs = defaultdict(set)
for i, row in enumerate(board):
for j, x in enumerate(row):
if x == '.': continue
if x not in rows[i]:
rows[i].add(x)
else:
return False
if x not in cols[j]:
cols[j].add(x)
else:
return False
t = self.getsqr(i, j)
if x not in sqrs[t]:
sqrs[t].add(x)
else:
return False
return True
print(Solution().isValidSudoku([
["8","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]
]))
|
[
"shivanirathore496@gmail.com"
] |
shivanirathore496@gmail.com
|
43dd394b16bcb1affa4035fe5a3d08f9a9a88fa1
|
6527b66fd08d9e7f833973adf421faccd8b765f5
|
/yuancloud/recicler/l10n_jp/__yuancloud__.py
|
b492c6d6207e9dc3a4ba55e08b14acdd16a2b3e3
|
[] |
no_license
|
cash2one/yuancloud
|
9a41933514e57167afb70cb5daba7f352673fb4d
|
5a4fd72991c846d5cb7c5082f6bdfef5b2bca572
|
refs/heads/master
| 2021-06-19T22:11:08.260079
| 2017-06-29T06:26:15
| 2017-06-29T06:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# -*- coding: utf-8 -*-
# Part of YuanCloud. See LICENSE file for full copyright and licensing details.
# Copyright (C) Rooms For (Hong Kong) Limited T/A OSCG
{
'name': 'Japan - Accounting',
'version': '1.2',
'category' : 'Finance Management',
'description': """
Overview:
---------
* Chart of Accounts and Taxes template for companies in Japan.
* This probably does not cover all the necessary accounts for a company. \
You are expected to add/delete/modify accounts based on this template.
Note:
-----
* Fiscal positions '内税' and '外税' have been added to handle special \
requirements which might arise from POS implementation. [1] You may not \
need to use these at all under normal circumstances.
[1] See https://github.com/yuancloud/yuancloud/pull/6470 for detail.
""",
'author': 'Rooms For (Hong Kong) Limited T/A OSCG',
'website': 'http://www.yuancloud-asia.net/',
'depends': ['account'],
'data': [
'data/account_chart_template.xml',
'data/account.account.template.csv',
'data/account.tax.template.csv',
'data/account_chart_template_after.xml',
'data/account_chart_template.yml',
'data/account.fiscal.position.template.csv',
],
'installable': True,
}
|
[
"liuganghao@lztogether.com"
] |
liuganghao@lztogether.com
|
6c8ce69edeaeec26ac063384011a0af1deeb31ac
|
082246f32a7972abdb674f424d3ba250666a8eb5
|
/Demo/PyQt4/Sample Files/Logo.py
|
bc0ff66efb867e458c6c0a1cd88140624a59c61c
|
[] |
no_license
|
maxale/Data-Mining
|
4ef8c8a4403a9b1eb64dbec94414b8cf865134a7
|
19edff15047a2cce90515dae1d6c3d280284fc2a
|
refs/heads/master
| 2023-04-29T19:42:23.586079
| 2023-04-24T14:59:07
| 2023-04-24T14:59:07
| 322,360,530
| 1
| 0
| null | 2023-03-29T21:02:45
| 2020-12-17T17:05:24
| null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
import sys
from PyQt4 import QtGui, QtSvg
app = QtGui.QApplication(sys.argv)
svgWidget = QtSvg.QSvgWidget('pic1.svg')
svgWidget.setGeometry(50,50,759,668)
svgWidget.show()
sys.exit(app.exec_())
|
[
"amir.h.jafari@okstate.edu"
] |
amir.h.jafari@okstate.edu
|
183ea05ca67621b0b058eddd765cbe6d2b39188f
|
80fd32cb735bfd288c4fb9be1280146f5cf15210
|
/ditto/__init__.py
|
c61386e62de95b8c57e65526c1ef615a8bebac77
|
[
"BSD-3-Clause"
] |
permissive
|
NREL/ditto
|
c8e44ea04272b750dcbbaef2bfc33eb340822eb1
|
41b93f954af5836cbe5986add0c104b19dc22fde
|
refs/heads/master
| 2023-08-23T02:41:59.653838
| 2023-07-11T16:25:38
| 2023-07-11T16:25:38
| 121,418,744
| 57
| 43
|
BSD-3-Clause
| 2023-07-11T16:25:40
| 2018-02-13T18:19:47
|
Python
|
UTF-8
|
Python
| false
| false
| 208
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
__author__ = """Tarek Elgindy"""
__email__ = "tarek.elgindy@nrel.gov"
__version__ = "0.1.0"
from .store import Store
|
[
"kdheepak89@gmail.com"
] |
kdheepak89@gmail.com
|
832e744fb6433173675ac4d52a40613a22346536
|
14164dfdc5f316ff259519d1aeb8671dad1b9749
|
/lib/loaf/slack_api/web_client/team.py
|
398e0b8ae939f27a2449c005838e4dd0536dec83
|
[
"MIT"
] |
permissive
|
cbehan/loaf
|
4b537f75c97c1e78ef5d178ac59379460452648a
|
cb9c4edd33a33ff1d5a1931deb6705ddfe82d459
|
refs/heads/master
| 2021-12-14T15:04:15.568615
| 2021-12-02T22:47:08
| 2021-12-02T22:47:08
| 131,346,943
| 0
| 0
| null | 2018-04-27T21:34:34
| 2018-04-27T21:34:33
| null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
class Team:
def __init__(self, client):
self.client = client
async def info(self):
result = await self.client.api_call('GET', 'team.info')
return result['team']
|
[
"nick.beeuwsaert@gmail.com"
] |
nick.beeuwsaert@gmail.com
|
e5d84d7646621256ddc5234054836df2021abe99
|
2f55769e4d6bc71bb8ca29399d3809b6d368cf28
|
/Miniconda2/Lib/site-packages/sklearn/feature_selection/tests/test_base.py
|
2e118b4b00b6cd2416b175913c43056efa022a37
|
[] |
no_license
|
jian9695/GSV2SVF
|
e5ec08b2d37dbc64a461449f73eb7388de8ef233
|
6ed92dac13ea13dfca80f2c0336ea7006a6fce87
|
refs/heads/master
| 2023-03-02T03:35:17.033360
| 2023-02-27T02:01:48
| 2023-02-27T02:01:48
| 199,570,103
| 9
| 16
| null | 2022-10-28T14:31:05
| 2019-07-30T03:47:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,796
|
py
|
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
|
[
"JLiang@esri.com"
] |
JLiang@esri.com
|
f99600be5c8c03928c69180bccdb942a3fd04a83
|
bc441bb06b8948288f110af63feda4e798f30225
|
/container_sdk/api/hpa/hpa_client.py
|
255789842ee9a30e61634e383487feb93b1428f1
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,468
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
import container_sdk.api.hpa.delete_hpa_pb2
import google.protobuf.empty_pb2
import container_sdk.api.hpa.update_pb2
import container_sdk.model.container.hpa_pb2
import container_sdk.utils.http_util
import google.protobuf.json_format
class HpaClient(object):
def __init__(self, server_ip="", server_port=0, service_name="", host=""):
"""
        Initialize the client.
        :param server_ip: server IP for SDK requests; when empty, requests are routed through the naming service
        :param server_port: server port for SDK requests, used together with server_ip; when empty, requests are routed through the naming service
        :param service_name: service name for SDK requests; when empty, requests are routed by contract name. If both server_ip and service_name are set, server_ip takes precedence
        :param host: host name of the target service, e.g. cmdb.easyops-only.com
"""
if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
raise Exception("server_ip和server_port必须同时指定")
self._server_ip = server_ip
self._server_port = server_port
self._service_name = service_name
self._host = host
def delete_hpa(self, request, org, user, timeout=10):
# type: (container_sdk.api.hpa.delete_hpa_pb2.DeleteHPARequest, int, str, int) -> google.protobuf.empty_pb2.Empty
"""
        Delete an HPA.
        :param request: the delete_hpa request
        :param org: the customer's org id, an integer
        :param user: the username used to call the API
        :param timeout: request timeout in seconds
        :return: google.protobuf.empty_pb2.Empty
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.hpa.DeleteHPA"
uri = "/api/container/v1/horizontalpodautoscalers/{instanceId}".format(
instanceId=request.instanceId,
)
requestParam = request
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="DELETE",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = google.protobuf.empty_pb2.Empty()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
def update(self, request, org, user, timeout=10):
# type: (container_sdk.api.hpa.update_pb2.UpdateRequest, int, str, int) -> container_sdk.model.container.hpa_pb2.HorizontalPodAutoscaler
"""
        Update an HPA.
        :param request: the update request
        :param org: the customer's org id, an integer
        :param user: the username used to call the API
        :param timeout: request timeout in seconds
        :return: container_sdk.model.container.hpa_pb2.HorizontalPodAutoscaler
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.container.hpa.Update"
uri = "/api/container/v1/horizontalpodautoscalers/{instanceId}".format(
instanceId=request.instanceId,
)
requestParam = request
rsp_obj = container_sdk.utils.http_util.do_api_request(
method="PUT",
src_name="logic.container_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = container_sdk.model.container.hpa_pb2.HorizontalPodAutoscaler()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
50c0dd89f9d5f33b5fd955695c6dfc1d7b182a64
|
625ff91e8d6b4cdce9c60f76e693d32b761bfa16
|
/uk.ac.gda.core/scripts/gdadevscripts/developertools/checkScannableNames.py
|
6a7b8fee428d3ee3fc6cd6a66c7d043b002d7436
|
[] |
no_license
|
openGDA/gda-core
|
21296e4106d71d6ad8c0d4174a53890ea5d9ad42
|
c6450c22d2094f40ca3015547c60fbf644173a4c
|
refs/heads/master
| 2023-08-22T15:05:40.149955
| 2023-08-22T10:06:42
| 2023-08-22T10:06:42
| 121,757,680
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Run this module to show scannables whose (internal) name differs from their (external) label
from gda.device import Scannable
print "The following scannables have labels (for typing) different than names(that go into files)"
print "Label\tName"
for label in dir():
if (isinstance(eval(label),Scannable)):
name = eval(label).getName()
if label!=name:
print label + "\t : " + name
|
[
"dag-group@diamond.ac.uk"
] |
dag-group@diamond.ac.uk
|
486e14339acaf81e3a59ed9a6ba548e5de49105b
|
7944d2fd5d885a034347a986f3114f0b81166447
|
/facebookads/adobjects/helpers/adaccountusermixin.py
|
da4d36229bfcaa877d38eeadcde3eb4fe09c6387
|
[] |
no_license
|
it-devros/django-facebook-api
|
4fd94d1bbbff664f0314e046f50d91ee959f5664
|
ee2d91af49bc2be116bd10bd079c321bbf6af721
|
refs/heads/master
| 2021-06-23T06:29:07.664905
| 2019-06-25T07:47:50
| 2019-06-25T07:47:50
| 191,458,626
| 2
| 0
| null | 2021-06-10T21:33:08
| 2019-06-11T22:22:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebookads.adobjects.adaccount import AdAccount
from facebookads.adobjects.page import Page
from facebookads.adobjects.objectparser import ObjectParser
from facebookads.api import FacebookRequest
from facebookads.typechecker import TypeChecker
class AdAccountUserMixin:
class Field(object):
id = 'id'
name = 'name'
permissions = 'permissions'
role = 'role'
class Permission(object):
account_admin = 1
admanager_read = 2
admanager_write = 3
billing_read = 4
billing_write = 5
reports = 7
class Role(object):
administrator = 1001
analyst = 1003
manager = 1002
# @deprecated get_endpoint function is deprecated
@classmethod
def get_endpoint(cls):
return 'users'
def get_ad_accounts(self, fields=None, params=None):
"""Returns iterator over AdAccounts associated with this user."""
return self.iterate_edge(AdAccount, fields, params, endpoint='adaccounts')
def get_ad_account(self, fields=None, params=None):
"""Returns first AdAccount associated with this user."""
return self.edge_object(AdAccount, fields, params)
def get_pages(self, fields=None, params=None):
"""Returns iterator over Pages's associated with this user."""
return self.iterate_edge(Page, fields, params)
|
[
"it-devros@outlook.com"
] |
it-devros@outlook.com
|
139924ddf0df882a3fb73abd3feb2199cf4b54c5
|
11a246743073e9d2cb550f9144f59b95afebf195
|
/codeforces/873/a.py
|
b327a471eb4b3b25939bf9172ff27110c6a1f419
|
[] |
no_license
|
ankitpriyarup/online-judge
|
b5b779c26439369cedc05c045af5511cbc3c980f
|
8a00ec141142c129bfa13a68dbf704091eae9588
|
refs/heads/master
| 2020-09-05T02:46:56.377213
| 2019-10-27T20:12:25
| 2019-10-27T20:12:25
| 219,959,932
| 0
| 1
| null | 2019-11-06T09:30:58
| 2019-11-06T09:30:57
| null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
def main():
n, k, x = map(int, input().split())
a = list(map(int, input().split()))
end = n - k
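    # Keep the first n-k values as given and count x for each of the remaining k.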
ans = sum(a[:end]) + x * k
print(ans)
main()
|
[
"arnavsastry@gmail.com"
] |
arnavsastry@gmail.com
|
ff088fd7c2c3c7a9326af48a17e85f769f1f608a
|
53f9dd194792672424e423e691dbbba0e4af7474
|
/kolibri/core/discovery/utils/network/urls.py
|
27e881fb6f9bec6666ce64a6591932f57fcb1773
|
[
"MIT"
] |
permissive
|
DXCanas/kolibri
|
8e26668023c8c60f852cc9b7bfc57caa9fd814e8
|
4571fc5e5482a2dc9cd8f93dd45222a69d8a68b4
|
refs/heads/develop
| 2021-12-05T22:18:15.925788
| 2018-09-21T19:30:43
| 2018-09-21T19:30:43
| 54,430,150
| 1
| 0
|
MIT
| 2019-11-28T00:35:17
| 2016-03-21T23:25:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,913
|
py
|
import re
from six.moves.urllib.parse import urlparse
from . import errors
HTTP_PORTS = (8080, 80, 8008)
HTTPS_PORTS = (443,)
# from https://stackoverflow.com/a/33214423
def is_valid_hostname(hostname):
if hostname[-1] == ".":
# strip exactly one dot from the right, if present
hostname = hostname[:-1]
if len(hostname) > 253:
return False
labels = hostname.split(".")
# the TLD must be not all-numeric
if re.match(r"[0-9]+$", labels[-1]):
return False
allowed = re.compile(r"(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(label) for label in labels)
# from https://stackoverflow.com/a/319293
def is_valid_ipv4_address(ip):
"""Validates IPv4 addresses.
"""
pattern = re.compile(r"""
^
(?:
# Dotted variants:
(?:
# Decimal 1-255 (no leading 0's)
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's)
|
0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
)
(?: # Repeat 3 times, separated by a dot
\.
(?:
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2}
|
0+[1-3]?[0-7]{0,2}
)
){3}
|
0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff
|
0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777
|
# Decimal notation, 1-4294967295:
429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
)
$
""", re.VERBOSE | re.IGNORECASE)
return pattern.match(ip) is not None
# from https://stackoverflow.com/a/319293
def is_valid_ipv6_address(ip):
"""Validates IPv6 addresses.
"""
pattern = re.compile(r"""
^
\s* # Leading whitespace
(?!.*::.*::) # Only a single wildcard allowed
(?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard
(?: # Repeat 6 times:
[0-9a-f]{0,4} # A group of at most four hexadecimal digits
        (?:(?<=::)|(?<!::):) # Colon unless preceded by wildcard
){6} #
(?: # Either
[0-9a-f]{0,4} # Another group
        (?:(?<=::)|(?<!::):) # Colon unless preceded by wildcard
        [0-9a-f]{0,4} # Last group
        (?: (?<=::) # Colon iff preceded by exactly one colon
| (?<!:) #
| (?<=:) (?<!::) : #
) # OR
| # A v4 address with NO leading zeros
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
(?: \.
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
){3}
)
\s* # Trailing whitespace
$
""", re.VERBOSE | re.IGNORECASE | re.DOTALL)
return pattern.match(ip) is not None
def parse_address_into_components(address):
# if it looks to be an IPv6 address, make sure it is surrounded by square brackets
if address.count(":") > 2 and re.match("^[a-f0-9\:]+$", address):
address = "[{}]".format(address)
# ensure that there's a scheme on the address
if "://" not in address:
address = "http://" + address
# parse out the URL into its components
parsed = urlparse(address)
p_scheme = parsed.scheme
p_hostname = parsed.hostname
p_path = parsed.path.rstrip("/") + "/"
try:
p_port = parsed.port
if not p_port:
# since urlparse silently excludes some types of bad ports, check and throw ourselves
split_by_colon = parsed.netloc.split("]")[-1].rsplit(":")
if len(split_by_colon) > 1:
extracted_port = split_by_colon[-1]
raise errors.InvalidPort(extracted_port)
except ValueError:
raise errors.InvalidPort(parsed.netloc.rsplit(":")[-1])
# perform basic validation on the URL components
if p_scheme not in ("http", "https"):
raise errors.InvalidScheme(p_scheme)
if is_valid_ipv6_address(p_hostname):
p_hostname = "[{}]".format(p_hostname)
elif not (is_valid_hostname(p_hostname) or is_valid_ipv4_address(p_hostname)):
raise errors.InvalidHostname(p_hostname)
return p_scheme, p_hostname, p_port, p_path
def get_normalized_url_variations(address):
"""Takes a URL, hostname, or IP, validates it, and turns it into a list of possible URLs, varying the scheme, port, and path."""
p_scheme, p_hostname, p_port, p_path = parse_address_into_components(address)
# build up a list of possible URLs, in priority order
urls = []
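    # Vary the path, then the scheme, then the port, always trying the user-supplied values before the defaults.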
paths = (p_path,) if p_path == "/" else (p_path, "/")
for path in paths:
schemes = ("http", "https") if p_scheme == "http" else ("https", "http")
for scheme in schemes:
ports = HTTP_PORTS if scheme == "http" else HTTPS_PORTS
if p_port:
ports = (p_port,) + ports
for port in ports:
if (scheme == "http" and port == 80) or (scheme == "https" and port == 443):
port_component = ""
else:
port_component = ":{port}".format(port=port)
urls.append("{scheme}://{hostname}{port}{path}".format(
scheme=scheme,
hostname=p_hostname,
port=port_component,
path=path
))
return urls
|
[
"jamalex@gmail.com"
] |
jamalex@gmail.com
|
a4ef2d7cc0c353c839e5ba8800de8867a6695388
|
b6c93083b83cd0b441c2d2347b08a529e41eaa2c
|
/utils/munin/newsblur_tasks_pipeline.py
|
1588ff390bb2579e26a7724283f0b52c48959628
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
seejay/NewsBlur
|
4b2b65536f38cfedc47f85708f6f23778986f951
|
311c5a71981c12d1389b58def94df62cb5c60575
|
refs/heads/master
| 2023-06-08T00:46:21.118450
| 2021-06-24T04:13:33
| 2021-06-24T04:13:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
#!/srv/newsblur/venv/newsblur/bin/python
from utils.munin.base import MuninGraph
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "newsblur.settings"
import django
django.setup()
class NBMuninGraph(MuninGraph):
@property
def graph_config(self):
graph = {
'graph_category' : 'NewsBlur',
'graph_title' : 'NewsBlur Task Pipeline',
'graph_vlabel' : 'Feed fetch pipeline times',
'graph_args' : '-l 0',
'feed_fetch.label': 'feed_fetch',
'feed_process.label': 'feed_process',
'page.label': 'page',
'icon.label': 'icon',
'total.label': 'total',
}
return graph
def calculate_metrics(self):
return self.stats
@property
def stats(self):
import datetime
from django.conf import settings
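        # Average each pipeline stage (feed_fetch, feed_process, page, icon, total) over fetches from the last 5 minutes.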
stats = settings.MONGOANALYTICSDB.nbanalytics.feed_fetches.aggregate([{
"$match": {
"date": {
"$gt": datetime.datetime.now() - datetime.timedelta(minutes=5),
},
},
}, {
"$group": {
"_id": 1,
"feed_fetch": {"$avg": "$feed_fetch"},
"feed_process": {"$avg": "$feed_process"},
"page": {"$avg": "$page"},
"icon": {"$avg": "$icon"},
"total": {"$avg": "$total"},
},
}])
return list(stats)[0]
if __name__ == '__main__':
NBMuninGraph().run()
|
[
"samuel@ofbrooklyn.com"
] |
samuel@ofbrooklyn.com
|
708d35129495d3fcd0f5c2e678318c001704c805
|
d451e26b2689a34a6660a6a947fe97abde90010a
|
/pang/helpers/nso.py
|
8f21ba0b3cffa504f2fe7acdd6ba2e400a42f1dc
|
[
"MIT"
] |
permissive
|
kecorbin/pang
|
25ef8d64d90a5490a4582e7a883c0460b52b1c9a
|
1e35cbdf0e30cda5b428ba72fd1fe0a550854ec5
|
refs/heads/master
| 2023-01-12T18:55:54.098474
| 2018-09-30T03:26:56
| 2018-09-30T03:26:56
| 149,394,962
| 6
| 0
|
MIT
| 2022-12-26T20:38:22
| 2018-09-19T05:04:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,324
|
py
|
import requests
import os
import errno
from .files import MAKEFILE_BASE
class NSO(object):
def __init__(self, url, username='admin', password='admin'):
self.username = username
self.password = password
self.base_url = url
@property
def headers(self):
headers = {
'Content-Type': "application/vnd.yang.data+json",
'Accept': "application/vnd.yang.collection+json,"
"application/vnd.yang.data+json"
}
return headers
def _utf8_encode(self, obj):
if obj is None:
return None
if isinstance(obj, str): # noqa
return obj
if type(obj) is list:
return [self._utf8_encode(value) for value in obj]
if type(obj) is dict:
obj_dest = {}
for key, value in obj.items():
if 'EXEC' not in key and key != "operations":
obj_dest[self._utf8_encode(key)] = self._utf8_encode(value)
return obj_dest
return obj
def get(self, uri):
url = self.base_url + uri
response = requests.get(url,
headers=self.headers,
auth=(self.username, self.password))
if response.ok:
return response
else:
response.raise_for_status()
def get_device_config_xml(self, device):
headers = {
'Content-Type': "application/vnd.yang.data+xml",
'Accept': "application/vnd.yang.collection+xml,"
"application/vnd.yang.data+xml"
}
url = '/api/config/devices/device/{}/config?deep'.format(device)
url = self.base_url + url
response = requests.get(url,
headers=headers,
auth=(self.username, self.password))
return response.text
def post(self, uri, data=None):
url = self.base_url + uri
response = requests.post(url,
headers=self.headers,
auth=(self.username, self.password))
if response.ok:
return response
else:
response.raise_for_status()
def sync_from(self, device=None):
if device:
raise NotImplementedError
else:
url = "/api/config/devices/_operations/sync-from"
resp = self.post(url)
return resp.json()
def get_device_config(self, device):
"""
gets device configuration from NSO
"""
url = '/api/config/devices/device/{}/config?deep'.format(device)
resp = self.get(url)
return self._utf8_encode(resp.json())
def get_device_list(self):
"""
returns a list of device names from NSO
"""
url = "/api/running/devices/device"
response = self.get(url)
device_list = list()
for d in response.json()["collection"]["tailf-ncs:device"]:
device_list.append(d["name"])
return device_list
def get_ned_id(self, device):
"""
returns a ned id for a given device in NSO
"""
url = "/api/running/devices/device/{}/device-type?deep"
url = url.format(device)
response = self.get(url)
try:
# making some potentially bad assumptions here
#
# {
# "tailf-ncs:device-type": {
# "cli": {
# "ned-id": "tailf-ned-cisco-nx-id:cisco-nx",
# "protocol": "telnet"
# }
# }
# }
device_type = response.json()["tailf-ncs:device-type"]
ned_id = device_type["cli"]["ned-id"]
# tailf-ned-cisco-nx-id:cisco-nx
ned_id = ned_id.split(":")[-1] # cisco-nx
return ned_id
except LookupError:
return None
def generate_netsim_configs(self, devices):
device_types = dict()
# deal with generating load-dir
for d in devices:
xml_config = self.get_device_config_xml(d)
filename = 'load-dir/{0}.xml'.format(d)
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(filename, "w") as f:
f.write(xml_config)
# grab ned id for later
ned_id = self.get_ned_id(d)
if ned_id:
device_types[d] = ned_id
with open('Makefile', 'w') as fh:
create_template = "\tncs-netsim create-device {} {}\n"
add_template = "\tncs-netsim add-device {} {}\n"
fh.write(MAKEFILE_BASE.format(base_url=self.base_url))
fh.write("netsim:\n")
first = True
for device, ned in device_types.items():
if first:
fh.write(create_template.format(ned, device))
else:
fh.write(add_template.format(ned, device))
first = False
|
[
"kecorbin@cisco.com"
] |
kecorbin@cisco.com
|
0b3451456383d74e43a4eb1d7a9f8ab12ef4adfd
|
053cf58d2cbae6f76a03f80b97c2aa53581a49ab
|
/interface/LiveClassAPI_v_Video_test.py
|
696546bf1e716cf0e0cfbcf9084c2fc09a46412d
|
[] |
no_license
|
qujinliang/LiveClassAPIAutoTest
|
8a84bb6649de46d5b90365f5d4d0e9d2ee0e1e11
|
6fbbbfb358d51bde8a4e4912625c73c6d1a9da49
|
refs/heads/master
| 2020-04-12T14:32:44.359097
| 2018-12-20T09:18:57
| 2018-12-20T09:18:57
| 162,555,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
import unittest
from util import THQS
from util import LiveAPIRequests
from util.LiveAPIDataFile import LiveAPIData
class LiveClassAPIVideoTest(unittest.TestCase):
"""设为暖场视频接口测试"""
def setUp(self):
        '''Build the request URL and query data.'''
url = LiveAPIData.urlData(self)
url = url+"/api/v1/video/warm/set?"
self.warm_set_data = LiveAPIData.warmSetData(self)
t = THQS.thqs()
warm_set_data = t.get_thqs(self.warm_set_data)
self.warm_set_url = url+warm_set_data
self.live = LiveAPIRequests.LiveAPIRequests
def tearDown(self):
pass
def test_a_list(self):
        '''Setting the warm-up video succeeds.'''
r = self.live.SendOut(self,self.warm_set_url)
if r == None:
print('请求失败,没有返回数据')
self.assertEqual(None,'')
return
print("输入参数:%s" % self.warm_set_data)
print("返回数据: %s" % r)
self.assertEqual(r['result'],'OK')
|
[
"qujin_liang@163.com"
] |
qujin_liang@163.com
|
5445bd7a3b77d5f5e64961ad50413d9a4f7b317b
|
e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a
|
/Systest/tests/aaa/AAA_FUN_007.py
|
5195ce9c3b3c259ea8e7bd9c2e4f562ee283af1d
|
[] |
no_license
|
muttu2244/MyPython
|
1ddf1958e5a3514f9605d1f83c0930b24b856391
|
984ca763feae49a44c271342dbc15fde935174cf
|
refs/heads/master
| 2021-06-09T02:21:09.801103
| 2017-10-10T07:30:04
| 2017-10-10T07:30:04
| 13,803,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,161
|
py
|
#!/usr/bin/env python2.5
"""
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
Description: - Verify SSX limits the number of sessions to the Max-sessions configured
TEST PLAN: AAA/RADIUS Test Plan
TEST CASES: AAA-FUN-007
TOPOLOGY DIAGRAM:
(Linux) (SSX) (Linux)
------- -------- --------------
|Takama | --------------------------| |------------------------| qa-svr4 |
------- | | --------------
| |
|Lihue-mc|
(Netscreen) | | (Linux)
------ | | --------------
|qa-ns1 | --------------------------| |-------------------------| qa-svr3 |
------ | | --------------
--------
How to run: "python2.5 AAA_FUN_007.py"
AUTHOR: Mahesh - mahesh@primesoftsolutionsinc.com
REVIEWER:
"""
### Import the system libraries we need.
import sys, os
### To make sure that the libraries are in correct path.
mydir = os.path.dirname(__file__)
qa_lib_dir = os.path.join(mydir, "../../lib/py")
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
# frame-work libraries
from Linux import Linux
from SSX import SSX
from aaa import *
from ike import *
from StokeTest import *
from log import buildLogger
from logging import getLogger
from helpers import is_healthy
# import configs file
from aaa_config import *
from topo import *
# python libraries
import time
class test_AAA_FUN_007(test_case):
myLog = getLogger()
def setUp(self):
"""Establish a telnet session to the SSX box."""
self.ssx = SSX(topo.ssx1['ip_addr'])
self.ssx.telnet()
# CLear SSX configuration
self.ssx.clear_config()
#Establish a telnet session to the Xpress VPN client box.
self.xpress_vpn = Linux(topo.linux["ip_addr"],topo.linux["user_name"],topo.linux["password"])
self.xpress_vpn.telnet()
# wait for card to come up
self.ssx.wait4cards()
self.ssx.clear_health_stats()
def tearDown(self):
# Close the telnet session of SSX
self.ssx.close()
# Close the telnet session of Xpress VPN Client
self.xpress_vpn.close()
def test_AAA_FUN_007(self):
"""
Test case Id: - AAA_FUN_007
"""
self.myLog.output("\n**********start the test**************\n")
# Push SSX config
self.ssx.config_from_string(script_var['common_ssx1'])
self.ssx.config_from_string(script_var['fun_007_ssx'])
# Push xpress vpn config
self.xpress_vpn.write_to_file(script_var['fun_007_xpressvpn_multi'],"autoexec.cfg","/xpm/")
self.xpress_vpn.write_to_file(script_var['add_ip_takama'],"add_ip_takama","/xpm/")
# Enable debug logs for iked
self.ssx.cmd("context %s" % script_var['context'])
self.ssx.cmd("debug module iked all")
self.ssx.cmd("debug module aaad all")
# Flush the debug logs in SSX, if any
self.ssx.cmd("clear log debug")
# Initiate IKE Session from Xpress VPN Client (takama)
self.xpress_vpn.cmd("cd /xpm/")
self.xpress_vpn.cmd("sudo chmod 777 add_ip_takama")
self.xpress_vpn.cmd("sudo ./add_ip_takama")
time.sleep(3)
op_client_cmd = self.xpress_vpn.cmd("sudo ./start_ike")
time.sleep(10)
        # Consider 9 clients
op_ssx_sa = self.ssx.configcmd("show ike-session brief")
i=0
count=0
ssx_max_ses=5
for i in range(0,len(clnt_ips)):
if clnt_ips[i] in op_ssx_sa:
count=count+1
self.myLog.output("\n\n************* the no. of ike sessions:%d\n\n"%count)
self.failUnless(count==ssx_max_ses,"Mismatch with the number of sessions and Max sessions configured")
# Check the "authentication fail" notify message when more than Max sessions are initiated
op_debug = verify_in_debug(self.ssx,"AUTHEN_FAIL")
self.failUnless(op_debug,"the AUTHENTICATION_FAILED notify message is not sent by SSX")
# Checking SSX Health
hs = self.ssx.get_health_stats()
self.failUnless(is_healthy(hs), "Platform is not healthy")
if __name__ == '__main__':
logfile=__file__.replace('.py','.log')
log = buildLogger(logfile, debug=True, console=True)
suite = test_suite()
suite.addTest(test_AAA_FUN_007)
test_runner(stream=sys.stdout).run(suite)
|
[
"muttu2244@yahoo.com"
] |
muttu2244@yahoo.com
|
1d1eb7b895ef58d7411393323789a6cea8d74eb4
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/test/test_com_day_cq_wcm_designimporter_parser_taghandlers_factory_text_component_info.py
|
9dd311727f08f354672f50c4174ba64c40b3ee04
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_day_cq_wcm_designimporter_parser_taghandlers_factory_text_component_info import ComDayCqWcmDesignimporterParserTaghandlersFactoryTextComponentInfo # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComDayCqWcmDesignimporterParserTaghandlersFactoryTextComponentInfo(unittest.TestCase):
"""ComDayCqWcmDesignimporterParserTaghandlersFactoryTextComponentInfo unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComDayCqWcmDesignimporterParserTaghandlersFactoryTextComponentInfo(self):
"""Test ComDayCqWcmDesignimporterParserTaghandlersFactoryTextComponentInfo"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_day_cq_wcm_designimporter_parser_taghandlers_factory_text_component_info.ComDayCqWcmDesignimporterParserTaghandlersFactoryTextComponentInfo() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"michael.bloch@shinesolutions.com"
] |
michael.bloch@shinesolutions.com
|
33d65505e62415ec474373f835f060af9a83c2d8
|
7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a
|
/.history/DEBER_20210904224213.py
|
22245042ab40e7800c1484553318ca360a097082
|
[
"MIT"
] |
permissive
|
Alopezm5/PROYECTO-PARTE-1
|
a1dce04009b24852c1c60e69bdf602ad3af0574b
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
refs/heads/main
| 2023-07-25T11:22:17.994770
| 2021-09-07T03:27:34
| 2021-09-07T03:27:34
| 403,670,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,323
|
py
|
import os
from datetime import datetime
class Empresa():
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr=""):
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("")
print("Datos de la Empresa")
print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
def __init__(self,nom="",cedu=0,dire="",tele=0,email="",estado="",profe="",anti=0,com=0,fNomina="",fIngreso="",iess=0):
self.nombre=nom
self.cedula=cedu
self.direccion=dire
self.telefono=tele
self.correo=email
self.estadocivil=estado
self.profesion=profe
self.antiguedad=anti
self.comision=com
self.fechaNomina=fNomina
self.fechaIngreso=fIngreso
self.iess=iess
def empleado(self):
self.nombre=input("Ingresar nombre del empleado: ")
self.cedula=int(input("Ingresar numero de cedula del empleado: "))
self.direccion=input("Ingresar la direccion del empleado: ")
self.telefono=int(input("Ingresar numero de contacto del empleado: "))
self.correo=input("Ingresar correo personal del empleado: ")
self.iess=float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))
self.fechaNomina=input("Ingresar fecha de nomida (formato año-mes-dia): ")
self.fechaIngreso=input("Ingresar fecha de ingreso (formato año-mes-dia): ")
self.antiguedad=float(input("Ingresar valor de antiguedad"))
self.comision=float(input("Ingresar calor de la comsion: "))
def empleadoObrero(self):
self.estadocivil=input("Ingresar estado civil del empleado: ")
def empleadoOficina(self):
self.profesion=input("Ingresar profesion del empleado: ")
    def mostrarempleado(self):
        print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto {}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
def __init__(self,dep=""):
self.departamento=dep
def departa(self):
self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
def mostrarDeparta(self):
print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
    def __init__(self, desc=0, desper=0, valhora=0, hotraba=0, extra=0, suel=0, hrecar=0, hextra=0, pres=0, mcou=1, valho=0, sobtiem=0, comofi=0, antobre=0, iemple=0, cuopres=0, tot=0, liquid=0):
        Empleado.__init__(self)
        self.permisos=desper
self.valorhora=valhora
self.horastrabajadas=hotraba
self.valextra=extra
self.sueldo= suel
self.horasRecargo= hrecar
self.horasExtraordinarias=hextra
self.prestamo= pres
self.mesCuota= mcou
self.valor_hora= valho
self.sobretiempo=sobtiem
self.comEmpOficina = comofi
self.antiEmpObrero = antobre
self.iessEmpleado = iemple
self.cuotaPrestamo=cuopres
self.totdes = tot
self.liquidoRecibir = liquid
def pagoNormal(self):
self.sueldo=float(input("Ingresar sueldo del trabajador: $"))
self.prestamo=float(input("Ingresar monto del prestamo que ha generado el empleado: $"))
self.mesCuota=("Ingresar meses qa diferir el prestamo: ")
def pagoExtra(self):
self.horasRecargo=int(input("Ingresar horas de recargo: "))
self.horasExtraordinarias=int(input("Ingresar horas extraordinarias: "))
def calculoSueldo(self):
        self.valor_hora=self.sueldo/240
        self.sobretiempo= self.valor_hora * (self.horasRecargo*0.50+self.horasExtraordinarias*2)
        self.comEmpOficina = self.comision*self.sueldo
        # seniority prorated over the days between the start date and the payroll date
        try:
            dias = (datetime.strptime(self.fechaNomina, "%Y-%m-%d") - datetime.strptime(self.fechaIngreso, "%Y-%m-%d")).days
        except ValueError:
            dias = 0
        self.antiEmpObrero = self.antiguedad*dias/365*self.sueldo
        self.iessEmpleado = self.iess*(self.sueldo+self.sobretiempo)
        self.cuotaPrestamo=self.prestamo/self.mesCuota
        if eleccion==1:
            self.toting = self.sueldo+self.sobretiempo+self.comEmpOficina
        elif eleccion==2:
            self.toting = self.sueldo+self.sobretiempo+self.antiEmpObrero
        self.totdes = self.iessEmpleado + self.cuotaPrestamo
        self.liquidoRecibir = self.toting - self.totdes
def mostrarSueldo(self):
print("Arreglar")
emp=Empresa()
emp.datosEmpresa()
emple=Empleado()
emple.empleado()
eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
emple.empleadoObrero()
emple.empleadoOficina()
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
emp.mostrarEmpresa()
print("")
emple.mostrarempleado()
print("")
|
[
"85761855+Alopezm5@users.noreply.github.com"
] |
85761855+Alopezm5@users.noreply.github.com
|
89b21af428d1d308b279efb03d30b5f58713a620
|
2f0c413962f96fe449ddcaf9363f1bdfd4f5e98d
|
/test/test_gossip.py
|
8060199884d775bc93067661bb947acca582a832
|
[
"MIT"
] |
permissive
|
vijayanant/kunai
|
1d922791dbad8c6132d790d7a58040c3f9ecbedc
|
0dfe169731eaceb1bba66e12715b3968d2a3de20
|
refs/heads/master
| 2021-01-22T12:02:17.293478
| 2014-12-27T13:15:25
| 2014-12-27T13:15:25
| 28,539,772
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
#!/usr/bin/env python
# Copyright (C) 2014:
# Gabes Jean, naparuba@gmail.com
import copy
import time
import threading
from kunai_test import *
from kunai.gossip import Gossip
from kunai.broadcast import Broadcaster
class TestGossip(KunaiTest):
def setUp(self):
self.gossip = Gossip({}, threading.RLock(), 'localhost', 6768, 'testing-kunai', 0, 'AAAA', ['linux', 'kv'], [])
def test_gossip(self):
pass
if __name__ == '__main__':
unittest.main()
|
[
"naparuba@gmail.com"
] |
naparuba@gmail.com
|
02086cbeccc8c19cd85a073f2c7eab29f2e06976
|
d2f71636c17dc558e066d150fe496343b9055799
|
/eventi/receipts/forms.py
|
9d3016a8c6419210c1823d66a44e71eee0f454e5
|
[
"MIT"
] |
permissive
|
klebercode/lionsclub
|
9d8d11ad6083d25f6d8d92bfbae9a1bbfa6d2106
|
60db85d44214561d20f85673e8f6c047fab07ee9
|
refs/heads/master
| 2020-06-11T19:45:39.974945
| 2015-04-05T01:11:57
| 2015-04-05T01:11:57
| 33,409,707
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
# coding: utf-8
from django import forms
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
from eventi.receipts.models import Receipt
from eventi.subscriptions.models import Subscription
class ReceiptForm(forms.ModelForm):
class Meta:
model = Receipt
def send_mail(self, pk):
subject = u'Lions Clubes, comprovante enviado.'
context = {
'name': self.cleaned_data['name'],
'subscription': self.cleaned_data['subscription'],
}
s = Subscription.objects.get(pk=self.cleaned_data['subscription'])
if s:
email_to = s.email
else:
email_to = ''
message = render_to_string('receipts/receipt_mail.txt', context)
message_html = render_to_string('receipts/receipt_mail.html',
context)
msg = EmailMultiAlternatives(subject, message,
'convencao@lionsclubegaranhuns.org.br',
[email_to])
msg.attach_alternative(message_html, 'text/html')
msg.send()
|
[
"kleberr@msn.com"
] |
kleberr@msn.com
|
fee2e7e85fd97f95bfea8e5a4c9bfbaf48c5d3df
|
e204623da7c836b95f209cc7fb357dc0b7f60548
|
/meetings/admin.py
|
0d6435eab6b99696f4439688f520fcfc68c0a66a
|
[] |
no_license
|
juliocebrito/syfboyaca4gavantel
|
9551db0a9b74dadf831362ceb0f685a482afa828
|
c9e9cc5f591dddaa53e3d1fd3db50d16d34424d7
|
refs/heads/master
| 2022-04-30T00:06:57.239452
| 2019-08-05T15:12:23
| 2019-08-05T15:12:23
| 196,095,965
| 0
| 0
| null | 2022-04-22T22:00:52
| 2019-07-09T23:08:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 975
|
py
|
from django.contrib import admin
from .models import Meeting, Point
class PointInline(admin.TabularInline):
model = Point
@admin.register(Meeting)
class MeetingAdmin(admin.ModelAdmin):
resource_class = Meeting
list_display = (
'id',
'date',
'type_meeting',
'state',
'sub_state',
'created_at',
'updated_at',
)
list_filter = ('state', 'sub_state', 'created_at', 'updated_at')
search_fields = ['id', 'date', 'type_meeting']
inlines = [PointInline]
@admin.register(Point)
class PointAdmin(admin.ModelAdmin):
resource_class = Point
list_display = (
'id',
'meeting',
'name',
'description',
'comments',
'point_state',
'state',
'sub_state',
'created_at',
'updated_at',
)
list_filter = ('state', 'sub_state', 'created_at', 'updated_at')
search_fields = ['id', 'meeting', 'name', 'point_state']
|
[
"jucebridu@gmail.com"
] |
jucebridu@gmail.com
|
e0718b4f12184db6ea4c15fbd4918f1f3212a582
|
8e39a4f4ae1e8e88d3b2d731059689ad5b201a56
|
/lib32-apps/lib32-libXext/lib32-libXext-1.3.3.py
|
773e23325d398e9fbe0d784a04ec5d4ef87625a4
|
[] |
no_license
|
wdysln/new
|
d5f5193f81a1827769085932ab7327bb10ef648e
|
b643824b26148e71859a1afe4518fe05a79d333c
|
refs/heads/master
| 2020-05-31T00:12:05.114056
| 2016-01-04T11:38:40
| 2016-01-04T11:38:40
| 37,287,357
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
metadata = """
summary @ 11 miscellaneous extensions library
homepage @ http://xorg.freedesktop.org/
license @ MIT
src_url @ http://xorg.freedesktop.org/releases/individual/lib/libXext-$version.tar.bz2
arch @ ~x86_64
"""
depends = """
runtime @ sys-libs/glibc x11-libs/libX11 x11-proto/xextproto
"""
srcdir = "libXext-%s" % version
get("main/lib32_utils")
|
[
"zirkovandersen@gmail.com"
] |
zirkovandersen@gmail.com
|
0ee0628059ce0bbdb5a337a48cab93a60ec822f8
|
468f7b1d7639e2465b2ba4e0f960c8a75a10eb89
|
/kerasAC/cross_validate.py
|
8322e8d37895ddfb25a8c3cf60d3670904215752
|
[
"MIT"
] |
permissive
|
soumyakundu/kerasAC
|
874a7453044050afa3198c5bd3c34185b53ea571
|
f692abf1c6003a9f0d917117f3579a0746ed3b5a
|
refs/heads/master
| 2020-04-23T04:34:10.659657
| 2019-02-14T23:01:42
| 2019-02-14T23:01:42
| 156,909,046
| 0
| 0
| null | 2019-02-15T18:13:21
| 2018-11-09T19:32:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,381
|
py
|
from .splits import *
from .config import args_object_from_args_dict
from .train import *
from .predict import *
from .interpret import *
import argparse
import pdb
def parse_args():
parser=argparse.ArgumentParser(add_help=True)
parser.add_argument("--multi_gpu",action="store_true",default=False)
parser.add_argument("--assembly",default="hg19")
parser.add_argument("--data_path",help="path that stores training/validation/test data")
parser.add_argument("--model_hdf5",required=True)
parser.add_argument("--batch_size",type=int,default=1000)
parser.add_argument("--init_weights",default=None)
parser.add_argument("--ref_fasta",default="/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa")
parser.add_argument("--w1_w0_file",default=None)
parser.add_argument("--save_w1_w0", default=None,help="output text file to save w1 and w0 to")
parser.add_argument("--weighted",action="store_true")
parser.add_argument('--w1',nargs="*", type=float, default=None)
parser.add_argument('--w0',nargs="*", type=float, default=None)
parser.add_argument("--from_checkpoint_weights",default=None)
parser.add_argument("--from_checkpoint_arch",default=None)
parser.add_argument("--num_tasks",required=True,type=int)
parser.add_argument("--num_train",type=int,default=700000)
parser.add_argument("--num_valid",type=int,default=150000)
#add functionality to train on individuals' allele frequencies
parser.add_argument("--vcf_file",default=None)
parser.add_argument("--global_vcf",action="store_true")
parser.add_argument("--revcomp",action="store_true")
parser.add_argument("--epochs",type=int,default=40)
parser.add_argument("--patience",type=int,default=3)
parser.add_argument("--patience_lr",type=int,default=2,help="number of epochs with no drop in validation loss after which to reduce lr")
parser.add_argument("--architecture_spec",type=str,default="basset_architecture_multitask")
parser.add_argument("--architecture_from_file",type=str,default=None)
parser.add_argument("--tensorboard",action="store_true")
parser.add_argument("--tensorboard_logdir",default="logs")
parser.add_argument("--squeeze_input_for_gru",action="store_true")
parser.add_argument("--seed",type=int,default=1234)
parser.add_argument("--train_upsample", type=float, default=None)
parser.add_argument("--valid_upsample", type=float, default=None)
parser.add_argument("--threads",type=int,default=1)
parser.add_argument("--max_queue_size",type=int,default=100)
parser.add_argument('--weights',help='weights file for the model')
parser.add_argument('--yaml',help='yaml file for the model')
parser.add_argument('--json',help='json file for the model')
parser.add_argument('--predict_chroms',default=None)
parser.add_argument('--data_hammock',help='input file is in hammock format, with unique id for each peak')
parser.add_argument('--variant_bed')
parser.add_argument('--predictions_pickle',help='name of pickle to save predictions',default=None)
parser.add_argument('--accuracy_metrics_file',help='file name to save accuracy metrics',default=None)
parser.add_argument('--predictions_pickle_to_load',help="if predictions have already been generated, provide a pickle with them to just compute the accuracy metrics",default=None)
parser.add_argument('--background_freqs',default=None)
parser.add_argument('--flank',default=500,type=int)
parser.add_argument('--mask',default=10,type=int)
parser.add_argument('--center_on_summit',default=False,action='store_true',help="if this is set to true, the peak will be centered at the summit (must be last entry in bed file or hammock) and expanded args.flank to the left and right")
parser.add_argument("--interpret_chroms",nargs="*")
parser.add_argument("--interpretation_outf",default=None)
parser.add_argument("--method",choices=['gradxinput','deeplift'],default="deeplift")
parser.add_argument('--task_id',type=int)
parser.add_argument('--chromsizes',default='/mnt/data/annotations/by_release/hg19.GRCh37/hg19.chrom.sizes')
parser.add_argument("--interpret",action="store_true",default=False)
return parser.parse_args()
def cross_validate(args):
if type(args)==type({}):
args=args_object_from_args_dict(args)
#run training on each of the splits
if args.assembly not in splits:
raise Exception("Unsupported genome assembly:"+args.assembly+". Supported assemblies include:"+str(splits.keys())+"; add splits for this assembly to splits.py file")
args_dict=vars(args)
print(args_dict)
base_model_file=str(args_dict['model_hdf5'])
base_accuracy_file=str(args_dict['accuracy_metrics_file'])
base_interpretation=str(args_dict['interpretation_outf'])
base_predictions_pickle=str(args_dict['predictions_pickle'])
for split in splits[args.assembly]:
print("Starting split:"+str(split))
test_chroms=splits[args.assembly][split]['test']
validation_chroms=splits[args.assembly][split]['valid']
train_chroms=list(set(chroms[args.assembly])-set(test_chroms+validation_chroms))
#convert args to dict
args_dict=vars(args)
args_dict['train_chroms']=train_chroms
args_dict['validation_chroms']=validation_chroms
#set the training arguments specific to this fold
args_dict['model_hdf5']=base_model_file+"."+str(split)
print("Training model")
train(args_dict)
#set the prediction arguments specific to this fold
if args.save_w1_w0!=None:
args_dict["w1_w0_file"]=args.save_w1_w0
args_dict['accuracy_metrics_file']=base_accuracy_file+"."+str(split)
args_dict['predictions_pickle']=base_predictions_pickle+"."+str(split)
args_dict['predict_chroms']=test_chroms
print("Calculating predictions on the test fold")
predict(args_dict)
if args.interpret==True:
args_dict['interpret_chroms']=test_chroms
args_dict['interpretation_outf']=base_interpretation+'.'+str(split)
print("Running interpretation on the test fold")
interpret(args_dict)
def main():
args=parse_args()
cross_validate(args)
if __name__=="__main__":
main()
|
[
"annashcherbina@gmail.com"
] |
annashcherbina@gmail.com
|
a1ee9dd36bec6ac5b2c5fcfb664c79cf089b0fd3
|
944d3c07a3e0edb65f41a4a302f494e6b44e3f45
|
/nntoolbox/callbacks/lookahead.py
|
d87b7f399790c6d799238d7cf9b99033b6816ce0
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nhatsmrt/nn-toolbox
|
acbc753d6081ed1e2ed91ac0fd3343287c78a094
|
689b9924d3c88a433f8f350b89c13a878ac7d7c3
|
refs/heads/master
| 2022-12-05T21:11:23.725346
| 2021-08-15T17:03:34
| 2021-08-15T17:03:34
| 189,150,286
| 19
| 3
|
Apache-2.0
| 2023-09-08T19:29:53
| 2019-05-29T04:25:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
from .callbacks import Callback
from ..utils import copy_model, get_device
from typing import Dict, Any
from torch.nn import Module
__all__ = ['LookaheadOptimizer']
class LookaheadOptimizer(Callback):
"""
Lookahead Optimizer: Keep track of a set of "slow weights", which only update periodically. (UNTESTED)
References:
Michael R. Zhang, James Lucas, Geoffrey Hinton, Jimmy Ba. "Lookahead Optimizer: k steps forward, 1 step back."
https://arxiv.org/abs/1907.08610
"""
def __init__(
self, step_size: float=0.5, update_every: int=1, timescale: str="iter", device=get_device()
):
"""
https://arxiv.org/pdf/1803.05407.pdf
:param model: the model currently being trained
:param step_size: the stepsize for slow weight update
:param average_after: the first epoch to start averaging
:param update_every: how many epochs/iters between each average update
"""
assert timescale == "epoch" or timescale == "iter"
self.step_size = step_size
self._update_every = update_every
self._timescale = timescale
self._device = device
def on_train_begin(self):
self._model = self.learner._model
self._model_slow = copy_model(self._model).to(self._device)
def on_epoch_end(self, logs: Dict[str, Any]) -> bool:
if self._timescale == "epoch":
if logs["epoch"] % self._update_every == 0:
self.update_slow_weights()
print("Update slow weights after epoch " + str(logs["epoch"]))
return False
def on_batch_end(self, logs: Dict[str, Any]):
if self._timescale == "iter":
if logs["iter_cnt"] % self._update_every == 0:
self.update_slow_weights()
print("Update slow weights after iteration " + str(logs["iter_cnt"]))
def on_train_end(self):
self._model_slow.to(self.learner._device)
for inputs, labels in self.learner._train_data:
self._model_slow(inputs.to(self.learner._device))
self.learner._model = self._model_slow
def update_slow_weights(self):
for model_p, slow_p in zip(self._model.parameters(), self._model_slow.parameters()):
slow_p.data.add_(self.step_size * (model_p.data.to(slow_p.data.dtype) - slow_p.data))
def get_final_model(self) -> Module:
"""
Return the post-training average model
:return: the averaged model
"""
return self._model_slow
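# Editor's sketch: the core slow-weight update performed by update_slow_weights()
# above, shown on plain tensors. The names "slow_w" and "fast_w" are illustrative
# and not part of the library's API; step_size plays the role of self.step_size.
import torch
def lookahead_step(slow_w: torch.Tensor, fast_w: torch.Tensor, step_size: float = 0.5) -> torch.Tensor:
    # slow <- slow + step_size * (fast - slow): an interpolation toward the fast weights.
    return slow_w + step_size * (fast_w - slow_w)
# Example: lookahead_step(torch.tensor(0.0), torch.tensor(1.0)) -> tensor(0.5000)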
|
[
"nhatsmrt@uw.edu"
] |
nhatsmrt@uw.edu
|
066f1468dcad77bbbba420be664825ae167ba1e7
|
f6252f763b46053d81ffcc19919a5adcb0fff069
|
/trax/tf_numpy/numpy/tests/utils_test.py
|
2558976fd867d9db4baf0bf87b1ce79b48145b68
|
[
"Apache-2.0"
] |
permissive
|
codespeakers/trax
|
ee5da9e39b83b173034ff2638d856dec38e9675a
|
9fc11bca7accda0394d629cac96558f4539d7f61
|
refs/heads/master
| 2020-12-14T15:50:49.634706
| 2020-01-18T20:52:27
| 2020-01-18T20:52:27
| 234,796,218
| 0
| 0
|
Apache-2.0
| 2020-01-18T20:51:52
| 2020-01-18T20:51:51
| null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
# coding=utf-8
# Copyright 2019 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from trax.tf_numpy.numpy import utils
class UtilsTest(tf.test.TestCase):
# pylint: disable=unused-argument
def testNpDoc(self):
def np_fun(x):
"""np_fun docstring."""
return
@utils.np_doc(np_fun)
def f():
"""f docstring."""
return
expected = """TensorFlow variant of `numpy.np_fun`.
Unsupported arguments: `x`.
f docstring.
Documentation for `numpy.np_fun`:
np_fun docstring."""
self.assertEqual(f.__doc__, expected)
def testNpDocErrors(self):
def np_fun(x, y=1, **kwargs):
return
# pylint: disable=unused-variable
with self.assertRaisesRegexp(TypeError, 'Cannot find parameter'):
@utils.np_doc(np_fun)
def f1(a):
return
with self.assertRaisesRegexp(TypeError, 'is of kind'):
@utils.np_doc(np_fun)
def f2(x, kwargs):
return
with self.assertRaisesRegexp(
TypeError, 'Parameter "y" should have a default value'):
@utils.np_doc(np_fun)
def f3(x, y):
return
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
1d77cbfd80b1f6ec236953a1d767cc6340842c4f
|
c858d9511cdb6a6ca723cd2dd05827d281fa764d
|
/MFTU/lesson 8/practice/minkowski_curve.py
|
b1f2a0c187c3359f45a4c64ed166b194bb559ba2
|
[] |
no_license
|
DontTouchMyMind/education
|
0c904aa929cb5349d7af7e06d9b1bbaab972ef95
|
32a53eb4086b730cc116e633f68cf01f3d4ec1d1
|
refs/heads/master
| 2021-03-12T11:15:02.479779
| 2020-09-17T08:19:50
| 2020-09-17T08:19:50
| 246,616,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
from turtle import *
shape('turtle')
speed()
size = 50
def minkowski_curve(length, n):
"""
Function draws a minkowski curve
:param length: simple line length
:param n: recursion depth
:return:
"""
if n == 0:
forward(length)
return
minkowski_curve(length, n - 1)
left(90)
minkowski_curve(length, n - 1)
right(90)
minkowski_curve(length, n - 1)
right(90)
minkowski_curve(length, n - 1)
minkowski_curve(length, n - 1)
left(90)
minkowski_curve(length, n - 1)
left(90)
minkowski_curve(length, n - 1)
right(90)
minkowski_curve(length, n - 1)
minkowski_curve(size / 4, 5)
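# Editor's note: each recursion level replaces a segment with 8 shorter ones, so
# minkowski_curve(length, n) draws 8**n segments; the call above (n=5) draws 32768.
# A tiny, hypothetical helper to check that count without drawing:
def minkowski_segment_count(n):
    return 8 ** n
# minkowski_segment_count(5) -> 32768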
|
[
"tobigface@gmail.com"
] |
tobigface@gmail.com
|
219ecdcca20b2b6ae2df729c0b04f80903956b35
|
f84998eddfe0800e525a5ef34dd8fac1898665b2
|
/pyski/pyski.py
|
1594fda402a49ea6f246a07eb80280dcf44b5817
|
[
"MIT"
] |
permissive
|
asmodehn/adam
|
76fdd8b7e54c50b20b87309609452293dbc76123
|
98060d76c3aebbada257cd348532509bb2986a5d
|
refs/heads/master
| 2023-09-03T11:28:40.023415
| 2017-12-01T15:50:38
| 2017-12-01T15:50:38
| 106,175,072
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,011
|
py
|
import sys
import cmd
import types
from inspect import signature
from svm import stk_set, stk_get, dup, drop, swap, over, rot
#TODO : unify interface using list of args.
# combinators
# Note how we apply only the immediate composition for B.
def B(x, y, z):
return x, y(z)
def C(x, y, z):
return x, z, y
def K(x, y):
return x
def W(x, y):
return x, y, y
class StackREPL(cmd.Cmd):
"""
Design notes. Cmd is based on the usual model of command + params, or function + arguments.
We want to have a stack based concatenative language, so we need to find the middle ground here...
Each time the user type return, one computation is effectuated
First computation is the current input line (command style) if there is one otherwise current stack (last first)
Any unknown word will be added to the stack and only considered as (unknown symbolic) param, not command.
"""
intro = 'Welcome to pyski. Type help or ? to list commands.\n'
prompt = ' '
file = None
# defining basic combinators with the host language features
env = {
'B': lambda *args: args,
'C': lambda x, y, z: x(y)(z),
'K': lambda x: x,
'W': lambda x: x,
}
# interpreter with the host language features
def evl(self, xpr):
for c in xpr:
try:
                yield StackREPL.env[c]
except Exception:
raise # TODO : proper handling...
def prompt_refresh(self):
# note : we need the reversed stack for a left prompt
self.prompt = " ".join(reversed(tuple(stk_get()))) + ' '
def do_dup(self, arg):
"""duplicates its argument and push it up to the stack.
Extra arguments are treated before, following stack semantics.
This might seem a bit confusing and might be improved by switching prefix/postfix input semantics and repl design...
"""
stk_set(*dup(*stk_get()))
def do_drop(self, arg):
stk_set(*drop(*stk_get()))
def do_swap(self, arg):
stk_set(*swap(*stk_get()))
def do_over(self, arg):
stk_set(*over(*stk_get()))
def do_rot(self, arg):
stk_set(*rot(*stk_get()))
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
This method automatically adds the command as undefined word, and recurse on argument (until one known command is found).
"""
# lets extract the command
cmd, arg, line = self.parseline(line)
if cmd: # checking for ''
# an add it to the stack (PUSH)
stk_set(cmd, *stk_get())
def emptyline(self):
"""
Called when the input line is empty
This executes one computation on the existing stack
:return:
"""
stkline = " ".join(stk_get())
if stkline:
self.onecmd(stkline)
# this parse in the opposite direction
# def parseline(self, line):
# """Parse the line into a command name and a string containing
# the arguments. Returns a tuple containing (command, args, line).
# 'command' and 'args' may be None if the line couldn't be parsed.
#
# Note this is the reverse as the default cmd implementation : the last word is the command.
# """
# line = line.strip()
# if not line:
# return None, None, line
# elif line[-1] == '?':
# line = line[:-1] + ' help'
# elif line[-1] == '!':
# if hasattr(self, 'do_shell'):
# line = line[:-1] + ' shell'
# else:
# return None, None, line
# i, n = 0, len(line)
# while i < n and line[-i] in self.identchars: i = i + 1
# cmd, arg = line[-i:].strip(), line[:-i]
#
# return cmd, arg, line
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
cmd, arg, line = self.parseline(line)
if arg: # keep rest of the line in cmdqueue, and execute it in cmdloop.
self.cmdqueue.append(arg)
# update prompt
self.prompt_refresh()
return stop
# basic REPL commands
# def do_help(self, arg):
# ""
# def do_shell(self, arg):
# ""
def do_eof(self, arg):
'Stop recording, close the pyski window, and exit.'
print('Thank you for using pyski')
self.close()
return True
# ----- record and playback -----
def do_record(self, arg):
'Save future commands to filename: RECORD rose.cmd'
self.file = open(arg, 'w')
def do_playback(self, arg):
'Playback commands from a file: PLAYBACK rose.cmd'
self.close()
with open(arg) as f:
self.cmdqueue.extend(f.read().splitlines())
def precmd(self, line):
line = line.lower()
if self.file and 'playback' not in line:
print(line, file=self.file)
return line
def close(self):
if self.file:
self.file.close()
self.file = None
if __name__ == '__main__':
StackREPL().cmdloop()
# TODO : separate the evaluator- loop from the read / print loop, to allow to implement word rewriting as a "view/controller" only,
# where the evaluator is kind of the model (on top of the VM for operational semantics, and some type checker/theorem proofer for denotational semantics)...
# maybe even with network protocol in between.
# HOWEVER the read/print entire state must be kept in the evaluator or the VM (and per user)
# => Our evaluator (as a reflective tower) is always running (in a specific location) , like a server, and need a second input interface to manipulate the stored read/print state.
# Maybe the read/print state could also be linked to the tower level ???
# an evaluator can usually be split into a free monad and an interpretor. So maybe we need another construct here...
# But the Free Monad might be the correct math concept that is necessary for a "location" => where current state of computation is kept.
# Comparing with living system theory, encoder/decoder is not needed in homoiconic language, channel, net and time are hardware devices that can be interfaced with teh language somehow, and associator, decider and memory are all done by the free monad implementation.
# The transducer is the interpreter.
# This seems to suggest there would be more to the free monad than just a monad ( how to actually reflection, continuations, etc. ??)...
# It seems also that the free monad could be the place to store configuration of the ditor as well as hte place to implement "optimization" features for the language
# (for ex. a term configured in editor and always used could have a direct VM implementation, rather than rewrite it, and use hte implementation of each of its parts...)
# Maybe there should be a configurable term rewritter between the monad and the interpreter ?? It would rewrite what is unknown by the free monad into what is known... We still need to understand how this is different from the actual interpreter...
# We should keep all this on the side for later, after the curses based view has been developed.
|
[
"asmodehn@gmail.com"
] |
asmodehn@gmail.com
|
f1c4caad5266b2e17020cc4bd4dbf9a3098a0328
|
50a7c55e00b661746fc953ee4940a8f3bf976b18
|
/re/finditer.py
|
1b7d9113e6a970ba95af69dd0b1a2430e16c9f36
|
[] |
no_license
|
cponeill/pymotw-practice-examples
|
a9e049a5aee671c8bfc958c6a5bfcfb764f12444
|
6e87ca8be925bc103afb7c0f80da8a69f1e80a4c
|
refs/heads/master
| 2021-01-13T13:14:39.216035
| 2017-10-25T14:31:58
| 2017-10-25T14:31:58
| 72,708,203
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
# finditer.py
import re
text = 'abbaaabbbbaaaaa'
pattern = 'ab'
for match in re.finditer(pattern, text):
s = match.start()
e = match.end()
print('Found {!r} at {:d}:{:d}'.format(text[s:e], s, e))
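# Editor's note: an equivalent sketch using a precompiled pattern and match.span(),
# which returns (start, end) in one call; the printed output matches the loop above.
compiled = re.compile(pattern)
for match in compiled.finditer(text):
    s, e = match.span()
    print('Found {!r} at {:d}:{:d}'.format(text[s:e], s, e))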
|
[
"caseyoneill78@hotmail.com"
] |
caseyoneill78@hotmail.com
|
635200a8db1ecb79752b90c7330d891670f8b070
|
9d7d88cc4dc326993c6be9ba2a79b5afe86254c5
|
/tests/layers/test_position_embedding.py
|
c110c9a2872554a666f6bd1bc69f201b2421ab25
|
[] |
no_license
|
LeeKLTW/posner
|
7ebe0e287c8a9db91e150ba08c41772757b2639f
|
9a1c6e00c463644a78ebf413b676c74c846dc23d
|
refs/heads/master
| 2022-12-16T17:32:38.327191
| 2020-02-26T11:50:47
| 2020-02-26T11:50:47
| 240,471,085
| 5
| 1
| null | 2022-12-08T03:36:50
| 2020-02-14T09:22:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
# -*- coding: utf-8 -*-
import os
import tempfile
import unittest
import numpy as np
from tensorflow import keras
from posner.layers import PositionEmbedding
class TestSinCosPosEmbd(unittest.TestCase):
def test_invalid_output_dim(self):
with self.assertRaises(NotImplementedError):
PositionEmbedding(
mode=PositionEmbedding.MODE_EXPAND,
output_dim=5,
)
def test_missing_output_dim(self):
with self.assertRaises(NotImplementedError):
PositionEmbedding(
mode=PositionEmbedding.MODE_EXPAND,
)
def test_add(self):
seq_len = np.random.randint(1, 10)
embed_dim = np.random.randint(1, 20) * 2
inputs = np.ones((1, seq_len, embed_dim))
model = keras.models.Sequential()
model.add(PositionEmbedding(
input_shape=(seq_len, embed_dim),
mode=PositionEmbedding.MODE_ADD,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(),
'pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'PositionEmbedding': PositionEmbedding})
model.summary()
predicts = model.predict(inputs)[0].tolist()
for i in range(seq_len):
for j in range(embed_dim):
actual = predicts[i][j]
if j % 2 == 0:
expect = 1.0 + np.sin(i / 10000.0 ** (float(j) / embed_dim))
else:
expect = 1.0 + np.cos(i / 10000.0 ** ((j - 1.0) / embed_dim))
self.assertAlmostEqual(expect, actual, places=6,
msg=(embed_dim, i, j, expect, actual))
def test_concat(self):
seq_len = np.random.randint(1, 10)
feature_dim = np.random.randint(1, 20)
embed_dim = np.random.randint(1, 20) * 2
inputs = np.ones((1, seq_len, feature_dim))
model = keras.models.Sequential()
model.add(PositionEmbedding(
input_shape=(seq_len, feature_dim),
output_dim=embed_dim,
mode=PositionEmbedding.MODE_CONCAT,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(),
'test_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'PositionEmbedding': PositionEmbedding})
model.summary()
predicts = model.predict(inputs)[0].tolist()
for i in range(seq_len):
for j in range(embed_dim):
actual = predicts[i][feature_dim + j]
if j % 2 == 0:
expect = np.sin(i / 10000.0 ** (float(j) / embed_dim))
else:
expect = np.cos(i / 10000.0 ** ((j - 1.0) / embed_dim))
self.assertAlmostEqual(expect, actual, places=6,
msg=(embed_dim, i, j, expect, actual))
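# Editor's note: the expected values above follow the standard sinusoidal positional
# encoding, sin(pos / 10000**(j/d)) for even j and cos(pos / 10000**((j-1)/d)) for
# odd j. A compact numpy sketch (illustrative only, not used by the tests):
def sinusoid_table(seq_len, embed_dim):
    pos = np.arange(seq_len)[:, None].astype('float64')
    j = np.arange(embed_dim)[None, :]
    angle = pos / np.power(10000.0, (2 * (j // 2)) / float(embed_dim))
    return np.where(j % 2 == 0, np.sin(angle), np.cos(angle))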
|
[
"LeeKLTW@gmail.com"
] |
LeeKLTW@gmail.com
|
17a5b8659e14c201ad12ac9f18525fb17aba949f
|
674f5dde693f1a60e4480e5b66fba8f24a9cb95d
|
/armulator/armv6/opcodes/concrete/add_sp_plus_register_thumb_t3.py
|
1df3c88b2a6cafe7aa3816167f947a12888f3b06
|
[
"MIT"
] |
permissive
|
matan1008/armulator
|
75211c18ebc9cd9d33a02890e76fc649483c3aad
|
44f4275ab1cafff3cf7a1b760bff7f139dfffb07
|
refs/heads/master
| 2023-08-17T14:40:52.793120
| 2023-08-08T04:57:02
| 2023-08-08T04:57:02
| 91,716,042
| 29
| 7
|
MIT
| 2023-08-08T04:55:59
| 2017-05-18T16:37:55
|
Python
|
UTF-8
|
Python
| false
| false
| 919
|
py
|
from armulator.armv6.bits_ops import substring, bit_at, chain
from armulator.armv6.opcodes.abstract_opcodes.add_sp_plus_register_thumb import AddSpPlusRegisterThumb
from armulator.armv6.shift import decode_imm_shift, SRType
class AddSpPlusRegisterThumbT3(AddSpPlusRegisterThumb):
@staticmethod
def from_bitarray(instr, processor):
rm = substring(instr, 3, 0)
type_ = substring(instr, 5, 4)
imm2 = substring(instr, 7, 6)
rd = substring(instr, 11, 8)
imm3 = substring(instr, 14, 12)
setflags = bit_at(instr, 20)
shift_t, shift_n = decode_imm_shift(type_, chain(imm3, imm2, 2))
if rd == 13 and (shift_t != SRType.LSL or shift_n > 3) or (rd == 15 and not setflags) or rm in (13, 15):
print('unpredictable')
else:
return AddSpPlusRegisterThumbT3(instr, setflags=setflags, m=rm, d=rd, shift_t=shift_t, shift_n=shift_n)
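# Editor's sketch (an assumption, not part of armulator): substring(instr, msb, lsb)
# above appears to read an inclusive bit field from the instruction word; a
# plain-Python equivalent of such a field extraction would be:
def bit_field(word, msb, lsb):
    return (word >> lsb) & ((1 << (msb - lsb + 1)) - 1)
# bit_field(0b11110000, 7, 4) -> 0b1111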
|
[
"matan1008@gmail.com"
] |
matan1008@gmail.com
|
0e02aa64e88f8cd0103a0bc833aa86ea0ea95fbc
|
f68afe06e4bbf3d523584852063e767e53441b2b
|
/Toontown/toontown/toon/DistributedNPCToonBase.py
|
d205a2543f853382aff51eef5c62dc2aa6178d61
|
[] |
no_license
|
DankMickey/Toontown-Offline-Squirting-Flower-Modded-
|
eb18908e7a35a5f7fc95871814207858b94e2600
|
384754c6d97950468bb62ddd8961c564097673a9
|
refs/heads/master
| 2021-01-19T17:53:36.591832
| 2017-01-15T02:00:04
| 2017-01-15T02:00:04
| 34,639,744
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,444
|
py
|
from pandac.PandaModules import *
from otp.nametag.NametagGroup import NametagGroup
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from toontown.toonbase import ToontownGlobals
import DistributedToon
from direct.distributed import DistributedObject
import NPCToons
from toontown.quest import Quests
from direct.distributed import ClockDelta
from toontown.quest import QuestParser
from toontown.quest import QuestChoiceGui
from direct.interval.IntervalGlobal import *
import random
class DistributedNPCToonBase(DistributedToon.DistributedToon):
def __init__(self, cr):
try:
self.DistributedNPCToon_initialized
except:
self.DistributedNPCToon_initialized = 1
DistributedToon.DistributedToon.__init__(self, cr)
self.__initCollisions()
self.setPickable(0)
self.setPlayerType(NametagGroup.CCNonPlayer)
def disable(self):
self.ignore('enter' + self.cSphereNode.getName())
DistributedToon.DistributedToon.disable(self)
def delete(self):
try:
self.DistributedNPCToon_deleted
except:
self.DistributedNPCToon_deleted = 1
self.__deleteCollisions()
DistributedToon.DistributedToon.delete(self)
def generate(self):
DistributedToon.DistributedToon.generate(self)
self.cSphereNode.setName(self.uniqueName('NPCToon'))
self.detectAvatars()
self.setParent(ToontownGlobals.SPRender)
self.startLookAround()
def generateToon(self):
self.setLODs()
self.generateToonLegs()
self.generateToonHead()
self.generateToonTorso()
self.generateToonColor()
self.parentToonParts()
self.rescaleToon()
self.resetHeight()
self.rightHands = []
self.leftHands = []
self.headParts = []
self.hipsParts = []
self.torsoParts = []
self.legsParts = []
self.__bookActors = []
self.__holeActors = []
def announceGenerate(self):
self.initToonState()
DistributedToon.DistributedToon.announceGenerate(self)
def initToonState(self):
self.setAnimState('neutral', 0.9, None, None)
npcOrigin = render.find('**/npc_origin_' + str(self.posIndex))
if not npcOrigin.isEmpty():
self.reparentTo(npcOrigin)
self.initPos()
def initPos(self):
self.clearMat()
def wantsSmoothing(self):
return 0
def detectAvatars(self):
self.accept('enter' + self.cSphereNode.getName(), self.handleCollisionSphereEnter)
def ignoreAvatars(self):
self.ignore('enter' + self.cSphereNode.getName())
def getCollSphereRadius(self):
return 3.25
def __initCollisions(self):
self.cSphere = CollisionTube(0.0, 1.0, 0.0, 0.0, 1.0, 5.0, self.getCollSphereRadius())
self.cSphere.setTangible(0)
self.cSphereNode = CollisionNode('cSphereNode')
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.attachNewNode(self.cSphereNode)
self.cSphereNodePath.hide()
self.cSphereNode.setCollideMask(ToontownGlobals.WallBitmask)
def __deleteCollisions(self):
del self.cSphere
del self.cSphereNode
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
def handleCollisionSphereEnter(self, collEntry):
pass
def setupAvatars(self, av):
self.ignoreAvatars()
av.headsUp(self, 0, 0, 0)
self.headsUp(av, 0, 0, 0)
av.stopLookAround()
av.lerpLookAt(Point3(-0.5, 4, 0), time=0.5)
self.stopLookAround()
self.lerpLookAt(Point3(av.getPos(self)), time=0.5)
def b_setPageNumber(self, paragraph, pageNumber):
self.setPageNumber(paragraph, pageNumber)
self.d_setPageNumber(paragraph, pageNumber)
def d_setPageNumber(self, paragraph, pageNumber):
timestamp = ClockDelta.globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setPageNumber', [paragraph, pageNumber, timestamp])
def freeAvatar(self):
base.localAvatar.posCamera(0, 0)
base.cr.playGame.getPlace().setState('walk')
def setPositionIndex(self, posIndex):
self.posIndex = posIndex
def _startZombieCheck(self):
pass
def _stopZombieCheck(self):
pass
|
[
"jareddarty96@gmail.com"
] |
jareddarty96@gmail.com
|
0f92d183dd80697c0761f9cf3934f51b3b3fd1d8
|
9ad21dda46963fcdfe1e908596745d1d97be3dbc
|
/models/amenity.py
|
311c788d33abae282b7e95c9912d537cf31539e6
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
mj31508/AirBnB_clone_v2
|
ef903558983fc84ca7b31d20a40eedad9e622979
|
c676bc5fc6184aeb38f8669f7d295fef06e57165
|
refs/heads/master
| 2021-01-19T17:59:20.638896
| 2017-09-07T00:37:03
| 2017-09-07T00:37:03
| 101,103,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
#!/usr/bin/python3
"""
Amenity Class from Models Module
"""
from models.base_model import BaseModel, Base, Column, String, Table
from sqlalchemy.orm import relationship, backref
from os import getenv
class Amenity(BaseModel, Base):
"""Amenity class handles all application amenities"""
if getenv("HBNB_TYPE_STORAGE") == "db":
__tablename__ = "amenities"
name = Column(String(128), nullable=False)
place_amenities = relationship("PlaceAmenity", backref="amenities",
cascade="all, delete, delete-orphan")
else:
name = ""
    def __init__(self, *args, **kwargs):
        """instantiates a new amenity"""
        super().__init__(*args, **kwargs)
|
[
"mj31508@gmail.com"
] |
mj31508@gmail.com
|
1a2f4478fe86735f8c3590ae191a1535f26ead5e
|
9b80999a1bdd3595022c9abf8743a029fde3a207
|
/32-Writing Functions in Python /More on Decorators /Counter.py
|
2be3435d9e6819985cf09452b2f9ff7746cd86e5
|
[] |
no_license
|
vaibhavkrishna-bhosle/DataCamp-Data_Scientist_with_python
|
26fc3a89605f26ac3b77c15dbe45af965080115a
|
47d9d2c8c93e1db53154a1642b6281c9149af769
|
refs/heads/master
| 2022-12-22T14:01:18.140426
| 2020-09-23T11:30:53
| 2020-09-23T11:30:53
| 256,755,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
'''You're working on a new web app, and you are curious about how many times each of the functions in it gets called. So you decide to write a decorator that adds a counter to each function that you decorate. You could use this information in the future to determine whether there are sections of code that you could remove because they are no longer being used by the app.'''
def counter(func):
def wrapper(*args, **kwargs):
wrapper.count += 1
# Call the function being decorated and return the result
return func(*args, **kwargs)
wrapper.count = 0
# Return the new decorated function
return wrapper
# Decorate foo() with the counter() decorator
@counter
def foo():
print('calling foo()')
foo()
foo()
print('foo() was called {} times.'.format(foo.count))
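# Editor's note: a hedged variant (not part of the original exercise) that adds
# functools.wraps so the decorated function keeps its own __name__ and docstring;
# the counting behaviour is identical to counter() above.
import functools
def counter_wrapped(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        wrapper.count += 1
        return func(*args, **kwargs)
    wrapper.count = 0
    return wrapper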
|
[
"vaibhavkrishna.bhosle@gmail.com"
] |
vaibhavkrishna.bhosle@gmail.com
|
b37518ea6e71d2bfbce6dd7613cdae72767dc2d5
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_383/ch84_2019_06_06_20_42_07_280719.py
|
c024c1d4525f58eee0be02e2b9c6b8bdfb678c43
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
def inverte_dicionario(dicionario):
    dic = {}
    for k, v in dicionario.items():
        if v not in dic:
            dic[v] = [k]
        else:
            dic[v].append(k)
    return dic
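# Editor's note: a small usage example for the function above; each value becomes a
# key whose list collects the original keys (insertion order is preserved).
exemplo = inverte_dicionario({'a': 1, 'b': 1, 'c': 2})
print(exemplo)  # {1: ['a', 'b'], 2: ['c']}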
|
[
"you@example.com"
] |
you@example.com
|
6eab6235773fadc371788dc6921ac27ab34d157e
|
6f866eb49d0b67f0bbbf35c34cebe2babe2f8719
|
/tests/app/views/handlers/conftest.py
|
8ac724a725dc9f88afecffd8b9b45dc2787c076c
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ONSdigital/eq-questionnaire-runner
|
681b0d081f9cff0ee4ae3017ecc61f7390d553bf
|
87e7364c4d54fee99e6a5e96649123f11c4b53f1
|
refs/heads/main
| 2023-09-01T21:59:56.733363
| 2023-08-31T15:07:55
| 2023-08-31T15:07:55
| 219,752,509
| 12
| 18
|
MIT
| 2023-09-14T11:37:31
| 2019-11-05T13:32:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,411
|
py
|
import uuid
from datetime import datetime, timedelta, timezone
import pytest
from freezegun import freeze_time
from mock import Mock
from app.authentication.auth_payload_versions import AuthPayloadVersion
from app.data_models import QuestionnaireStore
from app.data_models.metadata_proxy import MetadataProxy
from app.data_models.session_data import SessionData
from app.data_models.session_store import SessionStore
from app.questionnaire import QuestionnaireSchema
from tests.app.parser.conftest import get_response_expires_at
time_to_freeze = datetime.now(timezone.utc).replace(second=0, microsecond=0)
tx_id = str(uuid.uuid4())
response_id = "1234567890123456"
period_str = "2016-01-01"
period_id = "2016-02-01"
ref_p_start_date = "2016-02-02"
ref_p_end_date = "2016-03-03"
ru_ref = "432423423423"
ru_name = "ru_name"
user_id = "789473423"
schema_name = "1_0000"
feedback_count = 1
display_address = "68 Abingdon Road, Goathill"
form_type = "I"
collection_exercise_sid = "ce_sid"
case_id = "case_id"
survey_id = "021"
data_version = "0.0.1"
feedback_type = "Feedback type"
feedback_text = "Feedback text"
feedback_type_question_category = "Feedback type question category"
started_at = str(datetime.now(tz=timezone.utc).isoformat())
language_code = "cy"
case_type = "I"
channel = "H"
case_ref = "1000000000000001"
region_code = "GB_WLS"
response_expires_at = get_response_expires_at()
@pytest.fixture
@freeze_time(time_to_freeze)
def session_data():
return SessionData(
language_code="cy",
)
@pytest.fixture
def confirmation_email_fulfilment_schema():
return QuestionnaireSchema(
{
"form_type": "H",
"region_code": "GB-WLS",
"submission": {"confirmation_email": True},
}
)
@pytest.fixture
def language():
return "en"
@pytest.fixture
def schema():
return QuestionnaireSchema(
{
"post_submission": {"view_response": True},
"title": "Test schema - View Submitted Response",
}
)
@pytest.fixture
def storage():
return Mock()
def set_storage_data(
storage_,
raw_data="{}",
version=1,
submitted_at=None,
):
storage_.get_user_data = Mock(
return_value=(raw_data, version, collection_exercise_sid, submitted_at)
)
@pytest.fixture
def session_data_feedback():
return SessionData(
language_code=language_code,
feedback_count=feedback_count,
)
@pytest.fixture
def schema_feedback():
return QuestionnaireSchema({"survey_id": survey_id, "data_version": data_version})
@pytest.fixture
def metadata():
return MetadataProxy.from_dict(
{
"tx_id": tx_id,
"user_id": user_id,
"schema_name": schema_name,
"collection_exercise_sid": collection_exercise_sid,
"period_id": period_id,
"period_str": period_str,
"ref_p_start_date": ref_p_start_date,
"ref_p_end_date": ref_p_end_date,
"ru_ref": ru_ref,
"response_id": response_id,
"form_type": form_type,
"display_address": display_address,
"case_type": case_type,
"channel": channel,
"case_ref": case_ref,
"region_code": region_code,
"case_id": case_id,
"language_code": language_code,
"response_expires_at": response_expires_at,
}
)
@pytest.fixture
def metadata_v2():
return MetadataProxy.from_dict(
{
"version": AuthPayloadVersion.V2,
"tx_id": tx_id,
"case_id": case_id,
"schema_name": schema_name,
"collection_exercise_sid": collection_exercise_sid,
"response_id": response_id,
"channel": channel,
"region_code": region_code,
"account_service_url": "account_service_url",
"response_expires_at": get_response_expires_at(),
"survey_metadata": {
"data": {
"period_id": period_id,
"period_str": period_str,
"ref_p_start_date": ref_p_start_date,
"ref_p_end_date": ref_p_end_date,
"ru_ref": ru_ref,
"ru_name": ru_name,
"case_type": case_type,
"form_type": form_type,
"case_ref": case_ref,
"display_address": display_address,
"user_id": user_id,
}
},
}
)
@pytest.fixture
def response_metadata():
return {
"started_at": started_at,
}
@pytest.fixture
def submission_payload_expires_at():
return datetime.now(timezone.utc) + timedelta(seconds=5)
@pytest.fixture
def submission_payload_session_data():
return SessionData(
language_code="cy",
)
@pytest.fixture
def submission_payload_session_store(
submission_payload_session_data,
submission_payload_expires_at,
): # pylint: disable=redefined-outer-name
return SessionStore("user_ik", "pepper", "eq_session_id").create(
"eq_session_id",
"user_id",
submission_payload_session_data,
submission_payload_expires_at,
)
@pytest.fixture
def mock_questionnaire_store(mocker):
storage_ = mocker.Mock()
storage_.get_user_data = mocker.Mock(return_value=("{}", "ce_id", 1, None))
questionnaire_store = QuestionnaireStore(storage_)
questionnaire_store.metadata = MetadataProxy.from_dict(
{
"tx_id": "tx_id",
"case_id": "case_id",
"ru_ref": ru_ref,
"user_id": user_id,
"collection_exercise_sid": collection_exercise_sid,
"period_id": period_id,
"schema_name": schema_name,
"account_service_url": "account_service_url",
"response_id": "response_id",
"response_expires_at": get_response_expires_at(),
}
)
return questionnaire_store
@pytest.fixture
def mock_questionnaire_store_v2(mocker):
storage_ = mocker.Mock()
storage_.get_user_data = mocker.Mock(return_value=("{}", "ce_id", 1, None))
questionnaire_store = QuestionnaireStore(storage_)
questionnaire_store.metadata = MetadataProxy.from_dict(
{
"version": AuthPayloadVersion.V2,
"tx_id": "tx_id",
"case_id": case_id,
"schema_name": schema_name,
"collection_exercise_sid": collection_exercise_sid,
"response_id": response_id,
"channel": channel,
"region_code": region_code,
"account_service_url": "account_service_url",
"response_expires_at": get_response_expires_at(),
"survey_metadata": {
"data": {
"period_id": period_id,
"period_str": period_str,
"ref_p_start_date": ref_p_start_date,
"ref_p_end_date": ref_p_end_date,
"ru_ref": ru_ref,
"ru_name": ru_name,
"case_type": case_type,
"form_type": form_type,
"case_ref": case_ref,
"display_address": display_address,
"user_id": user_id,
}
},
}
)
return questionnaire_store
|
[
"noreply@github.com"
] |
ONSdigital.noreply@github.com
|
3dc4c09fb5506a33933c0f69ed47ea51604b13d2
|
5102f7b8a300186496ce7691c6135efeeaeedd6c
|
/jobplus/app.py
|
8fefd43ae3d75fa52ccf45f67da58614eba6ec85
|
[] |
no_license
|
ISONEK/jobplus10-3
|
c0bc4ddfca67e54b5015cd9b1bfbfb2499338209
|
b595e3c53ced93efa7883c67a4633132b5f52c15
|
refs/heads/master
| 2022-10-20T20:53:15.506235
| 2019-02-25T14:05:14
| 2019-02-25T14:05:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
from flask import Flask
from flask_migrate import Migrate
from .config import configs
from .handlers import front
from jobplus.models import db
def create_app(config):
app = Flask(__name__)
app.config.from_object(configs.get(config))
db.init_app(app)
Migrate(app,db)
app.register_blueprint(front)
return app
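# Editor's sketch: typical use of the factory above from an entry-point script that
# lives outside the package. The 'development' key is an assumption about what
# jobplus.config.configs defines; substitute a key that mapping actually provides.
# from jobplus.app import create_app
# app = create_app('development')
# app.run(debug=True)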
|
[
"1195581533@qq.com"
] |
1195581533@qq.com
|
4c8a5ec0a1babe7f7faaccd7a56cf6452644aa9e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02783/s364760222.py
|
56095ba287f4d078faedb65eb7b8df8402523e7c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
import sys
#import string
#from collections import defaultdict, deque, Counter
#import bisect
#import heapq
#import math
#from itertools import accumulate
#from itertools import permutations as perm
#from itertools import combinations as comb
#from itertools import combinations_with_replacement as combr
#from fractions import gcd
#import numpy as np
stdin = sys.stdin
sys.setrecursionlimit(10 ** 7)
MIN = -10 ** 9
MOD = 10 ** 9 + 7
INF = float("inf")
IINF = 10 ** 18
def solve():
#n = int(stdin.readline().rstrip())
h,a = map(int, stdin.readline().rstrip().split())
#l = list(map(int, stdin.readline().rstrip().split()))
#numbers = [[int(c) for c in l.strip().split()] for l in sys.stdin]
#word = [stdin.readline().rstrip() for _ in range(n)]
#number = [[int(c) for c in stdin.readline().rstrip()] for _ in range(n)]
#zeros = [[0] * w for i in range(h)]
ans = h // a
if h%a == 0:
print(ans)
else:
print(ans + 1)
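# Editor's note: the if/else above computes the integer ceiling of h / a. For
# positive integers it is equivalent to the one-liner below (a sketch, not part
# of the original submission).
def ceil_div(h, a):
    return -(-h // a)
# ceil_div(10, 3) -> 4, ceil_div(9, 3) -> 3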
if __name__ == '__main__':
solve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8f1a6d780bd0edce2d520e13dad88a8227254da9
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/src/datadog_api_client/v2/model/confluent_account_resource_attributes.py
|
17ce380b94bbc1ddf35e1700b093ca27a39b4d1d
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import List, Union
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
class ConfluentAccountResourceAttributes(ModelNormal):
@cached_property
def openapi_types(_):
return {
"enable_custom_metrics": (bool,),
"id": (str,),
"resource_type": (str,),
"tags": ([str],),
}
attribute_map = {
"enable_custom_metrics": "enable_custom_metrics",
"id": "id",
"resource_type": "resource_type",
"tags": "tags",
}
def __init__(
self_,
resource_type: str,
enable_custom_metrics: Union[bool, UnsetType] = unset,
id: Union[str, UnsetType] = unset,
tags: Union[List[str], UnsetType] = unset,
**kwargs,
):
"""
Attributes object for updating a Confluent resource.
:param enable_custom_metrics: Enable the ``custom.consumer_lag_offset`` metric, which contains extra metric tags.
:type enable_custom_metrics: bool, optional
:param id: The ID associated with a Confluent resource.
:type id: str, optional
:param resource_type: The resource type of the Resource. Can be ``kafka`` , ``connector`` , ``ksql`` , or ``schema_registry``.
:type resource_type: str
:param tags: A list of strings representing tags. Can be a single key, or key-value pairs separated by a colon.
:type tags: [str], optional
"""
if enable_custom_metrics is not unset:
kwargs["enable_custom_metrics"] = enable_custom_metrics
if id is not unset:
kwargs["id"] = id
if tags is not unset:
kwargs["tags"] = tags
super().__init__(kwargs)
self_.resource_type = resource_type
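# Editor's sketch: constructing the model with the arguments documented in the
# docstring above; the literal values are illustrative only.
# attrs = ConfluentAccountResourceAttributes(
#     resource_type="kafka",
#     enable_custom_metrics=True,
#     tags=["env:staging", "team:data"],
# )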
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
5a0d3f5f22f67c39c6323697a44bd6491d06ab42
|
a5455dbb01687ab031f6347306dbb5ccc3c0c162
|
/第一阶段/day13/code/mymod3.py
|
bb84da638d9bdc4dc9f09578b20fc403ed12fb85
|
[] |
no_license
|
zuobing1995/tiantianguoyuan
|
9ff67aef6d916e27d92b63f812c96a6d5dbee6f8
|
29af861f5edf74a4a1a4156153678b226719c56d
|
refs/heads/master
| 2022-11-22T06:50:13.818113
| 2018-11-06T04:52:53
| 2018-11-06T04:52:53
| 156,317,754
| 1
| 1
| null | 2022-11-22T01:06:37
| 2018-11-06T03:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
# mymod3.py
# This module demonstrates hidden ("private") module attributes
def f1():
pass
def _f2():
pass
def __f3():
pass
name1 = "abc"
_name2 = '123'
|
[
"bing@163.com"
] |
bing@163.com
|
35c18252ebf33bb45574a6aac18b24612ea99638
|
98efe1aee73bd9fbec640132e6fb2e54ff444904
|
/loldib/getratings/models/NA/na_ezreal/na_ezreal_top.py
|
9d3af100a74aaa3bca56f5bd36d826514b917710
|
[
"Apache-2.0"
] |
permissive
|
koliupy/loldib
|
be4a1702c26546d6ae1b4a14943a416f73171718
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
refs/heads/master
| 2021-07-04T03:34:43.615423
| 2017-09-21T15:44:10
| 2017-09-21T15:44:10
| 104,359,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,545
|
py
|
from getratings.models.ratings import Ratings
class NA_Ezreal_Top_Aatrox(Ratings):
pass
class NA_Ezreal_Top_Ahri(Ratings):
pass
class NA_Ezreal_Top_Akali(Ratings):
pass
class NA_Ezreal_Top_Alistar(Ratings):
pass
class NA_Ezreal_Top_Amumu(Ratings):
pass
class NA_Ezreal_Top_Anivia(Ratings):
pass
class NA_Ezreal_Top_Annie(Ratings):
pass
class NA_Ezreal_Top_Ashe(Ratings):
pass
class NA_Ezreal_Top_AurelionSol(Ratings):
pass
class NA_Ezreal_Top_Azir(Ratings):
pass
class NA_Ezreal_Top_Bard(Ratings):
pass
class NA_Ezreal_Top_Blitzcrank(Ratings):
pass
class NA_Ezreal_Top_Brand(Ratings):
pass
class NA_Ezreal_Top_Braum(Ratings):
pass
class NA_Ezreal_Top_Caitlyn(Ratings):
pass
class NA_Ezreal_Top_Camille(Ratings):
pass
class NA_Ezreal_Top_Cassiopeia(Ratings):
pass
class NA_Ezreal_Top_Chogath(Ratings):
pass
class NA_Ezreal_Top_Corki(Ratings):
pass
class NA_Ezreal_Top_Darius(Ratings):
pass
class NA_Ezreal_Top_Diana(Ratings):
pass
class NA_Ezreal_Top_Draven(Ratings):
pass
class NA_Ezreal_Top_DrMundo(Ratings):
pass
class NA_Ezreal_Top_Ekko(Ratings):
pass
class NA_Ezreal_Top_Elise(Ratings):
pass
class NA_Ezreal_Top_Evelynn(Ratings):
pass
class NA_Ezreal_Top_Ezreal(Ratings):
pass
class NA_Ezreal_Top_Fiddlesticks(Ratings):
pass
class NA_Ezreal_Top_Fiora(Ratings):
pass
class NA_Ezreal_Top_Fizz(Ratings):
pass
class NA_Ezreal_Top_Galio(Ratings):
pass
class NA_Ezreal_Top_Gangplank(Ratings):
pass
class NA_Ezreal_Top_Garen(Ratings):
pass
class NA_Ezreal_Top_Gnar(Ratings):
pass
class NA_Ezreal_Top_Gragas(Ratings):
pass
class NA_Ezreal_Top_Graves(Ratings):
pass
class NA_Ezreal_Top_Hecarim(Ratings):
pass
class NA_Ezreal_Top_Heimerdinger(Ratings):
pass
class NA_Ezreal_Top_Illaoi(Ratings):
pass
class NA_Ezreal_Top_Irelia(Ratings):
pass
class NA_Ezreal_Top_Ivern(Ratings):
pass
class NA_Ezreal_Top_Janna(Ratings):
pass
class NA_Ezreal_Top_JarvanIV(Ratings):
pass
class NA_Ezreal_Top_Jax(Ratings):
pass
class NA_Ezreal_Top_Jayce(Ratings):
pass
class NA_Ezreal_Top_Jhin(Ratings):
pass
class NA_Ezreal_Top_Jinx(Ratings):
pass
class NA_Ezreal_Top_Kalista(Ratings):
pass
class NA_Ezreal_Top_Karma(Ratings):
pass
class NA_Ezreal_Top_Karthus(Ratings):
pass
class NA_Ezreal_Top_Kassadin(Ratings):
pass
class NA_Ezreal_Top_Katarina(Ratings):
pass
class NA_Ezreal_Top_Kayle(Ratings):
pass
class NA_Ezreal_Top_Kayn(Ratings):
pass
class NA_Ezreal_Top_Kennen(Ratings):
pass
class NA_Ezreal_Top_Khazix(Ratings):
pass
class NA_Ezreal_Top_Kindred(Ratings):
pass
class NA_Ezreal_Top_Kled(Ratings):
pass
class NA_Ezreal_Top_KogMaw(Ratings):
pass
class NA_Ezreal_Top_Leblanc(Ratings):
pass
class NA_Ezreal_Top_LeeSin(Ratings):
pass
class NA_Ezreal_Top_Leona(Ratings):
pass
class NA_Ezreal_Top_Lissandra(Ratings):
pass
class NA_Ezreal_Top_Lucian(Ratings):
pass
class NA_Ezreal_Top_Lulu(Ratings):
pass
class NA_Ezreal_Top_Lux(Ratings):
pass
class NA_Ezreal_Top_Malphite(Ratings):
pass
class NA_Ezreal_Top_Malzahar(Ratings):
pass
class NA_Ezreal_Top_Maokai(Ratings):
pass
class NA_Ezreal_Top_MasterYi(Ratings):
pass
class NA_Ezreal_Top_MissFortune(Ratings):
pass
class NA_Ezreal_Top_MonkeyKing(Ratings):
pass
class NA_Ezreal_Top_Mordekaiser(Ratings):
pass
class NA_Ezreal_Top_Morgana(Ratings):
pass
class NA_Ezreal_Top_Nami(Ratings):
pass
class NA_Ezreal_Top_Nasus(Ratings):
pass
class NA_Ezreal_Top_Nautilus(Ratings):
pass
class NA_Ezreal_Top_Nidalee(Ratings):
pass
class NA_Ezreal_Top_Nocturne(Ratings):
pass
class NA_Ezreal_Top_Nunu(Ratings):
pass
class NA_Ezreal_Top_Olaf(Ratings):
pass
class NA_Ezreal_Top_Orianna(Ratings):
pass
class NA_Ezreal_Top_Ornn(Ratings):
pass
class NA_Ezreal_Top_Pantheon(Ratings):
pass
class NA_Ezreal_Top_Poppy(Ratings):
pass
class NA_Ezreal_Top_Quinn(Ratings):
pass
class NA_Ezreal_Top_Rakan(Ratings):
pass
class NA_Ezreal_Top_Rammus(Ratings):
pass
class NA_Ezreal_Top_RekSai(Ratings):
pass
class NA_Ezreal_Top_Renekton(Ratings):
pass
class NA_Ezreal_Top_Rengar(Ratings):
pass
class NA_Ezreal_Top_Riven(Ratings):
pass
class NA_Ezreal_Top_Rumble(Ratings):
pass
class NA_Ezreal_Top_Ryze(Ratings):
pass
class NA_Ezreal_Top_Sejuani(Ratings):
pass
class NA_Ezreal_Top_Shaco(Ratings):
pass
class NA_Ezreal_Top_Shen(Ratings):
pass
class NA_Ezreal_Top_Shyvana(Ratings):
pass
class NA_Ezreal_Top_Singed(Ratings):
pass
class NA_Ezreal_Top_Sion(Ratings):
pass
class NA_Ezreal_Top_Sivir(Ratings):
pass
class NA_Ezreal_Top_Skarner(Ratings):
pass
class NA_Ezreal_Top_Sona(Ratings):
pass
class NA_Ezreal_Top_Soraka(Ratings):
pass
class NA_Ezreal_Top_Swain(Ratings):
pass
class NA_Ezreal_Top_Syndra(Ratings):
pass
class NA_Ezreal_Top_TahmKench(Ratings):
pass
class NA_Ezreal_Top_Taliyah(Ratings):
pass
class NA_Ezreal_Top_Talon(Ratings):
pass
class NA_Ezreal_Top_Taric(Ratings):
pass
class NA_Ezreal_Top_Teemo(Ratings):
pass
class NA_Ezreal_Top_Thresh(Ratings):
pass
class NA_Ezreal_Top_Tristana(Ratings):
pass
class NA_Ezreal_Top_Trundle(Ratings):
pass
class NA_Ezreal_Top_Tryndamere(Ratings):
pass
class NA_Ezreal_Top_TwistedFate(Ratings):
pass
class NA_Ezreal_Top_Twitch(Ratings):
pass
class NA_Ezreal_Top_Udyr(Ratings):
pass
class NA_Ezreal_Top_Urgot(Ratings):
pass
class NA_Ezreal_Top_Varus(Ratings):
pass
class NA_Ezreal_Top_Vayne(Ratings):
pass
class NA_Ezreal_Top_Veigar(Ratings):
pass
class NA_Ezreal_Top_Velkoz(Ratings):
pass
class NA_Ezreal_Top_Vi(Ratings):
pass
class NA_Ezreal_Top_Viktor(Ratings):
pass
class NA_Ezreal_Top_Vladimir(Ratings):
pass
class NA_Ezreal_Top_Volibear(Ratings):
pass
class NA_Ezreal_Top_Warwick(Ratings):
pass
class NA_Ezreal_Top_Xayah(Ratings):
pass
class NA_Ezreal_Top_Xerath(Ratings):
pass
class NA_Ezreal_Top_XinZhao(Ratings):
pass
class NA_Ezreal_Top_Yasuo(Ratings):
pass
class NA_Ezreal_Top_Yorick(Ratings):
pass
class NA_Ezreal_Top_Zac(Ratings):
pass
class NA_Ezreal_Top_Zed(Ratings):
pass
class NA_Ezreal_Top_Ziggs(Ratings):
pass
class NA_Ezreal_Top_Zilean(Ratings):
pass
class NA_Ezreal_Top_Zyra(Ratings):
pass
|
[
"noreply@github.com"
] |
koliupy.noreply@github.com
|
a121f41b3cc1380246409f13814789b0c1093fa0
|
0d5c77661f9d1e6783b1c047d2c9cdd0160699d1
|
/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py
|
252793944462244539084a288e5259f216359650
|
[
"Apache-2.0"
] |
permissive
|
xiaoyichao/anyq_paddle
|
ae68fabf1f1b02ffbc287a37eb6c0bcfbf738e7f
|
6f48b8f06f722e3bc5e81f4a439968c0296027fb
|
refs/heads/master
| 2022-10-05T16:52:28.768335
| 2020-03-03T03:28:50
| 2020-03-03T03:28:50
| 244,155,581
| 1
| 0
|
Apache-2.0
| 2022-09-23T22:37:13
| 2020-03-01T13:36:58
|
C++
|
UTF-8
|
Python
| false
| false
| 3,770
|
py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import numpy as np
import unittest
import os
def simple_fc_net():
img = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = img
for _ in xrange(4):
hidden = fluid.layers.fc(
hidden,
size=200,
act='tanh',
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=1.0)))
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
loss = fluid.layers.mean(loss)
return loss
class ParallelExecutorTestingDuringTraining(unittest.TestCase):
def check_network_convergence(self, use_cuda, build_strategy=None):
os.environ['CPU_NUM'] = str(4)
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = simple_fc_net()
test_program = main.clone(for_test=True)
opt = fluid.optimizer.SGD(learning_rate=0.001)
opt.minimize(loss)
batch_size = 32
image = np.random.normal(size=(batch_size, 784)).astype('float32')
label = np.random.randint(0, 10, (batch_size, 1), dtype="int64")
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup)
feed_dict = {'image': image, 'label': label}
train_exe = fluid.ParallelExecutor(
use_cuda=use_cuda,
loss_name=loss.name,
main_program=main,
build_strategy=build_strategy)
test_exe = fluid.ParallelExecutor(
use_cuda=use_cuda,
main_program=test_program,
share_vars_from=train_exe,
build_strategy=build_strategy)
for i in xrange(5):
test_loss, = test_exe.run([loss.name], feed=feed_dict)
train_loss, = train_exe.run([loss.name], feed=feed_dict)
self.assertTrue(
np.allclose(
train_loss, test_loss, atol=1e-8),
"Train loss: " + str(train_loss) + "\n Test loss:" +
str(test_loss))
def test_parallel_testing(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
self.check_network_convergence(
use_cuda=True, build_strategy=build_strategy)
self.check_network_convergence(
use_cuda=False, build_strategy=build_strategy)
def test_parallel_testing_with_new_strategy(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
self.check_network_convergence(
use_cuda=True, build_strategy=build_strategy)
self.check_network_convergence(
use_cuda=False, build_strategy=build_strategy)
if __name__ == '__main__':
unittest.main()
|
[
"xiaoyichao@haohaozhu.com"
] |
xiaoyichao@haohaozhu.com
|
10715d27aa4f7e90889e6c3656f863943f5b87a0
|
04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29
|
/404/Solution.py
|
fc03385d6cf998b49aa9ceaf42511cad45ba0ca5
|
[] |
no_license
|
zhangruochi/leetcode
|
6f739fde222c298bae1c68236d980bd29c33b1c6
|
cefa2f08667de4d2973274de3ff29a31a7d25eda
|
refs/heads/master
| 2022-07-16T23:40:20.458105
| 2022-06-02T18:25:35
| 2022-06-02T18:25:35
| 78,989,941
| 14
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
"""
Find the sum of all left leaves in a given binary tree.
Example:
3
/ \
9 20
/ \
15 7
There are two left leaves in the binary tree, with values 9 and 15 respectively. Return 24.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def iter(root,isleaft):
if not root:
return 0
elif not root.left and not root.right and isleaft:
return root.val
else:
return iter(root.left,True) + iter(root.right,False)
return iter(root,False)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sumOfLeftLeaves(self, root: TreeNode) -> int:
res = 0
def traverse(root,flag):
nonlocal res
if not root:
return
if not root.left and not root.right and flag:
res += root.val
traverse(root.left,1)
traverse(root.right,0)
traverse(root,0)
return res
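# A minimal usage sketch: build the example tree from the problem statement above
# and check that the solution returns 24 (the names below are only for illustration).
if __name__ == '__main__':
    example = TreeNode(3)
    example.left = TreeNode(9)
    example.right = TreeNode(20)
    example.right.left = TreeNode(15)
    example.right.right = TreeNode(7)
    print(Solution().sumOfLeftLeaves(example))  # expected output: 24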
|
[
"zrc720@gmail.com"
] |
zrc720@gmail.com
|
11536b5271ae3d430e3c66740a0fa2cbea21f19b
|
0089e87d4e9bef9df4fe6611a79a13225c01b98e
|
/mB3-python-03/script-b0307-01.py
|
e9ade0c7dc5e3463fdb7dbdf2ba8361e5a78fbb1
|
[] |
no_license
|
odvk/sf-pyfullstack-c02
|
63ea99bf6bea79113fe75e0e6f4e5cdfb4a90aca
|
4521b9652264d03c082a9390dbcdcec2586c8fd1
|
refs/heads/master
| 2020-06-25T06:48:50.944156
| 2019-08-17T06:08:02
| 2019-08-17T06:08:02
| 199,236,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
# B3.7 Once more about "magic" methods
class User:
def __init__(self):
print("Конструирую пользователя")
self.name = "Гелиозавр"
# the constructor is a function defined inside the class;
# the constructor is called __init__ (two underscores on each side of the name);
# the constructor is a function: it has a formal argument called 'self';
# inside the constructor, the actual self argument can be used to change the fields of the object being manipulated.
u1 = User()
u2 = User()
u2.name = "Орнитишийлар"
print(u1.name, u2.name)
print("----------------------------")
# In our case we want the name field to be used whenever User takes part in an expression as a string
# (that is, when it is converted to a string). This is done with the following code:
class User1:
def __init__(self):
print("Конструирую пользователя")
self.name = "Гелиозавр"
def __str__(self):
return self.name
u1 = User1()
print(u1)
print("----------------------------")
# Let's extend our user model and rewrite the class definition a little:
class User2:
def __init__(self, email, name="Гелиозавр"):
self.email = email
self.name = name
def __str__(self):
return "%s <%s>" % (self.name, self.email)
u1 = User2("test@example.com")
u2 = User2(name="Орнитишийлар", email="zakusi@pet.ru")
print(u1, u2)
print("----------------------------")
# Note that some of the examples in the table above use two arguments,
# because they describe a pairwise operation. For example, in our service we may decide
# that two users with the same e-mail are two equal users.
# To use this check in code, we define our user class like this:
class User3:
def __init__(self, email, name="Гелиозавр"):
self.email = email
self.name = name
def __str__(self):
return "%s <%s>" % (self.name, self.email)
def __eq__(self, other):
return self.email.lower() == other.email.lower()
u1 = User3(name="Гелиозавр", email="RAWR@mail.ru")
u2 = User3(name="Орнитишийлар", email="rawr@mail.ru")
print(u1, u2)
print("Это один и тот же пользователь?", u1 == u2)
|
[
"kustov.dv@gmail.com"
] |
kustov.dv@gmail.com
|
8c994e8baded11dfb7211bd97cfef1a47f2fdf33
|
8fd314074713b3af02d68fd99fa5bf323283439f
|
/server/src/uds/dispatchers/__init__.py
|
67ac9fa4e1883883983f5b8efeb5bf3c4d4ec13a
|
[] |
no_license
|
spofa/openuds
|
099f5d4b4eaf54064d3c2f22a04653d304552294
|
a071ce5e3ed7a3e8973431cc2e884ff4219b8056
|
refs/heads/master
| 2021-07-04T14:16:13.810597
| 2017-09-21T13:50:07
| 2017-09-21T13:50:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Virtual Cable S.L.
# All rights reserved.
#
'''
@author: Adolfo Gómez, dkmaster at dkmon dot com
'''
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
'''
Service modules for uds are contained inside this package.
To create a new service module, you will need to follow these steps:
1.- Create the service module, probably based on an existing one
2.- Insert the module package as child of this package
3.- Import the class of your service module at __init__. For example::
from Service import SimpleService
4.- Done. At Server restart, the module will be recognized, loaded and treated
The registration of modules is done by locating subclasses of :py:class:`uds.core.auths.Authentication`
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
'''
def __init__():
'''
    This imports all packages that are descendants of this package and, after that,
    registers all subclasses of service provider.
'''
import os.path
import pkgutil
import sys
    # Dynamically import children of this package. The __init__.py files must register, if needed, inside ServiceProviderFactory
pkgpath = os.path.dirname(sys.modules[__name__].__file__)
for _, name, _ in pkgutil.iter_modules([pkgpath]):
try:
logger.info('Loading dispatcher {}'.format(name))
__import__(name, globals(), locals(), [], -1)
except:
logger.exception('Loading dispatcher {}'.format(name))
logger.debug('Dispatchers initialized')
__init__()
|
[
"dkmaster@dkmon.com"
] |
dkmaster@dkmon.com
|
875e902874cd19eed9179c2b9f5951774b7ebdd3
|
083ca3df7dba08779976d02d848315f85c45bf75
|
/ZumaGame2.py
|
0233172d4172027141b217916e5abb0e6ac474f9
|
[] |
no_license
|
jiangshen95/UbuntuLeetCode
|
6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94
|
fa02b469344cf7c82510249fba9aa59ae0cb4cc0
|
refs/heads/master
| 2021-05-07T02:04:47.215580
| 2020-06-11T02:33:35
| 2020-06-11T02:33:35
| 110,397,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
class Solution:
def findMinStep(self, board: str, hand: str) -> int:
def removeConsecutive(b):
j = 0
for i in range(len(b) + 1):
if i < len(b) and b[i] == b[j]:
continue
if i - j >= 3:
return removeConsecutive(b[: j] + b[i:])
j = i
return b
board = removeConsecutive(board)
if not board:
return 0
result = 100
s = set()
for i in range(len(hand)):
if hand[i] in s:
continue
s.add(hand[i])
for j in range(len(board)):
if board[j] == hand[i]:
t = self.findMinStep(board[: j] + hand[i] + board[j:], hand[:i] + hand[i + 1:])
if t != -1:
result = min(result, t + 1)
return -1 if result == 100 else result
if __name__ == '__main__':
board = input()
hand = input()
solution = Solution()
print(solution.findMinStep(board, hand))
|
[
"jiangshen95@163.com"
] |
jiangshen95@163.com
|
ca0321aca72bd390e64948f0e7f89acf174fef9a
|
1d36f1a3c527e364b50cb73d0ce82b5b5db917e6
|
/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py
|
1b044a24c0e14bee3174e19fcfc646c2d828904f
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
emreyalcin26/azure-sdk-for-python
|
08c0a294e49e9c3a77867fb20ded4d97722ea551
|
6927458c7baa5baaf07c3b68ed30f6e517e87c9a
|
refs/heads/master
| 2022-10-17T02:25:23.373789
| 2020-06-12T23:43:40
| 2020-06-12T23:43:40
| 272,001,096
| 1
| 0
|
MIT
| 2020-06-13T12:06:11
| 2020-06-13T12:06:11
| null |
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import TYPE_CHECKING
from .base import AsyncCredentialBase
from .._internal import AadClient
from ..._internal import CertificateCredentialBase
if TYPE_CHECKING:
from typing import Any
from azure.core.credentials import AccessToken
class CertificateCredential(CertificateCredentialBase, AsyncCredentialBase):
"""Authenticates as a service principal using a certificate.
:param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID.
:param str client_id: the service principal's client ID
:param str certificate_path: path to a PEM-encoded certificate file including the private key
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.KnownAuthorities`
defines authorities for other clouds.
:keyword password: The certificate's password. If a unicode string, it will be encoded as UTF-8. If the certificate
requires a different encoding, pass appropriately encoded bytes instead.
:paramtype password: str or bytes
"""
async def __aenter__(self):
await self._client.__aenter__()
return self
async def close(self):
"""Close the credential's transport session."""
await self._client.__aexit__()
async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": # pylint:disable=unused-argument
"""Asynchronously request an access token for `scopes`.
.. note:: This method is called by Azure SDK clients. It isn't intended for use in application code.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
attribute gives a reason. Any error response from Azure Active Directory is available as the error's
``response`` attribute.
"""
if not scopes:
raise ValueError("'get_token' requires at least one scope")
token = self._client.get_cached_access_token(scopes, query={"client_id": self._client_id})
if not token:
token = await self._client.obtain_token_by_client_certificate(scopes, self._certificate, **kwargs)
return token
def _get_auth_client(self, tenant_id, client_id, **kwargs):
return AadClient(tenant_id, client_id, **kwargs)
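# A minimal, illustrative usage sketch based on the docstring above; the tenant ID,
# client ID and certificate path are placeholder values, and the Key Vault scope is
# only an example of a valid scope string:
#
#     import asyncio
#
#     async def main():
#         credential = CertificateCredential("my-tenant-id", "my-client-id", "/path/to/cert.pem")
#         token = await credential.get_token("https://vault.azure.net/.default")
#         print(token.expires_on)
#         await credential.close()
#
#     asyncio.run(main())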
|
[
"noreply@github.com"
] |
emreyalcin26.noreply@github.com
|
aa3cad735dc9453629e4cb87982cd2dc96c5743e
|
32cb84dd41e4be24c065bb205f226f9b121a6db2
|
/antiddos/policy.py
|
eea1a79695c59d373b35ac96a1b19ba7d74d6620
|
[] |
no_license
|
InformatykaNaStart/staszic-sio2
|
b38fda84bd8908472edb2097774838ceed08fcfa
|
60a127e687ef8216d2ba53f9f03cfaa201c59e26
|
refs/heads/master
| 2022-06-29T11:09:28.765166
| 2022-06-13T21:56:19
| 2022-06-13T21:56:19
| 115,637,960
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
from oioioi.evalmgr.models import QueuedJob
from django.conf import settings
def queued_submissions_of(user):
return QueuedJob.objects.filter(state='WAITING',submission__user=user).count()
def get_queued_submissions_limit():
return getattr(settings, 'MAX_QUEUED_SUBMISSIONS_PER_USER', 10**3)
def can_submit(user):
if user.is_superuser: return True
return queued_submissions_of(user) < get_queued_submissions_limit()
|
[
"hugo@staszic.waw.pl"
] |
hugo@staszic.waw.pl
|
656c4bcc2e28b5938448e7b70cf38bafc93e704e
|
5a4f1e3013290d331d2a1e69daa69c29882fb97c
|
/asynclib/base_events.py
|
297e94c06411df242ff3ccb72025edad82377e36
|
[] |
no_license
|
pfmoore/asynclib
|
308e28609f28638f7a05c2c8e3f1fde9aa72e984
|
b03979cd532632e5165a8d35f2024ce2ea8dfc5b
|
refs/heads/master
| 2021-01-22T03:08:52.297430
| 2015-05-16T12:02:41
| 2015-05-16T12:02:41
| 35,449,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
"""Event loop."""
from .tasks import Task
class EventLoop:
def __init__(self):
self.ready = []
self.call_soon_queue = []
self.running = False
def call_soon(self, fn):
self.call_soon_queue.append(fn)
def _run_one_step(self):
while self.call_soon_queue:
fn = self.call_soon_queue.pop(0)
fn()
if not self.ready:
return
current = self.ready[0]
try:
next(current)
except StopIteration:
self.unschedule(current)
else:
if self.ready and self.ready[0] is current:
# current is hogging the "next available" slot.
# Implement a fairness algorithm here - in this case,
# just move it to the back to give a "round robin"
# algorithm
del self.ready[0]
self.ready.append(current)
def run_forever(self):
self.running = True
while self.running and self.ready:
self._run_one_step()
    def run_until_complete(self, future):
        pass
def is_running(self):
return self.running
def stop(self):
self.running = False
def schedule(self, coro):
self.ready.append(coro)
def unschedule(self, coro):
if coro in self.ready:
self.ready.remove(coro)
    def create_task(self, coro):
        t = Task(coro, loop=self)  # self.schedule(coro)
        return t
def get_debug(self):
return False
def call_exception_handler(self, *args):
print(args)
loop = EventLoop()
|
[
"p.f.moore@gmail.com"
] |
p.f.moore@gmail.com
|
f2d870ea60c114be0dfb7f2f551b3c0f0b4a0a48
|
3bb57eb1f7c1c0aced487e7ce88f3cb84d979054
|
/qats/scripts/evaluators/formatted_accuracy.py
|
5ab4d2d8e8201b614d6d230be335d6d736d814a5
|
[] |
no_license
|
ghpaetzold/phd-backup
|
e100cd0bbef82644dacc73a8d1c6b757b2203f71
|
6f5eee43e34baa796efb16db0bc8562243a049b6
|
refs/heads/master
| 2020-12-24T16:41:21.490426
| 2016-04-23T14:50:07
| 2016-04-23T14:50:07
| 37,981,094
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
import os
from tabulate import tabulate
from scipy.stats import spearmanr
def getAccuracy(pred, gold):
right = 0.0
for i in range(0, len(pred)):
if pred[i]==gold[i]:
right += 1.0
return right/len(pred)
types = ['G', 'M', 'S', 'O']
systems = sorted(os.listdir('../../labels/G'))
names = {}
names['nn'] = 'SimpleNets-RNN3'
names['nn_adadelta'] = 'SimpleNets-RNN2'
names['nn_mlp'] = 'SimpleNets-MLP'
names['adaboost'] = 'Ada Boosting'
names['dectrees'] = 'Decision Trees'
names['gradientboost'] = 'Gradient Boosting'
names['randomforest'] = 'Random Forests'
names['sgd'] = 'SGD'
names['svm'] = 'SVM'
names['allgood'] = 'All Good'
names['allok'] = 'All Ok'
names['allbad'] = 'All Bad'
scores = {}
for system in systems:
scores[system] = []
for type in types:
gold = [item.strip().split('\t')[2] for item in open('../../corpora/'+type+'_test.txt')]
golds = [float(item.strip().split('\t')[2]) for item in open('../../corpora/'+type+'_test.txt')]
for system in systems:
files = os.listdir('../../labels/'+type+'/'+system)
maxacc = -1
maxspear = 0
maxfile = None
for file in files:
pred = [item.strip().split('\t')[0] for item in open('../../labels/'+type+'/'+system+'/'+file)]
preds = [float(item.strip().split('\t')[1]) for item in open('../../labels/'+type+'/'+system+'/'+file)]
preds[0] = preds[0]+0.00000001
acc = getAccuracy(pred, gold)
if acc>maxacc:
maxacc = acc
maxfile = file
spear, f = spearmanr(preds, golds)
if acc>maxspear:
maxspear = spear
scores[system].append((maxacc, maxspear))
for system in sorted(scores.keys()):
if system in names:
newline = names[system]
for value in scores[system]:
newline += r' & $' + "%.3f" % value[0] + r'$ & $' + "%.3f" % value[1] + r'$'
newline += r' \\'
print(newline)
|
[
"ghpaetzold@outlook.com"
] |
ghpaetzold@outlook.com
|
6a5307ad7db7ca33697b63b6436b59f2d9a19557
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/solve_20180621175952.py
|
3582f849f7b735ae37b11bf925233f5c6574c087
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
def findNextCellToFill(grid, i, j):
for x in range(i,9):
for y in range(j,9):
if grid[x][y] == 0:
return x,y
for x in range(0,9):
for y in range(0,9):
if grid[x][y] == 0:
return x,y
return -1,-1
def isValid(grid, i, j, e):
rowOk = all([e != grid[i][x] for x in range(9)])
if rowOk:
columnOk = all([e != grid[x][j] for x in range(9)])
if columnOk:
# finding the top left x,y co-ordinates of the section containing the i,j cell
secTopX, secTopY = 3 *(i//3), 3 *(j//3) #floored quotient should be used here.
for x in range(secTopX, secTopX+3):
for y in range(secTopY, secTopY+3):
if grid[x][y] == e:
return False
return True
return False
def solveSudoku(grid, i=0, j=0):
i,j = findNextCellToFill(grid, i, j)
if i == -1:
return True
for e in range(1,10):
if isValid(grid,i,j,e):
grid[i][j] = e
if solveSudoku(grid, i, j):
return True
# Undo the current cell for backtracking
grid[i][j] = 0
return False
input = [[5,1,7,6,0,0,0,3,4],[2,8,9,0,0,4,0,0,0],[3,4,6,2,0,5,0,9,0],[6,0,2,0,0,0,0,1,0],[0,3,8,0,0,6,0,4,7],[0,0,0,0,0,0,0,0,0],[0,9,0,0,0,0,0,7,8],[7,0,3,4,0,0,5,6,0],[0,0,0,0,0,0,0,0,0]]
solveSudoku(input)
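# A small illustrative follow-up: solveSudoku fills the grid in place, so the solved
# board can be printed row by row once the call above returns.
for solved_row in input:
    print(solved_row)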
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
a7397e10786125cdc8ee81286b7a97fdbc6f1f78
|
38b8bceafb4d80afc7c77196eb9ee99694191bcf
|
/wxpython/grid2.py
|
e9749835196d535abce07a36ed5223c8b385ea9f
|
[] |
no_license
|
tangc1986/PythonStudy
|
f6c5b384874e82fbf0b5f51cfb7a7a89a48ec0ff
|
1ed1956758e971647426e7096ac2e8cbcca585b4
|
refs/heads/master
| 2021-01-23T20:39:23.930754
| 2017-10-08T07:40:32
| 2017-10-08T07:42:38
| 42,122,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# -*- coding: UTF-8 -*-
__author__ = 'tangchao'
import wx
import wx.grid
class TestFrame(wx.Frame):
rowLabels = ["uno", "dos", "tres", "quatro", "cinco"]
colLabels = ["homer", "marge", "bart", "lisa", "mnaggie"]
def __init__(self):
wx.Frame.__init__(self, None, title="Grid Headers",
size=(500, 200))
grid = wx.grid.Grid(self)
grid.CreateGrid(5, 5)
for row in range(5):
#1 start
grid.SetRowLabelValue(row, self.rowLabels[row])
grid.SetColLabelValue(row, self.colLabels[row])
#1 end
for col in range(5):
grid.SetCellValue(row, col,
"(%s, %s)" % (self.rowLabels[row], self.colLabels[col]))
app = wx.PySimpleApp()
frame = TestFrame()
frame.Show()
app.MainLoop()
|
[
"tangc1986@gmail.com"
] |
tangc1986@gmail.com
|
ba3716f0dc54c992ee423cea11f9fbcde6fadde9
|
9cc3135d5fcd781c0542a905c61dc19b0ceeffef
|
/alien_colors_ver1-1.py
|
cad614ac833cdc33de423b5b07873c40dfe1f32c
|
[] |
no_license
|
bkalcho/python-crash-course
|
411d8af223fb6974d4f890c0f82c9e56b062359c
|
8425649a2ecd5abeeb438e816400f270d937758e
|
refs/heads/master
| 2022-09-11T13:47:56.837256
| 2022-08-23T10:04:35
| 2022-08-23T10:04:35
| 69,810,386
| 14
| 8
| null | 2022-08-23T10:04:36
| 2016-10-02T17:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 185
|
py
|
# Author: Bojan G. Kalicanin
# Date: 29-Sep-2016
# If alien color is not green nothing will be printed
alien_color = 'red'
if alien_color == 'green':
print('You earned 5 points.')
|
[
"bojan.g.kalicanin@gmail.com"
] |
bojan.g.kalicanin@gmail.com
|
42a51fbfbf765fe3650c8ab9c41927a8259c62ff
|
9a0ada115978e9600ad7f1eab65fcc8825f637cf
|
/work_in_progress/_old/stage_aligment_convert/remove_stage_point.py
|
45542356653d90923ad1ca5276940178c3a9f832
|
[] |
no_license
|
ver228/work-in-progress
|
c1971f8d72b9685f688a10e4c5a1b150fa0812da
|
ef5baecc324da4550f81edb0513d38f039ee3429
|
refs/heads/master
| 2018-12-16T22:18:55.457290
| 2018-09-14T09:27:49
| 2018-09-14T09:27:49
| 56,165,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,965
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 7 19:45:26 2017
@author: ajaver
"""
import tables
import numpy as np
import os
import pymysql
from tierpsy.analysis.contour_orient.correctVentralDorsal import switchCntSingleWorm
if __name__ == '__main__':
conn = pymysql.connect(host='localhost', database='single_worm_db')
cur = conn.cursor(pymysql.cursors.DictCursor)
sql = '''
SELECT *
FROM experiments_full
'''
cur.execute(sql)
f_data = cur.fetchall()
for irow, row in enumerate(f_data):
fpath = os.path.join(row['results_dir'], row['base_name'])
masked_file = fpath + '.hdf5'
skeletons_file = fpath + '_skeletons.hdf5'
if os.path.exists(skeletons_file):
print(irow+1, len(f_data))
switchCntSingleWorm(skeletons_file)
# with tables.File(skeletons_file, 'r+') as fid:
# if '/stage_movement' in fid:
# exit_flag = fid.get_node('/stage_movement')._v_attrs['has_finished']
# if exit_flag > 0:
# frame_diffs = fid.get_node('/stage_movement/frame_diffs')[:]
# if exit_flag > 1 or np.any(frame_diffs<0):
#
# print(exit_flag, irow, row['base_name'])
# if '/stage_movement' in fid:
# fid.remove_node('/stage_movement', recursive=True)
# if '/provenance_tracking/STAGE_ALIGMENT' in fid:
# fid.remove_node('/provenance_tracking/STAGE_ALIGMENT', recursive=True)
#
# for ext in ['_features.hdf5', '.wcon.zip']:
# fname = fpath + ext
# if os.path.exists(fname):
# os.remove(fname)
|
[
"ajaver@MRC-8791.local"
] |
ajaver@MRC-8791.local
|
fc4489fe4def15e7a8ccd94df2f27d10fc6dad76
|
537259790008b727c03c56ec55a6aaaeeeaf65a3
|
/scrapers/tvrelease_scraper.py
|
533a9a8e18bb3485693ce0a1c03222774e2bd2a3
|
[] |
no_license
|
djbijo/salts
|
a5781ac9958b77c2acfacf4f73a5286e0b91d8e2
|
9eaa736701833eedf6796403da33d648aaf348f8
|
refs/heads/master
| 2020-12-11T03:26:15.843807
| 2015-04-09T18:35:45
| 2015-04-09T18:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,354
|
py
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urllib
import urlparse
import re
import xbmcaddon
from salts_lib import log_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.db_utils import DB_Connection
from salts_lib.constants import QUALITIES
BASE_URL = 'http://tv-release.net'
QUALITY_MAP = {'MOVIES-XVID': QUALITIES.MEDIUM, 'TV-XVID': QUALITIES.HIGH, 'TV-MP4': QUALITIES.HIGH,
'TV-480P': QUALITIES.HIGH, 'MOVIES-480P': QUALITIES.HIGH, 'TV-720P': QUALITIES.HD, 'MOVIES-720P': QUALITIES.HD}
class TVReleaseNet_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.db_connection = DB_Connection()
self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'TVRelease.Net'
def resolve_link(self, link):
return link
def format_source_label(self, item):
return '[%s] %s' % (item['quality'], item['host'])
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
q_str = ''
match = re.search('>Category.*?td_col">([^<]+)', html)
if match:
q_str = match.group(1).upper()
pattern = "td_cols.*?href='([^']+)"
for match in re.finditer(pattern, html):
url = match.group(1)
if re.search('\.rar(\.|$)', url):
continue
hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'direct': False}
hoster['host'] = urlparse.urlsplit(url).hostname
hoster['quality'] = self._get_quality(video, hoster['host'], QUALITY_MAP.get(q_str, None))
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._blog_get_url(video, delim=' ')
@classmethod
def get_settings(cls):
settings = super(TVReleaseNet_Scraper, cls).get_settings()
settings = cls._disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" Filter results older than (0=No Filter) (days)" default="30" visible="eq(-6,true)"/>' % (name))
settings.append(' <setting id="%s-select" type="enum" label=" Automatically Select" values="Most Recent|Highest Quality" default="0" visible="eq(-7,true)"/>' % (name))
return settings
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/?s=')
search_url += urllib.quote(title)
if video_type == VIDEO_TYPES.EPISODE:
search_url += '&cat=TV-XviD,TV-Mp4,TV-720p,TV-480p,'
else:
search_url += '&cat=Movies-XviD,Movies-720p,Movies-480p'
html = self._http_get(search_url, cache_limit=.25)
pattern = "posts_table.*?<a[^>]+>(?P<quality>[^<]+).*?href='(?P<url>[^']+)'>(?P<post_title>[^<]+).*?(?P<date>[^>]+)</td></tr>"
date_format = '%Y-%m-%d %H:%M:%S'
return self._blog_proc_results(html, pattern, date_format, video_type, title, year)
def _http_get(self, url, cache_limit=8):
return super(TVReleaseNet_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, cache_limit=cache_limit)
|
[
"tknorris@gmail.com"
] |
tknorris@gmail.com
|
5b9a0e8151fc4c44ee36a6bf9630696e3772d3bf
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/proofpoint_tap/unit_test/test_get_blocked_clicks.py
|
20515642f86f5bf197f87ee9b53be6017f8d31ab
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980
| 2023-08-31T10:25:36
| 2023-08-31T10:25:36
| 190,435,635
| 61
| 60
|
MIT
| 2023-09-14T08:47:37
| 2019-06-05T17:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,159
|
py
|
import sys
import os
from unittest.mock import patch
from komand_proofpoint_tap.actions.get_blocked_clicks import GetBlockedClicks
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_proofpoint_tap.util.exceptions import ApiException
from test_util import Util
from unittest import TestCase
from parameterized import parameterized
sys.path.append(os.path.abspath("../"))
@patch("requests.request", side_effect=Util.mocked_requests_get)
class TestGetBlockedClicks(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.action = Util.default_connector(GetBlockedClicks())
@parameterized.expand(
[
[
"blocked_clicks",
Util.read_file_to_dict("inputs/get_blocked_clicks.json.inp"),
Util.read_file_to_dict("expected/get_blocked_clicks.json.exp"),
],
[
"blocked_clicks_cleared_status",
Util.read_file_to_dict("inputs/get_blocked_clicks_cleared_status.json.inp"),
Util.read_file_to_dict("expected/get_blocked_clicks_cleared_status.json.exp"),
],
[
"blocked_clicks_without_url",
Util.read_file_to_dict("inputs/get_blocked_clicks_without_url.json.inp"),
Util.read_file_to_dict("expected/get_blocked_clicks_without_url.json.exp"),
],
[
"blocked_clicks_without_time_start",
Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_start.json.inp"),
Util.read_file_to_dict("expected/get_blocked_clicks_without_time_start.json.exp"),
],
[
"blocked_clicks_without_time_end",
Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_end.json.inp"),
Util.read_file_to_dict("expected/get_blocked_clicks_without_time_end.json.exp"),
],
[
"blocked_clicks_without_time_start_end",
Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_start_end.json.inp"),
Util.read_file_to_dict("expected/get_blocked_clicks_without_time_start_end.json.exp"),
],
]
)
def test_get_blocked_clicks(self, mock_request, test_name, input_params, expected):
actual = self.action.run(input_params)
self.assertDictEqual(actual, expected)
@parameterized.expand(
[
[
"blocked_clicks_timerange_invalid",
Util.read_file_to_dict("inputs/get_blocked_clicks_timerange_invalid.json.inp"),
PluginException.causes[PluginException.Preset.BAD_REQUEST],
PluginException.assistances[PluginException.Preset.BAD_REQUEST],
],
]
)
def test_get_blocked_clicks_raise_exception(self, mock_request, test_name, input_params, cause, assistance):
with self.assertRaises(ApiException) as error:
self.action.run(input_params)
self.assertEqual(error.exception.cause, cause)
self.assertEqual(error.exception.assistance, assistance)
|
[
"noreply@github.com"
] |
rapid7.noreply@github.com
|
9751b47661d97074ea93280984aa3a93a3a7246f
|
6b81296eff6aac2b81326a3f97a7240321d085d1
|
/pycampaign06[for loop].py
|
6fa1c9058afbcf87d31d21acee1273479a816d0b
|
[
"Unlicense"
] |
permissive
|
EssamSami5155/PyCampaign20
|
0d267c586e6060824c147a54a1cbc8d01c672e87
|
7c8dba63de1a499742c748a1b85d00eeebbb38d6
|
refs/heads/master
| 2022-12-17T06:32:49.112717
| 2020-09-20T09:49:51
| 2020-09-20T09:49:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,738
|
py
|
# Repeating events
# Using for loop
import turtle # turtle is a library that helps us to draw.
turtle.color("blue")
turtle.forward(100)
turtle.right(45)
turtle.color("green")
turtle.forward(50)
turtle.right(45)
turtle.color("grey")
turtle.forward(100)
# turtle commands
# right(x) - rotate right x degrees.
# left(x) - rotate left x degrees.
# colour("x") - change pen color to x.
# forward(x) - move forward x.
# backward(x) - move backward x.
# drawing square with turtle
import turtle
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
# this is a bad way to complete this task.
# we can use loops to make this task easier.
# Loops allow us to repeat the same lines of code as often as we want.
# example -
import turtle
for steps in range(4): # a for loop is a special kind of loop that lets us specify how many times we need to execute this code.
    turtle.forward(65)
    turtle.left(90)
# in this code "steps" is a variable. We can name it anything.
# Nested loops
import turtle
for steps in range(4):
turtle.forward(100)
turtle.right(90)
for moresteps in range(4):
turtle.forward(50)
turtle.right(90)
# variables inside loop
import turtle
shawki=8
for steps in range(shawki):
turtle.forward(100)
turtle.right(360/shawki)
for moresteps in range(shawki):
turtle. forward(50)
turtle.right(360/shawki)
# In Python, counting starts at 0. But we can specify the numbers to count from and to.
for steps in range(1,10,2):
    print(steps)
# here counting starts at 1 and stops before 10, and it skips one number after each step.
# we can also tell python exactly what values we want to use in the loop.
for steps in [1,2,3,4,5]:
    print(steps)
# we don't even have to use numbers.
import turtle
for steps in ["red","blue","green","black"]:
turtle.color(steps)
turtle.forward(100)
turtle.right(90)
print(steps)
# Drawing a nested object
import turtle
print("Today we are going to draw an object using turtle librery in python.")
print("Tell us your opinion")
user=int(input("How many sides the object will have?\n"))
for steps in range(user):
turtle.forward(160)
turtle.right(360/user)
for moresteps in range(user):
turtle.forward(50)
turtle.right(360/user)
# displaying the Fibonacci series using a for loop
first=0
second=1
n=int(input("enter how many numbers you want in this series: "))
for i in range(n):
print(first)
temp=first
first=second
second=temp+second
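# for example, entering 5 for n prints the first five Fibonacci numbers: 0, 1, 1, 2, 3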
# display the sum of the series 1,3,5,7,9,11,...,1119 using a list
#first method-
first = 1
listf=[]
while first<=1119:
listf.append(first)
first=first+2
num=len(listf)
v1=listf[0]
v2=listf[-1]
sum=(v1+v2)*num/2
print(sum)
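# quick sanity check: there are 560 odd numbers from 1 to 1119, so (1 + 1119) * 560 / 2 = 313600.0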
# second method
first = 1
total=0
listf=[]
while first<=1119:
listf.append(first)
first=first+2
for steps in listf:
total=total+steps
print(total)
# third method
# the list function converts the range to a list
# the range function is used to create a range of numbers.
# here the range runs from 1 up to, but not including, 1121.
# and the third argument indicates the gap between two numbers.
c=list(range(1,1121,2))
total=0
for steps in c:
total=total+steps
print(total)
# fourth method
# without using list
total=0
for steps in range(1,1121,2):
total=total+steps
#or total+=steps
print(total)
#fifth method
# using while loop
total=0
j=1
while j < 1121:
total += j
j += 2
print(total)
# sixth method
# easiest method
# one line code
print(sum(range(1,1121,2)))
# sum of those values which are multiples of 3 within a range.
total=0
for steps in range(1,10000):
if steps % 3 == 0:
total += steps
print(total)
# sum of those values which are multiples of both 3 and 5, below 100.
total=0
for steps in range(1,100):
if steps % 3 == 0 and steps % 5 == 0:
total += steps
print(total)
# displaying a list's first value 1 time, second value 2 times, third value 3 times, ...
a=["banana","apple","mango"]
for i in range(len(a)):
for j in range(i+1):
print(a[i])
# break keyword.
nums=[1,2,3,4,5]
for n in nums:
if n == 3:
print("found!")
break
print(n)
# when the condition is true, the break keyword breaks out of the loop, so the value 3 is never printed.
# continue keyword
# what if we want to ignore a value but not break out of the loop completely?
nums=[1,2,3,4,5]
for n in nums:
if n == 3:
print("found!")
continue
print(n)
# continue will skip to the next value of the loop.
turtle.done()
|
[
"ahammadshawki8@gmail.com"
] |
ahammadshawki8@gmail.com
|
41dc105eb07adb417e1c322ec9271ad8365af2c2
|
9a06c8ab42e0fbce88f06a1bd7237c4d5cae592a
|
/code/python_lesson/runoob/09摄氏度转华氏度.py
|
783060706ad78ee49e750443b5c09375203fe90d
|
[
"MIT"
] |
permissive
|
lxl0928/learning_python
|
19040ca3ae92e5c07a1e813c707d625aa0ba8cb2
|
ff0c6457186e7aa5b6ed9cafaea1dba616ce493a
|
refs/heads/master
| 2023-05-13T13:59:28.815564
| 2021-03-22T02:08:53
| 2021-03-22T02:08:53
| 162,232,496
| 4
| 1
|
MIT
| 2023-05-01T20:15:55
| 2018-12-18T04:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 342
|
py
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
# Date: 2016.08.02
# Filename: 09.py
# Author: Timilong
# read the Celsius temperature from the user
celsius = float(input("Please enter the temperature in Celsius: "))
# compute the Fahrenheit temperature
fahrenheit = (celsius * 1.8) + 32
# print the Fahrenheit temperature
print("%0.1f degrees Celsius is %0.1f degrees Fahrenheit" % (celsius, fahrenheit))
|
[
"lixiaolong@sensoro.com"
] |
lixiaolong@sensoro.com
|
1337f2878c504d9d15a39baca3d7e460d62f6bc4
|
c422cfdcd0303395b62a383611dca19236ea0e15
|
/core/migrations/0009_diaperchange_amount.py
|
991260de5bff332950e762549154f4f031abc2fc
|
[
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
Alan01252/babybuddy
|
c18d26769458fbfd60d7e5493c1fab911d624ddd
|
5382527dc84530fe56a65c7452620bba41bfd668
|
refs/heads/master
| 2022-12-18T17:09:21.064011
| 2020-09-16T11:33:07
| 2020-09-16T11:33:07
| 291,678,434
| 1
| 0
|
BSD-2-Clause
| 2020-08-31T09:57:07
| 2020-08-31T09:57:06
| null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Generated by Django 3.0.2 on 2020-01-26 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20190607_1422'),
]
operations = [
migrations.AddField(
model_name='diaperchange',
name='amount',
field=models.FloatField(blank=True, null=True, verbose_name='Amount'),
),
]
|
[
"chris@chrxs.net"
] |
chris@chrxs.net
|