Dataset columns (name: type, observed range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, 3-288 chars
- content_id: string, length 40
- detected_licenses: list, 0-112 items
- license_type: string, 2 classes
- repo_name: string, 5-115 chars
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 684 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 147 classes
- src_encoding: string, 25 classes
- language: string, 1 class
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 128 to 12.7k
- extension: string, 142 classes
- content: string, 128-8.19k chars
- authors: list, exactly 1 item
- author_id: string, 1-132 chars

Rows follow below, with fields separated by `|` in the column order above.
6552dea2d2667854202895aec4f0df5259855cbc
|
b0f6dbd92c368bd68fa1aafd67fdde9c323ab1be
|
/config.py
|
578b0ee4e0b9ed526e8784e67ae9a7c91b5a685d
|
[
"Apache-2.0"
] |
permissive
|
niezhongliang/InsightFace-v3
|
ac62cff7d4aeb957fac9189ccca26976f9a045e9
|
e10cefec3bf0c465c92c42980ecbdb32eacc6dd5
|
refs/heads/master
| 2020-09-15T20:36:16.087481
| 2019-11-23T00:23:46
| 2019-11-23T00:23:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
import logging
import os
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors
# Model parameters
image_w = 112
image_h = 112
channel = 3
emb_size = 512
# Training parameters
num_workers = 8 # for data-loading; right now, only 1 works with h5py
grad_clip = 5.  # clip gradients at an absolute value of 5.
print_freq = 100 # print training/validation stats every __ batches
checkpoint = None # path to checkpoint, None if none
# Data parameters
num_classes = 85164
num_samples = 3804846
DATA_DIR = 'data'
faces_ms1m_folder = 'data/faces_ms1m_112x112'
path_imgidx = os.path.join(faces_ms1m_folder, 'train.idx')
path_imgrec = os.path.join(faces_ms1m_folder, 'train.rec')
IMG_DIR = 'data/images'
pickle_file = 'data/faces_ms1m_112x112.pickle'
def get_logger():
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(levelname)s \t%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
logger = get_logger()
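# Usage sketch (an assumption about how this module is consumed, not part of
# the original file): other modules import the shared logger directly.
#     from config import logger
#     logger.info('loaded %d identities', num_classes)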
|
[
"liuyang12@focusmedia.cn"
] |
liuyang12@focusmedia.cn
|
f9460bdd828edd3892ba9506260ad360ad7bfbad
|
ef3a7391b0a5c5d8e276355e97cbe4de621d500c
|
/venv/Lib/site-packages/thinc/neural/train.py
|
1a0492b1e6ef38288d5f82838d0e13063fc3efe1
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
countBMB/BenjiRepo
|
143f6da5d198ea6f06404b4559e1f4528b71b3eb
|
79d882263baaf2a11654ca67d2e5593074d36dfa
|
refs/heads/master
| 2022-12-11T07:37:04.807143
| 2019-12-25T11:26:29
| 2019-12-25T11:26:29
| 230,090,428
| 1
| 1
|
Apache-2.0
| 2022-12-08T03:21:09
| 2019-12-25T11:05:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,862
|
py
|
# coding: utf8
from __future__ import unicode_literals
import numpy.random
from tqdm import tqdm
from .optimizers import Adam, linear_decay
class Trainer(object):
def __init__(self, model, **cfg):
self.ops = model.ops
self.model = model
self.L2 = cfg.get("L2", 0.0)
self.optimizer = Adam(model.ops, 0.001, decay=0.0, eps=1e-8, L2=self.L2)
self.batch_size = cfg.get("batch_size", 128)
self.nb_epoch = cfg.get("nb_epoch", 20)
self.i = 0
self.dropout = cfg.get("dropout", 0.0)
self.dropout_decay = cfg.get("dropout_decay", 0.0)
self.each_epoch = []
def __enter__(self):
return self, self.optimizer
def __exit__(self, exc_type, exc_val, exc_tb):
self.model.use_params(self.optimizer.averages)
def iterate(self, train_X, train_y, progress_bar=True):
orig_dropout = self.dropout
for i in range(self.nb_epoch):
indices = numpy.arange(len(train_X))
numpy.random.shuffle(indices)
indices = self.ops.asarray(indices)
j = 0
with tqdm(total=indices.shape[0], leave=False) as pbar:
while j < indices.shape[0]:
slice_ = indices[j : j + self.batch_size]
X = _take_slice(train_X, slice_)
y = _take_slice(train_y, slice_)
yield X, y
self.dropout = linear_decay(orig_dropout, self.dropout_decay, j)
j += self.batch_size
if progress_bar:
pbar.update(self.batch_size)
for func in self.each_epoch:
func()
def _take_slice(data, slice_):
if isinstance(data, list) or isinstance(data, tuple):
return [data[int(i)] for i in slice_]
else:
return data[slice_]
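# A minimal usage sketch (hypothetical `model`, data, and update step; the
# exact thinc API varies by version). __enter__ yields (trainer, optimizer),
# and iterate() generates shuffled minibatches for nb_epoch epochs:
#     with Trainer(model, batch_size=32, nb_epoch=5) as (trainer, optimizer):
#         for X, y in trainer.iterate(train_X, train_y):
#             backprop_batch(X, y, optimizer)  # user-supplied update step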
|
[
"bengmen92@gmail.com"
] |
bengmen92@gmail.com
|
85a20a5685d762ddec4eeda36978c63036c74206
|
6a01a9287a4c23c7f11b7c5399cfb96bbe42eba8
|
/python/scripts/make_id_table_with_diff_expr.py
|
314e25e631430921796b32ad7d8d52c104d61aff
|
[
"MIT"
] |
permissive
|
xguse/gmm-to-gff-transcripts-vs-snps
|
3c25bf2752aee76174d5dab92060fe7269caf99f
|
75337135ab8ff6d840af3cfccfe6404a06777a54
|
refs/heads/master
| 2021-01-19T01:50:33.473897
| 2016-08-02T20:31:18
| 2016-08-02T20:31:18
| 54,731,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,455
|
py
|
"""Describe here what this rule accomplishes."""
import pandas as pd
import numpy as np
# Settings
edger_results_labels = snakemake.params.edger_results_labels
cufflinks_results_labels = snakemake.params.cufflinks_results_labels
# input
edger_results = snakemake.input.edger_results
cufflinks_results = snakemake.input.cufflinks_results
ids_no_diff_expr = snakemake.input.ids_no_diff_expr
#output
ids_with_diff_expr = snakemake.output.ids_with_diff_expr
def load_and_filter_diff_expr_data(path, ids, comparison, program, fdr_thresh):
"""Return new dataframe that has standardized and filtered the DE input tables.
`path` (str):
location of input file
`ids` (dataframe):
with the following columns
- tcons_id
- xloc_id
- gene_id_external
- gene_id_internal
`comparison` (str):
describe the RNA-seq analysis run ('midgut', 'salivary gland', etc)
`program` (str):
one of ['edger', 'cufflinks']
`fdr_thresh` (float):
defining multiple testing significance threshold above which DE tests should NOT be reported
"""
column_conversions = {'edger': {'Gene_Name': 'gene_id_external',
'Gene_ID': 'xloc_id',
'logFC': 'lg2_fc',
'PValue': 'p',
'FDR': 'fdr'},
'cufflinks': {'gene': 'gene_id_external',
'gene_id': 'xloc_id',
'log2.fold_change.': 'lg2_fc',
'p_value': 'p',
'q_value': 'fdr'},
}
keep_columns = ["de_id", "xloc_id", "tcons_id","gene_id_external","gene_id_internal","lg2_fc","p","fdr","comparison","program"]
de_id_program_map = {'edger': 'EDGR',
'cufflinks': 'CUFF',
}
# Load
df = pd.read_csv(path, sep='\t')
# Convert Columns
df = df.rename(columns=column_conversions[program])
    # Make missing fdr values NaN and coerce the column to numeric
    df['fdr'] = df.fdr.apply(lambda i: np.nan if i == '-' else float(i))
# Filter for fdr
    df = df.query(""" fdr <= @fdr_thresh """).copy()
# Add Columns
df['program'] = program
df['comparison'] = comparison
df['de_id'] = generate_de_ids(df=df,
de_type=de_id_program_map[program],
type_mod='|{comparison}'.format(comparison=comparison),
nlen=7)
# Join external and internal IDS
    df = pd.merge(left=df, right=ids,
                  how='left',
                  sort=False, suffixes=('_x', '_y'), copy=True).fillna('-')
# Retain only needed columns
df = df[keep_columns]
# Return dataframe
return df.copy()
def generate_de_ids(df,de_type,type_mod='',nlen=7):
"""Generate unique tracking IDs for each statistical test of diff expr."""
template = '{de}{mod}_{{0:0{nlen}d}}'.format(de=de_type, mod=type_mod, nlen=nlen)
return [template.format(n) for n in range(1,len(df)+1)]
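# Worked example of the template above: generate_de_ids(df, 'EDGR',
# type_mod='|midgut', nlen=7) on a two-row frame expands the template to
# 'EDGR|midgut_{0:07d}' and yields ['EDGR|midgut_0000001', 'EDGR|midgut_0000002'].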
ids_no_diff_expr = pd.read_csv(ids_no_diff_expr)
table_list = []
# Load EDGER DE results
for name, path in zip(edger_results_labels, edger_results):
df = load_and_filter_diff_expr_data(path=path,
ids=ids_no_diff_expr,
comparison=name,
program='edger',
fdr_thresh=0.05)
table_list.append(df)
# Load CUFFLINKS DE results
for name, path in zip(cufflinks_results_labels, cufflinks_results):
df = load_and_filter_diff_expr_data(path=path,
ids=ids_no_diff_expr,
comparison=name,
program='cufflinks',
fdr_thresh=0.05)
table_list.append(df)
# Concat all result files into single dataframe
combined = pd.concat(objs=table_list, axis=0)
# Write out the resulting dataframe
combined.to_csv(path_or_buf=ids_with_diff_expr,
sep=',',
header=True, index=False,)
|
[
"wadunn83@gmail.com"
] |
wadunn83@gmail.com
|
f258f81afafb2186624f0028d7416f7aca37869d
|
3114430ce15c18281117459e26eea4b774e3998a
|
/day4/accounts/models.py
|
1fd9d1bf8a13f354846f792bd07b42ea810b5486
|
[
"MIT"
] |
permissive
|
Joseamica/Easily-written-Django
|
c02e7333e84ca2257b7b8bfae3f6732898c5000a
|
0b746638751702c453db9490fe29ef6d34e4a3bc
|
refs/heads/master
| 2021-05-27T20:25:41.341149
| 2014-05-25T08:25:53
| 2014-05-25T08:25:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
from django.db import models
from django.contrib.auth.models import User
import hashlib
# Create your models here.
class Account(models.Model):
user = models.OneToOneField(User)
def gravatar_url(self):
return "http://www.gravatar.com/avatar/%s?s=50" % hashlib.md5(self.user.email).hexdigest()
    def __unicode__(self):
        return unicode(self.user)
User.profile = property(lambda u: Account.objects.get_or_create(user=u)[0])
|
[
"carpedm20@gmail.com"
] |
carpedm20@gmail.com
|
538fc3e6a7b554c75a45025f802bf9fb341dae19
|
d6e287bbba11be4906e599d1362c9ef89c4fb9de
|
/modules/utils/datasets/__init__.py
|
53671ef9604559f6da0848293411281007d9f83b
|
[
"MIT"
] |
permissive
|
bityangke/WSDDN.pytorch-1
|
67d52f158238f2d5b234ddefeb7f05f06bf6b123
|
9a67323c80566cacc762c68021824aa80a82c524
|
refs/heads/master
| 2022-11-15T00:22:44.903418
| 2020-07-06T13:43:22
| 2020-07-06T13:43:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
'''import all'''
from .Sampler import GroupSampler
from .VOCDataset import VOCDataset
from .Builder import buildDataloader
'''define all'''
__all__ = ['GroupSampler', 'VOCDataset', 'buildDataloader']
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
826f60594002015e659cc80aca283bfe601d0b98
|
0c958692bb3abf99ecbd03bd75a605b202d4da5a
|
/CRAB/MuNu/synch/2014ocbr24/synchThree.py
|
0bc6084ab362fd99b029e74554cc6bfc9b96b5f1
|
[] |
no_license
|
tmrhombus/UWAnalysis
|
a9ed18a7ba8726522c8d98fbdc018c77d80c5cc5
|
eb9e0794e1b847f36c660a55d3631176a39148e2
|
refs/heads/master
| 2021-01-23T20:46:41.578341
| 2017-05-01T08:26:57
| 2017-05-01T08:26:57
| 10,620,824
| 0
| 0
| null | 2014-10-21T11:21:16
| 2013-06-11T12:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,427
|
py
|
cut = 'C1_data_2014ocbr23_m12e10_smrGenNu_clnMu'
#cut = 'C3_tt_2014ocbr23_m12e10_smrGenNu_clnMu'
andreas_events = set([line.strip() for line in open('./comp/%s_and.txt'%(cut))])
jelenas_events = set([line.strip() for line in open('./comp/%s_jel.txt'%(cut))])
toms_events = set([line.strip() for line in open('./comp/%s_tom.txt'%(cut))])
just_andrea = []
just_jelena = []
just_tom = []
t_noJ = []
t_noA = []
j_noT = []
j_noA = []
a_noT = []
a_noJ = []
t_j = []
a_t = []
j_a = []
t_j_a = []
#runover = set([line.strip() for line in open('./comp/badevents.txt')])
runover = set([])
for toms_event in toms_events:
tj = False
ta = False
for jelenas_event in jelenas_events:
if long(toms_event) == long(jelenas_event):
tj = True
break
for andreas_event in andreas_events:
if long(toms_event) == long(andreas_event):
ta = True
break
if tj == False and ta == False:
just_tom.append(toms_event)
runover.add(toms_event)
if tj == False:
t_noJ.append(toms_event)
runover.add(toms_event)
if ta == False:
t_noA.append(toms_event)
runover.add(toms_event)
if tj == True and ta == True: t_j_a.append(toms_event)
if tj == True: t_j.append(toms_event)
if ta == True: a_t.append(toms_event)
for andreas_event in andreas_events:
at = False
aj = False
for toms_event in toms_events:
if long(andreas_event) == long(toms_event):
at = True
break
for jelenas_event in jelenas_events:
if long(andreas_event) == long(jelenas_event):
aj = True
break
if at == False and aj == False:
just_andrea.append(andreas_event)
runover.add(andreas_event)
if at == False:
a_noT.append(andreas_event)
runover.add(andreas_event)
if aj == False:
a_noJ.append(andreas_event)
runover.add(andreas_event)
if aj == True: j_a.append(andreas_event)
for jelenas_event in jelenas_events:
ja = False
jt = False
for andreas_event in andreas_events:
if long(andreas_event) == long(jelenas_event):
ja = True
break
for toms_event in toms_events:
if long(toms_event) == long(jelenas_event):
            jt = True
            break
if ja == False and jt == False:
just_jelena.append(jelenas_event)
runover.add(jelenas_event)
if ja == False:
j_noA.append(jelenas_event)
runover.add(jelenas_event)
if jt == False:
j_noT.append(jelenas_event)
runover.add(jelenas_event)
print( "http://www.hep.wisc.edu/~tperry/wbb/synch/2014ocbr24/%s_comp.log"%(cut))
log = open('/afs/hep.wisc.edu/home/tperry/www/wbb/synch/2014ocbr24/%s_comp.log'%(cut),'w')
log.write("Andreas Events: %s\n"%len(andreas_events))
log.write("Jelenas Events: %s\n"%len(jelenas_events))
log.write("Toms Events: %s\n"%len(toms_events ))
log.write("All Three: %s\n\n"%len(t_j_a))
log.write(" Tom Has, Jelena Doesn't (%s)\n"%len(t_noJ))
for e in t_noJ: log.write(" "+e)
log.write("\n\n")
log.write(" Jelena Has, Tom Doesn't (%s)\n"%len(j_noT))
for e in j_noT: log.write(" "+e)
log.write("\n\n")
log.write("====================================================================\n\n")
log.write(" Tom Has, Andrea Doesn't (%s)\n"%len(t_noA))
for e in t_noA: log.write(" "+e)
log.write("\n\n")
log.write(" Andrea Has, Tom Doesn't (%s)\n"%len(a_noT))
for e in a_noT: log.write(" "+e)
log.write("\n\n")
log.write("====================================================================\n\n")
log.write(" Jelena Has, Andrea Doesn't (%s)\n"%len(j_noA))
for e in j_noA: log.write(" "+e)
log.write("\n\n")
log.write(" Andrea Has, Jelena Doesn't (%s)\n"%len(a_noJ))
for e in a_noJ: log.write(" "+e)
log.write("\n\n")
log.write("We All Have %s\n"%len(t_j_a))
for e in t_j_a: log.write(e+" ")
log.write("\n\n")
log.write("Tom Has %s\n"%len(toms_events))
for e in toms_events: log.write(e+" ")
log.write("\n\n")
log.write("Jelena Has %s\n"%len(jelenas_events))
for e in jelenas_events: log.write(e+" ")
log.write("\n\n")
log.write("Andreas Has %s\n"%len(andreas_events))
for e in andreas_events: log.write(e+" ")
log.write("\n\n")
log.write("Run Over\n")
log.write("eventsToProcess = cms.untracked.VEventRange(")
bades = []
for e in set(runover): bades.append("'1:%s'"%e)
badlist = ",".join(bades)
log.write("%s)"%(badlist))
log.write("\n\n")
log.write("eventsToProcess = cms.untracked.VEventRange(")
badet = []
for e in set(runover): badet.append("'1:%s-1:%s'"%(e,e))
badliss = ",".join(badet)
log.write("%s)"%(badliss))
log.write("\n\n")
#lob = open('./comp/badevents.txt','a')
#for e in set(runover): lob.write("%s\n"%(e))
|
[
"tperry@cern.ch"
] |
tperry@cern.ch
|
6d8ce22c751efd861956be268dafc8c2f00f3fbd
|
c0acf82a18b8e90cd38afedb02e45e53425a067e
|
/pyecharts/custom/overlap.py
|
e2cdd57622347e115a2fe03fcdc86c1ef34f05fd
|
[
"MIT"
] |
permissive
|
caideyang/pyecharts
|
66b61d0400ea15b25ef7fb90f7305647343eea3a
|
c13f2fecece566359b2c881705bf96337c42ce40
|
refs/heads/master
| 2021-01-22T13:48:00.474761
| 2017-08-18T07:09:53
| 2017-08-18T07:09:53
| 100,685,801
| 1
| 0
| null | 2017-08-18T07:31:13
| 2017-08-18T07:31:13
| null |
UTF-8
|
Python
| false
| false
| 1,609
|
py
|
#!/usr/bin/env python
# coding=utf-8
class Overlap(object):
def __init__(self):
self._chart = None
def add(self, chart):
"""
:param chart:
chart instance
:return:
"""
if self._chart is None:
self._chart = chart
else:
self.__custom(self.__get_series(chart))
def __get_series(self, chart):
""" Get chart series data
:param chart:
chart instance
:return:
"""
return (
chart._option.get('legend')[0].get('data'),
chart._option.get('series'),
)
def __custom(self, series):
""" Appends the data for the series of the chart type
:param series:
series data
"""
_name, _series = series
for n in _name:
self._chart._option.get('legend')[0].get('data').append(n)
for s in _series:
self._chart._option.get('series').append(s)
def render(self, path="render.html"):
"""
:param path:
:return:
"""
self._chart.render(path)
def render_embed(self):
"""
:return:
"""
return self._chart.render_embed()
def show_config(self):
"""
:return:
"""
import pprint
return pprint.pprint(self._chart._option)
@property
def chart(self):
"""
:return:
"""
return self._chart
def _repr_html_(self):
"""
:return:
"""
return self._chart._repr_html_()
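# A minimal usage sketch (bar_chart/line_chart stand in for pyecharts chart
# instances and are not defined here): the first add() sets the base chart,
# and later add() calls merge their legend entries and series into it.
#     overlap = Overlap()
#     overlap.add(bar_chart)
#     overlap.add(line_chart)
#     overlap.render('overlap.html')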
|
[
"chenjiandongx@qq.com"
] |
chenjiandongx@qq.com
|
003d9d838b7372a3284b30915aec63707830d821
|
ef20884169d10ec9ac4d1d3b77ee35245d248294
|
/practice/deep-learning-from-scratch-2/np_random_choice.py
|
9360dbb41512575e33d9d1d800f8a11e55fdeec2
|
[] |
no_license
|
heaven324/Deeplearning
|
64016671879cdf1742eff6f374cfb640cfc708ae
|
a7a8d590fa13f53348f83f8c808538affbc7b3e8
|
refs/heads/master
| 2023-05-05T08:54:27.888155
| 2021-05-22T08:25:47
| 2021-05-22T08:25:47
| 188,010,607
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
import numpy as np
print(np.random.choice(10)) # 5
print(np.random.choice(10)) # 9
# sample just one word at random from words
words = ['you', 'say', 'goodbye', 'I', 'hello', '.']
print(np.random.choice(words))
# sample 5 words at random (with replacement)
print(np.random.choice(words, size = 5))
# sample 5 words at random (without replacement)
print(np.random.choice(words, size = 5, replace = False))
# sample according to a probability distribution
p = [0.5, 0.1, 0.05, 0.2, 0.05, 0.1]
print(np.random.choice(words, p = p))
# why raise to the power 0.75: to slightly boost the probability of rare words
p = [0.7, 0.29, 0.01]
new_p = np.power(p, 0.75)
print(new_p)
new_p /= np.sum(new_p)
print(new_p)
|
[
"wjdtjdgh2005@gmail.com"
] |
wjdtjdgh2005@gmail.com
|
778459e47142827e3629b6af6b3dbfc2ccc5d25e
|
ce990be34e8759efb96b890d9676da313fd2d9b4
|
/tests/python/contrib/test_ethosu/cascader/test_plan.py
|
ddc40b49ac8a8de119af6b9b19d208ef745f4899
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
tmoreau89/tvm
|
291c0b1beb13503e18b1e45f135aaf334660b68d
|
8136173a631bf6c7274d26285349225fcf6e495f
|
refs/heads/master
| 2022-11-23T08:36:24.853648
| 2022-11-21T07:36:57
| 2022-11-21T07:36:57
| 119,757,672
| 5
| 1
|
Apache-2.0
| 2019-03-22T23:06:53
| 2018-01-31T23:41:33
|
Python
|
UTF-8
|
Python
| false
| false
| 7,708
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm.contrib.ethosu.cascader as cs
import pytest
def test_plan(DRAM, SRAM):
subgraph = cs.TESubgraph([], None)
part = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([10, 10], "uint8")
tensor_2 = cs.Tensor([10, 10], "uint8")
part.set_input(0, tensor_1)
part.set_output(tensor_2)
tensor_1.add_consumer(part)
tensor_2.add_producer(part)
output_stripe_config = cs.StripeConfig(
shape=[5, 5],
extent=[10, 10],
strides=[5, 5],
order=[1, 2],
stripes=[2, 2],
offset=[0, 0],
)
tensor_config_out = cs.TensorConfig(
tensor=tensor_2,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[output_stripe_config],
copy_tensor=False,
)
input_stripe_config = part.calculate_input_stripe_configs(output_stripe_config)[0]
tensor_config_in = cs.TensorConfig(
tensor=tensor_1,
home_region=DRAM,
state=cs.TensorConfigState.INTERIOR,
buffer_mode=cs.BufferMode.ROLLING,
stripe_configs=[input_stripe_config],
copy_tensor=False,
)
tensor_configs = {tensor_1: tensor_config_in, tensor_2: tensor_config_out}
open_configs = frozenset([tensor_config_in])
part_group = frozenset([part])
interior_region = SRAM
memory_usage = 100
cycles = 20
plan = cs.Plan(
tensor_configs=tensor_configs,
open_configs=open_configs,
output_config=tensor_config_out,
part_group=part_group,
interior_region=interior_region,
memory_usage=memory_usage,
cycles=cycles,
)
assert plan.tensor_configs == tensor_configs
assert plan.open_configs == open_configs
assert plan.output_config == tensor_config_out
assert plan.part_group == part_group
assert plan.interior_region == interior_region
assert plan.memory_usage == memory_usage
assert plan.cycles == cycles
def test_plan_merge(DRAM, SRAM):
subgraph = cs.TESubgraph([], None)
part_1 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[2, 0, 0], [0, 2, 0], [0, 0, 1]],
[0, 0],
),
],
)
part_2 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
cs.Propagator(
[[0, 0, 6], [0, 0, 6], [0, 0, 1]],
[0, 0],
),
cs.Propagator(
[[1, 0], [0, 1]],
[0],
),
],
)
tensor_1 = cs.Tensor([20, 20], "uint8")
tensor_2 = cs.Tensor([10, 10], "uint8")
tensor_3 = cs.Tensor([6, 6], "uint8")
tensor_4 = cs.Tensor([10], "uint8")
tensor_5 = cs.Tensor([10, 10], "uint8")
part_1.set_input(0, tensor_1)
part_1.set_output(tensor_2)
tensor_1.add_consumer(part_1)
tensor_2.add_producer(part_1)
part_2.set_input(0, tensor_2)
part_2.set_input(1, tensor_3)
part_2.set_input(2, tensor_4)
part_2.set_output(tensor_5)
tensor_2.add_consumer(part_2)
tensor_3.add_consumer(part_2)
tensor_4.add_consumer(part_2)
tensor_5.add_producer(part_2)
output_stripe_config = cs.StripeConfig(
shape=[5, 5],
extent=[10, 10],
strides=[5, 5],
order=[1, 2],
stripes=[2, 2],
offset=[0, 0],
)
tensor_config_5 = cs.TensorConfig(
tensor=tensor_5,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[output_stripe_config],
copy_tensor=False,
)
input_stripe_configs = part_2.calculate_input_stripe_configs(output_stripe_config)
tensor_config_4 = cs.TensorConfig(
tensor=tensor_4,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[input_stripe_configs[2]],
copy_tensor=False,
)
tensor_config_3 = cs.TensorConfig(
tensor=tensor_3,
home_region=SRAM,
state=cs.TensorConfigState.INTERIOR,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[input_stripe_configs[1]],
copy_tensor=False,
)
tensor_config_2 = cs.TensorConfig(
tensor=tensor_2,
home_region=SRAM,
state=cs.TensorConfigState.INTERIOR,
buffer_mode=cs.BufferMode.ROLLING,
stripe_configs=[input_stripe_configs[0]],
copy_tensor=False,
)
input_stripe_config = part_1.calculate_input_stripe_configs(input_stripe_configs[0])[0]
tensor_config_1 = cs.TensorConfig(
tensor=tensor_1,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.ROLLING,
stripe_configs=[input_stripe_config],
copy_tensor=False,
)
tensor_configs = {tensor_1: tensor_config_1, tensor_2: tensor_config_2}
open_configs = frozenset([tensor_config_2])
part_group = frozenset([part_1])
interior_region = SRAM
memory_usage = 100
cycles = 20
plan_1 = cs.Plan(
tensor_configs=tensor_configs,
open_configs=open_configs,
output_config=tensor_config_2,
part_group=part_group,
interior_region=interior_region,
memory_usage=memory_usage,
cycles=cycles,
)
tensor_configs = {
tensor_2: tensor_config_2,
tensor_3: tensor_config_3,
tensor_4: tensor_config_4,
tensor_5: tensor_config_5,
}
open_configs = frozenset([tensor_config_2, tensor_config_3])
part_group = frozenset([part_2])
interior_region = SRAM
memory_usage = 200
cycles = 30
plan_2 = cs.Plan(
tensor_configs=tensor_configs,
open_configs=open_configs,
output_config=tensor_config_5,
part_group=part_group,
interior_region=interior_region,
memory_usage=memory_usage,
cycles=cycles,
)
merged_plan = plan_1.merge(plan_2)
assert merged_plan.tensor_configs == {
tensor_1: tensor_config_1,
tensor_2: tensor_config_2,
tensor_3: tensor_config_3,
tensor_4: tensor_config_4,
tensor_5: tensor_config_5,
}
assert merged_plan.open_configs == frozenset([tensor_config_3])
assert merged_plan.output_config == tensor_config_5
assert merged_plan.part_group == frozenset([part_1, part_2])
assert merged_plan.interior_region == interior_region
assert merged_plan.memory_usage == plan_1.memory_usage + plan_2.memory_usage
assert merged_plan.cycles == plan_1.cycles + plan_2.cycles
if __name__ == "__main__":
pytest.main([__file__])
|
[
"noreply@github.com"
] |
tmoreau89.noreply@github.com
|
b57aa04bb1157d20423de65671bee218d8715f6d
|
730b92e439dbb013950b8bbf417cfde1bb40f8b9
|
/Python/Add-Binary.py
|
8b8be13ae529418ef8672901ffeb760e078c1eb4
|
[] |
no_license
|
yuede/Lintcode
|
fdbca5984c2860c8b532b5f4d99bce400b0b26d0
|
d40b7ca1c03af7005cc78b26b877a769ca0ab723
|
refs/heads/master
| 2021-01-13T04:14:32.754210
| 2015-08-22T13:15:54
| 2015-08-22T13:15:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
class Solution:
# @param {string} a a number
# @param {string} b a number
# @return {string} the result
def addBinary(self, a, b):
# Write your code here
pa = len(a) - 1
pb = len(b) - 1
s = ""
d = 0
while pa >= 0 and pb >= 0:
cur = d + int(a[pa]) + int(b[pb])
pa -= 1
pb -= 1
s += str(cur % 2)
            d = cur // 2  # integer division so the carry stays an int on Python 3
while pa >= 0:
cur = d + int(a[pa])
pa -= 1
s += str(cur % 2)
            d = cur // 2
while pb >= 0:
cur = d + int(b[pb])
pb -= 1
s += str(cur % 2)
            d = cur // 2
if d > 0:
s += str(d)
rs = ""
for i in range(len(s)):
rs += s[len(s) - 1 - i]
return rs
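# Worked example: addBinary("11", "1") -> bits are summed right-to-left with
# a carry, building s = "001" (least-significant digit first), which the
# final loop reverses to give "100".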
|
[
"jiangyi0425@gmail.com"
] |
jiangyi0425@gmail.com
|
97a5797d6b970d29dbea2c4c90e09131f13ca91c
|
e5efada3529d94875455c4230c8dabe27fb72a89
|
/apps/search/migrations/0015_advancedsearchpage_simplesearchpage.py
|
74a14dceeeef2ab60fb56655bb00ed68b2a72af6
|
[] |
no_license
|
alexmon1989/uma
|
d8c321fb0ec9b1a9039b1c83aeaaff774f657416
|
5dea579d634eeb1c8103c21157299b33ca5590f0
|
refs/heads/master
| 2023-08-03T04:31:13.598577
| 2023-07-22T18:17:13
| 2023-07-22T18:17:13
| 154,835,498
| 0
| 0
| null | 2023-03-02T11:20:54
| 2018-10-26T13:02:12
|
Nunjucks
|
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
# Generated by Django 2.1.3 on 2019-10-10 13:38
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search', '0014_auto_20190719_1155'),
]
operations = [
migrations.CreateModel(
name='AdvancedSearchPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description_uk', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (укр.)')),
('description_en', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (англ.)')),
],
options={
'verbose_name': 'Сторінка розширенного пошуку',
'verbose_name_plural': 'Сторінка розширенного пошуку',
},
),
migrations.CreateModel(
name='SimpleSearchPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description_uk', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (укр.)')),
('description_en', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (англ.)')),
],
options={
'verbose_name': 'Сторінка простого пошуку',
'verbose_name_plural': 'Сторінка простого пошуку',
},
),
]
|
[
"alex.mon1989@gmail.com"
] |
alex.mon1989@gmail.com
|
52980438ee437a5977680307d4b13bd673f3b1a3
|
6d7a67be5c2aa1bcebdcfd5bec855c0172c8f01f
|
/convert_weight.py
|
55566963e7439f9fb4e9649bdd289f5114337916
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
JWHennessey/stylegan2-pytorch-1
|
19184e1713b9bcfce6404fb6d19478f1dbcc56ec
|
88852e3695d3ffd9281787690c3f8796dc1e225a
|
refs/heads/master
| 2020-12-11T17:17:04.082956
| 2020-01-14T18:44:39
| 2020-01-14T18:44:39
| 233,909,977
| 0
| 0
|
NOASSERTION
| 2020-01-14T18:37:33
| 2020-01-14T18:37:32
| null |
UTF-8
|
Python
| false
| false
| 6,849
|
py
|
import argparse
import os
import sys
import pickle
import math
import torch
import numpy as np
from torchvision import utils
from model import Generator, Discriminator
def convert_modconv(vars, source_name, target_name, flip=False):
weight = vars[source_name + '/weight'].value().eval()
mod_weight = vars[source_name + '/mod_weight'].value().eval()
mod_bias = vars[source_name + '/mod_bias'].value().eval()
noise = vars[source_name + '/noise_strength'].value().eval()
bias = vars[source_name + '/bias'].value().eval()
dic = {
'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
'conv.modulation.weight': mod_weight.transpose((1, 0)),
'conv.modulation.bias': mod_bias + 1,
'noise.weight': np.array([noise]),
'activate.bias': bias,
}
dic_torch = {}
for k, v in dic.items():
dic_torch[target_name + '.' + k] = torch.from_numpy(v)
if flip:
dic_torch[target_name + '.conv.weight'] = torch.flip(
dic_torch[target_name + '.conv.weight'], [3, 4]
)
return dic_torch
def convert_conv(vars, source_name, target_name, bias=True, start=0):
weight = vars[source_name + '/weight'].value().eval()
dic = {'weight': weight.transpose((3, 2, 0, 1))}
if bias:
dic['bias'] = vars[source_name + '/bias'].value().eval()
dic_torch = {}
dic_torch[target_name + f'.{start}.weight'] = torch.from_numpy(dic['weight'])
if bias:
dic_torch[target_name + f'.{start + 1}.bias'] = torch.from_numpy(dic['bias'])
return dic_torch
def convert_torgb(vars, source_name, target_name):
weight = vars[source_name + '/weight'].value().eval()
mod_weight = vars[source_name + '/mod_weight'].value().eval()
mod_bias = vars[source_name + '/mod_bias'].value().eval()
bias = vars[source_name + '/bias'].value().eval()
dic = {
'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
'conv.modulation.weight': mod_weight.transpose((1, 0)),
'conv.modulation.bias': mod_bias + 1,
'bias': bias.reshape((1, 3, 1, 1)),
}
dic_torch = {}
for k, v in dic.items():
dic_torch[target_name + '.' + k] = torch.from_numpy(v)
return dic_torch
def convert_dense(vars, source_name, target_name):
weight = vars[source_name + '/weight'].value().eval()
bias = vars[source_name + '/bias'].value().eval()
dic = {'weight': weight.transpose((1, 0)), 'bias': bias}
dic_torch = {}
for k, v in dic.items():
dic_torch[target_name + '.' + k] = torch.from_numpy(v)
return dic_torch
def update(state_dict, new):
for k, v in new.items():
if k not in state_dict:
raise KeyError(k + ' is not found')
if v.shape != state_dict[k].shape:
raise ValueError(f'Shape mismatch: {v.shape} vs {state_dict[k].shape}')
state_dict[k] = v
def discriminator_fill_statedict(statedict, vars, size):
log_size = int(math.log(size, 2))
update(statedict, convert_conv(vars, f'{size}x{size}/FromRGB', 'convs.0'))
conv_i = 1
for i in range(log_size - 2, 0, -1):
reso = 4 * 2 ** i
update(statedict, convert_conv(vars, f'{reso}x{reso}/Conv0', f'convs.{conv_i}.conv1'))
update(statedict, convert_conv(vars, f'{reso}x{reso}/Conv1_down', f'convs.{conv_i}.conv2', start=1))
update(statedict, convert_conv(vars, f'{reso}x{reso}/Skip', f'convs.{conv_i}.skip', start=1, bias=False))
conv_i += 1
update(statedict, convert_conv(vars, f'4x4/Conv', 'final_conv'))
update(statedict, convert_dense(vars, f'4x4/Dense0', 'final_linear.0'))
update(statedict, convert_dense(vars, f'Output', 'final_linear.1'))
return statedict
def fill_statedict(state_dict, vars, size):
log_size = int(math.log(size, 2))
for i in range(8):
update(state_dict, convert_dense(vars, f'G_mapping/Dense{i}', f'style.{i + 1}'))
update(
state_dict,
{
'input.input': torch.from_numpy(
vars['G_synthesis/4x4/Const/const'].value().eval()
)
},
)
update(state_dict, convert_torgb(vars, 'G_synthesis/4x4/ToRGB', 'to_rgb1'))
for i in range(log_size - 2):
reso = 4 * 2 ** (i + 1)
update(
state_dict,
convert_torgb(vars, f'G_synthesis/{reso}x{reso}/ToRGB', f'to_rgbs.{i}'),
)
update(state_dict, convert_modconv(vars, 'G_synthesis/4x4/Conv', 'conv1'))
conv_i = 0
for i in range(log_size - 2):
reso = 4 * 2 ** (i + 1)
update(
state_dict,
convert_modconv(
vars,
f'G_synthesis/{reso}x{reso}/Conv0_up',
f'convs.{conv_i}',
flip=True,
),
)
update(
state_dict,
convert_modconv(
vars, f'G_synthesis/{reso}x{reso}/Conv1', f'convs.{conv_i + 1}'
),
)
conv_i += 2
return state_dict
if __name__ == '__main__':
device = 'cuda'
parser = argparse.ArgumentParser()
parser.add_argument('--repo', type=str, required=True)
parser.add_argument('--gen', action='store_true')
parser.add_argument('--disc', action='store_true')
parser.add_argument('path', metavar='PATH')
args = parser.parse_args()
sys.path.append(args.repo)
from dnnlib import tflib
tflib.init_tf()
with open(args.path, 'rb') as f:
generator, discriminator, g_ema = pickle.load(f)
size = g_ema.output_shape[2]
g = Generator(size, 512, 8)
state_dict = g.state_dict()
state_dict = fill_statedict(state_dict, g_ema.vars, size)
g.load_state_dict(state_dict)
latent_avg = torch.from_numpy(g_ema.vars['dlatent_avg'].value().eval())
ckpt = {'g_ema': state_dict, 'latent_avg': latent_avg}
if args.gen:
g_train = Generator(size, 512, 8)
g_train_state = g_train.state_dict()
g_train_state = fill_statedict(g_train_state, generator.vars, size)
ckpt['g'] = g_train_state
if args.disc:
disc = Discriminator(size)
d_state = disc.state_dict()
d_state = discriminator_fill_statedict(d_state, discriminator.vars, size)
ckpt['d'] = d_state
name = os.path.splitext(os.path.basename(args.path))[0]
torch.save(ckpt, name + '.pt')
batch_size = {256: 16, 512: 9, 1024: 4}
n_sample = batch_size.get(size, 25)
g = g.to(device)
x = torch.randn(n_sample, 512).to(device)
with torch.no_grad():
img, _ = g([x], truncation=0.5, truncation_latent=latent_avg.to(device))
utils.save_image(
img, name + '.png', nrow=int(n_sample ** 0.5), normalize=True, range=(-1, 1)
)
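# Typical invocation (a sketch; the repo path and pickle name here are
# illustrative): --repo must point at a clone of the TensorFlow StyleGAN2
# code so that `dnnlib` is importable, then pass the checkpoint to convert:
#     python convert_weight.py --repo ./stylegan2 stylegan2-ffhq-config-f.pkl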
|
[
"kim.seonghyeon@snu.ac.kr"
] |
kim.seonghyeon@snu.ac.kr
|
782a5e2a11fe39696a75f0f5a033a5af024cc786
|
f8ffac4fa0dbe27316fa443a16df8a3f1f5cff05
|
/Python/Counting_Valleys.py
|
db3c7a3eda8be589ae74a986fadb83c8e44b2c00
|
[] |
no_license
|
ankitniranjan/HackerrankSolutions
|
e27073f9837787a8af7a0157d95612028c07c974
|
e110c72d3b137cf4c5cef6e91f58a17452c54c08
|
refs/heads/master
| 2023-03-16T19:06:17.805307
| 2021-03-09T16:28:39
| 2021-03-09T16:28:39
| 292,994,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
import math
import os
import random
import re
import sys
# Complete the countingValleys function below.
def countingValleys(n, s):
level=valley=0
for i in range(n):
if(s[i]=='U'):
level+=1
if(level==0):
valley+=1
else:
level-=1
return valley
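# Worked example: countingValleys(8, "UDDDUDUU") returns 1 -- the level goes
# 1, 0, -1, -2, -1, -2, -1, 0, dipping below sea level once and counting a
# valley on the final 'U' that brings the level back to 0.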
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
s = input()
result = countingValleys(n, s)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"noreply@github.com"
] |
ankitniranjan.noreply@github.com
|
ab55393ddc0e46a0f229ce84b50466d0ac1cb266
|
65701888f7e09716b83ddbb965a50b7c62b0f287
|
/blocks/google/common_block.py
|
fb2ba923f68bc8aedfef5cc46a894ff664e758b9
|
[] |
no_license
|
ColinKennedy/auto_docstring
|
6a4a27c16434cb6d94db435226758a09627d9252
|
dbca838630faf410a277069aedbecb82cfeedae9
|
refs/heads/master
| 2021-04-12T12:36:31.825008
| 2018-11-05T01:49:49
| 2018-11-05T01:49:49
| 89,107,892
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,343
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TODO : Just had an idea. Why not change the gross "if X.is_valid(obj): return X(obj)
# into a single classmethod? That'd look way better and potentially be
# easier to loop over
#
'''The classes and functions needed to parse the types of all astroid nodes.
This module does most of the heavy-lifting for args return-types. It can
parse functions within functions, infer an object's type, and even recursively
traverse imported modules to get an object's type.
'''
# IMPORT STANDARD LIBRARIES
import abc
import os
# IMPORT THIRD-PARTY LIBRARIES
import six
# IMPORT LOCAL LIBRARIES
from ...config import environment
from ...parsing import visit
from ...core import check
from . import common_type
@six.add_metaclass(abc.ABCMeta)
class CommonBlock(object):
'''An abstract class used to implement a Google-style block.
Attributes:
label (str): The block display text.
'''
label = 'Header label'
@staticmethod
@abc.abstractmethod
def draw(info):
'''Create the docstring lines to represent the given `info`.
Args:
info (dict[str]):
The parsed AST node whose type needs to be found and then
converted into a string.
Returns:
list[str]: The lines to create.
'''
return []
@abc.abstractproperty
def name(self):
'''str: A unique name to use to identify this block-type.'''
return '_unique_id'
@classmethod
def get_starting_lines(cls):
'''list[str]: Get the label used for the top of this block.'''
return ['{}:'.format(cls.label)]
@staticmethod
def get_spacing():
'''int: Get the number of newlines to separate each docstring block.'''
try:
return int(os.getenv('AUTO_DOCSTRING_BLOCK_SPACING', '1'))
        except (TypeError, ValueError):
return 1
@staticmethod
def _expand_types(obj, include_type=False):
r'''Wrap the given `obj` with a specific docstring-class wrapper.
Args:
obj (`astroid.NodeNG`):
Some node to wrap.
include_type (bool, optional):
If True and `obj` is a container of some kind, for example
a list of strs, then `obj` will be printed like "list[str]".
If False, `obj` would be printed as just "str".
This parameter is used primarily mainly for keeping return-types
from accidentally printing its container-type twice when
the container is nested.
Default is False.
Returns:
`SpecialType` or `ComprehensionContainerType` or `ContainerType` \
or `IterableType` or `Type`: .
The wrapped type.
'''
if common_type.SpecialType.is_valid(obj):
return common_type.SpecialType(obj)
obj = visit.get_value(obj)
if common_type.DictComprehensionContainerType.is_valid(obj):
return common_type.DictComprehensionContainerType(obj)
if common_type.ComprehensionContainerType.is_valid(obj):
return common_type.ComprehensionContainerType(obj)
if common_type.ContainerType.is_valid(obj):
return common_type.ContainerType(obj)
if check.is_itertype(obj):
return common_type.IterableType(obj, include_type=include_type)
return common_type.Type(obj)
@staticmethod
def _change_type_to_str(*objs):
'''Create the full string of all return-types for the given `objs`.
Args:
*objs (list[:class:`auto_docstring.blocks.google.common_block.Type`]):
The types to change into strings.
Returns:
str: The final set of return types for the given objects. This string
will be added to the auto-generated docstrings, directly.
'''
items = []
for item in [obj.as_str() for obj in objs]:
if item not in items:
items.append(item)
return common_type.make_items_text(items)
@six.add_metaclass(abc.ABCMeta)
class MultiTypeBlock(CommonBlock):
'''The base-class used to create "Returns" and "Yields" blocks.'''
_info_key = '_some_key'
name = 'multitype_base_block'
@classmethod
def _process_args(cls, info):
expected_object = info.get(cls._info_key)
if not expected_object:
return []
indent = ''
# Check if I need this if-statement
if info.get('lines'):
indent = environment.get_default_indent()
info['indent'] = indent
obj_types = cls._expand_types(expected_object)
type_info_as_str = cls._change_type_to_str(*obj_types)
return [type_info_as_str]
@classmethod
def _build_indented_docstring_lines(cls, lines, indent='', multiline=False):
return [cls._make_line(line, indent=indent, multiline=multiline)
for line in lines]
@classmethod
def draw(cls, info):
# '''Create the docstring lines to represent the given `info`.
# Note:
# If no data is found for cls._info_key, this method will return
# an empty list.
# Args:
# info (dict[str, list[`astroid.NodeNG`]]):
# The parsed AST node whose type needs to be found and then
# converted into a string.
# Returns:
# list[str]: The lines to create.
# '''
lines = cls._process_args(info)
if not lines:
return []
starting_lines = []
all_lines = info.get('lines', [])
if all_lines:
starting_lines = cls.get_starting_lines()
multiline = is_multiline(all_lines)
docstring_lines = cls._build_indented_docstring_lines(
lines,
info.get('indent', ''),
            multiline=multiline,
)
return starting_lines + docstring_lines
@staticmethod
def _make_line(obj_type, indent, multiline=False):
'''Create the docstring line for the given input.
Args:
indent (str):
The amount of space to add to the docstring block.
obj_type (str):
The type of the object. Example: "tuple[str]", "bool".
            multiline (bool, optional):
If True, get the user's preferred separator and place it between
the return type and the return description.
If False, force the separator to just be " " so that the return
statement will stay on a single line.
Default is False.
Returns:
str: The created docstring line.
'''
if obj_type:
# This ":" is needed for parsing by auto_docstring
obj_type = ':' + obj_type
if not multiline:
sep = ' '
else:
sep = environment.get_description_separator()
return '{indent}{{{obj_type}!f}}:{sep}{{!f}}.'.format(
indent=indent,
obj_type=obj_type,
sep=sep,
)
def is_multiline(lines):
return len(lines) > 1
|
[
"colinvfx@gmail.com"
] |
colinvfx@gmail.com
|
9b5f678ee01f74948e3abe78205622ca733d1def
|
f6d96e9505103428402ea9772fdd0b48c4dff7e9
|
/tests/test_models/test_place.py
|
4bd8e6e2e665353886e8de7c111a98acd68c7add
|
[] |
no_license
|
KarenCampo777/AirBnB_clone
|
8271a2a7f75c01ea875b9232a939f1f58f484705
|
95051e3c7c05837b89966caae55bb54eef81c95f
|
refs/heads/master
| 2023-03-14T03:41:18.367359
| 2021-02-24T22:32:17
| 2021-02-24T22:32:17
| 276,201,869
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,527
|
py
|
#!/usr/bin/python3
"""
Test module for place module
"""
from models.place import Place
import models
import unittest
import os
import datetime
class TestPlace(unittest.TestCase):
""" Testing an Place instance """
def setUp(self):
"""
Setting up the test instance
"""
self.my_base1 = Place()
self.my_base2 = Place()
    def tearDown(self):
"""
Closing the test instance
"""
del self.my_base1
del self.my_base2
def test_create(self):
"""
        Testing creation of a Place instance
"""
self.assertIsInstance(self.my_base1, Place)
def test_permissions(self):
"""
Testing file permissions to be executable
"""
self.assertTrue(os.access("models/place.py", os.X_OK))
def test_id(self):
"""
Testing if attribute id is as unique as a string type
"""
self.assertIsInstance(self.my_base1.id, str)
self.assertNotEqual(self.my_base1.id, self.my_base2.id)
def test_dates(self):
"""
Testing created_at and updated_at of instances
"""
self.assertIsInstance(self.my_base1.created_at, datetime.datetime)
self.assertIsInstance(self.my_base1.updated_at, datetime.datetime)
prev_date = self.my_base1.updated_at
self.my_base1.save()
self.assertNotEqual(prev_date, self.my_base1.updated_at)
def test_str_format(self):
"""
Testing the function __str__ to have the correct format
[<class name>] (<self.id>) <self.__dict__>
"""
o = self.my_base1
msg1 = o.__str__()
msg2 = "[{}] ({}) {}".format(o.__class__.__name__, o.id, o.__dict__)
self.assertEqual(msg1, msg2)
def test_to_dict(self):
"""
Testing to_dict function to return correct format
"""
ins = self.my_base1
obj = ins.to_dict()
self.assertIsInstance(obj, dict)
self.assertTrue('__class__' in obj)
self.assertEqual(obj['__class__'], 'Place')
self.assertIsInstance(obj['created_at'], str)
self.assertIsInstance(obj['updated_at'], str)
self.assertEqual(obj['created_at'], ins.created_at.isoformat())
self.assertEqual(obj['updated_at'], ins.updated_at.isoformat())
def test_docstring(self):
"""
Testing documentation on place
"""
self.assertIsNotNone(models.place.__doc__)
self.assertIsNotNone(Place.__doc__)
|
[
"andresbaymon@gmail.com"
] |
andresbaymon@gmail.com
|
ada10adc0bef6aee3f66cc6505c04af63ade6437
|
ca2818572d17285210792694ba1f07c99e11d9ad
|
/setup.py
|
209a4bd93203208084c183cf32cece8f76ddf3bd
|
[
"Apache-2.0"
] |
permissive
|
tomzhang/codesnap
|
cc335e8a63af70ed0121b222eb4fc2e35841b0b0
|
04e11176888243052c46a6a04a1ba63a8f80d684
|
refs/heads/master
| 2022-11-29T16:23:05.625385
| 2020-08-09T07:11:58
| 2020-08-09T07:11:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
import setuptools
from distutils.core import Extension
with open("README.md") as f:
long_description = f.read()
setuptools.setup(
name="codesnap",
version="0.0.4",
author="Tian Gao",
author_email="gaogaotiantian@hotmail.com",
description="A profiling tool that can visualize python code in flame graph",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gaogaotiantian/codesnap",
packages=setuptools.find_packages("src"),
package_dir={"":"src"},
package_data={
"codesnap": [
"html/*.js",
"html/*.css",
"html/*.html"
]
},
ext_modules=[
Extension(
"codesnap.snaptrace",
sources = [
"src/codesnap/modules/snaptrace.c",
]
)
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Quality Assurance",
],
python_requires=">=3.5",
)
|
[
"gaogaotiantian@hotmail.com"
] |
gaogaotiantian@hotmail.com
|
33bada0a6ebc9c86ad48aa12cb5fff42acd3588a
|
3b43cf4cfc666798ebe85ed1db8858034b13d45c
|
/tests/universal_functions_tests/power_tests/normal.py
|
dab58c185239f89bab51ee55c80dbe61e5d4326a
|
[
"Apache-2.0"
] |
permissive
|
Pandinosaurus/legate.numpy
|
5428b80a0a53ab882cd74b5dbf5fd86c7ee82199
|
896f4fd9b32db445da6cdabf7b78d523fca96936
|
refs/heads/master
| 2023-06-27T04:33:52.982601
| 2021-07-01T21:39:52
| 2021-07-01T21:39:52
| 358,820,941
| 0
| 0
|
Apache-2.0
| 2021-08-01T02:57:57
| 2021-04-17T08:06:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import legate.numpy as lg
def test():
bases_np = np.random.randn(4, 5)
# avoid fractional exponents
exponents_np = np.random.randint(10, size=(4, 5)).astype(np.float64)
bases = lg.array(bases_np)
exponents = lg.array(exponents_np)
assert lg.allclose(
lg.power(bases, exponents), np.power(bases_np, exponents_np)
)
if __name__ == "__main__":
test()
|
[
"wonchanl@nvidia.com"
] |
wonchanl@nvidia.com
|
8a4209560e01a9bb2625b02445afa69dcf3b28fc
|
e7ff2f9e21a94f2956b8c79f268dc6d45b41237b
|
/Frontend/node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi
|
b5962c025c83982c05fecf7c1819e71e4893c18a
|
[
"MIT"
] |
permissive
|
vipul-07/MERN-Project
|
fcb4af686557b99b802404e8622905781e89bbc3
|
c0bdd3b5dfc73b2657b8563d069360e11466714a
|
refs/heads/master
| 2023-02-14T15:42:38.653627
| 2021-01-10T05:35:02
| 2021-01-10T05:35:02
| 317,460,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,709
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/apple/Library/Caches/node-gyp/14.10.1",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/apple/.npm-init.js",
"userconfig": "/Users/apple/.npmrc",
"cidr": "",
"node_version": "14.10.1",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/apple/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.8 node/v14.10.1 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/0w/px0kn_6s561dhjplhgbypnj80000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"apple@Apples-MacBook-Pro.local"
] |
apple@Apples-MacBook-Pro.local
|
f32cc09e9b5e4191dae2fb825a128f8ca6aa38c6
|
2e2a02ec8323982975ace3d249b22a42d8b97a1f
|
/skipper.py
|
11171dc6ca97629b3d735b09f2921f679e80ed68
|
[] |
no_license
|
datagovua/os-budget-ukraine
|
4e8c6d0373aead42890349befbd69bf8e8fef0a1
|
3a45f89c3872c9b9b45fb1206da445989b37b335
|
refs/heads/master
| 2021-01-13T02:49:03.608617
| 2016-12-22T20:59:14
| 2016-12-23T01:14:22
| 77,156,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
import logging
from datapackage_pipelines.wrapper import ingest, spew
_, datapackage, resource_iterator = ingest()
def intTryParse(value):
try:
int(value)
return True
except ValueError:
return False
def process(res):
for row in res:
if intTryParse(row['1.0']):
yield row
spew(datapackage, (process(res) for res in resource_iterator))
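# intTryParse is a plain predicate that can be sanity-checked in isolation:
#     intTryParse('42')   # True
#     intTryParse('4.2')  # False -- int('4.2') raises ValueError
#     intTryParse('abc')  # False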
|
[
"vanuan@gmail.com"
] |
vanuan@gmail.com
|
b0258289543572c3d2fd2b3d83991eb4e2d9f4dc
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/orchs/svcsencap.py
|
83b1a538fc7c72069845b02465a56b59e320b8da
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 8,108
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SvcsEncap(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.orchs.SvcsEncap")
meta.moClassName = "orchsSvcsEncap"
meta.rnFormat = "encap-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "IP Pool Resource Instance"
meta.writeAccessMask = 0x2001
meta.readAccessMask = 0x2001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.orchs.RsSvcsEncapToSvcAlloc")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.orchs.RsSvcsEncapToSvcAlloc", "rssvcsEncapToSvcAlloc-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.orchs.Config")
meta.superClasses.add("cobra.model.orchs.Entity")
meta.superClasses.add("cobra.model.orchs.Element")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.naming.NamedIdentifiedObject")
meta.rnPrefixes = [
('encap-', True),
]
prop = PropMeta("str", "annotation", "annotation", 38579, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 28290, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "encap", "encap", 28246, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("encap", prop)
prop = PropMeta("str", "encapNsName", "encapNsName", 28248, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("encapNsName", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 40718, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "guid", "guid", 28255, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.regex = ['[[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}]{0,1}']
meta.props.add("guid", prop)
prop = PropMeta("str", "id", "id", 28253, PropCategory.REGULAR)
prop.label = "Id"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("id", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "legLoc", "legLoc", 28245, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("legLoc", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 28679, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 28294, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "rtrId", "rtrId", 28247, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("rtrId", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToNwIf", "Private Network to Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
80a7585e86a4e8633b65ccb5495c63da103934b7
|
8bbeb7b5721a9dbf40caa47a96e6961ceabb0128
|
/python3/216.Combination Sum III(组合总和 III).py
|
4c2a30d0cd8dad2c5c465ba3a4dfdb989f691e11
|
[
"MIT"
] |
permissive
|
lishulongVI/leetcode
|
bb5b75642f69dfaec0c2ee3e06369c715125b1ba
|
6731e128be0fd3c0bdfe885c1a409ac54b929597
|
refs/heads/master
| 2020-03-23T22:17:40.335970
| 2018-07-23T14:46:06
| 2018-07-23T14:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
"""
<div>
<p>Find all possible combinations of <i><b>k</b></i> numbers that add up to a number <i><b>n</b></i>, given that only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.</p>
<p><strong>Note:</strong></p>
<ul>
<li>All numbers will be positive integers.</li>
<li>The solution set must not contain duplicate combinations.</li>
</ul>
<p><strong>Example 1:</strong></p>
<pre>
<strong>Input:</strong> <i><b>k</b></i> = 3, <i><b>n</b></i> = 7
<strong>Output:</strong> [[1,2,4]]
</pre>
<p><strong>Example 2:</strong></p>
<pre>
<strong>Input:</strong> <i><b>k</b></i> = 3, <i><b>n</b></i> = 9
<strong>Output:</strong> [[1,2,6], [1,3,5], [2,3,4]]
</pre>
</div>
<p>找出所有相加之和为 <em><strong>n</strong> </em>的 <strong><em>k </em></strong>个数的组合<strong><em>。</em></strong>组合中只允许含有 1 - 9 的正整数,并且每种组合中不存在重复的数字。</p>
<p><strong>说明:</strong></p>
<ul>
<li>所有数字都是正整数。</li>
<li>解集不能包含重复的组合。 </li>
</ul>
<p><strong>示例 1:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 7
<strong>输出:</strong> [[1,2,4]]
</pre>
<p><strong>示例 2:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 9
<strong>输出:</strong> [[1,2,6], [1,3,5], [2,3,4]]
</pre>
<p>找出所有相加之和为 <em><strong>n</strong> </em>的 <strong><em>k </em></strong>个数的组合<strong><em>。</em></strong>组合中只允许含有 1 - 9 的正整数,并且每种组合中不存在重复的数字。</p>
<p><strong>说明:</strong></p>
<ul>
<li>所有数字都是正整数。</li>
<li>解集不能包含重复的组合。 </li>
</ul>
<p><strong>示例 1:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 7
<strong>输出:</strong> [[1,2,4]]
</pre>
<p><strong>示例 2:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 9
<strong>输出:</strong> [[1,2,6], [1,3,5], [2,3,4]]
</pre>
"""
class Solution:
    def combinationSum3(self, k, n):
        """
        :type k: int
        :type n: int
        :rtype: List[List[int]]
        """
        # Backtrack over the candidates 1..9; advancing `start` keeps
        # every combination a unique, strictly increasing set.
        def backtrack(start, remaining, path):
            if len(path) == k:
                if remaining == 0:
                    results.append(path)
                return
            for num in range(start, 10):
                if num > remaining:
                    break  # larger candidates overshoot as well
                backtrack(num + 1, remaining - num, path + [num])

        results = []
        backtrack(1, n, [])
        return results
|
[
"lishulong@wecash.net"
] |
lishulong@wecash.net
|
c5daf96e1ec9ac90dc1db252619f073fb6d4df6d
|
179a0f995f5a3eb7a6005f8e96498ef21b2bf166
|
/docs/conf.py
|
45ccdf6d81b9baf63f859bf4fc96836c47707904
|
[
"MIT"
] |
permissive
|
VB6Hobbyst7/pycatia
|
845052a4584318bf0cf0861512203ddd337a7bca
|
cff309fe2b4802ff2b2c5c984f8064747f81065d
|
refs/heads/master
| 2023-04-14T20:28:51.427101
| 2021-04-27T11:03:42
| 2021-04-27T11:03:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,936
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from unittest.mock import MagicMock
sys.path.insert(0, os.path.abspath('..'))
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['pywin32', 'win32com.client', 'pywintypes']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Project information -----------------------------------------------------
project = 'pycatia'
copyright = '2020, Paul Bourne'
author = 'Paul Bourne'
# The short X.Y version
version = '0.5.0'
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.todo',
'sphinx_togglebutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'evereux',
'github_repo': 'pycatia',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme_path = []
html_css_files = [
'css/pycatia.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycatiadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pycatia.tex', 'pycatia Documentation',
'Paul Bourne', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycatia', 'pycatia Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pycatia', 'pycatia Documentation',
author, 'pycatia', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
[
"evereux@gmail.com"
] |
evereux@gmail.com
|
8c1605776199c122465a2aa10d3dade49beec409
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02748/s229745856.py
|
07e6ff9b4f694f31ed7e0dfb26750d3f2b624a60
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# A and B hold the prices of the two item groups; xyc holds M discount
# tickets (item x from A bought with item y from B costs c less).
_, _, M = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
xyc = [tuple(map(int, input().split())) for i in range(M)]
# Answer: the cheaper of the best un-discounted pair and the best ticket.
print(min([min(A)+min(B)]+[A[x-1]+B[y-1]-c for x, y, c in xyc]))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
572938151b792f0f6e8e2bb10d5c6bd6a452af48
|
e5504d8c4880993b82d5583a11c5cc4623e0eac2
|
/Arrays/loopInCircularArray__IMP.py
|
768b8bdc06fdc4ba7a36f6287179bf3b4b92d756
|
[] |
no_license
|
noorulameenkm/DataStructuresAlgorithms
|
e5f87f426fc444d18f830e48569d2a7a50f5d7e0
|
7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0
|
refs/heads/master
| 2023-06-08T19:29:42.507761
| 2023-05-28T16:20:19
| 2023-05-28T16:20:19
| 219,270,731
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,536
|
py
|
def circular_array_loop_exists(arr):
# TODO: Write your code here
for i in range(len(arr)):
slow = fast = i
is_forward = arr[i] >= 0
# if slow or fast becomes '-1' this means we can't find cycle for this number
while True:
# move one step for slow pointer
slow = get_next_index(arr, is_forward, slow)
# move one step for fast pointer
fast = get_next_index(arr, is_forward, fast)
if fast != -1:
# move another step for fast pointer
fast = get_next_index(arr, is_forward, fast)
if slow == -1 or fast == -1 or slow == fast:
break
if slow != -1 and slow == fast:
return True
return False
def get_next_index(arr, is_forward, current_index):
direction = arr[current_index] >= 0
if is_forward != direction:
return -1 # change in direction, return -1
next_index = (arr[current_index] + current_index) % len(arr)
# one element cycle, return -1
if next_index == current_index:
next_index = -1
return next_index
def main():
print(circular_array_loop_exists([1, 2, -1, 2, 2]))
print(circular_array_loop_exists([2, 2, -1, 2]))
print(circular_array_loop_exists([2, 1, -1, -2]))
main()
""""
We are given an array containing positive and negative numbers. Suppose the array contains a number ‘M’ at a particular index. Now,
if ‘M’ is positive we will move forward ‘M’ indices and if ‘M’ is negative move backwards ‘M’ indices.
You should assume that the array is circular which means two things:
If, while moving forward, we reach the end of the array, we will jump to the first element to continue the movement.
If, while moving backward, we reach the beginning of the array, we will jump to the last element to continue the movement.
Write a method to determine if the array has a cycle. The cycle should have more than one element and should follow one direction
which means the cycle should not contain both forward and backward movements.
"""
""""
Alternate Method
In our algorithm, we don’t keep a record of all the numbers that have been evaluated for cycles.
We know that all such numbers will not produce a cycle for any other instance as well.
If we can remember all the numbers that have been visited, our algorithm will improve to O(N) as,
then, each number will be evaluated for cycles only once. We can keep track of this by creating a separate
array however the space complexity of our algorithm will increase to O(N).
"""
|
[
"noorul.km@people10.com"
] |
noorul.km@people10.com
|
3aefb338c74473c31e9b8b9f5b57d93c9d53d0e5
|
5f957add3e3f7a1885d4f1b106de72e93c8fcb1a
|
/ExerciciosPython/ex072.py
|
a2215342cdcfa208efd442734fd5f94405993530
|
[
"MIT"
] |
permissive
|
mpatrickaires/curso-python
|
6e32cf785a3bc0076bb3ea24cd6d896604f4e774
|
aba023648527d53bfe18833b91210a7e528a84d7
|
refs/heads/main
| 2022-12-27T00:57:07.467940
| 2020-10-14T00:48:09
| 2020-10-14T00:48:09
| 302,203,176
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
extenso = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze',
'doze', 'treze', 'catorze', 'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')
while True:
numero = int(input('Digite um número entre 0 e 20: '))
while numero < 0 or numero > 20:
numero = int(input('Tente novamente. Digite um número entre 0 e 20: '))
print(f'Você digitou o número {extenso[numero]}')
continuar = str(input('Deseja continuar? [S/N] ')).strip().upper()
while continuar != 'S' and continuar != 'N':
continuar = str(input('Deseja continuar? [S/N] ')).strip().upper()
if continuar == 'N':
break
|
[
"mpatrickaires@gmail.com"
] |
mpatrickaires@gmail.com
|
1114a68d8b2e5c4fd05992b6c8ee4ca498cc92af
|
755e4e6e966433fe887f0f28f14916696b1588d7
|
/code/exceptions/exceptions.py
|
7e62995995ecbc57b12ce62c9ad0de8d73a94b9e
|
[] |
no_license
|
phildue/FingerspellingRecognition
|
f18518a6e2e29b769d131e5b54846f00213f3ff1
|
1b5236142734d7b50f0f4161ecc533b7d10347b8
|
refs/heads/master
| 2021-03-24T10:40:24.507766
| 2017-07-03T09:33:09
| 2017-07-03T09:33:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
class NotTrained(Exception):
pass
class NoRoiFound(Exception):
pass
class NoContoursFound(Exception):
pass
class DescriptorFailed(Exception):
pass
|
[
"phild@protonmail.com"
] |
phild@protonmail.com
|
d1684f57fb28491ecde85c741f45fcd4e4659cf8
|
ed9e4027cbd76fbac19598163b9673628cb07eea
|
/anjia/asgi.py
|
372aac7872de8b88fd3e438e294b71fb8dafce32
|
[
"BSD-2-Clause"
] |
permissive
|
ankiwoong/python_kindergarten
|
3a1f9a486a32866b5f37ba4673dfc2135a85eec0
|
43b1e15969f0d35073e2f7fb1286d8c094fd80a8
|
refs/heads/master
| 2022-09-01T08:11:27.374802
| 2020-05-27T08:45:14
| 2020-05-27T08:45:14
| 258,760,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
ASGI config for anjia project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'anjia.settings')
application = get_asgi_application()
|
[
"ankiwoong@gmail.com"
] |
ankiwoong@gmail.com
|
6a50f6dc840ad5ee463050db663639df9a8ea7dd
|
e8b12e314782bf68347838599c8168e4a8019373
|
/CompareAlternatives.py
|
0d80231eb7ed1c3ac5094ee2f446c2fa5eed2155
|
[] |
no_license
|
HPM573/Lab_ParallelProcessing
|
0ce7e4b615afe9e2e2a281f79684e9067003aa1b
|
f2e6401f4a5dc057a150914653079c0284c92b4b
|
refs/heads/main
| 2023-05-12T06:03:15.275404
| 2023-05-02T13:58:18
| 2023-05-02T13:58:18
| 180,822,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
import EconEvalInputData as D
import ProbabilisticSupport as Support
import ProbilisticParamClasses as P
from ParallelClasses import ParallelMultiCohort
N_COHORTS = 200 # number of cohorts
if __name__ == '__main__': # this line is needed to avoid errors that occur on Windows computers
# create a multi-cohort to simulate under mono therapy
multiCohortMono = ParallelMultiCohort(
ids=range(N_COHORTS),
pop_size=D.POP_SIZE,
therapy=P.Therapies.MONO
)
multiCohortMono.simulate(sim_length=D.SIM_LENGTH)
# create a multi-cohort to simulate under combi therapy
multiCohortCombo = ParallelMultiCohort(
ids=range(N_COHORTS),
pop_size=D.POP_SIZE,
therapy=P.Therapies.COMBO
)
multiCohortCombo.simulate(sim_length=D.SIM_LENGTH)
# print the estimates for the mean survival time and mean time to AIDS
Support.print_outcomes(multi_cohort_outcomes=multiCohortMono.multiCohortOutcomes,
therapy_name=P.Therapies.MONO)
Support.print_outcomes(multi_cohort_outcomes=multiCohortCombo.multiCohortOutcomes,
therapy_name=P.Therapies.COMBO)
# draw survival curves and histograms
Support.plot_survival_curves_and_histograms(multi_cohort_outcomes_mono=multiCohortMono.multiCohortOutcomes,
multi_cohort_outcomes_combo=multiCohortCombo.multiCohortOutcomes)
# print comparative outcomes
Support.print_comparative_outcomes(multi_cohort_outcomes_mono=multiCohortMono.multiCohortOutcomes,
multi_cohort_outcomes_combo=multiCohortCombo.multiCohortOutcomes)
# report the CEA results
Support.report_CEA_CBA(multi_cohort_outcomes_mono=multiCohortMono.multiCohortOutcomes,
multi_cohort_outcomes_combo=multiCohortCombo.multiCohortOutcomes)
|
[
"reza.yaesoubi@yale.edu"
] |
reza.yaesoubi@yale.edu
|
48b211d3ffc2fe351f125460bfa2de347c5ad89c
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/frenetic-lang_pyretic/pyretic-master/pyretic/tests/test_mac_learner.py
|
19c0e4482c157a77029346d963681440c374e52d
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,387
|
py
|
#!/usr/bin/python
from mininet.net import Mininet
from mininet.node import RemoteController
import os, shlex, subprocess, utils, time
from utils import init
### Module Parameters
def get_controller():
return 'pyretic.modules.mac_learner'
def run_mininet():
# mn = Mininet()
# s1 = mn.addSwitch('s1')
# s2 = mn.addSwitch('s2')
# s3 = mn.addSwitch('s3')
# h1 = mn.addHost('h1')
# h2 = mn.addHost('h2')
# h3 = mn.addHost('h3')
# mn.addLink(s1, s2)
# mn.addLink(s1, s3)
# mn.addLink(s2, s3)
# mn.addLink(h1, s1)
# mn.addLink(h2, s2)
# mn.addLink(h3, s3)
# mn.addController('c0', RemoteController)
# time.sleep(1)
# mn.run(mn.pingAll)
# Alternately, run mininet via the command line. Note that we need to use
# absolute path names because sudo mucks with the env.
mn = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../mininet.sh'))
cmd = '%s --topo cycle,3,4 --mac --test=pingall' % mn
subprocess.call(shlex.split(cmd))
def process_controller_output(oldf, newf):
lines = oldf.readlines()
lines.sort()
keywords = ['TEST', 'ERROR', 'error']
## filter out lines that do not contain one of the keywords
for line in lines:
for kw in keywords:
if line.find(kw) >= 0:
newf.write(line)
def process_mininet_output(oldf, newf):
lines = oldf.readlines()
lines.sort()
keywords = ['TEST', 'ERROR', 'error', 'received']
## filter out lines that do not contain one of the keywords
for line in lines:
for kw in keywords:
if line.find(kw) >= 0:
newf.write(line)
### Tests
test_mac_learner = utils.TestModule( __name__, __file__, get_controller, run_mininet, process_controller_output, process_mininet_output)
def test_mac_learner_i(init):
utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, '-m i')
def test_mac_learner_r0(init):
utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, '-m r0')
def test_mac_learner_p0(init):
utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, '-m p0')
# def test_mac_learner_p0_nx(init):
# utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, '-m p0 --nx')
### Executing this file starts the mininet instance for this test.
if __name__ == "__main__":
run_mininet()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
dc4ee8e84412fbe9e26fa41aea2ba61f0a80d687
|
3b11dc40c7d772fffeb4d8683e5c9791c41f6454
|
/custom/clients/ecobank/ecobank_inventory/models/inventory_account.py
|
54cf03c640a891acbae5ed78bf433efd0cd027f2
|
[] |
no_license
|
Jacky-odoo/Ecobank
|
b986352abac9416ab00008a4abaec2b1f1a1f262
|
5c501bd03a22421f47c76380004bf3d62292f79d
|
refs/heads/main
| 2023-03-09T18:10:45.058530
| 2021-02-25T14:11:12
| 2021-02-25T14:11:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
from odoo import api, fields, models
from odoo.exceptions import ValidationError
class InventoryUser(models.Model):
_name = 'inventory.account'
_rec_name = 'name_and_code'
name = fields.Char(string='Name', required=True)
code = fields.Char(string='Code', required=True)
name_and_code = fields.Char(compute='compute_name_code', store=True)
@api.multi
@api.depends('name', 'code')
def compute_name_code(self):
for rec in self:
if rec.code and rec.name:
rec.name_and_code = str(rec.name + " (" + rec.code + ")")
@api.multi
def copy(self, default=None):
raise ValidationError("Sorry you are not allowed to perform this operation. Error Code BYT001")
@api.constrains('name')
def check_name(self):
all_accounts = self.search([])
for account in all_accounts:
if self.name.lower() == account.name.lower() and self.id != account.id:
raise ValidationError("Error! Account Name already exist. BYT005")
_sql_constraints = [
('unique_code', 'unique (code)', "Account Code Already Exist !"),
]
|
[
"francisbnagura@gmail.com"
] |
francisbnagura@gmail.com
|
101e6d98e6ea5327b9632183ef8eb52de0c552e9
|
ff5eea95bb0827cb086c32f4ec1c174b28e5b82d
|
/gammapy/background/tests/test_ring.py
|
047cad9a887193d1551fbd48446204c72bfc2e9e
|
[] |
no_license
|
pflaumenmus/gammapy
|
4830cc5506a4052658f30077fa4e11d8c685ede0
|
7b5caf832c9950c886528ca107203ce9b83c7ebf
|
refs/heads/master
| 2021-01-15T23:27:46.521337
| 2013-09-25T14:23:35
| 2013-09-25T14:23:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division
import unittest
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from astropy.io import fits
from ..maps import Maps
from ..ring import RingBgMaker, outer_ring_radius
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
@pytest.mark.skipif('not HAS_SCIPY')
class TestRingBgMaker(unittest.TestCase):
def test_construction(self):
r = RingBgMaker(0.3, 0.5)
r.info()
def test_correlate(self):
image = np.zeros((10, 10))
image[5, 5] = 1
r = RingBgMaker(3, 6, 1)
image = r.correlate(image)
def test_correlate_maps(self):
n_on = np.ones((200, 200))
hdu = fits.ImageHDU(n_on, name='n_on')
maps = Maps([hdu])
maps['exclusion'].data[100:110, 100:110] = 0
r = RingBgMaker(10, 13, 1)
r.correlate_maps(maps)
class TestHelperFunctions(unittest.TestCase):
def test_compute_r_o(self):
actual = outer_ring_radius(1, 0, 1)
assert_almost_equal(actual, 1)
if __name__ == '__main__':
unittest.main()
|
[
"Deil.Christoph@gmail.com"
] |
Deil.Christoph@gmail.com
|
ff31f03d357f8dd02d1fef1e8193bb092e608bea
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02708/s465285057.py
|
b9e8c08a651992f50a225309892a6784c1a1572f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from itertools import accumulate

N, K = map(int, input().split())
# acc[i] = 0 + 1 + ... + i  (prefix sums of 0..N)
acc = list(accumulate(range(N+1), lambda x, y: x+y))
ans = 0
mod = 10**9+7
# For each count i of chosen numbers, the attainable sums form one
# contiguous range [l, r]; count its r - l + 1 distinct values.
for i in range(K, N+1):
    r = acc[N] - acc[N-i]   # largest sum of i numbers from 0..N
    l = acc[i-1]            # smallest sum of i numbers from 0..N
    ans = (ans+r-l+1) % mod
ans += 1                    # the single way to pick all N+1 numbers
print(ans % mod)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
38eaeac29ebaa70dc88d888b36fe8d2e3156dd76
|
083b3f5b0d23c269c6a9ff1ea413e70fb799a497
|
/Leetcode Challenge/09_September_2020/Python/Week 5/2_First Missing Positive.py
|
5daf39e470ef89206f9440b17c1cc1717578a4f7
|
[] |
no_license
|
HectorIGH/Competitive-Programming
|
b2e02dff140d9ebb06c646f7be0b53ea0afe90c9
|
467058c63e8a7e76805feebe3020bac4d20516a6
|
refs/heads/master
| 2022-12-31T18:32:46.824626
| 2020-10-16T20:38:33
| 2020-10-16T20:38:33
| 279,733,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
#Given an unsorted integer array, find the smallest missing positive integer.
#
#Example 1:
#
#Input: [1,2,0]
#Output: 3
#Example 2:
#
#Input: [3,4,-1,1]
#Output: 2
#Example 3:
#
#Input: [7,8,9,11,12]
#Output: 1
#Follow up:
#
#Your algorithm should run in O(n) time and uses constant extra space.
#
# Hide Hint #1
#Think about how you would solve the problem in non-constant space. Can you apply that logic to the existing space?
# Hide Hint #2
#We don't care about duplicates or non-positive integers
# Hide Hint #3
#Remember that O(2n) = O(n)
from typing import List  # needed for the List annotation outside LeetCode


class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        '''
        nums = set((n for n in nums if n > 0))
        i = 1
        while True:
            if i not in nums:
                return i
            i += 1
        '''
        if len(nums) == 0:
            return 1
        n = len(nums)
        # Step 1: the answer lies in 1..n+1; if 1 is absent we are done.
        contains_one = False
        for i in range(n):
            if nums[i] == 1:
                contains_one = True
                break
        if not contains_one:
            return 1
        # Step 2: replace out-of-range values (<= 0 or > n) with 1 so
        # every entry can safely be used as an index marker.
        for i in range(n):
            if nums[i] <= 0 or nums[i] > n:
                nums[i] = 1
        # Step 3: mark value v as seen by negating nums[v - 1].
        for i in range(n):
            val = nums[i]
            pos = abs(val) - 1
            if nums[pos] > 0:
                nums[pos] = -1 * nums[pos]
        # Step 4: the first still-positive slot i means i + 1 is missing.
        for i in range(n):
            if nums[i] > 0:
                return i + 1
        return n + 1
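# Quick sanity checks against the examples above (a local sketch; on
# LeetCode only the Solution class itself is submitted):
if __name__ == "__main__":
    s = Solution()
    assert s.firstMissingPositive([1, 2, 0]) == 3
    assert s.firstMissingPositive([3, 4, -1, 1]) == 2
    assert s.firstMissingPositive([7, 8, 9, 11, 12]) == 1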
|
[
"HectorIGH@users.noreply.github.com"
] |
HectorIGH@users.noreply.github.com
|
21cc1ba23778a7ba76d8b97034ae2a2236266abf
|
864acf7235e330123c3d68ed14cdd8bf8eed800b
|
/crm/accounts/models.py
|
be98a7f2e0926b1e0b0ec5e7fd8a599dfe9597b2
|
[] |
no_license
|
wahid999/djangostuff
|
83f0ae53df5c53d192603d7aaf7ee72f8665c240
|
c102edfb13b8ba39930e44069122c5e545ef00ee
|
refs/heads/main
| 2023-07-04T20:02:32.550831
| 2021-08-13T16:07:20
| 2021-08-13T16:07:20
| 399,344,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
name = models.CharField(max_length=200, null=True)
phone = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=200, null=True)
profile_pic = models.ImageField(default="IMG_3940.JPG", null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=200, null=True)
def __str__(self):
return self.name
class Product(models.Model):
CATEGORY = (
('Indoor', 'Indoor'),
('Out Door', 'Out Door'),
)
name = models.CharField(max_length=200, null=True)
price = models.FloatField(null=True)
category = models.CharField(max_length=200, null=True, choices=CATEGORY)
description = models.CharField(max_length=200, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True, null=True)
tags = models.ManyToManyField(Tag)
def __str__(self):
return self.name
class Order(models.Model):
STATUS = (
('Pending', 'Pending'),
('Out for delivery', 'Out for delivery'),
('Delivered', 'Delivered'),
)
customer = models.ForeignKey(Customer, null=True, on_delete= models.SET_NULL)
product = models.ForeignKey(Product, null=True, on_delete= models.SET_NULL)
date_created = models.DateTimeField(auto_now_add=True, null=True)
status = models.CharField(max_length=200, null=True, choices=STATUS)
note = models.CharField(max_length=1000, null=True)
def __str__(self):
return self.product.name
|
[
"wahidhussainturi@gmail.com"
] |
wahidhussainturi@gmail.com
|
7e06dd17c6c8f3382921b07d5a29bfd3f67c4817
|
846e642fd9b01d3b500d3efba4790761039eec24
|
/code/smtp.py
|
978b10738307ac891f4680f1e0a033f0d1ac1892
|
[] |
no_license
|
sachinyadav3496/Machine_Learning_Workshop
|
ffea23799c0f8477d9b5cc19b98e7d33a6364390
|
37f433631d1ae4e4db37c4baae6cdc3a7619423e
|
refs/heads/master
| 2020-11-24T11:49:45.936367
| 2020-05-01T08:38:10
| 2020-05-01T08:38:10
| 228,130,385
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
import smtplib
import getpass
def Main():
print("\n\n*************************welcome************************\n")
print("\nWelcom to Email Service \n")
print("Enter your login details - \n")
gmail_user = input("\n\nUserName : ")
gmail_password = getpass.getpass("Password : ")
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
print("\n\nConnection established ")
server.ehlo()
server.login(gmail_user, gmail_password)
print("\n\nYou have Successfully logged in your account ",gmail_user)
except Exception as e:
print("\n\nError!!! in Connection ")
print(e)
exit(0)
sent_from = gmail_user
i = int(input("\n\nEnter no. of recipients - "))
print("\n\nEnter Recipients Email Addressess - \n")
to = []
for k in range(i):
to.append(input())
print()
subject = input("\n\nPlease Type in Subject of The Mail - ")
print("\n\nType in Your Message (Type in EOF to FINISH)\n\n")
message=[]
while True:
msg = input()
if msg.upper() == 'EOF' :
break
else :
message.append(msg)
print("\n\nMessege is Ready for Delivery\n\n ")
body = '\n'.join(message)
email_text = """From:%s
To:%s
Subject:%s
%s
"""%(sent_from, ", ".join(to), subject, body)
try:
print("\n\nEmail sending is in process - \n ")
server.sendmail(sent_from, to, email_text)
server.close()
except Exception as e:
print('\nSomething went wrong...',e)
else:
print("\nMessage Delivered to - \n")
for i in to:
print(i)
print()
print("\n\n**********************Exiting********************\n\n")
print("\n\nThanks For using Mail Service \n\n")
if __name__ == "__main__":
Main()
|
[
"sachinyadav3496@gmail.com"
] |
sachinyadav3496@gmail.com
|
ce3be2e0574e1ed136c469dfa1ef2ac357ed40cc
|
dfb8d3c365bd2ea27cef9af5cb00b7be1dae978d
|
/train.py
|
b23821e543d1b2a73205ead7e410f2b5b7bac887
|
[
"MIT"
] |
permissive
|
Windstudent/IRM-based-Speech-Enhancement-using-DNN
|
dd0cedfd4150fed69c55d33a744d0a6520fdf2d5
|
27a6f73b5b7fa91a4796e093e6ea3e30508a5c15
|
refs/heads/master
| 2020-07-05T15:34:52.712226
| 2019-05-07T14:36:40
| 2019-05-07T14:36:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
import argparse
import json
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from data.test_dataset import TestDataset
from data.train_dataset import TrainDataset
from trainer.trainer import Trainer
from utils.utils import initialize_config
def main(config, resume):
"""
训练脚本的入口函数
Notes:
1. 加载数据集
2. 初始化模型
3. 设置优化器
4. 选择损失函数
5. 训练脚本 run
Args:
config (dict): 配置项
resume (bool): 是否加载最近一次存储的模型断点
"""
torch.manual_seed(config["seed"])
np.random.seed(config["seed"])
train_dataset = TrainDataset(
mixture_dataset=config["train_dataset"]["mixture"],
mask_dataset=config["train_dataset"]["clean"],
limit=config["train_dataset"]["limit"],
offset=config["train_dataset"]["offset"],
)
train_data_loader = DataLoader(
dataset=train_dataset,
batch_size=config["train_dataset"]["batch_size"],
num_workers=config["train_dataset"]["num_workers"],
shuffle=config["train_dataset"]["shuffle"]
)
valid_dataset = TestDataset(
mixture_dataset=config["valid_dataset"]["mixture"],
clean_dataset=config["valid_dataset"]["clean"],
limit=config["valid_dataset"]["limit"],
offset=config["valid_dataset"]["offset"],
)
valid_data_loader = DataLoader(
dataset=valid_dataset
)
model = initialize_config(config["model"])
optimizer = torch.optim.Adam(
params=model.parameters(),
lr=config["optimizer"]["lr"]
)
loss_function = initialize_config(config["loss_function"])
trainer = Trainer(
config=config,
resume=resume,
model=model,
loss_function=loss_function,
optim=optimizer,
train_dl=train_data_loader,
validation_dl=valid_data_loader,
)
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='IRM Estimation using DNN in Speech Enhancement')
parser.add_argument("-C", "--config", required=True, type=str, help="训练配置文件(*.json)")
parser.add_argument('-D', '--device', default=None, type=str, help="本次实验使用的 GPU 索引,e.g. '1,2,3'")
parser.add_argument("-R", "--resume", action="store_true", help="是否从最近的一个断点处继续训练")
args = parser.parse_args()
if args.device:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
# load config file
config = json.load(open(args.config))
config["train_config_path"] = args.config
main(config, resume=args.resume)
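# Usage sketch (hypothetical paths, matching the flags defined above):
#   python train.py -C config/train.json -D 0
#   python train.py -C config/train.json -R    # resume from the last checkpoint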
|
[
"haoxiangsnr@gmail.com"
] |
haoxiangsnr@gmail.com
|
93e0d1af53bc2b9efd06b47d2a1c4276bdb0b0bd
|
5390d79dad71ad0d9ff9d0777435dcaf4aad16b3
|
/chapter_06/favorite_number.py
|
124b9763eeac8593df0e93e0c0e845aa9bc3e5dd
|
[] |
no_license
|
JasperMi/python_learning
|
19770d79cce900d968cec76dac11e45a3df9c34c
|
8111d0d12e4608484864dddb597522c6c60b54e8
|
refs/heads/master
| 2020-11-26T08:57:02.983869
| 2020-03-11T10:14:55
| 2020-03-11T10:14:55
| 218,935,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
favorite_numbers = {
'bob': 2,
'sarah': 6,
'martin': 8,
'katy': 9,
'tom': 10
}
print('bob' + "'s favorite number is " + str(favorite_numbers['bob']) + ".")
print('sarah' + "'s favorite number is " + str(favorite_numbers['sarah']) + ".")
print('martin' + "'s favorite number is " + str(favorite_numbers['martin']) + ".")
print('katy' + "'s favorite number is " + str(favorite_numbers['katy']) + ".")
print('tom' + "'s favorite number is " + str(favorite_numbers['tom']) + ".")
|
[
"darmi19@163.com"
] |
darmi19@163.com
|
b1f2bc27194e8f706625493989d95c5335783f9f
|
fc58366ed416de97380df7040453c9990deb7faa
|
/daoliagent/services/arp.py
|
7d9cf1622fdcd08505553150ef2cdef052d75232
|
[
"Apache-2.0"
] |
permissive
|
foruy/openflow-multiopenstack
|
eb51e37b2892074234ebdd5b501b24aa1f72fb86
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
refs/heads/master
| 2016-09-13T08:24:09.713883
| 2016-05-19T01:16:58
| 2016-05-19T01:16:58
| 58,977,485
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,005
|
py
|
from oslo.config import cfg
from ryu.lib.packet import arp
from ryu.lib.packet import ethernet
from ryu.lib.packet import packet
from ryu.lib import addrconv
from ryu.ofproto import ether
from daoliagent.services.base import PacketBase
from daoliagent.lib import SMAX
from daoliagent.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class PacketARP(PacketBase):
priority = 5
def _arp(self, msg, dp, in_port, pkt_ether, pkt_arp, address):
ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)
actions = [ofp_parser.OFPActionSetField(eth_src=address),
ofp_parser.OFPActionOutput(ofp.OFPP_IN_PORT)]
match = ofp_parser.OFPMatch(
in_port=in_port, eth_type=ether.ETH_TYPE_ARP,
arp_spa=pkt_arp.src_ip, arp_tpa=pkt_arp.dst_ip)
LOG.debug("arp response %(src_mac)s-%(src_ip)s -> %(dst_mac)s-%(dst_ip)s",
{'src_mac': address, 'src_ip': pkt_arp.dst_ip,
'dst_mac': pkt_arp.src_mac, 'dst_ip': pkt_arp.src_ip})
self.add_flow(dp, match, actions)
self.packet_out(msg, dp, actions)
def _redirect(self, msg, dp, in_port, pkt_ether, pkt_arp, output):
ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)
actions = [ofp_parser.OFPActionOutput(output)]
match = ofp_parser.OFPMatch(
in_port=in_port, eth_type=ether.ETH_TYPE_ARP,
arp_spa=pkt_arp.src_ip, arp_tpa=pkt_arp.dst_ip)
self.add_flow(dp, match, actions)
self.packet_out(msg, dp, actions)
def run(self, msg, pkt_ether, pkt_arp, gateway, **kwargs):
dp = msg.datapath
in_port = msg.match['in_port']
ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)
src_mac = pkt_arp.src_mac
dst_ip = pkt_arp.dst_ip
LOG.debug("arp request %(src_mac)s-%(src_ip)s -> %(dst_mac)s-%(dst_ip)s",
{'src_mac': src_mac, 'src_ip': pkt_arp.src_ip,
'dst_mac': pkt_arp.dst_mac, 'dst_ip': dst_ip})
if gateway.int_dev != gateway.ext_dev:
int_port = self.port_get(dp, devname=gateway.int_dev)
tap_port = self.port_get(dp, devname=gateway.vint_dev)
if not int_port or not tap_port:
return True
if in_port == int_port.port_no:
if pkt_arp.dst_ip == gateway['int_ip']:
self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, tap_port.port_no)
return True
if in_port == tap_port.port_no:
if pkt_arp.src_ip == gateway['int_ip']:
self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, int_port.port_no)
return True
port = self.port_get(dp, devname=gateway['ext_dev'])
if not port:
return True
if in_port == port.port_no:
if pkt_arp.dst_ip == gateway['ext_ip']:
self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, ofp.OFPP_LOCAL)
return True
if in_port == ofp.OFPP_LOCAL:
if pkt_arp.src_ip == gateway['ext_ip']:
self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, port.port_no)
return True
num_ip = addrconv.ipv4._addr(dst_ip).value
if pkt_arp.opcode != arp.ARP_REQUEST:
LOG.debug("unknown arp op %s", pkt_arp.opcode)
elif (num_ip & 0x0000FFFF == SMAX - 1):
#br_port = self.port_get(dp, devname=gateway['vext_dev'])
#self._arp(dp, in_port, pkt_ether, pkt_arp, br_port.hw_addr)
self._arp(msg, dp, in_port, pkt_ether, pkt_arp, gateway['vint_mac'])
else:
servers = self.db.server_get_by_mac(src_mac, dst_ip, False)
if servers['src'] and servers['dst']:
self._arp(msg, dp, in_port, pkt_ether, pkt_arp, servers['dst'].mac_address)
else:
self._arp(msg, dp, in_port, pkt_ether, pkt_arp, gateway['vint_mac'])
|
[
"wenxiang.wang1204@gmail.com"
] |
wenxiang.wang1204@gmail.com
|
0b2c7b6c78f2f20e685b99106e28b2dcfabe7a03
|
9d852841463c64f75da8a8579c32cea856d2073d
|
/leetcode/validate_binary_search_tree.py
|
4931cdb9a7da506dea78bd8a759a89b592284296
|
[] |
no_license
|
LarsIndus/algorithms-DS
|
2d94a5ba3e17de7c8d9e7ac4ace8eb70bb2a7331
|
32a64a4522f8474ab63421b06e945f6e44a441e1
|
refs/heads/master
| 2023-04-26T00:13:06.026785
| 2021-05-20T18:55:12
| 2021-05-20T18:55:12
| 243,239,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,476
|
py
|
"""
Leetcode Problem 98: Validate Binary Search Tree (Medium)
Given the root of a binary tree, determine if it is a valid binary search tree (BST).
A valid BST is defined as follows:
- The left subtree of a node contains only nodes with keys less than the node's key.
- The right subtree of a node contains only nodes with keys greater than the node's key.
- Both the left and right subtrees must also be binary search trees.
Complexity for this solution:
O(n) time and space
Source: https://www.youtube.com/watch?v=ofuXorE-JKE
"""
# Node implementation --------------------------------------------------------
class newNode:
# Construct to create a newNode
def __init__(self, data):
self.data = data
self.left = None
self.right = None
# Solution -------------------------------------------------------------------
def is_valid_BST(root):
return helper(root, float("-inf"), float("inf"))
def helper(root, min_value, max_value):
if root is None:
return True
    # strict inequalities: equal keys are not allowed in a valid BST
    if root.data <= min_value or root.data >= max_value:
return False
valid_left = helper(root.left, min_value, root.data)
valid_right = helper(root.right, root.data, max_value)
return valid_left and valid_right
# Testing --------------------------------------------------------------------
def main():
# Test 1: Empty tree
tree = None
if is_valid_BST(tree):
print("Passed test 1 (emtpy tree).")
else:
print("Test 1 (empty tree) failed!")
# Test 2: Only root node
tree = newNode(1)
if is_valid_BST(tree):
print("Passed test 2 (only root node).")
else:
print("Test 2 (only root node) failed!")
# Test 3: Valid BST
    tree = newNode(4)
    tree.left = newNode(2)
    tree.right = newNode(6)
    tree.left.left = newNode(1)
    tree.right.left = newNode(5)
    tree.right.right = newNode(9)
if is_valid_BST(tree):
print("Passed test 3 (valid tree).")
else:
print("Test 3 (valid tree) failed!")
# Test 4: Non-valid BST
tree = newNode(2)
tree.left = newNode(1)
tree.right = newNode(3)
tree.left.left = newNode(0)
tree.right.left = newNode(1)
tree.right.right = newNode(9)
if not is_valid_BST(tree):
print("Passed test 4 (non-valid tree).")
else:
print("Test 4 (non-valid tree) failed!")
if __name__ == '__main__':
main()
|
[
"test@test.com"
] |
test@test.com
|
c394f35d81a2eb6ac4c455dd44b7add384a8b18b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/84/usersdata/203/57032/submittedfiles/lista1.py
|
4fe9d85ddd31f1891fa346e0bba2e39623b993ce
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
# -*- coding: utf-8 -*-
n=int(input('tamanho da lista: '))
l=[]
somai=0
qi=0
somap=0
qp=0
for i in range(n):
    a = int(input('elemento da lista: '))
    l.append(a)
for i in range(len(l)):
    if l[i] % 2 == 0:
        somap = somap + l[i]
        qp = qp + 1
    else:
        somai = somai + l[i]
        qi = qi + 1
print(somai)
print(somap)
print(qi)
print(qp)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9166a5025b83503317fc99cf5620f56acadc063c
|
35fb652b0b20e7352cacdc078e23464fad40ccf3
|
/web/controllers/food/food.py
|
ed79027f6fa2230bee2cb9150725d18254385a43
|
[] |
no_license
|
xiaoheng14/flask_wx_order
|
52f8fe01a473855c22a43c2651b102c291dbde04
|
be3314fdb0266eecf4ca7f5a55b2ea24078857c9
|
refs/heads/master
| 2020-08-23T03:59:19.006943
| 2018-11-19T12:21:25
| 2018-11-19T12:21:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# _*_ coding: utf-8 _*_
"""
__author__ = 'lawtech'
__date__ = '2018/10/27 3:14 PM'
"""
from flask import Blueprint
from common.libs.helper import ops_render
route_food = Blueprint('food_page', __name__)
@route_food.route("/index")
def index():
return ops_render("food/index.html")
@route_food.route("/info")
def info():
return ops_render("food/info.html")
@route_food.route("/set")
def set():
return ops_render("food/set.html")
@route_food.route("/cat")
def cat():
return ops_render("food/cat.html")
@route_food.route("/cat-set")
def catSet():
return ops_render("food/cat_set.html")
|
[
"584563542@qq.com"
] |
584563542@qq.com
|
52fa7f6ab35d271fd30dbc1f96ddcee4a2df32b5
|
e74c2e5b85b9af58a6f9b4b6eea160fb66f6bb08
|
/aula11.py
|
a14bb8b989d099d4f7350a32cb0c4b75eb76c49b
|
[] |
no_license
|
Nokutomi/AulaPython
|
670cc27986aa3a12e528f5d1602929a524b632fc
|
1e97e4821b12a0ad0a4438d682c1e4d61a10f61d
|
refs/heads/master
| 2022-11-15T08:38:47.401055
| 2020-07-08T02:49:54
| 2020-07-08T02:49:54
| 275,640,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
lista = [1,10]
arquivo = open('teste.txt', 'r')
try:
texto = arquivo.read()
divisao = 10 / 0
# numero = lista[3]
# x = a
# print('Fechando arquivo')
# arquivo.close()
except ZeroDivisionError:
print('Nao e possivel realizar uma divisao por zero')
except ArithmeticError:
print('Houve um erro ao realizar uma operacao aritmetica')
except IndexError:
print('Erro ao acessar um indice invalido da lista')
except Exception as ex:
print('Erro desconhecido. Erro: {}'.format(ex))
else:
print('Executa quando nao ocorre excecao')
finally:
print('Sempre executa')
print('Fechando arquivo')
arquivo.close()
|
[
"you@example.com"
] |
you@example.com
|
a02a2341ab021509e596e6ab801c9b00af24f937
|
988385035443e5d46d29d96b15179509fd1c782e
|
/addToArrayForm.py
|
ea09a01733d9a2d3d3b61c25a1837f7b7368545e
|
[] |
no_license
|
mwoitek/leetcode-python3
|
c120ee1b1eb8e17f3a301026f25c643be9852953
|
eb9989d3768eba82275a57243c99796e74ccdd48
|
refs/heads/master
| 2022-12-28T21:19:51.215210
| 2020-10-18T06:17:27
| 2020-10-18T06:17:27
| 301,295,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from typing import List  # needed for the List annotation outside LeetCode


class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
A_str = "".join([str(num) for num in A])
A_int = int(A_str)
ans = A_int + K
ans_list = [int(char) for char in str(ans)]
return ans_list
|
[
"woitek@usp.br"
] |
woitek@usp.br
|
7091c8bb4d092cb28c4a6f0d1fe1a329abcb2805
|
40b20d7e5f4381a64bd264a562c4ae6d6721b01c
|
/14-it-generator/sentence_gen.py
|
a17c48f6811da8c5180ec412bacbf4618080cabf
|
[
"MIT"
] |
permissive
|
KyrieCham/example-code
|
7d2f0d5901bf80b49dd6b1e9ae1c37c9cb6df7f5
|
3dd11744d1c0b1f00860e985ee2a0761e73ef7e7
|
refs/heads/master
| 2020-04-18T00:56:06.384756
| 2019-01-22T19:27:43
| 2019-01-22T19:27:43
| 167,098,245
| 1
| 0
|
MIT
| 2019-01-23T01:52:48
| 2019-01-23T01:52:47
| null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
"""
Sentence: iterate over words using a generator function
"""
import re
import reprlib
RE_WORD = re.compile('\w+')
class Sentence:
def __init__(self, text):
self.text = text
self.words = RE_WORD.findall(text)
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self):
for word in self.words: # <1>
yield word # <2>
return # <3>
# done! <4>
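# Usage sketch: iterating over a Sentence yields its words lazily, and
# reprlib abbreviates the repr of long texts.
if __name__ == '__main__':
    s = Sentence('"The time has come," the Walrus said')
    print(s)            # abbreviated repr
    for word in s:
        print(word)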
|
[
"luciano@ramalho.org"
] |
luciano@ramalho.org
|
c14cca36fd70f17c4adf7cf1050a549b485a5112
|
dd44e145ac547209f5f209bc9b1f09189bb8b5c7
|
/Python-OOP-July2021/04.Classes_and_objects-E/05.To-do-list/project/section.py
|
391f64c88e1e7f0db3acc9df9b8d20c2de06a156
|
[] |
no_license
|
todorovventsi/Software-Engineering
|
e3c1be8f0f72c85619518bb914d2a4dbaac270f8
|
64ffa6c80b190e7c6f340aaf219986f769f175ab
|
refs/heads/master
| 2023-07-09T05:35:14.522958
| 2021-08-15T14:35:55
| 2021-08-15T14:35:55
| 336,056,643
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
class Section:
def __init__(self, name):
self.name = name
self.tasks = []
def add_task(self, new_task):
if new_task not in self.tasks:
self.tasks.append(new_task)
return f"Task {new_task.details()} is added to the section"
return f"Task is already in the section {self.name}"
def complete_task(self, task_name):
for task in self.tasks:
if task.name == task_name:
task.completed = True
return f"Completed task {task.name}"
return f"Could not find task with the name {task_name}"
    def clean_section(self):
        # collect first, then remove: calling self.tasks.remove() while
        # iterating over self.tasks directly would skip elements
        completed = [task for task in self.tasks if task.completed]
        for task in completed:
            self.tasks.remove(task)
        return f"Cleared {len(completed)} tasks."
def view_section(self):
first_row = f"Section {self.name}:\n"
next_rows = [f"{task.details()}\n" for task in self.tasks]
return f"{first_row}{''.join(next_rows)}"
|
[
"todorov.ventsi@gmail.com"
] |
todorov.ventsi@gmail.com
|
9999bf5d93fa20451f61973a2e0ae14307aded8d
|
4b1cf07275a8f2abf30943b975d443485ef897ff
|
/data_generator.py
|
3805e30c71100e78de5cec92ba0c561a77bb426d
|
[
"MIT"
] |
permissive
|
gipsyblues/edge_ml_emotion_recognition
|
a0e1e0acc98d11f710542218b2603f72a8a93a4b
|
028e9a9264e7df5c48a047677b48f0c15e059e6c
|
refs/heads/master
| 2023-06-27T02:53:18.707806
| 2021-07-28T06:48:30
| 2021-07-28T06:48:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,108
|
py
|
import numpy as np
import cv2
import os
import imgaug as ia
import logging
from imgaug import augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
def _create_augment_pipeline():
sometimes = lambda aug: iaa.Sometimes(0.1, aug)
aug_pipe = iaa.Sequential(
[
iaa.Fliplr(0.5),
#iaa.Flipud(0.2),
iaa.Affine(translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}),
iaa.OneOf([iaa.Affine(scale=(0.8, 1.2)),
iaa.Affine(rotate=(-10, 10)),
iaa.Affine(shear=(-10, 10))]),
sometimes(iaa.OneOf([
iaa.GaussianBlur((0, 3.0)),
iaa.AverageBlur(k=(2, 7)),
iaa.MedianBlur(k=(3, 11)),
])),
sometimes(iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5))),
sometimes(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5)),
sometimes(iaa.OneOf([
iaa.Dropout((0.01, 0.1), per_channel=0.5),
iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
])),
sometimes(iaa.Add((-10, 10), per_channel=0.5)),
sometimes(iaa.Multiply((0.5, 1.5), per_channel=0.5)),
sometimes(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5))
],
random_order=True
)
return aug_pipe
def process_image_classification(image, desired_w = None, desired_h = None, aug_pipe = None):
# resize the image to standard size
if (desired_w and desired_h) or aug_pipe:
if (desired_w and desired_h):
# Rescale image
image = ia.imresize_single_image(image, (desired_w, desired_h))
if aug_pipe:
image = aug_pipe(image=image)
return image
class DataGenerator():
def __init__(self, X_train, y_train, batch_size=32, img_size = 48, prefix='appa-real/imgs/', shuffle=True, augment=None):
self.X_train = X_train
self.y_train = y_train
self.batch_size = batch_size
self.img_size = img_size
self.prefix = prefix
self.class_num = y_train.shape[1]
self.shuffle = shuffle
self.sample_num = len(X_train)
self.augment = augment
if self.augment:
logging.info("Using augmentation for {self.prefix}")
self.aug_pipe = _create_augment_pipeline()
def __call__(self):
while True:
indexes = self.__get_exploration_order()
itr_num = int(len(indexes) // (self.batch_size * 2))
for i in range(itr_num):
batch_ids = indexes[i * self.batch_size * 2:(i + 1) * self.batch_size * 2]
X, y = self.__data_generation(batch_ids)
yield X, y
def __get_exploration_order(self):
indexes = np.arange(self.sample_num)
if self.shuffle:
np.random.shuffle(indexes)
return indexes
def __data_generation(self, batch_ids):
X = np.zeros(shape=(self.batch_size, self.img_size, self.img_size, 3))
y = np.zeros(shape=(self.batch_size, self.class_num))
for i in range(self.batch_size):
img = cv2.imread(self.prefix + self.X_train[batch_ids[i]], 1)
try:
if self.augment:
img = process_image_classification(img, self.img_size, self.img_size, self.aug_pipe)
except Exception as e:
print(self.prefix + self.X_train[batch_ids[i]], e)
            img = img.astype(np.float32)
            # scale pixels from [0, 255] to [-1, 1]
            img /= 255.
            img -= 0.5
            img *= 2.
            # BGR (OpenCV default) -> RGB channel order
            img = img[:, :, ::-1]
X[i, ::] = img
y[i, :] = self.y_train[batch_ids[i]]
return np.array(X), y
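# Usage sketch (illustrative values; X_train holds image paths relative
# to `prefix` and y_train one-hot label rows):
#   gen = DataGenerator(X_train, y_train, batch_size=32, img_size=48,
#                       prefix='appa-real/imgs/', augment=True)
#   X_batch, y_batch = next(gen())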
|
[
"dmitrywat@gmail.com"
] |
dmitrywat@gmail.com
|
ca9547928ab7a957dabd169f16fc201dc6d06efe
|
b83ff584bfcd9fce7a337ba1253287fc9afd03c7
|
/cmdline_fluency_countdown.py
|
c6564c0a2aa5dcf88e15805c147edba2570aebac
|
[] |
no_license
|
houstonhunt/fluencycountdown
|
6166eaf625f6e348213dcd5be8045ee218159900
|
d555b83972e05d09e1caafca61ea465c4ca3770c
|
refs/heads/master
| 2021-01-23T23:12:17.392090
| 2015-05-23T18:48:24
| 2015-05-23T18:48:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
#!/usr/bin/python
# cmdline_fluency_countdown.py
import pickle # used to save user progress (currently supporting 1 primary user)
import ConfigParser, os # used to parse language file
def init():
state = 0
try:
pickle.load(open("save.p", "rb"))
print "SUCCESS: loaded save file!"
state = 1
except:
config = ConfigParser.ConfigParser()
config.read('lang.cfg')
print "WELCOME: no save file found!"
print "Type a [language] you want to learn (example: English),"
print " or [list] then press [ENTER]"
selected_lang = raw_input()
# joke
if selected_lang == "English":
print "You already know English!"
quit()
elif selected_lang == "list":
list(selected_lang, config)
        else:
            # the original source breaks off mid-line here; a minimal,
            # assumed completion that just acknowledges the choice
            print "You chose: " + selected_lang
def list(what, cp):
if what == "list":
print "list what? [all] [easy] [medium] [hard] [other] [about]"
selected_lang = raw_input()
if selected_lang == "all":
list1(cp)
list2(cp)
list3(cp)
listo(cp)
elif selected_lang == "easy":
list1(cp)
elif selected_lang == "medium":
list2(cp)
elif selected_lang == "hard":
list3(cp)
elif selected_lang == "other":
listo(cp)
elif selected_lang == "about":
print "Coded by Houston Hunt"
print "Times to mastering a language for English speakers"
print "is given by " + str(cp.get('Reference', 'reference'))
def list1(cp):
print cp.get('Languages', 'desc1')
print str(cp.get('Languages', 'cat1'))
def list2(cp):
print str(cp.get('Languages', 'desc2'))
print str(cp.get('Languages', 'cat2'))
def list3(cp):
print str(cp.get('Languages', 'desc3'))
print str(cp.get('Languages', 'cat3'))
def listo(cp):
print str(cp.get('Languages', 'desco'))
print str(cp.get('Languages', 'other'))
init()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
09db4be45d5d63793dcd85353daabc9d84d3ac5d
|
08ca7028e0488c420fff8c831e9d4fd3e32ee292
|
/models/wideresnet.py
|
59ba6496518eab9bc92f85bceb9a2459910e4762
|
[] |
no_license
|
yogeshbalaji/Adversarial-training
|
0ee53fdbef2742788cbbc73ca592738347076fe2
|
3593c836f39c1313545fcc71e5ba8afa6f427326
|
refs/heads/master
| 2020-07-15T03:00:26.425582
| 2019-09-04T19:59:51
| 2019-09-04T19:59:51
| 205,464,494
| 12
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,517
|
py
|
from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from utils import data_normalize
def init_weight(*args):
return nn.Parameter(nn.init.kaiming_normal_(torch.zeros(*args), mode='fan_out', nonlinearity='relu'))
class Block(nn.Module):
"""
Pre-activated ResNet block.
"""
def __init__(self, width):
super().__init__()
self.bn0 = nn.BatchNorm2d(width, affine=False)
self.register_parameter('conv0', init_weight(width, width, 3, 3))
self.bn1 = nn.BatchNorm2d(width, affine=False)
self.register_parameter('conv1', init_weight(width, width, 3, 3))
def forward(self, x):
h = F.conv2d(F.relu(self.bn0(x)), self.conv0, padding=1)
h = F.conv2d(F.relu(self.bn1(h)), self.conv1, padding=1)
return x + h
class DownsampleBlock(nn.Module):
"""
Downsample block.
Does F.avg_pool2d + torch.cat instead of strided conv.
"""
def __init__(self, width):
super().__init__()
self.bn0 = nn.BatchNorm2d(width // 2, affine=False)
self.register_parameter('conv0', init_weight(width, width // 2, 3, 3))
self.bn1 = nn.BatchNorm2d(width, affine=False)
self.register_parameter('conv1', init_weight(width, width, 3, 3))
def forward(self, x):
h = F.conv2d(F.relu(self.bn0(x)), self.conv0, padding=1, stride=2)
h = F.conv2d(F.relu(self.bn1(h)), self.conv1, padding=1)
x_d = F.avg_pool2d(x, kernel_size=3, padding=1, stride=2)
x_d = torch.cat([x_d, torch.zeros_like(x_d)], dim=1)
return x_d + h
class WRN(nn.Module):
"""
Implementation of modified Wide Residual Network.
Differences with pre-activated ResNet and Wide ResNet:
* BatchNorm has no affine weight and bias parameters
* First layer has 16 * width channels
* Last fc layer is removed in favor of 1x1 conv + F.avg_pool2d
* Downsample is done by F.avg_pool2d + torch.cat instead of strided conv
First and last convolutional layers are kept in float32.
"""
def __init__(self, depth, width, num_classes):
super().__init__()
widths = [int(v * width) for v in (16, 32, 64)]
n = (depth - 2) // 6
self.register_parameter('conv0', init_weight(widths[0], 3, 3, 3))
self.group0 = self._make_block(widths[0], n)
self.group1 = self._make_block(widths[1], n, downsample=True)
self.group2 = self._make_block(widths[2], n, downsample=True)
self.bn = nn.BatchNorm2d(widths[2], affine=False)
self.register_parameter('conv_last', init_weight(num_classes, widths[2], 1, 1))
self.bn_last = nn.BatchNorm2d(num_classes)
self.mean = [125.3 / 255.0, 123.0 / 255.0, 113.9 / 255.0]
self.std = [63.0 / 255.0, 62.1 / 255.0, 66.7 / 255.0]
def _make_block(self, width, n, downsample=False):
def select_block(j):
if downsample and j == 0:
return DownsampleBlock(width)
return Block(width)
return nn.Sequential(OrderedDict(('block%d' % i, select_block(i)) for i in range(n)))
def forward(self, x):
x = data_normalize(x, self.mean, self.std)
h = F.conv2d(x, self.conv0, padding=1)
h = self.group0(h)
h = self.group1(h)
h = self.group2(h)
h = F.relu(self.bn(h))
h = F.conv2d(h, self.conv_last)
h = self.bn_last(h)
return F.avg_pool2d(h, kernel_size=h.shape[-2:]).view(h.shape[0], -1)
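# A minimal smoke test (a sketch; assumes this module and utils.data_normalize
# are importable). Each group stacks n = (depth - 2) // 6 pre-activated blocks:
# model = WRN(depth=28, width=10, num_classes=10)
# logits = model(torch.randn(2, 3, 32, 32))   # -> torch.Size([2, 10])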
|
[
"yogesh22@ramawks95.umiacs.umd.edu"
] |
yogesh22@ramawks95.umiacs.umd.edu
|
e34145873aede1b65f5e55265e1505cc6bde3391
|
387cf5f72ed6679a4d9e04bddd16998a190c4caf
|
/problems/programmers/lv3/pgs-67258-sweep-slow.py
|
6a3ef6ae150570c9680bfdc5e53635a2e6635517
|
[] |
no_license
|
CodyBuilder-dev/Algorithm-Coding-Test
|
db4ee1e7565fbcef3140192225167eff42ad5c02
|
cca5c4ba8bc31679ab00aceccfd8d9d39c232f72
|
refs/heads/master
| 2021-07-24T00:34:41.888289
| 2021-07-21T14:29:00
| 2021-07-21T14:29:00
| 219,123,221
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
"""
제목 :보석 쇼핑
아이디어 :
"""
def solution(gems):
s = set(gems)
hash = {}
    #interval_list = []  # (store candidate intervals in a list; unused)
    best_answer = [123456, 456789]  # sentinel interval, wider than any real answer
for i,gem in enumerate(gems):
        hash[gem] = i  # last index at which this gem kind was seen
if len(hash) ==len(s):
temp_answer = [min(hash.values()) + 1, max(hash.values()) + 1]
if temp_answer[1] - temp_answer[0] < best_answer[1] - best_answer[0]:
best_answer = temp_answer
elif temp_answer[1] - temp_answer[0] == best_answer[1] - best_answer[0] \
and temp_answer[0] < best_answer[0]:
best_answer = temp_answer
return best_answer
print(solution(["DIA", "RUBY", "RUBY", "DIA", "DIA", "EMERALD", "SAPPHIRE", "DIA"]))
print(solution(["AA", "AB", "AC", "AA", "AC"]))
print(solution(["XYZ", "XYZ", "XYZ"]))
print(solution(["ZZZ", "YYY", "NNNN", "YYY", "BBB"]))
print(solution(["DIA", "EM", "EM", "RUB", "DIA"]))
print(solution(["A", "A", "B"])) #5 #10
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AD", "AB"]))
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AD", "AB", "AZ","AB","AC","AA"]))
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AC", "AD", "AB","AZ","AB","AD","AC","AA","AB","AZ","AA"]))
|
[
"imspecial1@u.sogang.ac.kr"
] |
imspecial1@u.sogang.ac.kr
|
c64f6276a76c1f9c5a452595cbcd25de501fd7f6
|
e65a448da4f82d6e7c95cfadc5e8dfd06ed05c62
|
/cinder/cinder/api/middleware/auth.py
|
cf898c9b07d780e57e877272a930772dd33360d5
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/devstack
|
7a9d11bcc37884f3686e7178ebc25c178a6da283
|
6b73b164af7e5895501f1ca5dafebbba90510846
|
refs/heads/master
| 2022-11-19T19:58:43.536574
| 2015-01-29T09:00:59
| 2015-01-29T09:00:59
| 282,101,378
| 0
| 0
| null | 2020-07-24T02:17:48
| 2020-07-24T02:17:47
| null |
UTF-8
|
Python
| false
| false
| 6,014
|
py
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
import os
from oslo.config import cfg
from oslo.serialization import jsonutils
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder import context
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.openstack.common.middleware import request_id
from cinder import wsgi as base_wsgi
use_forwarded_for_opt = cfg.BoolOpt(
'use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
CONF = cfg.CONF
CONF.register_opt(use_forwarded_for_opt)
LOG = logging.getLogger(__name__)
def pipeline_factory(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
pipeline = local_conf[CONF.auth_strategy]
if not CONF.api_rate_limit:
limit_name = CONF.auth_strategy + '_nolimit'
pipeline = local_conf.get(limit_name, pipeline)
pipeline = pipeline.split()
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
class InjectContext(base_wsgi.Middleware):
"""Add a 'cinder.context' to WSGI environ."""
def __init__(self, context, *args, **kwargs):
self.context = context
super(InjectContext, self).__init__(*args, **kwargs)
@webob.dec.wsgify(RequestClass=base_wsgi.Request)
def __call__(self, req):
req.environ['cinder.context'] = self.context
return self.application
class CinderKeystoneContext(base_wsgi.Middleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify(RequestClass=base_wsgi.Request)
def __call__(self, req):
user_id = req.headers.get('X_USER')
user_id = req.headers.get('X_USER_ID', user_id)
if user_id is None:
LOG.debug("Neither X_USER_ID nor X_USER found in request")
return webob.exc.HTTPUnauthorized()
# get the roles
roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')]
if 'X_TENANT_ID' in req.headers:
# This is the new header since Keystone went to ID/Name
project_id = req.headers['X_TENANT_ID']
else:
# This is for legacy compatibility
project_id = req.headers['X_TENANT']
project_name = req.headers.get('X_TENANT_NAME')
req_id = req.environ.get(request_id.ENV_REQUEST_ID)
# Get the auth token
auth_token = req.headers.get('X_AUTH_TOKEN',
req.headers.get('X_STORAGE_TOKEN'))
# Build a context, including the auth_token...
remote_address = req.remote_addr
service_catalog = None
if req.headers.get('X_SERVICE_CATALOG') is not None:
try:
catalog_header = req.headers.get('X_SERVICE_CATALOG')
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
explanation=_('Invalid service catalog json.'))
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
project_name=project_name,
roles=roles,
auth_token=auth_token,
remote_address=remote_address,
service_catalog=service_catalog,
request_id=req_id)
req.environ['cinder.context'] = ctx
return self.application
class NoAuthMiddleware(base_wsgi.Middleware):
"""Return a fake token if one isn't specified."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'X-Auth-Token' not in req.headers:
user_id = req.headers.get('X-Auth-User', 'admin')
project_id = req.headers.get('X-Auth-Project-Id', 'admin')
os_url = os.path.join(req.url, project_id)
res = webob.Response()
# NOTE(vish): This is expecting and returning Auth(1.1), whereas
# keystone uses 2.0 auth. We should probably allow
# 2.0 auth here as well.
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
res.headers['X-Server-Management-Url'] = os_url
res.content_type = 'text/plain'
res.status = '204'
return res
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
is_admin=True,
remote_address=remote_address)
req.environ['cinder.context'] = ctx
return self.application
|
[
"swethapts@gmail.com"
] |
swethapts@gmail.com
|
f6e400373186312a9fcf3e60bc466491e7ced87f
|
780b6cca690a213ac908b1cd5faef5366a18dc4e
|
/276_newbie_bite/save1_passed.py
|
7568b69b77be52f1d12ae46c2c3d5cec4cd7fba1
|
[] |
no_license
|
katkaypettitt/pybites-all
|
899180a588e460b343c00529c6a742527e4ea1bc
|
391c07ecac0d92d5dc7c537bcf92eb6c1fdda896
|
refs/heads/main
| 2023-08-22T16:33:11.171732
| 2021-10-24T17:29:44
| 2021-10-24T17:29:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
# Hint: Don't forget the 4 leading spaces to
# indicate your code is within the function.
a = 10
b = 5
def multiply_numbers(a, b):
return a * b
def enter_name():
username = input("What is your name?")
return username
|
[
"70788275+katrinaalaimo@users.noreply.github.com"
] |
70788275+katrinaalaimo@users.noreply.github.com
|
9de698aabcd24e0d8e7b125ea53adbb5167b3d8b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02394/s366374910.py
|
34d6b2cc6782e5002623f9419f9f8a358a2dd94e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
w, h, x, y, r = map(int, input().split())
if 0 <= (x-r) and (x+r) <= w and 0 <= (y-r) and (y+r) <= h:
print("Yes")
else:
print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e2f80ae63c842ab915e70054164ea7ef16f417b2
|
15fb62305a2fa0146cc84b289642cc01a8407aab
|
/Python/119-pascalTriangle2.py
|
ca82b9b5ce299755fd88d42d79285542b566e463
|
[] |
no_license
|
geniousisme/leetCode
|
ec9bc91864cbe7520b085bdab0db67539d3627bd
|
6e12d67e4ab2d197d588b65c1ddb1f9c52a7e047
|
refs/heads/master
| 2016-09-09T23:34:03.522079
| 2015-09-23T16:15:05
| 2015-09-23T16:15:05
| 32,052,408
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
from math import factorial
class Solution:
# @param {integer} rowIndex
# @return {integer[]}
def getRow(self, rowIndex):
res = []
f = factorial
n = rowIndex
for k in xrange(rowIndex + 1):
res.append(f(n) / f(k) / f(n - k))
return res
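# Each row entry is the binomial coefficient C(n, k) = n! / (k! * (n - k)!);
# e.g. Solution().getRow(4) == [1, 4, 6, 4, 1].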
if __name__ == '__main__':
s = Solution()
for i in xrange(10):
print s.getRow(i)
|
[
"chia-hao.hsu@aiesec.net"
] |
chia-hao.hsu@aiesec.net
|
790f7806b7f537150ccb4a127bd799627afad0e4
|
1f8344813458f669bdf77059220290a3b2a3cdd0
|
/tutorials-docs/thinking-in-coroutines/8_run_in_default_executor.py
|
81a53d28f3690104d9512aac1b837e073a2f0b81
|
[] |
no_license
|
gridl/asyncio-study-group
|
7c03e8640070ebe8d1103f27bc3c3da37a5a661f
|
1ba9cf90e21b5174518032d467e89526da219576
|
refs/heads/master
| 2020-07-02T07:03:12.364097
| 2017-12-26T20:55:09
| 2017-12-26T20:55:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import time
import datetime
import asyncio
def blocking_call(seconds):
print(seconds, datetime.datetime.now())
time.sleep(seconds)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.call_later(5, loop.stop)
for i in range(1,4):
        # by default concurrent.futures.ThreadPoolExecutor is used;
        # to get the default, pass executor=None
        # default number of threads:
        # number of processors on the machine, multiplied by 5
loop.run_in_executor(None, blocking_call, i)
try:
loop.run_forever()
finally:
loop.close()
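# A variant with an explicit executor (a sketch; ThreadPoolExecutor and
# run_in_executor are the standard concurrent.futures / asyncio APIs):
# from concurrent.futures import ThreadPoolExecutor
# pool = ThreadPoolExecutor(max_workers=3)
# loop.run_in_executor(pool, blocking_call, 1)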
|
[
"nataliya.samoylenko@gmail.com"
] |
nataliya.samoylenko@gmail.com
|
8ba3ca416a5d385c1158274f46e71ad3750148eb
|
e7af30370e277b459e1c49edcc0562d5b5c32abc
|
/Learning_ScikitLearn/Model/Linear_Classification/LogisticRegression_Classification.py
|
68bb53cef0d25d1f7959af186211991c7beda251
|
[] |
no_license
|
justgolikeme/My_MachineLearning
|
208ab766478662cf36ffa7f9202fed0ad6f0ad28
|
948a84684a2a6f1c9e613948ed246062468016bd
|
refs/heads/master
| 2022-05-13T05:02:48.488269
| 2020-01-03T07:27:50
| 2020-01-03T07:27:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,023
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/12/16 15:55
# @Author : Mr.Lin
'''
Linear models for classification

Linear models are also widely used for classification. Start with binary
classification, where the prediction uses the formula:

    ŷ = w[0] * x[0] + w[1] * x[1] + ... + w[p] * x[p] + b > 0

This looks very similar to the linear regression formula, but instead of
returning the weighted sum of the features we threshold it at zero: if the
value is below 0 we predict class -1, and if it is above 0 we predict class
+1. This prediction rule is common to all linear models for classification;
the many approaches differ in how they find the coefficients (w) and the
intercept (b).

For linear regression models the output ŷ is a linear function of the
features: a line, a plane, or a hyperplane (for higher-dimensional data).
For linear classification models the decision boundary is a linear function
of the input; in other words, a (binary) linear classifier separates two
classes with a line, a plane, or a hyperplane, as the examples in this
section will show.

The algorithms for learning linear models differ in two respects:
• how they measure how well a given combination of coefficients and
  intercept fits the training data;
• whether regularization is used, and which kind.

Different algorithms measure "fitting the training set well" differently.
For technical mathematical reasons it is impossible to tune w and b so as to
minimize the number of misclassifications directly; for our purposes, and
for many applications, the choice in the first point (the loss function)
matters little.

The two most common linear classification algorithms are logistic regression
(implemented in linear_model.LogisticRegression) and the linear support
vector machine (svm.LinearSVC, where SVC stands for support vector
classifier). Although its name contains "regression", LogisticRegression is
a classification algorithm, not a regression algorithm, and should not be
confused with LinearRegression.
'''
from sklearn.model_selection import cross_val_predict, cross_val_score  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.linear_model import LogisticRegression
from Learning_ScikitLearn.Model.Linear_Classification.Data_Source import X_test,X_train,y_train,y_test,data_y,data_X
# logreg = LogisticRegression().fit(X_train, y_train)
# print("Training set score: {:.3f}".format(logreg.score(X_train, y_train)))
# print("Test set score: {:.3f}".format(logreg.score(X_test, y_test)))
# Training set score: 0.955
# Test set score: 0.958
#
# [0.94827586 0.9137931 0.92982456 0.94736842 0.96491228 0.98245614
# 0.94736842 0.94642857 0.96428571 0.96428571]
# print("")
# print(cross_val_score(logreg, data_X, data_y, cv=10))
def test_C_Parameter():
C = [0.1,1,10]
for c in C:
logreg = LogisticRegression(C=c)
logreg.fit(X_train,y_train)
print("C为:{}下的分数:{}\n".format(c,cross_val_score(logreg, data_X, data_y, cv=10)))
test_C_Parameter()
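# The thresholded rule from the docstring can be checked against sklearn
# directly (a sketch; assumes `import numpy as np`; decision_function, coef_
# and intercept_ are standard scikit-learn API):
# logreg = LogisticRegression(C=1).fit(X_train, y_train)
# scores = (X_test @ logreg.coef_.T + logreg.intercept_).ravel()
# assert np.allclose(scores, logreg.decision_function(X_test))
# preds = (scores > 0).astype(int)  # class 1 where the score is positive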
|
[
"2669093302@qq.com"
] |
2669093302@qq.com
|
7ad47d35b8b6d618120876ea81cee10cd4498f0f
|
329b48089c64ebefe78d52f1c71c73bdadadd4b4
|
/ML/m02_3_xor.py
|
f054586220c6b7e9e2f5ec6da088dfde56b25a5d
|
[] |
no_license
|
variablejun/keras__R
|
7f854570952ed97c48715047015786d873e512cb
|
9faf4814b46cda1ac0ddbf2a2f8236fa0394f144
|
refs/heads/main
| 2023-07-13T19:32:25.950500
| 2021-08-22T18:26:52
| 2021-08-22T18:26:52
| 398,870,548
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
from sklearn.svm import LinearSVC
import numpy as np
from sklearn.metrics import accuracy_score
#1 data
x_data = [[0,0],[0,1],[1,0],[1,1]]
y_data = [0,1,1,0]
#2 model
model = LinearSVC()
#3 fit
model.fit(x_data,y_data)
#4 evaluate
y_predict = model.predict(x_data)
print(x_data, ' predictions: ', y_predict)
results= model.score(x_data, y_data)
print('score : ',results)
acc = accuracy_score(y_data,y_predict)
print('acc : ',acc)
'''
[[0, 0], [0, 1], [1, 0], [1, 1]] predictions:  [0 0 0 0]
score :  0.5
acc :  0.5
[[0, 0], [0, 1], [1, 0], [1, 1]] predictions:  [1 1 1 1]
score :  0.5
acc :  0.5
'''
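# XOR is not linearly separable, which is why both runs above score 0.5. A
# kernelized SVM separates it (a sketch; sklearn.svm.SVC with its default RBF
# kernel is standard scikit-learn API):
# from sklearn.svm import SVC
# kmodel = SVC(kernel='rbf').fit(x_data, y_data)
# print(kmodel.score(x_data, y_data))  # 1.0 on these four points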
|
[
"crescendo0217@gmail.com"
] |
crescendo0217@gmail.com
|
9d79f133ae46df0a2a814949bc56bb9b67709332
|
92754bb891a128687f3fbc48a312aded752b6bcd
|
/Algorithms/Python3.x/836-Rectangle_Overlap.py
|
109710852b3db1879f46f641e56714e64efbeca6
|
[] |
no_license
|
daidai21/Leetcode
|
ddecaf0ffbc66604a464c3c9751f35f3abe5e7e5
|
eb726b3411ed11e2bd00fee02dc41b77f35f2632
|
refs/heads/master
| 2023-03-24T21:13:31.128127
| 2023-03-08T16:11:43
| 2023-03-08T16:11:43
| 167,968,602
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
# Runtime: 32 ms, faster than 89.53% of Python3 online submissions for Rectangle Overlap.
# Memory Usage: 13.9 MB, less than 8.33% of Python3 online submissions for Rectangle Overlap.
class Solution:
def isRectangleOverlap(self, rec1: List[int], rec2: List[int]) -> bool:
return rec1[0] < rec2[2] and rec2[0] < rec1[2] and rec1[1] < rec2[3] and rec2[1] < rec1[3]
"""
(left1, right1), (left2, right2)
Meet the requirements of the topic Equivalent to :
left1 < x < right1 && left2 < x < right2
left1 < x < right2 && left2 < x < right1
left1 < right2 && left2 < right1
"""
|
[
"daidai4269@aliyun.com"
] |
daidai4269@aliyun.com
|
bfb211f64cb26ced576000456975b8ac4e62ba43
|
dab869acd10a3dc76e2a924e24b6a4dffe0a875f
|
/Laban/build/bdist.win32/winexe/temp/numpy.core.operand_flag_tests.py
|
abe53bfc427cda30a4fdef6d870c6ffe58b6c013
|
[] |
no_license
|
ranBernstein/Laban
|
d82aff9b0483dd007e03a06e51f7d635f62ed05d
|
54c88afa9493deacbdd182904cc5d180ecb208b4
|
refs/heads/master
| 2021-01-23T13:17:51.777880
| 2017-02-14T09:02:54
| 2017-02-14T09:02:54
| 25,508,010
| 3
| 1
| null | 2017-02-14T09:02:55
| 2014-10-21T07:16:01
|
Tcl
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'numpy.core.operand_flag_tests.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
__load()
del __load
|
[
"bernstein.ran@gmail.com"
] |
bernstein.ran@gmail.com
|
cde96ba8bed0f8a27d9a27fc09c79f90b37b0093
|
4781d9293b59a5072647bb179195b143c60621bd
|
/백준/3190_뱀/3190_뱀.py
|
466985fd6c7408c5d7d548c56da8c4c1f93da5da
|
[] |
no_license
|
chriskwon96/Algorithm_codes
|
bf98131f66ca9c091fe63db68b220527800069c9
|
edb7b803370e87493dad4a38ee858bb7bb3fd31d
|
refs/heads/master
| 2023-08-15T18:48:26.809864
| 2021-10-12T13:43:21
| 2021-10-12T13:43:21
| 387,803,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
di = [0, -1, 0, +1]
dj = [+1, 0, -1, 0]
N = int(input())
matrix = [[0]*N for _ in range(N)]
K = int(input())
for _ in range(K):  # mark apple positions with 1
i, j = map(int, input().split())
matrix[i-1][j-1] = 1
L = int(input())
q = [(0,0)]  # snake body, head first
X1, k, cnt = 0, 0, 0
flag = 1
for _ in range(L):
X, C = input().split()
for _ in range(int(X)-X1):
head = q[0]
cnt += 1
n_x, n_y = head[0]+di[k], head[1]+dj[k]
        if 0<=n_x<N and 0<=n_y<N and ((n_x, n_y) not in q):  # next cell is on the board and not part of the body
            q.insert(0, (n_x, n_y))  # push the new head onto q
            if matrix[n_x][n_y]:  # if it is an apple
                matrix[n_x][n_y] = 0  # remove the apple
            else:
                q.pop()  # no apple: shrink the tail
        else:  # the game ends (hit a wall or the body)
print(cnt)
flag = 0
break
if not flag:
break
X1 = int(X)
if C == 'L':
k = (k+1)%4
else:
k = (k-1)%4
if flag:  # the game did not end while consuming the input
head = q[0]
n_x, n_y = head[0]+di[k], head[1]+dj[k]
while 0<=n_x<N and 0<=n_y<N and ((n_x, n_y) not in q):
cnt += 1
q.insert(0, (n_x, n_y))
        if matrix[n_x][n_y]:  # if it is an apple
            matrix[n_x][n_y] = 0  # remove the apple
        else:
            q.pop()  # no apple: shrink the tail
n_x, n_y = n_x + di[k], n_y + dj[k]
print(cnt+1)
|
[
"chriskwon96@naver.com"
] |
chriskwon96@naver.com
|
a7556063e49aff2dda7e2b3cc964e43037048d34
|
6cb1d8f1416af7b7c5c83ab35cb6928ea9955aff
|
/ch07/rnnlm_gen.py
|
a30f1107227f403e0e15f919e3f9b09e39193409
|
[] |
no_license
|
lee-saint/practice-nlp
|
f68ccc3140f725f3edcd7048c324b847583b7f20
|
19003fcd5f55f4f110417a3950a32bb5fba1850c
|
refs/heads/master
| 2020-12-01T20:05:15.014495
| 2020-01-21T09:22:18
| 2020-01-21T09:22:18
| 230,750,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,799
|
py
|
import numpy as np
from common.functions import softmax
from ch06.RNNLM import Rnnlm
from ch06.better_rnnlm import BetterRnnlm
from dataset import ptb
class RnnlmGen(Rnnlm):
def generate(self, start_id, skip_ids=None, sample_size=100):
word_ids = [start_id]
x = start_id
while len(word_ids) < sample_size:
x = np.array(x).reshape(1, 1)
score = self.predict(x)
p = softmax(score.flatten())
sampled = np.random.choice(len(p), size=1, p=p)
if (skip_ids is None) or (sampled not in skip_ids):
x = sampled
word_ids.append(int(x))
return word_ids
class BetterRnnlmGen(BetterRnnlm):
def generate(self, start_id, skip_ids=None, sample_size=100):
word_ids = [start_id]
x = start_id
while len(word_ids) < sample_size:
x = np.array(x).reshape(1, 1)
score = self.predict(x).flatten()
p = softmax(score).flatten()
sampled = np.random.choice(len(p), size=1, p=p)
if (skip_ids is None) or (sampled not in skip_ids):
x = sampled
word_ids.append(int(x))
return word_ids
if __name__ == '__main__':
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)
model = RnnlmGen()
model.load_params('../ch06/Rnnlm.pkl')
    # set the start word and the words to skip
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]
    # generate a sentence
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)
better_model = BetterRnnlmGen()
better_model.load_params('../ch06/BetterRnnlm.pkl')
    # set the start word and the words to skip
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]
    # generate a sentence
word_ids = better_model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)
better_model.reset_state()
model.reset_state()
start_words = 'the meaning of life is'
start_ids = [word_to_id[w] for w in start_words.split(' ')]
for x in start_ids[:-1]:
x = np.array(x).reshape(1, 1)
model.predict(x)
word_ids = model.generate(start_ids[-1], skip_ids)
word_ids = start_ids[:-1] + word_ids
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print('-' * 50)
print(txt)
|
[
"plutorian131@gmail.com"
] |
plutorian131@gmail.com
|
f45d517a51288fdf1af81238bef427c053fc9fbe
|
f47863b3a595cbe7ec1c02040e7214481e4f078a
|
/plugins/scan/libsys/1530.py
|
7d4393ead3d8ab208722872e6653f54514040048
|
[] |
no_license
|
gobiggo/0bscan
|
fe020b8f6f325292bda2b1fec25e3c49a431f373
|
281cf7c5c2181907e6863adde27bd3977b4a3474
|
refs/heads/master
| 2020-04-10T20:33:55.008835
| 2018-11-17T10:05:41
| 2018-11-17T10:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
#!/usr/bin/python
#-*- encoding:utf-8 -*-
# title: Huiwen libsys library management system sensitive information disclosure
# (`curl` and `security_warning` appear to be provided by the hosting scanner framework)
#http://www.wooyun.org/bugs/wooyun-2010-0125785
def assign(service, arg):
if service == "libsys":
return True, arg
def audit(arg):
payload = 'include/config.properties'
url = arg + payload
code, head,res, errcode, _ = curl.curl2(url)
    if code == 200 and all(k in res for k in ('host', 'port', 'user', 'password')):
security_warning(url)
if __name__ == '__main__':
audit(assign('libsys', 'http://www.njjnlib.cn:8080/')[1])
audit(assign('libsys', 'http://202.201.163.2:8080/')[1])
|
[
"zer0i3@aliyun.com"
] |
zer0i3@aliyun.com
|
3a0aa4f6f46d50f9055d2000d1b39488f5c19f87
|
b341a8d120737297aa8fd394a23633dac9b5ccda
|
/accounts/migrations/0007_auto_20210122_1129.py
|
c7476e71ff6f0746f30db617c468bd59bbe23d1c
|
[] |
no_license
|
Minari766/disney_side_stories
|
16d97cb02bf00aa5439d59f753abb9a4706a30aa
|
aa2d88b1b0fdd87a27f41318bd3ec7352229b6ff
|
refs/heads/main
| 2023-08-15T07:03:16.922579
| 2021-10-03T07:47:22
| 2021-10-03T07:47:22
| 306,496,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# Generated by Django 2.2 on 2021-01-22 02:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20210122_0127'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='icon',
field=models.ImageField(blank=True, null=True, upload_to='images', verbose_name='アイコン'),
),
]
|
[
"mina3.ryu0728@gmail.com"
] |
mina3.ryu0728@gmail.com
|
436645c364f840999119d1e57184125dbceeca14
|
1f006f0c7871fcde10986c4f5cec916f545afc9f
|
/apps/ice/plugins/oxml/oxml_wordNumbering_test.py
|
9d73299a89601ac0dd3e3d023fcdc93ea3e7a208
|
[] |
no_license
|
ptsefton/integrated-content-environment
|
248b8cd29b29e8989ec1a154dd373814742a38c1
|
c1d6b5a1bea3df4dde10cb582fb0da361dd747bc
|
refs/heads/master
| 2021-01-10T04:46:09.319989
| 2011-05-05T01:42:52
| 2011-05-05T01:42:52
| 36,273,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,070
|
py
|
#!/usr/bin/env python
#
# Copyright (C) 2010 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from unittest import TestCase
import sys
from oxml_wordNumbering import WordNumbering
testFile = "testData/numbering.xml"
class WordNumberingTest(TestCase):
def setUp(self):
f = open(testFile, "rb")
self.wordNumberingXmlStr = f.read()
f.close()
def tearDown(self):
pass
def testGetNumLevelInfo(self):
#word/numbering.xml
wordNum = WordNumbering(self.wordNumberingXmlStr)
numId = "1"
level = "0"
info = wordNum.getNumLevelInfo(numId, level)
expected = {'leftIndent': u'720', 'start': u'1', 'jc': u'left',
'text': u'%1.', 'format': u'decimal'}
self.assertEquals(info, expected)
def runUnitTests(locals):
print "\n\n\n\n"
if sys.platform=="cli":
import clr
import System.Console
System.Console.Clear()
print "---- Testing under IronPython ----"
else:
print "---- Testing ----"
# Run only the selected tests
args = list(sys.argv)
sys.argv = sys.argv[:1]
args.pop(0)
runTests = args
runTests = [ i.lower().strip(", ") for i in runTests]
runTests = ["test"+i for i in runTests if not i.startswith("test")] + \
[i for i in runTests if i.startswith("test")]
if runTests!=[]:
testClasses = [i for i in locals.values() \
if hasattr(i, "__bases__") and \
(TestCase in i.__bases__)]
testing = []
for x in testClasses:
l = dir(x)
l = [ i for i in l if i.startswith("test") and callable(getattr(x, i))]
for i in l:
if i.lower() not in runTests:
delattr(x, i)
else:
testing.append(i)
x = None
num = len(testing)
if num<1:
print "No selected tests found! - %s" % str(args)[1:-1]
elif num==1:
print "Running selected test - %s" % (str(testing)[1:-1])
else:
print "Running %s selected tests - %s" % (num, str(testing)[1:-1])
from unittest import main
main()
if __name__=="__main__":
runUnitTests(locals())
sys.exit(0)
|
[
"raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05"
] |
raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05
|
9330cd3f6095c574c0fa566a8d69be0fec19b834
|
a62a87ad976e3d35ea7879671190faf950ebaf3b
|
/scrapys/t.py
|
47ae7f7a675a471d9db25b8bb6a431b20fa33406
|
[] |
no_license
|
YangXiaoo/Django-web
|
144c8c1800d2a67bf8d1d203210aa351d31e8fb3
|
97903f309234fd1421a19a52a083f214172d6c79
|
refs/heads/master
| 2020-03-24T11:29:20.296017
| 2019-01-20T14:54:16
| 2019-01-20T14:54:16
| 142,687,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
# -*- coding: utf-8 -*-
import re
import urllib2
import pandas as pd
# fetch the page source
def get_content(page):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8'}
url ='http://search.51job.com/list/000000,000000,0000,00,9,99,python,2,'+ str(page)+'.html'
req = urllib2.Request(url,headers=headers)
res = urllib2.urlopen(req)
html = res.read()
    html = unicode(html, "gbk").encode("utf8")
    return html
def get(html):
reg = re.compile(r'class="t1 ">.*? href="(.*?)".*? <a target="_blank" title="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)" href="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*? <span class="t5">(.*?)</span>',re.S)
items=re.findall(reg,html)
return items
def info_get(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8'}
req = urllib2.Request(url,headers=headers)
res = urllib2.urlopen(req)
html = res.read()
html = unicode(html, "gbk").encode("utf8")
reg = re.compile(r'<span class="sp4"><em class="(.*?)"></em>(.*?)</span>',re.S)
based_info = re.findall(reg,html)
reg_p = re.compile(r'<span class="el">(.*?)</span>',re.S)
kind = re.findall(reg_p,html)
return based_info,kind
def address(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8'}
req = urllib2.Request(url,headers=headers)
res = urllib2.urlopen(req)
html = res.read()
html = unicode(html, "gbk").encode("utf8")
reg_a = re.compile(r'<div class="tBorderTop_box bmsg">.*?</span>(.*?)</p>',re.S)
address = re.findall(reg_a,html)
return address
final = []
for j in range(1,2):
print("正在爬取第"+str(j)+"页数据...")
try:
html=get_content(j)
for i in get(html):
result = {}
with open ('51job.txt','a') as f:
f.write(i[0]+'\t'+i[1]+'\t'+i[2]+'\t'+i[3]+'\t'+i[4]+'\t'+i[5]+'\t'+i[6]+'\n')
f.close()
result['info_link'] = i[0]
info,kind = info_get(i[0])
count = 1
for n in info:
if count == 1:
result['experience'] = n[1]
count += 1
elif count == 2:
result['educational'] = n[1]
count += 1
else:
break
                result['work_type'] = kind[0]
result['name'] = i[1]
result['company'] = i[2]
result['company_link'] = i[3]
result['work_place'] = i[4]
result['salary'] = i[5]
ad = address(i[3])
result['address'] = ad
result['publish_time'] = i[6]
final.append(result)
except:
pass
df = pd.DataFrame(final)
df.to_csv('51job-data_analysis.csv', mode = 'a',encoding = 'utf8')
|
[
"33798487+YangXiaoo@users.noreply.github.com"
] |
33798487+YangXiaoo@users.noreply.github.com
|
91cf1bbafb30679fda22289ccab052d7605c72e6
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/typical90/typical90_cf.py
|
f610d0f1035ed452bc7992ce2b7ed0d6160b139f
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
# https://atcoder.jp/contests/typical90/tasks/typical90_cf
# # def input(): return sys.stdin.readline().rstrip()
# # input = sys.stdin.readline
# from numba import njit
# from functools import lru_cache
# import sys
# input = sys.stdin.buffer.readline
# sys.setrecursionlimit(10 ** 7)
N = int(input())
S = input()
ans = 0
last = [-1]*2
for i in range(N):
if S[i]=='o':
last[0] = i
else:
last[1] = i
if last[0]==-1 or last[1]==-1:
continue
ans += min(last[0], last[1]) + 1
# print(ans, last)
print(ans)
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
# import sys
# it = map(int, sys.stdin.buffer.read().split())
# N = next(it)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# def main():
# @lru_cache(None)
# def dfs():
# return
# return
# main()
|
[
"hironobukawaguchi3@gmail.com"
] |
hironobukawaguchi3@gmail.com
|
b9edcccc00c10227f91be8740e4d744c0cea4347
|
2b8047e9e73a2f6fd43897cff19cb7e7c7c464d4
|
/docssrc/source/conf.py
|
5d48fbeb3fa4a5a1f8afc2bbac54d3f8fcfb3638
|
[
"MIT"
] |
permissive
|
Peilonrayz/envee
|
548fe08330a3b43bee5da1d64a0e406c781b990e
|
66f5b6b1ff7f5966be794e1e3878418c560c1f65
|
refs/heads/master
| 2021-01-09T13:35:40.946529
| 2020-02-21T20:58:27
| 2020-02-21T20:58:27
| 242,321,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
import datetime
import pathlib
import sys
try:
import ConfigParser as configparser
except ImportError:
import configparser
FILE_PATH = pathlib.Path(__file__).absolute()
# Add documentation for tests
TLD = FILE_PATH.parent.parent.parent
sys.path.insert(0, str(TLD))
config = configparser.ConfigParser()
config.read(TLD / "setup.cfg")
project = "envee"
author = "Peilonrayz"
copyright = f"{datetime.datetime.now().year}, {author}"
release = config.get("src", "version")
master_doc = "index"
templates_path = ["_templates"]
exclude_patterns = []
doctest_global_setup = f"""
import {project}
"""
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.githubpages",
"sphinx.ext.intersphinx",
"sphinx_autodoc_typehints",
"sphinx_rtd_theme",
]
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
set_type_checking_flag = True
|
[
"peilonrayz@gmail.com"
] |
peilonrayz@gmail.com
|
fbdea07de6f18420b99a57b116c79adf1f0463a1
|
eac52a8ae7c539acedaedf8744bd8e20172f0af6
|
/general/decode_ways.py
|
33c70cc775b271c21d0bb448684acae24e9ffa65
|
[] |
no_license
|
mshekhar/random-algs
|
3a0a0f6e6b21f6a59ed5e1970b7a2bc2044e191f
|
7c9a8455f49027a754038b23aaa2df61fe5397ca
|
refs/heads/master
| 2020-03-26T16:29:42.694785
| 2019-07-18T20:57:55
| 2019-07-18T20:57:55
| 145,105,593
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,895
|
py
|
# A message containing letters from A-Z is being encoded to numbers using the following mapping:
#
# 'A' -> 1
# 'B' -> 2
# ...
# 'Z' -> 26
# Given a non-empty string containing only digits, determine the total number of ways to decode it.
#
# Example 1:
#
# Input: "12"
# Output: 2
# Explanation: It could be decoded as "AB" (1 2) or "L" (12).
# Example 2:
#
# Input: "226"
# Output: 3
# Explanation: It could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2 2 6).
# if not single_digit:
# all_single_possible[c] = False
# else:
# all_single_possible[c] = all_single_possible[c - 1] and all_single_possible[c]
# if c - 1 >= 0 and num_decodings[c - 1] > 0:
# num_decodings[c] = num_decodings[c - 1]
#
# if c - 1 >= 0:
# double_digit = self.get_decoding_count(s[c - 1] + i)
# if double_digit:
# print s[c - 1] + i, double_digit, num_decodings[c - 2] + int(all_single_possible[c - 2])
# if c - 2 >= 0 and num_decodings[c - 2] + int(all_single_possible[c - 2]) > 0:
# num_decodings[c] += num_decodings[c - 2] + 1
# elif c == 1:
# num_decodings[c] += 1
class Solution(object):
def get_decoding_count(self, s):
if not s.startswith('0') and 1 <= int(s) <= 26:
return 1
return 0
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
num_decodings = [0] * len(s)
all_single_possible = True
for c, i in enumerate(s):
single_digit = self.get_decoding_count(i)
double_digit = 0
if c - 1 >= 0:
double_digit = self.get_decoding_count(s[c - 1] + i)
if not single_digit:
all_single_possible = False
if single_digit + double_digit > 0:
if single_digit:
num_decodings[c] = num_decodings[c - 1]
if all_single_possible and not num_decodings[c]:
num_decodings[c] = 1
if double_digit:
if c - 2 >= 0 and num_decodings[c - 2] > 0:
num_decodings[c] += num_decodings[c - 2]
elif c == 1:
num_decodings[c] += 1
# add one for all single decodings
# print num_decodings, all_single_possible
return num_decodings[-1]
print Solution().numDecodings("12"), 2
print Solution().numDecodings("226"), 3
print Solution().numDecodings("10"), 1
print Solution().numDecodings("103"), 1
print Solution().numDecodings("1032"), 1
print Solution().numDecodings("10323"), 1
print Solution().numDecodings("012"), 0
print Solution().numDecodings("110"), 1
print Solution().numDecodings("1212"), 5
# 1 2 1
# 12 1
# 1 21
#
# 1 2 1 2
#
# 12 1 2
# 12 12
#
# 1 21 2
# 1 2 12
# for i in ["0", "10", "10", "103", "1032", "10323"]:
# print(Solution().numDecodings(i))
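# For comparison, a compact two-variable DP over the same recurrence (a
# sketch, not the submitted solution; assumes s contains only digits):
# def num_decodings(s):
#     if not s or s[0] == '0':
#         return 0
#     prev, cur = 1, 1                  # ways to decode s[:0] and s[:1]
#     for i in range(1, len(s)):
#         nxt = cur if s[i] != '0' else 0       # take s[i] alone
#         if '10' <= s[i-1:i+1] <= '26':        # take s[i-1:i+1] as one letter
#             nxt += prev
#         prev, cur = cur, nxt
#     return cur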
|
[
"mayank@moengage.com"
] |
mayank@moengage.com
|
07a345dba33878564304037a609dba06de767c0c
|
36c00fe2afff4818c937e312ce0c6a79f35e2a77
|
/7-kyu/happy-birthday,-darling!/python/solution.py
|
ab407ea9bcebd79b2d18c37ed24e86ac2368a137
|
[] |
no_license
|
p-lots/codewars
|
0a67b6ee4c91180ff78c648421b9d2d64463ddc3
|
535faeee475c6b398124d6f5002b0e111406e8bb
|
refs/heads/master
| 2023-08-23T22:14:33.635011
| 2023-08-23T13:30:37
| 2023-08-23T13:30:37
| 195,320,309
| 0
| 0
| null | 2023-05-09T19:25:50
| 2019-07-05T01:40:15
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
def womens_age(n):
    base = n // 2  # n // 2 == (n - 1) // 2 for odd n, so one expression covers both cases
new_n = 20 if n % 2 == 0 else 21
return f"{n}? That's just {new_n}, in base {base}!"
|
[
"paul.calotta@gmail.com"
] |
paul.calotta@gmail.com
|
d0c7805015d0990484841901a310a10805e00cf6
|
39be02fe4f8e8362a7acc005f3e30dd6fe47990e
|
/newdata/oylereader.py
|
5ebdae4fcc852f8c821d74ed40ee95c9b06e915b
|
[] |
no_license
|
seferlab/geneexpress
|
e2f6fdaa49e40cd48d0572cd9ddb5d2f45566adb
|
ac35bde5ba52d24981ece74e532f46bbfff9019e
|
refs/heads/master
| 2022-12-19T08:33:16.925160
| 2020-09-29T13:51:30
| 2020-09-29T13:51:30
| 299,619,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
import os
import sys
import math
fname1 = "127 LCM time course Data Not normalized.txt"
fname2 = "127 LCM time course Quantile Normalized logbased 2 transformed.txt"
with open(fname1,"r") as infile:
for line in infile:
line = line.rstrip()
vals = line.split("\r")
splitted = vals[1].split("\t")
items1 = [float(splitted[tind]) for tind in xrange(1,len(splitted))]
with open(fname2,"r") as infile:
for line in infile:
line = line.rstrip()
vals = line.split("\r")
splitted = vals[1].split("\t")
items2 = [float(splitted[tind]) for tind in xrange(1,len(splitted))]
print items1[0:20]
print [math.log(titem,2.0) for titem in items1[0:10]]
print [math.log(titem+1.0,2.0) for titem in items1[0:10]]
print items2[0:20]
print items1[8:20]
|
[
"70752445+seferlab@users.noreply.github.com"
] |
70752445+seferlab@users.noreply.github.com
|
f0b3e6949b78c44d35bdedc65dcdd7d848eae7f3
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/10/33/17.py
|
b549582d467c3879831e6f099d36ecf18d3abe31
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,617
|
py
|
from itertools import count
def board(rows):
n = len(rows[0]) * 4
return [map(int, '{0:0{1}b}'.format(int(row, 16), n)) for row in rows]
def squares(board):
m, n = len(board), len(board[0])
#sq = {}
for r in xrange(m):
for c in xrange(n):
if board[r][c] == 2: continue
ns = findsquare(board, r, c)
yield ns, -r, -c
#fill(board, r, c, ns)
#sq[ns] = sq.get(ns, 0) + 1
#return sq
def solve(board):
result = {}
m, n = len(board), len(board[0])
while 1:
try:
n, r, c = max(squares(board))
except ValueError:
return result
result[n] = result.get(n, 0) + 1
fill(board, -r, -c, n)
def fill(board, r, c, n):
for i in xrange(r, r+n):
for j in xrange(c, c+n):
board[i][j] = 2
def findsquare(board, r, c):
x = board[r][c]
try:
for s in count(1):
for j in range(c, c+s+1):
x = 1 - x
if board[r+s][j] != x:
return s
for i in range(r+s-1, r-1, -1):
x = 1 - x
if board[i][c+s] != x:
return s
except IndexError:
return s
if __name__ == '__main__':
import sys
rl = iter(sys.stdin).next
for case in range(1, int(rl())+1):
M,N = map(int, rl().split())
lines = [rl().strip() for _ in range(M)]
b = board(lines)
sq = solve(b)
print 'Case #%d: %s' % (case, len(sq))
for k, v in sorted(sq.items(), reverse=True):
print k,v
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
238eb7c3a48a487377b765829fcb5eee86416ff5
|
24cf311c53c29e4e332cea01ee4de8196253a7b7
|
/accounts/urls.py
|
ca8992d712669175ee1ef3193b0ea2d6ab348261
|
[] |
no_license
|
apengok/vsicravdoa
|
d017fe0c6a8606ef7bb74739354de1a2767b2a8a
|
e424b94007731189c2f14513798f2a9e9a45ba4c
|
refs/heads/master
| 2020-03-10T23:07:48.145583
| 2018-06-01T09:18:25
| 2018-06-01T09:18:25
| 129,634,250
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from django.conf.urls import url
from .views import (
AccountHomeView,
# AccountEmailActivateView,
UserDetailUpdateView
)
app_name = 'account'
urlpatterns = [
url(r'^$', AccountHomeView.as_view(), name='home'),
url(r'^details/$', UserDetailUpdateView.as_view(), name='user-update'),
# url(r'history/products/$', UserProductHistoryView.as_view(), name='user-product-history'),
# url(r'^email/confirm/(?P<key>[0-9A-Za-z]+)/$',
# AccountEmailActivateView.as_view(),
# name='email-activate'),
# url(r'^email/resend-activation/$',
# AccountEmailActivateView.as_view(),
# name='resend-activation'),
]
# account/email/confirm/asdfads/ -> activation view
|
[
"apengok@163.com"
] |
apengok@163.com
|
a20abcac99856f482d5e3f7ec4d5c5c93878dacd
|
98f505e8275ed888818d8d6f77d27a9c275b55d8
|
/face.py
|
a6d86359d258eda63f01fe71ba8a00892e28e706
|
[] |
no_license
|
EHwooKim/telegram
|
13ac0afbd4ee5f91aa81b557183e9d8143fb1315
|
034ae64fa6283720fd55362b1b763cb3497ce4fc
|
refs/heads/master
| 2022-12-11T19:53:23.942523
| 2019-07-12T07:41:29
| 2019-07-12T07:41:29
| 196,533,974
| 0
| 0
| null | 2022-12-08T05:52:25
| 2019-07-12T07:48:30
|
Python
|
UTF-8
|
Python
| false
| false
| 959
|
py
|
import pprint
import requests
from decouple import config
# 0. the image file
file_url = 'https://api.telegram.org/file/bot823224197:AAFwM03Ie4P8dBH45aKI75sMO0okZpcIqic/photos/file_2.jpg'
response = requests.get(file_url, stream=True)
image = response.raw.read()
# 1. Naver API credentials
naver_client_id = config('NAVER_CLIENT_ID')
naver_client_secret = config('NAVER_CLIENT_SECRET')
# 2. endpoint URL
naver_url = 'https://openapi.naver.com/v1/vision/celebrity'
# 3. send the request via POST
headers = {
'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
response = requests.post(naver_url,
headers=headers,
files={'image':image}).json()
best = response.get('faces')[0].get('celebrity')
if best.get('confidence') > 0.2:
text = f"{best.get('confidence')*100}%만큼 {best.get('value')}를 닮으셨네요~"
else :
    text = 'Not a person'
print(text)
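# Note: the bot token embedded in file_url above is a live credential; in the
# same spirit as the Naver keys it would normally be read via config(), e.g.
# (hypothetical variable names):
# token = config('TELEGRAM_BOT_TOKEN')
# file_url = f'https://api.telegram.org/file/bot{token}/photos/file_2.jpg'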
|
[
"ehwoo0707@naver.com"
] |
ehwoo0707@naver.com
|
52860b1da6917fcd830a4b178bd3d28e8c60bf70
|
99dfd25f07b748e0b9b04ac300e135dc20570e1c
|
/cart/urls.py
|
1731b6a31f6bbea06b4fcbb367549265a3127dd2
|
[] |
no_license
|
suipingooi/tgc10-django-deploy-checkpointPAUL
|
1ec45e7135263703ff3472216f8fdcfdb379d7f3
|
46b62cdce8396c2b0cc57ec1fca4e77c0eee1e1a
|
refs/heads/master
| 2023-04-16T05:11:20.535480
| 2021-04-14T12:02:43
| 2021-04-14T12:02:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
from django.urls import path
import cart.views
urlpatterns = [
path('add/<book_id>', cart.views.add_to_cart,
name="add_to_cart"),
path('', cart.views.view_cart, name='view_cart'),
path('remove/<book_id>', cart.views.remove_from_cart,
name="remove_from_cart"),
path('update_quantity/<book_id>', cart.views.update_quantity,
name="update_cart_quantity")
]
|
[
"chorkunxin@yahoo.com"
] |
chorkunxin@yahoo.com
|
2ca6a41f705f6ef795834db9d2bcbec1c4e7da99
|
9d0195aa83cc594a8c61f334b90375961e62d4fe
|
/JTTest/SL7/CMSSW_10_2_15/src/miniAODJobs600toInf/nano4.py
|
1366df4f33cc6ad0c152e7cd8e25ea82efda4cf6
|
[] |
no_license
|
rsk146/CMS
|
4e49592fc64f6438051544c5de18598db36ed985
|
5f8dab8c59ae556598b9747b52b88205fffc4dbe
|
refs/heads/master
| 2022-12-01T03:57:12.126113
| 2020-08-04T03:29:27
| 2020-08-04T03:29:27
| 284,863,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,363
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --mc --eventcontent NANOAODSIM --datatier NANOAODSIM --no_exec --conditions 102X_upgrade2018_realistic_v19 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein file:cms/xaastorage/MINIAOD/Xaa_photons/diPhoton/x500_a100/XaaNLOttQED0_BBAR_M-x500_a100_MINIAODSIM_1.root --fileout file:jetToolbox_nano_mc.root
import FWCore.ParameterSet.Config as cms
import files50
from files50 import *
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('file:/cms/xaastorage/MINIAOD/2016/GJets/HT_100to200/GJet_100to200_1.root '),
fileNames = cms.untracked.vstring(A),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODSIMoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAODSIM'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_mc_2018GJetsHT600toInf_50.root'),
outputCommands = process.NANOAODSIMEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_upgrade2018_realistic_v19', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequenceMC)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODSIMoutput_step = cms.EndPath(process.NANOAODSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeMC
#call to customisation function nanoAOD_customizeMC imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeMC(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"rsk146@scarletmail.rutgers.edu"
] |
rsk146@scarletmail.rutgers.edu
|
fc521136d37bde83bf4b77d4966c06e6653d750b
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/-1317042838/pandas/_libs/ops.py
|
a84d13f1d42b972f9af8b614b27048d38673cdb9
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273
| 2019-10-01T13:56:27
| 2019-10-01T13:56:27
| 200,369,900
| 0
| 0
| null | 2019-08-03T12:56:33
| 2019-08-03T11:53:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
# encoding: utf-8
# module pandas._libs.ops
# from /var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/pandas/_libs/ops.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
import operator as operator # /usr/lib/python3.6/operator.py
import numpy as np # /var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/numpy/__init__.py
# functions
def maybe_convert_bool(*args, **kwargs): # real signature unknown
pass
def scalar_binop(*args, **kwargs): # real signature unknown
"""
Apply the given binary operator `op` between each element of the array
`values` and the scalar `val`.
Parameters
----------
values : ndarray[object]
val : object
op : binary operator
Returns
-------
result : ndarray[object]
"""
pass
def scalar_compare(*args, **kwargs): # real signature unknown
"""
Compare each element of `values` array with the scalar `val`, with
the comparison operation described by `op`.
Parameters
----------
values : ndarray[object]
val : object
op : {operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt}
Returns
-------
result : ndarray[bool]
"""
pass
def vec_binop(*args, **kwargs): # real signature unknown
"""
Apply the given binary operator `op` pointwise to the elements of
arrays `left` and `right`.
Parameters
----------
left : ndarray[object]
right : ndarray[object]
op : binary operator
Returns
-------
result : ndarray[object]
"""
pass
def vec_compare(*args, **kwargs): # real signature unknown
"""
Compare the elements of `left` with the elements of `right` pointwise,
with the comparison operation described by `op`.
Parameters
----------
left : ndarray[object]
right : ndarray[object]
op : {operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt}
Returns
-------
result : ndarray[bool]
"""
pass
def __pyx_unpickle_Enum(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7f43e7e6f8d0>'
__spec__ = None # (!) real value is "ModuleSpec(name='pandas._libs.ops', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7f43e7e6f8d0>, origin='/var/www/newsbytes/CPP/venv/lib/python3.6/site-packages/pandas/_libs/ops.cpython-36m-x86_64-linux-gnu.so')"
__test__ = {}
|
[
"sarthakpatidar15@gmail.com"
] |
sarthakpatidar15@gmail.com
|
ce3333447ac28a3d89c0757d6ada515e638e5bd2
|
8410bb5a2e8849bb3a554b95ddc713d88f3440c4
|
/aws-dev/awsdev9/venv/Lib/site-packages/dns/rdtypes/ANY/SOA.py
|
aec81cad8ac916e9bc71052ecbc4983cdabbd126
|
[
"MIT"
] |
permissive
|
PacktPublishing/-AWS-Certified-Developer---Associate-Certification
|
ae99b6c1efb30e8fab5b76e3d8c821823a4cd852
|
b9838b4e038b42ad1813a296379cbbc40cab6286
|
refs/heads/master
| 2022-11-03T04:37:49.014335
| 2022-10-31T05:42:19
| 2022-10-31T05:42:19
| 219,964,717
| 13
| 11
|
MIT
| 2021-06-02T00:57:45
| 2019-11-06T09:54:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,597
|
py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.name
class SOA(dns.rdata.Rdata):
"""SOA record
@ivar mname: the SOA MNAME (master name) field
@type mname: dns.name.Name object
@ivar rname: the SOA RNAME (responsible name) field
@type rname: dns.name.Name object
@ivar serial: The zone's serial number
@type serial: int
@ivar refresh: The zone's refresh value (in seconds)
@type refresh: int
@ivar retry: The zone's retry value (in seconds)
@type retry: int
@ivar expire: The zone's expiration value (in seconds)
@type expire: int
@ivar minimum: The zone's negative caching time (in seconds, called
"minimum" for historical reasons)
@type minimum: int
@see: RFC 1035"""
__slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
'minimum']
def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
expire, minimum):
super(SOA, self).__init__(rdclass, rdtype)
self.mname = mname
self.rname = rname
self.serial = serial
self.refresh = refresh
self.retry = retry
self.expire = expire
self.minimum = minimum
def to_text(self, origin=None, relativize=True, **kw):
mname = self.mname.choose_relativity(origin, relativize)
rname = self.rname.choose_relativity(origin, relativize)
return '%s %s %d %d %d %d %d' % (
mname, rname, self.serial, self.refresh, self.retry,
self.expire, self.minimum)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
mname = tok.get_name()
rname = tok.get_name()
mname = mname.choose_relativity(origin, relativize)
rname = rname.choose_relativity(origin, relativize)
serial = tok.get_uint32()
refresh = tok.get_ttl()
retry = tok.get_ttl()
expire = tok.get_ttl()
minimum = tok.get_ttl()
tok.get_eol()
return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
expire, minimum)
def to_wire(self, file, compress=None, origin=None):
self.mname.to_wire(file, compress, origin)
self.rname.to_wire(file, compress, origin)
five_ints = struct.pack('!IIIII', self.serial, self.refresh,
self.retry, self.expire, self.minimum)
file.write(five_ints)
def to_digestable(self, origin=None):
return self.mname.to_digestable(origin) + \
self.rname.to_digestable(origin) + \
struct.pack('!IIIII', self.serial, self.refresh,
self.retry, self.expire, self.minimum)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
(rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
if rdlen != 20:
raise dns.exception.FormError
five_ints = struct.unpack('!IIIII',
wire[current: current + rdlen])
if origin is not None:
mname = mname.relativize(origin)
rname = rname.relativize(origin)
return cls(rdclass, rdtype, mname, rname,
five_ints[0], five_ints[1], five_ints[2], five_ints[3],
five_ints[4])
def choose_relativity(self, origin=None, relativize=True):
self.mname = self.mname.choose_relativity(origin, relativize)
self.rname = self.rname.choose_relativity(origin, relativize)
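# Editor's addition: a minimal usage sketch (hedged; assumes a standard
# dnspython install, and the zone names below are illustrative only).
if __name__ == '__main__':
    import dns.rdataclass
    import dns.rdatatype
    soa = dns.rdata.from_text(
        dns.rdataclass.IN, dns.rdatatype.SOA,
        'ns1.example. hostmaster.example. 2019010101 7200 900 1209600 86400')
    print(soa.to_text())  # round-trips all seven SOA fields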
|
[
"sonalis@packtpub.com"
] |
sonalis@packtpub.com
|
d8adcfa0328f753994b60200ace6ca4d145e0f23
|
3d5bcd57b893c95bbcbfafe77bbc33c65432c9ed
|
/Algorithms/LeetCode/L0079exist.py
|
c6486ef5bca5b6781c64631e90da4eed40b18976
|
[] |
no_license
|
arunachalamev/PythonProgramming
|
c160f34c7cb90e82cd0d4762ff9dcb4abadf9c1c
|
ea188aaa1b72511aeb769a2829055d0aae55e73e
|
refs/heads/master
| 2021-06-04T03:50:37.976293
| 2020-11-12T19:52:28
| 2020-11-12T19:52:28
| 97,364,002
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
def exist(board, word):
m,n = len(board), len(board[0])
def search(i,j,word):
nonlocal m,n
if len(word) == 0:
return True
if i<0 or i==m or j <0 or j==n or board[i][j] !=word[0]:
return False
board[i][j] = '#'
for di,dj in [(0,1),(0,-1),(1,0),(-1,0)]:
if search (i+di, j+dj , word[1:]):
return True
board[i][j] = word[0]
return False
for i,row in enumerate(board):
for j,_ in enumerate(row):
if search(i,j,word):
return True
return False
print (exist([
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
], 'ABCCEDX'))
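# Editor's addition: a complementary check -- 'ABCCED' is traceable in the
# same board, so this call should print True (the standard LeetCode sample).
print (exist([
    ['A','B','C','E'],
    ['S','F','C','S'],
    ['A','D','E','E']
], 'ABCCED'))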
|
[
"arunachalamev@gmail.com"
] |
arunachalamev@gmail.com
|
0bf7dd56ef9f8d3dc81a166b9e292152ff8911ac
|
2aba3c043ce4ef934adce0f65bd589268ec443c5
|
/AOJ/courses/ITP1/3_C.py
|
221621fc02dd16be341b7f831191bed733e02394
|
[] |
no_license
|
kambehmw/algorithm_python
|
4f66593b77039d90515d1fcbecacdab8c811b92f
|
17222399dcc92fd8f908e5774a9883e2e89c486e
|
refs/heads/master
| 2020-06-02T12:44:11.322356
| 2020-05-18T13:22:05
| 2020-05-18T13:22:05
| 191,157,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
while True:
x, y = map(int, input().split())
if x == 0 and y == 0:
exit()
else:
if x < y:
print(x, y)
else:
print(y, x)
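# Editor's addition: sample behavior -- for the input line "3 2" this prints
# "2 3" (smaller value first); the loop terminates on "0 0".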
|
[
"kanbe.hmw@gmail.com"
] |
kanbe.hmw@gmail.com
|
8af8b1154126237b12e676c20db0981a5f9e3d8e
|
8a14a7724d00f1eb7791e53f8446e99ecc975605
|
/scripts/extract_features.py
|
95649f83351e38ae3501cff705bf80339edd1315
|
[
"Apache-2.0"
] |
permissive
|
aschn/picolo
|
3fa7b26d079fc9687de9c3e1e34cae774bcf8416
|
1f8f50e0709fdaef31bc38045ef9fd0c46aae2b5
|
refs/heads/master
| 2020-04-30T01:37:36.587287
| 2013-07-19T00:32:05
| 2013-07-19T00:32:05
| 9,307,233
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Anna Schneider
@version 0.1
@brief Typical script using picolo to extract features from point particles
"""
import picolo
from shapes import shape_factory_from_values
import argparse
import os.path as path
import time
import csv
# start timer
start = time.time()
# parse command-line arguments
brief = 'Typical script using picolo to extract features from point particles.'
parser = argparse.ArgumentParser(description=brief)
parser.add_argument('filename', type=str, help='path to xy coord file')
parser.add_argument('shape', type=str, help='type of features to extract',
choices=['UnitCell', 'Fourier', 'Zernike'])
parser.add_argument('dist', type=float, help='distance cutoff to neighbors')
parser.add_argument('--train', action='store_true',
help='include flag to only get features for prespecified training rows')
args = parser.parse_args()
# set up file paths
rootname, ext = path.splitext(args.filename)
dirname = path.dirname(args.filename)
# set up matcher
matcher = picolo.Matcher(args.filename, delim=' ', name=rootname,
trainingcol=2)
# create and add default shape of correct type
shape = shape_factory_from_values(args.shape,
optdata={'neighbor_dist': args.dist,
'max_dist': args.dist})
matcher.shapes.add('test', shape)
# get ndarray of features and particle ids by comparing to 'test' shape
features = matcher.feature_matrix('test')
# open csv writer
outfile = '%s_%s_features.dat' % (rootname, args.shape)
writer = csv.writer(open(outfile, 'w'), delimiter=' ')
# write header
writer.writerow(['id'] + shape.get_components())
# loop over particle ids
if args.train:
inds = matcher.training_ids
else:
inds = range(matcher.config.N)
for ip in inds:
# only write features for particles with valid shapes
if matcher.get_features('test', ip).get('is_valid'):
# write row of features
writer.writerow([ip] + ['%0.4f' % x for x in features[ip]])
# end timer
end = time.time()
print 'Done with %s ... took %d seconds.' % (rootname, end-start)
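# Editor's addition: example invocations (hedged; the file names are
# hypothetical, and the script is Python 2 as written):
#   python extract_features.py coords.xy Fourier 10.0
#   python extract_features.py coords.xy UnitCell 10.0 --train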
|
[
"annarschneider@gmail.com"
] |
annarschneider@gmail.com
|
1eadf13b44ed3ecced195ac1f6974c5866be1f8b
|
37efda4646f478b66674e384e1bc139e7874d972
|
/practice/RaodtoMillionaire.py
|
7677b54444573abaec9ffa4c8c2fa22f69a24b2b
|
[] |
no_license
|
siberian122/kyoupuro
|
02c1c40f7c09ff0c07a1d50b727f860ad269d8b1
|
8bf5e5b354d82f44f54c80f1fc014c9519de3ca4
|
refs/heads/master
| 2023-04-04T02:45:29.445107
| 2021-04-20T07:37:47
| 2021-04-20T07:37:47
| 299,248,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
n = int(input())
a = list(map(int, input().split()))
b = []
for i in range(n-1):
num = a[i]-a[i+1]
b.append(num)
now = 1000
stock = 0
for i in range(n-1):
    if b[i] > 0:  # sell
now += stock*a[i]
stock = 0
    elif now > 0 and b[i] < 0:  # buy
stock += now//a[i]
now = now % a[i]
#print(now, stock)
now += a[-1]*stock
print(now)
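# Editor's addition: a hand-traced check of the greedy logic. With n=2 and
# prices "100 200", b = [-100], so the loop buys 10 shares at 100 (now
# becomes 0), and the final line adds 200*10, printing 2000.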
|
[
"siberian1000@gmail.com"
] |
siberian1000@gmail.com
|
c96667e76a4d649fc180fffd2ee6abb688e027cb
|
d4fdbd68c42d6b9babe347cb3b65535e4d782172
|
/tensorflow_datasets/image/voc_test.py
|
1bbb9140e84808b1f66441b6ba103c2e8483ec03
|
[
"Apache-2.0"
] |
permissive
|
thanhkaist/datasets
|
2809260c5e95e96d136059bea042d1ed969a6fcf
|
02da35c558ec8ea704e744a2008c5cecb2e7a0a1
|
refs/heads/master
| 2020-06-04T16:13:14.603449
| 2019-06-14T22:01:33
| 2019-06-14T22:02:54
| 192,097,735
| 2
| 0
|
Apache-2.0
| 2019-06-15T16:02:18
| 2019-06-15T16:02:18
| null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PASCAL VOC image data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.image import voc
class Voc2007Test(testing.DatasetBuilderTestCase):
DATASET_CLASS = voc.Voc2007
SPLITS = {
'train': 1,
'validation': 2,
'test': 3,
}
if __name__ == '__main__':
testing.test_main()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
2fc48de98fbc2450366953e3be1285d20c36401a
|
ac8ffabf4d7339c5466e53dafc3f7e87697f08eb
|
/python_solutions/1080.insufficient-nodes-in-root-to-leaf-paths.py
|
4ba1ede95bb6688d9b4c3e860ddfe8e1d3dd646d
|
[] |
no_license
|
h4hany/leetcode
|
4cbf23ea7c5b5ecfd26aef61bfc109741f881591
|
9e4f6f1a2830bd9aab1bba374c98f0464825d435
|
refs/heads/master
| 2023-01-09T17:39:06.212421
| 2020-11-12T07:26:39
| 2020-11-12T07:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce, lru_cache
from typing import List
import itertools
import math
import heapq
import string
true = True
false = False
MIN, MAX, MOD = -0x3f3f3f3f, 0x3f3f3f3f, 1000000007
#
# @lc app=leetcode id=1080 lang=python3
#
# [1080] Insufficient Nodes in Root to Leaf Paths
#
# https://leetcode.com/problems/insufficient-nodes-in-root-to-leaf-paths/description/
#
# algorithms
# Medium (49.43%)
# Total Accepted: 14.4K
# Total Submissions: 29K
# Testcase Example: '[1,2,3,4,-99,-99,7,8,9,-99,-99,12,13,-99,14]\n1'
#
# Given the root of a binary tree, consider all root to leaf paths: paths from
# the root to any leaf. (A leaf is a node with no children.)
#
# A node is insufficient if every such root to leaf path intersecting this node
# has sum strictly less than limit.
#
# Delete all insufficient nodes simultaneously, and return the root of the
# resulting binary tree.
#
#
#
# Example 1:
#
#
#
# Input: root = [1,2,3,4,-99,-99,7,8,9,-99,-99,12,13,-99,14], limit = 1
#
# Output: [1,2,3,4,null,null,7,8,9,null,14]
#
#
#
# Example 2:
#
#
#
# Input: root = [5,4,8,11,null,17,4,7,1,null,null,5,3], limit = 22
#
# Output: [5,4,8,11,null,17,4,7,null,null,null,5]
#
#
#
# Example 3:
#
#
#
# Input: root = [1,2,-3,-5,null,4,null], limit = -1
#
# Output: [1,null,-3,4]
#
#
#
#
# Note:
#
#
# The given tree will have between 1 and 5000 nodes.
# -10^5 <= node.val <= 10^5
# -10^9 <= limit <= 10^9
#
#
#
#
#
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sufficientSubset(self, root: TreeNode, limit: int) -> TreeNode:
if not root: return None
if not root.left and not root.right:
return root if root.val >= limit else None
root.left = self.sufficientSubset(root.left, limit - root.val)
root.right = self.sufficientSubset(root.right, limit - root.val)
return None if not root.left and not root.right else root
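# Editor's addition (illustrative only; runnable on the LeetCode judge,
# where TreeNode is predefined ahead of this class):
#   root = TreeNode(1, TreeNode(2, TreeNode(-5)), TreeNode(-3, TreeNode(4)))
#   Solution().sufficientSubset(root, -1)  # example 3: keeps 1 -> -3 -> 4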
|
[
"ssruoz@gmail.com"
] |
ssruoz@gmail.com
|
75a63b080058ba26e1aa2ae9b422c95c519a403c
|
3e93c3bbe35c24bf7f1a75c612ab300f37063621
|
/C1/L1_18_mappingnameseq_namedtuple.py
|
f393d21fd2cf887c699056da4973e6a7725476db
|
[] |
no_license
|
rengokantai/orpycok3ed
|
5ac0195a48f02dcc5bbc720e812f637464215e8f
|
50ce744265dc6af0d1a4724ea52348faeb47764d
|
refs/heads/master
| 2021-01-10T05:05:53.477092
| 2016-03-12T20:04:45
| 2016-03-12T20:04:45
| 53,352,163
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
__author__ = 'Hernan Y.Ke'
from collections import namedtuple
# memorize this syntax
Me = namedtuple('Me',['first','last'])
me = Me(1,2)
print(me.first,me.last)
she=[Me(3,4),Me(5,6)]
#me = Me(first=1,last=2)  # note: keyword construction like this is actually valid too
me = me._replace(first=3)
print(me.first)
# get namedtuple
def get_num(tuplearr):
res=0
for param in tuplearr:
        s = Me(*param)  # unpack each tuple of params into a namedtuple instance
res+=s.first+s.last
return res
print(get_num(she))
#replace all params
def replace_params(tupleparams):
return me._replace(**tupleparams) # two stars. kwargs
newparams={'first':7,'last':8}
print(replace_params(newparams))
|
[
"yuriqiao@gmail.com"
] |
yuriqiao@gmail.com
|
ca2e60ef61a63bcc4473f3bb4ca159430fb5c13a
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/intentions/PyAnnotateTypesIntentionTest/methodAfterConstructorCall.py
|
0cdc87e27827504a3baf5a3c8d4524a6604e3e8c
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
class MyClass:
def __init__(self):
pass
def method(self, x):
pass
x = MyClass()
foo = x.met<caret>hod(42)
|
[
"mikhail.golubev@jetbrains.com"
] |
mikhail.golubev@jetbrains.com
|
55c2841b5ae6ddfc0e8c0cb6f34e33306f5fca3a
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_pragma58.py
|
8e84b65dd9e10c0774f2965011964ccb0cbd933f
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=8
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma58.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
8dd7a8369a2f7b352443bc0d36d23dd32bcc554e
|
bf576b059cbecb0cbb8a6c885dcfded5bd685399
|
/4.Python course/3.Expand course/1.Small windmill/Small windmill.py
|
18c56da878b1cb6a7ef0d38234ce809b1bea040f
|
[] |
no_license
|
YahboomTechnology/Superbit-expansion-board
|
0d3c2fd06c5df9280d230af429931af2c48dc6d5
|
4df7e03426d486d2b2f8f649359eee2d62851083
|
refs/heads/master
| 2023-04-07T03:16:15.786669
| 2023-03-29T01:12:57
| 2023-03-29T01:12:57
| 206,778,307
| 13
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
from microbit import *
import superbit
a = 135
display.show(Image.HEART)
superbit.servo270(superbit.S1, 135)
superbit.motor_control(superbit.M1, 255, 0)
while True:
if button_a.is_pressed():
a = a - 1
if a < 0:
a = 0
superbit.servo270(superbit.S1, a)
elif button_b.is_pressed():
a = a + 1
if a > 270:
a = 270
superbit.servo270(superbit.S1, a)
|
[
"2448532184@qq.com"
] |
2448532184@qq.com
|
0ab091f1bac3f6b3782abb3cf2f34ba686b858fc
|
6dcd5f4bb4c39e2d887e5d557e188ba4c8a75081
|
/src/UsersDB.py
|
3d3f2264fceef218c5169ec87a6f6ca4b65d695f
|
[] |
no_license
|
Pella86/HappyRateBot
|
815653033593aedc22c779025d00bddec4614f46
|
f23f786a3c9dc19f2378958470d82974d018bd64
|
refs/heads/master
| 2020-03-22T00:16:38.670215
| 2018-07-22T11:50:53
| 2018-07-22T11:50:53
| 139,234,809
| 1
| 1
| null | 2018-07-22T06:41:21
| 2018-06-30T09:01:21
|
Python
|
UTF-8
|
Python
| false
| false
| 4,288
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 12:10:14 2018
@author: Mauro
"""
#==============================================================================
# Imports
#==============================================================================
# py imports
import os
import hashlib
import string
# my imports
import Databases
import UserProfile
import random
import Logging
#==============================================================================
# logging
#==============================================================================
# create logger
log = Logging.get_logger(__name__, "WARNING")
#==============================================================================
# Helpers
#==============================================================================
def get_hash_id(personid):
pid = hashlib.sha256()
pid.update(bytes(personid))
return pid.digest()
#==============================================================================
# User database
#==============================================================================
class UsersDB:
def __init__(self):
self.folder = "./databases/user_db"
if not os.path.isdir(self.folder):
os.mkdir(self.folder)
self.database = Databases.Database(self.folder, "user_")
self.database.loadDB()
self.database.update_uid()
log.info("loaded users database")
folder = "./databases/banned_user_db"
if not os.path.isdir(folder):
os.mkdir(folder)
self.banned_database = Databases.Database(folder, "banned_user_")
def getUsersList(self):
return self.database.getValues()
def check_nickname(self, user, text):
error_message = None
alphanumeric = string.ascii_letters + string.digits
if len(text) < 3:
error_message = "too short"
elif len(text) >= 15:
error_message = "too long"
elif not all(c in alphanumeric for c in text):
error_message = "invalid character"
elif text in [u.display_id for u in self.database.getValues()]:
error_message = "already present"
if error_message is None:
user.display_id = text
self.database[user.hash_id].setData(user)
return True
else:
return error_message
def banUser(self, user):
duser = self.database[user.hash_id]
self.deleteUser(user)
def addUser(self, person, chatid):
# hash the id
hash_id = get_hash_id(person.id)
if self.database.isNew(hash_id):
log.info("added new user to database: {}".format(self.database.short_uid))
# create a unique display id
start_number = 0x10000000
stop_number = 0xFFFFFFFF
display_id = random.randint(start_number,stop_number)
log.debug("display id {}".format(display_id))
# check for uniqueness
display_id_list = [user.display_id for user in self.database.getValues()]
while display_id in display_id_list:
display_id = random.randint(start_number,stop_number)
log.debug("new display id {}".format(display_id))
# language
lang_tag = person.language_code if person.language_code else "en"
# user instance
user = UserProfile.UserProfile(hash_id, display_id, chatid, lang_tag)
data = Databases.Data(hash_id, user)
self.database.addData(data)
def deleteUser(self, user):
data = self.database[user.hash_id]
self.database.deleteItem(data)
def hGetUser(self, hash_id):
return self.database[hash_id].getData()
def getUser(self, person):
log.debug("User already in database, got user")
hash_id = get_hash_id(person.id)
return self.database[hash_id].getData()
def setUser(self, user):
self.database[user.hash_id].setData(user)
def update(self):
log.info("updating database...")
self.database.updateDB()
|
[
"pigmeo127@gmail.com"
] |
pigmeo127@gmail.com
|
9980f2825f02826d27018b266928c8e25ef4e7d6
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/comparison/test_autofilter06.py
|
354f84b2a2f1d14959c2854587b7e266fc15c235
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752
| 2015-03-31T20:32:28
| 2015-03-31T20:32:28
| 33,300,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,859
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'autofilter06.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.txt_filename = test_dir + 'xlsx_files/' + 'autofilter_data.txt'
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
This test corresponds to the following examples/autofilter.pl example:
Example 6. Autofilter with filter for non-blanks.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter('A1:D51')
# Add filter criteria.
worksheet.filter_column(0, 'x == NonBlanks')
# Open a text file with autofilter example data.
textfile = open(self.txt_filename)
# Read the headers from the first line of the input file.
headers = textfile.readline().strip("\n").split()
# Write out the headers.
worksheet.write_row('A1', headers)
# Start writing data after the headers.
row = 1
# Read the rest of the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Simulate a blank cell in the data.
if row == 6:
data[0] = ''
# Get some of the field data.
region = data[0]
# Check for rows that match the filter.
if region != '':
# Row matches the filter, no further action required.
pass
else:
# We need to hide rows that don't match the filter.
worksheet.set_row(row, options={'hidden': True})
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
2f8c03f052351b799bfba46a92f2566cc993aedd
|
5181d3b3ef8fe301ea2d6b095260e9d327c2fd79
|
/scripts/dl/download_hrrr.py
|
dad9ed84e463252c8a1b7b4fff6d35e96c53d1d1
|
[] |
no_license
|
danhreitz/iem
|
88113ef9c9c4a2918c9c2abdfd0510d5ca4ec819
|
ed490dcd6c2a8359f88cb805ccee8f6707566f57
|
refs/heads/master
| 2021-01-18T15:27:28.607250
| 2015-08-10T21:33:54
| 2015-08-10T21:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
"""
Since the NOAAPort feed of HRRR data does not have radiation, we should
download this manually from NCEP
Run at 40 AFTER for the previous hour
"""
import urllib2
import sys
import datetime
import os
def fetch(valid):
""" Fetch the radiation data for this timestamp
80:54371554:d=2014101002:ULWRF:top of atmosphere:anl:
81:56146124:d=2014101002:DSWRF:surface:anl:
"""
uri = valid.strftime(("http://www.ftp.ncep.noaa.gov/data/nccf/"
"nonoperational/com/hrrr/prod/hrrr.%Y%m%d/hrrr.t%Hz."
"wrfprsf00.grib2.idx"))
data = urllib2.urlopen(uri, timeout=30)
offsets = []
neednext = False
for line in data:
tokens = line.split(":")
if neednext:
offsets[-1].append(int(tokens[1]))
neednext = False
if tokens[3] in ['ULWRF', 'DSWRF']:
offsets.append([int(tokens[1]), ])
neednext = True
# Save soil temp and water at surface, 10cm and 40cm
if tokens[3] in ['TSOIL', 'SOILW']:
if tokens[4] in ['0-0 m below ground',
'0.01-0.01 m below ground',
'0.04-0.04 m below ground']:
offsets.append([int(tokens[1]), ])
neednext = True
outfn = valid.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/"
"%H/hrrr.t%Hz.3kmf00.grib2"))
outdir = os.path.dirname(outfn)
if not os.path.isdir(outdir):
os.makedirs(outdir, mode=0775) # make sure LDM can then write to dir
    output = open(outfn, 'ab')  # note: open()'s third arg is a buffer size, not a file mode; the old 0664 here was a mistaken permission value
req = urllib2.Request(uri[:-4])
if len(offsets) != 8:
print("download_hrrr_rad warning, found %s gribs for %s" % (
len(offsets), valid))
for pr in offsets:
req.headers['Range'] = 'bytes=%s-%s' % (pr[0], pr[1])
f = urllib2.urlopen(req, timeout=30)
output.write(f.read())
output.close()
def main():
""" Go Main Go"""
ts = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
if len(sys.argv) == 5:
ts = datetime.datetime(int(sys.argv[1]), int(sys.argv[2]),
int(sys.argv[3]), int(sys.argv[4]))
fetch(ts)
if __name__ == '__main__':
os.umask(0002)
main()
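# Editor's addition: scheduling sketch (paths hypothetical). Per the module
# docstring, run at 40 minutes past each hour, e.g. in a crontab:
#   40 * * * * python download_hrrr.py
# A specific hour can be backfilled by passing four UTC arguments:
#   python download_hrrr.py 2015 8 10 18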
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
3b79ecee53bab652825699f9a829541d12808883
|
53d22468fb1c9e0f4b4710a31fb7ac638549b8a7
|
/src/episode_stats.py
|
8c746bbebadc8b8367d5a5f0ae15a6bda7162cea
|
[
"MIT"
] |
permissive
|
binderwang/drivebot
|
768bcfe224d94b931c45c41ced2a1b0067c6417d
|
a8fb86731c52b7594dd135e8759622c29172b557
|
refs/heads/master
| 2020-12-14T09:48:59.857490
| 2016-05-03T03:17:58
| 2016-05-03T03:17:58
| 58,269,730
| 1
| 0
| null | 2016-05-07T14:33:18
| 2016-05-07T14:33:17
| null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
#!/usr/bin/env python
import json
import sys
episode_id = 0
for line in sys.stdin:
episode = json.loads(line)
rewards = [event['reward'] for event in episode]
print "\t".join(map(str, [episode_id, len(episode), sum(rewards)]))
episode_id += 1
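# Editor's addition: usage sketch (file name hypothetical); the script reads
# one JSON-encoded episode per line on stdin:
#   python episode_stats.py < episodes.jsonl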
|
[
"matthew.kelcey@gmail.com"
] |
matthew.kelcey@gmail.com
|
f6b0c0ebfcfea1688b03ec725be8faebb3cbbbee
|
2598f255696842f043372dd68fe4d5fd48d1a41c
|
/Ofelia/expedient/src/python/expedient/clearinghouse/users/views.py
|
5bd342d561ba8106b5c71655fbdfedc0cbb0a6c3
|
[
"BSD-3-Clause"
] |
permissive
|
zanetworker/C-BAS
|
8e5442df83626e95d9562497278869ee3c4fad51
|
695c6f72490a02bbb308d44526631dbf426ab900
|
refs/heads/master
| 2021-01-01T06:55:39.085086
| 2014-08-11T09:37:42
| 2014-08-11T09:37:42
| 22,351,372
| 1
| 0
| null | 2014-08-08T16:15:54
| 2014-07-28T17:28:44
|
Python
|
UTF-8
|
Python
| false
| false
| 7,668
|
py
|
'''
Created on Dec 3, 2009
@author: jnaous
'''
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, HttpResponseNotAllowed
from django.core.urlresolvers import reverse
from expedient.clearinghouse import users
from django.views.generic import create_update, simple
from django.contrib import auth
from expedient.common.permissions.shortcuts import must_have_permission,\
give_permission_to
from registration import views as registration_views
from expedient.clearinghouse.users.forms import FullRegistrationForm
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.views import password_reset
from expedient.clearinghouse.users.forms import LDAPPasswordResetForm
def home(request):
'''show list of users and form for adding users'''
must_have_permission(request.user, User, "can_manage_users")
user_list = auth.models.User.objects.all().order_by('username')
if request.method == "GET":
pwd_form = auth.forms.UserCreationForm()
user_form = users.forms.UserForm()
userprofile_form = users.forms.UserProfileForm()
elif request.method == "POST":
pwd_form = auth.forms.UserCreationForm(request.POST)
user_form = users.forms.UserForm(request.POST)
userprofile_form = users.forms.UserProfileForm(request.POST)
# check that all data is valid
if pwd_form.is_valid() and user_form.is_valid() and userprofile_form.is_valid():
# create the user first
user = pwd_form.save()
# use the user to save the user info
user_form = users.forms.UserForm(request.POST, instance=user)
user = user_form.save()
# now store the user profile
up = users.models.UserProfile(user=user)
userprofile_form = users.forms.UserProfileForm(request.POST, instance=up)
userprofile_form.save()
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
else:
return HttpResponseNotAllowed("GET", "POST")
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/home.html',
extra_context={
'user_list': user_list,
'pwd_form': pwd_form,
'user_form': user_form,
'userprofile_form': userprofile_form,
'breadcrumbs': (
("Home", reverse("home")),
("Manage users", request.path),
)
},
)
def detail(request, user_id=None):
if user_id == None:
user = request.user
else:
user = get_object_or_404(auth.models.User, pk=user_id)
must_have_permission(request.user, user, "can_edit_user")
profile = users.models.UserProfile.get_or_create_profile(user)
if request.method == "GET":
if user_id == None:
pwd_form = users.forms.PasswordChangeFormDisabled(user)
else:
pwd_form = users.forms.AdminPasswordChangeFormDisabled(user)
user_form = users.forms.UserForm(instance=user)
userprofile_form = users.forms.UserProfileForm(instance=profile)
elif request.method == "POST":
if request.POST.get("change_pwd", False):
data = request.POST
else:
data = None
if user_id == None:
pwd_form = users.forms.PasswordChangeFormDisabled(user, data)
else:
pwd_form = users.forms.AdminPasswordChangeFormDisabled(user, data)
user_form = users.forms.UserForm(request.POST, instance=user)
userprofile_form = users.forms.UserProfileForm(request.POST, instance=profile)
if user_form.is_valid() and userprofile_form.is_valid():
user = user_form.save()
userprofile_form = users.forms.UserProfileForm(request.POST, instance=profile)
userprofile_form.save()
if request.POST.get("change_pwd", False) and pwd_form.is_valid():
pwd_form.save()
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
elif "change_pwd" not in request.POST:
return HttpResponseRedirect(reverse("users_saved", args=(user.id,)))
else:
return HttpResponseNotAllowed("GET", "POST")
try:
slice_set = user.slice_set.all()
except AttributeError:
slice_set = ()
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/detail.html',
extra_context={
'curr_user': user,
'slices': slice_set,
'pwd_form': pwd_form,
'user_form': user_form,
'show_owner': True,
'userprofile_form': userprofile_form,
'breadcrumbs': (
("Home", reverse("home")),
("Account for %s" % user.username, reverse("users_detail", args=[user.id])),
)
},
)
def saved(request, user_id):
user = get_object_or_404(auth.models.User, pk=user_id)
print user.id
return simple.direct_to_template(
request,
template='expedient/clearinghouse/users/saved.html',
extra_context={
'curr_user': user,
},
)
def delete(request, user_id):
user = get_object_or_404(auth.models.User, pk=user_id)
must_have_permission(request.user, user, "can_edit_user")
return create_update.delete_object(
request,
auth.models.User,
reverse("users_home"),
user_id,
template_name="expedient/clearinghouse/users/confirm_delete.html",
)
def register(request):
try:
return registration_views.register(
request,
form_class=FullRegistrationForm)
except Exception as e:
print "[ERROR] Exception at 'expedient.clearinghouse.users.views': user '%s' (%s) could not fully register. RegistrationForm module returned: %s" % (request.POST['username'], request.POST['email'], str(e))
return simple.direct_to_template(
request,
template='registration/registration_incomplete.html',
extra_context={
'exception': e,
'root_email': settings.ROOT_EMAIL,
'failed_username': request.POST['username'],
'failed_email': request.POST['email'],
},
)
def activate(request, activation_key):
template_name = 'registration/activate.html'
activation_key = activation_key.lower() # Normalize before trying anything with it.
# Import only here to avoid every time warning 'DeprecationWarning:
# the sha module is deprecated; use the hashlib module instead'
from registration.models import RegistrationProfile
account = RegistrationProfile.objects.activate_user(activation_key)
if account:
give_permission_to(
"can_edit_user", account, account, can_delegate=True)
return simple.direct_to_template(
request,
template=template_name,
extra_context={
'account': account,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
},
)
def my_password_reset(request):
if request.method == 'GET':
return password_reset(request)
else:
email = request.POST['email']
users = User.objects.filter(email = email)
if len(users) == 1 and users[0].password == '!':
return HttpResponseRedirect(settings.OFREG_URL+settings.OFREG_RESET_PATH)
else:
return password_reset(request, password_reset_form=LDAPPasswordResetForm)
|
[
"umar.toseef@eict.de"
] |
umar.toseef@eict.de
|
8d637f9712aa8cd0fa725ea3c7b3285cb522f1da
|
be5a758c99f05c8ae8c224bf43335154114ee5f6
|
/kombu/compat.py
|
224f2e33e5d44865c3202047427a7e1c535ba30d
|
[
"BSD-3-Clause"
] |
permissive
|
bradjasper/kombu
|
160ed1b5651f91a87752df40791d01c91ca1fe16
|
4c9ac1436eb0468508f8b2cf1bda997535e1326d
|
refs/heads/master
| 2021-01-16T00:23:17.928400
| 2010-07-28T17:25:32
| 2010-07-28T17:25:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,431
|
py
|
from itertools import count
from kombu import entity
from kombu import messaging
def iterconsume(connection, consumer, no_ack=False, limit=None):
consumer.consume(no_ack=no_ack)
for iteration in count(0):
if limit and iteration >= limit:
raise StopIteration
yield connection.drain_events()
def entry_to_binding(queue, **options):
binding_key = options.get("binding_key") or options.get("routing_key")
e_durable = options.get("exchange_durable") or options.get("durable")
e_auto_delete = options.get("exchange_auto_delete") or \
options.get("auto_delete")
q_durable = options.get("queue_durable") or options.get("durable")
q_auto_delete = options.get("queue_auto_delete") or \
options.get("auto_delete")
e_arguments = options.get("exchange_arguments")
q_arguments = options.get("queue_arguments")
b_arguments = options.get("binding_arguments")
exchange = entity.Exchange(options.get("exchange"),
type=options.get("exchange_type"),
delivery_mode=options.get("delivery_mode"),
routing_key=options.get("routing_key"),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return entity.Binding(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get("exclusive"),
auto_delete=q_auto_delete,
queue_arguments=q_arguments,
binding_arguments=b_arguments)
class Publisher(messaging.Producer):
exchange = ""
exchange_type = "direct"
routing_key = ""
durable = True
auto_delete = False
_closed = False
def __init__(self, connection, exchange=None, routing_key=None,
exchange_type=None, durable=None, auto_delete=None, **kwargs):
self.connection = connection
self.backend = connection.channel()
self.exchange = exchange or self.exchange
self.exchange_type = exchange_type or self.exchange_type
self.routing_key = routing_key or self.routing_key
if auto_delete is not None:
self.auto_delete = auto_delete
if durable is not None:
self.durable = durable
if not isinstance(self.exchange, entity.Exchange):
self.exchange = entity.Exchange(name=self.exchange,
type=self.exchange_type,
routing_key=self.routing_key,
auto_delete=self.auto_delete,
durable=self.durable)
super(Publisher, self).__init__(self.backend, self.exchange,
**kwargs)
def send(self, *args, **kwargs):
return self.publish(*args, **kwargs)
def close(self):
self.backend.close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
class Consumer(messaging.Consumer):
queue = ""
exchange = ""
routing_key = ""
exchange_type = "direct"
durable = True
exclusive = False
auto_delete = False
_closed = False
def __init__(self, connection, queue=None, exchange=None,
routing_key=None, exchange_type=None, durable=None,
exclusive=None, auto_delete=None, **kwargs):
self.connection = connection
self.backend = connection.channel()
if durable is not None:
self.durable = durable
if exclusive is not None:
self.exclusive = exclusive
if auto_delete is not None:
self.auto_delete = auto_delete
self.queue = queue or self.queue
self.exchange = exchange or self.exchange
self.exchange_type = exchange_type or self.exchange_type
self.routing_key = routing_key or self.routing_key
exchange = entity.Exchange(self.exchange,
type=self.exchange_type,
routing_key=self.routing_key,
auto_delete=self.auto_delete,
durable=self.durable)
binding = entity.Binding(self.queue,
exchange=exchange,
routing_key=self.routing_key,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete)
super(Consumer, self).__init__(self.backend, binding, **kwargs)
def close(self):
self.cancel()
self.backend.close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __iter__(self):
return self.iterqueue(infinite=True)
def fetch(self, no_ack=None, enable_callbacks=False):
if no_ack is None:
no_ack = self.no_ack
message = self.bindings[0].get(no_ack)
if message:
if enable_callbacks:
self.receive(message.payload, message)
return message
def process_next(self):
raise NotImplementedError("Use fetch(enable_callbacks=True)")
def discard_all(self, filterfunc=None):
if filterfunc is not None:
raise NotImplementedError(
"discard_all does not implement filters")
return self.purge()
def iterconsume(self, limit=None, no_ack=None):
return iterconsume(self.connection, self, no_ack, limit)
def wait(self, limit=None):
it = self.iterconsume(limit)
return list(it)
def iterqueue(self, limit=None, infinite=False):
for items_since_start in count():
item = self.fetch()
if (not infinite and item is None) or \
(limit and items_since_start >= limit):
raise StopIteration
yield item
class _CSet(messaging.Consumer):
def __init__(self, connection, *args, **kwargs):
self.connection = connection
self.backend = connection.channel()
super(_CSet, self).__init__(self.backend, *args, **kwargs)
def iterconsume(self, limit=None, no_ack=False):
return iterconsume(self.connection, self, no_ack, limit)
def discard_all(self):
return self.purge()
def add_consumer_from_dict(self, queue, **options):
self.bindings.append(entry_to_binding(queue, **options))
def add_consumer(self, consumer):
self.bindings.extend(consumer.bindings)
def close(self):
self.cancel()
self.channel.close()
def ConsumerSet(connection, from_dict=None, consumers=None,
callbacks=None, **kwargs):
bindings = []
if consumers:
for consumer in consumers:
map(bindings.extend, consumer.bindings)
if from_dict:
for queue_name, queue_options in from_dict.items():
bindings.append(entry_to_binding(queue_name, **queue_options))
return _CSet(connection, bindings, **kwargs)
|
[
"askh@opera.com"
] |
askh@opera.com
|
c3af127904d957a29958033e8898da66cbee1238
|
70ed82598c7ae19dc3de4a3a8400e9767b8a74b0
|
/Net/BaseNet/ResNet/fine_tuning_2.py
|
a5c28f115867e33b9eb23304dfaf71d8d7a0216b
|
[] |
no_license
|
UpCoder/MedicalImage
|
f255922b988392cd4c3a90715fb945ee20edb3b4
|
34c11562658e6f362ee7eb53740ba96209a22d45
|
refs/heads/master
| 2021-01-19T16:59:13.251726
| 2017-12-04T14:55:32
| 2017-12-04T14:55:32
| 101,031,357
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,832
|
py
|
# -*- coding: utf-8 -*-
# Fine-tune on the ROI data using the model pre-trained on patches
from resnet import inference_small, loss
import tensorflow as tf
from Config import Config as sub_Config
from Slice.MaxSlice.MaxSlice_Resize import MaxSlice_Resize
from tensorflow.examples.tutorials.mnist import input_data
from Tools import changed_shape, calculate_acc_error, acc_binary_acc
import numpy as np
from Patch.ValData import ValDataSet
from Patch.Config import Config as patch_config
def train(train_data_set, val_data_set, load_model_path, save_model_path):
x = tf.placeholder(
tf.float32,
shape=[
None,
sub_Config.IMAGE_W,
sub_Config.IMAGE_H,
sub_Config.IMAGE_CHANNEL
],
name='input_x'
)
y_ = tf.placeholder(
tf.float32,
shape=[
None,
]
)
tf.summary.histogram(
'label',
y_
)
global_step = tf.Variable(0, trainable=False)
# variable_average = tf.train.ExponentialMovingAverage(
# sub_Config.MOVING_AVERAGE_DECAY,
# global_step
# )
# vaeriable_average_op = variable_average.apply(tf.trainable_variables())
# regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
is_training = tf.placeholder('bool', [], name='is_training')
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
'where to store the dataset')
tf.app.flags.DEFINE_boolean('use_bn', True, 'use batch normalization. otherwise use biases')
y = inference_small(x, is_training=is_training,
num_classes=sub_Config.OUTPUT_NODE,
use_bias=FLAGS.use_bn,
num_blocks=3)
tf.summary.histogram(
'logits',
tf.argmax(y, 1)
)
loss_ = loss(
logits=y,
labels=tf.cast(y_, np.int32)
)
tf.summary.scalar(
'loss',
loss_
)
train_op = tf.train.GradientDescentOptimizer(
learning_rate=sub_Config.LEARNING_RATE
).minimize(
loss=loss_,
global_step=global_step
)
# with tf.control_dependencies([train_step, vaeriable_average_op]):
# train_op = tf.no_op(name='train')
with tf.variable_scope('accuracy'):
accuracy_tensor = tf.reduce_mean(
tf.cast(
tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
tf.float32
)
)
tf.summary.scalar(
'accuracy',
accuracy_tensor
)
saver = tf.train.Saver()
merge_op = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if load_model_path:
saver.restore(sess, load_model_path)
writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
for i in range(sub_Config.ITERATOE_NUMBER):
images, labels = train_data_set.get_next_batch(sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
images = changed_shape(images, [
len(images),
sub_Config.IMAGE_W,
sub_Config.IMAGE_W,
sub_Config.IMAGE_CHANNEL
])
_, loss_value, accuracy_value, summary, global_step_value = sess.run(
[train_op, loss_, accuracy_tensor, merge_op, global_step],
feed_dict={
x: images,
y_: labels
}
)
writer.add_summary(
summary=summary,
global_step=global_step_value
)
if i % 100 == 0 and i != 0 and save_model_path is not None:
                # save the model (for the five-class task, checkpoint every 500 steps)
import os
save_path = os.path.join(save_model_path, str(global_step_value))
if not os.path.exists(save_path):
os.mkdir(save_path)
save_path += '/model.ckpt'
                print 'model saved path is ', save_path
saver.save(sess, save_path)
if i % 100 == 0:
validation_images, validation_labels = val_data_set.get_next_batch()
validation_images = changed_shape(
validation_images,
[
len(validation_images),
sub_Config.IMAGE_W,
sub_Config.IMAGE_W,
1
]
)
validation_accuracy, validation_loss, summary, logits = sess.run(
[accuracy_tensor, loss_, merge_op, y],
feed_dict={
x: validation_images,
y_: validation_labels
}
)
calculate_acc_error(
logits=np.argmax(logits, 1),
label=validation_labels,
show=True
)
binary_acc = acc_binary_acc(
logits=np.argmax(logits, 1),
label=validation_labels,
)
val_writer.add_summary(summary, global_step_value)
print 'step is %d,training loss value is %g, accuracy is %g ' \
'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
(global_step_value, loss_value, accuracy_value, validation_loss, validation_accuracy, binary_acc)
writer.close()
val_writer.close()
if __name__ == '__main__':
phase_name = 'ART'
state = ''
traindatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROI/train'
valdatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROI/val'
val_dataset = ValDataSet(new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
phase=phase_name,
category_number=2,
shuffle=True,
data_path=valdatapath
)
train_dataset = ValDataSet(new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
phase=phase_name,
category_number=2,
data_path=traindatapath,
shuffle=True,
)
train(
train_dataset,
val_dataset,
load_model_path=None,
save_model_path='/home/give/PycharmProjects/MedicalImage/Net/BaseNet/ResNet/models/fine_tuning/2-128/'
)
|
[
"546043882@qq.com"
] |
546043882@qq.com
|
efc54871703ecce3f1cb626bd1351abbdff392ef
|
34ef83114e02b173bd2d55eb53ad399e738a8e3c
|
/django/test_bootstrap/blog/models.py
|
b12d7ddd4b0a24411e62b4e99bf00bcafa60e565
|
[] |
no_license
|
vavilon/Python3
|
e976a18eb301e4953696d1e3f4730ed890da015a
|
8c79729747ce51d60ad685e6a2e58292954ed7eb
|
refs/heads/master
| 2023-01-09T13:44:37.408601
| 2018-01-25T22:41:14
| 2018-01-25T22:41:14
| 100,892,055
| 0
| 1
| null | 2022-12-26T20:29:27
| 2017-08-20T22:23:06
|
Python
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
from django.db import models
# Create your models here.
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True
)
def publish(self):
self.published_date = timezone.now()
def __str__(self):
return self.title
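# Editor's note: unlike the usual Django Girls version of this model,
# publish() here does not call self.save(), so callers must persist the
# change themselves, e.g.:
#   post.publish()
#   post.save()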
|
[
"overon4ek@gmail.com"
] |
overon4ek@gmail.com
|
3fac458c8f38d04e4724c1f19199c6c517b324b6
|
675b72eae65f8e258794decf9627e5fdf8b04559
|
/plugin_tests/examples_test.py
|
aa8ae4a0e021a3a57aefdf2dd02021e68f45841a
|
[
"Apache-2.0"
] |
permissive
|
jbeezley/large_image
|
368f730ea6fe2c4b75a9c3412c08ce8f41be545a
|
ac4cbaff4ae2fbbde425d3cd1aee2ff03e6235c8
|
refs/heads/master
| 2021-01-11T06:15:48.687563
| 2016-10-24T17:09:08
| 2016-10-24T17:09:08
| 71,806,470
| 0
| 0
| null | 2016-10-24T16:04:04
| 2016-10-24T16:04:03
| null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import os
import subprocess
import unittest
class LargeImageExamplesTest(unittest.TestCase):
def testAverageColor(self):
# Test running the program
testDir = os.path.dirname(os.path.realpath(__file__))
examplesDir = os.path.join(testDir, '../examples')
prog = 'average_color.py'
imagePath = os.path.join(os.environ['LARGE_IMAGE_DATA'],
'sample_image.ptif')
process = subprocess.Popen(
['python', prog, imagePath, '-m', '1.25'],
shell=False, stdout=subprocess.PIPE, cwd=examplesDir)
results = process.stdout.readlines()
self.assertEqual(len(results), 19)
finalColor = [float(val) for val in results[-1].split()[-3:]]
self.assertEqual(round(finalColor[0]), 245)
self.assertEqual(round(finalColor[1]), 247)
self.assertEqual(round(finalColor[2]), 247)
|
[
"david.manthey@kitware.com"
] |
david.manthey@kitware.com
|
434153e344fd51bbd477726190b6bffce6f42c4d
|
3de3dae722829727edfdd6cc3b67443a69043475
|
/edexOsgi/com.raytheon.uf.common.dataplugin.text/pythonPackages/dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/text/subscription/request/SubscriptionRequest.py
|
237472774c674b7b8fb879656ce996c5d08db82a
|
[
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] |
permissive
|
Unidata/awips2
|
9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb
|
d76c9f96e6bb06f7239c563203f226e6a6fffeef
|
refs/heads/unidata_18.2.1
| 2023-08-18T13:00:15.110785
| 2023-08-09T06:06:06
| 2023-08-09T06:06:06
| 19,332,079
| 161
| 75
|
NOASSERTION
| 2023-09-13T19:06:40
| 2014-05-01T00:59:04
|
Java
|
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# File auto-generated against equivalent DynamicSerialize Java class
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Sep 05, 2014 bclement Generated
class SubscriptionRequest(object):
def __init__(self):
self.message = None
def getMessage(self):
return self.message
def setMessage(self, message):
self.message = message
|
[
"mjames@unidata.ucar.edu"
] |
mjames@unidata.ucar.edu
|
d485028798e1c737f0af507daf3b21f679ec03ae
|
b55c368efdfe360123be1a2e7677cee53706d1f9
|
/VectorTrans/Main.py
|
7f33d97819742d7ae327669e60bb979628d2c4fb
|
[
"MIT"
] |
permissive
|
ZzhiWang/DRImplicitVecXform
|
207cd6ef6edf5bc90b2abb1242e2d7bb3b322f95
|
2ec0c64fb098e29ce74929f5e19bce90b2f5791c
|
refs/heads/master
| 2023-03-17T14:51:34.755756
| 2020-08-01T09:26:35
| 2020-08-01T09:26:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
import numpy as np
from Tools import Preprocess
from VectorTrans.DRTrans import DRTrans
from VectorTrans.MDSTrans import MDSTrans
from VectorTrans.TSNETrans import TSNETrans
from VectorTrans.PCATrans import PCATrans
from VectorTrans.MDSTransPlus import MDSTransPlus
from VectorTrans.TSNETransPlus import TSNETransPlus
from VectorTrans.CreateJson import JsonFile
def load_data():
X = np.loadtxt("..\\Data\\data.csv", dtype=np.float, delimiter=",")
label = np.loadtxt("..\\Data\\label.csv", dtype=np.int, delimiter=",")
return X, label
def run_example():
dr_method = 'MDS' # 'MDS' 't-SNE' 'PCA' 'MDSPlus' 't-SNEPlus'
X, label = load_data()
repeat = Preprocess.has_repeat(X)
if repeat:
print("Please recheck the input data for duplicate points")
return
X = Preprocess.normalize(X) # Optional
(n, d) = X.shape
trans = DRTrans()
if dr_method == 'MDS':
trans = MDSTrans(X, label=label, y_init=None, y_precomputed=False)
elif dr_method == 't-SNE':
trans = TSNETrans(X, label=label, y_init=None, perplexity=30.0)
elif dr_method == 'PCA':
trans = PCATrans(X, label=label)
elif dr_method == "MDSPlus":
trans = MDSTransPlus(X, label=label, y_init=None, y_precomputed=False)
elif dr_method == "t-SNEPlus":
trans = TSNETransPlus(X, label=label, y_init=None, perplexity=30.0)
else:
print("This method is not supported at this time: ", dr_method)
return
trans.transform(nbrs_k=20, MAX_EIGEN_COUNT=4, yita=0.1)
np.savetxt("..\\Data\\"+str(dr_method)+"_Y.csv", trans.Y, fmt='%.18e', delimiter=",")
if n*d < 1024 ** 3 / 2:
np.savetxt("..\\Data\\"+str(dr_method)+"_derivative.csv", trans.derivative, fmt='%.18e', delimiter=",")
json_file = JsonFile(path="..\\Data\\")
json_file.create_file(trans)
if __name__ == '__main__':
run_example()
|
[
"sdu2014@126.com"
] |
sdu2014@126.com
|
b9fc0ded63c3c6f0ff7857c261a68f18076d6d8e
|
9dc8c299ee7d4a225002127cc03b4253c8a721fd
|
/libs/unittest/live_related_condition.py
|
5604fdc9fc852993f3b40a2a692f9a1c3da1f49b
|
[] |
no_license
|
namesuqi/strategy_corgi
|
5df5d8c89bdf7a7c465c438048be20ef16120f4f
|
557b8f8eabf034c2a57c25e6bc581858dd4f1b6e
|
refs/heads/master
| 2020-03-07T04:00:18.313901
| 2018-03-29T07:50:50
| 2018-03-29T07:50:50
| 127,253,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,149
|
py
|
# !/usr/bin/python
# coding=utf-8
# author: JinYiFan
from config import *
from libs.module.live_seeds import *
import time
def wait_for_second(wait_time):
time.sleep(wait_time)
def change_config(live_file_count, live_peer_count, rate_of_peer_and_file):
"""
    Update the parameter configuration in the config table
    :param live_file_count: total number of files
    :param live_peer_count: total number of playing peers
    :param rate_of_peer_and_file: number of playing peers per file
"""
orm.session.query(Configs).filter(Configs.role == "live_file_count").update(
{"content": live_file_count})
orm.session.query(Configs).filter(Configs.role == "live_peer_count").update(
{"content": live_peer_count})
orm.session.query(Configs).filter(Configs.role == "rate_of_peer_and_file").update(
{"content": rate_of_peer_and_file})
orm.session.commit()
orm.session.close()
def change_peer_flow_to_0():
"""
    Set the peers' CDN and P2P traffic to 0
"""
orm.session.query(Live_Peer).update({"cdn": 0, "p2p": 0})
orm.session.commit()
orm.session.close()
def change_LF_flow_to_0():
"""
    Set the LF (seed) nodes' CDN and P2P traffic to 0
"""
orm.session.query(Live_Seed).update({"upload": 0, "download": 0})
orm.session.commit()
orm.session.close()
def add_player(play_num):
"""
    Add playing peers
    :param play_num: number of playing peers to add
"""
peer_num_infos = orm.session.query(Live_Online).offset(200).limit(play_num).all()
file_id = orm.session.query(Live_Peer).first().file_id
num = 0
for num in range(play_num):
live_peer_sdk = Live_Peer(peer_id=peer_num_infos[num].peer_id, version=peer_num_infos[num].sdk_version,
country=peer_num_infos[num].country, province_id=peer_num_infos[num].province_id,
city_id=peer_num_infos[num].city_id, isp_id=peer_num_infos[num].isp_id,
file_id=file_id, chunk_id=get_random_chunk_id(), operation=OPERATION, cdn=CDN,
p2p=P2P, ssid=peer_num_infos[num].ssid, p2penable=P2PENABLE)
orm.session.add(live_peer_sdk)
num += 1
orm.session.commit()
orm.session.close()
def one_peer_multi_channel(channel_num):
"""
    Have one playing peer play multiple channels
    :param channel_num: number of channels played by a single peer
"""
peer_info = orm.session.query(Live_Peer).first()
file_info = orm.session.query(Live_File).offset(5).limit(channel_num).all()
for num in range(channel_num - 1):
live_peer_sdk = Live_Peer(peer_id=peer_info.peer_id, version=peer_info.version, country=peer_info.country,
province_id=peer_info.province_id, city_id=peer_info.city_id, isp_id=peer_info.isp_id,
file_id=file_info[num].file_id, chunk_id=get_random_chunk_id(), operation=OPERATION,
cdn=CDN, p2p=P2P, ssid=peer_info.ssid, p2penable=P2PENABLE)
orm.session.add(live_peer_sdk)
num += 1
orm.session.commit()
orm.session.close()
def del_player(del_num):
"""
    Delete playing peers
    :param del_num: number of playing peers to delete
"""
peer_infos = orm.session.query(Live_Peer).all()
session_ids = list()
for peer_info in peer_infos:
session_ids.append(peer_info.ssid)
num = 0
for num in range(del_num):
orm.session.query(Live_Peer).filter_by(ssid=session_ids[num]).delete()
num += 1
orm.session.commit()
orm.session.close()
def del_seed(del_num):
"""
    Delete LF (seed) nodes
    :param del_num: number of seed nodes to delete
"""
seed_infos = orm.session.query(Live_Seed).all()
session_ids = list()
for seed_info in seed_infos:
session_ids.append(seed_info.ssid)
num = 0
for num in range(del_num):
orm.session.query(Live_Seed).filter_by(ssid=session_ids[num]).delete()
num += 1
orm.session.commit()
orm.session.close()
if __name__ == "__main__":
del_seed(20)
# add_player(3)
# one_peer_multi_channel(3)
# del_player(2)
|
[
"suqi_name@163.com"
] |
suqi_name@163.com
|
b8acc579b13a7bb35130f20698e3489073b14792
|
773deb7825ff84eec3e0cf6ae8266d07251df392
|
/CHAPTER05/bw41.py
|
c7231b7bd8d7d2ba190f52df2a0fa74e6f62a961
|
[] |
no_license
|
kji0205/py
|
3ca9c2a351af05ce62d7c7c3c261ed98a7e8290d
|
b45ffb3424b7c0da8192d431cb7ad7933c60ef81
|
refs/heads/master
| 2021-01-20T18:57:51.603386
| 2016-06-23T14:24:57
| 2016-06-23T14:24:57
| 61,639,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
# To get true parallelism, consider concurrent.futures
import logging
from pprint import pprint
from sys import stdout as STDOUT
from time import time
def gcd(pair):
a, b = pair
low = min(a, b)
for i in range(low, 0, -1):
if a % i == 0 and b % i == 0:
return i
numbers = [(1963309, 2265973), (2030677, 3814172),
(1551645, 2229620), (2039045, 2020802)]
start = time()
results = list(map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
#
from concurrent.futures import ThreadPoolExecutor
start = time()
pool = ThreadPoolExecutor(max_workers=2)
results = list(pool.map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
#
from concurrent.futures import ProcessPoolExecutor
start = time()
pool = ProcessPoolExecutor(max_workers=2)
results = list(pool.map(gcd, numbers))
end = time()
print('Took %.3f seconds' % (end - start))
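# Editor's note: the ProcessPoolExecutor section assumes a fork-based start
# method (Linux); on spawn-based platforms such as Windows it must run under
# an `if __name__ == '__main__':` guard, or each worker will re-import and
# re-execute this module.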
|
[
"kji0205@gmail.com"
] |
kji0205@gmail.com
|
ab12cc2538c903dfca478ff16c8508153a7312c9
|
994ea22f35c635fdf139af9282b0d3a3d86ea34a
|
/ud120-projects-intro_to_machine_learning/decision_tree/dt_author_id.py
|
667e184f992ddbc3679ee4787f6ce8ba6bcc894a
|
[] |
no_license
|
zjyx147/Udacity
|
ac371fbc5b5b456e88b411657ef5a28c3b071c6c
|
d86fadd537dbacc6f8142b043e71527b0448bae3
|
refs/heads/master
| 2022-06-23T14:25:41.242353
| 2019-06-20T20:12:13
| 2019-06-20T20:12:13
| 191,207,247
| 0
| 0
| null | 2022-06-21T22:07:35
| 2019-06-10T16:42:18
|
DIGITAL Command Language
|
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
#!/usr/bin/python
"""
This is the code to accompany the Lesson 3 (decision tree) mini-project.
Use a Decision Tree to identify emails from the Enron corpus by author:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
from sklearn import tree
from sklearn.metrics import accuracy_score
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]
clf = tree.DecisionTreeClassifier(min_samples_split=40)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
print len(features_train[0])
print "accuracy: ", accuracy_score(pred, labels_test)
#########################################################
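# Editor's note: uncommenting the two slicing lines above trains on 1% of
# the data -- the speed/accuracy trade-off this mini-project explores.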
|
[
"zjyx147@gmail.com"
] |
zjyx147@gmail.com
|