| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–288 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable ⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable ⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable ⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | lengths 128 – 8.19k |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5e70dd92d37b33cee451a79110869edc240504ee
|
40b182f143fa034051fbfc80dc1bc77b204fdb89
|
/fft_2d.py
|
26fca42cbe8257f3a225e76e740aa65f3a6ab6f1
|
[] |
no_license
|
phaustin/fft_2d
|
02ab6707cbbb1c3fcd836c4e9a6323b4439bed2a
|
a891669e2c70c70a87efa9b254e9103ec3d93af5
|
refs/heads/master
| 2016-09-06T10:51:35.568580
| 2014-02-11T06:52:04
| 2014-02-11T06:52:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
from __future__ import division
from netCDF4 import Dataset
import numpy as np
import math
from scipy import fftpack
from matplotlib import pyplot as plt
#plt.switch_backend('Agg') #batch
plt.switch_backend('MacOSX') #interactive
test=Dataset('a17.nc')
tau=test.variables['tau'][:2000,:2000]
nn = tau.shape[0] # size of each column of the 2D array
mm = tau.shape[1] # size of each row of the array
m = int(math.floor(mm/2)) #midpoint
scale=0.025 #pixel size in km
x_dens = np.arange(0,(m))+1
x_dens = (x_dens-m)/(mm*scale)
delta_k = 1./scale #1/km
nyquist = delta_k*0.5
fft_tau = fftpack.fft2(tau)
tr_tau = fftpack.fftshift(fft_tau)
e_dens = tr_tau*np.conjugate(tr_tau)/(mm*mm)
e_dens = e_dens.real
plt.close('all')
fig,ax=plt.subplots(2,2)
ax[0,0].set_title('title1')
im1=ax[0,0].imshow(tau)
im2=ax[1,0].imshow(np.log(e_dens))
im3=ax[0,1].hist(tau.ravel())
im4=ax[1,1].hist(np.log(e_dens.ravel()))
plt.draw()
cbar_ax = fig.add_axes([0.45, 0.55, 0.03, 0.3])
fig.colorbar(im1,cax=cbar_ax)
fig.tight_layout()
fig.canvas.draw()
plt.show()
bnstep=2.
nbns = int(round((math.sqrt(2)*mm/bnstep),0)+1)
e_spec = np.zeros(nbns, float)
cnt = np.zeros(nbns, float)
for i in range(mm):
if (i%100) == 0:
print "\t\trow: "+str(i)+" completed"
for j in range(mm):
r = math.sqrt(((i+1)-mm/2)**2+((j+1)-mm/2)**2)
bn = int(math.floor(r/bnstep))
e_spec[bn]=e_spec[bn]+ np.abs(e_dens[i,j])**2.
cnt[bn]=cnt[bn]+1
for i in range(nbns):
if cnt[i]>0:
e_spec[i]=e_spec[i]/cnt[i]/(4*(math.pi**2))
e_spec=np.sqrt(e_spec)
delta_k=nyquist/nbns
x_ax=np.linspace(delta_k,nyquist,nbns)
fig=plt.figure(2)
fig.clf()
ax1=fig.add_subplot(111)
ax1.loglog(x_ax,e_spec)
l0=1.
slope=(-8/3.)
analytic=l0*x_ax**slope
the_line=l0
ax1.loglog(x_ax,analytic,'r-')
fig.tight_layout()
fig.canvas.draw()
plt.show()
|
[
"paustin@eos.ubc.ca"
] |
paustin@eos.ubc.ca
|
d22177499c618ff254b7d77a4adadebd0fbc9c16
|
82ccbe6f52a89e0acd1b88cd3de6c2c434bcef6c
|
/lib/PanGenomeAPI/fetch_summary/main.py
|
64197ada34555c2089d05a55099c48806cd693d0
|
[
"MIT"
] |
permissive
|
kbaseapps/PanGenomeAPI
|
e51da942cb61f4341824e7b296fa2b694b7deda6
|
52dfc557279824f3b1c3b0a537528ccfaee39ab1
|
refs/heads/master
| 2021-07-20T04:30:31.032517
| 2021-02-23T22:45:09
| 2021-02-23T22:45:09
| 89,646,247
| 0
| 3
|
MIT
| 2021-02-23T22:45:09
| 2017-04-27T22:42:41
|
Python
|
UTF-8
|
Python
| false
| false
| 7,818
|
py
|
"""
Fetch and construct summary data for previewing a pangenome.
"""
from installed_clients.WorkspaceClient import Workspace as Workspace
def fetch_pangenome_summary(
pangenome_ref: str,
workspace_url: str,
token: str) -> dict:
"""
Construct a summary data object for a single pangenome, used in the
"simple_summary" method.
Args:
pangenome_ref: Workspace reference to the pangenome object
workspace_url: URL of the Workspace being used in the current env
token: authorization token for fetching the data
Returns:
A python object adhering to the SimpleSummaryResult type in
PanGenomeAPI.spec
"""
ws_client = Workspace(workspace_url, token=token)
# Download the full pangenome workspace dataset
resp = ws_client.get_objects2({
'objects': [{'ref': pangenome_ref}]
})
data = resp['data'][0]['data']
# Fetch the object infos for each genome
genome_refs = [{"ref": ref} for ref in data["genome_refs"]]
genome_infos = ws_client.get_object_info3({
"objects": genome_refs,
"includeMetadata": 1
})["infos"]
name_mapping = _genome_name_mapping(genome_infos)
ret = {
"pangenome_id": data["id"],
"genomes_count": len(data["genome_refs"]),
"genes": _count_genes(data),
"families": _count_families(data),
"genomes": _genome_counts(data, genome_infos, name_mapping),
"shared_family_map": _shared_family_map(data, name_mapping),
"genome_ref_name_map": name_mapping,
}
return ret
def _count_genes(pg_data: dict) -> dict:
"""
Calculate gene counts for a pangenome object
Args:
pg_data: workspace data object for the Pangenome
Returns:
Dict of counts with the GeneFamilyReport type in PanGenomeAPI.spec
"""
counts = {
"genes_count": 0,
"homolog_family_genes_count": 0,
"singleton_family_genes_count": 0,
}
for family in pg_data["orthologs"]:
count = len(family["orthologs"])
counts["genes_count"] += count
if count == 1:
counts["singleton_family_genes_count"] += count
elif count > 1:
counts["homolog_family_genes_count"] += count
return counts
def _count_families(pg_data: dict) -> dict:
"""
Aggregate counts for the homolog families in the pangenome
Args:
pg_data: workspace data object for the Pangenome
Returns:
dict matching the type FamilyReport from PanGenomeAPI.spec
"""
counts = {
"families_count": 0,
"homolog_families_count": 0,
"singleton_families_count": 0,
}
counts["families_count"] = len(pg_data["orthologs"])
for family in pg_data["orthologs"]:
count = len(family["orthologs"])
if count == 1:
counts["singleton_families_count"] += 1
elif count > 1:
counts["homolog_families_count"] += 1
return counts
def _genome_name_mapping(genome_infos: list) -> dict:
"""
Construct a mapping of genome workspace reference to sciname
Args:
pg_data: workspace data object for the Pangenome
genome_infos: list of object info tuples (with metadata) for every
genome in the pangenome
Returns:
Mapping of genome ref to scientific name and obj name
"""
ret = {}
names = set()
# Fetch the object infos for every ref
for info in genome_infos:
ref = _get_ref(info)
sciname = info[-1].get("Name", "unknown taxon")
# Create a unique display name for each genome
name = sciname
if name in names:
name = f"{sciname} ({ref})"
names.add(name)
ret[ref] = name
return ret
def _genome_counts(
pg_data: dict,
genome_infos: list,
name_mapping: dict) -> dict:
"""
Aggregate counts of genes and families for every genome
Args:
pg_data: workspace data object for the Pangenome
genome_infos: list of genome info tuples for each object
name_mapping: mapping of workspace ref to readable name for use as keys
Returns:
Mapping of genome ref to GenomeGeneFamilyReport (from
PanGenomeAPI.spec)
"""
# Initialize the result structure
ret = {}
for name in name_mapping.values():
ret[name] = {
"genome_genes": 0,
"genome_homolog_family_genes": 0,
"genome_singleton_family_genes": 0,
"genome_homolog_family": 0,
}
# Set total feature counts from the obj info
for info in genome_infos:
key = name_mapping[_get_ref(info)]
ret[key]["genome_genes"] = _get_feature_count(info)
# Aggregate other counts from the ortholog families
for family in pg_data["orthologs"]:
count = len(family["orthologs"])
found_genomes = set()
for gene in family["orthologs"]:
genome_ref = gene[2]
key = name_mapping[genome_ref]
if count > 1:
ret[key]["genome_homolog_family_genes"] += 1
found_genomes.add(genome_ref)
for ref in found_genomes:
ret[name_mapping[ref]]["genome_homolog_family"] += 1
# Set the singleton family gene counts to be the difference of the total
# features and the homolog family counts
for ref in pg_data["genome_refs"]:
key = name_mapping[ref]
total = ret[key]["genome_genes"]
homologs = ret[key]["genome_homolog_family_genes"]
ret[key]["genome_singleton_family_genes"] = total - homologs
return ret
def _shared_family_map(pg_data: dict, name_mapping: dict) -> dict:
"""
Calculate the number of shared ortholog families between any two genomes
Args:
pg_data: workspace data object for the Pangenome
name_mapping: mapping of workspace ref to readable name for use as keys
Returns:
dict where keys are genome refs, and values are mapping of genome refs
to shared family counts.
Example: {"1": {"2": 10}} represents genome "1" and "2" sharing 10
families
"""
# Initialize the return structure
ret = {}
for ref1 in pg_data["genome_refs"]:
key1 = name_mapping[ref1]
ret[key1] = {}
for ref2 in pg_data["genome_refs"]:
key2 = name_mapping[ref2]
ret[key1][key2] = 0
# Aggregate counts of all genomes that share genes in an ortholog family
for family in pg_data["orthologs"]:
if len(family["orthologs"]) <= 1:
# We only record non-singletons
continue
genome_refs = set(orth[2] for orth in family["orthologs"])
for ref1 in genome_refs:
for ref2 in genome_refs:
key1, key2 = name_mapping[ref1], name_mapping[ref2]
ret[key1][key2] += 1
return ret
def _get_feature_count(genome_info: dict) -> int:
"""
Get the total feature count (coding and non-coding) for a genome.
We fetch this number from the genome metadata.
Older Genome versions store this as "Number features", while newer versions
(>=9) store it as "Number of Protein Encoding Genes".
Genome versions before 8 (older than July, 2014) have no metadata and
aren't supported for now.
"""
valid_keys = ("Number of Protein Encoding Genes", "Number features")
meta = genome_info[-1]
for key in valid_keys:
if key in meta:
return int(meta[key])
# TODO fallback to something else?
raise RuntimeError(
"Unable to read the number of features "
f"from the Genome metadata: {genome_info}")
def _get_ref(info: list) -> str:
"""Get the workspace reference from an info tuple"""
return f"{info[6]}/{info[0]}/{info[4]}"
|
[
"jayrbolton@gmail.com"
] |
jayrbolton@gmail.com
|
cc15ab4da0e3d63290d555da764c4651e5b116d5
|
2368797b51548c0f6393d63bf4973898ac99d528
|
/strings/easy/q443.py
|
dfc657c4a91311384a0c289c38920c173469005f
|
[] |
no_license
|
pengzhefu/LeetCodePython
|
595887d1625666962e7e959ffa148580f9b89ada
|
59eff778a5fd5cff3b5b6b88c6c7e76dd213dfb0
|
refs/heads/master
| 2021-06-08T19:44:52.487031
| 2021-06-01T15:44:29
| 2021-06-01T15:44:29
| 175,763,155
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,906
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 23:39:02 2019
@author: pengz
"""
'''
Given an array of characters, compress it in-place.
The length after compression must always be smaller than or equal to the original array.
Every element of the array should be a character (not int) of length 1.
After you are done modifying the input array in-place, return the new length of the array.
Follow up:
Could you solve it using only O(1) extra space?
Example 1:
Input:
["a","a","b","b","c","c","c"]
Output:
Return 6, and the first 6 characters of the input array should be: ["a","2","b","2","c","3"]
Explanation:
"aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by "c3".
'''
## I kept getting the second approach to this problem wrong!!! Write it a few more times and remember it!
def compress(chars:list) -> int: ## try O(1) space, written on my own
if len(chars) <=1:
return len(chars)
i = len(chars) -1
while i >= 0:
ret = 1
if i != 0:
if chars[i] != chars[i-1]:
i = i-1
else:
while i > 0 and chars[i] == chars[i-1]: ## count the length of the run; when this loop exits, i is at the first element of the run
ret += 1
tmp = i
i = i-1
chars.pop(tmp)
index = i+1
x = 0
ins = str(ret)
while x < len(ins):
chars.insert(index,ins[x])
x = x+1
index = index+1
# i = i-1 ## no need to decrement i here: without it, chars[i] and chars[i-1] now differ,
## so the if branch above takes care of the i-1 step,
else:
i=i-1
return len(chars)
def compress2(chars:list) -> int: ## using two pointers, i and index; i walks the list, index is the write position
i =0
index =0 ## index is where both the letter and its count get written: write the letter first, then index+1 is where the digits go
while i < len(chars):
tmp = chars[i]
count = 0
while i < len(chars) and chars[i] == tmp: ## when this loop exits, i is already at the first position of a different letter
i= i+1
count= count+1
chars[index] =tmp ## if a count was written earlier, index is now one past it, ready for the next letter
## the letter comes from the cached tmp; i is already at the first position of a letter different from tmp
index =index + 1
if count > 1:
for c in str(count):
chars[index] = c
index =index + 1
return index
chars = ["a","a","a","a","a","a","a","b","b","c","d","d","e"]
a = compress2(chars)
|
[
"32311379+pengzhefu@users.noreply.github.com"
] |
32311379+pengzhefu@users.noreply.github.com
|
e963b984443ec4d68c597960486998c74e5281de
|
be6e135014a7553b8f13b99435369e3d53b58585
|
/course_python/Python/student_oop.py
|
43d9d213d2b47e37a632402e8d7e6afe58f5f69c
|
[] |
no_license
|
ankitsoni5/python
|
a5555a6371e12b170703b8c16a4e8aab5988a373
|
a5fcf618a476cb1745095f038b9118ce724c0b7e
|
refs/heads/master
| 2020-09-10T23:35:45.001477
| 2019-11-23T13:41:37
| 2019-11-23T13:41:37
| 221,866,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
from com.ankit.collage.student import Student
#s1 = Student() #student object
#internally
# 1. get address - 4001 Student Object
# 2. Student : __init__(4001)
print(Student.count) # prints the number of Student objects created so far.
s1 = Student('Abhishek', 'Male', 1, 90)
#internally
# 1. get address - 4001 Student Object
# 2. Student : __init__(4001,'Abhishek', 'Male', 1, 90)
# create an attribute in an object
'''s1.name = 'Ankit'
s1.gender = 'M'
s1.roll = 21
s1.marks = 80
s2 = Student() # 2nd student object
s2.name = 'Soni'
s2.gender = 'M'
s2.roll = 22
s2.marks = 54
'''
print(Student.count)
s2 = Student('Soni','M',22,54) # 2nd student object
print(s1.getdetails())
print(s1.get_grades())
print(s1.get_name_and_roll()) # using a tuple as the return data
tu = s1.get_name_and_roll()
name, roll = tu[0],tu[1] # unpacking the separate values from the returned tuple
# internally
#print(Student.getdetails(s1))
print(s2.getdetails())
print(s2.get_grades())
print(Student.count)
s3 = Student() # for this to run, give the __init__ method default arguments so that calling it with no args falls back to the defaults; see the sketch below
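# A minimal sketch of such a constructor (hypothetical defaults; the real
# Student class lives in com.ankit.collage.student):
# def __init__(self, name='Unknown', gender='NA', roll=0, marks=0):
#     self.name, self.gender, self.roll, self.marks = name, gender, roll, marks
#     Student.count += 1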
print(s3.getdetails())
print(s3.get_grades())
print(Student.count)
print(Student.get_min_attendence())
|
[
"apple@Apples-MacBook-Pro.local"
] |
apple@Apples-MacBook-Pro.local
|
3f9a0b1d182a5ddd38813da6721ae1a290403895
|
b4bc5fb10b0d498cb0d3e5ee2ce3473b10b553e5
|
/fast_transformers/recurrent/attention/self_attention/adamax_attention.py
|
1cac02af4c80484f39d1bc654fd0d6ccdea11efe
|
[] |
no_license
|
minhtannguyen/momentum-transformer-code-submission
|
2f0005028ab7e32957612f642330acd802bded8e
|
68b11ce5564a8212cd91cb2093b457a00d511046
|
refs/heads/master
| 2023-05-31T19:20:57.380490
| 2021-06-04T15:08:26
| 2021-06-04T15:08:26
| 373,784,396
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,429
|
py
|
"""Implement the causally masked linear attention as a recurrent model."""
import torch
from torch.nn import Module
from ....attention_registry import RecurrentAttentionRegistry, Optional, Float, Int, \
Callable, EventDispatcherInstance
from ....events import EventDispatcher
from ....feature_maps import elu_feature_map
from ..._utils import check_state
class RecurrentAdamaxAttention(Module):
"""Implement fast_transformers.attention.causal_linear_attention as a
fixed-dimensional state recurrent model.
See fast_transformers.attention.linear_attention and
fast_transformers.attention.causal_linear_attention for the general concept
of replacing the softmax with feature maps.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
event_dispatcher: str or EventDispatcher instance to be used by this
module for dispatching events (default: the default
global dispatcher)
"""
def __init__(self, query_dimensions, mu, stepsize, beta, feature_map=None, eps=1e-6,
event_dispatcher=""):
super(RecurrentAdamaxAttention, self).__init__()
self.feature_map = (
feature_map(query_dimensions) if feature_map else
elu_feature_map(query_dimensions)
)
self.eps = eps
self.event_dispatcher = EventDispatcher.get(event_dispatcher)
# for adam transformer
self.mu = mu
self.stepsize = stepsize
self.beta = beta
def forward(self, query, key, value, state=None, memory=None):
# Normalize state/memory
state = check_state(state, memory)
# If this is a new sequence reinitialize the feature map
if state is None:
self.feature_map.new_feature_map()
# Apply the feature map to the query and key
Q = self.feature_map.forward_queries(query)
K = self.feature_map.forward_keys(key)
# Extract some shapes
N, H, D = Q.shape
_, _, M = value.shape
# Extract the memory or initialize it
if state is None:
Si = query.new_zeros((N, H, D, M))
Zi = query.new_zeros((N, H, D))
Pi = query.new_zeros((N, H, D, M))
Mi = query.new_zeros((N, H, D, M))
else:
Si, Zi, Pi, Mi, _ = state
# Ensure the batch size did not change
if len(Si) != N:
raise ValueError("The batch size changed during iteration")
# Update the internal state
#
# NOTE: The if clause is added due to GitHub PR #10. Simply using the
# following two lines does not perform the operation in place which
# means it is slower for inference.
if K.grad_fn is not None or value.grad_fn is not None:
Zi = Zi + K
Ui = torch.einsum("nhd,nhm->nhdm", K, value)
Pi = self.mu * Pi - self.stepsize * Ui
Mi = torch.max(self.beta * Mi, torch.abs(Ui))
Si = Si - Pi/torch.sqrt(Mi + 1e-16)
else:
Zi += K
Ui = torch.einsum("nhd,nhm->nhdm", K, value)
Pi *= self.mu
Pi -= self.stepsize * Ui
Mi = torch.max(self.beta * Mi, torch.abs(Ui))
Si -= Pi/torch.sqrt(Mi + 1e-16)
# Compute the output
Z = 1. / (torch.einsum("nhd,nhd->nh", Q, Zi) + self.eps)
V = torch.einsum("nhd,nhdm,nh->nhm", Q, Si, Z)
return V, [Si, Zi, Pi, Mi, Ui]
# Register the attention implementation so that it becomes available in our
# builders
# RecurrentAttentionRegistry.register(
# "momentum-linear", RecurrentMomentumAttention,
# [
# ("query_dimensions", Int),
# ("feature_map", Optional(Callable)),
# ("event_dispatcher", Optional(EventDispatcherInstance, ""))
# ]
# )
RecurrentAttentionRegistry.register(
"adamax-linear", RecurrentAdamaxAttention,
[
("query_dimensions", Int),
("mu", Float),
("stepsize", Float),
("beta", Float),
("feature_map", Optional(Callable)),
("event_dispatcher", Optional(EventDispatcherInstance, ""))
]
)
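# A hypothetical usage sketch (shapes assumed for illustration: batch 8,
# 4 heads, 32 query dimensions, 64 value dimensions), stepping the recurrent
# attention twice and carrying the state forward:
#
#   att = RecurrentAdamaxAttention(query_dimensions=32, mu=0.9, stepsize=0.1, beta=0.999)
#   q = k = torch.randn(8, 4, 32); v = torch.randn(8, 4, 64)
#   out, state = att(q, k, v)               # first step initializes the state
#   out, state = att(q, k, v, state=state)  # later steps reuse it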
|
[
"mn15@rice.edu"
] |
mn15@rice.edu
|
c3d6c4f46612b7f7c5ec6f9758883ee9cf8a0f4a
|
d051f3fe9fda31b72fa0ddce67aa1f4293c7c37c
|
/infer/local_gibbs_move.py
|
e8d91c772ccb38d12b759ff202759798f9ddefa4
|
[
"BSD-3-Clause"
] |
permissive
|
davmre/sigvisa
|
4e535215b6623310d8f5da64258f6fa9a378f9fd
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
refs/heads/master
| 2021-03-24T10:24:52.307389
| 2018-01-05T19:33:23
| 2018-01-05T19:33:23
| 2,321,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,990
|
py
|
import numpy as np
from sigvisa.models.distributions import PiecewiseLinear
from sigvisa.utils.array import index_to_time, time_to_index
"""
Methods to propose a param value from a piecewise linear approximation
to the Gibbs conditional. This can be based on either the full model
posterior (proxylp_full) or a cheap proxy based on the noise model
applied to a local region of the unexplained envelope (proxylp_localenv).
"""
def proxylp_full(sg, wn, node):
def proxylp(candidate):
node.set_value(candidate)
if node.model is not None:
nlp = node.log_p()
else:
cd = node.joint_conditional_dist()
nlp = cd.log_p(candidate)
lp = nlp + wn.log_p()
return float(lp)
return proxylp
def proxylp_localenv(sg, wn, eid, phase, param):
tg = sg.template_generator(phase) # used by proxylp below
tmnodes = sg.get_template_nodes(eid, wn.sta, phase, wn.band, wn.chan)
k, node = tmnodes[param]
tmvals = dict([(p, n.get_value(key=k)) for (p, (k, n)) in tmnodes.items()])
atime = tmvals['arrival_time']
peak_time = tmvals['arrival_time'] + np.exp(tmvals['peak_offset'])
unexplained = wn.unexplained_env(eid, phase)
peak_idx = time_to_index(peak_time, wn.st, wn.srate)
start_idx_true = time_to_index(atime, wn.st, wn.srate)
end_idx_true = int(peak_idx + 60*wn.srate)
start_idx = max(0, start_idx_true)
end_idx = min(wn.npts, end_idx_true)
start_offset = start_idx - start_idx_true
if end_idx-start_idx < wn.srate:
# if less than 1s of available signal, don't even bother
return None
unexplained_local = unexplained[start_idx:end_idx]
n = len(unexplained_local)
def proxylp(candidate):
tmvals[param] = candidate
l = tg.abstract_logenv_raw(tmvals, srate=wn.srate, fixedlen=n+start_offset)
diff = unexplained_local - np.exp(l[start_offset:])
lp = wn.nm_env.log_p(diff) + node.model.log_p(candidate, cond=node._pv_cache)
return float(lp)
return proxylp
def approximate_scalar_gibbs_distribution(sg, wn, eid, phase, param,
node, proxylp, prior_weight = 0.0):
class priormodel(object):
def __init__(self, node):
if node.model is not None:
self.model = node.model
else:
self.model = node.joint_conditional_dist()
self.pv = node._pv_cache
def log_p(self, x, **kwargs):
return float(self.model.log_p(x, cond=self.pv, **kwargs))
def sample(self, **kwargs):
return float(self.model.sample(cond=self.pv, **kwargs))
assert (not node.deterministic())
tg = sg.template_generator(phase)
lbounds, hbounds = tg.low_bounds(), tg.high_bounds()
# generate a range of plausible values based on the prior,
# and on the current value v (which should already be adapted
# somewhat to the data).
pv = node._pv_cache
v = float(node.get_value())
if node.model is not None:
pred = node.model.predict(cond=pv)
std = np.sqrt(node.model.variance(cond=pv, include_obs=True))
else:
cd = node.joint_conditional_dist()
pred = cd.predict()
std = np.sqrt(cd.variance())
if param=="tt_residual":
prior_min, prior_max = -25, 25
elif param=="mult_wiggle_std":
prior_min = 0.1
prior_max = 0.99
else:
prior_min, prior_max = pred-4*std, pred+4*std
prior_min = min(prior_min, v-4*std)
prior_max = max(prior_max, v + 4*std)
if param in lbounds:
prior_min = max(prior_min, lbounds[param])
prior_max = min(prior_max, hbounds[param])
candidates = np.linspace(prior_min, prior_max, 20)
candidates = np.array(sorted(list(candidates) + [v,]))
# compute the logp at each of these candidates
lps = np.array([proxylp(candidate) for candidate in candidates])
# now refine the approximation in regions of high probability
def bad_indices(lps, candidates):
best_idx = np.argmax(lps)
best_lp = np.max(lps)
lp_diff = np.abs(np.diff(lps))
# an lp is "significant" if it or its neighbor is above the threshold
thresh = best_lp - 3
significant_lps = ( lps[:-1] > thresh ) + ( lps[1:] > thresh )
# a "bad step" is where we have a sharp boundary next to a significant lp.
# that is, the significant lps are the areas where it's important to
# approximate the posterior well, and a large difference in lp between adjacent
# candidates means we're not doing that.
badsteps = significant_lps * (lp_diff > 1)
bad_idxs = np.arange(len(lps)-1)[badsteps]
# if we've already refined a lot at a particular bad idx,
# just give up since there's probably a genuine discontinuity there
c_diff = np.abs(np.diff(candidates))
hopeless = c_diff < 1e-3
bad_idxs = [idx for idx in bad_idxs if not hopeless[idx]]
return bad_idxs
bad_idxs = bad_indices(lps, candidates)
while len(bad_idxs) > 0:
new_candidates = []
new_lps = []
for idx in bad_idxs:
c1 = candidates[idx]
c2 = candidates[idx+1]
c = c1 + (c2-c1)/2.0
new_candidates.append(c)
new_lps.append( proxylp(c))
# merge the new candidates into their sorted positions in
# the existing list
full_c = np.concatenate((candidates, new_candidates))
full_lps = np.concatenate((lps, new_lps))
perm = sorted(np.arange(len(full_c)), key = lambda i : full_c[i])
candidates = np.array(full_c[perm])
lps = np.array(full_lps[perm])
bad_idxs = bad_indices(lps, candidates)
node.set_value(v)
p = PiecewiseLinear(candidates, np.array(lps), mix_weight = prior_weight, mix_dist = priormodel(node))
return p
|
[
"dmoore@cs.berkeley.edu"
] |
dmoore@cs.berkeley.edu
|
33e0ba3e0a69e34cf7ebd41107f6f66e2889c636
|
c9293ab68d0235a1830a3634a41a5b65b4eb5d6a
|
/Lessons/Section-03/lesson_0087/main.py
|
525fed4f815ccabfb1476b840d227c9ecfdc9c6d
|
[] |
no_license
|
lipegomes/python-django-udemy-studies
|
4f836497ee10ece7ee5b40af1b636bb1c03deb75
|
938fa6a05f9505b8eaf6e7e6bc1c5e199b670432
|
refs/heads/master
| 2023-01-07T01:22:16.855346
| 2020-11-03T13:49:54
| 2020-11-03T13:49:54
| 283,852,942
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
"""
Faça uma lista de tarefas com as seguintes opções:
- Adicionar uma tarefa
- Listar tarefas
- Opção de desfazer (a5 cada vez que chamarmos, desfaz a última ação)
- Opção de refazer (a cada vez que chamarmos. refaz a última ação)
"""
def show_op(todo_list):
print()
print("Tarefas: ")
print(todo_list)
print()
def do_undo(todo_list, redo_list):
if not todo_list:
print("Nada a desfazer")
return
last_todo = todo_list.pop()
redo_list.append(last_todo)
def do_redo(todo_list, redo_list):
if not redo_list:
print("Nada a refazer")
return
last_redo = redo_list.pop()
todo_list.append(last_redo)
def do_add(todo, todo_list):
todo_list.append(todo)
if __name__ == "__main__":
todo_list = []
redo_list = []
while True:
todo = input("Digite uma tarefa ou ls,undo, redo: ")
if todo == "ls":
show_op(todo_list)
continue
elif todo == "undo":
do_undo(todo_list, redo_list)
continue
elif todo == "redo":
do_redo(todo_list, redo_list)
continue
do_add(todo, todo_list)
|
[
"fgdl.py91@gmail.com"
] |
fgdl.py91@gmail.com
|
a6967f5aeb2b2541339e96e0ff361039c1c4a1ef
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5753053697277952_1/Python/wiz4rd/solve.py
|
71ffac888475eabb0c81b27c4c0114a8c34376e6
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
# http://code.google.com/codejam/contest/4314486/dashboard#s=p0
from collections import defaultdict
from re import match
from numpy import array, argsort
from time import sleep
from numpy.linalg import matrix_rank
def biggest(a,N): return argsort(a)[::-1][:N]
def absolute(ls): s = int(sum(ls)/2);return any(e>s or e<0 for e in ls)
def read_file(fname):
res = []
with open(fname,"r") as f:
data = [l.strip() for l in f.readlines()][1:]
for N, Ps in zip(data[::2],data[1::2]):
res.append(list(map(int, Ps.split(" "))))
return res
def solve_all(fname):
problems = read_file("%s.in" % fname)
case = 1
text = ""
for p in problems:
print("Solving Case #%s" % case)
res = solve(p)
text += "Case #%s: %s\n" % (case, res)
case+=1
with open("%s.out" % fname, "w") as out:
out.write(text)
def solve(Ps):
return bt(Ps, [])
mask = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def bt(Ps, steps):
# generate alternatives
big1, big2 = biggest(Ps, 2)
res = Ps[:]
res[big1]-=1
res[big2]-=1
if absolute(res):
res[big2]+=1
steps.append(mask[big1])
else:
steps.append(mask[big1]+mask[big2])
if all(e==0 for e in res):
return " ".join(steps)
else:
return bt(res,steps)
solve_all("large")
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
4620c177df3478e05cc6d84a76cbc7bc79c80768
|
7ce479cac0a14d924159db9c784e3325b8f0bce7
|
/schemaorgschemas/Thing/Intangible/StructuredValue/PriceSpecification/PaymentChargeSpecification/__init__.py
|
cd82c29e455e399753f86c2537f0d702dd8cd6b2
|
[] |
no_license
|
EvelineAndreea/AGRe
|
1f0c27237eb047a60bbcfb8d73e3157035406409
|
b952125896a82741f6617c259dd4060954583180
|
refs/heads/master
| 2020-04-08T16:08:11.517166
| 2018-11-28T07:15:56
| 2018-11-28T07:15:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
# -*- coding: utf-8 -*-
from schemaorgschemas.Thing import potentialActionProp, descriptionProp, sameAsProp, imageProp, urlProp, mainEntityOfPageProp, additionalTypeProp, alternateNameProp, nameProp
from schemaorgschemas.Thing.Intangible.StructuredValue.PriceSpecification import validFromProp, priceCurrencyProp, priceProp, maxPriceProp, eligibleTransactionVolumeProp, valueAddedTaxIncludedProp, eligibleQuantityProp, validThroughProp, minPriceProp
from schemaorgschemas.djangoschema import SchemaObject, SchemaProperty, SchemaEnumProperty, SCHEMA_ORG
from django.conf import settings
class PaymentChargeSpecificationSchema(SchemaObject):
"""Schema Mixin for PaymentChargeSpecification
Usage: place after django model in class definition, schema will return the schema.org url for the object
The costs of settling the payment using a particular payment method.
"""
def __init__(self):
self.schema = 'PaymentChargeSpecification'
class appliesToDeliveryMethodProp(SchemaProperty):
"""
SchemaField for appliesToDeliveryMethod
Usage: Include in SchemaObject SchemaFields as your_django_field = appliesToDeliveryMethodProp()
schema.org description:The delivery method(s) to which the delivery charge or payment charge specification applies.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
used to reference DeliveryMethod"""
_prop_schema = 'appliesToDeliveryMethod'
_expected_schema = 'DeliveryMethod'
_enum = False
_format_as = "ForeignKey"
class appliesToPaymentMethodProp(SchemaProperty):
"""
SchemaField for appliesToPaymentMethod
Usage: Include in SchemaObject SchemaFields as your_django_field = appliesToPaymentMethodProp()
schema.org description:The payment method(s) to which the payment charge specification applies.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
used to reference PaymentMethod"""
_prop_schema = 'appliesToPaymentMethod'
_expected_schema = 'PaymentMethod'
_enum = False
_format_as = "ForeignKey"
# schema.org version 2.0
|
[
"mihai.nechita95@gmail.com"
] |
mihai.nechita95@gmail.com
|
2ddc5ca7f522f8159ec1ca17599d40bf2c4eca88
|
912c4445e7041869d1c8535a493b78d7ee35424b
|
/status/api/tests.py
|
fc4d45246e3fead061e60c2bd54215da6084c563
|
[] |
no_license
|
maltezc/Udemy-DjangoRestAPI
|
3f243ec97ea5e8e9d6ddc2005986b6a05aa11097
|
de6f885cf0cddaf22fb6fd72d18fc805b9ce48d2
|
refs/heads/master
| 2022-12-14T06:04:43.011691
| 2018-08-05T01:10:17
| 2018-08-05T01:10:17
| 140,590,753
| 0
| 0
| null | 2022-11-22T02:48:04
| 2018-07-11T14:56:08
|
Python
|
UTF-8
|
Python
| false
| false
| 7,199
|
py
|
import os
import shutil # shell utility method
import tempfile
from PIL import Image # pip install pillow
from django.urls import reverse
from rest_framework import status
from rest_framework.reverse import reverse as api_reverse
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from django.conf import settings
from rest_framework_jwt.settings import api_settings
from status.models import Status
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
class StatusAPITestCase(APITestCase):
def setUp(self):
user = User.objects.create(username='testcfeuser', email='hello@cfe.com')
user.set_password("yeahhhcfe")
user.save()
status_obj = Status.objects.create(user=user, content='Hello There!')
def _test_statuses(self):
self.assertEqual(Status.objects.count(), 1)
def status_user_token(self):
auth_url = api_reverse('api-auth:login')
auth_data = {
'username': 'testcfeuser',
'password': 'yeahhhcfe',
}
auth_response = self.client.post(auth_url, auth_data, format='json')
token = auth_response.data.get("token", 0)
self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
def create_item(self):
self.status_user_token() # calls status_user_token above instead of repeating the code
url = api_reverse('api-status:list')
data = {
'content': 'some cool test content'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Status.objects.count(), 2)
return response.data
def test_empty_create_item(self):
self.status_user_token() # calls status_user_token above instead of repeating the code
url = api_reverse('api-status:list')
data = {
'content': None,
'image': None,
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
return response.data
def test_status_create_with_image(self):
self.status_user_token()
url = api_reverse('api-status:list')
# (w, h) = (800, 1200)
# (255, 255, 255)
image_item = Image.new('RGB', (800, 1280), (0, 124, 174))
tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')
image_item.save(tmp_file, format='JPEG')
with open(tmp_file.name, 'rb') as file_obj:
data = {
'content': "come cool test content",
'image': file_obj
}
response = self.client.post(url, data, format='multipart') # multipart allows you to handle data coming through
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Status.objects.count(), 2)
temp_img_dir = os.path.join(settings.MEDIA_ROOT, 'status', 'testcfeuser')
if os.path.exists(temp_img_dir):
shutil.rmtree(temp_img_dir)
def test_status_create_with_image_and_desc(self):
self.status_user_token()
url = api_reverse('api-status:list')
# (w, h) = (800, 1200)
# (255, 255, 255)
image_item = Image.new('RGB', (800, 1280), (0, 124, 174))
tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')
image_item.save(tmp_file, format='JPEG')
with open(tmp_file.name, 'rb') as file_obj:
data = {
'content': None,
'image': file_obj
}
response = self.client.post(url, data, format='multipart') # multipart allows you to handle data coming through
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
print(response.data)
img_data = response.data.get('image')
self.assertNotEqual(img_data, None)
self.assertEqual(Status.objects.count(), 2)
temp_img_dir = os.path.join(settings.MEDIA_ROOT, 'status', 'testcfeuser')
if os.path.exists(temp_img_dir):
shutil.rmtree(temp_img_dir)
def test_status_create(self):
data = self.create_item()
data_id = data.get("id")
rud_url = api_reverse('api-status:detail', kwargs={"id": data_id})
rud_data = {
'content': 'another new content'
}
'''
get method / retrieve
'''
get_response = self.client.get(rud_url, format='json')
self.assertEqual(get_response.status_code, status.HTTP_200_OK)
def test_status_update(self):
data = self.create_item()
data_id = data.get("id")
rud_url = api_reverse('api-status:detail', kwargs={"id": data_id})
rud_data = {
'content': 'another new content'
}
'''
put / update
'''
put_response = self.client.put(rud_url, rud_data, format='json')
self.assertEqual(put_response.status_code, status.HTTP_200_OK)
rud_response_data = put_response.data
self.assertEqual(rud_response_data['content'], rud_data['content'])
def test_status_delete(self):
data = self.create_item()
data_id = data.get("id")
rud_url = api_reverse('api-status:detail', kwargs={"id": data_id})
rud_data = {
'content': 'another new content'
}
'''
delete method
'''
del_response = self.client.delete(rud_url, format='json')
self.assertEqual(del_response.status_code, status.HTTP_204_NO_CONTENT)
'''
Not Found
'''
get_response = self.client.delete(rud_url, format='json')
self.assertEqual(get_response.status_code, status.HTTP_404_NOT_FOUND)
def test_status_no_token_create(self):
url = api_reverse('api-status:list')
data = {
'content': 'some cool test content'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_other_user_permissions_api(self):
data = self.create_item()
data_id = data.get("id")
user = User.objects.create(username='userjmitch')
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
self.client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
rud_url = api_reverse('api-status:detail', kwargs={'id':data_id})
rud_data = {
'content':"smashing"
}
get_ = self.client.get(rud_url, format='json')
put_ = self.client.put(rud_url, rud_data, format='json')
delete_ = self.client.delete(rud_url, format='json')
self.assertEqual(get_.status_code, status.HTTP_200_OK)
self.assertEqual(put_.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(delete_.status_code, status.HTTP_403_FORBIDDEN)
|
[
"cflux.maltez@live.com"
] |
cflux.maltez@live.com
|
918ad7bb0117b30c6486fbc80d1fd1b193eca18c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_393/ch118_2020_10_04_20_23_11_549205.py
|
0e12279bb35457c3ba0953ad701ea15292ec51a7
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
import math
def reflexao_total_interna(n1,n2,teta2):
y = (n1/n2)*math.sin(math.radians(teta2))
if y > 1:
return True
else:
return False
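# Illustrative check (example values assumed, not part of the exercise):
# going from water (n1=1.33) into air (n2=1.0) at 60 degrees gives
# 1.33*sin(60°) ≈ 1.15 > 1, so total internal reflection occurs:
# print(reflexao_total_interna(1.33, 1.0, 60)) # True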
|
[
"you@example.com"
] |
you@example.com
|
04d560051f784a7adef1e2450c98a8917ecc9863
|
297efd4afeb46c0b56d9a975d76665caef213acc
|
/src/multiplicity/migrations/0044_referencespacelocation_active.py
|
c685d927bce6613caf9aa25e774e3ac226361258
|
[
"MIT"
] |
permissive
|
metabolism-of-cities/metabolism-of-cities-platform-v3
|
67716c3daae86a0fe527c18aef26ce29e069cbcc
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
refs/heads/master
| 2022-12-06T22:56:22.207853
| 2020-08-25T09:53:51
| 2020-08-25T09:53:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Generated by Django 2.1.3 on 2019-03-04 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('multiplicity', '0043_auto_20190209_1723'),
]
operations = [
migrations.AddField(
model_name='referencespacelocation',
name='active',
field=models.BooleanField(db_index=True, default=True),
),
]
|
[
"paul@penguinprotocols.com"
] |
paul@penguinprotocols.com
|
a02e962f1c5d82a41e748de8fb1d38b119166c0c
|
2f557f60fc609c03fbb42badf2c4f41ef2e60227
|
/CondTools/Ecal/python/copyTrivialAlignEB_cfg.py
|
c690529c0fa086e7e9df506f16670a72c30d21b9
|
[
"Apache-2.0"
] |
permissive
|
CMS-TMTT/cmssw
|
91d70fc40a7110832a2ceb2dc08c15b5a299bd3b
|
80cb3a25c0d63594fe6455b837f7c3cbe3cf42d7
|
refs/heads/TMTT_1060
| 2020-03-24T07:49:39.440996
| 2020-03-04T17:21:36
| 2020-03-04T17:21:36
| 142,576,342
| 3
| 5
|
Apache-2.0
| 2019-12-05T21:16:34
| 2018-07-27T12:48:13
|
C++
|
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("CalibCalorimetry.EcalTrivialCondModules.EcalTrivialCondRetriever_cfi")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
#process.CondDBCommon.connect = 'oracle://cms_orcoff_prep/CMS_COND_ECAL'
#process.CondDBCommon.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb/'
process.CondDBCommon.connect = 'sqlite_file:EBAlign.db'
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('*'),
destinations = cms.untracked.vstring('cout')
)
process.source = cms.Source("EmptyIOVSource",
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
interval = cms.uint64(1)
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
timetype = cms.untracked.string('runnumber'),
toPut = cms.VPSet(
cms.PSet(
record = cms.string('EBAlignmentRcd'),
tag = cms.string('EBAlignment_zero_offline')
)
)
)
process.dbCopy = cms.EDAnalyzer("EcalDBCopy",
timetype = cms.string('runnumber'),
toCopy = cms.VPSet(
cms.PSet(
record = cms.string('EBAlignmentRcd'),
container = cms.string('EBAlignment')
)
)
)
process.prod = cms.EDAnalyzer("EcalTrivialObjectAnalyzer")
process.p = cms.Path(process.prod*process.dbCopy)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
3c7381700dd51e46681693cb02fba56bbe79e2f3
|
cea30aead7f0b529ee072c1bcab2896777e1408d
|
/PreprocessingCropsData/venv/Lib/site-packages/mpl_toolkits/tests/__init__.py
|
d5bd2047c484dceb46dbd677b7d1236edf7ab7ae
|
[] |
no_license
|
pgj9702/FarmSolution
|
3730ab3ca983b335ed48a60935c5fa6e3983cbb1
|
a8cacc45b8519e79b51ab65b9539a01f5006e64f
|
refs/heads/master
| 2023-03-30T15:41:10.312044
| 2021-03-31T08:47:23
| 2021-03-31T08:47:23
| 334,019,778
| 0
| 1
| null | 2021-02-22T09:32:57
| 2021-01-29T02:52:46
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
from pathlib import Path
# Check that the test directories exist
if not (Path(__file__).parent / "baseline_images").exists():
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test area_data is not installed. '
'You may need to install matplotlib from source to get the '
'test area_data.')
|
[
"cao147147@naver.com"
] |
cao147147@naver.com
|
5bdcc6cf975d1c91609041403c71ecffcb959e0c
|
62def70e2d802375b1ad28b0ac85fee2010ee0a9
|
/displays/ws2801/rainbow.py
|
f9bd668f5edc1adab7f890ed626b53de332c52fb
|
[] |
no_license
|
MarkAYoder/BeagleBoard-exercises
|
c48028b6e919d8c04dedfd2040a133c760f0f567
|
2fab7c7f7aa09bf101168dfb279e690bc43a6514
|
refs/heads/master
| 2023-07-22T08:06:19.482358
| 2023-07-12T19:24:51
| 2023-07-12T19:24:51
| 5,111,513
| 48
| 41
| null | 2021-07-29T18:02:29
| 2012-07-19T15:07:14
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
#!/usr/bin/env python
import time
import math
import sys
from LedStrip_WS2801 import LedStrip_WS2801
amp = 10
f = 10
shift = 3
def fillAll(ledStrip, color, sleep):
for i in range(0, ledStrip.nLeds):
ledStrip.setPixel(i, color)
ledStrip.update()
def rainbow(ledStrip, nrOfleds):
phase = 0
skip = 60
for i in range(nrOfleds-skip, nrOfleds):
ledStrip.setPixel(i, [0, 0, 0])
while True:
for i in range(0, nrOfleds-skip):
r = int((amp * (math.sin(2*math.pi*f*(i-phase-0*shift)/nrOfleds) + 1)) + 1)
g = int((amp * (math.sin(2*math.pi*f*(i-phase-1*shift)/nrOfleds) + 1)) + 1)
b = int((amp * (math.sin(2*math.pi*f*(i-phase-2*shift)/nrOfleds) + 1)) + 1)
ledStrip.setPixel(i, [r, g, b])
ledStrip.update()
phase = phase + 0.5
time.sleep(0.050)
if __name__ == '__main__':
if len(sys.argv) == 1:
nrOfleds = 240
else:
nrOfleds = int(sys.argv[1])
delayTime = 0.0
ledStrip = LedStrip_WS2801(nrOfleds)
rainbow(ledStrip, nrOfleds)
|
[
"Mark.A.Yoder@Rose-Hulman.edu"
] |
Mark.A.Yoder@Rose-Hulman.edu
|
23469463dfbc6631814a0468add53c75df07336e
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/problems/0572.0_Subtree_of_Another_Tree.py
|
04f22ba2c1aebcb28258a3c091754703156231d3
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
'''
Success
Details
Runtime: 340 ms, faster than 10.24% of Python online submissions for Subtree of Another Tree.
Memory Usage: 14.6 MB, less than 53.07% of Python online submissions for Subtree of Another Tree.
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def isSubtree(self, s, t):
"""
:type s: TreeNode
:type t: TreeNode
:rtype: bool
"""
if self.isIdentical(s, t):
return True
if s.left and self.isSubtree(s.left, t):
return True
if s.right and self.isSubtree(s.right, t):
return True
return False
def isIdentical(self, node1, node2):
if not node1 and not node2:
return True
elif not node1 or not node2:
return False
if node1.val != node2.val:
return False
return self.isIdentical(node1.left, node2.left) and self.isIdentical(node1.right, node2.right)
|
[
"lixiang@rxthinking.com"
] |
lixiang@rxthinking.com
|
6b070f4656d6fd36f68bc0dd598ac9f15b9f3123
|
a7aabc5bd71b5ef6bdddee9908efcc840930e13c
|
/tests/testapp/tests/test_utils.py
|
509c0a384f223b47432eab62359e52d05cb6b9c5
|
[
"BSD-2-Clause"
] |
permissive
|
enterstudio/towel
|
b7567261b325d19d621af126553ac33350f9a927
|
6892788527b8a111cbf5963e909964aabc96d740
|
refs/heads/master
| 2021-07-05T05:49:11.654374
| 2016-11-21T08:43:41
| 2016-11-21T08:43:41
| 85,775,854
| 0
| 0
|
NOASSERTION
| 2021-07-03T01:07:07
| 2017-03-22T02:25:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,733
|
py
|
from __future__ import absolute_import, unicode_literals
from django.template import Template, Context
from django.test import TestCase
from towel.utils import (
related_classes, safe_queryset_and, tryreverse, substitute_with)
from testapp.models import Person, EmailAddress
class UtilsTest(TestCase):
def test_related_classes(self):
"""Test the functionality of towel.utils.related_classes"""
person = Person.objects.create(
family_name='Muster',
given_name='Hans',
)
EmailAddress.objects.create(
person=person,
email='hans@example.com',
)
self.assertEqual(
set(related_classes(person)),
set((Person, EmailAddress)),
)
def test_safe_queryset_and(self):
class AnyException(Exception):
pass
def _transform_nothing(queryset):
raise AnyException
qs1 = EmailAddress.objects.search('blub').transform(
_transform_nothing).select_related()
qs2 = EmailAddress.objects.distinct().reverse().select_related(
'person')
qs3 = EmailAddress.objects.all()
qs = safe_queryset_and(safe_queryset_and(qs1, qs2), qs3)
self.assertEqual(qs._transform_fns, [_transform_nothing])
self.assertFalse(qs.query.standard_ordering)
self.assertEqual(qs.query.select_related, {'person': {}})
self.assertTrue(qs.query.distinct)
self.assertEqual(qs.count(), 0)
self.assertRaises(AnyException, list, qs)
qs = safe_queryset_and(
EmailAddress.objects.select_related(),
EmailAddress.objects.select_related(),
)
self.assertTrue(qs.query.select_related)
self.assertFalse(qs.query.distinct)
qs = safe_queryset_and(
EmailAddress.objects.all(),
EmailAddress.objects.select_related(),
)
self.assertTrue(qs.query.select_related)
def test_tryreverse(self):
self.assertEqual(tryreverse('asdf42'), None)
self.assertEqual(tryreverse('admin:index'), '/admin/')
def test_substitute_with(self):
p1 = Person.objects.create()
p2 = Person.objects.create()
p1.emailaddress_set.create()
p1.emailaddress_set.create()
p1.emailaddress_set.create()
p2.emailaddress_set.create()
p2.emailaddress_set.create()
self.assertEqual(Person.objects.count(), 2)
self.assertEqual(EmailAddress.objects.count(), 5)
substitute_with(p1, p2)
p = Person.objects.get()
self.assertEqual(p2, p)
self.assertEqual(EmailAddress.objects.count(), 5)
def test_template_tag_helpers(self):
testcases = [
('', ''),
('{% testtag %}', 'ARGS: KWARGS:'),
('{% testtag 3 4 5 %}', 'ARGS: 3,4,5 KWARGS:'),
('{% testtag 3 "4" 5 %}', 'ARGS: 3,4,5 KWARGS:'),
('{% testtag abcd "42" %}', 'ARGS: yay,42 KWARGS:'),
('{% testtag "abcd" "42" %}', 'ARGS: abcd,42 KWARGS:'),
('{% testtag "abcd" "42" a=b %}', 'ARGS: abcd,42 KWARGS: a='),
('{% testtag "abcd" a="b" "42" %}', 'ARGS: abcd,42 KWARGS: a=b'),
('{% testtag bla="blub" blo="blob" %}',
'ARGS: KWARGS: bla=blub,blo=blob'),
('{% testtag bla=blub blo="blob" %}',
'ARGS: KWARGS: bla=blubber,blo=blob'),
]
for test, result in testcases:
t = Template('{% load testapp_tags %}' + test)
self.assertHTMLEqual(t.render(Context({
'abcd': 'yay',
'bla': 'blaaa',
'blub': 'blubber',
})), result)
|
[
"mk@spinlock.ch"
] |
mk@spinlock.ch
|
b8a6d4b5c488151244256500ffaab2184cecdabc
|
c6eb52478346d4c0b272035f45f62f6b1dccf2c3
|
/data_science_py/26_spark/lambda_expressions.py
|
e961135b4db95e1a1a503192e7ba57b3086ce7a9
|
[] |
no_license
|
antichown/udemy_courses
|
88732eea17eac8614152aa815d57c64fa54d0104
|
d308fe478a67cb7fc395d99d798ac58fdc1f58c4
|
refs/heads/master
| 2022-11-06T22:28:08.118570
| 2020-07-17T02:00:19
| 2020-07-17T02:00:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
#!/usr/bin/env python
# coding: utf-8
# #lambda expressions
#
# One of Python's most useful (and, for beginners, most confusing) tools is the lambda
# expression. lambda expressions allow us to create "anonymous" functions. This
# basically means we can quickly make ad-hoc functions without needing to properly
# define a function using def.
#
# Function objects returned by running lambda expressions work exactly the same
# as those created and assigned by def. There is a key difference that makes lambda
# useful in specialized roles:
#
# **lambda's body is a single expression, not a block of statements.**
#
# * The lambda's body is similar to what we would put in a def body's return
# statement. We simply type the result as an expression instead of explicitly
# returning it. Because it is limited to a single expression, a lambda is less
# general than a def: we can only squeeze so much logic into it. lambda is
# designed for coding simple functions, and def handles the larger tasks.
#
# Let's slowly break down a lambda expression by deconstructing a function:
def square(num):
result = num**2
return result
print(square(2))
# Continuing the breakdown:
def square2(num):
return num**2
print(square2(2))
# We can actually write this in one line (although it would be bad style to do so)
def square3(num): return num**2
print(square3(2))
# This is the form of function that a lambda expression intends to replicate. A
# lambda expression can then be written as:
print(lambda num: num**2)
# Note how we get a function back. We can assign this function to a label:
square4 = lambda num: num**2
print(square4(2))
# And there you have it! The breakdown of a function into a lambda expression!
# Let's see a few more examples:
#
# ##Example 1
# Check if a number is even
even = lambda x: x%2==0
print(even(3))
print(even(4))
# ##Example 2
# Grab first character of a string:
first = lambda s: s[0]
print(first('hello'))
# ##Example 3
# Reverse a string:
rev = lambda s: s[::-1]
print(rev('hello'))
# ##Example 4
# Just like a normal function, we can accept more than one argument in a lambda expression:
adder = lambda x,y : x+y
print(adder(2,3))
# lambda expressions really shine when used in conjunction with map(), filter()
# and reduce(). Each of those functions has its own lecture, so feel free to explore
# them if you're very interested in lambda.
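# A quick extra sketch: lambda paired with map() and filter() on a small list.
nums = [1, 2, 3, 4, 5]
print(list(map(lambda x: x**2, nums)))           # [1, 4, 9, 16, 25]
print(list(filter(lambda x: x % 2 == 0, nums)))  # [2, 4]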
# I highly recommend reading this blog post at [Python Conquers the Universe]
# (https://pythonconquerstheuniverse.wordpress.com/2011/08/29/lambda_tutorial/)
# for a great breakdown on lambda expressions and some explanations of common confusions!
|
[
"ec2-user@ip-172-31-91-31.ec2.internal"
] |
ec2-user@ip-172-31-91-31.ec2.internal
|
2874f74de9f3de37544a1e61636acc55e31149f1
|
67d94cea8a4e48683e74ad7da26ab4b02ae37c19
|
/demo/services/qotm.py
|
ab405266a3e4504b3d51242754b2179293a01958
|
[
"Apache-2.0"
] |
permissive
|
smoshtaghi/ambassador
|
fa39ec86acdde3b76706e37a5273c252b62fda66
|
f653780befd65d72f955e94f5fac146d8794c712
|
refs/heads/master
| 2020-05-02T09:52:45.928453
| 2019-03-26T20:22:35
| 2019-03-26T20:22:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,956
|
py
|
#!/usr/bin/env python
from flask import Flask, jsonify, request, Response
import datetime
import functools
import logging
import os
import random
import signal
import time
__version__ = "0.0.1"
PORT = int(os.getenv("PORT", "5000"))
HOSTNAME = os.getenv("HOSTNAME")
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
app = Flask(__name__)
# Quote storage
#
# Obviously, this would more typically involve a persistent backing store. That's not
# really needed for a demo though.
quotes = [
"Abstraction is ever present.",
"A late night does not make any sense.",
"A principal idea is omnipresent, much like candy.",
"Nihilism gambles with lives, happiness, and even destiny itself!",
"The light at the end of the tunnel is interdependent on the relatedness of motivation, subcultures, and management.",
"Utter nonsense is a storyteller without equal.",
"Non-locality is the driver of truth. By summoning, we vibrate.",
"A small mercy is nothing at all?",
"The last sentence you read is often sensible nonsense.",
"668: The Neighbor of the Beast."
]
# Utilities
class RichStatus (object):
def __init__(self, ok, **kwargs):
self.ok = ok
self.info = kwargs
self.info['hostname'] = HOSTNAME
self.info['time'] = datetime.datetime.now().isoformat()
self.info['version'] = __version__
# Remember that __getattr__ is called only as a last resort if the key
# isn't a normal attr.
def __getattr__(self, key):
return self.info.get(key)
def __bool__(self):
return self.ok
def __nonzero__(self):
return bool(self)
def __contains__(self, key):
return key in self.info
def __str__(self):
attrs = ["%s=%s" % (key, self.info[key])
for key in sorted(self.info.keys())]
astr = " ".join(attrs)
if astr:
astr = " " + astr
return "<RichStatus %s%s>" % ("OK" if self else "BAD", astr)
def toDict(self):
d = {'ok': self.ok}
for key in self.info.keys():
d[key] = self.info[key]
return d
@classmethod
def fromError(self, error, **kwargs):
kwargs['error'] = error
return RichStatus(False, **kwargs)
@classmethod
def OK(self, **kwargs):
return RichStatus(True, **kwargs)
template = '''
<HTML><HEAD><Title>{title}</Title></Head>
<BODY>
<P><span style="color: {textcolor}">{message}</span><P>
</BODY>
</HTML>
'''
def standard_handler(f):
func_name = getattr(f, '__name__', '<anonymous>')
@functools.wraps(f)
def wrapper(*args, **kwds):
rc = RichStatus.fromError("impossible error")
session = request.headers.get('x-qotm-session', None)
username = request.headers.get('x-authenticated-as', None)
logging.debug("%s %s: session %s, username %s, handler %s" %
(request.method, request.path, session, username, func_name))
headers_string = ', '.join("{!s}={!r}".format(key, val)
for (key, val) in request.headers.items())
logging.debug("headers: %s" % (headers_string))
try:
rc = f(*args, **kwds)
except Exception as e:
logging.exception(e)
rc = RichStatus.fromError("%s: %s %s failed: %s" % (
func_name, request.method, request.path, e))
code = 200
# This, candidly, is a bit of a hack.
if session:
rc.info['session'] = session
if username:
rc.info['username'] = username
if not rc:
if 'status_code' in rc:
code = rc.status_code
else:
code = 500
if rc.json:
resp = jsonify(rc.toDict())
resp.status_code = code
else:
info = {
'title': "Quote of the Moment %s" % __version__,
'textcolor': 'pink',
'message': 'This moment is inadequate for a quote.',
}
if rc:
info['textcolor'] = 'black'
info['message'] = rc.quote
else:
info['textcolor'] = 'red'
info['message'] = rc.error
resp = Response(template.format(**info), code)
if session:
resp.headers['x-qotm-session'] = session
return resp
return wrapper
# REST endpoints
####
# GET /health does a basic health check. It always returns a status of 200
# with an empty body.
@app.route("/health", methods=["GET", "HEAD"])
@standard_handler
def health():
return RichStatus.OK(msg="QotM health check OK")
####
# GET / returns a random quote as the 'quote' element of a JSON dictionary. It
# always returns a status of 200.
@app.route("/", methods=["GET"])
@standard_handler
def statement():
return RichStatus.OK(quote=random.choice(quotes),
json=request.args.get('json', False))
####
# GET /quote/quoteid returns a specific quote. 'quoteid' is the integer index
# of the quote in our array above.
#
# - If all goes well, it returns a JSON dictionary with the requested quote as
# the 'quote' element, with status 200.
# - If something goes wrong, it returns a JSON dictionary with an explanation
# of what happened as the 'error' element, with status 400.
#
# PUT /quote/quotenum updates a specific quote. It requires a JSON dictionary
# as the PUT body, with the new quote contained in the 'quote' dictionary
# element.
#
# - If all goes well, it returns the new quote as if you'd requested it using
# the GET verb for this endpoint.
# - If something goes wrong, it returns a JSON dictionary with an explanation
# of what happened as the 'error' element, with status 400.
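#
# A hypothetical client sketch (assuming the service runs on localhost:5000;
# the requests library is an extra dependency, not used by the demo itself):
#
#   import requests
#   requests.get("http://localhost:5000/quote/1?json=true").json()
#   requests.put("http://localhost:5000/quote/1",
#                json={"quote": "Brevity is the soul of wit."}).json()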
@app.route("/quote/<idx>", methods=["GET", "PUT"])
@standard_handler
def specific_quote(idx):
try:
idx = int(idx)
except ValueError:
return RichStatus.fromError("quote IDs must be numbers", status_code=400)
if (idx < 0) or (idx >= len(quotes)):
return RichStatus.fromError("no quote ID %d" % idx, status_code=400)
if request.method == "PUT":
j = request.json
if (not j) or ('quote' not in j):
return RichStatus.fromError("must supply 'quote' via JSON dictionary", status_code=400)
quotes[idx] = j['quote']
return RichStatus.OK(quote=quotes[idx],
json=request.args.get('json', False))
####
# POST /quote adds a new quote to our list. It requires a JSON dictionary
# as the POST body, with the new quote contained in the 'quote' dictionary
# element.
#
# - If all goes well, it returns a JSON dictionary with the new quote's ID as
# 'quoteid', and the new quote as 'quote', with a status of 200.
# - If something goes wrong, it returns a JSON dictionary with an explanation
# of what happened as the 'error' element, with status 400.
@app.route("/quote", methods=["POST"])
@standard_handler
def new_quote():
j = request.json
if (not j) or ('quote' not in j):
return RichStatus.fromError("must supply 'quote' via JSON dictionary", status_code=400)
quotes.append(j['quote'])
idx = len(quotes) - 1
return RichStatus.OK(quote=quotes[idx], quoteid=idx)
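####
# Example interactions (illustrative only; assumes the service listens on
# localhost at the PORT configured above):
#
#   curl http://localhost:$PORT/             # random quote as JSON
#   curl http://localhost:$PORT/quote/1      # quote with ID 1
#   curl -X PUT  -H 'Content-Type: application/json' \
#        -d '{"quote": "Fresh text for quote 1"}' http://localhost:$PORT/quote/1
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"quote": "A brand new quote"}' http://localhost:$PORT/quote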
@app.route("/crash", methods=["GET"])
@standard_handler
def crash():
logging.warning("dying in 1 seconds")
time.sleep(1)
os.kill(os.getpid(), signal.SIGTERM)
time.sleep(1)
os.kill(os.getpid(), signal.SIGKILL)
# Mainline
def main():
app.run(debug=True, host="0.0.0.0", port=PORT)
if __name__ == "__main__":
logging.basicConfig(
# filename=logPath,
level=LOG_LEVEL, # if appDebug else logging.INFO,
format="%%(asctime)s demo-qotm %s %%(levelname)s: %%(message)s" % __version__,
datefmt="%Y-%m-%d %H:%M:%S"
)
logging.info("initializing on %s:%d" % (HOSTNAME, PORT))
main()
|
[
"flynn@datawire.io"
] |
flynn@datawire.io
|
6f537f2a2eb98ce9355e639f4b1a40938a2975e3
|
763d2f0a40c905bc9cbcd83e21c8d716072fcf90
|
/chapter01/04.py
|
faf5d15b0528921cf23472e4699f4bf3a532a3d9
|
[] |
no_license
|
s19014/ProgrammingTraining2
|
c707dc0a9dc1f4678f91fc05ded6bb1419db4f7a
|
c28b452d0a52c0e8481731bd1cda6b1aba88228d
|
refs/heads/master
| 2022-11-08T18:27:36.692911
| 2020-06-29T03:36:51
| 2020-06-29T03:36:51
| 262,897,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
'''
Split the sentence "Hi He Lied Because Boron Could Not Oxidize Fluorine. New
Nations Might Also Sign Peace Security Clause. Arthur King Can." into words.
For the 1st, 5th, 6th, 7th, 8th, 9th, 15th, 16th and 19th words extract the
first character; for every other word extract the first two characters. Then
build an associative array (a dict or map) from the extracted strings to each
word's position (i.e. which word it is, counting from the start).
'''
text = '''Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.'''
text_list = text.split()
answer = {}
for index, word in enumerate(text_list, 1):
if index in [1, 5, 6, 7, 8, 9, 15, 16, 19]:
answer[index] = word[0]
else:
answer[index] = word[:2]
print(answer)
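# Expected output (abridged) -- the extracted strings spell element symbols:
# {1: 'H', 2: 'He', 3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne', ...}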
|
[
"s19014@std.it-college.ac.jp"
] |
s19014@std.it-college.ac.jp
|
d1b8b380409305b112615cc2f6a3b1f200a89b38
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_268/ch34_2020_03_29_20_16_22_997795.py
|
7ac92c7d8b2fdc1116c6e200a907b413c1ee9f71
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
def eh_primo(n) :
if (n <= 1) :
return False
if (n <= 3) :
return True
if (n % 2 == 0 or n % 3 == 0) :
return False
i = 5
while(i * i <= n) :
if (n % i == 0 or n % (i + 2) == 0) :
return False
i = i + 6
return True
def maior_primo_menor_que(n):
a=0
b=n-1
if n<=1:
return -1
    while a < n:
if eh_primo(b):
return b
a+=1
b-=1
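# Example usage (illustrative):
#   maior_primo_menor_que(100) -> 97
#   maior_primo_menor_que(1)   -> -1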
|
[
"you@example.com"
] |
you@example.com
|
ab59db052bdf4934ce86436713e6a01207d89d3c
|
7b750c5c9df2fb05e92b16a43767c444404de7ae
|
/src/leetcode/python3/leetcode735.py
|
ba41f98ebc046b632377456faffcf0601cd24c7a
|
[] |
no_license
|
renaissance-codes/leetcode
|
a68c0203fe4f006fa250122614079adfe6582d78
|
de6db120a1e709809d26e3e317c66612e681fb70
|
refs/heads/master
| 2022-08-18T15:05:19.622014
| 2022-08-05T03:34:01
| 2022-08-05T03:34:01
| 200,180,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from typing import List
"""
Asteroid collision
"""
# Brute-force solution, 292 ms
class Solution:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
tasteroids = asteroids
result = []
change = True
while change:
for x in tasteroids:
if len(result) == 0:
result.append(x)
else:
p = result.pop()
if p > 0 and x < 0:
if p > -x:
result.append(p)
elif p < -x:
result.append(x)
else:
result.append(p)
result.append(x)
if len(result) < len(tasteroids):
tasteroids = result
result = []
else:
change = False
return result
# Same idea with the loop placed differently (stack-based, one pass)
class Solution2:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
result = []
for x in asteroids:
while len(result) and result[-1] > 0 and x < 0:
p = result.pop()
if p > -x:
x = p
elif p == -x:
break
else:
result.append(x)
return result
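# Examples (illustrative):
#   Solution2().asteroidCollision([5, 10, -5]) -> [5, 10]
#   Solution2().asteroidCollision([8, -8])     -> []
#   Solution2().asteroidCollision([10, 2, -5]) -> [10]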
|
[
"jack.li@eisoo.com"
] |
jack.li@eisoo.com
|
b7b874acea85c0c1c0b468fa63bfcdec322d8e33
|
d4a874792cc86f64dba859392deacd9e7e6721fc
|
/monitor.py
|
01ae60fc559113881e40b008bc3cc7858202075c
|
[] |
no_license
|
tarungoyal1/python_scripts
|
a1b0e725813a0277b812f75a73fac330d92405cb
|
ac362c753d61c3430c46863235bb263ecc62a053
|
refs/heads/master
| 2020-03-28T14:23:26.896106
| 2018-11-13T00:49:17
| 2018-11-13T00:49:17
| 148,483,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
import time
import glob
import shutil
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class MyHandler(FileSystemEventHandler):
def on_created(self, event):
folder = "10_Practical Uses of S3"
current = glob.glob("*.mp4")
if len(current)!=0:
path = "S3_Master_class/"+folder+"/"
allFiles = glob.glob(path+"*.mp4")
count = len(allFiles)
currentname = current[0]
shutil.copy2(currentname, path+str(count+1)+"_"+currentname)
shutil.os.remove(currentname)
# print ("Got it!")
if __name__ == "__main__":
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path='.', recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
[
"tarun13317@gmail.com"
] |
tarun13317@gmail.com
|
b1a813aef6ba473bea7a5c2fd78a298593eb2d7a
|
cb70b467312f2fb8f8415bac03476d207acb990d
|
/study_case/crawler_7x24_study3.py
|
fe7a0ea1a66fd9f850e00e2ec518e92fe0c5fcf8
|
[] |
no_license
|
eddiewang-wgq/python-interface
|
ded532dbad1dc943420823e91ba5f00637fa978e
|
d232cfc3a7ffc27f8f186d577265bc93e89b9b54
|
refs/heads/master
| 2023-03-27T00:52:50.018082
| 2021-03-30T07:53:08
| 2021-03-30T07:53:08
| 352,916,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@Author : pgsheng
@Time : 2018/8/13 9:31
"""
import sys
import time
import pandas
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEnginePage
from PyQt5.QtWidgets import QApplication
from bs4 import BeautifulSoup
from public import config
class Sina_7x24(QWebEnginePage):
def __init__(self):
self.is_first = True
self.html = ''
self.task_time = []
self.task_info = []
self.app = QApplication(sys.argv) # PyQt5
QWebEnginePage.__init__(self) # PyQt5
def _sina(self):
url = 'http://finance.sina.com.cn/7x24/'
self.loadFinished.connect(self._on_load_finished) # PyQt5
self.load(QUrl(url)) # PyQt5
self.app.exec_() # PyQt5
data_list = self._news()
if self.is_first:
for data in data_list:
self.task_time.append(data['n_time'])
self.task_info.append(data['n_info'])
print(data['n_time'], data['n_info'])
time.sleep(0.1)
self.is_first = False
else:
for data in data_list:
if data['n_time'] in self.task_time:
pass
else:
self.task_time.append(data['n_time'])
self.task_info.append(data['n_info'])
print('-' * 30)
                    print('New item:', data['n_time'], data['n_info'])
total = {'Time': self.task_time[::-1], 'Content': self.task_info[::-1]}
        # (start of each run) process the data with pandas and export it to an Excel file
df = pandas.DataFrame(total)
df.to_excel(config.study_case_path + r'data\7x24_3.xlsx', 'Sheet1')
time.sleep(15)
        self._sina()  # run again every N seconds
    def _news(self):  # fetch the news items
news_list = []
soup = BeautifulSoup(self.html, 'lxml')
info_list = soup.select('.bd_i_og')
        for info in info_list:  # the auto-refreshing news entries on the page
            n_time = info.select('p.bd_i_time_c')[0].text  # news time and content
n_info = info.select('p.bd_i_txt_c')[0].text
data = {
'n_time': n_time,
'n_info': n_info
}
news_list.append(data)
        return news_list[::-1]  # reversed so older news is printed before newer news
def _on_load_finished(self):
self.html = self.toHtml(self.callable) # PyQt5
def callable(self, html_str):
self.html = html_str
self.app.quit() # PyQt5
def start(self):
self._sina()
if __name__ == '__main__':
mw = Sina_7x24()
mw.start()
|
[
"2568080700@qq.com"
] |
2568080700@qq.com
|
69240e74f4667dcc2eca03b64939ad5d07446fa2
|
0ffb18f4d58961ca675d8294eb2154f69061989f
|
/examples/pipeliner/fastqc_pipeline_example.py
|
4a2c7ea0b317b3fd00e841734a13324343f5b38b
|
[] |
no_license
|
nandr0id/auto_process_ngs
|
a794e904e6d24b0e0403941b44c884374f95850e
|
9b09f20b344d0ee87227e8771a479aa7c04f1837
|
refs/heads/master
| 2020-06-26T03:23:53.225029
| 2019-06-12T12:11:32
| 2019-06-12T12:11:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,931
|
py
|
#!/usr/bin/env python
#
# Example pipeline to run Fastqc on one or more Fastq files
# but ignoring any with zero reads
import os
import argparse
from bcftbx.FASTQFile import nreads
from auto_process_ngs.pipeliner import PipelineTask
from auto_process_ngs.pipeliner import PipelineFunctionTask
from auto_process_ngs.pipeliner import PipelineCommandWrapper
from auto_process_ngs.pipeliner import Pipeline
class RunFastqc(PipelineTask):
# Run Fastqc on multiple files
def init(self,fastqs,out_dir):
# Inputs:
# - fastqs: list of input Fastq files
# - out_dir: where to put the Fastqc outputs
# Outputs:
# - files: list of output Fastqc HTML files
self.add_output('files',list())
def setup(self):
if not os.path.exists(self.args.out_dir):
os.mkdir(self.args.out_dir)
for fq in self.args.fastqs:
self.add_cmd(
PipelineCommandWrapper("Run FastQC",
"fastqc",
"-o",self.args.out_dir,
fq))
def finish(self):
for fq in self.args.fastqs:
if fq.endswith(".gz"):
fq = os.path.splitext(fq)[0]
out_file = os.path.join(
self.args.out_dir,
os.path.splitext(
os.path.basename(fq))[0]+"_fastqc.html")
if not os.path.exists(out_file):
self.fail(message="Missing output file: %s" % out_file)
else:
self.output.files.append(out_file)
class FilterEmptyFastqs(PipelineFunctionTask):
# Filter Fastq files based on read count
def init(self,fastqs):
self.add_output('fastqs',list())
def setup(self):
for fq in self.args.fastqs:
self.add_call("Filter out empty fastqs",
self.filter_empty_fastqs,fq)
def filter_empty_fastqs(self,*fastqs):
filtered_fastqs = list()
for fq in fastqs:
if nreads(fq) > 0:
print "%s" % fq
filtered_fastqs.append(fq)
return filtered_fastqs
def finish(self):
for result in self.result():
for fq in result:
self.output.fastqs.append(fq)
if __name__ == "__main__":
# Command line
p = argparse.ArgumentParser()
p.add_argument("fastqs",nargs='+',metavar="FASTQ")
args = p.parse_args()
# Make and run a pipeline
ppl = Pipeline()
filter_empty_fastqs = FilterEmptyFastqs("Filter empty Fastqs",
args.fastqs)
run_fastqc = RunFastqc("Run Fastqc",
filter_empty_fastqs.output.fastqs,
os.getcwd())
ppl.add_task(filter_empty_fastqs)
ppl.add_task(run_fastqc,requires=(filter_empty_fastqs,))
ppl.run()
print run_fastqc.output()
|
[
"peter.briggs@manchester.ac.uk"
] |
peter.briggs@manchester.ac.uk
|
613fefd8380bf4435858fd0857c0dfb569fafb41
|
1dfba6d8c60a534d6bdeb985697fba913da5fe9b
|
/src/mceditlib/bench/time_loadsave.py
|
52fb8a10945703382abccd3848abbce13e03efe2
|
[
"BSD-3-Clause"
] |
permissive
|
shipbiulder101/mcedit2
|
2d88a6933bac3010f5bedcdd65d542587841a19f
|
44179472b7834c803da243a82d731f9ef555764d
|
refs/heads/master
| 2021-01-12T21:52:56.581572
| 2015-10-20T21:30:34
| 2015-10-20T21:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
"""
time_loadall
"""
from __future__ import absolute_import, division, print_function
import logging
import timeit
from mceditlib.test import templevel
import gc
log = logging.getLogger(__name__)
def loadall():
ents = 0
for cPos in pos[cStart:cEnd]:
chunk = dim.getChunk(*cPos)
ents += len(chunk.Entities) + len(chunk.TileEntities)
# lc = len(editor._loadedChunks)
# if lc > 20:
# refs = gc.get_referrers(chunk)
# print("Referrers:\n%s" % refs)
# print("WorldEditor: _loadedChunks: %d (_pending_removals: %d)" % (lc, len(editor._loadedChunks._pending_removals)))
print("[Tile]Entities: ", ents)
def saveall():
for cPos in pos[cStart:cEnd]:
dim.getChunk(*cPos).dirty = True
editor.saveChanges()
import sys
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = "AnvilWorld_1.8"
editor = templevel.TempLevel(filename)
dim = editor.getDimension()
cStart = 0
cEnd = 10000
chunkCount = cEnd - cStart
pos = list(dim.chunkPositions())
loadTime = timeit.timeit(loadall, number=1)
print("Loaded %d chunks in %.02fms (%f cps)" % (chunkCount, loadTime * 1000, chunkCount/loadTime))
print("Cache hits: %d, misses: %d, rejects: %d, max rejects: %d, queue: %d" % (
editor._chunkDataCache.hits, editor._chunkDataCache.misses,
editor._chunkDataCache.rejects, editor._chunkDataCache.max_rejects,
len(editor._chunkDataCache.queue)))
print("WorldEditor: _loadedChunks: %d" % (len(editor._loadedChunks),))
#saveTime = timeit.timeit(saveall, number=1)
#print("Saved %d chunks in %.02fms (%f cps)" % (chunkCount, saveTime * 1000, chunkCount/saveTime))
|
[
"codewarrior@hawaii.rr.com"
] |
codewarrior@hawaii.rr.com
|
6f20ea4026405a9598e5facc2c46b3e34bc3f1db
|
80579d5cf31edd31750b644d6eb46a2b29ff4972
|
/CandidateApp/migrations/0004_auto_20191104_0734.py
|
392e3a7ab7e8e002f5e272f8bbfeb223b6a9d073
|
[] |
no_license
|
Nigar-mr/Vacancies
|
fb2935202488d957a4cccece0ac68a3ec052aa87
|
a8c4605e66cb4cf425abd6565b265df5b458e26d
|
refs/heads/master
| 2023-01-27T11:36:32.836172
| 2020-12-02T14:41:50
| 2020-12-02T14:41:50
| 317,890,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# Generated by Django 2.2.6 on 2019-11-04 07:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('CandidateApp', '0003_citymodel_countrymodel'),
]
operations = [
migrations.RemoveField(
model_name='candidatecv',
name='location',
),
migrations.AddField(
model_name='candidatecv',
name='city',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='CandidateApp.CityModel'),
),
migrations.AddField(
model_name='candidatecv',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='CandidateApp.CountryModel'),
),
]
|
[
"muradlinigar@gmail.com"
] |
muradlinigar@gmail.com
|
91de258357da03040a37deec4cce1b055beea036
|
4ce94e6fdfb55a889a0e7c4788fa95d2649f7bca
|
/User/apps/logreg/urls.py
|
13f5528d4db332284dfc1f9219fa72fb983eed21
|
[] |
no_license
|
HaochengYang/Django-class-assignment
|
4018d8eb0619a99ebe8c3e47346d29934aafc66b
|
cb8f920f432209f88c810407ca646ee7dec82e22
|
refs/heads/master
| 2021-06-08T20:05:22.876794
| 2016-12-19T23:39:22
| 2016-12-19T23:39:22
| 75,032,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',views.index, name="index"),
url(r'^register$',views.register, name="register"),
url(r'^login$',views.login, name="login"),
url(r'^main$',views.main, name="main"),
url(r'^logout$',views.logout, name="logout")
]
|
[
"haocheng0906@gmail.com"
] |
haocheng0906@gmail.com
|
8644cb81bf0fcfd6f2c5b8dbf1b318cdfb99784c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_370/ch120_2020_04_01_03_35_57_135848.py
|
9d294a0499ab557c0f53778c9d2f27dd6b98aebe
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
import random
dinheiro = 100
while dinheiro > 0:
print (dinheiro)
aposta= int(input("Quanto voce quer apostar? "))
if aposta != 0:
n_ou_p=input("Voce quer apostar em um numero (n) ou paridade(p)? ")
if n_ou_p == "n":
numero=int(input("Escolha um numero entre 1 e 36: "))
r1=random.randint(0,36)
if numero == r1:
dinheiro += 35 * aposta
else:
dinheiro -= aposta
else:
paridade = input("Escolha entre Par(p) ou Impar(i): ")
            r1 = random.randint(0, 36)
if paridade == 'p' and r1 % 2 == 0:
dinheiro += aposta
elif paridade == "i" and r1 % 2 != 0:
dinheiro += aposta
else:
dinheiro -= aposta
else:
dinheiro=0
|
[
"you@example.com"
] |
you@example.com
|
29a81e6881db1268d23434dc6980737b6eb640d4
|
52855d750ccd5f2a89e960a2cd03365a3daf4959
|
/ABC/ABC102_A.py
|
5b3e4fd37e2243daff35ceda1216e174bf71c576
|
[] |
no_license
|
takuwaaan/Atcoder_Study
|
b15d4f3d15d48abb06895d5938bf8ab53fb73c08
|
6fd772c09c7816d147abdc50669ec2bbc1bc4a57
|
refs/heads/master
| 2021-03-10T18:56:04.416805
| 2020-03-30T22:36:49
| 2020-03-30T22:36:49
| 246,477,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
# greatest common divisor
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
# least common multiple
def lcm(a, b):
return a * b // gcd(a, b)
N = int(input())
print(lcm(2, N))
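# Examples (illustrative): gcd(12, 18) -> 6, lcm(2, 3) -> 6.
# For this problem, lcm(2, N) is N when N is even and 2*N when N is odd.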
|
[
"takutotakuwan@gmail.com"
] |
takutotakuwan@gmail.com
|
bffb93f11e2d156068446b36e4f0a07a925befaa
|
9efe98cd4e2c4b23230ba8af2b98609b0a8654a5
|
/articles/urls.py
|
cc4823f2d18d59695b2fe6aa45fc3bc87e971e07
|
[] |
no_license
|
Jordan-Rob/django-news
|
f0855d10f9056ad491d90d9086284ed3de468227
|
fc73862274522a16530d2fd3497d6e90b7b109c2
|
refs/heads/master
| 2021-09-25T11:37:50.529863
| 2020-02-26T13:50:42
| 2020-02-26T13:50:42
| 241,844,795
| 1
| 0
| null | 2021-09-22T18:45:20
| 2020-02-20T09:34:47
|
Python
|
UTF-8
|
Python
| false
| false
| 551
|
py
|
from django.urls import path
from .views import (
ArticleListView,
ArticleCreateView,
ArticleDeleteView,
ArticleDetailView,
ArticleUpdateView,
)
urlpatterns = [
path('', ArticleListView.as_view(), name='article_list'),
path('new/', ArticleCreateView.as_view(), name='article_new'),
path('<int:pk>/', ArticleDetailView.as_view(), name='article_detail'),
path('<int:pk>/edit/', ArticleUpdateView.as_view(), name='article_edit'),
path('<int:pk>/delete/', ArticleDeleteView.as_view(), name='article_delete'),
]
|
[
"jordanrob709@gmail.com"
] |
jordanrob709@gmail.com
|
8579c92447a21ce7b508108375db792656afff0a
|
7357d367b0af4650ccc5b783b7a59090fdde47bb
|
/library/k8s_v1_config_map.py
|
f1f041f9632980fdd20118debaac00931c8b2207
|
[
"MIT"
] |
permissive
|
BarracudaPff/code-golf-data-python
|
fb0cfc74d1777c4246d56a5db8525432bf37ab1a
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
refs/heads/main
| 2023-05-29T05:52:22.856551
| 2020-05-23T22:12:48
| 2020-05-23T22:12:48
| 378,832,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,574
|
py
|
DOCUMENTATION = """
module: k8s_v1_config_map
short_description: Kubernetes ConfigMap
description:
- Manage the lifecycle of a config_map object. Supports check mode, and attempts to
to be idempotent.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
annotations:
description:
- Annotations is an unstructured key value map stored with a resource that may
be set by external tools to store and retrieve arbitrary metadata. They are
not queryable and should be preserved when modifying objects.
type: dict
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
data:
description:
- Data contains the configuration data. Each key must consist of alphanumeric
characters, '-', '_' or '.'.
type: dict
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
    - If set to C(True), and I(state) is C(present), an existing object will be updated,
and lists will be replaced, rather than merged.
default: false
type: bool
host:
description:
    - Provide a URL for accessing the Kubernetes API.
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
labels:
description:
- Map of string keys and values that can be used to organize and categorize (scope
and select) objects. May match selectors of replication controllers and services.
type: dict
name:
description:
- Name must be unique within a namespace. Is required when creating resources,
although some resources may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation idempotence and
configuration definition. Cannot be updated.
namespace:
description:
    - Namespace defines the space within which each name must be unique. An empty namespace
is equivalent to the "default" namespace, but "default" is the canonical representation.
Not all objects are required to be scoped to a namespace - the value of this
field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated.
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
resource_definition:
description:
- Provide the YAML definition for the object, bypassing any modules parameters
intended to define object attributes.
type: dict
src:
description:
- Provide a path to a file containing the YAML definition of the object. Mutually
exclusive with I(resource_definition).
type: path
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
state:
description:
- Determines if an object should be created, patched, or deleted. When set to
C(present), the object will be created, if it does not exist, or patched, if
parameter values differ from the existing object's attributes, and deleted,
if set to C(absent). A patch operation results in merging lists and updating
dictionaries, with lists being merged into a unique set of values. If a list
contains a dictionary with a I(name) or I(type) attribute, a strategic merge
      is performed, where individual elements with a matching I(name) or I(type)
are merged. To force the replacement of lists, set the I(force) option to C(True).
default: present
choices:
- present
- absent
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- kubernetes == 4.0.0
"""
EXAMPLES = """
"""
RETURN = """
api_version:
description: Requested API version
type: string
config_map:
type: complex
returned: when I(state) = C(present)
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
data:
description:
- Data contains the configuration data. Each key must consist of alphanumeric
characters, '-', '_' or '.'.
type: complex
contains: str, str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- Standard object's metadata.
type: complex
"""
def main():
try:
module = KubernetesAnsibleModule("config_map", "v1")
except KubernetesAnsibleException as exc:
raise Exception(exc.message)
try:
module.execute_module()
except KubernetesAnsibleException as exc:
module.fail_json(msg="Module failed!", error=str(exc))
if __name__ == "__main__":
main()
|
[
"sokolov.yas@gmail.com"
] |
sokolov.yas@gmail.com
|
a616047134756ed93653e3641eeadb7056c6d93e
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/permutations_20200723155542.py
|
0a2cb82160de953b2379765e3b9c3fe802252d19
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
def perm(arr):
# sort the array
if len(arr) == 0:
return 0
else:
arr.sort()
perm = set(arr)
maxValue = max(arr)
if len(perm) == maxValue:
return 1
        else:
            return 0
print(perm([1,1,1]))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
20d466c872a9e9cc3e8c0d6993541a1ee769c0e9
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_dissed.py
|
bfd39eb88b44f035eafe50cb7e30ebacde72a66b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
from xai.brain.wordbase.verbs._diss import _DISS
# class header
class _DISSED(_DISS, ):
def __init__(self,):
_DISS.__init__(self)
self.name = "DISSED"
self.specie = 'verbs'
self.basic = "diss"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7b7236e04ce84478b326d1bf5639d8e284d3c418
|
47175228ce25812549eb5203fc8b86b76fec6eb9
|
/API_scripts/dfp/dfp_python3/v201411/custom_field_service/create_custom_fields.py
|
f810346ad2e1082080513f0a6bbf67f474035af9
|
[] |
no_license
|
noelleli/documentation
|
c1efe9c2bdb169baa771e9c23d8f4e2683c2fe20
|
a375698b4cf0776d52d3a9d3c17d20143bd252e1
|
refs/heads/master
| 2021-01-10T05:41:30.648343
| 2016-02-13T05:46:31
| 2016-02-13T05:46:31
| 51,477,460
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CustomFieldService.createCustomFields
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201411')
# Create custom field objects.
custom_fields = [
{
'name': 'Customer comments #%s' % uuid.uuid4(),
'entityType': 'LINE_ITEM',
'dataType': 'STRING',
'visibility': 'FULL'
}, {
'name': 'Internal approval status #%s' % uuid.uuid4(),
'entityType': 'LINE_ITEM',
'dataType': 'DROP_DOWN',
'visibility': 'FULL'
}
]
# Add custom fields.
custom_fields = custom_field_service.createCustomFields(custom_fields)
# Display results.
for custom_field in custom_fields:
print(('Custom field with ID \'%s\' and name \'%s\' was created.'
% (custom_field['id'], custom_field['name'])))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
[
"noelle@makermedia.com"
] |
noelle@makermedia.com
|
7f45e9874c2bec2f7cbb448475e5c45d63d5972f
|
315450354c6ddeda9269ffa4c96750783963d629
|
/CMSSW_7_0_4/src/TotemRawData/Readers/test/.svn/text-base/raw_data_example.py.svn-base
|
fe26aa920afdb0d2be6cac4e77ee9b7bb3cb236e
|
[] |
no_license
|
elizamelo/CMSTOTEMSim
|
e5928d49edb32cbfeae0aedfcf7bd3131211627e
|
b415e0ff0dad101be5e5de1def59c5894d7ca3e8
|
refs/heads/master
| 2021-05-01T01:31:38.139992
| 2017-09-12T17:07:12
| 2017-09-12T17:07:12
| 76,041,270
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("rpReconstruction")
# minimum of logs
process.load("Configuration.TotemCommon.LoggerMin_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
process.load('TotemRawData.Readers.RawDataSource_cfi')
process.source.verbosity = 10
process.source.printProgressFrequency = 1
process.source.fileNames.append('/castor/cern.ch/totem/LHCRawData/2015/Physics/run_EVB-wn10_9298.068.vmeb')
process.source.fileNames.append('/castor/cern.ch/totem/LHCRawData/2015/Physics/run_9487_EVB11_2.005.srs')
# raw to digi conversion
process.load('TotemCondFormats.DAQInformation.DAQMappingSourceXML_cfi')
process.DAQMappingSourceXML.mappingFileNames.append('TotemCondFormats/DAQInformation/data/rp_220.xml')
process.load('TotemRawData.RawToDigi.Raw2DigiProducer_cfi')
process.Raw2DigiProducer.verbosity = 0
process.p = cms.Path(
process.Raw2DigiProducer
)
|
[
"eliza@cern.ch"
] |
eliza@cern.ch
|
|
ee93cddd16945c87b62476643878d8e30493b53a
|
3db4afb573e6e82e9308c43ae13e9426c5c77c80
|
/glue/conftest.py
|
7ed2f145177687ccad2672345c69031f834095a5
|
[
"BSD-3-Clause"
] |
permissive
|
mariobuikhuizen/glue
|
af8a53498fd4e2365bf98e5089677efdcdb67127
|
6b968b352bc5ad68b95ad5e3bb25550782a69ee8
|
refs/heads/master
| 2023-01-08T07:33:03.006145
| 2020-09-20T09:25:06
| 2020-09-20T09:25:06
| 298,285,919
| 0
| 0
|
NOASSERTION
| 2020-09-24T13:22:37
| 2020-09-24T13:22:36
| null |
UTF-8
|
Python
| false
| false
| 3,493
|
py
|
import os
import sys
import warnings
import pytest
try:
from qtpy import PYSIDE2
except Exception:
PYSIDE2 = False
from glue.config import CFG_DIR as CFG_DIR_ORIG
try:
import objgraph
except ImportError:
OBJGRAPH_INSTALLED = False
else:
OBJGRAPH_INSTALLED = True
STDERR_ORIGINAL = sys.stderr
ON_APPVEYOR = os.environ.get('APPVEYOR', 'False') == 'True'
def pytest_runtest_teardown(item, nextitem):
sys.stderr = STDERR_ORIGINAL
global start_dir
os.chdir(start_dir)
def pytest_addoption(parser):
parser.addoption("--no-optional-skip", action="store_true", default=False,
help="don't skip any tests with optional dependencies")
start_dir = None
def pytest_configure(config):
global start_dir
start_dir = os.path.abspath('.')
os.environ['GLUE_TESTING'] = 'True'
if config.getoption('no_optional_skip'):
from glue.tests import helpers
for attr in helpers.__dict__:
if attr.startswith('requires_'):
# The following line replaces the decorators with a function
                # that does nothing, effectively disabling it.
setattr(helpers, attr, lambda f: f)
# Make sure we don't affect the real glue config dir
import tempfile
from glue import config
config.CFG_DIR = tempfile.mkdtemp()
# Start up QApplication, if the Qt code is present
try:
from glue.utils.qt import get_qapp
except Exception:
# Note that we catch any exception, not just ImportError, because
# QtPy can raise a PythonQtError.
pass
else:
get_qapp()
# Force loading of plugins
from glue.main import load_plugins
load_plugins()
def pytest_report_header(config):
from glue import __version__
glue_version = "%20s:\t%s" % ("glue", __version__)
from glue._deps import get_status
return os.linesep + glue_version + os.linesep + os.linesep + get_status()
def pytest_unconfigure(config):
os.environ.pop('GLUE_TESTING')
# Reset configuration directory to original one
from glue import config
config.CFG_DIR = CFG_DIR_ORIG
# Remove reference to QApplication to prevent segmentation fault on PySide
try:
from glue.utils.qt import app
app.qapp = None
except Exception: # for when we run the tests without the qt directories
# Note that we catch any exception, not just ImportError, because
# QtPy can raise a PythonQtError.
pass
if OBJGRAPH_INSTALLED and not ON_APPVEYOR:
# Make sure there are no lingering references to GlueApplication
obj = objgraph.by_type('GlueApplication')
if len(obj) > 0:
objgraph.show_backrefs(objgraph.by_type('GlueApplication'))
warnings.warn('There are {0} remaining references to GlueApplication'.format(len(obj)))
# Uncomment when checking for memory leaks
# objgraph.show_most_common_types(limit=100)
# With PySide2, tests can fail in a non-deterministic way with the following
# error:
#
# AttributeError: 'PySide2.QtGui.QStandardItem' object has no attribute 'connect'
#
# Until this can be properly debugged and fixed, we xfail any test that fails
# with this exception
if PYSIDE2:
def pytest_runtest_call(__multicall__):
try:
__multicall__.execute()
except AttributeError as exc:
if 'PySide2.QtGui.QStandardItem' in str(exc):
pytest.xfail()
|
[
"thomas.robitaille@gmail.com"
] |
thomas.robitaille@gmail.com
|
6d98ccacbdf0ae25f09883599e16712c57df834b
|
f350464b0ec1d2747a93ea533b04746c8ff68c18
|
/setup.py
|
0b1315dc2d83cf67c02b1f8d493b7614673acf85
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
erdogant/hgboost
|
ed4e0e31aff87fba2c9591b59b70ffc7a91c27b7
|
8492331b15b883918d2159a61563932ae82bf313
|
refs/heads/master
| 2023-08-17T14:08:01.729366
| 2023-08-15T17:01:03
| 2023-08-15T17:01:03
| 257,025,146
| 48
| 15
|
NOASSERTION
| 2020-09-11T08:09:36
| 2020-04-19T14:48:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
import setuptools
import re
# versioning ------------
VERSIONFILE="hgboost/__init__.py"
getversion = re.search( r"^__version__ = ['\"]([^'\"]*)['\"]", open(VERSIONFILE, "rt").read(), re.M)
if getversion:
new_version = getversion.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
# Setup ------------
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
install_requires=['datazets', 'pypickle', 'matplotlib','numpy','pandas','tqdm','hyperopt','lightgbm','catboost','xgboost','classeval','treeplot','df2onehot','colourmap','seaborn'],
python_requires='>=3',
name='hgboost',
version=new_version,
author="Erdogan Taskesen",
author_email="erdogant@gmail.com",
description="hgboost is a python package for hyperparameter optimization for xgboost, catboost and lightboost for both classification and regression tasks.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://erdogant.github.io/hgboost",
download_url = 'https://github.com/erdogant/hgboost/archive/'+new_version+'.tar.gz',
packages=setuptools.find_packages(), # Searches throughout all dirs for files to include
include_package_data=True, # Must be true to include files depicted in MANIFEST.in
license_files=["LICENSE"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
[
"erdogant@gmail.com"
] |
erdogant@gmail.com
|
2f10c0fc4429d0811c309e418f22f5c1c3ef9850
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/check_update_20210125140228.py
|
03eaacfa439dd94d42ef81213aea4ae61bc8634b
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725
| 2021-02-04T10:11:16
| 2021-02-04T10:11:16
| 332,619,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
import os
from datetime import datetime

# Helpers such as index_getter, data_getter, data_process, load_pickle,
# save_obj and to_dataframe are assumed to be defined elsewhere in this project.
def check_update():
prjtypes = ['ipo','refinance','reproperty']
for prjtype in prjtypes:
proj_list_new = index_getter(prjtype)
        proj_list_old = load_pickle(os.getcwd()+'/saved_config/sz_index'+'_'+prjtype+'.pkl')
updated_idx = [index for (index, d) in enumerate(proj_list_new) if d["updtdt"] == datetime.today().strftime('%Y-%m-%d')]
print("there are {} projects have been updated!".format(len(updatedidx)))
for new_idx in updated_idx:
# name,projid = proj_list_new[i]['cmpnm'],proj_list_new[i]['prjid']
# old_idx = next((index for (index, d) in enumerate(proj_list_old) if d["cmpnm"] == name), None)
# if old_idx is not None:
# # for key, value in proj_list_new[i].items():
# # if proj_list_old[old_index][key] != value:
# # print(key,value,proj_list_old[old_index][key])
# if proj_list_new[new_idx]['prjst'] != proj_list_old[old_idx]['prjst']:
raw_data = data_getter(proj_list_new[new_idx]['prjid'])
cleaned_data = data_process(raw_data)
directory = os.getcwd()+'/data/'+cleaned_data['prjType'] + '/' + cleaned_data['prjName']
save_obj(cleaned_data, directory +'/clean_info')
print('company:', cleaned_data['prjName'],'is updated')
def update_allStockInfo():
listOfFiles = list()
    for (dirpath, dirnames, filenames) in os.walk(os.getcwd()):
listOfFiles += [os.path.join(dirpath, file) for file in filenames]
allStock_info = []
for i in listOfFiles:
if os.path.basename(i) == 'info.pkl':
print('clean up company:', os.path.dirname(i))
raw_data = load_pickle(i)
cleaned_data = data_process(raw_data)
allStock_info.append(cleaned_data)
save_obj(cleaned_data, os.path.dirname(i), 'clean_info')
print('clean up company:', os.path.dirname(i))
to_dataframe(allStock_info)
save_obj(allStock_info, os.getcwd(), 'allStock_info')
|
[
"chenjiajun.jason@outlook.com"
] |
chenjiajun.jason@outlook.com
|
ec669664a01bd273b8ee1ae285da9a46c1b96850
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/rna-transcription/2569808f732d45a1a7c5dbd7f66d16e1.py
|
db46159bc52e0c46102481740258245a199f94aa
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
def to_rna(input):
dnaTypes = {"G":"C","C":"G","T":"A","A":"U"}
output = []
for each in input:
value = dnaTypes.get(each,'not found')
output.append(value)
return ''.join(output)
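# Example (illustrative): to_rna("ACGTGGTCTTAA") -> "UGCACCAGAAUU"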
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
4c5b98dcb0e5971fa8e16b9150b20e53eaa42687
|
90f729624737cc9700464532a0c67bcbfe718bde
|
/lino_xl/lib/invoicing/__init__.py
|
e4adea047c066bd0b4bff8eeb6e269f443914fa4
|
[
"AGPL-3.0-only"
] |
permissive
|
lino-framework/xl
|
46ba6dac6e36bb8e700ad07992961097bb04952f
|
642b2eba63e272e56743da2d7629be3f32f670aa
|
refs/heads/master
| 2021-05-22T09:59:22.244649
| 2021-04-12T23:45:06
| 2021-04-12T23:45:06
| 52,145,415
| 1
| 5
|
BSD-2-Clause
| 2021-03-17T11:20:34
| 2016-02-20T09:08:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
Adds functionality for **invoicing**, i.e. automatically generating
invoices from data in the database.
See :doc:`/specs/invoicing`.
"""
from lino.api.ad import Plugin, _
from django.utils.text import format_lazy
class Plugin(Plugin):
verbose_name = _("Invoicing")
# needs_plugins = ['lino_xl.lib.ledger']
needs_plugins = ['lino_xl.lib.sales']
voucher_model = 'sales.VatProductInvoice'
item_model = 'sales.InvoiceItem'
"""
The database model into which invoiceable objects should create
invoice items. Default value refers to :class:`sales.InvoiceItem
<lino_xl.lib.sales.models.InvoiceItem>`.
This model will have an injected GFK field `invoiceable`.
"""
invoiceable_label = _("Invoiced object")
def on_site_startup(self, site):
from lino.core.utils import resolve_model
self.item_model = resolve_model(self.item_model)
# ivm = self.item_model._meta.get_field('voucher').remote_field.model
# if self.voucher_model != ivm:
# raise Exception("voucher_model is {} but should be {}".format(
# self.voucher_model, ivm))
self.voucher_model = resolve_model(self.voucher_model)
def get_voucher_type(self):
# from lino.core.utils import resolve_model
# model = resolve_model(self.voucher_model)
# return self.site.modules.ledger.VoucherTypes.get_for_model(model)
return self.site.models.ledger.VoucherTypes.get_for_model(
self.voucher_model)
    def setup_main_menu(self, site, user_type, m):
mg = site.plugins.sales
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('invoicing.Plan', action='start_plan')
# Area = site.models.invoicing.Area
# # Areas = site.models.invoicing.Areas
# for obj in Area.objects.all():
# # m.add_instance_action(obj, action='start_invoicing')
# # m.add_action(obj, action='start_invoicing')
# m.add_action(
# 'invoicing.PlansByArea', 'start_invoicing',
# label=format_lazy(_("Create invoices {}"), obj),
# params=dict(master_instance=obj))
def setup_config_menu(self, site, user_type, m):
mg = site.plugins.sales
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('invoicing.Tariffs')
m.add_action('invoicing.Areas')
def setup_explorer_menu(self, site, user_type, m):
mg = site.plugins.sales
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('invoicing.AllPlans')
m.add_action('invoicing.SalesRules')
|
[
"luc.saffre@gmail.com"
] |
luc.saffre@gmail.com
|
0e98660cef8d593393fd7f5239d57526602a5f0e
|
a56252fda5c9e42eff04792c6e16e413ad51ba1a
|
/resources/usr/local/lib/python2.7/dist-packages/bx/seq/seq.py
|
727c700f5b0caaf1da917454450ecd64e677025f
|
[
"Apache-2.0"
] |
permissive
|
edawson/parliament2
|
4231e692565dbecf99d09148e75c00750e6797c4
|
2632aa3484ef64c9539c4885026b705b737f6d1e
|
refs/heads/master
| 2021-06-21T23:13:29.482239
| 2020-12-07T21:10:08
| 2020-12-07T21:10:08
| 150,246,745
| 0
| 0
|
Apache-2.0
| 2019-09-11T03:22:55
| 2018-09-25T10:21:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,887
|
py
|
"""
Classes to support "biological sequence" files.
:Author: Bob Harris (rsharris@bx.psu.edu)
"""
# DNA reverse complement table
DNA_COMP = " - " \
" TVGH CD M KN YSA BWXR tvgh cd m kn ysa bwxr " \
" " \
" "
class SeqFile(object):
"""
A biological sequence is a sequence of bytes or characters. Usually these
represent DNA (A,C,G,T), proteins, or some variation of those.
class attributes:
file: file object containing the sequence
revcomp: whether gets from this sequence should be reverse-complemented
False => no reverse complement
True => (same as "-5'")
"maf" => (same as "-5'")
"+5'" => minus strand is from plus strand's 5' end (same as "-3'")
"+3'" => minus strand is from plus strand's 3' end (same as "-5'")
"-5'" => minus strand is from its 5' end (as per MAF file format)
"-3'" => minus strand is from its 3' end (as per genome browser,
but with origin-zero)
name: usually a species and/or chromosome name (e.g. "mule.chr5"); if
the file contains a name, that overrides this one
gap: gap character that aligners should use for gaps in this sequence
"""
def __init__(self, file=None, revcomp=False, name="", gap=None):
self.file = file
if (revcomp == True): self.revcomp = "-5'"
elif (revcomp == "+3'"): self.revcomp = "-5'"
elif (revcomp == "+5'"): self.revcomp = "-3'"
elif (revcomp == "maf"): self.revcomp = "-5'"
else: self.revcomp = revcomp
self.name = name
if (gap == None): self.gap = "-"
else: self.gap = gap
self.text = None # (subclasses fill in text and
        self.length = 0 # length or they must override get())
def close(self):
assert (self.file != None)
self.file.close()
self.file = None
def extract_name(self,line):
try:
return line.split()[0]
except:
return ""
def set_text(self,text):
self.text = text
self.length = len(text)
def __str__ (self):
text = ""
if (self.name != None): text += self.name + " "
text += self.get(0,self.length)
return text
def get(self, start, length):
"""
Fetch subsequence starting at position `start` with length `length`.
This method is picky about parameters, the requested interval must
have non-negative length and fit entirely inside the NIB sequence,
the returned string will contain exactly 'length' characters, or an
AssertionError will be generated.
"""
# Check parameters
assert length >= 0, "Length must be non-negative (got %d)" % length
assert start >= 0,"Start must be greater than 0 (got %d)" % start
assert start + length <= self.length, \
"Interval beyond end of sequence (%s..%s > %s)" % ( start, start + length, self.length )
        # Fetch sequence and reverse complement if necessary
if not self.revcomp:
return self.raw_fetch( start, length )
if self.revcomp == "-3'":
return self.reverse_complement(self.raw_fetch(start,length))
assert self.revcomp == "-5'", "unrecognized reverse complement scheme"
start = self.length - (start+length)
return self.reverse_complement(self.raw_fetch(start,length))
def raw_fetch(self, start, length):
return self.text[start:start+length]
def reverse_complement(self,text):
comp = [ch for ch in text.translate(DNA_COMP)]
comp.reverse()
return "".join(comp)
class SeqReader(object):
"""Iterate over all sequences in a file in order"""
def __init__(self, file, revcomp=False, name="", gap=None):
self.file = file
self.revcomp = revcomp
self.name = name
self.gap = gap
self.seqs_read = 0
def close(self):
self.file.close()
def __iter__(self):
return SeqReaderIter(self)
def next(self): # subclasses should override this method and return the
return # .. next sequence (of type SeqFile or a subclass) read
# .. from self.file
class SeqReaderIter(object):
def __init__(self,reader):
self.reader = reader
def __iter__(self):
return self
def next(self):
v = self.reader.next()
if not v: raise StopIteration
return v
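# Example usage (illustrative; a text-backed SeqFile, no file needed):
#   sf = SeqFile()
#   sf.set_text("AACG")
#   sf.get(0, 4)                  -> "AACG"
#   sf.reverse_complement("AACG") -> "CGTT"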
|
[
"szarate@dnanexus.com"
] |
szarate@dnanexus.com
|
9c85f5d5791d3d14ac61c80470e39516b5b5b94a
|
b162de01d1ca9a8a2a720e877961a3c85c9a1c1c
|
/389.find-the-difference.python3.py
|
787bd1ca19ae6a5ffdf1788af2408a2944611bb1
|
[] |
no_license
|
richnakasato/lc
|
91d5ff40a1a3970856c76c1a53d7b21d88a3429c
|
f55a2decefcf075914ead4d9649d514209d17a34
|
refs/heads/master
| 2023-01-19T09:55:08.040324
| 2020-11-19T03:13:51
| 2020-11-19T03:13:51
| 114,937,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
#
# [389] Find the Difference
#
# https://leetcode.com/problems/find-the-difference/description/
#
# algorithms
# Easy (52.22%)
# Total Accepted: 126.9K
# Total Submissions: 243.1K
# Testcase Example: '"abcd"\n"abcde"'
#
#
# Given two strings s and t which consist of only lowercase letters.
#
# String t is generated by random shuffling string s and then add one more
# letter at a random position.
#
# Find the letter that was added in t.
#
# Example:
#
# Input:
# s = "abcd"
# t = "abcde"
#
# Output:
# e
#
# Explanation:
# 'e' is the letter that was added.
#
#
class Solution:
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
|
[
"richnakasato@hotmail.com"
] |
richnakasato@hotmail.com
|
5caec5d9a8ccca40938aa679181df75ee6367197
|
6d3ac655065e7592d7d8f6692b48546fb9adab75
|
/VirtualEnvAndPagination/env/bin/pip-3.7
|
f324b76b04fee41f8ec85adbfd6cbdd528e977ac
|
[] |
no_license
|
lensherrggg/Django2Masterclass-BuildWebAppsWithPython-Django
|
81aa3fe582801d887b526bf3e022539cc0c739f5
|
22631e46cf48cb4206f8b77da664685fa141fec3
|
refs/heads/master
| 2021-04-08T11:43:23.265987
| 2020-03-27T12:56:50
| 2020-03-27T12:56:50
| 248,772,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
7
|
#!/Users/rui/Documents/Code/Learning/Django2MasterClass/VirtualEnvironment/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gr98522@gmail.com"
] |
gr98522@gmail.com
|
7753cb95d4466b610a3d6b8af4f2b00cc22c1d9b
|
a6f52b361be35ecd0b750289145d1dd5e5d1d48d
|
/src/profiler/formatters/SimpleTableFormatter.py
|
34c51c4caadb51c8ef5c62c9146a34e98f4bf99b
|
[] |
no_license
|
deeptiagrawa/Flow123d-python-utils
|
d360329d28b1c24d600e309fabf09539bce80dee
|
a80b2dd10eb77ed0fc6469c675dc400feffdcc09
|
refs/heads/master
| 2021-01-12T05:31:26.649733
| 2016-12-20T13:28:05
| 2016-12-20T13:28:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,009
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author: Jan Hybs
from __future__ import absolute_import
import re
import os
from utils.dotdict import DotDict
from utils.logger import Logger
class SimpleTableFormatter (object):
"""
Class which takes json object from flow123d benchmark profiler report
and returns simple table-like text string
"""
def __init__(self):
self.json = None
self.output = ""
self.headerCols = []
self.maxNameSize = 11
self.bodyRows = []
self.maxBodySize = None
self.headerFields = ("tag", "call count (max)", "max T", "min/max T", "avg T", "total T", "source", "line")
self.styles = {
"linesep": os.linesep, "padding": 0,
"min_width": 9, "colsep": '',
"rowsep": '', "space_header": 3,
"leading_char": " . ",
"remove_prefix": "/src/"
}
self.totalDuration = None
self.totalDurationMeasured = None
def set_styles(self, styles):
"""Overrides default styles"""
self.styles.update(styles)
# make sure some values are actually ints
self.styles["min_width"] = int(self.styles["min_width"])
self.styles["padding"] = int(self.styles["padding"])
self.styles["space_header"] = int(self.styles["space_header"])
self.styles["colsep_start"] = self.styles["colsep"] + " "
self.styles["colsep_end"] = " " + self.styles["colsep"]
def convert_style(self):
self.set_styles(self.styles)
self.styles = DotDict(self.styles)
def format(self, json):
""""Formats given json object"""
self.convert_style()
self.json = json
self.process_header(json)
self.process_body(json, 0)
self.maxBodySize = [n + self.styles.padding for n in self.maxBodySize]
self.maxNameSize = self.maxNameSize + self.styles.space_header
lineDivider = (sum(self.maxBodySize) + 2 + len(self.maxBodySize) * 2) * self.styles.rowsep
fmtHead = "{{:{self.maxNameSize}s}}{{}}{self.styles.linesep}".format(self=self)
for pair in self.headerCols:
self.output += fmtHead.format(*pair)
self.output += lineDivider
self.output += self.styles.linesep
self.output += self.styles.colsep_start
for i in range(len(self.headerFields)):
fmt = "{{:^{maxBodySize}s}}{colsep}".format(maxBodySize=self.maxBodySize[i], colsep=self.styles.colsep_end)
self.output += fmt.format(self.headerFields[i])
self.output += self.styles.linesep
self.output += lineDivider
self.output += self.styles.linesep
for tup in self.bodyRows:
self.output += self.styles.colsep_start
fields = []
for i in range(len(self.maxBodySize)):
fields.append(("{:" + tup[i][0] + "" + str(self.maxBodySize[i]) + "s}").format(tup[i][1]))
self.output += self.styles.colsep_end.join(fields)
self.output += self.styles.colsep_end + self.styles.linesep
# self.output += fmtBody.format (*tup)
self.output += lineDivider
return self.output
def append_to_header(self, name, value=None, linebreak=False):
"""Appends entry to header column list, if no value was given
value from json object by given name will be taken
"""
value = value if value is not None else self.json[name.lower().replace(" ", "-")]
self.headerCols.append((name, str(value) + (linebreak if linebreak else "")))
if self.maxNameSize < len(str(name)):
self.maxNameSize = len(str(name))
def append_to_body(self, values):
"""Appends entry to body row list.
        value is a tuple of tuples, where each inner tuple has two elements:
        first a formatting character, second a value. The formatting
        character is used in the string format() method to designate alignment:
< for left
> for right
^ for center
"""
self.bodyRows.append(values)
# default empty array
if self.maxBodySize is None:
self.maxBodySize = [self.styles.min_width] * len(values)
# update max length
for i in range(len(self.maxBodySize)):
self.maxBodySize[i] = max(self.maxBodySize[i], len(str(values[i][1])))
def process_header(self, json):
"""Appends header information"""
self.append_to_header("Program name")
self.append_to_header("Program version")
self.append_to_header("Program branch")
self.append_to_header("Program revision")
self.append_to_header("Program build")
if 'source-dir' in json:
self.append_to_header("Source dir")
self.append_to_header("Timer resolution", linebreak=self.styles.linesep)
        desc = re.sub(r"\s+", " ", json["task-description"], flags=re.M)
self.append_to_header("Task description", desc)
self.append_to_header("Task size", linebreak=self.styles.linesep)
self.append_to_header("Run process count")
self.append_to_header("Run started", json["run-started-at"])
self.append_to_header("Run ended", json["run-finished-at"])
def process_body(self, json, level):
"""Recursive body processing"""
# first occurrence of cumul-time-sum is whole-program's measured time
if self.totalDurationMeasured is None and "cumul-time-sum" in json:
self.totalDurationMeasured = json['cumul-time-sum']
if level > 0:
abs_prc = (json["cumul-time-sum"] / self.totalDurationMeasured) * 100
rel_prc = json["percent"]
path = json["file-path"]
if str(json["file-path"]).startswith(self.styles.remove_prefix):
path = path[len(self.styles.remove_prefix):]
# safe average
if (json["call-count-sum"] != 0):
avg_cumul_time = json["cumul-time-sum"] / json["call-count-sum"]
else:
avg_cumul_time = json["cumul-time-sum"]
# safe min max ratio
cumul_time_max = max(json["cumul-time-max"], json["cumul-time-min"])
cumul_time_min = min(json["cumul-time-max"], json["cumul-time-min"])
if (json["cumul-time-max"] > 0):
min_max_ratio = cumul_time_min / cumul_time_max
else:
min_max_ratio = 0
self.append_to_body((
("<", "{abs_prc:6.2f} {leading} {rel_prc:5.2f} {tag}".format(
abs_prc=abs_prc, leading=self.styles.leading_char * (level - 1), rel_prc=rel_prc, tag=json["tag"])),
("^", "{:d}".format(json["call-count-max"])),
("^", "{:1.4f}".format(json["cumul-time-max"])),
("^", "{:1.4f}".format(min_max_ratio)),
("^", "{:1.4f}".format(avg_cumul_time)),
("^", "{:1.4f}".format(json["cumul-time-sum"])),
("<", "{path:s}, {function:s}()".format(function=json["function"], path=path)),
("^", "{line:5d}".format(line=json["file-line"]))
))
if 'children' in json:
for child in json["children"]:
try:
self.process_body(child, level + 1)
except Exception as e:
import json as j
if 'children' in child:
del child['children']
child_repr = j.dumps(child, indent=4)
Logger.instance().warning(
'Caught exception while processing profiler data: {e}'.format(e=e.message))
Logger.instance().warning('Exception', exc_info=e)
Logger.instance().warning('problem node (without children) is:\n{child}'.format(child=child_repr))
def timedelta_milliseconds(self, td):
return td.days * 86400000 + td.seconds * 1000 + td.microseconds / 1000
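# Minimal usage sketch (illustrative; assumes `report` is a profiler-report
# dict parsed from Flow123d JSON output, with the keys accessed above):
#
#   formatter = SimpleTableFormatter()
#   formatter.set_styles({"rowsep": "-", "colsep": "|"})
#   print(formatter.format(report))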
|
[
"jan.hybs@tul.cz"
] |
jan.hybs@tul.cz
|
e969a74760c8c4b91e8b2dcd06db47ff9cbccf46
|
10317d4492bcc5a85518c8c9c6edce56cccb7050
|
/Document Scanner/transform_new.py
|
012814806bec195d2e650f2d62128556692278b9
|
[] |
no_license
|
kishan/Image-Recognition-Test-Grader
|
066a640adbc6bce5181cf0fb2d8c6f9a2a8d60e1
|
7b4e603f7a483cfed622df8d9896d9ff2719526a
|
refs/heads/master
| 2021-01-01T19:43:07.716688
| 2015-08-25T20:00:56
| 2015-08-25T20:00:56
| 41,382,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,411
|
py
|
# import the necessary packages
import numpy as np
import cv2
import math
################################################################################
## The following functions have been written by me but the approach is derived
## from the following link: http://www.pyimagesearch.com/2014/08/25/4-point-
## opencv-getperspective-transform-example/
################################################################################
# take in array of 4 coordinates and order coordinates so first coordinate
# is top-left corner and rest are in order moving clockwise
def order_points(points):
#create empty list of 4 coordinates
numOfPoints = 4
valuesPerPoint = 2
ordered_points = np.zeros((numOfPoints,valuesPerPoint), dtype= "float32")
# add x and y componenets of each coordinate
sumOfCoordinates = np.sum(points, axis = 1)
#find difference of x and y components of each coordinate
differenceOfCoordinates = np.diff(points, axis=1)
# find smallest sum and difference of coordinates
smallestSumIndex = np.argmin(sumOfCoordinates)
smallestDifferenceIndex = np.argmin(differenceOfCoordinates)
# find largest sum and difference of coordinates
largestSumIndex = np.argmax(sumOfCoordinates)
largestDifferenceIndex = np.argmax(differenceOfCoordinates)
# top-left coordinate has smallest coordinate sum
ordered_points[0] = points[smallestSumIndex]
    # top-right coordinate has smallest coordinate difference
    ordered_points[1] = points[smallestDifferenceIndex]
    # bottom-right coordinate has largest coordinate sum
    ordered_points[2] = points[largestSumIndex]
    # bottom-left coordinate has largest coordinate difference
ordered_points[3] = points[largestDifferenceIndex]
return ordered_points
def distance_between_points(point1, point2):
x1 = point1[0]
y1 = point1[1]
x2 = point2[0]
y2 = point2[1]
return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
# takes in image and list of 4 coordinates for each corner of object in image,
# and returns image of object in vertical position with excess background
# removed
def four_point_transform(image, points):
# unpack points in order
ordered_points = order_points(points)
top_left, top_right = ordered_points[0], ordered_points[1]
bottom_right, bottom_left = ordered_points[2], ordered_points[3]
# find the max width of the object in the image
topWidth = int(distance_between_points(top_left, top_right))
bottomWidth = int(distance_between_points(bottom_left, bottom_right))
maxWidth = max(topWidth, bottomWidth)
# find the max height of the object in the image
topHeight = int(distance_between_points(top_left, bottom_left))
bottomHeight = int(distance_between_points(top_right, bottom_right))
maxHeight = max(topHeight, bottomHeight)
# create array of corner points for final image
new_top_left = [0, 0]
new_top_right = [maxWidth - 1, 0]
new_bottom_right = [maxWidth - 1, maxHeight - 1]
new_bottom_left = [0, maxHeight - 1]
new_coordinates = np.array([new_top_left, new_top_right, new_bottom_right,
new_bottom_left], dtype = "float32")
# calculate 3x3 matrix of a perspective transform
M = cv2.getPerspectiveTransform(ordered_points, new_coordinates)
# apply perspective transform matrix to image
transformed_image = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the transformed image
return transformed_image
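# A minimal usage sketch (the file name and corner points are hypothetical,
# not part of the original project):
#
#   image = cv2.imread("page.jpg")
#   corners = np.array([[73, 239], [356, 117], [475, 265], [187, 443]])
#   scanned = four_point_transform(image, corners)
#   cv2.imwrite("page_scanned.jpg", scanned)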
|
[
"kspatel2018@gmail.com"
] |
kspatel2018@gmail.com
|
94bf08a7ce82fd2b63625597118eb7bbeb7fe531
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part008601.py
|
faee6c6b3a4acdcb9b277c59692c5169f5000ac5
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,923
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher141898(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i4.3.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher141898._instance is None:
CommutativeMatcher141898._instance = CommutativeMatcher141898()
return CommutativeMatcher141898._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 141897
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i4.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 141899
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i4.3.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 141900
if len(subjects) == 0:
pass
# 0: x*b
yield 0, subst2
subjects.appendleft(tmp2)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp4 = subjects.popleft()
associative1 = tmp4
associative_type1 = type(tmp4)
subjects5 = deque(tmp4._args)
matcher = CommutativeMatcher141902.get()
tmp6 = subjects5
subjects5 = []
for s in tmp6:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp6, subst0):
pass
if pattern_index == 0:
pass
# State 141903
if len(subjects) == 0:
pass
# 0: x*b
yield 0, subst1
subjects.appendleft(tmp4)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part008602 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
d38095f6f65c9f07ad7abbb070590af3d8ef7a7d
|
ded10c2f2f5f91c44ec950237a59225e8486abd8
|
/.history/2/path_integral_naive_sampling_20200419015912.py
|
09700ceb920c45902a696fba97ea21eac5e153d9
|
[] |
no_license
|
jearistiz/Statistical-Physics-Projects
|
276a86407b32ded4e06b32efb2fadbd8eff8daed
|
d9c5b16a50856e148dc8604d92b6de3ea21fc552
|
refs/heads/master
| 2022-11-05T03:41:23.623050
| 2020-06-28T06:36:05
| 2020-06-28T06:36:05
| 254,909,897
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,101
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 202004151200
def rho_free(x,xp,beta):
"""Uso: devuelve elemento de matriz dsnsidad para el caso de una partícula libre en un toro infinito."""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
"""Devuelve valor del potencial armónico para una posición x dada"""
return 0.5* x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anarmónico para una posición x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
Uso: calcula probabilidad teórica cuántica de encontrar al oscilador armónico
(inmerso en un baño térmico a temperatura inversa beta) en la posición x.
Recibe:
x: float -> posición
beta: float -> inverso de temperatura en unidades reducidas beta = 1/T.
Devuelve:
probabilidad teórica cuántica en posición x para temperatura inversa beta.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def path_naive_sampling( N_path = 10,beta = 4., N_iter = int(1e5), delta = 0.5,
potential = harmonic_potential, append_every = 1 ):
"""
Uso:
"""
dtau = beta/N_path
path_x = [0.] * N_path
pathss_x = [path_x[:]]
t_0 = time()
N_iter = int(N_iter)
for step in range(N_iter):
k = np.random.randint(0,N_path)
#Periodic boundary conditions
knext, kprev = (k+1) % N_path, (k-1) % N_path
x_new = path_x[k] + np.random.uniform(-delta,delta)
old_weight = ( rho_free(path_x[kprev],path_x[k],dtau) *
np.exp(- dtau * potential(path_x[k])) *
rho_free(path_x[k],path_x[knext],dtau) )
new_weight = ( rho_free(path_x[kprev],x_new,dtau) *
np.exp(- dtau * potential(x_new)) *
rho_free(x_new,path_x[knext],dtau) )
if np.random.uniform(0,1) < new_weight/old_weight:
path_x[k] = x_new
if step%append_every == 0:
pathss_x.append(path_x[:])
t_1 = time()
print('Path integral naive sampling: %d iterations -> %.2E seconds'%(N_iter,t_1-t_0))
pathss_x = np.array(pathss_x)
return pathss_x
def figures_fn( pathss_x, beta = 4 , N_plot = 201, x_max = 3, N_iter=int(1e5), append_every=1,
N_beta_ticks = 11, msq_file='file.csv', file_name='path-plot-prueba',
show_theory=True, show_matrix_squaring=True, show_path=True, save_plot=True,
show_plot=True, show_compare_hist=True, show_complete_path_hist=True):
pathss_x = np.array(pathss_x)
script_dir=os.path.dirname(os.path.abspath(__file__))
x_plot = np.linspace(-x_max,x_max,N_plot)
N_path = len(pathss_x[-1])
# Increase the font size of text in generated figures
plt.rc('text', usetex=True) # use LaTeX for text in figures
plt.rcParams.update({'font.size':15,'text.latex.unicode':True})
# Create figure
fig, ax1 = plt.subplots()
# Plot histogram, theory curve and, if requested, a random path
ax1.set_xlabel(u'$x$')
ax1.set_ylabel(u'$\pi^{(Q)} (x;\\beta)$')
if show_theory:
lns1 = ax1.plot(x_plot,QHO_canonical_ensemble(x_plot,beta),label=u'Theory')
if show_matrix_squaring:
msq_file = script_dir + '/' + msq_file
matrix_squaring_data = pd.read_csv(msq_file, index_col=0, comment='#')
lns2 = ax1.plot( matrix_squaring_data['position_x'],matrix_squaring_data['prob_density'],
label = u'Matrix Squaring\nalgorithm')
lns3 = ax1.hist(pathss_x[:,0], bins=int(np.sqrt(N_iter/append_every)), normed=True,
label=u'Path integral\nnaive sampling',alpha=.40)
if show_compare_hist:
lns5 = ax1.hist(pathss_x[:,np.random.choice(np.arange(1,N_path))], bins=int(np.sqrt(N_iter/append_every)), normed=True,
label=u'Comparison hist. $x[k]$',alpha=.40)
if show_complete_path_hist:
pathss_x2 = pathss_x.copy()
pathss_x2 = pathss_x2.flatten()
lns6 = ax1.hist(pathss_x2, bins=int(np.sqrt(N_iter*N_path/append_every)), normed=True,
label=u'Comparison using the\nfull path $\{x[k]\}_k$',alpha=.40)
ax1.tick_params(axis='y')
ax1.set_ylim(bottom=0)
ax1.set_xlim(-x_max,x_max)
if not show_path:
plt.legend(loc = 'best', fontsize=12)
if save_plot:
plt.savefig(script_dir+'/'+file_name+'.eps')
if show_plot:
plt.show()
plt.close()
if show_path:
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel(u'$\\tau$') # we already handled the x-label with ax1
beta_plot = np.linspace(0,beta,N_path+1)
path_plot = list(pathss_x[-1])
path_plot.append(pathss_x[-1][0])
lns4 = ax2.plot(path_plot, beta_plot,'o-',c='k',label=u'Path')
ax2.tick_params(axis='y')
beta_ticks = np.linspace(0,beta,N_beta_ticks)
ax2.set_yticks(beta_ticks)
ax2.set_yticklabels(u'$%.2f$'%b for b in beta_ticks)
ax2.set_ylim(bottom=0)
ax2.set_xlim(-x_max,x_max)
# Solution for having legends that share two different scales
# if show_theory and show_matrix_squaring and show_compare_hist and show_complete_path_hist:
# leg = lns1 + lns2 + [lns3[2][0]] + lns4 + [lns5[2][0]] + [lns6[2][0]]
# elif show_theory and not show_matrix_squaring and show_compare_hist and show_complete_path_hist:
# leg = lns1 + [lns3[2][0]] + lns4 + [lns5[2][0]] + [lns6[2][0]]
# elif not show_theory and show_matrix_squaring and show_compare_hist and show_complete_path_hist:
# leg = lns2 + [lns3[2][0]] + lns4 + [lns5[2][0]] + [lns6[2][0]]
# elif not show_theory and not show_matrix_squaring and show_compare_hist and show_complete_path_hist:
# leg = [lns3[2][0]] + lns4 + [lns5[2][0]] + [lns6[2][0]]
if not show_theory:
lns1 = [0]
if not show_compare_hist:
lns5 = [0]
if not show_complete_path_hist:
lns6 = [0]
leg_test = lns1 + lns2 + [lns3[2][0]] + lns4 + [lns5[2][0]] + [lns6[2][0]]
labs = []
leg = []
for i,l in enumerate(leg_test):
try:
labs.append(l.get_label())
leg.append(leg_test[i])
except:
pass
ax1.legend(leg, labs, loc='best',title=u'$\\beta=%.2f$'%beta, fontsize=12)
fig.tight_layout() # otherwise the right y-label is slightly clipped
if save_plot:
plt.savefig(script_dir+'/'+file_name+'-path_true.eps')
if show_plot:
plt.show()
plt.close()
return 0
N_path = 10
beta = 4.
N_iter = int(1e4)
delta = 0.5
potential, potential_string = harmonic_potential, 'harmonic_potential'
append_every = 1
msq_file = 'pi_x-ms-harmonic_potential-x_max_5.000-nx_201-N_iter_7-beta_fin_4.000.csv'
N_plot = 201
x_max = 3
x_plot = np.linspace(-x_max,x_max,N_plot)
plot_file_name = 'pi_x-pi-plot-%s-x_max_%.3f-N_path_%d-N_iter_%d-beta_fin_%.3f'\
%(potential_string,x_max,N_path,N_iter,beta)
pathss_x = path_naive_sampling( N_path = N_path, beta = beta, N_iter = N_iter, delta = 0.5,
potential = harmonic_potential, append_every = 1 )
figures_fn( pathss_x, beta = beta , N_plot = N_plot, x_max = x_max, N_iter=N_iter,
append_every=1, N_beta_ticks = N_path+1, msq_file=msq_file,
file_name=plot_file_name, show_theory=True , show_matrix_squaring=True,
show_path=True, save_plot=True, show_plot=True)
|
[
"jeaz.git@gmail.com"
] |
jeaz.git@gmail.com
|
74f2757220a88898a6e4b5dad3c63b866232286d
|
a37bf3343be428c453e480c7a411a91b125ab1d1
|
/deb/openmediavault/usr/lib/python3/dist-packages/openmediavault/datamodel/datamodel.py
|
c25bf348fca4181454ad38ac5f9f7141e39083bb
|
[] |
no_license
|
zys1310992814/openmediavault
|
8e73ccd66fefaddd03385834137887614726812c
|
337f37729783d9bf3a08866c0dbc8b25c53b9ca3
|
refs/heads/master
| 2020-04-20T14:18:57.505953
| 2019-02-02T15:18:07
| 2019-02-02T15:18:07
| 168,894,447
| 1
| 0
| null | 2019-02-03T00:41:55
| 2019-02-03T00:41:55
| null |
UTF-8
|
Python
| false
| false
| 3,163
|
py
|
# -*- coding: utf-8 -*-
#
# This file is part of OpenMediaVault.
#
# @license http://www.gnu.org/licenses/gpl.html GPL Version 3
# @author Volker Theile <volker.theile@openmediavault.org>
# @copyright Copyright (c) 2009-2018 Volker Theile
#
# OpenMediaVault is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# OpenMediaVault is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenMediaVault. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["Datamodel"]
import abc
import json
class Datamodel:
def __init__(self, model):
"""
:param model: The data model as Python dictionary or JSON string.
"""
# Convert into a JSON object if it is a string.
if isinstance(model, str):
model = json.loads(model)
else:
if not isinstance(model, dict):
raise TypeError("Expected dictionary.")
self._model = model
def as_dict(self):
"""
Get the data model as Python dictionary.
:returns: Returns the data model as Python dictionary.
"""
return self._model
@property
def model(self):
"""
Get the data model as Python dictionary.
:returns: Returns the data model as Python dictionary.
"""
return self.as_dict()
@property
def id(self): # pylint: disable=invalid-name
"""
Get the model identifier, e.g. 'conf.service.rsyncd.module'.
:returns: Returns the model identifier.
"""
if not "id" in self.model:
return ""
return self.model['id']
@property
def alias(self):
"""
Get the model identifier alias.
:returns: Returns the model identifier alias.
"""
if not "alias" in self.model:
return ""
return self._model['alias']
@property
def title(self):
"""
Get the model title, e.g. 'SSH certificate'.
:returns: Returns the model title.
"""
if not "title" in self.model:
return ""
return self._model['title']
@property
def description(self):
"""
Get the model description.
:returns: Returns the model description.
"""
if not "description" in self.model:
return ""
return self._model['description']
@abc.abstractmethod
def validate(self, data):
"""
Validate the specified data against the data model.
:param data: The JSON data to validate.
:returns: None.
"""
def __str__(self):
"""
Return the data model as JSON string.
:returns: Returns a JSON string.
"""
return json.dumps(self.model)
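# A minimal sketch of a concrete subclass (illustrative only, not part of
# OpenMediaVault itself):
#
#   class RsyncdModuleDatamodel(Datamodel):
#       def validate(self, data):
#           if not isinstance(data, dict):
#               raise ValueError("Expected dictionary.")
#
#   dm = RsyncdModuleDatamodel('{"id": "conf.service.rsyncd.module"}')
#   assert dm.id == "conf.service.rsyncd.module"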
|
[
"votdev@gmx.de"
] |
votdev@gmx.de
|
8e7c9117b9b08ed4a03b43f4ae8a2a8d9daa1d19
|
ef4f9cfca5cc0fbeb5ac6547d607bd51c52d53cc
|
/UnityPy/EndianBinaryReader.py
|
a85fc05e554a4f5d912a0af360ccb2cc9e4d1324
|
[
"MIT"
] |
permissive
|
l3iggs/UnityPy
|
c1fe52719990817c00232834d9436dfb6a70ee57
|
01822260261e395565f357b33d5dab35e1a847b3
|
refs/heads/master
| 2021-03-27T22:56:13.402044
| 2020-03-03T12:12:49
| 2020-03-03T12:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,950
|
py
|
import io
import struct
from .math import Color, Matrix4x4, Quaternion, Vector2, Vector3, Vector4, Rectangle
class EndianBinaryReader:
endian: str
Length: int
Position: int
stream: io.BufferedReader
def __init__(self, input_, endian='>'):
if isinstance(input_, (bytes, bytearray)):
self.stream = io.BytesIO(input_)
elif isinstance(input_, (io.BytesIO, io.BufferedReader)):
self.stream = input_
else:
# test if input is a streamable object
try:
p = input_.tell()
input_.read(1)
input_.seek(p)
assert (p == input_.tell())
self.stream = input_
except:
raise ValueError("Invalid input type - %s." % type(input_))
self.endian = endian
self.Length = self.stream.seek(0, 2)
self.Position = 0
def get_position(self):
return self.stream.tell()
def set_position(self, value):
self.stream.seek(value)
Position = property(get_position, set_position)
@property
def bytes(self):
last_pos = self.Position
self.Position = 0
ret = self.read()
self.Position = last_pos
return ret
def dispose(self):
self.stream.close()
def read(self, *args):
return self.stream.read(*args)
def read_byte(self) -> int:
return struct.unpack(self.endian + "b", self.read(1))[0]
def read_u_byte(self) -> int:
return struct.unpack(self.endian + "B", self.read(1))[0]
def read_bytes(self, num) -> bytes:
return self.read(num)
def read_short(self) -> int:
return struct.unpack(self.endian + "h", self.read(2))[0]
def read_int(self) -> int:
return struct.unpack(self.endian + "i", self.read(4))[0]
def read_long(self) -> int:
return struct.unpack(self.endian + "q", self.read(8))[0]
def read_u_short(self) -> int:
return struct.unpack(self.endian + "H", self.read(2))[0]
def read_u_int(self) -> int:
return struct.unpack(self.endian + "I", self.read(4))[0]
def read_u_long(self) -> int:
return struct.unpack(self.endian + "Q", self.read(8))[0]
def read_float(self) -> float:
return struct.unpack(self.endian + "f", self.read(4))[0]
def read_double(self) -> float:
return struct.unpack(self.endian + "d", self.read(8))[0]
def read_boolean(self) -> bool:
return bool(struct.unpack(self.endian + "?", self.read(1))[0])
def read_string(self, size=None, encoding="utf-8") -> str:
if size is None:
ret = self.read_string_to_null()
else:
ret = struct.unpack(f"{self.endian}{size}is", self.read(size))[0]
try:
return ret.decode(encoding)
except UnicodeDecodeError:
return ret
def read_string_to_null(self, max_length=32767) -> str:
ret = []
c = b""
while c != b"\0" and len(ret) < max_length and self.Position != self.Length:
ret.append(c)
c = self.read(1)
if not c:
raise ValueError("Unterminated string: %r" % ret)
return b"".join(ret).decode('utf8', 'replace')
def read_aligned_string(self):
length = self.read_int()
if 0 < length <= self.Length - self.Position:
string_data = self.read_bytes(length)
result = string_data.decode('utf8', 'backslashreplace')
self.align_stream(4)
return result
return ""
def align_stream(self, alignment=4):
pos = self.Position
mod = pos % alignment
if mod != 0:
self.Position += alignment - mod
def read_quaternion(self):
return Quaternion(self.read_float(), self.read_float(), self.read_float(), self.read_float())
def read_vector2(self):
return Vector2(self.read_float(), self.read_float())
def read_vector3(self):
return Vector3(self.read_float(), self.read_float(), self.read_float())
def read_vector4(self):
return Vector4(self.read_float(), self.read_float(), self.read_float(), self.read_float())
def read_rectangle_f(self):
return Rectangle(self.read_float(), self.read_float(), self.read_float(), self.read_float())
def read_color4(self):
return Color(self.read_float(), self.read_float(), self.read_float(), self.read_float())
def read_matrix(self):
return Matrix4x4(self.read_float_array(16))
def read_array(self, command, length: int):
return [command() for i in range(length)]
def read_boolean_array(self):
return self.read_array(self.read_boolean, self.read_int())
def read_u_short_array(self):
return self.read_array(self.read_u_short, self.read_int())
def read_int_array(self, length=0):
return self.read_array(self.read_int, length if length else self.read_int())
def read_u_int_array(self, length=0):
return self.read_array(self.read_u_int, length if length else self.read_int())
def read_float_array(self, length=0):
return self.read_array(self.read_float, length if length else self.read_int())
def read_string_array(self):
return self.read_array(self.read_aligned_string, self.read_int())
def read_vector2_array(self):
return self.read_array(self.read_vector2, self.read_int())
def read_vector4_array(self):
return self.read_array(self.read_vector4, self.read_int())
def read_matrix_array(self):
return self.read_array(self.read_matrix, self.read_int())
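# A minimal usage sketch (illustrative bytes, not a real Unity asset):
#
#   reader = EndianBinaryReader(b"\x00\x00\x00\x2a\x3f\x80\x00\x00")
#   assert reader.read_int() == 42      # big-endian by default ('>')
#   assert reader.read_float() == 1.0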
|
[
"rkolbe96@gmail.com"
] |
rkolbe96@gmail.com
|
81371c236c1b31b828834065ba1f4590cc065578
|
91948d5be26636f1f2b941cb933701ea626a695b
|
/problem002_uber_product.py
|
43dee7ed290c090339c4cb64d1c47645fbcbabf7
|
[
"MIT"
] |
permissive
|
loghmanb/daily-coding-problem
|
4ae7dd201fde5ee1601e0acae9e9fc468dcd75c9
|
b2055dded4276611e0e7f1eb088e0027f603aa7b
|
refs/heads/master
| 2023-08-14T05:53:12.678760
| 2023-08-05T18:12:38
| 2023-08-05T18:12:38
| 212,894,228
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
'''
This problem was asked by Uber.
Given an array of integers, return a new array such that each element at index i
of the new array is the product of all the numbers in the original array except
the one at i.
For example, if our input was [1, 2, 3, 4, 5], the expected output would be
[120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output would be
[2, 3, 6].
Follow-up: what if you can't use division?
'''
def solve(int_list):
all_product = 1
for x in int_list:
all_product *= x
return list(map(lambda x:int(all_product/x), int_list))
'''
For [a, b, c, d, e]:
first step:
each entry of l1 is the product of all values to the left of that position
in the main array
l1 = [1, a, ab, abc, abcd]
second step:
repeat the same idea in reverse (from right to left)
l2 = [bcde, cde, de, e, 1]
and finally:
ans = l1[i]*l2[i] for each position
'''
def solve_without_division(int_list):
n = len(int_list)
# from left to right
l1 = [1]*n
for i in range(1, n):
l1[i] = l1[i-1] * int_list[i-1]
#from right to left
l2 = [1]*n
for i in range(1, n):
l2[n-i-1] = l2[n-i] * int_list[n-i]
ans = [l1[i]*l2[i] for i in range(n)]
return ans
if __name__ == '__main__':
test_list = [[1, 2, 3, 4, 5],
[3, 2, 1]]
for l in test_list:
print( 'list: ', l, ' output#1: ', solve(l), 'output#2: ', solve_without_division(l) )
|
[
"loghmanb@gmail.com"
] |
loghmanb@gmail.com
|
9186c66a5e468ddac1f6938a4ca7a42f605deadf
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/servicefabricmesh/v20180701preview/get_volume.py
|
bf1e34036b579879a38e2e8ba7e5a4239246a83e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,980
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVolumeResult',
'AwaitableGetVolumeResult',
'get_volume',
]
@pulumi.output_type
class GetVolumeResult:
"""
This type describes a volume resource.
"""
def __init__(__self__, azure_file_parameters=None, description=None, id=None, location=None, name=None, provider=None, provisioning_state=None, tags=None, type=None):
if azure_file_parameters and not isinstance(azure_file_parameters, dict):
raise TypeError("Expected argument 'azure_file_parameters' to be a dict")
pulumi.set(__self__, "azure_file_parameters", azure_file_parameters)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provider and not isinstance(provider, str):
raise TypeError("Expected argument 'provider' to be a str")
pulumi.set(__self__, "provider", provider)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="azureFileParameters")
def azure_file_parameters(self) -> Optional['outputs.VolumeProviderParametersAzureFileResponse']:
"""
This type describes a volume provided by an Azure Files file share.
"""
return pulumi.get(self, "azure_file_parameters")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
User readable description of the volume.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def provider(self) -> str:
"""
Provider of the volume.
"""
return pulumi.get(self, "provider")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
State of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
class AwaitableGetVolumeResult(GetVolumeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVolumeResult(
azure_file_parameters=self.azure_file_parameters,
description=self.description,
id=self.id,
location=self.location,
name=self.name,
provider=self.provider,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type)
def get_volume(resource_group_name: Optional[str] = None,
volume_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeResult:
"""
This type describes a volume resource.
:param str resource_group_name: Azure resource group name
:param str volume_name: The identity of the volume.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['volumeName'] = volume_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:servicefabricmesh/v20180701preview:getVolume', __args__, opts=opts, typ=GetVolumeResult).value
return AwaitableGetVolumeResult(
azure_file_parameters=__ret__.azure_file_parameters,
description=__ret__.description,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provider=__ret__.provider,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type)
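# A minimal usage sketch (hypothetical resource names):
#
#   volume = get_volume(resource_group_name="my-rg", volume_name="my-volume")
#   pulumi.export("volume_provider", volume.provider)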
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
25854f44a5cc291fbdd5dfec90ae8bea919c44ef
|
201356e09fb6dd82d36ed5b93b08a29482b68fb2
|
/HD - Intro/Task 20/example.py
|
ddb49c8255c3ccf527b2f2d8dfdf08f3094bf33d
|
[] |
no_license
|
M45t3rJ4ck/Py-Code
|
5971bad5304ea3d06c1cdbd065941271c33e4254
|
32063d149824eb22163ea462937e4c26917a8b14
|
refs/heads/master
| 2020-04-08T05:03:44.772327
| 2018-11-26T06:41:03
| 2018-11-26T06:41:03
| 159,044,079
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,058
|
py
|
#************* HELP *****************
#REMEMBER THAT IF YOU NEED SUPPORT ON ANY ASPECT OF YOUR COURSE SIMPLY LOG IN TO www.hyperiondev.com/support TO:
#START A CHAT WITH YOUR MENTOR, SCHEDULE A CALL OR GET SUPPORT OVER EMAIL.
#*************************************
# *** IF YOU DID NOT INSTALL NOTEPAD++ AND PYTHON (VERSION 2.7.3 or 2.7.5) ***
# *** PLEASE STOP READING THIS NOW, OPEN THE INSTALLERS FOLDER IN THIS DIRECTORY,
# RUN BOTH FILES TO INSTALL NOTEPAD++ AND PYTHON. ***
# PLEASE ENSURE YOU OPEN THIS FILE IN NOTEPAD++ OR IDLE otherwise you will not be able to read it.
# *** NOTE ON COMMENTS ***
# This is a comment in Python.
# Comments can be placed anywhere in Python code and the computer ignores them - they are intended to be read by humans.
# Any line with a # in front of it is a comment.
# Please read all the comments in this example file and all others.
# ================= Consolidation Task ==================
# Everything you have been learning has been tested with application tasks for each section.
# Now, however, you will be utilising everything you have learnt so far to solve this problem.
# You've covered all the necessary content to solve a large real-life data problem.
# Data management is an extremely important component of the Computer Science field and this task will provide you with first hand experience.
# You may find it helpful to revise what you've learnt from your previous tasks.
# This task will also require you to do some individual research as this is an essential component to being a successful software developer.
# You now have all the tools for the compulsory task.
# ================= File Input and Output ==================
# ************ Example 1 ************
# Write a file
out_file = open("test.txt","w")
out_file.write("This Text is going to out file\nLook at it and see!")
out_file.close()
# ************ Example 2 ************
# Read a file
print("\nExample 2:")
in_file = open("test.txt","r")
text = in_file.read()
in_file.close()
print(text)
# ================= The split() Method ==================
# ************ Example 3 ************
print("\nExample 3:")
print("This is a bunch of words".split())
# prints out ['This', 'is', 'a', 'bunch', 'of', 'words']
text = "First batch, second batch, third, fourth"
print(text.split(","))
# prints out ['First batch', ' second batch', ' third', ' fourth']
# ************ Example 4 ************
print("\nExample 4:")
text = "First batch, second batch, third, fourth"
list = text.split(",")
print(len(list))
# prints out 4
print(list[-1])
# prints out 'fourth'
list = text.split(",",2)
print(list)
print(len(list))
# prints out 3
print(list[-1])
# prints out 'third, fourth'
# ****************** END OF EXAMPLE CODE ********************* #
# == Make sure you have read and understood all of the code in this Python file.
# == Please complete the compulsory task below to proceed to the next task ===
# == Ensure you complete your work in this folder so that one of our tutors can easily locate and mark it ===
# ================= Compulsory Task ==================
# After you've read and understood all of example.py, create a new Python file called amazon.py inside this folder.
# This programming problem is one posed to new software developer applicants to Amazon, the software development company you've probably bought a book or dvd from once in your life.
# Inside amazon.py, write Python code to read in the contents of the text file 'input.txt'. For each line in input.txt,
# write out a new line in a new text file output.txt that computes the answer to some operation on a list of numbers.
# If the input.txt file has the following:
# min: 1,2,3,5,6
# max: 1,2,3,5,6
# avg: 1,2,3,5,6
# Your program should generate an output.txt file as following:
# The min of [1, 2, 3, 5, 6] is 1
# The max of [1, 2, 3, 5, 6] is 6
# The avg of [1, 2, 3, 5, 6] is 3.4
# Assume that the only operations given in the input file are 'min', 'max' and 'avg', and that the operation is always followed by a list of comma separated integers.
# Your program should handle any combination of operations, any lengths of input numbers. You can assume that the list of input numbers are always valid ints, and is never empty.
# ================= BONUS Optional Task ==================
# Change your program to also handle the operation 'px' where x is a number from 10 to 90 and defines the x percentile of the list of numbers. For example:
# input.txt:
# min: 1,2,3,5,6
# max: 1,2,3,5,6
# avg: 1,2,3,5,6
# p90: 1,2,3,4,5,6,7,8,9,10
# sum: 1,2,3,5,6
# min: 1,5,6,14,24
# max: 2,3,9
# p70: 1,2,3
# Your output.txt should read:
# The min of [1, 2, 3, 5, 6] is 1
# The max of [1, 2, 3, 5, 6] is 6
# The avg of [1, 2, 3, 5, 6] is 3.4
# The 90th percentile of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] is 9
# The sum of [1, 2, 3, 5, 6] is 17
# The min of [1, 5, 6, 14, 24] is 1
# The max of [2, 3, 9] is 9
# The 70th percentile of [1, 2, 3] is 2
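# A minimal sketch of one possible amazon.py for the compulsory task (one
# approach among many, covering only min/max/avg):
#
#   with open("input.txt") as in_file, open("output.txt", "w") as out_file:
#       for line in in_file:
#           op, numbers = line.split(":")
#           values = [int(n) for n in numbers.split(",")]
#           if op == "min":
#               result = min(values)
#           elif op == "max":
#               result = max(values)
#           else:  # avg
#               result = sum(values) / float(len(values))
#           out_file.write("The %s of %s is %s\n" % (op, values, result))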
|
[
"wavoges@gmail.com"
] |
wavoges@gmail.com
|
ab9afc1e2ce503d83c3056e69b4288ec408dfa41
|
b7b7b9976a33d7eab9bd1ac0da89465ce38c20b9
|
/tests/0800_builder/03_get_globals.py
|
730a75c907d48e302eb80b7ed158156ae55b66eb
|
[
"MIT"
] |
permissive
|
sveetch/Optimus
|
87070ae99890a2c69dc28d5582cd680cd7d516dc
|
983aebeccd2ada7a5a0ab96f9296d4bba1112022
|
refs/heads/master
| 2021-10-12T00:11:33.012608
| 2021-10-09T22:54:29
| 2021-10-09T22:54:29
| 6,760,885
| 2
| 1
| null | 2013-12-16T18:08:42
| 2012-11-19T13:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
import os
from optimus.pages.builder import PageBuilder
def test_get_globals(minimal_basic_settings, fixtures_settings, caplog):
"""
Context should be correclty filled with context globals (SITE shortcut,
settings, optimus version)
"""
projectdir = os.path.join(fixtures_settings.fixtures_path, "basic_template")
settings = minimal_basic_settings(projectdir)
# Init builder with default environment
builder = PageBuilder(settings)
assert builder.jinja_env.globals["SITE"]["name"] == "basic"
assert builder.jinja_env.globals["debug"] is True
# Tamper settings to change context
settings.SITE_NAME = "Foobar"
settings.DEBUG = False
context = builder.get_globals()
assert context["SITE"]["name"] == "Foobar"
assert context["SITE"]["web_url"] == "http://localhost"
assert context["debug"] is False
assert "OPTIMUS" in context
assert "_SETTINGS" in context
assert context["_SETTINGS"]["LANGUAGE_CODE"] == "en_US"
def test_get_globals_https(minimal_basic_settings, fixtures_settings, caplog):
"""
When setting 'HTTPS_ENABLED' is enabled, 'SITE.web_url' should start with
'https://'.
"""
projectdir = os.path.join(fixtures_settings.fixtures_path, "basic_template")
settings = minimal_basic_settings(projectdir)
# Init builder with default environment
builder = PageBuilder(settings)
# Tamper settings to change context
settings.HTTPS_ENABLED = True
context = builder.get_globals()
assert context["SITE"]["web_url"] == "https://localhost"
|
[
"sveetch@gmail.com"
] |
sveetch@gmail.com
|
49e638f83de804b23bbf4d3eeae59f14e094fe55
|
641f6cc8f956b8c318b9d438e31ada4b6ebc1b5f
|
/models/qa.py
|
2b1848e4b4adc5f78c9c9bdc5bb9bcf4ca74e13c
|
[
"Apache-2.0"
] |
permissive
|
linkinpark213/quantization-networks-cifar10
|
61754f4beddc1da5c1b407c4e7880d54c172099c
|
7214733beed2f1d661633baadabdb300150b15b1
|
refs/heads/master
| 2022-12-05T19:34:41.656292
| 2020-08-09T06:46:23
| 2020-08-09T06:46:23
| 286,180,844
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,469
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# qa.py is used to quantize the activations of a model.
from __future__ import print_function, absolute_import
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.autograd import Variable
import numpy as np
from utils.cluster import params_cluster
class SigmoidT(torch.autograd.Function):
""" sigmoid with temperature T for training
we need the gradients for input and bias
for customization of function, refer to https://pytorch.org/docs/stable/notes/extending.html
"""
@staticmethod
def forward(ctx, input, scales, n, b, T):
"""
Sigmoid T forward propagation.
Formula:
\sum_{i=1}^{n} \frac{s_i}{1 + e^{-T (x - b_i)}}
Args:
ctx: A SigmoidTBackward context object.
input: The input tensor, which is a parameter in a network.
scales: A list of floating numbers with length = n. The scales of the unit step functions.
n: An integer. The number of possible quantization values - 1.
b: A list of integers with length = n. The biases of the unit step functions.
T: An integer. The temperature.
Returns:
A tensor with same shape as the input.
"""
ctx.save_for_backward(input)
ctx.T = T
ctx.b = b
ctx.scales = scales
ctx.n = n
# \sum_k s_k * sigmoid(T * (x - b_k)); the input here is already beta-scaled
buf = ctx.T * (input - ctx.b[0])
buf = torch.clamp(buf, min=-10.0, max=10.0)
output = ctx.scales[0] / (1.0 + torch.exp(-buf))
for k in range(1, ctx.n):
buf = ctx.T * (input - ctx.b[k])
buf = torch.clamp(buf, min=-10.0, max=10.0)
output += ctx.scales[k] / (1.0 + torch.exp(-buf))
return output
@staticmethod
def backward(ctx, grad_output):
"""
Backward propagation of the activation quantization.
Args:
ctx: A SigmoidTBackward context object.
grad_output: The gradients propagated backwards to this layer.
Returns:
A tuple of 5 elements. Gradients for input, scales, n, b and T.
However, none of scales, n, b and T require gradients so only the first element is not None.
"""
# set T = 1 when training a binary model in the backward pass.
ctx.T = 1
input, = ctx.saved_tensors
b_buf = ctx.T * (input - ctx.b[0])
b_buf = torch.clamp(b_buf, min=-10.0, max=10.0)
b_output = ctx.scales[0] / (1.0 + torch.exp(-b_buf))
temp = b_output * (1 - b_output) * ctx.T
for j in range(1, ctx.n):
b_buf = ctx.T * (input - ctx.b[j])
b_buf = torch.clamp(b_buf, min=-10.0, max=10.0)
b_output = ctx.scales[j] / (1.0 + torch.exp(-b_buf))
temp += b_output * (1 - b_output) * ctx.T
grad_input = Variable(temp) * grad_output
# corresponding to grad_input
return grad_input, None, None, None, None
sigmoidT = SigmoidT.apply
def step(x, b):
"""
The hard step function used as the ideal quantization function at test time.
"""
y = torch.zeros_like(x)
mask = torch.gt(x - b, 0.0)
y[mask] = 1.0
return y
class Quantization(nn.Module):
"""
Quantization Activation. Only used when activations are quantized too.
Args:
quant_values: the target quantized values, like [-4, -2, -1, 0, 1, 2, 4]
quan_bias and init_beta: the data for initializing the quantization parameters (biases, beta)
- for activations, the format is `N x 1` for the biases and `1 x 1` for beta;
the initialization values for the biases and beta are obtained offline
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Usage:
- for activations, just append this module after the activations when building the graph
"""
def __init__(self, quant_values, outlier_gamma=0.001):
super(Quantization, self).__init__()
self.values = quant_values
self.outlier_gamma = outlier_gamma
# number of sigmoids
self.n = len(self.values) - 1
self.alpha = Parameter(torch.Tensor([1]))
self.beta = Parameter(torch.Tensor([1]))
self.register_buffer('biases', torch.zeros(self.n))
self.register_buffer('scales', torch.zeros(self.n))
# boundary = np.array(quan_bias)
self.init_scale_and_offset()
self.inited = False
# self.init_biases(boundary)
# self.init_alpha_and_beta(init_beta)
def init_scale_and_offset(self):
"""
Initialize the scale and offset of quantization function.
"""
for i in range(self.n):
gap = self.values[i + 1] - self.values[i]
self.scales[i] = gap
def init_biases(self, biases):
"""
Initialize the bias of quantization function.
init_data in numpy format.
"""
# activations initialization (obtained offline)
assert biases.size == self.n
self.biases.copy_(torch.from_numpy(biases))
# print('baises inited!!!')
def init_alpha_and_beta(self, beta):
"""
Initialize the alpha and beta of quantization function.
init_data in numpy format.
"""
# activations initialization (obtained offline)
self.beta.data = torch.Tensor([beta]).cuda()
self.alpha.data = torch.reciprocal(self.beta.data)
def forward(self, input, T=1):
if not self.inited:
print('Initializing activation quantization layer')
params = input.data.detach().cpu().numpy()
biases, (min_value, max_value) = params_cluster(params, self.values, gamma=self.outlier_gamma)
print('biases = {}'.format(biases))
self.init_biases(np.array(biases))
# Method in Quantization Networks
# self.init_alpha_and_beta((self.values[-1] * 5) / (4 * input.data.abs().max()))
# Method in Fully Quantized Networks
self.init_alpha_and_beta(self.values[-1] / max_value)
self.inited = True
return input
input = input.mul(self.beta)
if self.training:
output = sigmoidT(input, self.scales, self.n, self.biases, T)
else:
output = step(input, b=self.biases[0]) * self.scales[0]
for i in range(1, self.n):
output += step(input, b=self.biases[i]) * self.scales[i]
output = output.mul(self.alpha)
return output
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
alpha_key = prefix + 'alpha'
beta_key = prefix + 'beta'
if alpha_key in state_dict and beta_key in state_dict:
super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
self.inited = True
else:
error_msgs.append('Activation quantization parameters not found for {} '.format(prefix[:-1]))
def __repr__(self):
return 'Quantization(alpha={}, beta={}, values={}, n={})'.format(self.alpha.data, self.beta.data, self.values,
self.n)
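# A minimal usage sketch (illustrative values; assumes a CUDA device, since
# init_alpha_and_beta() places beta on the GPU):
#
#   quant = Quantization(quant_values=[-1, 0, 1])
#   x = torch.randn(8, 16).cuda()
#   _ = quant(x)         # the first call only initializes biases/alpha/beta
#   y = quant(x, T=10)   # subsequent calls quantize the activations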
|
[
"linkinpark213@outlook.com"
] |
linkinpark213@outlook.com
|
8c33daabcaf0a5c55bd5b54898f7859bf4bdf18e
|
898e263e9264804df750fe24cc767e08856c9e09
|
/storage/cloud-client/storage_get_metadata.py
|
e146e9321ad0dcce46c51ef75daccb4cf14ce381
|
[
"Apache-2.0"
] |
permissive
|
HoleCat/echarlosperros
|
98da28d0fc76c57459ce4c9a53c89e62c350f754
|
b67460de0467e05b42a763c4430b26ecfd97c2aa
|
refs/heads/main
| 2023-01-21T15:29:13.091406
| 2020-12-03T01:33:00
| 2020-12-03T01:33:00
| 318,039,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,575
|
py
|
#!/usr/bin/env python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START storage_get_metadata]
from google.cloud import storage
def blob_metadata(bucket_name, blob_name):
"""Prints out a blob's metadata."""
# bucket_name = 'your-bucket-name'
# blob_name = 'your-object-name'
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.get_blob(blob_name)
print("Blob: {}".format(blob.name))
print("Bucket: {}".format(blob.bucket.name))
print("Storage class: {}".format(blob.storage_class))
print("ID: {}".format(blob.id))
print("Size: {} bytes".format(blob.size))
print("Updated: {}".format(blob.updated))
print("Generation: {}".format(blob.generation))
print("Metageneration: {}".format(blob.metageneration))
print("Etag: {}".format(blob.etag))
print("Owner: {}".format(blob.owner))
print("Component count: {}".format(blob.component_count))
print("Crc32c: {}".format(blob.crc32c))
print("md5_hash: {}".format(blob.md5_hash))
print("Cache-control: {}".format(blob.cache_control))
print("Content-type: {}".format(blob.content_type))
print("Content-disposition: {}".format(blob.content_disposition))
print("Content-encoding: {}".format(blob.content_encoding))
print("Content-language: {}".format(blob.content_language))
print("Metadata: {}".format(blob.metadata))
print("Custom Time: {}".format(blob.custom_time))
print("Temporary hold: ", "enabled" if blob.temporary_hold else "disabled")
print(
"Event based hold: ",
"enabled" if blob.event_based_hold else "disabled",
)
if blob.retention_expiration_time:
print(
"retentionExpirationTime: {}".format(
blob.retention_expiration_time
)
)
# [END storage_get_metadata]
if __name__ == "__main__":
blob_metadata(bucket_name=sys.argv[1], blob_name=sys.argv[2])
|
[
"201602035x@gmail.com"
] |
201602035x@gmail.com
|
7b8b55dc7677f2959a75c9ee3e91b6b7e9a29037
|
d00e29c27d4d4cccbee8f3923d2d837a2d04eedb
|
/sush_utils/simpleFlask.py
|
ec4e06723bfd911d8136e309d7c2e9cc7c4b7c6b
|
[] |
no_license
|
sush80/switcherPy
|
23bfa054f0ed4ab636c5fd69ac70dd28a957747f
|
6098431bf526a7ca46d659d73bb859d9fa163f5a
|
refs/heads/master
| 2021-07-25T02:40:11.107318
| 2018-11-17T18:30:33
| 2018-11-17T18:30:33
| 112,855,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,566
|
py
|
from flask import Flask
try:
from sush_utils.sush_utils import system_uptime # import when this file is used as a library
except:
from sush_utils import system_uptime # fallback import when the file runs locally
def start_simple_flask_not_returning():
app = Flask(__name__)
@app.route('/')
def hello_world():
uptime_hours = system_uptime.string_get()
return 'Hello, World! Uptime: ' + uptime_hours
# use_reloader=False prevents this file from being started multiple times,
# which would result in multiple threads and duplicated data structures
app.run(host='0.0.0.0', port=5000, debug=False,use_reloader=False)
if __name__ == "__main__":
import logging
logger = logging.getLogger('simple_flask')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('simple_flask.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(threadName)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh) # FileHandler
logger.addHandler(ch)
logger.info("starting flask now...")
try:
start_simple_flask_not_returning()
except Exception as e:
logger.error("Exception : " + str(e))
logger.info("flask done - exit")
|
[
"you@example.com"
] |
you@example.com
|
a42bb018ff03bfd9a53342898564db3d33b7a2af
|
9f2450da1c4fd7844e0e162a94c7edb53c27fe72
|
/wm_compositional/assembly.py
|
5cbdd69811fa36d2496a095452a9adb6fafe4cc5
|
[] |
no_license
|
sorgerlab/indra_apps
|
6626a06dad9e7f820c71d7e03bdf42a6308746cc
|
3f20ca3f7b3855636607c63b1956c404bfe1b16e
|
refs/heads/master
| 2021-06-11T02:00:28.866305
| 2021-04-26T01:33:41
| 2021-04-26T01:33:41
| 128,094,623
| 1
| 8
| null | 2021-04-26T01:29:05
| 2018-04-04T17:10:11
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,614
|
py
|
import os
import glob
import tqdm
import logging
from collections import defaultdict
from indra.sources import eidos, hume, cwms, sofia
from indra.statements import Influence, Event
from indra.tools import assemble_corpus as ac
from indra.ontology.world.ontology import WorldOntology
from indra.pipeline import register_pipeline, AssemblyPipeline
from indra_world.assembly.operations import *
from indra_world.sources.dart import process_reader_outputs
from indra_world.corpus import Corpus
from indra.statements import stmts_to_json_file
reader_versions = {'flat':
{'cwms': '2020.08.28',
'hume': 'r2020_08_19_4',
'sofia': '1.1',
'eidos': '1.0.3'},
'compositional':
{'cwms': '2020.09.03',
'hume': 'r2020_09_28_4',
'sofia': '1.1',
'eidos': '1.0.4'}}
ont_url = 'https://github.com/WorldModelers/Ontologies/blob/'\
'25690a258d02fdf1f35ce9140f7cd54145e2b30c/'\
'CompositionalOntology_v2.1_metadata.yml'
logger = logging.getLogger('wm_compositional.assembly')
def concept_matches_compositional(concept):
wm = concept.db_refs.get('WM')
if not wm:
return concept.name
wm_top = tuple(entry[0] if entry else None for entry in wm[0])
return wm_top
def matches_compositional(stmt):
if isinstance(stmt, Influence):
key = (stmt.__class__.__name__,
concept_matches_compositional(stmt.subj.concept),
concept_matches_compositional(stmt.obj.concept),
stmt.polarity_count(),
stmt.overall_polarity()
)
elif isinstance(stmt, Event):
key = (stmt.__class__.__name__,
concept_matches_compositional(stmt.concept),
stmt.delta.polarity)
return str(key)
@register_pipeline
def print_grounding_stats(statements):
logger.info('-----------------------------------------')
logger.info('Number of Influences: %s' % len([s for s in statements if
isinstance(s, Influence)]))
grs = []
gr_combos = []
evidences = 0
evidence_by_reader = defaultdict(int)
for stmt in statements:
if isinstance(stmt, Influence):
for concept in [stmt.subj.concept, stmt.obj.concept]:
grs.append(concept.get_grounding())
gr_combos.append((stmt.subj.concept.get_grounding(),
stmt.obj.concept.get_grounding()))
evidences += len(stmt.evidence)
for ev in stmt.evidence:
evidence_by_reader[ev.source_api] += 1
logger.info('Unique groundings: %d' % len(set(grs)))
logger.info('Unique combinations: %d' % len(set(gr_combos)))
logger.info('Number of evidences: %d' % evidences)
logger.info('Number of evidences by reader: %s' %
str(dict(evidence_by_reader)))
logger.info('-----------------------------------------')
return statements
if __name__ == '__main__':
readers = ['sofia', 'eidos', 'hume', 'cwms']
grounding = 'compositional'
do_upload = False
stmts = []
for reader in readers:
version = reader_versions[grounding][reader]
pattern = '*' if reader != 'sofia' \
else ('*_new' if grounding == 'compositional' else '*_old')
fnames = glob.glob('/Users/ben/data/dart/%s/%s/%s' % (reader, version,
pattern))
print('Found %d files for %s' % (len(fnames), reader))
for fname in tqdm.tqdm(fnames):
if reader == 'eidos':
pp = eidos.process_json_file(fname, grounding_mode=grounding)
elif reader == 'hume':
pp = hume.process_jsonld_file(fname, grounding_mode=grounding)
elif reader == 'cwms':
pp = cwms.process_ekb_file(fname, grounding_mode=grounding)
elif reader == 'sofia':
pp = sofia.process_json_file(fname, grounding_mode=grounding)
doc_id = os.path.basename(fname)[:32]
for stmt in pp.statements:
for ev in stmt.evidence:
if 'provenance' not in ev.annotations:
ev.annotations['provenance'] = [
{'document': {'@id': doc_id}}]
else:
prov = ev.annotations['provenance'][0]['document']
prov['@id'] = doc_id
stmts += pp.statements
if grounding == 'compositional':
validate_grounding_format(stmts)
ap = AssemblyPipeline.from_json_file('assembly_%s.json' % grounding)
assembled_stmts = ap.run(stmts)
if do_upload:
corpus_id = 'compositional_v4'
stmts_to_json_file(assembled_stmts, '%s.json' % corpus_id)
meta_data = {
'corpus_id': corpus_id,
'description': ('Assembly of 4 reader outputs with the '
'compositional ontology (%s).' % ont_url),
'display_name': 'Compositional ontology assembly v3',
'readers': readers,
'assembly': {
'level': 'grounding',
'grounding_threshold': 0.6,
},
'num_statements': len(assembled_stmts),
'num_documents': 382
}
corpus = Corpus(corpus_id, statements=assembled_stmts,
raw_statements=stmts,
meta_data=meta_data)
corpus.s3_put()
|
[
"ben.gyori@gmail.com"
] |
ben.gyori@gmail.com
|
b49b0aa10664dfb2a463c9db9a3ee3391a1bb550
|
edd72c118fdca69cc58b6a85ac1b0f153f44d2f8
|
/ruts/datasets/dataset.py
|
3c7cf834387bd5281f7957adabb42f192ff10053
|
[
"MIT"
] |
permissive
|
webprogrammer77/ruTS
|
7e459561aefd31ab1c0cfdc6503c9e90ea3392c7
|
c3c95f99162115ea2c522ee7b90cfc1ee7de91e5
|
refs/heads/master
| 2022-12-12T11:12:01.423729
| 2020-09-04T14:08:21
| 2020-09-04T14:08:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Generator
class Dataset(object, metaclass=ABCMeta):
"""
Абстрактный класс для работы с набором данных
Аргументы:
name (str): Наименование набора данных
meta (dict): Справочная информация о наборе данных
Методы:
check_data: Проверка наличия всех необходимых директорий и файлов в наборе данных
get_texts: Получение текстов (без заголовков) из набора данных
get_records: Получение записей (с заголовками) из набора данных
download: Загрузка набора данных из сети
"""
__test__ = False
@abstractmethod
def __init__(self, name, meta=None):
self.name = name
self.meta = meta or {}
def __repr__(self):
return f"Набор данных('{self.name}')"
@property
def info(self):
info = {'Name': self.name}
info.update(self.meta)
return info
@abstractmethod
def __iter__(self):
raise NotImplementedError
@abstractmethod
def check_data(self) -> bool:
raise NotImplementedError
@abstractmethod
def get_texts(self, *args: Any) -> Generator[str, None, None]:
raise NotImplementedError
@abstractmethod
def get_records(self, *args: Any) -> Generator[Dict[str, Any], None, None]:
raise NotImplementedError
@abstractmethod
def download(self, force: bool = False):
raise NotImplementedError
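# A minimal sketch of a concrete subclass (illustrative only, not part of ruTS):
#
#   class InMemoryDataset(Dataset):
#       def __init__(self):
#           super().__init__("in_memory", meta={"description": "toy data"})
#           self._records = [{"title": "a", "text": "hello"}]
#       def __iter__(self):
#           return iter(self._records)
#       def check_data(self):
#           return bool(self._records)
#       def get_texts(self):
#           for record in self._records:
#               yield record["text"]
#       def get_records(self):
#           yield from self._records
#       def download(self, force=False):
#           pass  # nothing to download for in-memory data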
|
[
"kouki.sergey@gmail.com"
] |
kouki.sergey@gmail.com
|
dbb50044dbe2f11200f23c22e4710f8b05fe4a41
|
97676f59bdd398f00bc0939c40a30c1f07e523c6
|
/course-files/lectures/lecture11/answers/03_give_grade.py
|
a105e0a34f90cb9671110df7c99d3182aa0e2b81
|
[] |
no_license
|
eecs110/fall2020
|
db1b0f9036f9a0036ff5cc6ba5c30ba6fa5cffed
|
81363c4f1c192f8366456a44df8988298809146b
|
refs/heads/master
| 2023-01-29T05:49:36.452055
| 2020-12-02T20:58:43
| 2020-12-02T20:58:43
| 289,575,477
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
def give_grade(score):
if score >= 90:
return 'A'
elif score >= 80:
return 'B'
elif score >= 70:
return 'C'
elif score >= 60:
return 'D'
else:
return 'F'
print('65:', give_grade(65))
print('99:', give_grade(99))
print('84:', give_grade(84))
print('76:', give_grade(76))
print('20:', give_grade(20))
## Visualize this execution (a visual representation): https://goo.gl/RPqCLc
|
[
"vanwars@gmail.com"
] |
vanwars@gmail.com
|
4f82a4e97672d4f0c016b0d1b25590076f38187a
|
847177b00a6d28a075b57f29ae37a3da7d9ce823
|
/setup.py
|
d28ffb184f2b7fe85e528e37e1565693800c8b3b
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
pcdshub/recordits
|
3faccfb76b3fdcabd575e21da3d6c81079b2daf0
|
c89f6b59accd430f26ea38b1660f74350be8aa37
|
refs/heads/master
| 2021-11-12T10:33:25.038276
| 2021-10-29T16:56:22
| 2021-10-29T16:56:22
| 235,641,592
| 0
| 2
|
NOASSERTION
| 2021-04-15T18:27:18
| 2020-01-22T18:48:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
import sys
from os import path
from setuptools import find_packages, setup
import versioneer
min_version = (3, 6)
if sys.version_info < min_version:
error = """
recordits does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*sys.version_info[:2], *min_version)
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
git_requirements = [r for r in requirements if r.startswith('git+')]
if git_requirements:
print('User must install the following packages manually:')
print()
print("\n".join(f'* {r}' for r in git_requirements))
print()
setup(
name='recordits',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD',
author='SLAC National Accelerator Laboratory',
packages=find_packages(exclude=['docs', 'tests']),
description='Recording points for LCLS-II',
long_description=readme,
url='https://github.com/pcdshub/recordits',
entry_points={
'console_scripts': [
# 'some.module:some_function',
],
},
include_package_data=True,
package_data={
'recordits': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
[
"klauer@slac.stanford.edu"
] |
klauer@slac.stanford.edu
|
96f253385c98c35544ec68f6785f80678b6acdea
|
4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446
|
/Python基础笔记/3/作业3/1.py
|
d2a733ff528865f5b1eea2cad44364b041c4a977
|
[] |
no_license
|
zhenguo96/test1
|
fe21510aea7feb674e52fd7a86d4177666f841c5
|
0d8de7e73e7e635d26462a0bc53c773d999498be
|
refs/heads/master
| 2020-05-03T13:09:53.592103
| 2019-04-06T07:08:47
| 2019-04-06T07:08:47
| 178,646,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# 1. Print the English weekday name for a number from 1 to 7
day = float(input("请输入1-7中的一个数:"))
if day == 1:
print("Monday")
elif day == 2:
print("Tuesday")
elif day == 3:
print("Wednesday")
elif day == 4:
print("Thursday")
elif day == 5:
print("Friday")
elif day == 6:
print("Saturday")
elif day == 7:
print("Sunday")
else:
print("输入格式错误!")
|
[
"1148421588@qq.com"
] |
1148421588@qq.com
|
204e935385d1c4510ae07351fb087e2c6b689276
|
6e25c7af9e1b9e5905ae9e839ff6f3f8fd4ed221
|
/video_auto/二次剪辑/util/img_utils.py
|
bfebc3947b9e25a1f0635e409084a269d56eb759
|
[
"Apache-2.0"
] |
permissive
|
roceys/tools_python
|
8dc6f2d21b68f682eec412beb56524e7d72d194c
|
9c8d5c1c7c1ae4a4c857a65f5b5f14da1c90e425
|
refs/heads/master
| 2023-01-05T06:18:50.411999
| 2020-11-01T18:02:37
| 2020-11-01T18:02:37
| 271,776,767
| 0
| 0
|
Apache-2.0
| 2020-11-01T18:02:38
| 2020-06-12T10:58:36
| null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: img_utils.py
@time: 2019-12-25 14:23
@description: image utility helpers
"""
import cv2
from moviepy.video.VideoClip import ImageClip
from moviepy.editor import VideoFileClip
def one_pic_to_video(image_path, output_video_path, fps, time):
"""
一张图片合成视频
one_pic_to_video('./../source/1.jpeg', './../source/output.mp4', 25, 10)
:param path: 图片文件路径
:param output_video_path:合成视频的路径
:param fps:帧率
:param time:时长
:return:
"""
image_clip = ImageClip(image_path)
img_width, img_height = image_clip.w, image_clip.h
    # total number of frames
    frame_num = int(fps * time)
img_size = (int(img_width), int(img_height))
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
video = cv2.VideoWriter(output_video_path, fourcc, fps, img_size)
for index in range(frame_num):
frame = cv2.imread(image_path)
        # resize the frame directly to the target size
        frame_suitable = cv2.resize(frame, (img_size[0], img_size[1]), interpolation=cv2.INTER_CUBIC)
        # write the same frame into the video once per iteration
        video.write(frame_suitable)
    # release the writer
video.release()
return VideoFileClip(output_video_path)
|
[
"xinganguo@gmail.com"
] |
xinganguo@gmail.com
|
850c8eb11111bbb5c8b00fe7fb4a184d18a91817
|
cc8dd1bf3ff193e24e636ef6aad54ce18e831270
|
/进程和线程/进程/文件拷贝.py
|
49ee1a03b083346d1c86568287e7ba80e75bb3c5
|
[] |
no_license
|
RelaxedDong/python_base
|
c27cbc1c06914826d3287dae46a9fe0dd2bff7b0
|
7f865c7d5bdb6454f3b20cd899dbaf19092fb360
|
refs/heads/master
| 2022-01-08T12:43:58.851895
| 2019-04-24T03:17:13
| 2019-04-24T03:17:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
#encoding:utf-8
import os
import time
def Copyfile(path,topath):
pr = open(path,'rb')
pw = open(topath,'wb')
context = pr.read()
pw.write(context)
pr.close()
pw.close()
from multiprocessing import Process,Pool
if __name__ == '__main__':
path = r'E:\pycharm_pro\基础文件\tkinter'
rofile = r'E:\pycharm_pro\基础文件\进程和线程\tofile'
    # Sequential single-process version, kept for timing comparison:
    # start = time.time()
    # pathlist = os.listdir(path)
    # for file in pathlist:
    #     Copyfile(os.path.join(path,file),os.path.join(rofile,file))
    # end = time.time()
    # print('Elapsed: %.2f' % (end - start))
filelist = os.listdir(path)
pp = Pool(4)
start = time.time()
for filename in filelist:
pp.apply_async(func=Copyfile,args=(os.path.join(path,filename),os.path.join(rofile,filename)))
pp.close()
pp.join()
end = time.time()
    print('Elapsed: %.2f' % (end - start))
|
[
"1417766861@qq.com"
] |
1417766861@qq.com
|
de8b9176832903430ba8a05a2215df9f4216345b
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20210426/example_python/05fake.py
|
50aa749d0e3b91197180026ae281b1887a96409f
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,605
|
py
|
import asyncio
import logging
import itertools
import sys
import time
from functools import partial
logger = logging.getLogger(__name__)
debug = True
logging.basicConfig(
level=logging.INFO, format="%(relativeCreated)-10d" + logging.BASIC_FORMAT
)
q = asyncio.Queue()
loop = asyncio.get_event_loop()
ev = asyncio.Event()
async def worker(client):
logger.info("init")
await ev.wait()
logger.info("start")
r = []
END = None
while True:
futs = []
items = []
async def bulk_get():
nonlocal items
while True:
item = await q.get()
if item is END:
q.task_done()
if len(items) > 0:
return
                    items = END  # None signals the outer loop that the queue is drained
return
action, args = item
if debug:
logger.info("action %s -- %r", action, "") # )args)
else:
print(".", file=sys.stderr, end="")
sys.stderr.flush()
items.append((action, args))
try:
await asyncio.wait_for(bulk_get(), 0.1)
except asyncio.TimeoutError:
pass
if items is None:
if q.empty():
break
continue
if len(items) == 0:
print(":", q.empty(), q._unfinished_tasks)
continue
# print("@", len(items))
for action, args in items:
if action == list_clusters:
async def do_list_cluster():
clusters = await loop.run_in_executor(
None, partial(list_clusters, client)
)
for c in clusters:
q.put_nowait((list_services, (c, None)))
q.task_done()
futs.append(loop.create_task(do_list_cluster()))
elif action == list_services:
async def do_list_services(cluster, next_token):
services, new_next_token = await loop.run_in_executor(
None,
partial(list_services, client, cluster, next_token=next_token),
)
if new_next_token is not None:
await q.put((list_services, (cluster, new_next_token)))
for parts in _chunk(services, 10):
await q.put((describe_services, (cluster, parts)))
q.task_done()
futs.append(loop.create_task(do_list_services(*args)))
elif action == describe_services:
async def do_describe_services(cluster, services):
res = await loop.run_in_executor(
None,
partial(describe_services, client, cluster, services=services),
)
await q.put((None, (cluster, res)))
q.task_done()
futs.append(loop.create_task(do_describe_services(*args)))
elif action is None: # end
async def do_end(cluster, services):
for s in services:
r.append(s)
q.task_done()
return True # has end
futs.append(loop.create_task(do_end(*args)))
else:
raise RuntimeError(f"unexpected action {action}")
has_end = False
for is_end in await asyncio.gather(*futs):
if is_end is True:
has_end = True
if has_end:
await q.put(END)
await q.join()
return r
def _chunk(iterable, n):
it = iter(iterable)
while True:
chunk_it = itertools.islice(it, n)
try:
first_el = next(chunk_it)
except StopIteration:
return
yield tuple(itertools.chain((first_el,), chunk_it))
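# Hypothetical usage sketch (not in the original file): _chunk yields
# fixed-size tuples, with a shorter final tuple for any remainder, e.g.
#   list(_chunk(range(5), 2))  ->  [(0, 1), (2, 3), (4,)]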
def list_clusters(client):
time.sleep(0.5)
return ["app", "spot-batch"]
def list_services(client, cluster, *, next_token=None):
time.sleep(0.5)
if cluster == "app":
if next_token is None:
return [f"app{i:02d}" for i in range(20)], "next_token_00"
elif next_token == "next_token_00":
return [f"app{i:02d}" for i in range(20, 40)], "next_token_01"
else:
return [f"app{i:02d}" for i in range(20, 60)], None
elif cluster == "spot-batch":
if next_token is None:
return [f"spot-batch{i:02d}" for i in range(20)], "next_token_00"
elif next_token == "next_token_00":
return [f"spot-batch{i:02d}" for i in range(20, 40)], "next_token_01"
elif next_token == "next_token_01":
return [f"spot-batch{i:02d}" for i in range(40, 60)], "next_token_02"
else:
return [f"spot-batch{i:02d}" for i in range(60, 80)], None
else:
raise NotImplementedError(f"unexpected cluster, {cluster}")
def describe_services(client, cluster, *, services):
time.sleep(0.5)
r = []
for name in services:
assert isinstance(name, str), name
r.append({"name": name, "desiredCount": 1, "runningCount": 1, "prefix": "O "})
return r
q.put_nowait((list_clusters, None))
ev.set()
loop.set_debug(debug)
client = None
res = loop.run_until_complete(worker(client))
for s in sorted(res, key=lambda s: s["name"]):
# if s["prefix"] == "O ":
# continue
print(f"""{s["prefix"]} {s["name"]} ({s["runningCount"]} / {s["desiredCount"]})""")
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
87b0b6633115e323d2d9df2d3cafe146cffbe018
|
bf769a3a3935a8e08f11fdf606f2e2e2bc6a5307
|
/PyQt/chapter06_layout_management/qt06_vboxLayout.py
|
707632895a91fb55826d20bc92403902d0124a20
|
[] |
no_license
|
metanoia1989/QTStudy
|
b71f2c8cf6fd001a14db3f1b5ece82c1cc7f7a93
|
29465c6bb9fc0ef2e50a9bf2f66d996ecbd086c0
|
refs/heads/master
| 2021-12-25T16:50:26.915441
| 2021-10-10T01:26:14
| 2021-10-10T01:26:14
| 193,919,811
| 3
| 2
| null | 2021-01-25T09:23:30
| 2019-06-26T14:22:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 856
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton
from PyQt5.QtCore import Qt
class Winform(QWidget):
def __init__(self, parent=None):
super(Winform, self).__init__(parent)
        self.setWindowTitle("Vertical layout example")
self.resize(330, 150)
        # A vertical layout adds the button widgets from top to bottom
hlayout = QVBoxLayout()
hlayout.addWidget(QPushButton(str(1)))
hlayout.addWidget(QPushButton(str(2)))
hlayout.addWidget(QPushButton(str(3)))
hlayout.addWidget(QPushButton(str(4)))
hlayout.addWidget(QPushButton(str(5)))
self.setLayout(hlayout)
if __name__ == "__main__":
app = QApplication(sys.argv)
form = Winform()
form.show()
sys.exit(app.exec_())
|
[
"sogaxili@gmail.com"
] |
sogaxili@gmail.com
|
2b689a5b3c3d0097066a25f9ba2cfac34a59247f
|
7b60c68ddda39ef82f5d49404bbcf62cc83e4860
|
/crawl/beautifuksoup_css_mu.py
|
4289b4574115bb7d8aa6ac2ca49f2c85612e9e95
|
[] |
no_license
|
joycejhang/learningml
|
da802e0ab9cfb6cce89791561870c0078cfaaaf9
|
884ed0541bcb257bb82e77c126ab77c927fe9add
|
refs/heads/master
| 2020-04-22T15:04:58.445844
| 2019-07-04T11:31:03
| 2019-07-04T11:31:03
| 170,466,049
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 11:03:06 2018
@author: Joyce
"""
from bs4 import BeautifulSoup
from urllib.request import urlopen
# if the page contains Chinese characters, apply decode()
html = urlopen("https://morvanzhou.github.io/static/scraping/list.html").read().decode('utf-8')
#print(html)
soup = BeautifulSoup(html, features='lxml')
# use class to narrow search
month = soup.find_all('li', {"class": "month"})
for m in month:
print(m.get_text())
"""
一月
二月
三月
四月
五月
"""
|
[
"noreply@github.com"
] |
joycejhang.noreply@github.com
|
93726b81910c6ad643b0e87010087cf632f63008
|
116967cd9f326d74a83c7ce01a826e1a83265ade
|
/nilearn/nilearn/_utils/logger.py
|
c7b48108880aadb0fd7d07ea94dba256dda0952f
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
bcipolli/nilearn-RSA
|
f73ea5e04ac94c0c2d4c7ac5f5219779a3067596
|
0ac8595c1ce3e44b6b5ec25d1906f558088ab297
|
refs/heads/master
| 2020-04-15T04:01:37.084977
| 2015-08-15T17:35:22
| 2015-08-15T17:35:22
| 31,421,013
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
"""Logging facility for nilearn"""
# Author: Philippe Gervais
# License: simplified BSD
import inspect
from sklearn.base import BaseEstimator
# The technique used in the log() function only applies to CPython, because
# it uses the inspect module to walk the call stack.
def log(msg, verbose=1, object_classes=(BaseEstimator, ),
stack_level=1, msg_level=1):
"""Display a message to the user, depending on the verbosity level.
    This function allows displaying some information that references an
    object significant to the user, instead of an internal function. The
    goal is to make the user's code as simple to debug as possible.
Parameters
----------
msg: str
message to display
verbose: int
current verbosity level. Message is displayed if this value is greater
or equal to msg_level.
object_classes: tuple of type
classes that should appear to emit the message
stack_level: int
if no object in the call stack matches object_classes, go back that
amount in the call stack and display class/function name thereof.
msg_level: int
verbosity level at and above which message should be displayed to the
user. Most of the time this parameter can be left unchanged.
Notes
=====
This function does tricky things to ensure that the proper object is
referenced in the message. If it is called e.g. inside a function that is
called by a method of an object inheriting from any class in
object_classes, then the name of the object (and the method) will be
displayed to the user. If several matching objects exist in the call
stack, the highest one is used (first call chronologically), because this
is the one which is most likely to have been written in the user's script.
"""
if verbose >= msg_level:
stack = inspect.stack()
object_frame = None
for f in reversed(stack):
frame = f[0]
current_self = frame.f_locals.get("self", None)
if isinstance(current_self, object_classes):
object_frame = frame
func_name = f[3]
object_self = current_self
break
if object_frame is None: # no object found: use stack_level
if stack_level >= len(stack):
stack_level = -1
object_frame, _, _, func_name = stack[stack_level][:4]
object_self = object_frame.f_locals.get("self", None)
if object_self is not None:
func_name = "%s.%s" % (object_self.__class__.__name__, func_name)
print("[{func_name}] {msg}".format(func_name=func_name, msg=msg))
def _compose_err_msg(msg, **kwargs):
"""Append key-value pairs to msg, for display.
Parameters
==========
msg: string
arbitrary message
kwargs: dict
arbitrary dictionary
Returns
=======
updated_msg: string
msg, with "key: value" appended. Only string values are appended.
Example
=======
>>> _compose_err_msg('Error message with arguments...', arg_num=123, \
arg_str='filename.nii', arg_bool=True)
'Error message with arguments...\\narg_str: filename.nii'
>>>
"""
updated_msg = msg
for k, v in sorted(kwargs.items()):
if isinstance(v, basestring): # print only str-like arguments
updated_msg += "\n" + k + ": " + v
return updated_msg
|
[
"bcipolli@ucsd.edu"
] |
bcipolli@ucsd.edu
|
4077521437b981d8ff9757c2997464dde7df70b1
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/create_vault_request.py
|
2a2804e36925be33e00569a4aee8d8ffd6fbc9bc
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,898
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateVaultRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'body': 'VaultCreateReq'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None):
"""CreateVaultRequest - a model defined in huaweicloud sdk"""
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this CreateVaultRequest.
:return: The body of this CreateVaultRequest.
:rtype: VaultCreateReq
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreateVaultRequest.
:param body: The body of this CreateVaultRequest.
:type: VaultCreateReq
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateVaultRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
57eb43417bd20b75f6c126e37b186c8b79cdad4c
|
1e6e3bb707920fdb01ebca23eaf81097c558d918
|
/openslides_backend/action/actions/theme/create.py
|
cfc24dfc000b85bd1c397ed32265f40dd3f2792e
|
[
"MIT"
] |
permissive
|
OpenSlides/openslides-backend
|
cbd24589f82a6f29bde02611610511870bb6abbf
|
d8511f5138db4cc5fe4fa35e2a0200f766bd49c5
|
refs/heads/main
| 2023-08-23T11:54:25.064070
| 2023-08-22T11:15:45
| 2023-08-22T11:15:45
| 231,757,840
| 6
| 22
|
MIT
| 2023-09-14T16:23:41
| 2020-01-04T12:17:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
from typing import Any, Dict
from ....action.mixins.archived_meeting_check_mixin import CheckForArchivedMeetingMixin
from ....models.models import Theme
from ....permissions.management_levels import OrganizationManagementLevel
from ....shared.util import ONE_ORGANIZATION_ID
from ...generics.create import CreateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
THEME_REQ_FIELDS = ["name", "primary_500", "accent_500", "warn_500"]
THEME_OPT_FIELDS = [
"primary_50",
"primary_100",
"primary_200",
"primary_300",
"primary_400",
"primary_600",
"primary_700",
"primary_800",
"primary_900",
"primary_a100",
"primary_a200",
"primary_a400",
"primary_a700",
"accent_50",
"accent_100",
"accent_200",
"accent_300",
"accent_400",
"accent_600",
"accent_700",
"accent_800",
"accent_900",
"accent_a100",
"accent_a200",
"accent_a400",
"accent_a700",
"warn_50",
"warn_100",
"warn_200",
"warn_300",
"warn_400",
"warn_600",
"warn_700",
"warn_800",
"warn_900",
"warn_a100",
"warn_a200",
"warn_a400",
"warn_a700",
"headbar",
"yes",
"no",
"abstain",
]
@register_action("theme.create")
class ThemeCreate(CreateAction, CheckForArchivedMeetingMixin):
"""
    Action to create a theme.
"""
model = Theme()
schema = DefaultSchema(Theme()).get_create_schema(
required_properties=THEME_REQ_FIELDS,
optional_properties=THEME_OPT_FIELDS,
)
permission = OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance["organization_id"] = ONE_ORGANIZATION_ID
return instance
|
[
"noreply@github.com"
] |
OpenSlides.noreply@github.com
|
efc7e0e6f4eeae2a0226ec1569a0cea893878697
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/csrc_reply_20210204142902.py
|
e86cb08dbc1e87888c905e9f2d8605734a986bf1
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725
| 2021-02-04T10:11:16
| 2021-02-04T10:11:16
| 332,619,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,784
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : csrc_reply.py
@Time : 2021/02/04 14:14:51
@Author : Jiajun Chen
@Version : 1.0
@Contact : 554001000@qq.com
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
'''
# Crawler for IPO review feedback opinions from the CSRC Issuance Supervision Department
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import re
import requests
import time
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
baseUrl = 'http://www.csrc.gov.cn/pub/newsite/fxjgb/scgkfxfkyj/'
def download_page(nexturl):
r = requests.get(nexturl,headers= headers)
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text,'html.parser')
projList = soup.find(id='myul').findAll('a')
for proj in projList:
href= proj['href']
docUrl = baseUrl + href
title = proj.text
print('fetching: ',title)
pageInfo = requests.get(docUrl,headers=headers)
pageInfo.encoding='utf-8'
docLink = re.findall(r'file_appendix=\'<a href=\"(.*)\">',pageInfo.text)[0]
doc = requests.get(urljoin(docUrl,docLink),headers=headers,stream=True)
with open('C:/Users/chen/Desktop/IPO_info/data/证监会文件/{}.docx'.format(title),'wb') as f:
f.write(doc.content)
time.sleep(2)
def get_all():
r = requests.get(baseUrl,headers= headers)
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text,'html.parser')
numPage = re.findall(r'var countPage = (.*)//',soup.text)[0]
numPage = int(numPage)
download_page(baseUrl)
for i in range(1,numPage):
nextUrl = baseUrl + 'index_{}.html'.format(i)
download_page(nextUrl)
def check_update():
    pass  # truncated in this .history snapshot; left as a stub so the module parses
|
[
"chenjiajun.jason@outlook.com"
] |
chenjiajun.jason@outlook.com
|
d23facb8aa301aa21a5529a832694e258dc33e2e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_138/ch82_2020_04_11_22_11_44_249634.py
|
5db60cc74f33a51436b8f74d6967226febdf2be1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
def primeiras_ocorrencias(string):
dicionario={}
i=0
for a in string:
if a not in dicionario:
dicionario[a]=i
i+=1
else:
i+=1
return dicionario
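# Hypothetical example (not in the original file): the dict maps each
# character to the index of its first occurrence, e.g.
#   primeiras_ocorrencias("banana")  ->  {'b': 0, 'a': 1, 'n': 2}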
|
[
"you@example.com"
] |
you@example.com
|
2e7c9b3525bfa87a6aabdb81158e2cb030a87cbb
|
2f9dd97f4b6f8bf164d8550b46bfe6313dc84c6c
|
/src/pmr2/bives/view.py
|
dd8eb581ac9f986ad1333cf04a74064e6054d2fd
|
[] |
no_license
|
PMR2/pmr2.bives
|
89ec960283883e218c9b35455a84ac1ad46e57c0
|
d9273df7a8eb97d707ca14eeab6def3a5d01df3f
|
refs/heads/master
| 2021-05-15T02:27:09.348400
| 2020-01-17T01:53:31
| 2020-01-17T01:54:37
| 34,535,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,156
|
py
|
import json
import requests
import zope.component
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile
from plone.registry.interfaces import IRegistry
from pmr2.z3cform.page import SimplePage
from pmr2.app.workspace.browser.browser import FilePage
from .interfaces import ISettings
registry_prefix = 'pmr2.bives.settings'
def call_bives(files, commands, session=None):
if session is None:
session = requests.Session()
data = {
'files': files,
'commands': commands,
}
registry = zope.component.getUtility(IRegistry)
try:
settings = registry.forInterface(ISettings, prefix=registry_prefix)
    except KeyError:
        # `view` is undefined in this function (bug in the original source);
        # surface the problem through the returned results instead.
        return ('{"error": "Could not load settings for pmr2.bives. Please '
                'check the installation status for this add-on."}')
try:
r = session.post(settings.bives_endpoint, data=json.dumps(data))
results = r.json()
        # It can be successfully decoded, so the raw text should be safe(TM)
results = r.text
except ValueError:
results = '{"error": "Server returned unexpected results"}'
except requests.exceptions.ConnectionError:
results = '{"error": "Error connecting to BiVeS server."}'
except requests.exceptions.RequestException:
results = '{"error": "Unexpected exception when handling BiVeS."}'
return results
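# Hypothetical usage sketch (the URL is illustrative; the command name is
# taken from BiVeSBaseView below):
#   raw_json = call_bives(['http://example.org/model.cellml'],
#                         ['singleCompHierarchyJson'])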
def apply_bives_view(view, files, commands, attributes):
results = call_bives(files, commands, view.session)
view.diff_view = view.diff_viewer(view.context, view.request)
view.diff_view.results = results
for k, v in attributes.items():
setattr(view.diff_view, k, v)
class BiVeSExposurePickFilePage(SimplePage):
template = ViewPageTemplateFile('bives_exposure_pick_file.pt')
label = ''
def physical_path(self):
return '/'.join(self.context.getPhysicalPath())
class BiVeSWorkspacePickFilePage(FilePage):
template = ViewPageTemplateFile('bives_workspace_pick_file.pt')
label = ''
def physical_path(self):
return '/'.join(self.context.getPhysicalPath())
class BiVeSDiffViewer(SimplePage):
template = ViewPageTemplateFile('bives_simple_diff.pt')
label = u'BiVeS Model Diff Viewer'
results = None
raw_source = None
raw_target = None
class BiVeSSingleViewer(SimplePage):
template = ViewPageTemplateFile('bives_single.pt')
label = u'BiVeS Model Viewer'
results = None
raw_source = None
class BiVeSBaseView(SimplePage):
label = u'BiVeS Viewer'
commands = ['singleCompHierarchyJson',]
diff_viewer = BiVeSSingleViewer
diff_view = None
def extract_file(self):
# return the file
return self.context.absolute_url()
def update(self):
self.request['disable_border'] = 1
super(BiVeSBaseView, self).update()
# post the data to BiVeS
files = (self.extract_file(),)
commands = self.commands
apply_bives_view(self, files, commands, {})
def render(self):
if not self.diff_view:
return super(BiVeSBaseView, self).render()
return self.diff_view()
|
[
"tommy.yu@auckland.ac.nz"
] |
tommy.yu@auckland.ac.nz
|
953907553899b6a1d9fd87afa1c6f70dd6cc6f31
|
589b5eedb71d83c15d44fedf60c8075542324370
|
/stock/stcok_pool.py
|
e618017842cb50e51927821649c22bb60a2cee4e
|
[] |
no_license
|
rlcjj/quant
|
4c2be8a8686679ceb675660cb37fad554230e0d4
|
c07e8f0f6e1580ae29c78c1998a53774a15a67e1
|
refs/heads/master
| 2020-03-31T07:15:48.111511
| 2018-08-27T05:29:00
| 2018-08-27T05:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
import pandas as pd
from quant.param.param import Parameter
from datetime import datetime
from WindPy import w
w.start()
class StockPool(object):
"""
    Download and return the current complete stock pool
load_all_stock_code_now
get_all_stock_code_now
"""
def __init__(self):
self.name = "All_Stock_Code"
self.load_out_file = Parameter().get_load_out_file(self.name)
self.read_file = Parameter().get_read_file(self.name)
def load_all_stock_code_now(self, source="wind_terminal"):
if source == "wind_terminal":
today = datetime.today().strftime('%Y-%m-%d')
data = w.wset("sectorconstituent", "date=" + today + ";sectorid=a001010100000000")
data = pd.DataFrame(data.Data, index=data.Fields, columns=data.Codes).T
now_wind_list = list(data['wind_code'].values)
data = w.wset("sectorconstituent", "date=" + today + ";sectorid=a001010m00000000")
data = pd.DataFrame(data.Data, index=data.Fields, columns=data.Codes).T
delist_list = list(data['wind_code'].values)
now_list = self.get_all_stock_code_now()
update_list = list(set(now_list) | set(now_wind_list) | set(delist_list))
update_list.sort()
update_code = pd.DataFrame(update_list, columns=['code'])
update_code.to_csv(self.load_out_file)
print("################# Loading All Stock Code ############################################")
def get_all_stock_code_now(self, source="wind_terminal"):
if source == "wind_terminal":
code = pd.read_csv(self.read_file, encoding='gbk', index_col=[0])
now_list = list(code['code'].values)
else:
now_list = []
return now_list
if __name__ == '__main__':
# StockPool
################################################################################
StockPool().load_all_stock_code_now()
print(StockPool().get_all_stock_code_now())
################################################################################
|
[
"1119332482@qq.com"
] |
1119332482@qq.com
|
fd35c54e1fa82f624c63b4fa96fc49e7a4b26b09
|
9f112cd0aeb1447dee06ded576d99b61701cbdc3
|
/ec-backend/src/ad/urls.py
|
a8546f87a5cd8ac17f23da1a35b3ed6db41309d3
|
[] |
no_license
|
ForeverDreamer/embarrassment-cyclopedia
|
44e13fbd7210ebc634d0fbab321c0f4598072ff3
|
f69bc88a6a8e734cbb3d37ab173f557708653789
|
refs/heads/master
| 2023-01-10T17:20:44.181077
| 2020-07-15T03:20:27
| 2020-07-15T03:20:27
| 206,903,622
| 0
| 0
| null | 2023-01-07T09:30:23
| 2019-09-07T02:22:09
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 251
|
py
|
from django.urls import path
from .views import (
AdInfoListView,
AdInfoDetailView,
)
urlpatterns = [
path('', AdInfoListView.as_view(), name='adinfo-list'),
path('<str:pk>/', AdInfoDetailView.as_view(), name='adinfo-detail'),
]
|
[
"499361328@qq.com"
] |
499361328@qq.com
|
9dfeb0a328d15d9455aa51f046ccdb764f5f44c2
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/sample/object_attr_set_eval_order-228.py
|
a511e83aab6b4ea3a1fa28424a3b776260bdc205
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
class A(object):
a:int = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
a:A = None
b:B = None
def get_b() -> B:
print("Getting B")
return b
def get_one() -> int:
print("Getting 1")
return 1
def get_false() -> bool:
print("Getting False")
return False
a = b = B()
get_b().a = get_one()
print("Assigned B.a")
get_b().b = get_false()
print("Assigned B.b")
print(a.a)
print($Parameters)
print(b.b)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
70b6f7ab540eaa8dcf83f872958f371a9adeaaac
|
59da45955862686374e438b5367978856df7c284
|
/component_services/metadata_searcher_service/metadata_search_service.py
|
85492def977c190d6dd8568d7baf1a6f47ed24ee
|
[] |
no_license
|
nguyentran0212/IoTSE-prototypes
|
04531fb3a8d14d1eaa5eba712c773bd7531fd04d
|
3c9dde27cf1818fbf74508520eec3f35778f60f5
|
refs/heads/master
| 2021-09-20T10:48:47.330873
| 2018-08-08T08:20:46
| 2018-08-08T08:20:46
| 116,230,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,223
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 3 13:22:31 2018
@author: nguyentran
"""
from ISEPLib.abstract_services import AbsSearcherService
import ISEPLib.entity as entity
import redis
import pickle
from pymongo import MongoClient
from pprint import pprint
class SearcherService(AbsSearcherService):
def __init__(self, redis_host = "localhost", redis_port = 6379, redis_db = 0, mongo_host = "localhost", mongo_port = 27017, query_type = "metadata_query", *args, **kwargs):
super().__init__(*args, **kwargs)
self.query_type = query_type
# Create client to redis database
self.redis_client = redis.StrictRedis(host = redis_host, port = int(redis_port), db = redis_db)
self.redis_query_id_key = "metadata_searcher:query_id:"
# Create client to mongo database
self.mongo_client = MongoClient(host = mongo_host, port = int(mongo_port))
self.mongo_db = self.mongo_client.sensor_metadata_db
self.mongo_col = self.mongo_db.sensor_metadata_collection
def _query(self, query, wf_id = ""):
"""
        This function searches the database to resolve the given query,
        puts the search results into a storage space, and returns the
        identifier of that result list to the client
"""
# Extract the relevant query from the input query message
query_id = query.query_ID
query_content = query.query_content.get(self.query_type, {})
pprint(query_content)
# Perform the search to find sensors
result_curs = self.mongo_col.find(query_content)
mongo_count = self.mongo_col.find(query_content).count()
self.redis_client.set("DEBUG_metadata_mongo_count:" + query_id, mongo_count)
result_set = entity.ResultSet(query_ID = query_id, query_instance = query)
self.redis_client.set("DEBUG_metadata_init_result_count:" + query_id, len(result_set.results))
ite_num = 0
for result in result_curs:
score = {"score" : 100}
iot_content = entity.IoTContent(iot_content_dict=result)
result_set.add_IoTContent_score(iot_content.to_dict(), score)
ite_num += 1
self.redis_client.set("DEBUG_metadata_ite_count:" + query_id, ite_num)
self.redis_client.set("DEBUG_metadata_result_count:" + query_id, len(result_set.results))
# Store result set in Redis and return a key for client to retrieve results
p_result_set = pickle.dumps(result_set)
self.redis_client.set(self.redis_query_id_key + query_id, p_result_set)
return query_id
def _getResult(self, query_id, wf_id = ""):
"""
This function returns the set of results generated from a previous query
"""
# with open("test_cookie.txt", "a") as f:
# f.write("From %s: %s\n" % (self, wf_id))
p_result_set = self.redis_client.get(self.redis_query_id_key + query_id)
result_set = pickle.loads(p_result_set)
# Following lines are only for supporting the mockup data
result_set.query_ID = query_id
result_set.query_instance["query_ID"] = query_id
return result_set
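# Hypothetical usage sketch (assumes reachable Redis/Mongo instances and a
# query object from ISEPLib.entity; names are illustrative):
#   searcher = SearcherService()
#   qid = searcher._query(query)            # resolve and cache the results
#   result_set = searcher._getResult(qid)   # retrieve the cached ResultSet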
|
[
"nguyen.tran@adelaide.edu.au"
] |
nguyen.tran@adelaide.edu.au
|
2546e39b763ccbbb1261c64dc52b2e08d7bbb98a
|
2d34a6033884b22e41588c82ebe657546aef07c7
|
/project/blog/models.py
|
bd36137f7fcbaa6e71aa0e0c0466252f28047d4d
|
[] |
no_license
|
alifanov/jbm_boilerplate
|
73c6c5f394f20a851e860328847d2ac45a174d6f
|
7012885dbf1455c2ca2abf849bd36685c4a58286
|
refs/heads/master
| 2020-04-03T16:18:35.760013
| 2018-11-21T09:48:11
| 2018-11-21T09:48:11
| 155,399,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,531
|
py
|
from django.db import models
from django.dispatch import receiver
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
class TimeItem(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Tag(models.Model):
name = models.CharField(max_length=30)
class Meta:
ordering = ["name"]
class Post(TimeItem):
title = models.CharField(max_length=255)
text = models.TextField()
tags = models.ManyToManyField(Tag, blank=True)
class Meta:
ordering = ["-updated_at"]
def send_msg_to_ws(text):
layer = get_channel_layer()
async_to_sync(layer.group_send)(
"notifications", {"type": "receive.json", "content": {"msg": text}}
)
@receiver(models.signals.post_save, sender=Post, weak=False)
def post_created_handler(sender, instance, created, *args, **kwargs):
if created:
send_msg_to_ws("New post created")
@receiver(models.signals.post_delete, sender=Post, weak=False)
def post_removed_handler(sender, instance, *args, **kwargs):
send_msg_to_ws("Post deleted")
@receiver(models.signals.post_save, sender=Tag, weak=False)
def tag_created_handler(sender, instance, created, *args, **kwargs):
if created:
send_msg_to_ws("New tag created")
@receiver(models.signals.post_delete, sender=Tag, weak=False)
def tag_removed_handler(sender, instance, *args, **kwargs):
send_msg_to_ws("Tag deleted")
|
[
"lifanov.a.v@gmail.com"
] |
lifanov.a.v@gmail.com
|
b727cabcd9bd3806c071a7cd260842269b8aba0a
|
fa7e75212e9f536eed7a78237a5fa9a4021a206b
|
/MASIR/python/masir/utils/webprivacy_gather.py
|
1cc02f6f1aa6fb7dd5e9c09160e708d3db64575e
|
[] |
no_license
|
kod3r/SMQTK
|
3d40730c956220a3d9bb02aef65edc8493bbf527
|
c128e8ca38c679ee37901551f4cc021cc43d00e6
|
refs/heads/master
| 2020-12-03T09:12:41.163643
| 2015-10-19T14:56:55
| 2015-10-19T14:56:55
| 44,916,678
| 1
| 0
| null | 2015-10-25T15:47:35
| 2015-10-25T15:47:35
| null |
UTF-8
|
Python
| false
| false
| 2,728
|
py
|
# coding=utf-8
import bson
import datetime
import logging
import os
import urllib2
from masir.utils.SimpleBoundingBox import SimpleBoundingBox
def wp_gather_image_and_info(output_dir, db_collection, region_bbox):
"""
Gather the imagery and metadata (as BSON) from the webprivacy database to an
output directory given date and region constraints.
:param output_dir: Directory to write files
:type output_dir: str
:param db_collection: pymongo database collection object
:type db_collection: pymongo.collection.Collection
:param region_bbox: Geographic region constraint.
:type region_bbox: SimpleBoundingBox
"""
log = logging.getLogger('gather_tweet_image_and_info')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
log.info("Performing Query")
after = datetime.datetime(2014, 2, 1)
q = db_collection.find({
"$and": [
{"media": {"$gt": {"$size": 0}}},
{"media.0.type": "photo"},
{"created": {"$gte": after}},
{"geo.0": {"$gte": region_bbox.min_x, "$lte": region_bbox.max_x}},
{"geo.1": {"$gte": region_bbox.min_y, "$lte": region_bbox.max_y}},
]
}, timeout=False)
log.info("Scanning results")
count = 0
try:
for i, e in enumerate(q):
log.info("[%9d] %s -> %s %s", i, e['_id'], e['geo'],
e['media'][0]['media_url'])
media_path = e['media'][0]['media_url']
media_filetype = os.path.splitext(media_path)[1]
output_image = os.path.join(output_dir, str(e['_id']) + media_filetype)
output_bson = os.path.join(output_dir, str(e['_id']) + ".bson")
try:
with open(output_image, 'wb') as output_image_file:
output_image_file.write(urllib2.urlopen(media_path).read())
with open(output_bson, 'wb') as output_md_file:
output_md_file.write(bson.BSON.encode(e))
count += 1
except Exception, ex:
# Skip all files that cause errors anywhere. Remove anything
# written
log.info(" - ERROR: %s", str(ex))
log.info(" - Skipping")
if os.path.exists(output_image):
os.remove(output_image)
if os.path.exists(output_bson):
os.remove(output_bson)
finally:
# Since we checked out a cursor without a server-side timeout, make sure
# that we catch whatever and close the cursor when we're done / if
# things fail.
q.close()
log.info("Discovered %d matching entries with valid images.", count)
|
[
"paul.tunison@kitware.com"
] |
paul.tunison@kitware.com
|
e1dee49a14067807a2a659197ce76a63a98252dd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/68/19112/submittedfiles/funcoes.py
|
ba6a46880a5b2a250478615ce64169b4cf2b8ded
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
# Factorial
def fatorial (m):
m_fat=1
for i in range (2,m+1,1):
m_fat=m_fat * i
return m_fat
# Pi
def calculaPi (m):
soma_pi=0
j=2
for i in range (0,m,1):
if i%2==0:
soma_pi=soma_pi+(4/(j*(j+1)*(j+2)))
else:
soma_pi=soma_pi-(4/(j*(j+1)*(j+2)))
j=j+2
pi=3+soma_pi
return pi
# Cosine
def calculaCosseno (e):
soma_cosseno=0
i=1
j=2
    a=(e**j)/fatorial(j) # THERE WAS AN ERROR HERE <-------------------- ERROR
    print ("first a: %.4f" %a)
    while a>e:
        a=(e**j)/fatorial(j) # <-------------- MUST RECOMPUTE A NEW VALUE OF a ON EVERY ITERATION
if i%2!=0:
soma_cosseno=soma_cosseno-a
else:
soma_cosseno=soma_cosseno+a
j=j+2
i=i+1
print soma_cosseno
cosseno=1-soma_cosseno
return cosseno
# Golden ratio (razão áurea)
def calculaRazaoAurea (cosseno):
razaoAurea= 2*cosseno
return razaoAurea
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
d2807766442c8de2ee23a2b0a79a5f75b3617f38
|
c98b383eeef0e7c12504270fe92071569fec9736
|
/testMe/testone/tests.py
|
24714ddf88517e0cfc1a201200d9e36cd5ef85fd
|
[
"MIT"
] |
permissive
|
MyHiHi/test
|
088048117346b81f7c51fadc00cce4d3f21496ac
|
0cedfbbfad53021569faef933b9fdff20e897617
|
refs/heads/master
| 2020-03-09T02:10:03.949154
| 2019-06-12T09:44:42
| 2019-06-12T09:44:42
| 128,533,935
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
from django.test import TestCase
import requests
def get_json(url):
headers = {
"Host": "www.lagou.com",
"Connection": "keep-alive",
"Origin": "https://www.lagou.com",
"X-Anit-Forge-Code": "0",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept": "application/json, text/javascript, */*; q=0.01",
"X-Requested-With": "XMLHttpRequest",
"X-Anit-Forge-Token": None,
"Referer": "https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?labelWords=&fromSearch=true&suginput=",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
}
params={
"needAddtionalResult": "false"
}
data={
"first": "true",
"pn": "1",
"kd": "数据分析"
}
res=requests.post(url,headers=headers,data=data)
res.encoding='utf-8'
return res.json()
url='https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
data=get_json(url)
print(data)
|
[
"909535692@qq.com"
] |
909535692@qq.com
|
6d5d099a2907453a5c13a6219c0d42b982c456a3
|
480e33f95eec2e471c563d4c0661784c92396368
|
/CondTools/Ecal/python/copyTrivial_orcoff_LaserOnly.py
|
f1755f2e56183685dce9bf8a0127949e8bcbb272
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420
| 2020-08-27T08:01:20
| 2020-08-27T08:01:20
| 102,867,729
| 7
| 14
|
Apache-2.0
| 2022-05-23T07:58:09
| 2017-09-08T14:03:57
|
C++
|
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("CalibCalorimetry.EcalTrivialCondModules.EcalTrivialCondRetriever_cfi")
process.EcalTrivialConditionRetriever.laserAPDPNTime1 = cms.untracked.string('0')
process.EcalTrivialConditionRetriever.laserAPDPNTime2 = cms.untracked.string('1')
process.EcalTrivialConditionRetriever.laserAPDPNTime3 = cms.untracked.string('2')
process.load("CondCore.DBCommon.CondDBCommon_cfi")
#process.CondDBCommon.connect = 'oracle://cms_orcoff_prep/CMS_COND_ECAL'
process.CondDBCommon.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb/'
process.CondDBCommon.connect = 'sqlite_file:DB.db'
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('*'),
destinations = cms.untracked.vstring('cout')
)
process.source = cms.Source("EmptyIOVSource",
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
interval = cms.uint64(1)
)
# timetype = cms.untracked.string('timestamp'),
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
timetype = cms.untracked.string('timestamp'),
toPut = cms.VPSet(
cms.PSet(
record = cms.string('EcalLaserAlphasRcd'),
tag = cms.string('EcalLaserAlphas_mc')
),
cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRcd'),
tag = cms.string('EcalLaserAPDPNRatios_mc')
),
cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRefRcd'),
tag = cms.string('EcalLaserAPDPNRatiosRef_mc')
))
)
# timetype = cms.string('timestamp'),
process.dbCopy = cms.EDAnalyzer("EcalDBCopy",
timetype = cms.string('timestamp'),
toCopy = cms.VPSet(
cms.PSet(
record = cms.string('EcalLaserAlphasRcd'),
container = cms.string('EcalLaserAlphas')
),
cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRcd'),
container = cms.string('EcalLaserAPDPNRatios')
),
cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRefRcd'),
container = cms.string('EcalLaserAPDPNRatiosRef')
))
)
process.prod = cms.EDAnalyzer("EcalTrivialObjectAnalyzer")
process.p = cms.Path(process.prod*process.dbCopy)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
3c2e2d353cbbce71f6839c272c8aec8f4b16215a
|
76e931912629c37beedf7c9b112b53e7de5babd7
|
/3-mouth04/day03/mysite3/bookstore/migrations/0002_auto_20210110_1743.py
|
6150a4cd907207df18066c991e6c09691c209a1c
|
[
"Apache-2.0"
] |
permissive
|
gary-gggggg/gary
|
c59ac21d8e065f296ff986d11a0e4cbf186a1bc4
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
refs/heads/main
| 2023-02-23T06:54:34.500683
| 2021-02-01T10:17:02
| 2021-02-01T10:17:02
| 334,905,744
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# Generated by Django 2.2.12 on 2021-01-10 09:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bookstore', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='book',
name='market_price',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=7, verbose_name='零售价'),
),
migrations.AddField(
model_name='book',
name='pub',
field=models.CharField(default='', max_length=50, verbose_name='出版社'),
),
]
|
[
"673248932@qq.com"
] |
673248932@qq.com
|
d348aca8174320a1d59058b5bb76e9b425f38c2d
|
eed93ecbb01acb180085bed4960a4b2fd4a2322d
|
/wishbook/models.py
|
372decb65cb08595289fd1a8658a2716acd51846
|
[] |
no_license
|
srakrn/happyearthday
|
beb8a52cdc73a19b48143e43154feaacf4ac174a
|
0da2fd4ac31db1a7df13f12c78c65db641d13794
|
refs/heads/master
| 2022-05-17T19:33:00.219871
| 2021-06-10T10:24:22
| 2021-06-10T10:24:22
| 254,112,489
| 0
| 1
| null | 2022-04-22T23:22:12
| 2020-04-08T14:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 560
|
py
|
from django import forms
from django.db import models
# Create your models here.
class Wish(models.Model):
wish_text = models.CharField(verbose_name="ข้อความอวยพร", max_length=240)
wish_owner = models.CharField(verbose_name="ชื่อผู้อวยพร", max_length=50)
shown = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "{} from {}".format(
self.wish_text[: min(len(self.wish_text), 10)], self.wish_owner
)
|
[
"tanersirakorn@gmail.com"
] |
tanersirakorn@gmail.com
|
825e7a1384caaa635209d8fbde10c2ebc6fb3172
|
12485bb945ab8af6ff6a5f3d9d4c542a7bcf95f8
|
/server/src/uds/core/util/decorators.py
|
1996f00746c892461214b269ffa03dfe7a9366ad
|
[] |
no_license
|
morfeuj/openuds
|
6ef0c4bed624def0090efa6abdd2600b9be81a8b
|
26e429019e5fe5b01ee1a476c879d8f8333b0ab0
|
refs/heads/master
| 2020-12-15T15:11:33.598430
| 2020-01-20T16:42:33
| 2020-01-20T16:42:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,264
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
from functools import wraps
import logging
import inspect
import typing
from uds.core.util.html import checkBrowser
from uds.web.util import errors
logger = logging.getLogger(__name__)
RT = typing.TypeVar('RT')
# Decorator that protects pages that needs at least a browser version
# Default is to deny IE < 9
def denyBrowsers(
browsers: typing.Optional[typing.List[str]] = None,
errorResponse: typing.Callable = lambda request: errors.errorView(request, errors.BROWSER_NOT_SUPPORTED)
) -> typing.Callable[[typing.Callable[..., RT]], typing.Callable[..., RT]]:
"""
Decorator to set protection to access page
Look for samples at uds.core.web.views
"""
denied: typing.List[str] = browsers or ['ie<9']
def wrap(view_func: typing.Callable[..., RT]) -> typing.Callable[..., RT]:
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs) -> RT:
"""
Wrapped function for decorator
"""
for b in denied:
if checkBrowser(request, b):
return errorResponse(request)
return view_func(request, *args, **kwargs)
return _wrapped_view
return wrap
def deprecated(func: typing.Callable[..., RT]) -> typing.Callable[..., RT]:
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@wraps(func)
def new_func(*args, **kwargs) -> RT:
try:
caller = inspect.stack()[1]
logger.warning('Call to deprecated function %s from %s:%s.', func.__name__, caller[1], caller[2])
except Exception:
logger.info('No stack info on deprecated function call %s', func.__name__)
return func(*args, **kwargs)
return new_func
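# Hypothetical usage sketch (not in the original file):
#
#   @deprecated
#   def old_api():
#       ...
#
# Calling old_api() logs a warning naming the caller's file and line.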
# Decorator that allows us a "fast&clean" caching system on service providers
#
# Decorator for caching
# Decorator that tries to get from cache before executing
def allowCache(
cachePrefix: str,
cacheTimeout: int,
cachingArgs: typing.Optional[typing.Union[typing.List[int], typing.Tuple[int], int]] = None,
cachingKWArgs: typing.Optional[typing.Union[typing.List[str], typing.Tuple[str], str]] = None,
cachingKeyFnc: typing.Optional[typing.Callable[[typing.Any], str]] = None
) -> typing.Callable[[typing.Callable[..., RT]], typing.Callable[..., RT]]:
"""Decorator that give us a "quick& clean" caching feature on service providers.
Note: This decorator is intended ONLY for service providers
:param cachePrefix: the cache key "prefix" (prepended on generated key from args)
:param cacheTimeout: The cache timeout in seconds
:param cachingArgs: The caching args. Can be a single integer or a list.
First arg (self) is 0, so normally cachingArgs are 1, or [1,2,..]
"""
keyFnc = cachingKeyFnc or (lambda x: '')
def allowCacheDecorator(fnc: typing.Callable[..., RT]) -> typing.Callable[..., RT]:
@wraps(fnc)
def wrapper(*args, **kwargs) -> RT:
argList: typing.List[str] = []
if cachingArgs:
ar = [cachingArgs] if not isinstance(cachingArgs, (list, tuple)) else cachingArgs
argList = [args[i] if i < len(args) else '' for i in ar]
if cachingKWArgs:
kw = [cachingKWArgs] if not isinstance(cachingKWArgs, (list, tuple)) else cachingKWArgs
argList += [str(kwargs.get(i, '')) for i in kw]
if argList:
cacheKey = '{}-{}.{}'.format(cachePrefix, keyFnc(args[0]), argList)
else:
cacheKey = '{}-{}.gen'.format(cachePrefix, keyFnc(args[0]))
data: typing.Any = None
if kwargs.get('force', False) is False and args[0].cache:
data = args[0].cache.get(cacheKey)
if 'force' in kwargs:
# Remove force key
del kwargs['force']
if data is None and args[0].cache: # Not in cache and object can cache it
data = fnc(*args, **kwargs)
try:
# Maybe returned data is not serializable. In that case, cache will fail but no harm is done with this
args[0].cache.put(cacheKey, data, cacheTimeout)
except Exception as e:
logger.debug('Data for %s is not serializable on call to %s, not cached. %s (%s)', cacheKey, fnc.__name__, data, e)
return data
return wrapper
return allowCacheDecorator
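# Hypothetical usage sketch (illustrative only; assumes a service-provider
# object exposing a `.cache` attribute, as the decorator expects in args[0]):
#
#   class MyProvider(ServiceProvider):
#       @allowCache('machines', 60, cachingArgs=1)
#       def listMachines(self, datacenter):
#           return expensive_api_call(datacenter)
#
# A repeated call with the same `datacenter` within 60 seconds returns the
# cached value; pass force=True to bypass the cache and refresh it.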
|
[
"dkmaster@dkmon.com"
] |
dkmaster@dkmon.com
|
98ae6996aa0b587767198da68169589e426738f7
|
0be27c0a583d3a8edd5d136c091e74a3df51b526
|
/reverse_each_word_except_first_and_last.py
|
27fcc10079794e2ac868b8e8a9287aad8791a631
|
[] |
no_license
|
ssangitha/guvicode
|
3d38942f5d5e27a7978e070e14be07a5269b01fe
|
ea960fb056cfe577eec81e83841929e41a31f72e
|
refs/heads/master
| 2020-04-15T05:01:00.226391
| 2019-09-06T10:08:23
| 2019-09-06T10:08:23
| 164,405,935
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
s=input()
l=list(s.split(" "))
a=[l[0]]
for i in range(1,len(l)):
if i==len(l)-1:
a.append(l[-1])
else:
c=l[i]
b=c[::-1]
a.append(b)
print(" ".join(a))
#reverse
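# Hypothetical example (not in the original file): for the input
#   "the quick brown fox"
# the output is
#   "the kciuq nworb fox"
# (every word except the first and last is reversed)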
|
[
"noreply@github.com"
] |
ssangitha.noreply@github.com
|
d16a62c88c77fdb893b4269bd3894ff2bf460ba0
|
02255565aff9ea18a4d566955cc53ca06090efa4
|
/python_django.py
|
cf4625e4823034de98520ac80998d9c35978e234
|
[] |
no_license
|
BrainiacRawkib/Practical-Python-for-Begineers
|
20a8a3697812bed78646c6af54a6dc195694109a
|
cb29ea1a38339fcf2fac005feb92b5a72ae98387
|
refs/heads/master
| 2020-12-01T09:10:06.802758
| 2019-12-28T15:27:40
| 2019-12-28T15:27:40
| 230,598,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from django.db import models
class Student(models.Model):
FRESHMAN = 'FR'
SOPHOMORE = 'SO'
JUNIOR = 'JR'
SENIOR = 'SR'
YEAR_IN_SCHOOL_CHOICES = (
(FRESHMAN, 'Freshman'),
(SOPHOMORE, 'Sophomore'),
(JUNIOR, 'Junior'),
(SENIOR, 'Senior'),
)
year_in_school = models.CharField(
max_length=2,
choices=YEAR_IN_SCHOOL_CHOICES,
default=FRESHMAN,
)
def is_upperclass(self):
return self.year_in_school in (self.JUNIOR, self.SENIOR)
|
[
"brainiacrawkib@gmail.com"
] |
brainiacrawkib@gmail.com
|
abe8566f63107cd5f40e1645166a83765b1cfed1
|
f6b1db8c0503a292f6a1da31800269e0bb5f39bd
|
/web_flask/5-number_template.py
|
431a96556f0e1ae94db91f4d1e262f0713d039f8
|
[] |
no_license
|
arleybri18/AirBnB_clone_v2
|
142883fde2629c7eb75dddc8e4375a9ca1714555
|
111cabf15cadba09f018b2fe359eec68495035dc
|
refs/heads/master
| 2020-07-07T03:44:31.456739
| 2019-09-09T15:16:55
| 2019-09-09T15:16:55
| 203,235,771
| 0
| 0
| null | 2019-08-19T19:21:54
| 2019-08-19T19:21:54
| null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
#!/usr/bin/python3
""" Import flask class """
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello():
""" Function to handle request """
return 'Hello HBNB!'
@app.route('/hbnb')
def hello_hbnb():
""" Function to handle request to path /hbnb """
return 'HBNB'
@app.route('/c/<text>')
def c_route(text):
""" Function to handle request with a variable """
return 'C %s' % text.replace('_', ' ')
@app.route('/python/')
@app.route('/python/<text>')
def python(text='is cool'):
""" Function to handle request with a variable and data default """
return 'Python %s' % text.replace('_', ' ')
@app.route('/number/<int:num>')
def numbers(num):
""" Function to handle request with a variable with specified type """
return '%d is a number' % num
@app.route('/number_template/<int:num>')
def numbers_temp(num):
""" Function to handle request and render template"""
return render_template('5-number.html', number=num)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
[
"arleybri18@gmail.com"
] |
arleybri18@gmail.com
|
178f94e8e5286eb5012579f4c8562e0b113ac483
|
5a1cb546cb5132cc98e8a08acc6233540bc2dd34
|
/forms.py
|
2ea5dd93c573fdf1bc0e5fea25e98bbc4f5492d8
|
[] |
no_license
|
RitikaSingh02/ChitChat
|
75cf19e00ce6c12a35cc081e55c4e0b378ee3347
|
9575daf0e61ba2f20797dfadf6ba246470dafbe0
|
refs/heads/master
| 2023-05-23T21:21:25.138134
| 2021-06-20T19:00:43
| 2021-06-20T19:00:43
| 378,376,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import InputRequired, Length, EqualTo, ValidationError
from models import User
def invalid_credentials(form, field):
""" Username and password checker """
password = field.data
username = form.username.data
print(username, password)
# Check username is invalid
user_data = User.query.filter_by(username=username).first()
if user_data is None:
raise ValidationError("Username or password is incorrect")
    # Check whether the password is invalid
class RegistrationForm(FlaskForm):
""" Registration form"""
username = StringField('username', validators=[InputRequired(message="Username required"), Length(
min=4, max=25, message="Username must be between 4 and 25 characters")])
password = PasswordField('password', validators=[InputRequired(message="Password required"), Length(
min=4, max=25, message="Password must be between 4 and 25 characters")])
confirm_pswd = PasswordField('confirm_pswd', validators=[InputRequired(
message="Password required"), EqualTo('password', message="Passwords must match")])
def validate_username(self, username):
user_object = User.query.filter_by(username=username.data).first()
if user_object:
raise ValidationError(
"Username already exists. Select a different username.")
class LoginForm(FlaskForm):
""" Login form """
username = StringField('username', validators=[
InputRequired(message="Username required")])
password = PasswordField('password', validators=[InputRequired(
message="Password required"), invalid_credentials])
|
[
"ritika2002singh@gmail.com"
] |
ritika2002singh@gmail.com
|
7ddbd53246a76a75ab59a084952c7bcf676d9df6
|
769843f3cb57b9a0e1c68d02637b881bd43a103b
|
/nanodet/model/arch/gfl.py
|
b39eb616f9a7f0d17203d726452db6c449690856
|
[] |
no_license
|
CaptainEven/MyNanoDet
|
9de3165ff14140eeabb362e793d8903f93cfdf77
|
9f5e5835bff8854d8d8c0041a7b3288ab017d7b6
|
refs/heads/master
| 2023-02-11T13:12:59.651520
| 2021-01-06T06:52:11
| 2021-01-06T06:52:11
| 319,273,525
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
from .one_stage import OneStage
class GFL(OneStage):
def __init__(self,
backbone_cfg,
fpn_cfg,
head_cfg, ):
"""
:param backbone_cfg:
:param fpn_cfg:
:param head_cfg:
"""
super(GFL, self).__init__(backbone_cfg,
fpn_cfg,
head_cfg)
def forward(self, x):
x = self.backbone(x)
x = self.fpn(x)
x = self.head(x)
return x
|
[
"765305261@qq.com"
] |
765305261@qq.com
|
db47d5fc38a06024d4c392f065371e46135a7707
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03608/s871426346.py
|
ab335095f513950ba52948b0a955d92571997d3a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
from scipy.sparse.csgraph import floyd_warshall
import numpy as np
from itertools import permutations, combinations
n,m,R = map(int,input().split())
r=list(map(int,input().split()))
for i in range(R):
r[i]-=1
d = np.zeros((n,n))
# input
for i in range(m):
a,b,c = map(int,input().split())
a,b = a-1, b-1
d[a,b] = c
dist =floyd_warshall(d,directed=0).astype(int)
ans=10**10
for v in permutations(r):
tmp=0
for i in range(R-1):
tmp+=dist[v[i],v[i+1]]
ans=min(ans,tmp)
print(ans)
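# Worked example (made up, not the official sample):
#   n=3 m=3 R=3, r = 1 2 3, edges: 1-2 (cost 1), 2-3 (cost 1), 3-1 (cost 4)
# floyd_warshall gives dist(1,2)=1 and dist(2,3)=1, so visiting the towns
# as 1 -> 2 -> 3 (or the reverse) costs 2, which is the printed answer.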
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8442183c9909a042b13be80ea1fe9cd51b162c5e
|
fb64776f71eb2a469395a39c3ff33635eb388357
|
/apps/accounts/tests/unit/services/test_session_service.py
|
18ae5c6a9b811a936437f0a623fdd3f5404cbe91
|
[
"MIT"
] |
permissive
|
jimialex/django-wise
|
ec79d21c428fd1eea953362890051d2120e19f9e
|
3fdc01eabdff459b31e016f9f6d1cafc19c5a292
|
refs/heads/master
| 2023-04-30T20:59:51.625190
| 2021-05-10T06:55:40
| 2021-05-10T06:55:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,101
|
py
|
# -*- coding: utf-8 -*-
import json
import pytest
import requests_mock
from doubles import allow
from google.oauth2 import id_token
from rest_framework import status
from apps.accounts.models import User
from apps.accounts.api.error_codes import AccountsErrorCodes
from apps.contrib.api.exceptions.base import APIBaseException
from apps.accounts.services.user_service import UserService
from apps.accounts.services.session_service import SessionService
@pytest.mark.django_db
class SessionServiceTests:
@staticmethod
def test_process_google_token(test_user):
allow(id_token).verify_oauth2_token.and_return({
'iss': SessionService.GOOGLE_ACCOUNTS_URL,
})
allow(UserService).create_or_update_for_social_networks.and_return(test_user)
user = SessionService.process_google_token('valid_token')
assert user is not None
assert isinstance(user, User)
@staticmethod
def test_process_google_token_invalid_issuer():
allow(id_token).verify_oauth2_token.and_return({
'iss': 'https://any.server',
})
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_google_token('valid_token')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_GOOGLE_TOKEN_ISSUER.code
@staticmethod
def test_process_google_token_invalid_token():
allow(id_token).verify_oauth2_token.and_raise(ValueError('Token Error'))
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_google_token('valid_token')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_GOOGLE_TOKEN_ID.code
@staticmethod
def test_process_facebook_valid_access_token(test_user):
allow(UserService).create_or_update_for_social_networks.and_return(test_user)
access_token = 'valid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text=json.dumps({
'email': test_user.email,
'first_name': test_user.first_name,
'last_name': test_user.last_name,
}),
status_code=status.HTTP_200_OK,
)
user = SessionService.process_facebook_token(access_token)
assert user is not None
assert isinstance(user, User)
@staticmethod
def test_process_facebook_token_invalid_access_token():
access_token = 'invalid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text=json.dumps({'error': 'facebook_raised_error'}),
status_code=status.HTTP_200_OK,
)
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_facebook_token(access_token)
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_FACEBOOK_ACCESS_TOKEN.code
@staticmethod
def test_process_facebook_token_invalid_access_token_from_format(test_user):
access_token = 'invalid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text='',
status_code=status.HTTP_200_OK,
)
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_facebook_token(access_token)
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_FACEBOOK_ACCESS_TOKEN.code
@staticmethod
def test_make_user_session(test_user):
session = SessionService.make_user_session(test_user)
assert 'access_token' in session
assert 'refresh_token' in session
@staticmethod
def test_validate_session(test_user):
plain_password = 'new_password'
test_user.set_password(plain_password)
test_user.save()
assert SessionService.validate_session(test_user, plain_password)
@staticmethod
def test_validate_session_invalid_credentials(test_user):
with pytest.raises(APIBaseException) as exec_info:
SessionService.validate_session(None, 'new_password')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_CREDENTIALS.code
with pytest.raises(APIBaseException) as exec_info:
SessionService.validate_session(test_user, 'new_password')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_CREDENTIALS.code
@staticmethod
def test_validate_session_inactive_account(test_user):
plain_password = 'another_password'
test_user.set_password(plain_password)
test_user.is_active = False
test_user.save()
with pytest.raises(APIBaseException) as exec_info:
SessionService.validate_session(test_user, plain_password)
assert exec_info.value.detail.code == AccountsErrorCodes.INACTIVE_ACCOUNT.code
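# Sketch of the `test_user` fixture the tests above assume (normally
# defined in a shared conftest.py; field names are illustrative):
#
#   @pytest.fixture
#   def test_user(db):
#       return User.objects.create(
#           username='tester', email='tester@example.com',
#           first_name='Test', last_name='User')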
|
[
"vicobits@gmail.com"
] |
vicobits@gmail.com
|
4699ac187da996ef09de31aa7def4cdc34852f34
|
d1ddb9e9e75d42986eba239550364cff3d8f5203
|
/google-cloud-sdk/lib/surface/help.py
|
0fc91d3eca0088a241bd20d9a83259c5cb961425
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/searchparty
|
8ecd702af0d610a7ad3a8df9c4d448f76f46c450
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
refs/heads/master
| 2022-11-19T14:44:55.421926
| 2017-07-28T14:55:43
| 2017-07-28T14:55:43
| 282,495,798
| 0
| 0
|
Apache-2.0
| 2020-07-25T17:48:53
| 2020-07-25T17:48:52
| null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A calliope command that prints help for another calliope command."""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Help(base.Command):
"""Prints detailed help messages for the specified commands.
This command prints a detailed help message for the commands specified
after the ``help'' operand.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'command',
nargs='*',
help="""\
A sequence of group and command names with no flags.
""")
def Run(self, args):
# --document=style=help to signal the metrics.Help() 'help' label in
# actions.RenderDocumentAction().Action().
self.ExecuteCommandDoNotUse(args.command + ['--document=style=help'])
return None
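# Usage sketch (standard gcloud invocation of this command):
#   $ gcloud help compute instances
# renders the same document as `gcloud compute instances --help`.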
|
[
"vinvivo@users.noreply.github.com"
] |
vinvivo@users.noreply.github.com
|
56d9545172d93dc81f00b75c4467ced42d10d46d
|
5e0de59693445ef463e8c6a8c05876aa9d975e9a
|
/student/views.py
|
cbd41d5ec89b0e3f8c2689c6ddd658b403c5ec6b
|
[] |
no_license
|
ZhonggenLi/Student-Management-Web-
|
e511691f4d625e80b5f4460dce7c13788faffd14
|
f5a5c30d171a182abe660bccd2c407d3f6ccf7b4
|
refs/heads/master
| 2022-12-02T08:05:43.493136
| 2020-08-09T02:00:19
| 2020-08-09T02:00:19
| 285,771,013
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,785
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from student.models import Student
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import redirect
# Create your views here.
@csrf_exempt
def insert(request):
if request.POST:
post = request.POST
new_student = Student(
num = post["num"],
name = post["name"],
chinese = post["chinese"],
math = post["math"],
english = post["english"],
physics = post["physics"],
chemistry = post["chemistry"],
allscore = int(post["chinese"])+int(post["math"])+int(post["english"])+int(post["physics"])+int(post["chemistry"]))
new_student.save()
return render(request, 'insert.html')
def list(request):
student_list = Student.objects.all()
c = {"student_list": student_list, }
return render(request, "list.html", c)
def delete(request):
delete_num = request.GET.get('delete_num')
Student.objects.get(num = delete_num).delete()
return render(request, "delete.html")
def updateStudent(request):
update_num = request.GET.get('update_num')
update_student = Student.objects.get(num=update_num)
a = {"update_student": update_student, }
if request.POST:
update_name = request.POST.get("name")
update_chinese = request.POST.get("chinese")
update_math = request.POST.get("math")
update_english = request.POST.get("english")
update_physics = request.POST.get("physics")
update_chemistry = request.POST.get("chemistry")
update_student.num = update_num
update_student.name = update_name
update_student.chinese = update_chinese
update_student.math = update_math
update_student.english = update_english
update_student.physics = update_physics
update_student.chemistry = update_chemistry
update_student.allscore =int(update_chemistry)+int(update_physics)+int(update_english)+int(update_math)+int(update_chinese)
update_student.save()
return render(request, "update.html", a)
def questu(request):
stu = {}
if request.POST:
quename = request.POST.get("name")
quenum = request.POST.get("num")
if quename:
student_list = Student.objects.filter(name = quename)
stu = {"student_list": student_list, }
elif quenum:
student_list = Student.objects.filter(num = quenum)
stu = {"student_list":student_list, }
return render(request, "questu.html", stu)
def sinsort(request):
stu = {}
if request.POST:
proj = request.POST.get("proj")
if proj == '1':
stulist = Student.objects.order_by("-chinese")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '2':
stulist = Student.objects.order_by("-math")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '3':
stulist = Student.objects.order_by("-english")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '4':
stulist = Student.objects.order_by("-physics")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '5':
stulist = Student.objects.order_by("-chemistry")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '6':
stulist = Student.objects.order_by("-allscore")
stu = {"stulist":stulist,"proj":proj, }
return render(request, "sinsort.html", stu)
def fraction(request):
stu = {}
if request.POST:
score = request.POST.get("score")
if score == '1':
stulist = Student.objects.filter(allscore__gte=600)
stulist = sorted(stulist, key=lambda x:x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '2':
stulist = Student.objects.filter(allscore__gte=500, allscore__lt=600)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '3':
stulist = Student.objects.filter(allscore__gte=400, allscore__lt=500)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '4':
stulist = Student.objects.filter(allscore__gte=300, allscore__lt=400)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '5':
stulist = Student.objects.filter(allscore__lte=300)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist":stulist, }
return render(request, "fraction.html", stu)
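# Refactor sketch (behavior-preserving, untested): the elif chains in
# sinsort() and fraction() could be table-driven, e.g.
#
#   ORDER_FIELDS = {'1': '-chinese', '2': '-math', '3': '-english',
#                   '4': '-physics', '5': '-chemistry', '6': '-allscore'}
#   field = ORDER_FIELDS.get(proj)
#   if field:
#       stu = {"stulist": Student.objects.order_by(field), "proj": proj}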
|
[
"you@example.com"
] |
you@example.com
|
2c5e7974069afe82223007f602552e7aa63c0b86
|
f810836bea801f2fa85418ac7f5f5ffb0f3e0bda
|
/abc/abc107/B - Grid Compression.py
|
81b610852a92bc42137f84cad2c058bfd44c6c07
|
[] |
no_license
|
cocoinit23/atcoder
|
0afac334233e5f8c75d447f6adf0ddf3942c3b2c
|
39f6f6f4cc893e794d99c514f2e5adc9009ee8ca
|
refs/heads/master
| 2022-08-29T06:01:22.443764
| 2022-07-29T07:20:05
| 2022-07-29T07:20:05
| 226,030,199
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
h, w = map(int, input().split())
grid = []
for i in range(h):
temp = list(input())
if set(temp) != {'.'}:
grid.append(temp)
transpose = [list(x) for x in zip(*grid)]
ans = []
for l in transpose:
if set(l) != {'.'}:
ans.append(l)
ans = [list(x) for x in zip(*ans)]
for l in ans:
print(''.join(l))
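# Worked example (made up, not the official sample): for
#   h=2 w=3 with rows ".#." and "..."
# the all-'.' second row and the all-'.' first and third columns are
# dropped, so the program prints the single line "#".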
|
[
"cocoinit23@gmail.com"
] |
cocoinit23@gmail.com
|
23d42de74d03b489f2e51fe784d08a6877779f89
|
6b7c93ee4dc224e3041cd3df8e1d8ab128144cb8
|
/dodo.py
|
27adb2c078501374359755278adfcebcf6302b9e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
philip-luk-tangent-anim/doit-py
|
df915b88d7d3e9964f2d1463d3255ae9c415f2da
|
500731ce25e89f327d190b7b8b3fc02bbd71c0f1
|
refs/heads/master
| 2022-09-08T08:44:06.172002
| 2020-05-24T12:35:05
| 2020-05-24T12:35:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
import glob
import subprocess
from doitpy.pyflakes import Pyflakes
from doitpy.coverage import PythonPackage, Coverage
from doitpy.package import Package
from doitpy import docs
DOIT_CONFIG = {'default_tasks': ['pyflakes', 'test']}
def task_pyflakes():
flakes = Pyflakes()
yield flakes.tasks('*.py')
yield flakes.tasks('doitpy/*.py')
yield flakes.tasks('tests/**/*.py',
exclude_paths=['tests/sample/flake_fail.py',])
def task_test():
"""run unit-tests"""
# XXX
return {'actions': ['py.test tests']}
def task_coverage():
cov = Coverage([PythonPackage('doitpy', test_path='tests')],
config={'branch':False})
yield cov.all()
yield cov.src()
yield cov.by_module()
def task_package():
"""upload package to pypi"""
pkg = Package()
yield pkg.manifest_git()
yield pkg.sdist()
yield pkg.sdist_upload()
def task_docs():
doc_files = glob.glob('doc/*.rst') + ['README.rst']
yield docs.spell(doc_files, 'doc/dictionary.txt')
yield docs.sphinx('doc/', 'doc/_build/html/', task_dep=['spell'])
yield docs.pythonhosted_upload('doc/_build/html/', task_dep=['sphinx'])
##########################
from doit.tools import result_dep
init_file = 'doitpy/__init__.py'
def task_version():
"""update version on <pkg-name>/__init__.py and doc/conf.py"""
# get package version from setup.py
# version must be set with a string literal using single/double quotes
# but not triple-quotes.
def version_str2tuple(string):
parts = []
for part in string.split('.'):
parts.append(part if not part.isdigit() else int(part))
return tuple(repr(x) for x in parts)
def get_version():
#cmd = ("""awk 'match($0, /version[[:space:]]*=[[:space:]]*"""
# r"""['\''"](.*)['\''"].*/, ary) {print ary[1]}' setup.py""")
cmd = 'python setup.py --version'
version_str = subprocess.check_output(cmd, shell=True,
universal_newlines=True)
version_str = version_str.strip()
version_tuple = version_str2tuple(version_str)
return {
'version': '.'.join(version_tuple[:2]),
'release': version_str,
'tuple': version_tuple,
}
yield {
'name': 'get_from_setup',
'file_dep': ['setup.py'],
'actions': [get_version],
}
sed = "sed --in-place --regexp-extended "
yield {
'name': 'set_pkg',
'uptodate': [result_dep('version:get_from_setup')],
'getargs': {'version': ('version:get_from_setup', 'tuple')},
'actions': [
sed + r"'s/(__version__ = )(.*)/\1%(version)s/' " + init_file],
'targets': [init_file]
}
doc_file = 'doc/conf.py'
yield {
'name': 'set_doc',
'uptodate': [result_dep('version:get_from_setup')],
'getargs': {
'version': ('version:get_from_setup', 'version'),
'release': ('version:get_from_setup', 'release')},
'actions': [
sed + r""" "s/(version = )(.*)/\1'%(version)s'/" """ + doc_file,
sed + r""" "s/(release = )(.*)/\1'%(release)s'/" """ + doc_file,
]
}
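# Usage sketch (assumed invocations for this dodo.py):
#   $ doit            # runs the default tasks: pyflakes + test
#   $ doit version    # re-reads setup.py, rewrites __init__.py and doc/conf.py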
|
[
"schettino72@gmail.com"
] |
schettino72@gmail.com
|
b98590e375b6fc48f66d64b21aa03c098ed29e85
|
c9d3b03512dc3b2d268d0e99560889226322487c
|
/ggH01j/cut_hww_7TeV/mH125bkg/shape.py
|
0fd39b064743c2b4a2496e9d1923046b12560d07
|
[] |
no_license
|
latinos/DatacardsConfigurations
|
57f9d8707b3987de0491c66aa9533f9447cfb043
|
25827f8f8284d50d680ce1527e3b8c17c27d7c4a
|
refs/heads/master
| 2021-01-22T03:05:27.692914
| 2015-04-13T08:22:02
| 2015-04-13T08:22:02
| 14,419,053
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
lumi=4.94
chans=['of_0j','of_1j', 'sf_0j', 'sf_1j']
# set of mc samples: 0j1j, vbf
mcset='0j1j-mH125'
dataset='Data2011'
variable='mll' # remember, y:x
selection='hww2011'
# TTree::Draw style as in h(nx,xmin,xmax, ny,ymin,ymax)
# shape range. can be an
# - hard-coded label
# - a tuple (nx,xmin,xmax)
# - 2d tuple (nx,xmin,xmax,ny,ymin,ymax)
# - 1d array ([x0,..,xn],)
# - 2d array ([x0,..,xn],[y0,...,ym])
#range=(4,80,280,16,0,200)
#range = ([80., 130., 180., 230., 280.],[0., 12.5, 25., 37.5, 50., 62.5, 75., 87.5, 100., 112.5, 125, 137.5, 150., 162.5, 175., 187.5, 200.])
# range = ([80., 130., 180., 280.],[0., 25., 37.5, 50., 62.5, 75., 87.5, 100., 112.5, 125, 140., 160., 190., 230., 310., 600.])
range = (1,0,200)
tag='mll_hww'
xlabel='m_{ll}'
# rebin=10
rebin=1
# directories
path_latino = '/shome/mtakahashi/HWW/Tree/ShapeAna/42x_494fb/tree_skim_wwmin/'
path_dd = '/shome/mtakahashi/HWW/Data/dd/hww_2011_494fb/'
#path_latino = '/afs/cern.ch/work/x/xjanssen/public/LatinoTrees/ShapeAnalysis/Tree/tree_skim_wwmin/'
#path_dd = '/afs/cern.ch/user/m/maiko/work/private/Data/dd/hww_2012_195fb/'
# other directories
path_shape_raw='raw'
path_shape_merged='merged'
|
[
"massironi.andrea@gmail.com"
] |
massironi.andrea@gmail.com
|
3a2c6507f4c805423f50e8689b45a9aa9f1b7965
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4342/codes/1764_1577.py
|
7620b15a0235fac3283e2333810c92277d214f4a
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from numpy import *
acel = array(eval(input("aceleracao:")))
vo = array(eval(input("velocidade inicial:")))
N = int(input("numero positivo?"))
t = 0
# Completion sketch: the file originally ended at an empty "while()";
# the assumed intent is to print the position s for t = 0..N.
while t <= N:
    s = ((acel * (t ** 2)) / 2) + (vo * t)
    print(s)
    t += 1
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
c87505ef499af065bd826fc255a3323af1107f30
|
b503e79ccfca67c8114f5bd7a215f5ae993a0ba4
|
/airflow/security/permissions.py
|
983ebbd7f48ef4f41509dfb4a9f356808687fe2a
|
[
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] |
permissive
|
github/incubator-airflow
|
df1d9780f862ea1df8261ea6015dd50a4583f983
|
73f70e00b9fd294057f8ca6b714a85622f6d5dd5
|
refs/heads/gh-2.0.2
| 2023-07-29T18:08:43.140580
| 2022-09-14T18:23:42
| 2022-09-14T18:23:42
| 80,634,006
| 24
| 27
|
Apache-2.0
| 2023-04-18T04:24:36
| 2017-02-01T15:34:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,390
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Resource Constants
RESOURCE_ADMIN_MENU = "Admin"
RESOURCE_AIRFLOW = "Airflow"
RESOURCE_AUDIT_LOG = "Audit Logs"
RESOURCE_BROWSE_MENU = "Browse"
RESOURCE_DAG = "DAGs"
RESOURCE_DAG_PREFIX = "DAG:"
RESOURCE_DOCS_MENU = "Docs"
RESOURCE_DOCS_LINK = "Documentation"
RESOURCE_CONFIG = "Configurations"
RESOURCE_CONNECTION = "Connections"
RESOURCE_DAG_CODE = "DAG Code"
RESOURCE_DAG_RUN = "DAG Runs"
RESOURCE_IMPORT_ERROR = "ImportError"
RESOURCE_JOB = "Jobs"
RESOURCE_POOL = "Pools"
RESOURCE_PLUGIN = "Plugins"
RESOURCE_SLA_MISS = "SLA Misses"
RESOURCE_TASK_INSTANCE = "Task Instances"
RESOURCE_TASK_LOG = "Task Logs"
RESOURCE_TASK_RESCHEDULE = "Task Reschedules"
RESOURCE_VARIABLE = "Variables"
RESOURCE_WEBSITE = "Website"
RESOURCE_XCOM = "XComs"
RESOURCE_USERINFO_EDIT_VIEW = "UserInfoEditView"
RESOURCE_RESET_MY_PASSWORD_VIEW = "ResetMyPasswordView"
RESOURCE_USER_DB_MODELVIEW = "UserDBModelView"
RESOURCE_USER_OID_MODELVIEW = "UserOIDModelView"
RESOURCE_USER_LDAP_MODELVIEW = "UserLDAPModelView"
RESOURCE_USER_OAUTH_MODELVIEW = "UserOAuthModelView"
RESOURCE_USER_REMOTEUSER_MODELVIEW = "UserRemoteUserModelView"
# Action Constants
ACTION_CAN_CREATE = "can_create"
ACTION_CAN_READ = "can_read"
ACTION_CAN_EDIT = "can_edit"
ACTION_CAN_DELETE = "can_delete"
ACTION_CAN_ACCESS_MENU = "menu_access"
ACTION_CAN_THIS_FORM_GET = "can_this_form_get"
ACTION_CAN_THIS_FORM_POST = "can_this_form_post"
ACTION_RESETMYPASSWORD = "resetmypassword"
ACTION_CAN_USERINFO = "can_userinfo"
ACTION_USERINFOEDIT = "userinfoedit"
DEPRECATED_ACTION_CAN_DAG_READ = "can_dag_read"
DEPRECATED_ACTION_CAN_DAG_EDIT = "can_dag_edit"
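# Usage sketch: Airflow's security manager pairs these constants into
# (action, resource) permission tuples, e.g.
#   (ACTION_CAN_READ, RESOURCE_DAG)
#   (ACTION_CAN_EDIT, RESOURCE_DAG_PREFIX + "example_dag_id")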
|
[
"noreply@github.com"
] |
github.noreply@github.com
|
c2ae43c2b91dac46aa6f964347b09d8dd965258b
|
147d0863f4590649a90ea5f78c66974723a87247
|
/api/views.py
|
dd4ab9ddecab2559c7faf133dec1c6c30553d792
|
[] |
no_license
|
jinchuika/ligabot
|
af5bd5443dc0df7d929e7b866869ba075c91db55
|
69544912e1ac46f281ba2fc78ff913d60d9a2a38
|
refs/heads/master
| 2021-01-20T12:50:32.894359
| 2017-05-08T14:07:47
| 2017-05-08T14:07:47
| 90,419,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
from django.db.models import Q
from .serializers import MatchSerializer, TableSerializer, CompetitionSerializer, FixtureSerializer, TeamSerializer
from rest_framework import mixins, generics, viewsets
from api.api_request import RequestHandler
from api.models import Competition, Fixture, Team
class TeamMatchListView(generics.ListAPIView):
serializer_class = MatchSerializer
def get_queryset(self):
req = RequestHandler()
return req.get_team_scores(self.kwargs['team_id'])
class LeagueMatchListView(generics.ListAPIView):
serializer_class = MatchSerializer
def get_queryset(self):
req = RequestHandler()
return req.get_league_scores(self.kwargs['league_id'])
class LeagueStandingListView(generics.ListAPIView):
serializer_class = TableSerializer
def get_queryset(self):
req = RequestHandler()
return req.get_standings(self.kwargs['league_id'])
class CompetitionApiView(mixins.ListModelMixin, generics.GenericAPIView):
serializer_class = CompetitionSerializer
queryset = Competition.objects.all()
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class FixtureViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = FixtureSerializer
queryset = Fixture.objects.all()
def get_queryset(self):
queryset = Fixture.objects.all()
competition_id = self.request.query_params.get('competition', None)
team_id = self.request.query_params.get('team', None)
home_id = self.request.query_params.get('home', None)
away_id = self.request.query_params.get('away', None)
if competition_id is not None:
queryset = queryset.filter(competition__id=competition_id)
if team_id is not None:
queryset = queryset.filter(Q(home_team__id=team_id) | Q(away_team__id=team_id))
if home_id is not None:
queryset = queryset.filter(home_team__id=home_id)
if away_id is not None:
queryset = queryset.filter(away_team__id=away_id)
return queryset
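# Example filters FixtureViewSet.get_queryset supports (URL paths depend
# on the router registration, which is not shown):
#   GET /fixtures/?competition=3
#   GET /fixtures/?team=10          (matches home or away side)
#   GET /fixtures/?home=10&away=12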
class CompetitionViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = CompetitionSerializer
queryset = Competition.objects.all()
class TeamViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = TeamSerializer
queryset = Team.objects.all()
|
[
"jinchuika@gmail.com"
] |
jinchuika@gmail.com
|