content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def merge(A, p, q, r):
    """Merge the two adjacent sorted runs A[p..q] and A[q+1..r] in place.

    Requires p <= q < r. Runs in Theta(n) time and Theta(n) extra space,
    where n = r - p + 1. The merge is stable (ties taken from the left run).

    This version inlines the tail copy with slice assignment instead of
    relying on an external ``copy()`` helper.
    """
    left = A[p:q + 1]
    right = A[q + 1:r + 1]
    i = 0
    j = 0
    for k in range(p, r + 1):
        if i == len(left):
            # Left run exhausted: copy the remainder of the right run.
            A[k:r + 1] = right[j:]
            break
        if j == len(right):
            # Right run exhausted: copy the remainder of the left run.
            A[k:r + 1] = left[i:]
            break
        if left[i] <= right[j]:
            A[k] = left[i]
            i += 1
        else:
            A[k] = right[j]
            j += 1
def parse_csv_data(csv_filename: FileIO) -> list[str]:
    """Return the contents of ``csv_filename`` as a list of row strings.

    Returns an empty list (after logging a warning) when the file does
    not exist.
    """
    try:
        # Use a context manager so the handle is always closed; the
        # original leaked the open file object.
        with open(csv_filename) as csv_file:
            return csv_file.readlines()
    except FileNotFoundError:
        logging.warning("File with path '%s' not found", csv_filename)
        return []
def make_crews(crews, config, state, parameters, timeseries, deployment_days):
    """Populate ``crews`` with ``config['n_crews']`` BaseCrew instances.

    Args:
        crews (list): List of crews (mutated in place)
        config (dict): Method parameters
        state (dict): Current state of LDAR-Sim
        parameters (dict): Program parameters
        timeseries (dict): Timeseries
        deployment_days (list): days method can be deployed based on weather
        --- Required in module.company.BaseCompany ---
    """
    # Crew ids are 1-based.
    for crew_id in range(1, config['n_crews'] + 1):
        crew = BaseCrew(state, parameters, config,
                        timeseries, deployment_days, id=crew_id)
        crews.append(crew)
def generate_parquet_file(
    name: str, columns: Mapping[str, str], num_rows: int, custom_rows: Mapping[int, Mapping[str, Any]] = None
) -> str:
    """Generate random tabular data and persist it to a temp parquet file.

    If the target file already exists it is reused as-is. ``custom_rows``
    maps row indices to explicit row values that override the random ones.
    Returns the path of the written file.
    """
    target = os.path.join(tmp_folder(), name + "." + filetype)
    if os.path.exists(target):
        return target
    column_types = list(columns.values()) if num_rows else []
    generated = [_generate_row(column_types) for _ in range(num_rows)]
    # Overwrite selected rows with caller-supplied values.
    if custom_rows:
        for row_index, row in custom_rows.items():
            generated[row_index] = row
    header = list(columns.keys()) if num_rows else []
    return _save_parquet_file(target, header, generated)
def decrease_others(obj: list) -> list:
    """
    decrease the confidence of those that aren't increased

    Not yet implemented: the function currently returns None without
    touching ``obj``.
    """
    # TODO: implement confidence decay for entries that were not increased
def _interpolate_sym(y0, Tkk, f_Tkk, y_half, f_yj, hs, H, k, atol, rtol,
        seq=(lambda t: 4*t-2)):
    """
    Symmetric dense output formula; used for example with the midpoint method.
    It calculates a polynomial to interpolate any value from t0 (time at y0) to
    t0+H (time at Tkk). Based on Dense Output for the GBS Method, II.9 pg
    237-239.
    Returns a polynomial that fulfills the conditions at II.9.40 (step 3). To
    take into account: this coefficients were calculated for the shifted
    polynomial with x -> x-1/2.
    Parameters
    ----------
    y0 : float
        solution of ODE at the previous step, at t0
    Tkk : float
        solution of ODE once the step was taken, at t0+H
    f_Tkk : float
        function evaluation at Tkk, t0+H
    y_half : 2D array
        array containing for each extrapolation value (1...k) an array with the
        intermediate (at half the integration interval) solution value.
    f_yj : 3D array
        array containing for each extrapolation value (1...k) an array with all
        the function evaluations done at the intermediate solution values.
    hs : array
        array containing for each extrapolation value (1...k) the inner step
        taken, H/nj (II.9.1 ref I)
    H : float
        integration step to take (the output, without interpolation, will be
        calculated at t_curr+h) This value matches with the value H in ref I
        and ref II.
    k : int
        number of extrapolation steps to take in this step (determines the
        number of extrapolations performed to achieve a better integration
        output, equivalent to the size of the extrapolation tableau).
    rtol, atol : float
        the input parameters rtol (relative tolerance) and atol (absolute
        tolerance) determine the error control performed by the solver. See
        function _error_norm(y1, y2, atol, rtol).
    seq : callable(i), int i>=1
        the step-number sequence (examples II.9.1 , 9.6, 9.35 ref I).
    Returns
    -------
    poly (callable(t)
        interpolation polynomial (see definition of poly(t) function in
        _interpolation_poly())
    """
    # Degrees of the two interpolation polynomials (ref I, II.9): the
    # higher-order one (u) and one order lower (u_1), used for error control.
    u = 2*k-3
    u_1 = u - 1
    # Scaled derivative estimates at the midpoint of the interval.
    ds = _compute_ds(y_half, f_yj, hs, k, seq=seq)
    # Polynomial coefficient arrays; the last four entries of each are
    # solved for below from the boundary conditions (II.9.40).
    a_u = (u+5)*[None]
    a_u_1 = (u_1+5)*[None]
    # Taylor-like coefficients from the midpoint derivatives.
    for i in range(u+1):
        a_u[i] = (H**i)*ds[i]/math.factorial(i)
    # ``1*`` forces a copy of the slice rather than aliasing.
    a_u_1[0:u_1+1] = 1*a_u[0:u_1+1]
    def A_inv(u):
        # Closed-form inverse of the 4x4 boundary-condition system for the
        # shifted polynomial (x -> x - 1/2); see II.9.40 step 3.
        return (2**(u-2))*np.matrix(
            [[(-2*(3 + u))*(-1)**u, -(-1)**u, 2*(3 + u), -1],
            [(4*(4 + u))*(-1)**u, 2*(-1)**u, 4*(4 + u), -2],
            [(8*(1 + u))*(-1)**u, 4*(-1)**u, -8*(1 + u), 4],
            [(-16*(2 + u))*(-1)**u, -8*(-1)**u, -16*(2 + u), 8]]
        )
    A_inv_u = A_inv(u)
    A_inv_u_1 = A_inv(u_1)
    # Right-hand sides b1..b4: value and derivative matching at both ends
    # of the interval (x = -1/2 and x = +1/2 in shifted coordinates).
    # ``1*`` copies so the subtractions below do not mutate y0/Tkk.
    b1_u = 1*y0
    b1_u_1 = 1*y0
    for i in range(u_1+1):
        b1_u -= a_u[i]/(-2)**i
        b1_u_1 -= a_u_1[i]/(-2)**i
    b1_u -= a_u[u]/(-2)**u
    # Derivative condition at the left end; f_yj[1][0] is f(t0, y0).
    b2_u = H*f_yj[1][0]
    b2_u_1 = H*f_yj[1][0]
    for i in range(1, u_1+1):
        b2_u -= i* a_u[i]/(-2)**(i-1)
        b2_u_1 -= i*a_u_1[i]/(-2)**(i-1)
    b2_u -= u*a_u[u]/(-2)**(u-1)
    # Value condition at the right end (t0+H).
    b3_u = 1*Tkk
    b3_u_1 = 1*Tkk
    for i in range(u_1+1):
        b3_u -= a_u[i]/(2**i)
        b3_u_1 -= a_u_1[i]/(2**i)
    b3_u -= a_u[u]/(2**u)
    # Derivative condition at the right end.
    b4_u = H*f_Tkk
    b4_u_1 = H*f_Tkk
    for i in range(1, u_1+1):
        b4_u -= i* a_u[i]/(2**(i-1))
        b4_u_1 -= i*a_u_1[i]/(2**(i-1))
    b4_u -= u*a_u[u]/(2**(u-1))
    b_u = np.array([b1_u,b2_u,b3_u,b4_u])
    b_u_1 = np.array([b1_u_1,b2_u_1,b3_u_1,b4_u_1])
    # Solve for the four remaining coefficients of each polynomial.
    x = A_inv_u*b_u
    x = np.array(x)
    x_1 = A_inv_u_1*b_u_1
    x_1 = np.array(x_1)
    a_u[u+1] = x[0]
    a_u[u+2] = x[1]
    a_u[u+3] = x[2]
    a_u[u+4] = x[3]
    a_u_1[u_1+1] = x_1[0]
    a_u_1[u_1+2] = x_1[1]
    a_u_1[u_1+3] = x_1[2]
    a_u_1[u_1+4] = x_1[3]
    return _interpolation_poly(a_u, a_u_1, H, 0.5, atol, rtol)
def _gtin_fails_checksum(gtin: str) -> bool:
    """Determine whether ``gtin`` violates the check-digit calculation.

    The GTIN is zero-padded to the canonical 14 digits, its last digit is
    compared against the check digit recomputed from the first 13.

    Args:
        gtin: a string representing the product's GTIN
    Returns:
        True if the gtin fails check digit validation, otherwise False.
    """
    normalized = gtin.zfill(14)
    body, check_char = normalized[:-1], normalized[-1]
    return _calculate_check_digit(body) != int(check_char)
def mergeGuideInfo(seq, startDict, pamPat, otMatches, inputPos, effScores, sortBy=None):
    """
    merges guide information from the sequence, the efficiency scores and the off-targets.
    creates rows with too many fields. Probably needs refactoring.
    for each pam in startDict, retrieve the guide sequence next to it and score it
    sortBy can be "effScore", "mhScore", "oofScore" or "pos"

    Returns a 4-tuple:
        guideData   - list of per-guide rows (see guideRow below), sorted
        guideScores - dict pamId -> specificity score (may be None)
        hasNotFound - True if any guide had no off-target match data
        pamIdToSeq  - dict pamId -> guide sequence
    """
    allEnzymes = readEnzymes()
    guideData = []
    guideScores = {}
    hasNotFound = False
    pamIdToSeq = {}
    # Iterate over every PAM site and its flanking guide sequence.
    pamSeqs = list(flankSeqIter(seq.upper(), startDict, len(pamPat), True))
    for pamId, pamStart, guideStart, strand, guideSeq, pamSeq, pamPlusSeq in pamSeqs:
        # matches in genome
        # one desc in last column per OT seq
        if pamId in otMatches:
            pamMatches = otMatches[pamId]
            guideSeqFull = concatGuideAndPam(guideSeq, pamSeq)
            mutEnzymes = matchRestrEnz(allEnzymes, guideSeq, pamSeq, pamPlusSeq)
            posList, otDesc, guideScore, guideCfdScore, last12Desc, ontargetDesc, \
                subOptMatchCount = \
                makePosList(pamMatches, guideSeqFull, pamPat, inputPos)
        # no off-targets found?
        else:
            # Fill the row with sentinel values so downstream columns align.
            posList, otDesc, guideScore = None, "Not found", None
            guideCfdScore = None
            last12Desc = ""
            hasNotFound = True
            mutEnzymes = []
            ontargetDesc = ""
            subOptMatchCount = False
            seq34Mer = None
        # Row layout (by index): 0 guideScore, 1 cfdScore, 2 effScores dict,
        # 3 pamStart, 4 guideStart, 5 strand, 6 pamId, 7 guideSeq, 8 pamSeq,
        # 9 posList, 10 otDesc, 11 last12Desc, 12 mutEnzymes,
        # 13 ontargetDesc, 14 subOptMatchCount.
        guideRow = [guideScore, guideCfdScore, effScores.get(pamId, {}), pamStart, guideStart, strand, pamId, guideSeq, pamSeq, posList, otDesc, last12Desc, mutEnzymes, ontargetDesc, subOptMatchCount]
        guideData.append( guideRow )
        guideScores[pamId] = guideScore
        pamIdToSeq[pamId] = guideSeq
    # Sort key selection: by position, by a named efficiency score, or by
    # specificity score (the default, also used for sortBy == "spec").
    if sortBy == "pos":
        sortFunc = (lambda row: row[3])
        reverse = False
    elif sortBy is not None and sortBy!="spec":
        sortFunc = (lambda row: row[2].get(sortBy, 0))
        reverse = True
    else:
        sortFunc = operator.itemgetter(0)
        reverse = True
    guideData.sort(reverse=reverse, key=sortFunc)
    return guideData, guideScores, hasNotFound, pamIdToSeq
def reset_rf_samples():
    """ Undoes the changes produced by set_rf_samples.
    """
    # Restore scikit-learn's default bootstrap behaviour: draw n_samples
    # indices with replacement from the full training set.
    forest._generate_sample_indices = (lambda rs, n_samples:
        forest.check_random_state(rs).randint(0, n_samples, n_samples))
def _scan_frame(trans, min_orf, to_coords, orf_regions):
    """Scan one translated reading frame for full ORFs.

    ``trans`` is the translated frame (one letter per codon), ``to_coords``
    maps (start_codon_index, stop_codon_index) to the 1-based nucleotide
    coordinates used as the result key, and matches longer than ``min_orf``
    codons are stored into ``orf_regions`` (key: coords, value: peptide
    including the stop symbol).
    """
    starts = [idx for idx, cod in enumerate(trans) if cod == "M"]
    ends = [idx for idx, cod in enumerate(trans) if cod == "*"]
    if not (starts and ends):
        return
    pos1 = starts[0]
    pos2 = ends[0]
    s_i = 0
    e_i = 0
    while s_i < len(starts):
        pos1 = starts[s_i]
        # Advance to the first stop codon at/after the current start.
        while e_i < len(ends) - 1:
            if pos2 < pos1:
                e_i += 1
                pos2 = ends[e_i]
            else:
                break
        if pos2 < pos1:
            # No stop codon downstream: no further full ORFs in this frame.
            break
        if pos2 - pos1 > min_orf:
            orf_regions[to_coords(pos1, pos2)] = trans[pos1: pos2 + 1]
        s_i += 1
        # Skip starts nested inside the ORF just emitted.
        while s_i < len(starts):
            pos1 = starts[s_i]
            if pos1 < pos2:
                s_i += 1
                continue
            break


def search_orf(seq: str, min_orf: int) -> dict:
    """Search for full ORFs over a certain length in all 6 reading frames.

    The three forward frames and the three reverse-complement frames are
    translated and scanned; ORFs longer than ``min_orf`` codons are returned
    as a dict mapping 1-based nucleotide coordinates (start, end) to the
    translated peptide (stop symbol included). For reverse frames the
    coordinates count back from the end of ``seq``.

    Note: the original implementation repeated the scan logic six times and
    was annotated ``-> list`` although it returns a dict; both are fixed.
    """
    orf_regions = {}
    reverse = rc_seq(seq)
    frames = []
    # Forward frames: offset o maps codon index p to nucleotide p*3 + 1 + o.
    for offset in range(3):
        frame_seq = seq[offset:]
        frame_seq = frame_seq[: len(frame_seq) // 3 * 3]  # trim to codons
        frames.append((
            translate_exon(frame_seq),
            lambda p1, p2, o=offset: (p1 * 3 + 1 + o, p2 * 3 + 3 + o),
        ))
    # Reverse frames: coordinates count back from the 3' end of seq.
    for offset in range(3):
        frame_seq = reverse[offset:]
        frame_seq = frame_seq[: len(frame_seq) // 3 * 3]
        frames.append((
            translate_exon(frame_seq),
            lambda p1, p2, o=offset: (len(seq) - p1 * 3 - o,
                                      len(seq) - p2 * 3 - 2 - o),
        ))
    for trans, to_coords in frames:
        _scan_frame(trans, min_orf, to_coords, orf_regions)
    return orf_regions
def prot_dotplot(seq1: str, seq2: str, length=10, score="BLOSUM62.dat"):
    """Generate a protein dot-plot score matrix.

    For every window of ``length`` residues, sums the substitution scores
    (from the BLOSUM table named by ``score``) along the diagonal starting
    at that position. Returns an int matrix of shape
    (len(seq2) - length + 1, len(seq1) - length + 1).

    Fixes to the original: the ``def`` line had been commented out (leaving
    the body as broken module-level code), and the windowed sum was computed
    but never stored, so the function always returned zeros.
    """
    score_dict = load_blastp_score(score)
    seq1 = seq1.upper()
    seq2 = seq2.upper()
    # Pairwise residue substitution scores: rows index seq2, cols index seq1.
    score_m1 = np.zeros((len(seq2), len(seq1)), dtype=int)
    for id_1, aa_1 in enumerate(seq1):
        for id_2, aa_2 in enumerate(seq2):
            score_m1[id_2][id_1] = score_dict[(aa_1, aa_2)]
    dot_score = np.zeros((len(seq2) - length + 1, len(seq1) - length + 1), dtype=int)
    for id_1 in range(len(seq2) - length + 1):
        for id_2 in range(len(seq1) - length + 1):
            window_total = 0
            for id_ in range(length):
                window_total += score_m1[id_1 + id_][id_2 + id_]
            # BUG FIX: original discarded the windowed sum.
            dot_score[id_1][id_2] = window_total
    return dot_score
def test_word_combinations(text, expected_counter: Counter, i: Index, epoch: date):
    """Indexing with a date should suffix every counter key with the epoch."""
    suffix = ":1970:1"
    dated = Counter({name + suffix: count
                     for name, count in expected_counter.items()})
    i.index_text(text, epoch)
    assert i.backend.counter == dated
def getPath():
    """
    Return the renamer path stored in ./metadata.json.

    Also caches the value in the module-level ``path`` global, which other
    code in this module may read. Raises FileNotFoundError if the file is
    missing and KeyError if the "renamer"/"path" keys are absent.
    """
    global path
    with open('metadata.json', 'r') as openfile:
        json_object = json.load(openfile)
    # The unused ``pairs = json_object.items()`` from the original was removed.
    path = json_object["renamer"]["path"]
    return path
def read_ann_h5ad(file_path, spatial_key: Optional[str] = None):
    """
    read the h5ad file in Anndata format, and generate the object of StereoExpData.

    :param file_path: h5ad file path.
    :param spatial_key: use .obsm[`'spatial_key'`] as position. If spatial data, must set.
    :return: StereoExpData obj.
    """
    data = StereoExpData(file_path=file_path)
    # basic
    # attributes = ["obsm", "varm", "obsp", "varp", "uns", "layers"]
    # df_attributes = ["obs", "var"]
    with h5py.File(data.file, mode='r') as f:
        for k in f.keys():
            # Raw layers are intentionally ignored.
            if k == "raw" or k.startswith("raw."):
                continue
            if k == "X":
                # Sparse matrices are stored as groups, dense ones as datasets.
                if isinstance(f[k], h5py.Group):
                    data.exp_matrix = h5ad.read_group(f[k])
                else:
                    data.exp_matrix = h5ad.read_dataset(f[k])
            # NOTE: the original had an unreachable ``elif k == "raw":
            # assert False`` branch here; "raw" keys are skipped above.
            elif k == "obs":
                # Per-cell metadata; optional QC columns default to None.
                cells_df = h5ad.read_dataframe(f[k])
                data.cells.cell_name = cells_df.index.values
                data.cells.total_counts = cells_df['total_counts'] if 'total_counts' in cells_df.keys() else None
                data.cells.pct_counts_mt = cells_df['pct_counts_mt'] if 'pct_counts_mt' in cells_df.keys() else None
                data.cells.n_genes_by_counts = cells_df['n_genes_by_counts'] if 'n_genes_by_counts' in cells_df.keys() else None
            elif k == "var":
                # Per-gene metadata; only gene names are kept.
                genes_df = h5ad.read_dataframe(f[k])
                data.genes.gene_name = genes_df.index.values
                # data.genes.n_cells = genes_df['n_cells']
                # data.genes.n_counts = genes_df['n_counts']
            elif k == 'obsm':
                # Spatial coordinates, only read when a key is requested.
                if spatial_key is not None:
                    if isinstance(f[k], h5py.Group):
                        data.position = h5ad.read_group(f[k])[spatial_key]
                    else:
                        data.position = h5ad.read_dataset(f[k])[spatial_key]
            else:  # all other top-level keys are ignored
                pass
    return data
def get_token(token_method, acc=None, vo=None, idt=None, pwd=None):
    """
    Gets a token with the token_method provided.

    :param token_method: the method to get the token
    :param acc: Rucio account string
    :param vo: Rucio VO string
    :param idt: Rucio identity string
    :param pwd: Rucio password string (in case of userpass auth_type)
    :returns: None or token string
    """
    # Fall back to request metadata for any value not supplied explicitly.
    if not acc:
        acc = request.environ.get('HTTP_X_RUCIO_ACCOUNT')
    if not vo:
        vo = request.environ.get('HTTP_X_RUCIO_VO')
    if not idt:
        idt = request.environ.get('SSL_CLIENT_S_DN')
        # BUG FIX: guard against a missing client DN; the original called
        # .startswith on None and raised AttributeError.
        if idt and not idt.startswith('/'):
            # Convert a comma-separated DN into slash-separated form.
            idt = '/%s' % '/'.join(idt.split(',')[::-1])
    if not (acc and vo and idt):
        return None
    try:
        if pwd:
            token = token_method(acc, idt, pwd, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).token
        else:
            token = token_method(acc, idt, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).token
        return token
    except Exception:
        # Narrowed from a bare ``except``; auth failures yield None.
        return None
def valid_float_0_to_1(val):
    """
    :param val: Object to check, then throw an error if it is invalid
    :return: val if it is a float between 0 and 1 (otherwise invalid)
    """
    # Delegates to the shared ``validate`` helper: the lambda is the
    # acceptance predicate, ``float`` the coercion, and the string the
    # error message used when validation fails.
    return validate(val, lambda x: 0 <= float(x) <= 1, float,
                    'Value must be a number between 0 and 1')
def get_usps_data():
    """Download the USPS dataset via torchvision and return numpy arrays.

    Returns:
        (traindata, trainlabels, testdata, testlabels) as numpy arrays.
    """
    trainset = dsets.USPS(root='./data',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)
    testset = dsets.USPS(root='./data',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)
    traindata = np.array(trainset.data)
    trainlabels = np.array(trainset.targets)
    testdata = np.array(testset.data)
    testlabels = np.array(testset.targets)
    print('-' * 40)
    print('USPS数据集原始数据信息(pytorch):')
    print("--->USPS dataset train data's shape :", traindata.shape, 'dim:', traindata.ndim)
    print("--->USPS dataset train label's shape:", trainlabels.shape, 'dim', trainlabels.ndim)
    # BUG FIX: the original printed "test data's shape" for the labels and
    # read ndim from testset.data instead of the converted array.
    print("--->USPS dataset test data's shape:", testdata.shape, 'dim:', testdata.ndim)
    print("--->USPS dataset test label's shape:", testlabels.shape, 'dim', testlabels.ndim)
    return traindata, trainlabels, testdata, testlabels
def logout():
    """
    Logs out a user by clearing the JWT cookies on the response.

    Returns:
        (flask.Response, int): a confirmation payload with status 200 on
        success, or an error payload with status 400 on failure.
    """
    res = {}
    try:
        response = jsonify({"msg": "logout successful"})
        unset_jwt_cookies(response)
        return make_response(response), 200
    except Exception as e:
        # Report the failure reason back to the client.
        res["data"] = None
        res["msg"] = str(e)
        return make_response(jsonify(res)), 400
def bm_reduction(mat):
""" Performs the Bloch-Messiah decomposition of single mode thermal state.
Said decomposition writes a gaussian state as a a thermal squeezed-rotated-displaced state
The function returns the thermal population, rotation angle and squeezing parameters
"""
if mat.shape != (2, 2):
raise ValueError("Covariance matrix mat must be 2x2")
detm = np.linalg.det(mat)
nth = 0.5*(np.sqrt(detm)-1)
mm = mat/np.sqrt(detm)
a = mm[0, 0]
b = mm[0, 1]
r = -0.5*np.arccosh((1+a*a+b*b)/(2*a))
theta = 0.5*np.arctan2((2*a*b), (-1+a*a-b*b))
return nth, theta, r | 5,326,517 |
def pinlattice_2ring_full():
    """Full, non-test PinLattice instance for exercising the Subchannel
    object: two rings, unit pitch, half-unit pin diameter."""
    ring_count = 2
    pin_pitch = 1.0
    pin_diameter = 0.5
    return dassh.PinLattice(ring_count, pin_pitch, pin_diameter)
def dmenu_view_previous_entry(entry, folders):
    """Re-display a previously selected entry and type its text out.

    Args: entry (Item), folders
    Returns: entry (Item) unchanged (may be None).
    """
    if entry is None:
        return entry
    rendered = view_entry(entry, folders)
    type_text(rendered)
    return entry
def unary_to_gast(node):
    """Convert a JS unary expression (e.g. ``!``, ``-``) to a generic AST node.

    JavaScript parses negative number literals as unary minus, so the
    ``-`` operator is folded back into a plain numeric node; every other
    operator becomes a generic ``unaryOp`` node.
    """
    operator_symbol = node.operator
    if operator_symbol == "-":
        return {"type": "num", "value": -node.argument.value}
    converted_arg = js_router.node_to_gast(node.argument)
    return {"type": "unaryOp", "op": operator_symbol, "arg": converted_arg}
def _norm_intensity(spectrum_intensity: np.ndarray) -> np.ndarray:
"""
Normalize spectrum peak intensities.
Parameters
----------
spectrum_intensity : np.ndarray
The spectrum peak intensities to be normalized.
Returns
-------
np.ndarray
The normalized peak intensities.
"""
return spectrum_intensity / np.linalg.norm(spectrum_intensity) | 5,326,521 |
def axes_to_list(axes_data: dict) -> list:
    """Convert a dict of per-axis sensor arrays into a 2-D list for graphing.

    Each value is expected to expose ``.tolist()`` (e.g. a numpy array);
    the dict's insertion order determines row order.
    """
    return [axis_values.tolist() for axis_values in axes_data.values()]
def list(
    repo_info: str,
    git_host: str = DEFAULT_GIT_HOST,
    use_cache: bool = True,
    commit: str = None,
    protocol: str = DEFAULT_PROTOCOL,
) -> List[str]:
    """List every entrypoint exposed by the repo's hubconf.

    :param repo_info:
        a string with format ``"repo_owner/repo_name[:tag_name/:branch_name]"`` with an optional
        tag/branch. The default branch is ``master`` if not specified.
        Example: ``"brain_sdk/MegBrain[:hub]"``
    :param git_host:
        host address of git repo.
        Example: github.com
    :param use_cache:
        whether to use locally cached code or completely re-fetch.
    :param commit:
        commit id on github or gitlab.
    :param protocol:
        which protocol to use to get the repo, and HTTPS protocol only supports public repo on github.
        The value should be one of HTTPS, SSH.
    :return:
        all entrypoint names of the model.
    """
    hubmodule = _init_hub(repo_info, git_host, use_cache, commit, protocol)
    entrypoints = []
    # An entrypoint is any public callable attribute of the hub module.
    for attr_name in dir(hubmodule):
        if attr_name.startswith("__"):
            continue
        if callable(getattr(hubmodule, attr_name)):
            entrypoints.append(attr_name)
    return entrypoints
def repeatfunc(func: Callable, times: int = None, *args):
    """Repeat calls to func with specified arguments.

    Example: repeatfunc(random.random)

    :param func: function to be called
    :param times: number of calls; ``None`` means repeat indefinitely
        (annotation fixed from the undefined name ``Int`` to ``int``)
    :param args: positional arguments passed to every call
    :return: lazy iterator yielding ``func(*args)`` results
    """
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
def command_group(launcher, backend, local_rank): # pylint: disable=unused-argument
    """Defines a command group for launching distributed jobs.

    This function is mainly for interaction with the command line. The real
    launching is executed by `main()` function, through `result_callback()`
    decorator. In other words, the arguments obtained from the command line will
    be passed to `main()` function. As for how the arguments are passed, it is
    the responsibility of each command of this command group. Please refer to
    `BaseConfig.get_command()` in `configs/base_config.py` for more details.

    The body is intentionally empty: the parameters exist only so the CLI
    framework can parse and forward them.
    """
def create_shortcut(anti_ghosting: bool):
    """
    creates a new shortcut on desktop

    When ``anti_ghosting`` is True, the shortcut launches the bot through
    cmd.exe pinned to one CPU core at low priority; otherwise it launches
    the module directly with the current interpreter. Failures are logged
    rather than raised.
    """
    try:
        desktop = winshell.desktop()
        path = os.path.join(desktop, "Fishybot ESO.lnk")
        # COM automation object for creating Windows .lnk files.
        shell = Dispatch('WScript.Shell')
        shortcut = shell.CreateShortCut(path)
        if anti_ghosting:
            # /affinity 1 pins to core 0; /low reduces priority to limit
            # input "ghosting" while the bot runs.
            shortcut.TargetPath = r"C:\Windows\System32\cmd.exe"
            python_dir = os.path.join(os.path.dirname(sys.executable), "pythonw.exe")
            shortcut.Arguments = f"/C start /affinity 1 /low {python_dir} -m fishy"
        else:
            shortcut.TargetPath = os.path.join(os.path.dirname(sys.executable), "python.exe")
            shortcut.Arguments = "-m fishy"
        shortcut.IconLocation = manifest_file("icon.ico")
        shortcut.save()
        logging.info("Shortcut created")
    except Exception:
        # Best-effort feature: log the traceback but keep the app running.
        traceback.print_exc()
        logging.error("Couldn't create shortcut")
def prod(values: Iterable[int]) -> int:
    """Compute the product of the integers.

    Returns 1 (the multiplicative identity) for an empty iterable; the
    original raised TypeError in that case because ``reduce`` had no
    initializer.
    """
    return functools.reduce(operator.mul, values, 1)
def get_stars_dict(stars):
    """
    Transform list of stars into dictionary where keys are their names

    Parameters
    ----------
    stars : list, iterable
        Star objects

    Return
    ------
    dict
        Stars dictionary; objects without a ``name`` attribute are skipped
    """
    stars_by_name = {}
    for st in stars:
        try:
            stars_by_name[st.name] = st
        except AttributeError:
            # Narrowed from a bare ``except``: only objects lacking a
            # ``name`` attribute are skipped; other errors propagate.
            pass
    return stars_by_name
def main():
    """Start the bot: register Telegram handlers, poll for updates, and
    refresh session data once per minute."""
    logger.debug('start bot')
    # Create the EventHandler and pass it your bot's token.
    updater = Updater(TOKEN_TELEGRAM)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", help))
    dp.add_handler(CommandHandler("hilfe", help))
    dp.add_handler(CommandHandler("room", room))
    dp.add_handler(CommandHandler("time", start_times))
    # German aliases for the room/time commands.
    dp.add_handler(CommandHandler("raum", room))
    dp.add_handler(CommandHandler("zeit", start_times))
    updater.dispatcher.add_handler(CallbackQueryHandler(button))
    # on noncommand i.e message - echo the message on Telegram
    dp.add_handler(MessageHandler(Filters.text, echo))
    # on noncommand i.e message - echo the message on Telegram
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    i = 1
    # Periodic refresh loop: update session data every 60 seconds.
    while True:
        time.sleep(60)
        logger.debug('update')
        pyc.update()
        logger.debug(pyc.sessions)
    # NOTE(review): unreachable — the ``while True`` loop above never
    # exits; confirm whether idle() was meant to replace the loop.
    updater.idle()
def initialize_molecular_pos(
    key: PRNGKey,
    nchains: int,
    ion_pos: Array,
    ion_charges: Array,
    nelec_total: int,
    init_width: float = 1.0,
    dtype=jnp.float32,
) -> Tuple[PRNGKey, Array]:
    """Initialize a set of plausible initial electron positions.

    For each chain, every electron is attached to a randomly chosen ion and
    its position is drawn from an isotropic normal centered on that ion with
    standard deviation ``init_width``.

    When there are at least as many ions as electrons the assignment is a
    draw without replacement; otherwise it is with replacement, weighted by
    each ion's share of the total charge.
    """
    nion = len(ion_charges)
    sample_with_replacement = nelec_total > nion
    charge_weights = ion_charges / jnp.sum(ion_charges)
    per_chain_centers = []
    for _ in range(nchains):
        key, subkey = jax.random.split(key)
        ion_indices = jax.random.choice(
            subkey,
            nion,
            shape=(nelec_total,),
            replace=sample_with_replacement,
            p=charge_weights,
        )
        per_chain_centers.append(ion_pos[ion_indices])
    centers = jnp.stack(per_chain_centers, axis=0)
    key, subkey = jax.random.split(key)
    noise = init_width * jax.random.normal(subkey, centers.shape, dtype=dtype)
    return key, centers + noise
def urlencode(query, *args, **kwargs):
    """Flatten nested form-data structures and URL-encode them.

    The stdlib ``urlencode`` cannot handle nested containers such as
    ``{'my_dict': {'foo': 'bar'}, 'a': 'b'}`` or lists of nested tuples;
    this helper repeatedly expands nested values into flat key/value pairs
    first, then defers to the standard library (extra ``args``/``kwargs``
    are passed straight through). When sending the result as a request
    body, remember to set the appropriate Content-Type header, e.g.
    ``application/x-www-form-urlencoded``.

    .. _urlencode:
       https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlencode
    """
    expandable = (dict, list, tuple)
    pairs = _to_kv_list(query)
    if not all(_is_two_tuple(item) for item in pairs):
        raise ValueError("Expected query to be able to be converted to a "
                         "list comprised of length 2 tuples.")
    # Keep expanding until every value is a scalar.
    while any(isinstance(value, expandable) for _, value in pairs):
        pairs = _expand_query_values(pairs)
    return _urlencode(pairs, *args, **kwargs)
def tmpEnv(**environ):
    """
    Temporarily set the process environment variables.

    >>> with tmpEnv(PLUGINS_DIR=u'test/plugins'):
    ...     "PLUGINS_DIR" in os.environ
    True
    >>> "PLUGINS_DIR" in os.environ
    False

    :param environ: Environment variables to set
    """
    saved_environ = os.environ.copy()
    os.environ.update(environ)
    try:
        yield
    finally:
        # Restore the snapshot exactly, including removing added keys.
        os.environ.clear()
        os.environ.update(saved_environ)
def zca_whiten_np(images, epsilon=1e-6):
    """ZCA-whiten a batch of images using numpy/scipy.

    Stolen from https://github.com/keras-team/keras-preprocessing/blob/master/keras_preprocessing/image/image_data_generator.py
    A good answer on ZCA vs. PCA: https://stats.stackexchange.com/questions/117427/what-is-the-difference-between-zca-whitening-and-pca-whitening

    Parameters
    ----------
    images : np.array, shape (B, H, W, C)
    epsilon : float
        Regularizer added to the singular values before inversion.

    Returns
    -------
    whitened : np.array (float32), same shape as input
    """
    n_images, height, width, channels = images.shape
    # Flatten each image into a single feature vector.
    flat = np.reshape(images, (n_images, height * width * channels))
    # Center on the global batch mean.
    flat = flat - np.mean(flat)
    # Feature covariance, (H*W*C, H*W*C).
    cov = np.dot(flat.T, flat) / n_images
    # u: unitary matrix whose columns are the left singular vectors.
    u, s, _ = scipy.linalg.svd(cov)
    inv_sqrt_s = 1. / np.sqrt(s[np.newaxis] + epsilon)
    zca_matrix = (u * inv_sqrt_s).dot(u.T)
    whitened = np.dot(flat, zca_matrix)
    whitened = np.reshape(whitened, (n_images, height, width, channels))
    return whitened.astype(np.float32)  # ensure float32 output
def find_best_post_processing(cmd):
    """
    Find best postprocessing approach to use with the new dataset based on the f0.5 score macro-averaged.

    Compares three strategies over a 5-fold split — no post-processing,
    hanging-node, and confidence propagation — and saves the winner (plus
    the probability ranges needed for MinMaxScaler) to
    classification_tools/data/configuration.joblib.

    :param cmd: when truthy, print a progress bar to the console
    """
    # add stop words to the list found during the development of rcATT
    stop_words = stopwords.words('english')
    new_stop_words = ["'ll", "'re", "'ve", 'ha', 'wa',"'d", "'s", 'abov', 'ani', 'becaus', 'befor', 'could', 'doe', 'dure', 'might', 'must', "n't", 'need', 'onc', 'onli', 'ourselv', 'sha', 'themselv', 'veri', 'whi', 'wo', 'would', 'yourselv']
    stop_words.extend(new_stop_words)
    # download both dataset: original to the tool and added by the user
    train_data_df = pd.read_csv('classification_tools/data/training_data_original.csv', encoding = "ISO-8859-1")
    train_data_added = pd.read_csv('classification_tools/data/training_data_added.csv', encoding = "ISO-8859-1")
    # BUG FIX: DataFrame.append returns a new frame; the original discarded
    # it, so user-added training data was never used.
    train_data_df = train_data_df.append(train_data_added, ignore_index = True)
    # preprocess the report
    train_data_df = prp.processing(train_data_df)
    # split the dataset in 5 fold to be able to give a more accurate F0.5 score
    kf = KFold(n_splits=5, shuffle = True, random_state=42)
    reports = train_data_df[clt.TEXT_FEATURES]
    overall_ttps = train_data_df[clt.ALL_TTPS]
    # get current configuration parameters for post-processing method hanging-node to define new thresholds
    parameters = joblib.load("classification_tools/data/configuration.joblib")
    c = parameters[1][0]
    d = parameters[1][1]
    permutations = combinations(c, d)
    f05_NO = [] #list of f0.5 score for all techniques predictions sets without post-processing
    f05_HN = [] #list of f0.5 score for all techniques predictions sets with hanging node post-processing
    f05_CP = [] #list of f0.5 score for all techniques predictions sets with confidence propagation post-processing
    # retrieve minimum and maximum probabilities to use in MinMaxScaler
    min_prob_tactics = 0.0
    max_prob_tactics = 0.0
    min_prob_techniques = 0.0
    max_prob_techniques = 0.0
    i = 6 # print progress bar counter
    for index1, index2 in kf.split(reports, overall_ttps):
        # splits the dataset according to the kfold split into training and testing sets, and data and labels
        reports_train, reports_test = reports.iloc[index1], reports.iloc[index2]
        overall_ttps_train, overall_ttps_test = overall_ttps.iloc[index1], overall_ttps.iloc[index2]
        train_reports = reports_train[clt.TEXT_FEATURES]
        test_reports = reports_test[clt.TEXT_FEATURES]
        train_tactics = overall_ttps_train[clt.CODE_TACTICS]
        train_techniques = overall_ttps_train[clt.CODE_TECHNIQUES]
        test_tactics = overall_ttps_test[clt.CODE_TACTICS]
        test_techniques = overall_ttps_test[clt.CODE_TECHNIQUES]
        # Define a pipeline combining a text feature extractor with multi label classifier for the tactics predictions
        pipeline_tactics = Pipeline([
            ('columnselector', prp.TextSelector(key = 'processed')),
            ('tfidf', TfidfVectorizer(tokenizer = prp.LemmaTokenizer(), stop_words = stop_words, max_df = 0.90)),
            ('selection', SelectPercentile(chi2, percentile = 50)),
            ('classifier', OneVsRestClassifier(LinearSVC(penalty = 'l2', loss = 'squared_hinge', dual = True, class_weight = 'balanced'), n_jobs = 1))
        ])
        # train the model and predict the tactics
        pipeline_tactics.fit(train_reports, train_tactics)
        pred_tactics = pipeline_tactics.predict(test_reports)
        predprob_tactics = pipeline_tactics.decision_function(test_reports)
        # track global min/max decision values for later MinMaxScaler use
        if np.amin(predprob_tactics) < min_prob_tactics:
            min_prob_tactics = np.amin(predprob_tactics)
        if np.amax(predprob_tactics) > max_prob_tactics:
            max_prob_tactics = np.amax(predprob_tactics)
        if cmd:
            print_progress_bar(i)
        # Define a pipeline combining a text feature extractor with multi label classifier for the techniques predictions
        pipeline_techniques = Pipeline([
            ('columnselector', prp.TextSelector(key = 'processed')),
            ('tfidf', TfidfVectorizer(tokenizer = prp.StemTokenizer(), stop_words = stop_words, min_df = 2, max_df = 0.99)),
            ('selection', SelectPercentile(chi2, percentile = 50)),
            ('classifier', OneVsRestClassifier(LinearSVC(penalty = 'l2', loss = 'squared_hinge', dual = False, max_iter = 1000, class_weight = 'balanced'), n_jobs = 1))
        ])
        # train the model and predict the techniques
        pipeline_techniques.fit(train_reports, train_techniques)
        pred_techniques = pipeline_techniques.predict(test_reports)
        predprob_techniques = pipeline_techniques.decision_function(test_reports)
        if np.amin(predprob_techniques) < min_prob_techniques:
            min_prob_techniques = np.amin(predprob_techniques)
        if np.amax(predprob_techniques) > max_prob_techniques:
            max_prob_techniques = np.amax(predprob_techniques)
        i+=2
        if cmd:
            print_progress_bar(i)
        # calculate the F0.5 score for each type of post processing and append to the list to keep track over the different folds
        f05_NO.append(fbeta_score(test_techniques, pred_techniques, beta = 0.5, average = 'macro'))
        f05_HN.extend(hanging_node_threshold_comparison(pred_tactics, predprob_tactics, pred_techniques, predprob_techniques, test_techniques, permutations))
        i+=2
        if cmd:
            print_progress_bar(i)
        CPres, _ = confidence_propagation(predprob_tactics, pred_techniques, predprob_techniques)
        i+=2
        if cmd:
            print_progress_bar(i)
        f05_CP.append(fbeta_score(test_techniques, CPres, beta = 0.5, average = 'macro'))
        i+=2
    save_post_processing_comparison=[]
    # find the F0.5 average for each post-processing
    fb05_NO_avg = np.mean(f05_NO)
    fb05_CP_avg = np.mean(f05_CP)
    best_HN=[]
    fb05_Max_HN_avg = 0
    if cmd:
        print_progress_bar(48)
    # pick the hanging-node threshold pair with the best cross-fold average
    for ps in permutations:
        fold_scores = []  # renamed from ``sum`` (shadowed the builtin)
        for prhn in f05_HN:
            if ps == prhn[0]:
                fold_scores.append(prhn[1])
        avg_temp = np.mean(fold_scores)
        if avg_temp >= fb05_Max_HN_avg:
            fb05_Max_HN_avg = avg_temp
            best_HN = ps
    # define the best post-processing based on the F0.5 score average
    if fb05_NO_avg >= fb05_CP_avg and fb05_NO_avg >= fb05_Max_HN_avg:
        save_post_processing_comparison = ["N"]
    elif fb05_CP_avg >= fb05_Max_HN_avg and fb05_CP_avg >= fb05_NO_avg:
        save_post_processing_comparison = ["CP"]
    else:
        save_post_processing_comparison = ["HN"]
    save_post_processing_comparison.extend([best_HN, [min_prob_tactics, max_prob_tactics], [min_prob_techniques, max_prob_techniques]])
    # save the results
    joblib.dump(save_post_processing_comparison, "classification_tools/data/configuration.joblib")
    if cmd:
        print_progress_bar(50)
        print()
def set_kernel(kernel, **kwargs):
    """Resolve a kernel specification into a gpytorch kernel instance.

    Parameters
    ----------
    kernel : str or :obj:`gpytorch.kernels`
        Name of a supported kernel (e.g. ``'RBFKernel'``) or an already
        constructed gpytorch kernel instance, which is returned as-is.
    **kwargs : dict
        Keyword arguments forwarded to the kernel constructor.

    Returns
    -------
    out : :obj:`gpytorch.kernels`
        A kernel instance, wrapped in ``ScaleKernel`` where supported.

    Raises
    ------
    ValueError
        If ``kernel`` is a string that does not name a supported kernel.
    TypeError
        If ``kernel`` is neither a string nor a gpytorch kernel.
    """
    if isinstance(kernel, str):
        # Name -> class dispatch; all of these are wrapped in ScaleKernel.
        scaled_kernels = {
            'CosineKernel': CosineKernel,
            'LinearKernel': LinearKernel,
            'MaternKernel': MaternKernel,
            'PeriodicKernel': PeriodicKernel,
            'RBFKernel': RBFKernel,
            'RQKernel': RQKernel,
        }
        if kernel in scaled_kernels:
            return ScaleKernel(scaled_kernels[kernel](**kwargs))
        if kernel == 'SpectralMixtureKernel':
            # SpectralMixtureKernel does not support ScaleKernel wrapping
            return SpectralMixtureKernel(**kwargs)
        raise ValueError(f'Unsupported kernel name: {kernel!r}')
    if kernels.__name__ in str(type(kernel)):
        # Already a gpytorch kernel instance: pass it through unchanged.
        return kernel
    # Previously this fell through and silently returned None.
    raise TypeError(f'kernel must be a str or a gpytorch kernel, got {type(kernel)!r}')
def prepare_multi(multi):
    """Parse a formatted multi-device string into the global ENTRIES list.

    Parameters
    ----------
    multi : str
        A comma-separated string of devices and rules to update.
        Format: `devicename|mac|rule`.
        Example: `mediaserver||plex_rule,|4A:DA:61:1C:B5:24|vpn_rule`.

    Returns
    -------
    None. Only populates ENTRIES on the first call (no-op when ENTRIES
    already holds data); malformed entries are logged and skipped.
    """
    if ENTRIES:
        return
    items = multi.split(',')
    log(f'MULTI => Found {len(items)} entries.', 2)
    for raw in items:
        fields = raw.split('|')
        if len(fields) != 3:
            log(f'MULTI => Entry {fields} does not follow the required format. Skipping.', 0)
            continue
        hostname, mac, rule = fields
        ENTRIES.append({
            'hostname': hostname,
            'mac': mac,
            'rule': rule
        })
def holding_period_return(multivariate_df: pd.DataFrame, lag: int, ending_lag: int = 0, skip_nan: bool = False):
    """
    Compute the rolling holding period return for every column.

    For each stock: HPR = Price(t - ending_lag) / Price(t - lag) - 1

    :param multivariate_df: DataFrame of prices, one series per column
    :param lag: int, lookback used for the starting price
    :param ending_lag: int, shifts the final observation backwards
    :param skip_nan: bool, whether NaN entries are skipped
    :return: DataFrame of holding period returns
    """
    call_kwargs = dict(
        multivariate_df=multivariate_df,
        func=_holding_period_return,
        return_lag=None,
        skip_nan=skip_nan,
        lag=lag,
        ending_lag=ending_lag,
    )
    return _general_dataframe_function(**call_kwargs)
def test_slim_eval_nonuniform(benchmark):
    """Benchmark slim_eval_fun with non-uniform core tensors.

    Delegates to the shared runner with uniform=False; `benchmark` is the
    pytest-benchmark fixture passed through to the runner.
    """
    slim_eval_runner(benchmark, uniform=False)
def _unpack_var(var):
    """
    Parse a single ``name : value`` attribute from an AFNI HEAD entry.

    Parameters
    ----------
    var : str
        Entry from HEAD file

    Returns
    -------
    name : str
        Name of attribute
    value : object
        Value of attribute

    Examples
    --------
    >>> var = "type = integer-attribute\\nname = BRICK_TYPES\\ncount = 1\\n1\\n"
    >>> name, attr = _unpack_var(var)
    >>> print(name, attr)
    BRICK_TYPES 1
    >>> var = "type = string-attribute\\nname = TEMPLATE_SPACE\\ncount = 5\\n'ORIG~"
    >>> name, attr = _unpack_var(var)
    >>> print(name, attr)
    TEMPLATE_SPACE ORIG
    """
    err_msg = ('Please check HEAD file to ensure it is AFNI compliant. '
               f'Offending attribute:\n{var}')
    # Exactly one type and one name line must be present in the entry.
    type_matches = TYPE_RE.findall(var)
    name_matches = NAME_RE.findall(var)
    if len(type_matches) != 1:
        raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}')
    if len(name_matches) != 1:
        raise AFNIHeaderError(f'Invalid attribute name entry in HEAD file. {err_msg}')
    # Unknown types fall back to str handling.
    caster = _attr_dic.get(type_matches[0], str)
    # The payload is everything after the three header lines.
    value = ' '.join(var.strip().splitlines()[3:])
    if caster is str:
        # AFNI string attributes will always start with open single quote and
        # end with a tilde (NUL). These attributes CANNOT contain tildes (so
        # stripping is safe), but can contain single quotes (so we replace)
        value = value.replace('\'', '', 1).rstrip('~')
    else:
        try:
            value = [caster(tok) for tok in value.split()]
        except ValueError:
            raise AFNIHeaderError('Failed to read variable from HEAD file '
                                  f'due to improper type casting. {err_msg}')
    # Single-element values are unwrapped to a scalar.
    return name_matches[0], value[0] if len(value) == 1 else value
def cmd_ok(packet: Packet, cmd: int,
           writer: io.BufferedWriter = None) -> bool:
    """
    Validate that a received packet exists and carries the expected command.

    Returns True if the packet is okay; otherwise logs the problem and,
    when the command is merely incorrect (and a writer is supplied),
    replies to the client with a BAD_CMD packet.
    """
    if packet is None:
        Log.debug('Failed to read packet!')
        return False
    if packet.cmd != cmd:
        Log.err('Packet command incorrect! '
                f'Expected: "{protocol_utils.DEBUG_COMMANDS[cmd]}", '
                f'Got: "{protocol_utils.DEBUG_COMMANDS[packet.cmd]}"')
        if writer is not None:
            send_packet(writer, protocol_utils.Commands.BAD_CMD)
        return False
    return True
def get_representation(keypoint_coordinates: torch.Tensor,
                       image: torch.Tensor,
                       feature_map: torch.Tensor) -> (torch.Tensor, torch.Tensor):
    """
    Build a 5-dim descriptor per sample for one key-point by repeatedly
    eroding the binarised feature map and summing the masked image
    intensity after each erosion step.

    :param keypoint_coordinates: Tensor of key-point coordinates in (N, 2/3)
    :param image: Tensor of current image in (N, C, H, W)
    :param feature_map: Tensor of feature map for key-point in (N, H', W')
    :return: Tuple of (features, laplacian_sum); features is (N, 5) holding
        (x, y, last, second-last, third-last intensity sums) and
        laplacian_sum is the per-sample Laplacian sum of the first eroded
        masked image.
    """
    N, C, H, W = image.shape
    # Binarise the feature map; torch.round is an implicit 0.5 threshold.
    # (Removed the previously unused `alpha = 0.5` constant.)
    mask = torch.round(feature_map).unsqueeze(1).to(image.device)  # (N, 1, H', W')
    # Erode iteratively until the masked intensity vanishes.
    intensities = []
    erosion_kernel = torch.ones(size=(3, 3)).to(image.device)
    _img = mask
    first_iteration = True
    laplacian_sum = None
    while True:
        # Erode the current mask/image, upsample to image size, re-mask.
        _morphed = erosion(_img,
                           kernel=erosion_kernel,
                           engine='convolution')
        _morphed = F.interpolate(input=_morphed, size=(H, W))
        _img = torch.mul(_morphed, image)
        if first_iteration:
            # The Laplacian is taken only of the first eroded masked image.
            laplacian_img = laplacian(input=_img, kernel_size=3)
            laplacian_sum = laplacian_img.sum(dim=(1, 2, 3))
            first_iteration = False
        intensity = _img.sum(dim=(1, 2, 3))
        intensities.append(intensity)
        # NOTE(review): assumes erosion eventually drives the mean intensity
        # to ~0; otherwise this loop does not terminate — confirm inputs.
        if - 1e-3 <= intensity.mean() <= 1e-3:
            break
    # Assemble (x, y, last three intensity sums); when fewer than three
    # erosion steps ran, the last value is repeated as padding.
    features = torch.empty(size=(image.shape[0], 5)).to(image.device)
    for n in range(image.shape[0]):
        features[n, ...] = torch.tensor([
            keypoint_coordinates[n, 0],
            keypoint_coordinates[n, 1],
            intensities[-1][n],
            intensities[-2][n] if len(intensities) >= 2 else intensities[-1][n],
            intensities[-3][n] if len(intensities) >= 3 else intensities[-1][n]
        ])
    return features, laplacian_sum
def test_create_full_point_variable_indexes_slices(
        double_pendulum_phase_backend_fixture):
    """Slices for endpoint Pycollo variables are correctly indexed."""
    phase_backend = double_pendulum_phase_backend_fixture
    # The slice attributes only exist after symbols are created and
    # preprocessed, so run the full pipeline in order.
    phase_backend.create_variable_symbols()
    phase_backend.preprocess_variables()
    phase_backend.create_full_point_variable_indexes_slices()
    # 8 state (y) endpoints, 1 integral (q), 2 time (t) endpoints;
    # qt spans the q and t ranges, and the y/qt split sits at index 8.
    assert phase_backend.y_point_full_slice == slice(0, 8)
    assert phase_backend.q_point_full_slice == slice(8, 9)
    assert phase_backend.t_point_full_slice == slice(9, 11)
    assert phase_backend.qt_point_full_slice == slice(8, 11)
    assert phase_backend.y_point_qt_point_full_split == 8
def setup_proxy(whois_https_proxy):
    """Install a global HTTP proxy for socket traffic when one is configured.

    :param whois_https_proxy: proxy URL (e.g. ``http://host:port``) or a
        falsy value to leave networking untouched
    :return: None; monkey-patches socket.socket with the socks socket
    """
    if not whois_https_proxy:
        return
    parsed = urlparse.urlparse(whois_https_proxy)
    socks.set_default_proxy(socks.PROXY_TYPE_HTTP, parsed.hostname, parsed.port)
    socket.socket = socks.socksocket
async def async_setup(opp: OpenPeerPowerType, config: ConfigType):
    """Set up the System Health component.

    Registers the websocket `handle_info` command and always returns True
    to signal a successful setup to the component loader.
    """
    opp.components.websocket_api.async_register_command(handle_info)
    return True
def getSoup(url: str, ftrs: str = "html5lib") -> bsp:
    """
    Download `url` and parse the response body into a BeautifulSoup object.

    :param url: address to fetch
    :param ftrs: parser/features string handed to BeautifulSoup
    :return: parsed soup (bsp instance)
    """
    return bsp(requests.get(url).content, ftrs)
def _fit_HoRT(T_ref, HoRT_ref, a_low, a_high, T_mid):
    """Fit a[5] coefficient in a_low and a_high attributes given the
    dimensionless enthalpy

    Parameters
    ----------
    T_ref : float
        Reference temperature in K
    HoRT_ref : float
        Reference dimensionless enthalpy
    a_low : iterable of float
        NASA polynomial coefficients for the low-temperature range
    a_high : iterable of float
        NASA polynomial coefficients for the high-temperature range
    T_mid : float
        Temperature to fit the offset

    Returns
    -------
    a6_low_out : float
        Lower a6 value for NASA polynomial
    a6_high_out : float
        Higher a6 value for NASA polynomial, corrected so both ranges
        agree at T_mid
    """
    # Solve H/RT(T_ref) = get_nasa_HoRT(T_ref) + a6/T_ref for a6 in each range
    a6_low_out = (HoRT_ref - get_nasa_HoRT(a=a_low, T=T_ref)) * T_ref
    a6_high = (HoRT_ref - get_nasa_HoRT(a=a_high, T=T_ref)) * T_ref
    # Correcting for offset
    H_low_last_T = get_nasa_HoRT(a=a_low, T=T_mid) + a6_low_out / T_mid
    H_high_first_T = get_nasa_HoRT(a=a_high, T=T_mid) + a6_high / T_mid
    H_offset = H_low_last_T - H_high_first_T
    # Shift the high-range offset so the two ranges are continuous at T_mid
    a6_high_out = T_mid * (a6_high / T_mid + H_offset)
    return a6_low_out, a6_high_out
def load_scipy_special__cephes(finder: ModuleFinder, module: Module) -> None:
    """
    The scipy.special._cephes is an extension module and the scipy module
    imports * from it in places; advertise the global names that are used
    in order to avoid spurious errors about missing modules.
    """
    # Only `gammaln` is referenced via star-import elsewhere, so declaring
    # it is enough to silence the finder for this extension module.
    module.AddGlobalName("gammaln")
def get_q_k_size(database,elph_save):
    """
    Get number of k and q points in the (IBZ) grids

    :param database: path holding the SAVE directory with ns.db1
    :param elph_save: path to the el-ph save directory (see NOTE below)
    :return: tuple (Nq, Nk)
    """
    # kpoints: read the K-POINTS variable from the netCDF SAVE database
    db = Dataset(database+"/SAVE/ns.db1")
    Nk = len(db.variables['K-POINTS'][:].T)
    db.close()
    # qpoints: counted as the number of s.dbph_0* fragment files
    # NOTE(review): `elph_save` is never used — the directory is hard-coded
    # to './elph_dir'. Presumably this glob should be rooted at elph_save;
    # confirm against callers before changing.
    Nq = len(glob('./elph_dir/s.dbph_0*'))
    return Nq,Nk
def false(feedback, msg, comment, alias_used="false"):
    """
    Marks a post as a false positive

    :param feedback: feedback text forwarded to Metasmoke
    :param msg: chat message that contains the report being acted on
    :param comment: optional auto-comment to post on the flagged post
    :param alias_used: command alias, used to look up the feedback type
    :return: String describing what was registered (empty for silent types)
    :raises CmdException: if `msg` is not a report message
    """
    post_data = get_report_data(msg)
    if not post_data:
        raise CmdException("That message is not a report.")
    post_url, owner_url = post_data
    # Resolve the feedback behaviour (e.g. whitelisting) from the alias.
    feedback_type = FALSE_FEEDBACKS[alias_used]
    feedback_type.send(post_url, feedback)
    post_id, site, post_type = fetch_post_id_and_site_from_url(post_url)
    add_false_positive((post_id, site))
    # Adjust the post owner's standing depending on the feedback type.
    user = get_user_from_url(owner_url)
    if user is not None:
        if feedback_type.blacklist:
            add_whitelisted_user(user)
            result = "Registered " + post_type + " as false positive and whitelisted user."
        elif is_blacklisted_user(user):
            remove_blacklisted_user(user)
            result = "Registered " + post_type + " as false positive and removed user from the blacklist."
        else:
            result = "Registered " + post_type + " as false positive."
    else:
        result = "Registered " + post_type + " as false positive."
    # Best-effort deletion of the report message; room 11540 is exempt
    # (presumably a room where reports must be kept — confirm).
    try:
        if msg.room.id != 11540:
            msg.delete()
    except Exception:  # I don't want to dig into ChatExchange
        pass
    if comment:
        Tasks.do(Metasmoke.post_auto_comment, comment, feedback.owner, url=post_url)
    return result if not feedback_type.always_silent else ""
def so3_to_SO3_(so3mat):
    """
    Convert so(3) to SO(3)

    Parameters
    ----------
    so3mat (tf.Tensor):
        so(3)
        N x 3 x 3

    Returns
    ------
    ret (tf.Tensor):
        SO(3)
        N x 3 x 3
    """
    omgtheta = so3_to_vec(so3mat)
    # Split the batch into near-zero rotations (c_1) and the rest (c_2) so
    # each case can be handled with a single vectorised expression.
    c_1 = near_zero(tf.norm(omgtheta,axis=1))
    c_2 = tf.math.logical_not(c_1)
    b_1 = tf.cast(c_1, tf.int32)
    b_2 = tf.cast(c_2, tf.int32)
    # Remember the original batch positions of each partition so the
    # results can be stitched back into the input order below.
    idx_1 = tf.cast(tf.squeeze(tf.where(b_1), axis=1), tf.int32)
    idx_2 = tf.cast(tf.squeeze(tf.where(b_2), axis=1), tf.int32)
    partitions = b_1*0 + b_2*1
    partitioned_inp = tf.dynamic_partition(so3mat, partitions, 2)
    inp_1 = partitioned_inp[0]
    inp_2 = partitioned_inp[1]
    # Near-zero rotations map to the identity matrix.
    ret_1 = tf.tile( tf.expand_dims(tf.eye(3), axis=0), tf.stack([tf.shape(idx_1)[0], 1, 1], 0))
    # General case: normalise by the rotation angle and apply the
    # exponential-map expansion I + sin(t)*W + (1-cos(t))*W^2.
    omgtheta_2 = so3_to_vec(inp_2)
    theta_2 = tf.expand_dims(angvel_to_axis_ang(omgtheta_2)[1], axis=1)
    omgmat_2 = inp_2 / theta_2
    ret_2 = tf.eye(3) + tf.sin(theta_2) * omgmat_2 + (1 - tf.cos(theta_2)) * tf.matmul(omgmat_2,omgmat_2)
    # Re-interleave both partitions into the original batch order.
    rets = [ret_1,ret_2]
    ids = [idx_1,idx_2]
    return tf.dynamic_stitch(ids,rets)
def set_non_keyable(node_name, attributes):
    """
    Sets the given attributes of the given node name as non keyable attributes

    Compound double3 attributes (e.g. translate) also get their X/Y/Z
    children processed.

    :param node_name: str, name of a Maya node
    :param attributes: list<str>, list of attributes in the node that we want to set as non keyable attributes
    """
    # Use an explicit worklist instead of appending to the list being
    # iterated (same traversal order, but no mutate-while-iterating).
    pending = python.force_list(attributes)
    index = 0
    while index < len(pending):
        attr = pending[index]
        index += 1
        name = '{}.{}'.format(node_name, attr)
        maya.cmds.setAttr(name, k=False, cb=True)
        if maya.cmds.getAttr(name, type=True) == 'double3':
            # Queue the compound's children so they are processed too.
            pending.append('{}X'.format(attr))
            pending.append('{}Y'.format(attr))
            pending.append('{}Z'.format(attr))
def batch_get_item(RequestItems=None, ReturnConsumedCapacity=None):
    """
    Stub for the DynamoDB ``BatchGetItem`` operation.

    ``BatchGetItem`` returns the attributes of one or more items from one or
    more tables, identified by primary key. A single call can fetch up to
    100 items / 16 MB of data; anything it could not process is reported
    back in ``UnprocessedKeys`` so the caller can retry. Reads are
    eventually consistent by default (set ``ConsistentRead`` per table for
    strong consistency), items are retrieved in parallel, and results come
    back in no particular order. Nonexistent items are simply omitted while
    still consuming minimum read capacity.

    :type RequestItems: dict
    :param RequestItems: [REQUIRED] Map of table name to a descriptor with:
        ``Keys`` (required list of full primary-key maps in AttributeValue
        form, e.g. ``{'S': 'Hello'}``, ``{'N': '123.45'}``, ``{'B': b'..'}``,
        set/map/list/NULL/BOOL variants), ``ConsistentRead``,
        ``ProjectionExpression`` (comma-separated attributes to return),
        ``ExpressionAttributeNames`` (``#name`` substitutions for reserved
        words / special characters) and the legacy ``AttributesToGet``.
        Each table name may appear only once per request.

    :type ReturnConsumedCapacity: string
    :param ReturnConsumedCapacity: ``'INDEXES'`` | ``'TOTAL'`` | ``'NONE'`` —
        level of provisioned-throughput detail included in the response.

    :rtype: dict
    :return: ``{'Responses': {table: [item, ...]}, 'UnprocessedKeys': {...},
        'ConsumedCapacity': [...]}`` where each item/key uses the same
        AttributeValue encoding as the request.

    See the AWS API documentation for BatchGetItem for the complete
    request/response syntax.
    """
    pass
def find_worst_offenders(
    all_resource_type_stats: "Dict[str, ResourceTypeStats]",
    version: str,
) -> "Dict[str, ResourceTypeStats]":
    """
    Finds the resource types with the worst polymorphing and nesting.

    Only resource types that have statistics recorded for ``version`` are
    considered. Ties keep the first resource type in iteration order
    (matching the previous strict-greater-than scan).

    :param all_resource_type_stats: mapping of resource type name to its
        collected stats (``counts``, ``shapes``, ``depths`` keyed by version)
    :param version: spec version to rank by
    :return: dict with keys ``version`` (the version string) plus
        ``most_polymorphic``, ``deepest_by_mean`` and ``deepest_by_max``
        (each a ResourceTypeStats)
    :raises ValueError: if no resource type has stats for ``version``
        (previously this crashed with ``KeyError: None``)
    """
    eligible = {
        name: stats
        for name, stats in all_resource_type_stats.items()
        if version in stats.counts
    }
    if not eligible:
        raise ValueError(f"No resource type has statistics for version {version!r}")
    # max() keeps the first maximum, matching the original tie-breaking.
    most_polymorphic = max(eligible, key=lambda n: len(eligible[n].shapes[version]))
    deepest_by_mean = max(eligible, key=lambda n: eligible[n].depths[version].mean())
    deepest_by_max = max(eligible, key=lambda n: eligible[n].depths[version].max())
    return {
        "version": version,
        "most_polymorphic": eligible[most_polymorphic],
        "deepest_by_mean": eligible[deepest_by_mean],
        "deepest_by_max": eligible[deepest_by_max],
    }
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFLow loss and optimizer operations.

    Uses the TF1 graph API: ops are created here and executed later in a
    session.

    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    #Find logits --> reshape last layer so that rows represents all pixels and
    #columns represents classes
    logits = tf.reshape(nn_last_layer, (-1, num_classes), name="fcn_logits")
    correct_label_reshaped = tf.reshape(correct_label, (-1, num_classes))
    #calculate distance from actual labels using cross entropy
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label_reshaped[:])
    #take mean for total loss
    loss_op = tf.reduce_mean(cross_entropy, name="fcn_loss")
    #optimizer to reduce loss
    train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_op, name="fcn_train_op")
    return logits, train_op, loss_op
async def all_pairs(factory, weth, dai, wbtc, paused_token):
    """all_pairs set up a very specific arbitrage opportunity.

    We want a opportunity that requires less then 2 WETH and provides significant profit as
    to be able to separate profit from gas costs. If the numbers do not make sense it is because
    they are crafted to produce a high arbitrage opportunity and high slippage.

    Returns the four pairs wrapped via ``create_pool``, with
    ``paused_token`` paused so its pair is untradeable.
    """
    # NOTE(review): `small` and `large` are module-level liquidity scale
    # constants defined elsewhere in this file — confirm their values.
    # WETH/DAI pool priced around 300 DAI per WETH.
    p0 = await factory.setup_pair(
        [weth, dai],
        [
            BigNumber(small / 500 / 300.0),
            BigNumber(small / 500),
        ],
    )
    # WBTC/DAI pool.
    p1 = await factory.setup_pair(
        [wbtc, dai],
        [
            BigNumber(large / 10000.0),
            BigNumber(large / 10),
        ],
    )
    # WBTC/WETH pool; the +31000 skew creates the arbitrage opportunity.
    p2 = await factory.setup_pair(
        [wbtc, weth],
        [
            BigNumber(large / 10000.0),
            BigNumber(large / 300.0 + 31000),
        ],
    )
    # Pool whose token is paused below, to exercise untradeable pairs.
    p3 = await factory.setup_pair(
        [paused_token, weth],
        [
            BigNumber(large / 10000.0),
            BigNumber(large / 285.0),
        ],
    )
    paused_token.pause()
    return await async_map(create_pool, [p0, p1, p2, p3])
def meeting_point(a, b, window=100, start=0):
    """
    Determines the point where the moving average of a meets that of b.

    :param a: first sequence
    :param b: second sequence
    :param window: moving-average window length
    :param start: indices <= start are ignored (strict comparison)
    :return: first index after `start` where avg(a) exceeds avg(b), or -1
    """
    kernel = np.ones(window) / window
    avg_a = np.convolve(a, kernel, mode='valid')
    avg_b = np.convolve(b, kernel, mode='valid')
    # Compare only over the overlapping range (zip semantics).
    n = min(avg_a.size, avg_b.size)
    crossed = (np.arange(n) > start) & (avg_a[:n] > avg_b[:n])
    hits = np.flatnonzero(crossed)
    return int(hits[0]) if hits.size else -1
def fixed_dictionaries(
    mapping: Dict[T, SearchStrategy[Ex]],
    *,
    optional: Optional[Dict[T, SearchStrategy[Ex]]] = None,
) -> SearchStrategy[Dict[T, Ex]]:
    """Generates a dictionary of the same type as mapping with a fixed set of
    keys mapping to strategies. ``mapping`` must be a dict subclass.

    Generated values have all keys present in mapping, with the
    corresponding values drawn from mapping[key]. If mapping is an
    instance of OrderedDict the keys will also be in the same order,
    otherwise the order is arbitrary.

    If ``optional`` is passed, the generated value *may or may not* contain each
    key from ``optional`` and a value drawn from the corresponding strategy.

    Examples from this strategy shrink by shrinking each individual value in
    the generated dictionary, and omitting optional key-value pairs.
    """
    check_type(dict, mapping, "mapping")
    for k, v in mapping.items():
        check_strategy(v, "mapping[%r]" % (k,))
    if optional is not None:
        check_type(dict, optional, "optional")
        for k, v in optional.items():
            check_strategy(v, "optional[%r]" % (k,))
        if type(mapping) != type(optional):
            raise InvalidArgument(
                "Got arguments of different types: mapping=%s, optional=%s"
                % (nicerepr(type(mapping)), nicerepr(type(optional)))
            )
        if set(mapping) & set(optional):
            raise InvalidArgument(
                "The following keys were in both mapping and optional, "
                "which is invalid: %r" % (set(mapping) & set(optional))
            )
        # BUG FIX: this return was previously dedented to function level,
        # making it unconditional (crashing for optional=None) and leaving
        # the FixedKeysDictStrategy return below unreachable.
        return FixedAndOptionalKeysDictStrategy(mapping, optional)
    return FixedKeysDictStrategy(mapping)
def estimate_R0(model, curves: pd.DataFrame, method="OLS", **kwargs) -> ValueStd:
    """
    Estimate R0 from epidemic curves and model.
    {args}

    Returns:
        A ValueStd with R0 and its associated standard deviation.

    See Also:
        naive_R0
        OLS_R0
    """
    # NOTE: the '{args}' placeholder above is presumably filled by a
    # docstring-templating decorator elsewhere — confirm before editing it.
    # Dispatch to the estimator registered under `method` (e.g. "OLS");
    # an unknown method raises KeyError from the registry lookup.
    return METHODS_R0[method](model, curves, **kwargs)
async def test_discovery() -> None:
    """Test a successful call to discovery."""
    session = Mock()
    # Stub the raw discovery endpoint with two bridge entries.
    with patch(
        "pydeconz.utils.request",
        new=AsyncMock(
            return_value=[
                {
                    "id": "123456FFFFABCDEF",
                    "internalipaddress": "host1",
                    "internalport": "port1",
                },
                {
                    "id": "234567BCDEFG",
                    "internalipaddress": "host2",
                    "internalport": "port2",
                },
            ]
        ),
    ):
        response = await utils.discovery(session)

    # The first raw id '123456FFFFABCDEF' is expected to come back as
    # '123456ABCDEF' — discovery() apparently strips the FFFF filler bytes.
    assert [
        {"bridgeid": "123456ABCDEF", "host": "host1", "port": "port1"},
        {"bridgeid": "234567BCDEFG", "host": "host2", "port": "port2"},
    ] == response
def get_bone_list(armature, layer_list):
    """Collect the names of all bones that live on any of the selected layers.

    :param armature: armature object whose bones are inspected
    :param layer_list: layers to match against each bone's layer flags
    :return: list of matching bone names
    """
    return [
        bone.name
        for bone in armature.data.bones
        if is_valid_layer(bone.layers, layer_list)
    ]
def clean_packages_list(packages):
    """
    Remove comment lines (those starting with '#') from the package list.

    :param packages: iterable of package-list lines
    :return: list of lines that are not comments
    """
    return [line for line in packages if not line.startswith("#")]
def IndividualBuilder(size, possList, probList):
    """Build a random individual of the given size.

    Each position starts at 0; for every mutation possibility (in order),
    a random draw against the matching probability decides whether that
    mutation overwrites the position — so later entries in ``possList``
    take precedence when several fire for the same position.

    Args:
        size (int) - the list size to be created
        possList - a list of the possible mutations
            types (mutation, deletion,...)
        probList - a list of the probibilities of the possible
            mutations occuring.
    Returns:
        individual (list)
    Raises:
        ValueError: if possList and probList differ in length.
    """
    # Raise a specific exception type (was a bare Exception whose message
    # named nonexistent arguments PossArr/ProbArr).
    if len(possList) != len(probList):
        raise ValueError('len(possList) != len(probList)')
    individual = [0] * size
    # No re-seeding here: the old `random.seed()` call reset the global RNG
    # on every invocation, defeating caller-controlled reproducibility.
    for i in range(size):
        for j in range(len(possList)):
            if random.random() <= probList[j]:
                individual[i] = possList[j]
    return individual
def getConfigId(dsn_string, test_data):
    """Returns the integer ID of the configuration name used in this run.

    Looks up the sqlbench config name in the local bench_config table; if
    absent, inserts a new row and recurses once to fetch the freshly
    generated id.

    :param dsn_string: DSN forwarded to execute_query
    :param test_data: dict containing at least 'config_name'
    :return: int config_id
    """
    # If we have not already done so, we query the local DB for the ID
    # matching this sqlbench config name. If none is there, we insert
    # a new record in the bench_config table and return the newly generated
    # identifier.
    benchmark_name = test_data['config_name']
    # NOTE(review): queries are built via string interpolation — if
    # config_name can contain quotes or untrusted input this is an SQL
    # injection risk; prefer parameterized queries if execute_query
    # supports them.
    query = "SELECT config_id FROM bench_config WHERE name = '%s'" %benchmark_name
    retcode, result= execute_query(query, dsn_string=dsn_string)
    if len(result) == 0:
        # Insert a new record for this config and return the new ID...
        query = "INSERT INTO bench_config (config_id, name) VALUES (NULL, '%s')" %benchmark_name
        retcode, result= execute_query(query, dsn_string=dsn_string)
        return getConfigId(dsn_string, test_data)
    else:
        config_id= int(result[0][0])
        return config_id
def get_file_open_command(script_path, turls, nthreads):
    """
    Build the command line used to launch the file-open helper script.

    :param script_path: path to script (string).
    :param turls: comma-separated turls (string).
    :param nthreads: number of concurrent file open threads (int).
    :return: full command line (string).
    """
    workdir = os.path.dirname(script_path)
    return f"{script_path} --turls={turls} -w {workdir} -t {nthreads}"
def invert_contactmap(cmap):
    """Method to invert a contact map
    :param :py:obj:`~conkit.core.ContactMap` cmap: the contact map of interest
    :returns: and inverted_cmap: the contact map corresponding with the inverted sequence (1-res_seq) \
       (:py:obj:`~conkit.core.ContactMap`)
    """
    inverted_cmap = ContactMap('inverted')
    # Highest residue index over both residues of every contact; used to
    # mirror residue numbering (i -> N + 1 - i).
    highest_residue_number = max([max(contact.id) for contact in cmap])
    for contact in cmap:
        # Mirror both residue positions while keeping the original score.
        new_contact = Contact(highest_residue_number + 1 - contact.res1_seq,
                              highest_residue_number + 1 - contact.res2_seq,
                              contact.raw_score)
        inverted_cmap.add(new_contact)
    inverted_cmap.sequence = cmap.sequence
    return inverted_cmap | 5,326,565 |
def main_loop():
    """Loop."""
    # Delegates one iteration to the animation helper; the IR checks and
    # sleep are currently disabled (commented out).
    # myIRHelper.check()
    animation.animation_helper.main_loop()
    # myIRHelper.check()
    # time.sleep(0.1) | 5,326,566 |
def indexes_with_respect_to_y(Y):
    """
    Group the positions of Y by its distinct category values.

    Parameters
    ----------
    Y : numpy array like of one single output.
        Corresponding to a categorical variable.

    Returns
    -------
    List of index arrays, one per category (categories in sorted order,
    as returned by ``np.unique``).
    """
    categories = np.unique(Y)
    # The grouping only makes sense with at least two categories.
    assert categories.shape[0] >= 2
    return [np.where(Y == category)[0] for category in categories]
def rrotate(x, disp):
    """Rotate x's bits to the right by disp.

    32-bit rotate: disp is reduced modulo 32, negative displacements are
    delegated to lrotate(), and the sibling trim() helper masks values to
    32 bits before and after the shift.
    """
    if disp == 0:
        return x
    elif disp < 0:
        return lrotate(x, -disp)
    disp &= 31
    x = trim(x)
    return trim((x >> disp) | (x << (32 - disp))) | 5,326,568 |
def test_space_net_no_crash_not_fitted():
    """Regression test."""
    # predict() before fit() must raise a clear "not fitted" error, and a
    # normal fit/predict cycle must not crash, for both SpaceNet estimators.
    iris = load_iris()
    X, y = iris.data, iris.target
    X, mask = to_niimgs(X, [2, 2, 2])
    for model in [SpaceNetRegressor, SpaceNetClassifier]:
        assert_raises_regex(RuntimeError,
                            "This %s instance is not fitted yet" % (
                                model.__name__), model().predict, X)
        model(mask=mask, alphas=1.).fit(X, y).predict(X) | 5,326,569 |
def main(cases_fname):
    """
    This method use existing cluster, and then
    for a given cluster launches pods (one pod per case),
    which are read from input file

    :param cases_fname: path to the file listing cases, or None.
    :return: 0 on completion, -1 if no cases file was given.
    """
    if cases_fname is None:
        return -1
    # Cluster/docker settings come from the local JSON config file.
    cfg = read_config("config_cluster.json")
    CID = cfg["CID"]
    ZID = cfg["ZID"]
    mtype = cfg["machine-type"]
    docker = cfg["docker"]
    gcr = cfg["gcr"]
    project = cfg["project"]
    print("From config_cluster.json:")
    print(CID, ZID, mtype, docker, gcr, project)
    print("Reading cases list from {0}".format(cases_fname))
    cases = read_cases(cases_fname)
    print("To compute Cases: {0}".format(len(cases)))
    docker2run = os.path.join(gcr, project, docker) # full path to docker
    for case in cases:
        pod_name = make_json_pod("colpod.json", case, docker2run)
        cmd = "kubectl create -f " + pod_name
        rc = 0
        for k in range(0, 2): # several attempts to make a pod
            rc = subprocess.call(cmd, shell=True)
            if rc == 0:
                # short pause between successful pod creations
                time.sleep(0.5)
                break
        if rc != 0:
            # Give up on this case after the retries are exhausted.
            print("Cannot make case {0}".format(case))
    return 0 | 5,326,570 |
def upload_configuration_to_zk(zookeeper_quorum, solr_znode, config_set, config_set_dir, tmp_dir,
                 java64_home, retry = 5, interval = 10, solrconfig_content = None, jaas_file=None):
  """
  Upload configuration set to zookeeper with solrCloudCli.sh
  At first, it tries to download configuration set if exists into a temporary location, then upload that one to
  zookeeper. If the configuration set does not exist in zookeeper then upload it based on the config_set_dir parameter.
  """
  # Random suffix keeps concurrent invocations from sharing a temp dir.
  random_num = random.random()
  tmp_config_set_dir = format('{tmp_dir}/solr_config_{config_set}_{random_num}')
  solr_cli_prefix = __create_solr_cloud_cli_prefix(zookeeper_quorum, solr_znode, java64_home)
  # Download the existing config set (if any) into the temp dir.
  Execute(format('{solr_cli_prefix} --download-config --config-dir {tmp_config_set_dir} --config-set {config_set} --retry {retry} --interval {interval}'),
          only_if=format("{solr_cli_prefix} --check-config --config-set {config_set} --retry {retry} --interval {interval}"))
  appendableDict = {}
  appendableDict["--jaas-file"] = jaas_file

  if solrconfig_content is not None:
      # Override solrconfig.xml in the downloaded copy before re-uploading.
      File(format("{tmp_config_set_dir}/solrconfig.xml"),
       content=solrconfig_content,
       only_if=format("test -d {tmp_config_set_dir}")
       )
      upload_tmp_config_cmd = format('{solr_cli_prefix} --upload-config --config-dir {tmp_config_set_dir} --config-set {config_set} --retry {retry} --interval {interval}')
      upload_tmp_config_cmd = __append_flags_if_exists(upload_tmp_config_cmd, appendableDict)
      Execute(upload_tmp_config_cmd,
        only_if=format("test -d {tmp_config_set_dir}")
        )
  # Fresh upload from config_set_dir when nothing was downloaded.
  upload_config_cmd = format('{solr_cli_prefix} --upload-config --config-dir {config_set_dir} --config-set {config_set} --retry {retry} --interval {interval}')
  upload_config_cmd = __append_flags_if_exists(upload_config_cmd, appendableDict)
  Execute(upload_config_cmd,
    not_if=format("test -d {tmp_config_set_dir}")
    )
  Directory(tmp_config_set_dir,
            action="delete",
            create_parents=True
            ) | 5,326,571 |
def setup_parameters():
    """
    This function sets up the hyperparameters needed to train the model.
    Returns:
        hyperparameters : Dictionary containing the hyperparameters for the model.
        options : Dictionary containing the options for the dataset location, augmentation, etc.
    Raises:
        SystemExit: if the pre-trained weights file or dataset directory does not exist.
    """
    # Dataset location for training.
    dataset_fp = "/file/path/to/dataset"
    # Indicate if LiDAR data is included. If it is then no padding or augmenting prior to training
    # will be done. If you still want padding then enable it in the model.
    use_lidar = True
    # File with previously trained weights. Leave as None if you don't want to load any
    pre_trained_weights = None
    # Starting epoch
    start_epoch = 1
    # Max number of epochs
    epochs = 80
    # Starting learning rate for the Adam optimizer
    learn_rate = 0.001
    # Adjusts the learning rate by (learn_rate / 10) after every lr_change epochs
    lr_change = 25
    # Weighted Cross Entropy (put 1.0 for each class if you don't want to add weights)
    class_weights = [0.5, 1.0]
    # Indicate if you want to augment the training images and labels
    augment = False
    # Size of the batch fed into the model. Model can handle a larger batch size when training.
    # Batch size used during training.
    training_batch_size = 10
    # Batch size used during validation.
    valid_batch_size = 5
    # Model's learned parameters (i.e. weights and biases) that achieved the lowest loss. Will be
    # saved as "saved_model.pt". A file called "saved_model_last_epoch.pt" will also be saved with
    # the learned parameters in the last completed epoch.
    saved_model = "saved_model"

    ### MODEL PARAMETERS ###
    # Number of input channels
    in_channels = 4
    # Number of output channels
    n_classes = 2
    # How deep the network will be
    depth = 7
    # Number of filters in the first layer (2**wf)
    wf = 6
    # Indicate if you want the model to pad the images back to their original dimensions
    # Images need to be 256x256 if this is set to False
    pad = True
    # Specify if you want to enable batch normalization
    batch_norm = True
    # Supported modes are 'upconv' and 'upsample'
    up_mode = 'upconv'

    # Store the options in a dictionary
    options = {
        'pre_trained_weights': pre_trained_weights,
        'start_epoch': start_epoch,
        'dataset_fp': dataset_fp,
        'saved_model': saved_model,
        'in_channels': in_channels,
        'n_classes': n_classes,
        'augment': augment,
        'use_lidar': use_lidar
    }
    # Store the hyperparameters in a dictionary
    hyperparameters = {
        'epochs': epochs,
        'learn_rate': learn_rate,
        'lr_change': lr_change,
        'class_weights': class_weights,
        'training_batch_size': training_batch_size,
        'valid_batch_size': valid_batch_size,
        'in_channels': in_channels,
        'depth': depth,
        'wf': wf,
        'pad': pad,
        'batch_norm': batch_norm,
        'up_mode': up_mode
    }
    # Make sure the file paths exist
    if pre_trained_weights is not None and not os.path.isfile(pre_trained_weights):
        sys.exit('Error: Pre-trained weights file does not exist')
    if not os.path.isdir(dataset_fp):
        sys.exit('Error: Main file path to the training and validation images does not exist')
    return hyperparameters, options | 5,326,572 |
def main(argv=None):
    """
    Console scripts entry point.

    Parses the CLI arguments, generates a badge (custom text, git tag, or
    coverage percentage) and either saves it to a file or prints it.
    """
    args = parse_args(argv)
    style = DEFAULT_STYLE

    # Validation
    if not args.git and not args.cov and not args.text and not args.value and not args.print_version:
        print("Not valid parameters. -git or -cov or -t and -v")
        sys.exit(1)

    # -t and -v must be given together (custom badges need both).
    if args.text and not args.value or not args.text and args.value:
        print("Not valid parameters. -t and -v required")
        sys.exit(1)

    # Print version
    if args.print_version:
        print("all-badge v{}".format(__version__))
        sys.exit(0)

    if args.color:
        if args.color not in COLORS.keys():
            print("Color not valid.")
            sys.exit(1)

    if args.style:
        if args.style not in STYLES:
            print("Style not valid.")
            sys.exit(1)
        else:
            style = args.style

    # Custom badge
    if args.text and args.value:
        color = args.color if args.color else "green"
        badge = get_badge(args.text, args.value, COLORS[color], style)

    # Git
    if args.git:
        # Get git last tag
        git_tag = get_git_tag()
        git_text = args.text if args.text else "version"
        color = args.color if args.color else "green"
        badge = get_badge(git_text, git_tag, COLORS[color], style)

    # Coverage
    if args.cov:
        # Check for coverage
        if coverage is None:
            print("Error: Python coverage module not installed.")
            sys.exit(1)

        # Generate badge
        try:
            total = get_total()
        except coverage.misc.CoverageException as e:
            print("Error: {} Did you run coverage first?".format(e))
            sys.exit(1)
        coverage_text = args.text if args.text else "coverage"
        # Coverage badges are colored by the score unless plain color asked.
        color = DEFAULT_COLOR if args.plain_color else get_color(total)
        badge = get_badge(coverage_text, "{}%".format(total), color, style)

    # Show or save output
    if args.filepath:
        path = save_badge(badge, args.filepath, args.force)
        if not args.quiet:
            print("Saved badge to {}".format(path))
    else:
        print(badge, end="") | 5,326,573 |
def submit(test_df, test_pred_dict):
    """
    Submit the test results.
    :param test_df: DataFrame of test-set features (must contain userid and feedid)
    :param test_pred_dict: dict of predicted probabilities for each action on the test set
    :return:
    """
    # 1. Join the samples with their predictions (column-wise concat).
    test_pred_df = pd.DataFrame.from_dict(test_pred_dict)
    test_df.reset_index(drop=True, inplace=True)
    test_df = pd.concat([test_df[['userid', 'feedid']], test_pred_df], sort=False, axis=1)

    # 2. Save to a timestamped CSV under SAVE_HOME/submit.
    file_path = join(SAVE_HOME, 'submit', 'submit' + str(int(time.time())) + '.csv')
    test_df.to_csv(file_path, header=True, index=False)
    print('Save to: %s' % file_path)

    return | 5,326,574 |
def search_for_start ( r, X, w0, applythreshold, hf0, pm=(.85,.93,1.,1.07,1.15), storeopt=False, modulation=False, doublemodulation=False):
    """Search for starting values

    Fits three candidate models (no-history Mnh, with-history Mwh and,
    when requested, history-plus-modulation Mhmod) from a grid of perturbed
    starting values and keeps the best (highest log-likelihood) of each.

    :Parameters:
        *r*
            responses
        *X*
            design matrix; the first hf0 columns are the stimulus part
        *w0*
            coarse starting values
        *pm*
            increments or decrements applied to the starting values
        *modulation*, *doublemodulation*
            whether the last 2 (resp. 3) columns of X are modulation terms

    :Returns:
        (Mnh, Mwh, Mhmod) -- Mhmod is [] when no modulation was requested.
    """
    logging.info ( "Initial preoptimization" )
    Mwh = model.history_model ( r, X[:,:hf0],
            applythreshold=applythreshold,
            w0=w0,
            lm=0.1, hf0=hf0, emiter=100 )
    Mnh = model.history_model ( r, X[:,:hf0],
            applythreshold=applythreshold,
            w0=w0,
            lm=0.1, hf0=hf0, emiter=100 )
    M0 = model.history_model ( r, X[:,:hf0],
            applythreshold=applythreshold,
            w0=w0,
            lm=0.1, hf0=hf0, emiter=100 )

    if modulation:
        Mhmod = model.history_model ( r, X[:,:hf0],
                applythreshold=applythreshold,
                w0=w0,
                lm=0.1, hf0=hf0, emiter=100 )

        # add the history weights the model with history
        Mwh.w = np.concatenate ( (Mwh.w,np.zeros(6,'d')) )
        Mwh.X = copy.copy(X)
        Mwh.X = Mwh.X[:,:-2] # only the part that has no modulation

        # add those same weights + modulatory terms to the hmod model
        Mhmod.w = np.concatenate ( (Mhmod.w,np.zeros(15,'d')) )
        Mhmod.X = X

        # to check that the sizes work
        print("X",np.shape(X))
        print("Mwh",Mwh.w,np.shape(Mwh.X))
        print("Mhmod",Mhmod.w,np.shape(Mhmod.X))

    elif doublemodulation:
        Mhmod = model.history_model ( r, X[:,:hf0],
                applythreshold=applythreshold,
                w0=w0,
                lm=0.1, hf0=hf0, emiter=100 )

        # add the history weights to the model with just history
        Mwh.w = np.concatenate ( (Mwh.w,np.zeros(6,'d')) )
        Mwh.X = copy.copy(X)
        Mwh.X = Mwh.X[:,:-3] # only the part that has no modulation

        # add those same weights + modulatory terms to the hmod model
        Mhmod.w = np.concatenate ( (Mhmod.w,np.zeros(24,'d')) )
        Mhmod.X = X

        # to check that the sizes work
        print("X",np.shape(X))
        print("Mwh",Mwh.w,np.shape(Mwh.X))
        print("Mhmod",Mhmod.w,np.shape(Mhmod.X))
    else:
        Mwh.w = np.concatenate ( (Mwh.w,np.zeros(6,'d')) )
        Mwh.X = X
        Mhmod = [] # return empty

    nhind = 0
    whind = 0
    i = 1
    # Grid search: perturb stimulus weights by al and the lapse term by lm,
    # refit each candidate model and keep the best fit found so far.
    for al in pm:
        for lm in pm:
            logging.info ( "::::: Optimizing from starting value %d :::::" % (i,) )
            w0 = M0.w.copy()
            w0[1:hf0] *= al
            p0 = M0.pi.copy()
            p0[0] *= lm;
            p0[-1] = 1-p0[0]-p0[1]

            if modulation or doublemodulation:
                M_ = model.history_model ( r, X,
                        applythreshold=applythreshold,
                        w0=w0, p0=p0, nu0=M0.nu,
                        lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
                if Mhmod.loglikelihood < M_.loglikelihood:
                    logging.info ( "  *model chosen for history + modulation*" )
                    Mhmod = M_
                    whind = i

                if modulation:
                    M_ = model.history_model ( r, X[:,:-2],
                            applythreshold=applythreshold,
                            w0=w0, p0=p0, nu0=M0.nu,
                            lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
                elif doublemodulation:
                    M_ = model.history_model ( r, X[:,:-3],
                            applythreshold=applythreshold,
                            w0=w0, p0=p0, nu0=M0.nu,
                            lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )

                if Mwh.loglikelihood < M_.loglikelihood:
                    logging.info ( "  *model chosen for history*" )
                    Mwh = M_
                    whind = i
            else:
                M_ = model.history_model ( r, X,
                        applythreshold=applythreshold,
                        w0=w0, p0=p0, nu0=M0.nu,
                        lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
                if Mwh.loglikelihood < M_.loglikelihood:
                    logging.info ( "  *model chosen for history*" )
                    Mwh = M_
                    whind = i

            M_ = model.history_model ( r, X[:,:hf0],
                    applythreshold=applythreshold,
                    w0=w0, p0=p0, nu0=M0.nu,
                    lm=0.1, hf0=hf0, verbose=True, emiter=300, storeopt=storeopt )
            if Mnh.loglikelihood < M_.loglikelihood:
                logging.info ( "  *model chosen for independent*" )
                Mnh = M_
                nhind = i
            i += 1

    logging.info ( "Mwh.w = %s\nMnh.w = %s" % (str(Mwh.w),str(Mnh.w)) )
    logging.info ( "Mwh.ll = %g\nMnh.ll = %s" % (Mwh.loglikelihood,Mnh.loglikelihood) )
    logging.info ( "Starting values:\n  with history: %d\n  without history: %d\n" % (whind,nhind) )

    # NOW, THE HISTORY ONLY MODEL HAS SIZE 22!
    print("X",np.shape(X))
    print("Mwh",Mwh.w,np.shape(Mwh.X))
    if modulation:
        print("Mhmod",Mhmod.w,np.shape(Mhmod.X))

    return Mnh,Mwh,Mhmod | 5,326,575 |
def replay(args):
    """
    Starts a replay environment for the given replay directory, including setting up interfaces, running
    a DNS server, and configuring and running an nginx server to serve the requests

    Blocks forever (sleeps in a loop) while the server context is active.
    """
    policy = None
    cert_path = os.path.abspath(args.cert_path) if args.cert_path else None
    key_path = os.path.abspath(args.key_path) if args.key_path else None
    if args.policy:
        # Optional push/preload policy loaded from a JSON file.
        log.debug("reading policy", push_policy=args.policy)
        with open(args.policy, "r") as policy_file:
            policy_dict = json.load(policy_file)
        policy = Policy.from_dict(policy_dict)
    with start_server(
        args.replay_dir,
        cert_path,
        key_path,
        policy,
        cache_time=args.cache_time,
        extract_critical_requests=args.extract_critical_requests,
    ):
        while True:
            # Keep the process (and the server context) alive indefinitely.
            time.sleep(86400) | 5,326,576 |
def point_sample(input, points, align_corners=False, **kwargs):
    """
    A wrapper around :func:`grid_sample` to support 3D point_coords tensors
    Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to
    lie inside ``[0, 1] x [0, 1]`` square.

    Args:
        input (Tensor): Feature map, shape (N, C, H, W).
        points (Tensor): Image based absolute point coordinates (normalized),
            range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).
        align_corners (bool): Whether align_corners. Default: False

    Returns:
        Tensor: Features of `point` on `input`, shape (N, C, P) or
            (N, C, Hgrid, Wgrid).
    """

    def denormalize(grid):
        """Denormalize input grid from range [0, 1] to [-1, 1]
        Args:
            grid (Tensor): The grid to be denormalize, range [0, 1].

        Returns:
            Tensor: Denormalized grid, range [-1, 1].
        """

        return grid * 2.0 - 1.0

    # (N, P, 2) input is temporarily expanded to (N, P, 1, 2) because
    # grid_sample expects a 4-D grid; the extra axis is squeezed back below.
    add_dim = False
    if points.dim() == 3:
        add_dim = True
        points = paddle.unsqueeze(points, axis=2)
    output = F.grid_sample(
        input, denormalize(points), align_corners=align_corners, **kwargs)
    if add_dim:
        output = paddle.squeeze(output, axis=3)
    return output | 5,326,577 |
def compareStringsWithFloats(a,b,numTol = 1e-10, zeroThreshold = sys.float_info.min*4.0, removeWhitespace = False, removeUnicodeIdentifier = False):
  """
    Compares two strings that have floats inside them.  This searches for floating point numbers, and compares them with a numeric tolerance.
    @ In, a, string, first string to use
    @ In, b, string, second string to use
    @ In, numTol, float, the numerical tolerance.
    @ In, zeroThreshold, float, it represents the value below which a float is considered zero (XML comparison only). For example, if zeroThreshold = 0.1, a float = 0.01 will be considered as it was 0.0
    @ In, removeWhitespace, bool, if True, remove all whitespace before comparing.
    @ In, removeUnicodeIdentifier, bool, if True, strip unicode identifiers before comparing.
    @ Out, compareStringWithFloats, (bool,string), (succeeded, note) where succeeded is a boolean that is true if the strings match, and note is a comment on the comparison.
  """
  if a == b:
    return (True,"Strings Match")
  if a is None or b is None: return (False,"One of the strings contain a None")
  if removeWhitespace:
    a = removeWhitespaceChars(a)
    b = removeWhitespaceChars(b)
  if removeUnicodeIdentifier:
    a = removeUnicodeIdentifiers(a)
    b = removeUnicodeIdentifiers(b)
  # splitIntoParts alternates text parts (even indices) and numeric parts
  # (odd indices), so the two lists can be compared element-wise.
  aList = splitIntoParts(a)
  bList = splitIntoParts(b)
  if len(aList) != len(bList):
    return (False,"Different numbers of float point numbers")
  for i in range(len(aList)):
    aPart = aList[i].strip()
    bPart = bList[i].strip()
    if i % 2 == 0:
      #In string
      if aPart != bPart:
        return (False,"Mismatch of "+shortText(aPart,bPart))
    else:
      #In number
      aFloat = float(aPart)
      bFloat = float(bPart)
      # Values below zeroThreshold are treated as exactly zero.
      aFloat = aFloat if abs(aFloat) > zeroThreshold else 0.0
      bFloat = bFloat if abs(bFloat) > zeroThreshold else 0.0
      if abs(aFloat - bFloat) > numTol:
        return (False,"Numeric Mismatch of '"+aPart+"' and '"+bPart+"'")

  return (True, "Strings Match Floatwise") | 5,326,578 |
def main():
    """
    Main entry point of the indicator
    """
    # First, attempt to connect to DBus service
    # If you can't, then give up.
    try:
        bus = dbus.SessionBus()
        dbus_object = bus.get_object(service.DBUS_OBJECT, service.DBUS_PATH)
        dbus_client = dbus.Interface(dbus_object, service.DBUS_INTERFACE)
    # NOTE(review): bare except swallows all errors (including typos) --
    # catching dbus.DBusException would be more precise.
    except:
        print 'Could not connect to D-Bus backend!'
        sys.exit(1)

    # Watchdog monitors configuration changes; indicator drives the UI.
    watchdog = config.GConfWatchdog(dbus_client)

    CuratorIndicator(dbus_client)
    gtk.main() | 5,326,579 |
def login_render(auth_url: str) -> Any:
    """Return login page.

    Arguments:
        auth_url {str} -- Link to last.fm authorization page.
    """
    # timestamp busts template/browser caching on each render.
    return render_template("login.html", auth_url=auth_url, timestamp=time()) | 5,326,580 |
def save_image(file_path, mat, overwrite=True):
    """
    Save 2D data to an image.

    Parameters
    ----------
    file_path : str
        Path to the file (Unix-style separators only).
    mat : int or float
        2D array.
    overwrite : bool
        Overwrite the existing file if True.

    Returns
    -------
    str
        Updated file path.

    Raises
    ------
    ValueError
        If the path uses Windows-style backslash separators.
    """
    if "\\" in file_path:
        raise ValueError(
            "Please use a file path following the Unix convention")
    _, file_ext = os.path.splitext(file_path)
    # Only TIFF can hold raw float data; everything else is rescaled to
    # 8-bit.  The original condition `not (a) or (b)` misgrouped the test
    # and wrongly rescaled ".tiff" files as well.
    if file_ext not in (".tif", ".tiff"):
        # NOTE(review): a constant-valued mat makes max == min and divides
        # by zero here -- callers are assumed to pass non-constant data.
        mat = np.uint8(255 * (mat - np.min(mat)) / (np.max(mat) - np.min(mat)))
    _create_folder(file_path)
    if not overwrite:
        file_path = _create_file_name(file_path)
    image = Image.fromarray(mat)
    try:
        image.save(file_path)
    except IOError:
        print(("Couldn't write to file {}").format(file_path))
        raise
    return file_path
def test_can_remove_agent(volttron_instance):
    """ Confirms that 'volttron-ctl remove' removes agent as expected. """
    assert volttron_instance is not None
    assert volttron_instance.is_running()

    # Install ListenerAgent as the agent to be removed.
    agent_uuid = volttron_instance.install_agent(
        agent_dir=get_examples("ListenerAgent"), start=False)
    assert agent_uuid is not None
    started = volttron_instance.start_agent(agent_uuid)
    assert started is not None
    pid = volttron_instance.agent_pid(agent_uuid)
    assert pid is not None and pid > 0

    # Now attempt removal
    volttron_instance.remove_agent(agent_uuid)

    # Confirm that it has been removed: a removed agent has no pid.
    pid = volttron_instance.agent_pid(agent_uuid)
    assert pid is None | 5,326,582 |
def cleanup_cloud_storage(config, context, vdc):
    """ Cleans up a previously created cloud storage resources

    Deletes every persistent volume in the given virtual datacenter.
    config and context are unused here but kept for the cleanup-hook API.
    """
    log.info(("Removing persistent volumes in "
        "virtual datacenter %s...") % vdc.getName())
    for volume in vdc.listVolumes():
        volume.delete() | 5,326,583 |
def receive_consensus_genome(*, session):
    """
    Receive references to consensus genomes.

    POST receiving/consensus-genome with a JSON body

    Stores the raw request body and responds 204 No Content.
    """
    document = request.get_data(as_text = True)

    LOG.debug(f"Received consensus genome")

    datastore.store_consensus_genome(session, document)

    return "", 204 | 5,326,584 |
async def test_send(
    connection: StaticConnection, connection_id: str, asynchronously_received_messages
):
    """Test send message

    Sends a basicmessage through the admin API and verifies both the
    "sent" confirmation and the message delivered to the recipient.
    """
    sent_message = await connection.send_and_await_reply_async(
        {
            "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-basicmessage/0.1/send",
            "connection_id": connection_id,
            "content": "Your hovercraft is full of eels.",
        }
    )
    [recip_message] = await asynchronously_received_messages()
    assert (
        sent_message["@type"]
        == "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-basicmessage/0.1/sent"
    )
    assert (
        recip_message["@type"]
        == "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/basicmessage/1.0/message"
    )
    assert recip_message["content"] == "Your hovercraft is full of eels." | 5,326,585 |
def switchUnicodeUrlOn(switch):
    """
    Configure whether to use unicode (UTF-8) encoded URLs (default) or
    Latin-1 encoded URLs.
    @param switch: 1 if unicode URLs shall be used, 0 otherwise
    @raise ValueError: if switch is neither 0 nor 1
    """
    # Validate explicitly instead of with `assert`, which is stripped when
    # Python runs with -O and would then silently accept bad input.
    if switch not in (0, 1):
        raise ValueError("Pass boolean argument, please.")
    Constants.CONFIG_UNICODE_URL = switch
def get_genres_from_games(games, their_games):
    """
    Collect the set of genres of the games whose id is in ``their_games``.
    """
    # Each game is a dict carrying at least 'id' and 'Genre'.
    return {game['Genre'] for game in games if game['id'] in their_games}
def set_password(cfg, username, password):
    """Change password of exists MySQL user.

    Exits with status 1 on missing user, bad config, or missing backends;
    a plain MySQL error is only logged.
    """
    try:
        change_password(cfg, username, password)
        LOG.info('Password for user %s changed', username)
    except ProxySQLUserNotFound:
        LOG.error("User not found")
        exit(1)
    except MySQLError as err:
        # NOTE(review): unlike the other branches this one does not exit --
        # presumably intentional (transient DB errors); verify.
        LOG.error('Failed to talk to database: %s', err)
    except (NoOptionError, NoSectionError) as err:
        LOG.error('Failed to parse config: %s', err)
        exit(1)
    except ProxySQLBackendNotFound as err:
        LOG.error('ProxySQL backends not found: %s', err)
        exit(1) | 5,326,588 |
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up an OpenUV sensor based on a config entry."""
    openuv = hass.data[DOMAIN][DATA_OPENUV_CLIENT][entry.entry_id]

    # One binary sensor per condition supported by the OpenUV client.
    binary_sensors = []
    for sensor_type in openuv.binary_sensor_conditions:
        name, icon = BINARY_SENSORS[sensor_type]
        binary_sensors.append(
            OpenUvBinarySensor(
                openuv, sensor_type, name, icon, entry.entry_id))

    # True -> request an immediate state update after adding.
    async_add_entities(binary_sensors, True) | 5,326,589 |
def webfinger(request):
    """
    A thin wrapper around Bridgy Fed's implementation of WebFinger.

    In most cases, this view simply redirects to the same endpoint at Bridgy.
    However, Bridgy does not support the ``mailto:`` and ``xmpp:`` resource
    schemes - quite reasonably, since there's no possible way to discover the
    ``acct:`` they go with! - so resources with those schemes are translated
    locally into an ``https:`` URL representing the same person, and *then*
    redirected to Bridgy.

    Additionally, WebFinger requests with a missing or malformed resource will
    be rejected immediately rather than passed on to Bridgy.

    Note that the translation step will only be applied if there exists a
    :model:`users.User` with matching email or XMPP address. Otherwise, the
    original resource will be preserved in the redirect - and likely fail to
    find anything at Bridgy's end either.
    """
    if 'resource' not in request.GET:
        return HttpResponseBadRequest('resource parameter missing')

    resource = request.GET['resource']
    try:
        res = urlparse(resource)
    except ValueError:
        return HttpResponseBadRequest('resource parameter malformed')

    if res.scheme in ('mailto', 'xmpp'):
        try:
            resource = https_resource_matching(res)
        except User.DoesNotExist:
            # Unknown user: forward the original resource unchanged.
            pass

    query = urlencode({'resource': resource})
    return HttpResponseRedirect(BRIDGY_FED + '?' + query) | 5,326,590 |
def send_to_teams(url, message_json, debug):
    """ posts the json message to the ms teams webhook url

    Returns True on HTTP 200, False otherwise; prints status when debug.
    """
    response = requests.post(
        url, data=message_json, headers={'Content-Type': 'application/json'})
    ok = response.status_code == requests.codes.ok
    if debug:
        print('success' if ok else 'failure: {}'.format(response.reason))
    return ok
def getSSData(args):
    """
    Calls the `dump.py` (dumps snapshots data) or `run.py` (dumps commit changes per snapshot) scripts written by Baishakhi Ray. For usage info, run `pydoc get_snapshot_data.printUsage`.
    Args to be provided as a list!

    Args
    ----
    data_root: string
        Path to the directory that contains the `projects`, `snapshots`, and `corpus` directories. For each project (linux, bitcoin, libgit2, xbmc, etc.), `projects` contains a dump of its latest version; `snapshots` contains N directories for N snapshots; `corpus` will hold (for each snapshot!) the changes data that is dumped by the `run.py` script
    language: string
        can be "c", "cpp", or "java"
    project: string
        Name of the project to be processed
    script_to_run: string
        "dump" or "run" -- this argument decides which script will be executed. "dump" runs the dump.py script that dumps the snapshots. "run" runs the run.py script that generates the corpus data (i.e. change, learn, test folders for each snapshot). The `snapshot_interval_in_months` argument will be ignored in case of "run".
    interval: string
        Snapshots of the project will be taken every `snapshot_interval_in_months` months
    """
    # args[0] is the script name (sys.argv style); 5 real arguments follow.
    if len(args) != 6 or not os.path.isdir(args[1]) or not args[2] in ['c', 'cpp', 'java'] or not args[4] in ['dump', 'run']:
        print(printUsage.__doc__)
        sys.exit()
    DATA = args[1]
    PROJECTS_DIR = DATA + '/projects/'
    SNAPSHOTS_DIR = DATA + '/snapshots/'
    CORPUS_DIR = DATA + '/corpus/'
    LANGUAGE = args[2]
    PROJECT = args[3]
    DUMP_OR_RUN = args[4]
    INTERVAL = args[5]

    # NOTE(review): the argument values are interpolated straight into
    # os.system shell commands -- safe only for trusted input.
    if DUMP_OR_RUN == 'dump':
        if not os.path.isdir(PROJECTS_DIR) or not os.path.isdir(SNAPSHOTS_DIR):
            print("\nYour data folder (" + DATA + ") does not contain directories called `projects`, `snapshots`.")
            print("These directories are required for dumping the snapshots. Aborting...")
            sys.exit()
        os.system('rm -rf ' + SNAPSHOTS_DIR + PROJECT + '/*')
        os.system('python src/generate_snapshot_data/dump.py -p ' + PROJECTS_DIR + PROJECT + ' -v d -d ' + SNAPSHOTS_DIR + ' --conf src/generate_snapshot_data/config.ini -l ' + LANGUAGE + ' -m ' + str(INTERVAL))
    elif DUMP_OR_RUN == 'run':
        if not os.path.isdir(PROJECTS_DIR) or not os.path.isdir(SNAPSHOTS_DIR) or not os.path.isdir(CORPUS_DIR):
            print("\nYour data folder (" + DATA + ") does not contain directories called `projects`, `snapshots`, `corpus`.")
            print("These directories are required to dump the change history for each snapshot. Aborting...")
            sys.exit()
        os.system('mkdir -p ' + DATA + '/logs/')
        os.system('python src/generate_snapshot_data/run.py -p ' + SNAPSHOTS_DIR + PROJECT + ' -d ' + CORPUS_DIR + PROJECT + ' -v d -l ' + LANGUAGE + ' --log ' + DATA + '/logs/' + PROJECT + '_log.txt --con src/generate_snapshot_data/config.ini') | 5,326,592 |
def get_candidate_dir():
    """
    Returns a valid directory name to store the pictures.
    If it can not be determined, "" is returned.

    Prefers ``~/Pictures`` on Windows, macOS ("Darwin") and Linux, falling
    back to the home directory itself when Pictures does not exist.

    requires:
        import os
        import pathlib
        import platform

    https://docs.python.org/3/library/pathlib.html#pathlib.Path.home
        New in version 3.5.
    https://docs.python.org/3.8/library/platform.html#platform.system
        Returns the system/OS name, such as 'Linux', 'Darwin', 'Java', 'Windows'.
        An empty string is returned if the value cannot be determined.
    """
    home_dir = pathlib.Path().home()
    # All three recognised platforms use the same ~/Pictures convention, so
    # the original per-OS branches (identical bodies) are collapsed here.
    if platform.system() in ("Windows", "Darwin", "Linux"):
        target_dir = os.path.join(home_dir, "Pictures")
    else:
        # Unknown or undetermined OS: fall back to the home directory.
        target_dir = home_dir
    if os.path.isdir(target_dir):
        return target_dir
    if os.path.isdir(home_dir):
        return home_dir
    return ""
def inflection_points(points, rise_axis, run_axis):
    """
    Find the list of vertices that preceed inflection points in a curve. The
    curve is differentiated with respect to the coordinate system defined by
    `rise_axis` and `run_axis`.

    Interestingly, `lambda x: 2*x + 1` should have no inflection points, but
    almost every point on the line is detected. It's because a zero or zero
    crossing in the second derivative is necessary but not sufficient to
    detect an inflection point. You also need a higher derivative of odd
    order that's non-zero. But that gets ugly to detect reliably using sparse
    finite differences. Just know that if you've got a straight line this
    method will go a bit haywire.

    points: an (n, 3) array of vertices along the curve, in order.
    rise_axis: A vector representing the vertical axis of the coordinate system.
    run_axis: A vector representing the the horiztonal axis of the coordinate system.

    returns: a list of points in space corresponding to the vertices that
    immediately preceed inflection points in the curve
    """
    vg.shape.check(locals(), "points", (-1, 3))
    vg.shape.check(locals(), "rise_axis", (3,))
    vg.shape.check(locals(), "run_axis", (3,))

    # Project each vertex onto the two axes to get scalar coordinates.
    coords_on_run_axis = points.dot(run_axis)
    coords_on_rise_axis = points.dot(rise_axis)

    # Take the second order finite difference of the curve with respect to the
    # defined coordinate system
    finite_difference_1 = np.gradient(coords_on_rise_axis, coords_on_run_axis)
    finite_difference_2 = np.gradient(finite_difference_1, coords_on_run_axis)

    # Compare the product of all neighboring pairs of points in the second
    # derivative. If a pair of points has a negative product, then the second
    # derivative changes sign between those points. These are the inflection
    # points.  The trailing [False] keeps the mask the same length as points.
    is_inflection_point = np.concatenate(
        [finite_difference_2[:-1] * finite_difference_2[1:] <= 0, [False]]
    )

    return points[is_inflection_point] | 5,326,594 |
def compile(raw_model):
    """Compile a raw model.

    Note: shadows the builtin ``compile`` inside this module.

    Parameters
    ----------
    raw_model : list of dict
        A raw GPTC model.

    Returns
    -------
    dict
        A compiled GPTC model.

    """

    categories = {}
    # Gather all tokenized text per category.
    for portion in raw_model:
        text = gptc.tokenizer.tokenize(portion['text'])
        category = portion['category']
        try:
            categories[category] += text
        except KeyError:
            categories[category] = text

    categories_by_count = {}
    names = []
    for category, text in categories.items():
        if not category in names:
            names.append(category)
        categories_by_count[category] = {}
        # Each occurrence contributes 1/len(category text): a normalized
        # word frequency within the category.
        for word in text:
            try:
                categories_by_count[category][word] += 1/len(categories[category])
            except KeyError:
                categories_by_count[category][word] = 1/len(categories[category])
    word_weights = {}
    # Invert the mapping: word -> {category: frequency}.
    for category, words in categories_by_count.items():
        for word, value in words.items():
            try:
                word_weights[word][category] = value
            except KeyError:
                word_weights[word] = {category:value}

    model = {}
    # Normalize each word's weights so they sum to 1 across categories.
    for word, weights in word_weights.items():
        total = sum(weights.values())
        model[word] = []
        for category in names:
            model[word].append(weights.get(category, 0)/total)

    model['__names__'] = names
    model['__version__'] = 2
    model['__raw__'] = raw_model

    return model | 5,326,595 |
def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    # Keep only the whole-match HTML from each (text, whole) pair.
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)] | 5,326,596 |
def patched_requests_mocker(requests_mock):
    """
    This function mocks various PANOS API responses so we can accurately test the instance

    Registers a GET handler for the version endpoint and a catch-all POST
    handler that reports success, then returns the configured mocker.
    """
    base_url = "{}:{}/api/".format(integration_params['server'], integration_params['port'])
    # Version information
    mock_version_xml = """
    <response status = "success">
        <result>
            <sw-version>9.0.6</sw-version>
            <multi-vsys>off</multi-vsys>
            <model>Panorama</model>
            <serial>FAKESERIALNUMBER</serial>
        </result>
    </response>
    """
    version_path = "{}{}{}".format(base_url, "?type=version&key=", integration_params['key'])
    requests_mock.get(version_path, text=mock_version_xml, status_code=200)
    mock_response_xml = """
    <response status="success" code="20">
        <msg>command succeeded</msg>
    </response>
    """
    requests_mock.post(base_url, text=mock_response_xml, status_code=200)
    return requests_mock | 5,326,597 |
def filter_gq(records, name, min_gq):
    """Filter records based on a minimum genotype quality value.

    Yields every record except those whose sample ``name`` carries a
    non-missing GQ below ``min_gq``.
    """
    for rec in records:
        if 'GQ' not in rec.format:
            yield rec
            continue
        quality = rec.samples[name]['GQ']
        if quality is None or quality >= min_gq:
            yield rec
def test_replica_remove():
    """ Test logical file remove, which should remove the file from the remote resource

    Creates a FILE_SIZE-MB local temp file, uploads it as a logical file to
    the configured replica URL, then removes it; any SAGA exception fails
    the test.  (Python 2 syntax.)
    """
    try:
        tc = testing.get_test_config ()
        the_url = tc.job_service_url # from test config file
        the_session = tc.session # from test config file
        replica_url = tc.replica_url
        replica_directory = saga.replica.LogicalDirectory(replica_url)
        home_dir = os.path.expanduser("~"+"/")
        print "Creating temporary file of size %dM : %s" % \
            (FILE_SIZE, home_dir+TEMP_FILENAME)
        # create a file for us to use
        with open(home_dir+TEMP_FILENAME, "wb") as f:
            f.write ("x" * (FILE_SIZE * pow(2,20)) )
        print "Creating logical directory object"
        mydir = saga.replica.LogicalDirectory(replica_url)
        print "Uploading temporary"
        myfile = saga.replica.LogicalFile(replica_url+TEMP_FILENAME)
        myfile.upload(home_dir + TEMP_FILENAME, \
                          "irods:///this/path/is/ignored/?resource="+IRODS_RESOURCE, saga.replica.OVERWRITE)
        print "Removing temporary file."
        myfile.remove()
        assert True
    except saga.SagaException as ex:
        # print ex.traceback
        assert False, "unexpected exception %s\n%s" % (ex.traceback, ex) | 5,326,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.