content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def train10():
    """
    CIFAR-10 training set creator.
    It returns a reader creator, each sample in the reader is image pixels in
    [0, 1] and label in [0, 9].
    :return: Training reader creator
    :rtype: callable
    """
    # Download (and cache) the CIFAR-10 archive via paddle's dataset cache,
    # then build a reader over the training batches ('data_batch*').
    # NOTE(review): the trailing True presumably flags training mode in
    # reader_creator -- confirm against its definition.
    return reader_creator(
        paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5),
        'data_batch', True)
def update_aliens(ai_settings, stats, screen, sb, ship, aliens, bullets):
    """Verifies if the fleet is at a border and then updates the position of all aliens in the fleet.

    Side effects: may trigger ship_hit / check_aliens_bottom handling, which
    receive the full game state (settings, stats, screen, scoreboard, ship,
    alien group, bullet group).
    """
    # Reverse fleet direction if any alien touches a screen edge.
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    #Verifies if a collision between aliens and the spaceship has occured
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)
    #Check if any alien has reached the bottom of the screen
    check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets)
def _generate_one_direction_LSTM(transformer, X, W, R, B, initial_h, initial_c, P, clip,
                                 act, dtype, hidden_size, batch_size):
    """Generate subgraph for one direction of unrolled LSTM layer
    Args:
        transformer (_ModelTransformerHelper): helper for model generation
        X (list of str): names of tensors in input sequence. Each tensor shape: [batch_size, input_size]
        W (str): name of concatenated weight tensor: [input, output, forget, cell]
        R (str): name of concatenated recurrence weights tensor: [input, output, forget, cell]
        B (str): name of concatenated bias tensor: [input, output, forget, cell]
        initial_h (str or None): name of tensor containing initial hidden state. Shape [batch_size, hidden_size]
        initial_c (str or None): name of tensor containing initial cell state. Shape [batch_size, hidden_size]
        P (str or None): name of concatenated peephole tensor: [input, output, forget]
        clip (float or None): range which clips input of activations
        act (dict of str): activation functions {'f': 'Sigmoid', 'g': 'Tanh', 'h': 'Tanh'}
        dtype (numpy dtype): data type used in created LSTM operation
        hidden_size (int): hidden dimension
        batch_size (int): batch dimension
    Returns:
        tuple: (list of per-step hidden state tensor names, name of final cell state tensor)
    """
    # one direction LSTM:
    #
    # For details see:
    # https://github.com/onnx/onnx/blob/5cf5feef5ec3fd5527b2fdb6c29780e3b705059f/docs/Changelog.md#LSTM-7
    #
    # it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
    # ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
    # ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
    # Ct = ft (.) Ct-1 + it (.) ct
    # ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
    # Ht = ot (.) h(Ct)
    #
    # X - input tensor
    # i - input gate
    # o - output gate
    # f - forget gate
    # c - cell gate
    # t - time step (t-1 means previous time step)
    # W[iofc] - W parameter weight matrix for input, output, forget, and cell gates
    # R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates
    # Wb[iofc] - W bias vectors for input, output, forget, and cell gates
    # Rb[iofc] - R bias vectors for input, output, forget, and cell gates
    # P[iof] - P peephole weight vector for input, output, and forget gates
    # WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates
    # RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates
    # WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates
    # RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates
    # PB[iof] - P peephole weight vector for backward input, output, and forget gates
    # H - Hidden state
    seq_length = len(X)
    state_h_tensors = []
    # Split the concatenated [i, o, f, c] weight / recurrence tensors into per-gate tensors.
    w_tensors = transformer.make_split(W, split_sizes=[hidden_size] * 4, axis=0)
    W = {'i': w_tensors[0], 'o': w_tensors[1], 'f': w_tensors[2], 'c': w_tensors[3]}
    r_tensors = transformer.make_split(R, split_sizes=[hidden_size] * 4, axis=0)
    R = {'i': r_tensors[0], 'o': r_tensors[1], 'f': r_tensors[2], 'c': r_tensors[3]}
    if B is not None:
        # B concatenates Wb (first 4 chunks) and Rb (last 4); fold them into
        # a single per-gate bias Wb + Rb.
        separate_b_tensors = transformer.make_split(
            B, split_sizes=[hidden_size] * 8, axis=0)
        b_tensors = []
        for i in range(4):
            b_tensors += [
                transformer.make_add(separate_b_tensors[i], separate_b_tensors[i + 4])
            ]
    else:
        b_tensors = [
            transformer.make_constant_tensor(
                np.zeros((hidden_size), dtype=dtype), 'zero_b')
        ] * 4
    B = {'i': b_tensors[0], 'o': b_tensors[1], 'f': b_tensors[2], 'c': b_tensors[3]}
    if initial_h is not None:
        previous_h_state_tensor = initial_h
    else:
        previous_h_state_tensor = transformer.make_constant_tensor(
            np.zeros((batch_size, hidden_size), dtype=dtype), 'initial_h')
    if initial_c is not None:
        previous_c_state_tensor = initial_c
    else:
        previous_c_state_tensor = transformer.make_constant_tensor(
            np.zeros((batch_size, hidden_size), dtype=dtype), 'initial_c')
    if P is not None:
        p_tensors = transformer.make_split(P, split_sizes=[hidden_size] * 3, axis=0)
        P = {'i': p_tensors[0], 'o': p_tensors[1], 'f': p_tensors[2]}
    else:
        # No peepholes: use a shared zero vector so the formulas below stay uniform.
        zero = transformer.make_constant_tensor(
            np.zeros((hidden_size), dtype=dtype), 'zero_peephole')
        P = {'i': zero, 'o': zero, 'f': zero}
    # Unroll the recurrence over the sequence, one gate at a time per step.
    for i in range(seq_length):
        # it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
        it = transformer.make_gemm(X[i], W['i'], B['i'], trans_b=True)
        it = transformer.make_gemm(previous_h_state_tensor, R['i'], it, trans_b=True)
        peephole_it = transformer.make_mul(P['i'], previous_c_state_tensor)
        it = transformer.make_add(it, peephole_it)
        if clip is not None:
            it = transformer.make_clip(it, min=-clip, max=clip)
        it = transformer.make_act(it, act['f'])
        # ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
        ft = transformer.make_gemm(X[i], W['f'], B['f'], trans_b=True)
        ft = transformer.make_gemm(previous_h_state_tensor, R['f'], ft, trans_b=True)
        peephole_ft = transformer.make_mul(P['f'], previous_c_state_tensor)
        ft = transformer.make_add(ft, peephole_ft)
        if clip is not None:
            ft = transformer.make_clip(ft, min=-clip, max=clip)
        ft = transformer.make_act(ft, act['f'])
        # ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
        ct = transformer.make_gemm(X[i], W['c'], B['c'], trans_b=True)
        ct = transformer.make_gemm(previous_h_state_tensor, R['c'], ct, trans_b=True)
        if clip is not None:
            ct = transformer.make_clip(ct, min=-clip, max=clip)
        ct = transformer.make_act(ct, act['g'])
        # Ct = ft (.) Ct-1 + it (.) ct
        ft_Ct = transformer.make_mul(ft, previous_c_state_tensor)
        it_ct = transformer.make_mul(it, ct)
        Ct = transformer.make_add(ft_Ct, it_ct)
        previous_c_state_tensor = Ct
        # ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
        # NOTE: output gate peephole uses the *updated* cell state Ct.
        ot = transformer.make_gemm(X[i], W['o'], B['o'], trans_b=True)
        ot = transformer.make_gemm(previous_h_state_tensor, R['o'], ot, trans_b=True)
        peephole_ot = transformer.make_mul(P['o'], Ct)
        ot = transformer.make_add(ot, peephole_ot)
        if clip is not None:
            ot = transformer.make_clip(ot, min=-clip, max=clip)
        ot = transformer.make_act(ot, act['f'])
        # Ht = ot (.) h(Ct)
        Ht = transformer.make_act(Ct, act['h'])
        Ht = transformer.make_mul(ot, Ht)
        previous_h_state_tensor = Ht
        state_h_tensors += [Ht]
    return (state_h_tensors, previous_c_state_tensor)
def FindTerms(Filename, NumTerms):
    """Reorder the first NumTerms of the output of the Todd program to find omega breakpoints.

    Args:
        Filename: path to the Todd output file. Its first line must hold the
            omega value as the second whitespace-separated token; three header
            lines follow, then one term per line (first token, an integer).
        NumTerms: how many leading terms from the file to analyse.

    Returns:
        List of 1-based positions (into the sorted term subset) at which each
        omega breakpoint (full term count for omega = 0..Omega) is reached.
    """
    # Fixed: this snippet used Python 2 `print` statements; converted to
    # print() calls and a `with` block so the handle is always closed.
    with open(Filename, 'r') as f:
        # First line: "<label> <omega>"
        Omega = int(f.readline().split()[1])
        print("Omega =", Omega)
        # Skip the three remaining header lines
        for _ in range(3):
            f.readline()
        Terms = []
        for line in f:
            s = line.split()
            if len(s) == 0:
                # Blank line marks the end of the term list
                break
            if s[0].isdigit():
                Terms.append(int(s[0]))
    if NumTerms > len(Terms):
        print("Requesting more terms than are available in file...exiting.")
        exit()
    print("Number of terms in file", Filename, ": ", len(Terms))
    print("Number of terms to use:", NumTerms)
    print()
    TermsSub = sorted(Terms[0:NumTerms])
    # Numbers of terms for the full set for omega = 0 through Omega
    OmegaTerms = [NumTermsOmega(i) for i in range(Omega + 1)]
    FoundTerms = []
    for i in range(Omega + 1):
        for j in range(len(TermsSub)):
            if TermsSub[j] == OmegaTerms[i]:
                print(i, ": Found", OmegaTerms[i], "at position", j + 1)
                FoundTerms = FoundTerms + [j + 1]
                break
            if TermsSub[j] > OmegaTerms[i]:
                print(i, ": Found next term past", OmegaTerms[i], "at position", j + 1)
                FoundTerms = FoundTerms + [j + 1]
                break
    if TermsSub[len(TermsSub) - 1] != OmegaTerms[Omega]:
        print(Omega, ": Last term at", len(TermsSub), "is less than", OmegaTerms[Omega])
        FoundTerms = FoundTerms + [len(TermsSub)]
    # Just here to put some extra space after running
    print()
    return FoundTerms
def hessian_power(h):
    """
    Power in the hessian filter band: the squared Frobenius norm of the
    hessian, given its unique components.

    In 2D the hessian has 3 unique components (xx, xy, yy); in 3D it has 6
    (xx, xy, xz, yy, yz, zz). Off-diagonal components are counted twice,
    since the full matrix contains each of them twice.

    :param h: sequence of 3 (2D) or 6 (3D) arrays/scalars of hessian components.
    :return: elementwise power (same shape as the components).
    :raises RuntimeError: if len(h) is neither 3 nor 6.
    """
    if len(h) == 3:
        # Bug fix: this branch previously tested len(h) == 2 but indexed
        # h[2], which raised IndexError for every 2D input.
        p = np.abs(h[0])**2 + 2*np.abs(h[1])**2 + np.abs(h[2])**2
    elif len(h) == 6:
        p = np.abs(h[0])**2 + 2*np.abs(h[1])**2 + 2*np.abs(h[2])**2 + np.abs(h[3])**2 + 2*np.abs(h[4])**2 + np.abs(h[5])**2
    else:
        raise RuntimeError('Unsupported number of hessian components {}.'.format(len(h)))
    return p
def read_table(source, columns=None, memory_map=True):
    """
    Read a pyarrow.Table from Feather format
    Parameters
    ----------
    source : str file path, or file-like object
    columns : sequence, optional
        Only read a specific set of columns. If not provided, all columns are
        read.
    memory_map : boolean, default True
        Use memory mapping when opening file on disk
    Returns
    -------
    table : pyarrow.Table
    """
    reader = ext.FeatherReader()
    reader.open(source, use_memory_map=memory_map)
    if columns is None:
        return reader.read()

    # Dispatch on the column selector type: all-int -> positional,
    # all-str -> by name, anything else is rejected.
    selector_types = [type(col) for col in columns]
    if all(t == int for t in selector_types):
        table = reader.read_indices(columns)
    elif all(t == str for t in selector_types):
        table = reader.read_names(columns)
    else:
        type_names = [t.__name__ for t in selector_types]
        raise TypeError("Columns must be indices or names. "
                        "Got columns {} of types {}"
                        .format(columns, type_names))

    if reader.version < 3:
        # Feather v1 already respects the column selection
        return table
    if sorted(set(columns)) == columns:
        # Feather v2 reads with sorted / deduplicated selection
        return table
    # Otherwise re-project to follow the exact order / selection of names
    reordered_fields = [table.schema.field(col) for col in columns]
    reordered_schema = schema(reordered_fields, metadata=table.schema.metadata)
    reordered_columns = [table.column(col) for col in columns]
    return Table.from_arrays(reordered_columns, schema=reordered_schema)
def save_classnames_in_image_maxcardinality(
    rgb_img, label_img, id_to_class_name_map, font_color=(0, 0, 0), save_to_disk: bool = False, save_fpath: str = ""
) -> np.ndarray:
    """
    Render each class's name onto the largest connected component of that
    class in the label map.

    Args:
        rgb_img: color image to annotate, shape (H, W, C).
        label_img: per-pixel class-id map.
        id_to_class_name_map: Mapping[int,str]
        font_color: text color tuple.
        save_to_disk: also write the annotated image to save_fpath.
        save_fpath: destination path used when save_to_disk is True.
    Returns:
        rgb_img with class names drawn in.
    """
    height, width, _ = rgb_img.shape
    components_by_class = scipy_conn_comp(label_img)
    for cls_id, components in components_by_class.items():
        # Pick the single largest connected component for this class.
        largest = components[find_max_cardinality_mask(components)]
        label_text = id_to_class_name_map[cls_id]
        y, x = get_mean_mask_location(largest)
        # Nudge the text left, then clamp horizontally into the image.
        x = min(width - 1, max(0, x - 55))
        rgb_img = add_text_cv2(
            rgb_img, label_text, coords_to_plot_at=(x, y), font_color=font_color, font_scale=1, thickness=2
        )
    if save_to_disk:
        cv2_write_rgb(save_fpath, rgb_img)
    return rgb_img
def yolo2lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V2 Lite MobileNet model CNN body in Keras."""
    backbone = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # For a 416 x 416 x 3 input:
    #   f1 = backbone output                 : 13 x 13 x (1024*alpha)
    #   f2 = conv_pw_11_relu (layers[73])    : 26 x 26 x (512*alpha)
    f1 = backbone.output
    f2 = backbone.get_layer('conv_pw_11_relu').output

    head = yolo2lite_predictions(
        (f1, f2), (int(1024 * alpha), int(512 * alpha)), num_anchors, num_classes)
    return Model(inputs, head)
def test_init_no_resume_file(flow_sampler, tmp_path, resume):
    """Test the init method when there is no run to resume from.

    Patches NestedSampler and configure_threads, then asserts FlowSampler.__init__
    forwards the thread settings and sampler kwargs correctly.
    """
    model = MagicMock()
    output = tmp_path / 'init'
    output.mkdir()
    output = str(output)
    resume = resume  # no-op rebind of the fixture value; kept as-is
    exit_code = 131
    max_threads = 2
    resume_file = 'test.pkl'
    kwargs = dict(
        nlive=1000,
        pytorch_threads=1,
    )
    flow_sampler.save_kwargs = MagicMock()
    # Patch out the heavy collaborators so only __init__'s wiring is exercised.
    with patch('nessai.flowsampler.NestedSampler', return_value='ns') as mock,\
         patch('nessai.flowsampler.configure_threads') as mock_threads:
        FlowSampler.__init__(
            flow_sampler,
            model,
            output=output,
            resume=resume,
            exit_code=exit_code,
            max_threads=max_threads,
            resume_file=resume_file,
            **kwargs,
        )
    # Thread configuration must receive the explicit max_threads and the
    # pytorch_threads passed through kwargs.
    mock_threads.assert_called_once_with(
        max_threads=max_threads,
        pytorch_threads=1,
        n_pool=None,
    )
    # The sampler is constructed with a trailing separator on the output path.
    mock.assert_called_once_with(
        model,
        output=os.path.join(output, ''),
        resume_file=resume_file,
        **kwargs,
    )
    assert flow_sampler.ns == 'ns'
    flow_sampler.save_kwargs.assert_called_once_with(
        kwargs
    )
def from_pandas(
    X: pd.DataFrame,
    max_iter: int = 100,
    h_tol: float = 1e-8,
    w_threshold: float = 0.0,
    tabu_edges: List[Tuple[str, str]] = None,
    tabu_parent_nodes: List[str] = None,
    tabu_child_nodes: List[str] = None,
) -> StructureModel:
    """
    Learn the `StructureModel`, the graph structure describing conditional dependencies between variables
    in data presented as a pandas dataframe.

    The optimisation minimises a score function :math:`F(W)` over the graph's
    weighted adjacency matrix, :math:`W`, subject to a constraint function :math:`h(W)`,
    where :math:`h(W) == 0` characterises an acyclic graph.
    :math:`h(W) > 0` is a continuous, differentiable function that encapsulates how acyclic
    the graph is (less == more acyclic).

    Full details of this approach to structure learning are provided in the publication:
    Based on DAGs with NO TEARS.
    @inproceedings{zheng2018dags,
        author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},
        booktitle = {Advances in Neural Information Processing Systems},
        title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}},
        year = {2018},
        codebase = {https://github.com/xunzheng/notears}
    }

    Args:
        X: input data.
        max_iter: max number of dual ascent steps during optimisation.
        h_tol: exit if h(W) < h_tol (as opposed to strict definition of 0).
        w_threshold: fixed threshold for absolute edge weights.
        tabu_edges: list of edges(from, to) not to be included in the graph.
        tabu_parent_nodes: list of nodes banned from being a parent of any other nodes.
        tabu_child_nodes: list of nodes banned from being a child of any other nodes.
    Returns:
        StructureModel: graph of conditional dependencies between data variables.
    Raises:
        ValueError: If X does not contain data.
    """
    data = deepcopy(X)

    # The numeric optimiser cannot handle non-numeric columns: fail fast.
    non_numeric_cols = data.select_dtypes(exclude="number").columns
    if len(non_numeric_cols) > 0:
        raise ValueError(
            "All columns must have numeric data. "
            "Consider mapping the following columns to int {non_numeric_cols}".format(
                non_numeric_cols=non_numeric_cols
            )
        )

    # Translate column names <-> positional indices for the numpy backend.
    name_to_pos = {name: pos for pos, name in enumerate(data.columns)}
    pos_to_name = {pos: name for name, pos in name_to_pos.items()}
    if tabu_edges:
        tabu_edges = [(name_to_pos[u], name_to_pos[v]) for u, v in tabu_edges]
    if tabu_parent_nodes:
        tabu_parent_nodes = [name_to_pos[n] for n in tabu_parent_nodes]
    if tabu_child_nodes:
        tabu_child_nodes = [name_to_pos[n] for n in tabu_child_nodes]

    learned = from_numpy(
        data.values,
        max_iter,
        h_tol,
        w_threshold,
        tabu_edges,
        tabu_parent_nodes,
        tabu_child_nodes,
    )

    # Rebuild the structure model with the original column names.
    sm = StructureModel()
    sm.add_nodes_from(data.columns)
    sm.add_weighted_edges_from(
        [(pos_to_name[u], pos_to_name[v], w) for u, v, w in learned.edges.data("weight")],
        origin="learned",
    )
    return sm
def conv_seq_to_sent_symbols(seq, excl_symbols=None, end_symbol='.',
                             remove_end_symbol=True):
    """
    Converts sequences of tokens/ids into a list of sentences (tokens/ids).
    :param seq: list of tokens/ids.
    :param excl_symbols: tokens/ids which should be excluded from the final
        result.
    :param end_symbol: symbol that terminates a sentence.
    :param remove_end_symbol: whether to remove from each sentence the end
        symbol.
    :return: list of lists, where each sub-list contains tokens/ids.
    """
    excluded = excl_symbols if excl_symbols else {}
    assert end_symbol not in excluded
    sentences = []
    current = []
    for token in seq:
        if token in excluded:
            continue
        if token != end_symbol:
            current.append(token)
            continue
        # Sentence boundary: optionally keep the terminator, then flush.
        if not remove_end_symbol:
            current.append(token)
        sentences.append(current)
        current = []
    # Trailing tokens with no terminator still form a final sentence.
    if current:
        sentences.append(current)
    return sentences
def emphasize_match(seq, line, fmt='__{}__'):
    """
    Emphasize the matched portion of string.

    :param seq: query string searched for in `line` (case-insensitively,
        via substr_ind with skip_spaces=True).
    :param line: string to annotate.
    :param fmt: format string wrapped around the matched span.
    :return: `line` with only the matched span emphasized; unchanged if
        there is no match.
    """
    indices = substr_ind(seq.lower(), line.lower(), skip_spaces=True)
    if indices:
        start, end = indices
        # Splice around the span instead of str.replace, which would also
        # rewrite every other occurrence of the matched text in the line.
        line = line[:start] + fmt.format(line[start:end]) + line[end:]
    return line
def param_value(memory, position, mode):
    """Get the value of a param according to its mode.

    Mode 0 (position): memory[position] holds an address to dereference.
    Mode 1 (immediate): memory[position] is the value itself.
    Raises ValueError for any other mode.
    """
    if mode == 1:  # immediate mode
        return memory[position]
    if mode == 0:  # position mode
        return memory[memory[position]]
    raise ValueError("Unknown mode : ", mode)
def check_array_shape(inp: np.ndarray, dims: tuple, shape_m1: int, msg: str):
    """check if inp shape is allowed
    inp: test object
    dims: list, list of allowed dims
    shape_m1: shape of lowest level, if 'any' allow any shape
    msg: str, error msg
    """
    # Accept when the rank is allowed and the last axis either matches
    # shape_m1 or is unconstrained ('any').
    if inp.ndim in dims:
        last_axis_ok = (shape_m1 == "any") or (inp.shape[-1] == shape_m1)
        if last_axis_ok:
            return None
    raise MagpylibBadUserInput(msg)
def Implies(p, q, simplify=True, factor=False):
    """Factory function for Boolean implication expression.

    Boxes both operands into Expressions, builds p -> q, and optionally
    factors (takes precedence) or simplifies the result.
    """
    expr = ExprImplies(Expression.box(p), Expression.box(q))
    if factor:
        return expr.factor()
    if simplify:
        return expr.simplify()
    return expr
def install_local_rpm(rpm_file, logger):
    """
    install a local rpm file
    :param rpm_file: path to the file
    :param logger: rs log obj
    :return:
    """
    # Skip the install entirely when the rpm is already present.
    if not check_local_rpm(rpm_file, logger):
        command = "yum -y install {}".format(rpm_file)
        logger.debug("Installing rpm '{}' using command '{}'", rpm_file, command)
        execute_in_bash(command, logger)
    else:
        logger.trace("{} RPM already installed. Do nothing.", rpm_file)
def r_power(r_amp):
    """Return the fraction of reflected power.
    Parameters
    ----------
    r_amp : float
        The net reflection amplitude after calculating the transfer
        matrix.
    Returns
    -------
    R : numpy array
        The model reflectance
    """
    # Reflectance is the squared magnitude of the (possibly complex) amplitude.
    magnitude = np.abs(r_amp)
    return magnitude ** 2
def wrap_get_server(layer_name, func):
    """ Wrapper for memcache._get_server, to read remote host on all ops.
    This relies on the module internals, and just sends an info event when this
    function is called.

    :param layer_name: instrumentation layer name passed to oboe.log.
    :param func: the original _get_server callable being wrapped.
    :return: wrapped callable with identical signature and return value.
    """
    # Fixed: the original used Python-2-only syntax (`except Exception, e`
    # and `print >> sys.stderr`), which is a SyntaxError on Python 3.
    @wraps(func)
    def wrapper(*f_args, **f_kwargs):
        ret = func(*f_args, **f_kwargs)
        try:
            args = {'KVKey': f_args[1]}
            (host, _) = ret
            if host:
                if host.family == socket.AF_INET:
                    args['RemoteHost'] = host.ip
                elif host.family == socket.AF_UNIX:
                    args['RemoteHost'] = 'localhost'
            oboe.log('info', layer_name, keys=args,
                     store_backtrace=oboe._collect_backtraces('memcache'))
        except Exception as e:
            # Instrumentation is best-effort: never let it break the call.
            print("Oboe error: %s" % e, file=sys.stderr)
        return ret
    return wrapper
def shape(batch) -> tuple:
    """Get count of machines/tasks of a batch.

    :param batch: 2D sequence indexed as batch[machine][task]; must be
        non-empty since the task count is read from the first row.
    :return: (machine count, task count) tuple.
    """
    # Fixed: the annotation `-> (int, int)` was a tuple literal, not a valid
    # type annotation.
    return len(batch), len(batch[0])
def uniprot_mappings(query: Union[str, List[str]],
                     map_from: str = 'ID',
                     map_to: str = 'PDB_ID',
                     ) -> pd.DataFrame:
    """Map identifiers using the UniProt identifier mapping tool.
    :param query: list or space delimited string of identifiers
    :param map_from: type of input identifiers (default: accession)
    :param map_to: type of desired output identifiers
        (default: PDB identifiers)
    See: https://www.uniprot.org/help/api_idmapping
    """
    if isinstance(query, list):
        query = ' '.join(query)

    response = requests.post(
        'https://www.uniprot.org/uploadlists/',
        {
            'from': map_from,
            'to': map_to,
            'format': 'tab',
            'query': query,
        },
    )
    if not response.ok:
        raise ValueError("query is wrongly formatted and resulted in a server failure")

    # Parse the tab-separated response and rename the generic To/From
    # headers to the requested identifier types.
    mapping = pd.read_csv(StringIO(response.text), sep='\t')
    return mapping.rename(columns={'To': map_to, 'From': map_from})
def build_property_filter_spec(client_factory, property_specs, object_specs):
    """Builds the property filter spec.
    :param client_factory: factory to get API input specs
    :param property_specs: property specs to be collected for filtered objects
    :param object_specs: object specs to identify objects to be filtered
    :returns: property filter spec
    """
    filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    # Attach the object selection first, then the properties to collect.
    filter_spec.objectSet = object_specs
    filter_spec.propSet = property_specs
    return filter_spec
def get_user_name_from_token():
    """Extract user name, groups, and preferred username from ID token.

    returns:
        a tuple of (name, groups, preferred_username), or
        (None, None, None) when any of those fields is missing from the
        token info.
    """
    curl = _Curl()
    token_info = curl.get_token_info()
    try:
        return token_info['name'], token_info['groups'], token_info['preferred_username']
    except Exception:
        # Fixed: the fallback previously returned a 2-tuple while the
        # success path returns a 3-tuple, breaking three-value unpacking
        # in callers on the error path.
        return None, None, None
def write_gro(filename: str, positions: np.ndarray, simulation_cell: np.ndarray):
    """
    Write out one frame in a GROMACS trajectory.

    Each atom line is written as:
        residue number (5 positions, integer)
        residue name (5 characters)
        atom name (5 characters)
        atom number (5 positions, integer)
        position (in nm, x y z in 3 columns, each 8 positions with 3 decimal places)

    Parameters
    ----------
    filename
        The name of a file to write to.
    positions
        An Nx3 array of atomic positions.
    simulation_cell
        A 3x2 numpy array in the form [[xlo, xhi], [ylo, yhi], [zlo, zhi]].

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If more than 9999 atoms are supplied (the atom-number field is
        5 characters wide).
    """
    if positions.shape[0] > 9999:
        # Raise a real exception: `assert` is stripped under `python -O`.
        raise ValueError(".gro files only support up to 9999 atoms.")
    # Box edge lengths along x, y, z.
    cell_a = simulation_cell[0, 1] - simulation_cell[0, 0]
    cell_b = simulation_cell[1, 1] - simulation_cell[1, 0]
    cell_c = simulation_cell[2, 1] - simulation_cell[2, 0]
    with open(filename, "w") as fi:
        fi.write("Diaphite, t=0.0\n")
        for idx, row in enumerate(positions, 1):
            atom_name = f"C{idx}".rjust(5, " ")
            fi.write(
                f"{1: >5}DIAPH{atom_name}{idx: >5}{row[0]:8.3f}{row[1]:8.3f}{row[2]:8.3f}\n"
            )
        fi.write(f"{cell_a} {cell_b} {cell_c}\n")
def plotPanel(ax,image,units='',extent=None,colorbar=False,cblabel=None,title='',cmap=plt.cm.viridis,contours=None,interpolation='bicubic',**kwargs):
    """Plot a single panel. To be called from :func:`multiplot() (see docstring there).`

    Args:
        ax: matplotlib axes to draw into.
        image: an ``Image`` or ``PSF`` instance, or a plain 2d array.
        units: '' uses the image's attached brightness units, None uses raw
            values, otherwise brightness is converted via image.getBrightness.
        extent: axis extent [xmin, xmax, ymin, ymax]; derived from image.FOV
            when None and the input is an Image/PSF.
        colorbar / cblabel: whether to draw a colorbar and its label.
        contours: 'lin', 'log', or a sequence of fractions of the data maximum.
        kwargs: 'norm' overrides the default normalization.
    """
    # what kind of animal is 'image'? (the former try/except around this
    # attribute read only re-raised, so it has been dropped)
    cls = image.__class__.__name__
    # extract the data (2d array) to be plotted
    if cls in ('Image','PSF'):
        if units == '': # use native/attached values and units of image
            data = image.data.value
            units = image.data.unit
        elif units is None:
            data = image.data.value
        else:
            aux = image.getBrightness(units) # convert brightness to the desired units (at least try)
            data = aux.value
            units = str(aux.unit)
        # Fixed: the axis unit is now always taken from image.FOV. Previously
        # it was only set when 'extent' was None, leaving 'axunit' undefined
        # (UnboundLocalError at set_xlabel) when an explicit extent was given.
        axunit = str(image.FOV.unit)
        # if 'extent' given as argument, use it and keep only the FOV unit
        if extent is None:
            rad = image.FOV.value/2.
            extent = [-rad,rad,-rad,rad]
    elif cls == 'ndarray' or cls == 'MaskedArray':
        data = image
        axunit = 'pixel'
    else:
        raise AttributeError("Don't know how to plot 'image'. Must be either instance of class 'Image', or 'PSF, or a 2d array.")
    # transpose here once for all future plotting in this function
    data = data.T
    if 'norm' in kwargs:
        norm = kwargs['norm']
    else:
        # image normalization
        norm = matplotlib.colors.Normalize() # possibly allow this to be an argument? (e.g. for absolute normalizations)
        if cls == 'PSF':
            norm = matplotlib.colors.LogNorm()
    # reserve space for colorbar (even if later not used)
    divider = make_axes_locatable(ax)
    # plot image
    im = ax.imshow(data,origin='lower',extent=extent,interpolation=interpolation,cmap=cmap,norm=norm)
    # plot contours if requested
    if contours is not None:
        ncon = 10
        min_ = np.min(data[data>0.])
        max_ = np.max(data)
        if contours == 'lin':
            norm = matplotlib.colors.Normalize()
            V = np.linspace(min_,max_,ncon)
        elif contours == 'log':
            norm = matplotlib.colors.LogNorm()
            V = np.logspace(np.log10(min_),np.log10(max_),ncon)
        else:
            # sequence of fractions of the data maximum
            norm = None
            V = np.array(contours)*data.max()
        ax.contour(data,V,origin='lower',extent=extent,colors='w',linewidths=0.5,linestyles='-',corner_mask=True,norm=norm)
    # set title, labels
    if title is not None:
        ax.set_title(title)
    ax.set_xlabel('offset ({:s})'.format(axunit))
    ax.set_ylabel('offset ({:s})'.format(axunit))
    # make colorbar; set invisible if no colorbar requested
    cax = divider.append_axes('right', size='5%', pad=0.05)
    if colorbar == True:
        cb = plt.colorbar(im,cax=cax,orientation='vertical')
        if cls == 'Image' and units is not None:
            cb.set_label(units)
        if cblabel is not None:
            cb.set_label(cblabel)
    else:
        cax.set_visible(False)
def prob17(limit=1000):
    """
    If the numbers 1 to 5 are written out in words: one, two, three, four,
    five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
    If all the numbers from 1 to 1000 (one thousand) inclusive were written out
    in words, how many letters would be used?
    NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and
    forty-two) contains 23 letters and 115 (one hundred and fifteen) contains
    20 letters. The use of "and" when writing out numbers is in compliance with
    British usage.
    """
    digits = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',
              6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
    exceptions = {10: 'ten', 11: 'eleven', 12: 'twelve', 14: 'fourteen'}
    bases = {2: 'twen', 3: 'thir', 4: 'for', 5: 'fif',
             6: 'six', 7: 'seven', 8: 'eigh', 9: 'nine'}
    powers = {1: 'teen', 10: 'ty', 100: 'hundred', 1000: 'thousand'}

    total = 0
    for n in range(1, limit + 1):
        tail = str(n)[-2:]          # last one or two decimal digits
        tail_val = int(tail)
        if tail_val == 0:
            pass                    # "hundred"/"thousand" handled below
        elif tail_val in exceptions:
            total += len(exceptions[tail_val])
        elif 10 < tail_val < 20:
            # regular teens: stem + "teen"
            total += len(bases[int(tail[1])]) + len(powers[1])
        else:
            if tail[-1] != '0':
                total += len(digits[int(tail[-1])])                    # units word
            if len(tail) == 2 and tail[0] != '0':
                total += len(bases[int(tail[0])]) + len(powers[10])    # tens word
        if len(str(n)) > 2:
            head = str(n)[:-2]      # hundreds (and thousands) digits
            if tail != '00':
                total += 3          # "and"
            if head[-1] != '0':
                total += len(digits[int(head[-1])]) + len(powers[100])
            if len(head) == 2 and head[0] != '0':
                total += len(digits[int(head[0])]) + len(powers[1000])
    return total
def mask_orbit_start_and_end(time, flux, orbitgap=1, expected_norbits=2,
                             orbitpadding=6/(24), raise_error=True):
    """
    Ignore the times near the edges of orbits.
    args:
        time, flux
    returns:
        time, flux: with `orbitpadding` days trimmed out
    """
    norbits, groups = lcmath.find_lc_timegroups(time, mingap=orbitgap)
    if norbits != expected_norbits:
        errmsg = 'got {} orbits, expected {}. groups are {}'.format(
            norbits, expected_norbits, repr(groups))
        if not raise_error:
            # Degrade gracefully: report and hand back the untouched arrays.
            print(errmsg)
            print('returning what was passed')
            return time, flux
        raise AssertionError(errmsg)

    keep = np.zeros_like(time).astype(bool)
    for group in groups:
        tg = time[group]
        # Keep only samples strictly inside the padded window of each orbit.
        lower = max(np.min(tg), np.min(tg) + orbitpadding)
        upper = min(np.max(tg) - orbitpadding, np.max(tg))
        keep |= (time > lower) & (time < upper)

    return time[keep], flux[keep]
def cast_args(args):
    """The numbers are stored as strings via docopt. Convert them to numbers."""
    # Only option-style keys ('--foo') with string values are converted;
    # rebinding existing keys while iterating is safe (no size change).
    for key in args:
        value = args[key]
        if key.startswith('--') and isinstance(value, str):
            args[key] = _string_to_num(value)
def to_cnf(expr):
    """
    Convert a propositional logical sentence s to conjunctive normal form.
    That is, of the form ((A | ~B | ...) & (B | C | ...) & ...)
    Examples
    ========
    >>> from sympy.logic.boolalg import to_cnf
    >>> from sympy.abc import A, B, D
    >>> to_cnf(~(A | B) | D)
    And(Or(D, Not(A)), Or(D, Not(B)))
    """
    # Skip the expensive rewrite when the input is already in CNF.
    if is_cnf(expr):
        return expr
    return distribute_and_over_or(eliminate_implications(sympify(expr)))
def plot_contours_for_all_classes(sample: Sample,
                                  segmentation: np.ndarray,
                                  foreground_class_names: List[str],
                                  result_folder: Path,
                                  result_prefix: str = "",
                                  image_range: Optional[TupleFloat2] = None,
                                  channel_index: int = 0) -> List[Path]:
    """
    Creates a plot with the image, the ground truth, and the predicted segmentation overlaid. One plot is created
    for each class, each plotting the Z slice where the ground truth has most pixels.
    :param sample: The image sample, with the photonormalized image and the ground truth labels.
    :param segmentation: The predicted segmentation: multi-value, size Z x Y x X.
    :param foreground_class_names: The names of all classes, excluding the background class.
    :param result_folder: The folder into which the resulting plot PNG files should be written.
    :param result_prefix: A string prefix that will be used for all plots.
    :param image_range: The minimum and maximum image values that will be mapped to the color map ranges.
    If None, use the actual min and max values.
    :param channel_index: The index of the image channel that should be plotted.
    :return: The paths to all generated PNG files.
    """
    check_size_matches(sample.labels[0], segmentation)
    num_classes = sample.labels.shape[0]
    # labels has one channel per class including background, so foreground
    # names must be exactly num_classes - 1.
    if len(foreground_class_names) != num_classes - 1:
        raise ValueError(
            f"Labels tensor indicates {num_classes} classes, but got {len(foreground_class_names)} foreground "
            f"class names: {foreground_class_names}")
    plot_names: List[Path] = []
    image = sample.image[channel_index, ...]
    # Ground truth in solid red, prediction in dashed blue.
    contour_arguments = [{'colors': 'r'}, {'colors': 'b', 'linestyles': 'dashed'}]
    binaries = binaries_from_multi_label_array(segmentation, num_classes)
    for class_index, binary in enumerate(binaries):
        if class_index == 0:
            # Skip the background class: foreground_class_names excludes it.
            continue
        ground_truth = sample.labels[class_index, ...]
        if is_missing_ground_truth(ground_truth):
            continue
        # Plot the Z slice where this class's ground truth covers most pixels.
        largest_gt_slice = get_largest_z_slice(ground_truth)
        labels_at_largest_gt = ground_truth[largest_gt_slice]
        segmentation_at_largest_gt = binary[largest_gt_slice, ...]
        class_name = foreground_class_names[class_index - 1]
        patient_id = sample.patient_id
        if isinstance(patient_id, str):
            patient_id_str = patient_id
        else:
            # Zero-pad numeric patient IDs for stable file name sorting.
            patient_id_str = f"{patient_id:03d}"
        filename_stem = f"{result_prefix}{patient_id_str}_{class_name}_slice_{largest_gt_slice:03d}"
        plot_file = plot_image_and_label_contour(image=image[largest_gt_slice, ...],
                                                 labels=[labels_at_largest_gt, segmentation_at_largest_gt],
                                                 contour_arguments=contour_arguments,
                                                 image_range=image_range,
                                                 plot_file_name=result_folder / filename_stem)
        plot_names.append(plot_file)
    return plot_names
def match_gadgets_phasepoly(g: BaseGraph[VT,ET]) -> List[MatchPhasePolyType[VT]]:
    """Finds groups of phase-gadgets that act on the same set of 4 vertices in order to apply a rewrite based on
    rule R_13 of the paper *A Finite Presentation of CNOT-Dihedral Operators*.

    Returns a list of (target-vertex group, {target-set: gadget or vertex}) matches,
    built greedily from the groups with the most gadgets, without reusing vertices.
    """
    # targets: for each vertex, the target-sets of gadgets acting on it.
    # gadgets: target-set -> (axle vertex, phase vertex) of the gadget.
    targets: Dict[VT,Set[FrozenSet[VT]]] = {}
    gadgets: Dict[FrozenSet[VT], Tuple[VT,VT]] = {}
    inputs = g.inputs()
    outputs = g.outputs()
    for v in g.vertices():
        # Degree-1 internal vertices are the phase-carrying leg of a gadget.
        if v not in inputs and v not in outputs and len(list(g.neighbors(v)))==1:
            if g.phase(v) != 0 and g.phase(v).denominator != 4: continue
            n = list(g.neighbors(v))[0]
            # The gadget's targets are the axle's other neighbors.
            tgts = frozenset(set(g.neighbors(n)).difference({v}))
            if len(tgts)>4: continue
            gadgets[tgts] = (n,v)
            for t in tgts:
                if t in targets: targets[t].add(tgts)
                else: targets[t] = {tgts}
        # A pi/4-multiple phase directly on a vertex acts as a gadget on itself.
        if g.phase(v) != 0 and g.phase(v).denominator == 4:
            if v in targets: targets[v].add(frozenset([v]))
            else: targets[v] = {frozenset([v])}
    # Keep only vertices touched by at least two gadgets.
    targets = {t:s for t,s in targets.items() if len(s)>1}
    matches: Dict[FrozenSet[VT], Set[FrozenSet[VT]]] = {}
    for v1,t1 in targets.items():
        s = t1.difference(frozenset([v1]))
        if len(s) == 1:
            c = s.pop()
            if any(len(targets[v2])==2 for v2 in c): continue
        s = t1.difference({frozenset({v1})})
        # Only consider maximal target-sets (not contained in another).
        for c in [d for d in s if not any(d.issuperset(e) for e in s if e!=d)]:
            if not all(v2 in targets for v2 in c): continue
            if any(v2<v1 for v2 in c): continue # type: ignore
            # Collect candidate vertices connected to c via shared gadgets.
            a = set()
            for t in c: a.update([i for s in targets[t] for i in s if i in targets])
            # Extend c to groups of exactly 4 vertices and count the gadgets
            # fully contained in each group; 8+ gadgets make a match.
            for group in itertools.combinations(a.difference(c),4-len(c)):
                gr = list(group)+list(c)
                b: Set[FrozenSet[VT]] = set()
                for t in gr: b.update([s for s in targets[t] if s.issubset(gr)])
                if len(b)>7:
                    matches[frozenset(gr)] = b
    # Greedily pick vertex-disjoint matches, largest gadget count first.
    m: List[MatchPhasePolyType[VT]] = []
    taken: Set[VT] = set()
    for groupp, gad in sorted(matches.items(), key=lambda x: len(x[1]), reverse=True):
        if taken.intersection(groupp): continue
        m.append((list(groupp), {s:(gadgets[s] if len(s)>1 else list(s)[0]) for s in gad}))
        taken.update(groupp)
    return m
def get_replaceid(fragment):
    """Get the replace id for shared content.

    Expects *fragment* to contain a directive of the form ``:name: value``
    and returns the value after the first such directive.

    :param fragment: text containing a ``:name: value`` directive
    :return: the text following the directive tag
    :raises IndexError: if the fragment contains no directive
    """
    # BUG FIX: the original class [A-z] also matched the ASCII characters
    # between 'Z' and 'a' ("[", "\", "]", "^", "_", "`"); [A-Za-z] matches
    # letters only.
    replaceid = re.findall(r":[A-Za-z]+:\s(.+)", fragment)[0]
    return replaceid
def _tower_fn(is_training, weight_decay, feature, label, data_format,
              num_layers, batch_norm_decay, batch_norm_epsilon):
    """Build computation tower (Resnet).

    Args:
      is_training: true if is training graph.
      weight_decay: weight regularization strength, a float.
      feature: a Tensor.
      label: a Tensor.
      data_format: channels_last (NHWC) or channels_first (NCHW).
      num_layers: number of layers, an int.
      batch_norm_decay: decay for batch normalization, a float.
      batch_norm_epsilon: epsilon for batch normalization, a float.

    Returns:
      A tuple with the loss for the tower, the gradients and parameters, and
      predictions.
    """
    model = cifar10_model.ResNetCifar10(
        num_layers,
        batch_norm_decay=batch_norm_decay,
        batch_norm_epsilon=batch_norm_epsilon,
        is_training=is_training,
        data_format=data_format)
    # Input is always fed channels_last; the model transposes internally
    # when data_format is channels_first.
    logits = model.forward_pass(feature, input_data_format='channels_last')
    tower_pred = {
        'classes': tf.argmax(input=logits, axis=1),
        'probabilities': tf.nn.softmax(logits)
    }

    # Cross-entropy over sparse integer labels, averaged over the batch.
    tower_loss = tf.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=label)
    tower_loss = tf.reduce_mean(tower_loss)

    # Add L2 weight decay over all trainable variables of the tower.
    model_params = tf.trainable_variables()
    tower_loss += weight_decay * tf.add_n(
        [tf.nn.l2_loss(v) for v in model_params])

    tower_grad = tf.gradients(tower_loss, model_params)

    return tower_loss, zip(tower_grad, model_params), tower_pred
def DiscRate(t, dur):
    """Discount rates for the outer projection: the scenario's base rate
    for this duration plus the time-dependent adjustment."""
    base_rate = scen.DiscRate(dur)
    return base_rate + DiscRateAdj(t)
def outlierDivergence(dist1, dist2, alpha):
    """Defines difference between how distributions classify outliers.

    Choose uniformly from Distribution 1 and Distribution 2, then choose an
    outlier point according to the induced probability distribution. Returns
    the probability that said point would be classified differently by the
    other distribution.

    Parameters:
        -dist1 (list or tuple of numbers): Distribution 1
        -dist2 (list or tuple of numbers): Distribution 2
        -alpha: 100*alpha and 100*(1-alpha) are the percentile cutoffs of each
            distribution for classifying values as outliers
    """
    forward = probDiffClass(dist1, dist2, alpha)
    backward = probDiffClass(dist2, dist1, alpha)
    return (forward + backward) / 2
def load_data(root_path, data_path):
    """
    Load a tab-separated data file and standardize its feature columns.

    Each line holds float feature values followed by a string label in the
    last column; labels are mapped to integer indices in first-seen order.

    :param root_path: root path of data
    :param data_path: name of data file (relative to root_path)
    :return: (datas, labels, label_dict) where datas is the standardized
        feature array, labels is an int array of label indices, and
        label_dict maps each label string to its index.
    """
    path = os.path.join(root_path, data_path)
    label_dict = {}
    labels = []
    datas = []
    # BUG FIX: the original opened the file without ever closing it; the
    # context manager guarantees the handle is released even on error.
    with open(path, 'r') as f:
        for line in f:
            fields = line.strip().split('\t')
            # Assign the next free index to any label seen for the first time.
            if fields[-1] not in label_dict:
                label_dict[fields[-1]] = len(label_dict)
            labels.append(int(label_dict[fields[-1]]))
            datas.append(list(map(float, fields[:-1])))
    datas = np.array(datas)
    scaler = StandardScaler()
    scaler.fit(datas)
    return scaler.transform(datas), np.array(labels), label_dict
def getCP2KBasisFromPlatoOrbitalGauPolyBasisExpansion(gauPolyBasisObjs, angMomVals, eleName, basisNames=None, shareExp=True, nVals=None):
	""" Gets a BasisSetCP2K object, with coefficients normalised, from an iter of GauPolyBasis objects in plato format

	Args:
		gauPolyBasisObjs: (iter plato_pylib GauPolyBasis object) Each element is the Plato representation of a basis function
		angMomVals: (iter of int) Angular momentum values for each orbital
		eleName: (str) Label for the element used in this basis set
		basisNames: (iter of str) Names used to specify this basis set in the CP2K input file (more than one allowed)
		shareExp: (Bool, Optional) If True, will try to exploit shared exponents when generating the basis set
		nVals: (iter of int, Optional) n-values paired with each orbital; defaults to 1 for every basis function

	Returns
		outBasis: (BasisSetCP2K Object) Convenient representation of a basis set for CP2K; this is the object that would be parsed from a CP2K basis file

	"""
	if basisNames is None:
		basisNames = ["basis_set_a"]
	if nVals is None:
		nVals = [1 for _ in gauPolyBasisObjs]

	exponentSets = [getCP2KExponentSetFromGauPolyBasis(basisObj, lVal, nVal)
	                for basisObj, lVal, nVal in it.zip_longest(gauPolyBasisObjs, angMomVals, nVals)]
	if shareExp:
		exponentSets = _getExponentSetsWithSharedExponentPartsMerged(exponentSets)

	return BasisSetCP2K(eleName, basisNames, exponentSets)
def longitude_to_utm_epsg(longitude):
    """
    Return the Proj4 EPSG init string for the (northern-hemisphere 326xx)
    UTM zone containing *longitude*, given in degrees.
    """
    # UTM zones are 6 degrees wide, numbered 1..60 starting at -180.
    zone = int(math.floor((longitude + 180) / 6) + 1)
    return f'+init=EPSG:326{zone:02d}'
def updateBarDone(outputlabel):
    """
    Overwrite the previous message in stdout with the tag "Done" and a new message.

    Parameters:
    outputlabel - Required: Message to be printed (str)
    """
    # The wide space-padding overwrites any longer previous line (e.g. a
    # progress bar) left on the terminal.
    print(("[ "+'\033[0;32m'+"DONE "+'\033[0;39m'+"] "+outputlabel + '{:<80}').format(' ') + (" " + '{:<120}').format(' '), flush=True)
    logging.info(outputlabel)
def SupportVectorRegression(X_train, X_test, y_train, y_test, search, save=False):
    """
    Support Vector Regression.

    Can run a grid search to look for the best parameters (search=True) and
    save the model to a file (save=True).

    :param X_train: training features
    :param X_test: test features
    :param y_train: training targets
    :param y_test: test targets
    :param search: if True, run a cross-validated grid search over SVR
        hyperparameters and keep the best estimator
    :param save: if True, pickle the fitted model to model/SVR.pkl
    :return: (predicted, expected) arrays for the test set
    """
    if search:
        # parameter values over which we will search
        parameters = {'C': [0.1, 0.5, 1., 1.5, 2.],
                      'kernel': ['rbf', 'sigmoid', 'poly'],
                      'degree': [3, 5]}
        s = SVR()
        clf = grid_search.GridSearchCV(s, parameters, scoring='r2',
                                       n_jobs=-1, verbose=1, cv=3)
    else:
        clf = SVR(verbose=1)

    print('\nTraining...')
    clf.fit(X_train, y_train)
    print('Done')

    if search:
        print('The best score and estimator:')
        print(clf.best_score_)
        print(clf.best_estimator_)
        print('Best hyperparameters:')
        print(clf.best_params_)
        # BUG FIX: the fitted estimator attribute is `best_estimator_`
        # (trailing underscore); `clf.best_estimator` raised AttributeError.
        clf = clf.best_estimator_

    if save:
        print('Save the SVR model to a pickled file...')
        # Binary mode for pickle output; context manager guarantees close.
        with open('model/SVR.pkl', 'wb') as fp:
            cPickle.dump(clf, fp)

    print('\nPredicting...')
    predicted = clf.predict(X_test)
    expected = y_test.copy()
    print('Done')

    return predicted, expected
def conv_relu_pool(X, conv_params, pool_params):
    """
    Initializes weights and biases and applies 2d conv -> ReLU -> max-pool.

    :param X: input tensor (NHWC).
    :param conv_params: dict with 'shape' (filter shape [h, w, in, out]),
        'strides' and 'padding' for the convolution.
    :param pool_params: dict with 'shape' (ksize), 'strides' and 'padding'
        for the pooling op.
    :return: pooled output tensor.
    """
    filter_shape = conv_params['shape']
    # BUG FIX: the original passed the whole conv_params object as the shape
    # of tf.truncated_normal while also indexing it as a dict elsewhere, so
    # the function could not run; use the 'shape' entry consistently.
    W = tf.Variable(
        tf.truncated_normal(filter_shape, stddev=0.1)
    )
    # One bias per output channel.
    # BUG FIX: the original created `b` but never applied it to the conv.
    b = tf.Variable(tf.constant(0.1, shape=[filter_shape[-1]]))
    conv = tf.nn.conv2d(X, W,
                        strides=conv_params['strides'],
                        padding=conv_params['padding'])
    conv = tf.nn.bias_add(conv, b)
    # Simple ReLU activation function
    conv = tf.nn.relu(conv)
    # Max pooling as configured by pool_params (e.g. 2x2 with stride 2).
    out = tf.nn.max_pool(conv,
                         ksize=pool_params['shape'],
                         strides=pool_params['strides'],
                         padding=pool_params['padding'])
    return out
def compute_node_depths(tree):
    """Return a dict mapping each leaf's taxon label to the number of its
    labeled ancestors."""
    depths = {}
    for leaf in tree.leaf_node_iter():
        labeled_ancestors = sum(1 for anc in leaf.ancestor_iter() if anc.label)
        depths[leaf.taxon.label] = labeled_ancestors
    return depths
def create_device(hostname, address, username="root", password=""):
    """Create and return a DeviceInfo struct.

    :param hostname: device host name
    :param address: network address of the device
    :param username: login user name (defaults to "root")
    :param password: login password (defaults to empty)
    :return: a DeviceInfo built from the four values, in positional order
    """
    return DeviceInfo(hostname, address, username, password)
def max_box(box1, box2):
    """
    Return the combination of two bounding boxes, taking the extreme of
    each paired offset: the minimum when the pair sums to <= 0 (lower
    bounds), the maximum otherwise (upper bounds).
    """
    def extreme(offsets):
        return min(offsets) if sum(offsets) <= 0 else max(offsets)

    combined = []
    for dim_a, dim_b in zip(box1, box2):
        combined.append(tuple(extreme(pair) for pair in zip(dim_a, dim_b)))
    return tuple(combined)
def homogenize(a, w=1.0):
    """
    Append a constant column of *w* (cast to a's dtype) to the 2-D array *a*.

    Example:
        a=[[a00, a01], [a10, a11]], w=1
        ->
        [[a00, a01, 1], [a10, a11, 1]]
    """
    fill_column = np.full((len(a), 1), w, a.dtype)
    return np.concatenate([a, fill_column], axis=1)
def find_similar_term(term, dictionary):
    """
    Return the terms in *dictionary* whose Damerau-Levenshtein distance to
    *term* is at most 2.
    https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
    """
    return [candidate for candidate in dictionary
            if textdistance.damerau_levenshtein.distance(candidate, term) <= 2]
def check_fields(cls, model, opts, label, fields):
    """ Checks that every name listed in *fields* is a valid model field
    on *model* (per opts.fields); raises ImproperlyConfigured otherwise. """
    valid_names = {f.name for f in opts.fields}
    for field in fields:
        if field not in valid_names:
            raise ImproperlyConfigured('"%s.%s" refers to field "%s" that is missing in model "%s".'
                % (cls.__name__, label, field, model.__name__))
def calc_sign(string):
    """str/any->str
    Return the hex MD5 digest of str(string).
    From: Biligrab, https://github.com/cnbeining/Biligrab
    MIT License"""
    digest = hashlib.md5(str(string).encode('utf-8'))
    return digest.hexdigest()
def compile_circuit(
        circuit: cirq.Circuit,
        *,
        device: cirq.google.xmon_device.XmonDevice,
        compiler: Callable[[cirq.Circuit], cirq.Circuit] = None,
        routing_algo_name: Optional[str] = None,
        router: Optional[Callable[..., ccr.SwapNetwork]] = None,
) -> Tuple[cirq.Circuit, Dict[cirq.ops.Qid, cirq.ops.Qid]]:
    """Compile the given model circuit onto the given device. This uses a
    different compilation method than described in
    https://arxiv.org/pdf/1811.12926.pdf Appendix A. The latter goes through a
    7-step process involving various decompositions, routing, and optimization
    steps. We route the model circuit and then run a series of optimizers on it
    (which can be passed into this function).

    Args:
        circuit: The model circuit to compile.
        device: The device to compile onto.
        compiler: An optional function to deconstruct the model circuit's
            gates down to the target devices gate set and then optimize it.
        routing_algo_name: Name of the routing algorithm to use; defaults
            to 'greedy' when neither this nor `router` is provided.
        router: An optional routing function producing a ccr.SwapNetwork;
            used instead of the named algorithm when given.

    Returns: A tuple where the first value is the compiled circuit and the
        second value is the final mapping from the model circuit to the compiled
        circuit. The latter is necessary in order to preserve the measurement
        order.
    """
    compiled_circuit = circuit.copy()

    # Swap Mapping (Routing). Ensure the gates can actually operate on the
    # target qubits given our topology.
    if router is None and routing_algo_name is None:
        routing_algo_name = 'greedy'
    swap_network = ccr.route_circuit(compiled_circuit,
                                     ccr.xmon_device_to_graph(device),
                                     router=router,
                                     algo_name=routing_algo_name)
    compiled_circuit = swap_network.circuit

    # Compile. This should decompose the routed circuit down to a gate set that
    # our device supports, and then optimize. The paper uses various
    # compiling techniques - because Quantum Volume is intended to test those
    # as well, we allow this to be passed in. This compiler is not allowed to
    # change the order of the qubits.
    if compiler:
        compiled_circuit = compiler(compiled_circuit)

    return compiled_circuit, swap_network.final_mapping()
def remove_prefix(utt, prefix):
    """
    Check that utt begins with prefix+" ", and then remove.

    Inputs:
      utt: string
      prefix: string
    Returns:
      new utt: utt with the prefix+" " removed.
    Raises:
      AssertionError: if utt does not start with prefix+" ".
    """
    expected = prefix + " "
    # Raise explicitly instead of using an `assert` statement so the check
    # still runs when Python is invoked with -O (which strips asserts).
    # AssertionError is kept as the exception type for caller compatibility.
    if utt[: len(expected)] != expected:
        print("ERROR: utterance '%s' does not start with '%s '" % (utt, prefix))
        print(repr(utt[: len(expected)]))
        print(repr(expected))
        raise AssertionError(
            "utterance %r does not start with %r" % (utt, expected))
    return utt[len(expected):]
def table(command):
    """
    Creates a report with a heading and a line of output showing
    the average, maximum, and minimum temperatures
    for EACH period within the period specified in the command.

    `command` is "table" (one row per year), "table MMM_YYYY" (one row per
    day of that month) or "table YYYY" (one row per month). Reads the
    module-level `temperature.dataframe`, which has 'Date' (D-MMM-YYYY)
    and 'Temperature' columns.
    """
    #format of print from instruction pdf
    print("{:>12} {:>5} {:>5} {:>5}".format('Period', 'Avg', 'Max', 'Min'))
    print("")
    df = temperature.dataframe
    query = command[6:]
    if(command=='table'): #ex: report? table
        year_list = [] #store unique year value of df (Ex: '2008','2009','2010' etc)
        start = df['Date'][0]
        start = int(start[-4:]) ##1-Jan-2008 -->2008
        end = df['Date'][len(df)-1]
        end = int(end[-4:]) ##31-Dec-2015 -->2015
        for i in range(start,end+1):
            year_list.append(str(i)) #ex:[2008,2009,----,2015]
        for i in range(len(year_list)):
            #rslt-df selects rows where Date contains that year: ex:if i=2010 then all DD-MM-2010 selected
            rslt_df = df.loc[df['Date'].str.contains(year_list[i])]
            period = year_list[i]
            avg = rslt_df['Temperature'].mean() #finding avg of selected rows
            maximum = rslt_df['Temperature'].max() #finding max of selected rows
            minimum = rslt_df['Temperature'].min() ##finding min of selected rows
            #printing values
            print("{:>12} {:>5.1f} {:>5.1f} {:>5.1f}".format(period, avg, maximum, minimum))
    elif(len(query)>=8): # report : table MMM_YYYY
        #month_dict stores no. of days of a month
        month_dict = {'Jan':31,'Feb':28,'Mar':31,'Apr':30,'May':31,'Jun':30,'Jul':31,'Aug':31,'Sep':30,
                      'Oct':31,'Nov':30,'Dec':31}
        month = query[0:3]
        #print(month)
        year = int(query[-4:])
        #print(year)
        #correcting no. of days for February for that input year: 28/29-leap-year
        if(month=='Feb'):
            if(year%400==0):
                month_dict['Feb']=29
            elif(year%4==0 and year%100!=0):
                month_dict['Feb']=29
        #print(month_dict)
        day_list = []
        val = month_dict[month]
        for i in range(1,val+1):
            period = str(i)+'-'+query #'DD-MM-YYYY'
            #rslt-df selects rows where Date is specific 'DD-MM-YYYY'
            rslt_df = df.loc[df['Date']==period]
            avg = rslt_df['Temperature'].mean() #finding avg temp of selected rows
            maximum = rslt_df['Temperature'].max() #finding max temp of selected rows
            minimum = rslt_df['Temperature'].min() #finding min temp of selected rows
            if(len(period)==10):
                period = '0'+period #D-MM-YYYY -->DD-MM-YYYY
            print("{:>12} {:>5.1f} {:>5.1f} {:>5.1f}".format(period, avg, maximum, minimum))
    else: # report : table YYYY
        month_list = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
        for i in range(len(month_list)):
            period = month_list[i]+'-'+query #MM-YYYY
            #rslt-df selects rows where Date contains specific 'MM-YYYY'
            rslt_df = df.loc[df['Date'].str.contains(period)]
            avg = rslt_df['Temperature'].mean() #finding avg temp of selected rows
            maximum = rslt_df['Temperature'].max() #finding max temp of selected rows
            minimum = rslt_df['Temperature'].min() ##finding min temp of selected rows
            print("{:>12} {:>5.1f} {:>5.1f} {:>5.1f}".format(period, avg, maximum, minimum))
    print("")
def format_ucx(name, idx):
    """
    Format *name* and *idx* as a UCX collider name, e.g. ``UCX_hull_03``.
    The index is zero-padded to at least two digits.
    """
    return "UCX_%s_%s" % (name, str(idx).zfill(2))
def get_connection():
    """
    Return a connection to the database and cache it on the `g` object.

    Generally speaking, each app context has its own connection to the
    database; these are destroyed when the app context goes away (ie, when the
    server is done handling that request).

    :return: the cached (or newly created) mariadb connection for this
        app context.
    """
    if 'db_connection' not in g:
        # mariadb.connect might throw an error if it can't connect to the
        # database, but that's okay--Flask will just turn that into an HTTP
        # 500 response, which is the correct behavior in this case.
        g.db_connection = mariadb.connect(
            user=current_app.config['DB_USER'],
            password=current_app.config['DB_PASSWORD'],
            host=current_app.config['DB_HOST'],
            port=current_app.config['DB_PORT'],
            database=current_app.config['DB_NAME'],
        )
    return g.db_connection
def first_location_of_minimum(x):
    """
    Returns the first location of the minimal value of x. The position is calculated relatively to the length of x.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature (a fraction in [0, 1)), or NaN for
        empty input
    :return type: float
    """
    x = np.asarray(x)
    # np.nan replaces the np.NaN alias, which was removed in NumPy 2.0.
    return np.argmin(x) / len(x) if len(x) > 0 else np.nan
def _positive_int(integer_string, strict=False, cutoff=None):
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret == -1:
return -1
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
return min(ret, cutoff)
return ret | 28,453 |
def x1_0_mult(guess, slip):
    """
    Compute x1_0 element-wise
    :param guess: (np.array) [odds]
    :param slip: (np.array) [odds]
    :return: np.array
    """
    numerator = (1.0 + guess) / (guess * (1.0 + slip))
    return numerator / x0_mult(guess, slip)
def compute_bootstrap_distribution(exp):
    """ Computes bootstrap distributions for alpha and beta band power for all
    three stimulation conditions.

    For each condition, it loads that condition's raw tfr and pre-computed
    bootstrap sampled indices. It then runs through each re-sampled index
    and computes the re-sampled band power to create a bootstrap distribution.
    Finally, it computes a bootstrap p-value testing for post-stimulation
    toi power differences from 0.

    Args:
        exp: The experiment to collect data for. 'main' or 'saline'

    Returns:
        None. It saves all of the bootstrap information, including the
        band power estimates, the band power bootstrap distributions, and
        the post-stimulation toi bootstrap p-values into a compressed
        numpy file.
    """
    # Globals are set so the joblib worker wrapper
    # (compute_bootstrap_wrapper) can read them in the child processes.
    global power, times, freqs, chs, bootstrap_cond_ix, config, exper
    exper = exp

    # load in configurations
    with open('./experiment_config.json', 'r') as f:
        config = json.load(f)

    # load in pre-computes bootstrap re-sample indices
    f = '../data/stats/%s_experiment/condition_bootstrap_indices.npz' % exp
    bootstrap_indices = np.load(f)
    num_bootstrap_samples = bootstrap_indices['num_samples']

    for condition in config['conditions']:
        print('Computing Bootstrap Distribution for Condition: %s' % condition)

        power, chs, times, freqs = load_power_data(exp, condition)

        # compute the base band power (all trials, no re-sampling)
        base_ix = np.arange(power.shape[0])
        alpha_power, beta_power = compute_bootstrap_sample(base_ix, power,
                                                           times, freqs, chs,
                                                           config, exp)

        # loop through all bootstrap samples in parallel
        bootstrap_cond_ix = bootstrap_indices[condition]
        par = Parallel(n_jobs=config['n_jobs'])
        bootstrap_samples = par(delayed(compute_bootstrap_wrapper)(ix)
                                for ix in range(num_bootstrap_samples))

        # collect all the bootstrap samples into single matrix
        alpha_bootstrap_samples = np.vstack([s[0] for s in bootstrap_samples])
        beta_bootstrap_samples = np.vstack([s[1] for s in bootstrap_samples])

        # compute p-values over the post-stimulation time window of interest
        alpha_p = compute_bootstrap_p_value(alpha_power,
                                            alpha_bootstrap_samples,
                                            times, config['toi'])
        beta_p = compute_bootstrap_p_value(beta_power,
                                           beta_bootstrap_samples,
                                           times, config['toi'])

        # save
        f = '../data/stats/%s_experiment/%s_bootstrap_info.npz' % (exp,
                                                                   condition)
        np.savez_compressed(f, alpha=alpha_power, beta=beta_power,
                            alpha_dist=alpha_bootstrap_samples,
                            beta_dist=beta_bootstrap_samples,
                            alpha_p=alpha_p, beta_p=beta_p, times=times)
def get_xblock_app_config():
    """
    Get whichever of the above AppConfig subclasses is active.

    Looks the config up in Django's app registry by the shared
    ``XBlockAppConfig.label``, so the deployed subclass is returned.
    """
    return apps.get_app_config(XBlockAppConfig.label)
def remove_experiment_requirement(request, object_id, object_type):
    """Removes the requirement from the experiment, expects requirement_id (PK of req object)
    to be present in request.POST.

    Returns a JsonResponse with {'deleted': bool} indicating whether a
    requirement was actually removed.
    """
    if request.POST:
        assert 'requirement_id' in request.POST
        requirement_id = request.POST['requirement_id']
        exp_or_package = get_package_or_experiment(request, object_type, object_id)
        requirement = exp_or_package.requirements.filter(pk=requirement_id)
        if requirement:
            requirement = requirement[0]
            # Detach and delete inside one transaction so a failure cannot
            # leave the owner saved but the requirement row still present.
            with transaction.atomic():
                exp_or_package.requirements.remove(requirement)
                exp_or_package.save()
                logger.info("deleted dependency %s from experiment %s", requirement, exp_or_package)
                requirement.delete()
            return JsonResponse({'deleted': True})
    return JsonResponse({'deleted': False})
def _k_hot_from_label_names(labels: List[str], symbols: List[str]) -> List[int]:
"""Converts text labels into symbol list index as k-hot."""
k_hot = [0] * len(symbols)
for label in labels:
try:
k_hot[symbols.index(label)] = 1
except IndexError:
raise ValueError(
'Label %s did not appear in the list of defined symbols %r' %
(label, symbols))
return k_hot | 28,458 |
def upload(training_dir, algorithm_id=None, writeup=None, api_key=None, ignore_open_monitors=False):
    """Upload the results of training (as automatically recorded by your
    env's monitor) to OpenAI Gym.

    Args:
        training_dir (Optional[str]): A directory containing the results of a training run.
        algorithm_id (Optional[str]): An algorithm id indicating the paricular version of the algorithm (including choices of parameters) you are running (visit https://gym.openai.com/algorithms to create an id)
        writeup (Optional[str]): A Gist URL (of the form https://gist.github.com/<user>/<id>) containing your writeup for this evaluation.
        api_key (Optional[str]): Your OpenAI API key. Can also be provided as an environment variable (OPENAI_GYM_API_KEY).
        ignore_open_monitors (bool): If False (the default), refuse to
            upload while any monitor is still open.

    Returns:
        The created resource.Evaluation.
    """
    if not ignore_open_monitors:
        open_monitors = monitoring._open_monitors()
        if len(open_monitors) > 0:
            envs = [m.env.spec.id if m.env.spec else '(unknown)' for m in open_monitors]
            raise error.Error("Still have an open monitor on {}. You must run 'env.monitor.close()' before uploading.".format(', '.join(envs)))

    env_info, training_episode_batch, training_video = upload_training_data(training_dir, api_key=api_key)
    env_id = env_info['env_id']
    training_episode_batch_id = training_video_id = None
    if training_episode_batch:
        training_episode_batch_id = training_episode_batch.id
    if training_video:
        training_video_id = training_video.id

    if logger.level <= logging.INFO:
        if training_episode_batch_id is not None and training_video_id is not None:
            logger.info('[%s] Creating evaluation object from %s with learning curve and training video', env_id, training_dir)
        elif training_episode_batch_id is not None:
            logger.info('[%s] Creating evaluation object from %s with learning curve', env_id, training_dir)
        elif training_video_id is not None:
            logger.info('[%s] Creating evaluation object from %s with training video', env_id, training_dir)
        else:
            # BUG FIX: the original mixed a %-style "[%s]" placeholder with
            # str.format, so the message showed a literal "[%s]" and silently
            # dropped training_dir from the output.
            raise error.Error("[{}] You didn't have any recorded training data in {}. Once you've used 'env.monitor.start(training_dir)' to start recording, you need to actually run some rollouts. Please join the community chat on https://gym.openai.com if you have any issues.".format(env_id, training_dir))

    evaluation = resource.Evaluation.create(
        training_episode_batch=training_episode_batch_id,
        training_video=training_video_id,
        env=env_info['env_id'],
        algorithm={
            'id': algorithm_id,
        },
        writeup=writeup,
        gym_version=env_info['gym_version'],
        api_key=api_key,
    )

    logger.info(
        """
****************************************************
You successfully uploaded your evaluation on %s to
OpenAI Gym! You can find it at:

    %s

****************************************************
        """.rstrip(), env_id, evaluation.web_url())

    return evaluation
def partition_analysis(analysis: str) -> Tuple[List[FSTTag], FSTLemma, List[FSTTag]]:
    """
    :return: the tags before the lemma, the lemma itself, the tags after the lemma
    :raise ValueError: when the analysis is not parsable.

    >>> partition_analysis('PV/e+fakeword+N+I')
    (['PV/e'], 'fakeword', ['N', 'I'])
    >>> partition_analysis('fakeword+N+I')
    ([], 'fakeword', ['N', 'I'])
    >>> partition_analysis('PV/e+PV/ki+atamihêw+V+TA+Cnj+1Pl+2SgO')
    (['PV/e', 'PV/ki'], 'atamihêw', ['V', 'TA', 'Cnj', '1Pl', '2SgO'])
    """
    match = partition_pattern.match(analysis)
    if match is None:
        raise ValueError(f"analysis not parsable: {analysis}")
    pre, lemma, post = match.groups()

    def as_tags(chunk: str) -> List[FSTTag]:
        # Split on '+', dropping empty pieces from leading/trailing separators.
        return [FSTTag(piece) for piece in chunk.split("+") if piece]

    return (as_tags(pre), FSTLemma(lemma), as_tags(post))
def test_world_extent_mixed_flipped():
    """Test world extent after adding data with a flip."""
    # Flipped data results in a negative scale value which should be
    # made positive when taking into consideration for the step size
    # calculation
    np.random.seed(0)
    layers = LayerList()
    # The affine swaps the two axes and flips one of them.
    layer = Image(
        np.random.random((15, 15)), affine=[[0, 1, 0], [1, 0, 0], [0, 0, 1]]
    )
    layers.append(layer)

    np.testing.assert_allclose(layer._data_to_world.scale, (1, -1))
    # The extent step must use the absolute value of the scale.
    np.testing.assert_allclose(layers.extent.step, (1, 1))
def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.

    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.

    :param data_type: the Python type to register the representer for
    :param representer: callable ``(dumper, data) -> node``
    :param Dumper: the Dumper class to register on (defaults to the
        module-level Dumper)
    """
    Dumper.add_representer(data_type, representer)
def dummy_dictionary(
    dummy_tokens=3,
    additional_token_list=None,
    dictionary_cls=pytorch_translate_dictionary.Dictionary,
):
    """Build a dictionary holding `dummy_tokens` generated ``token_<i>``
    symbols, followed by every string in `additional_token_list`."""
    dictionary = dictionary_cls()
    for index in range(dummy_tokens):
        dictionary.add_symbol(f"token_{index}")
    for extra_token in (additional_token_list or []):
        dictionary.add_symbol(extra_token)
    dictionary.finalize(padding_factor=-1)
    return dictionary
def lies_in_epsilon(x: Num, c: Num, e: Num) -> bool:
    """
    Check whether *x* lies in the closed interval [c - e, c + e].

    :param x: the value to test
    :param c: the centre of the epsilon-neighbourhood
    :param e: the half-width (epsilon) of the neighbourhood
    :return: True if x is inside the interval (boundaries inclusive),
        otherwise False.
    """
    # A chained comparison states the interval test directly and returns
    # the boolean itself instead of branching to explicit True/False.
    return c - e <= x <= c + e
def decimal_to_digits(decimal, min_digits=None):
    """
    Return the number of digits to the first nonzero decimal.

    Parameters
    -----------
    decimal: float
    min_digits: int, minimum number of digits to return

    Returns
    -----------
    digits: int, number of digits to the first nonzero decimal
    """
    magnitude = np.log10(decimal)
    digits = abs(int(magnitude))
    if min_digits is None:
        return digits
    # Clamp into [min_digits, 20] when a floor was requested.
    return np.clip(digits, min_digits, 20)
def test_remove_identity_key_with_valid_application_input():
    """
    Given:
        - Dictionary with three nested objects which the creator type is "application"
    When
        - When Parsing outputs to context
    Then
        - Dictionary to remove to first key and add it as an item in the dictionary
    """
    # `arguments` is a module-level fixture dict defined elsewhere in this
    # test module.
    res = remove_identity_key(
        arguments["remove_identifier_data_application_type"]["CreatedBy"]
    )
    assert len(res.keys()) > 1 and res.get("Type")
    assert res["ID"] == "test"
def lcs(a, b):
    """
    Compute the length of the longest common subsequence between two sequences.

    Time complexity: O(len(a) * len(b))
    Space complexity: O(min(len(a), len(b)))
    """
    # Standard LCS dynamic programming, keeping a single row of the table.
    # Lay the shorter sequence along the columns to minimise memory use.
    if len(a) < len(b):
        a, b = b, a
    # An empty column sequence means an empty LCS.
    if not b:
        return 0

    # row[j] holds the LCS length for the prefixes processed so far.
    row = [0] * len(b)
    last = 0
    for item_a in a:
        prev_left = 0
        prev_diag = 0
        for col, item_b in enumerate(b):
            prev_up = row[col]
            if item_a == item_b:
                current = prev_diag + 1
            else:
                current = prev_left if prev_left >= prev_up else prev_up
            row[col] = current
            prev_left = current
            prev_diag = prev_up
        last = prev_left
    # `last` is the bottom-right cell of the (virtual) full table.
    return last
def sync_blocks(rpc_connections, *, wait=1, timeout=60, logger=None):
    """
    Wait until everybody has the same tip.

    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.

    :param rpc_connections: iterable of node RPC proxies to synchronize
    :param wait: per-poll wait in seconds (passed to waitforblockheight in ms)
    :param timeout: total seconds to wait before raising AssertionError
    :param logger: optional logger for progress output
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    maxheight = max(x.getblockcount() for x in rpc_connections)
    if logger:
        logger.info("maxheight: " + str(maxheight))
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            # Heights match; also require identical hashes (no fork).
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                                 "".join("\n  {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
                         maxheight, "".join("\n  {!r}".format(tip) for tip in tips)))
def paginate_years(year):
    """Return a pagination tuple of years around *year*:
    (START_YEAR, False, previous, current, next, False, settings.CURRENT_YEAR).

    # NOTE(review): the False entries presumably mark gaps/ellipses in the
    # pagination widget — confirm against the consuming template.
    """
    START_YEAR = 2020 # first year that budgets were submitted using this system
    y = int(year)
    return (START_YEAR, False, y-1, y, y+1, False, settings.CURRENT_YEAR)
def gene_rooflines_old():
    """Obsolete. Based on Kai's Jupyter notebook timings and estimated values,
    before having measured results from NCU."""
    labels = ['dgdxy ij_deriv', 'dgdxy update_rhs', 'dgdxy fused', 'dzv']
    time = [1.04e-3, 1.287e-3, 1.091e-3, 1.258e-3]
    flops = [396361728, 132120576, 528482304, 825753600]
    dram = [830472192, 1056964608, 830472192, 651886592]
    gfps = [381.117, 102.658, 484.402, 656.402]
    bw = [798.531, 821.262, 761.203, 518.193]
    ai = [0.477, 0.125, 0.636, 1.267]
    theory_ai = [0.5, 0.125, 0.67, 1.17]
    # NB: the NERSC roofline function uses base 2, these numbers from Kai's
    # notebook are base 10
    bw = [b*1.0e9/2**30 for b in bw]
    gfps = [f*1.0e9/2**30 for f in gfps]
    # BUG FIX: the original passed the undefined name `gflops` (NameError);
    # the converted flop-rate list computed above is `gfps`.
    roofline('gene-rooflines', gfps, ai, None, None, labels, 'HBM')
    #latex_table('gene-rooflines_HBM.tex',
    #            labels, time, gfps, bw, ai, theory_ai)
def create(self, node=None):
    """Test the RBAC functionality of the `CREATE LIVE VIEW` command.

    Registers the individual RBAC scenarios, each with ClickHouse server
    log instrumentation.

    # NOTE(review): the `node` parameter is unused here — confirm whether
    # the scenarios are expected to receive it via the test context.
    """
    Scenario(
        run=create_without_create_view_privilege, setup=instrument_clickhouse_server_log
    )
    Scenario(
        run=create_with_create_view_privilege_granted_directly_or_via_role,
        setup=instrument_clickhouse_server_log,
    )
    Scenario(
        run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role,
        setup=instrument_clickhouse_server_log,
    )
    Scenario(
        run=create_without_source_table_privilege,
        setup=instrument_clickhouse_server_log,
    )
    Scenario(
        run=create_with_source_table_privilege_granted_directly_or_via_role,
        setup=instrument_clickhouse_server_log,
    )
    Scenario(
        run=create_with_subquery_privilege_granted_directly_or_via_role,
        setup=instrument_clickhouse_server_log,
    )
    Scenario(
        run=create_with_join_query_privilege_granted_directly_or_via_role,
        setup=instrument_clickhouse_server_log,
    )
    Scenario(
        run=create_with_join_subquery_privilege_granted_directly_or_via_role,
        setup=instrument_clickhouse_server_log,
    )
    Scenario(
        run=create_with_nested_views_privilege_granted_directly_or_via_role,
        setup=instrument_clickhouse_server_log,
    )
def wait_for_service_tasks_state(
        service_name,
        expected_task_count,
        expected_task_states,
        timeout_sec=120
):
    """ Returns once the service has at least N tasks in one of the specified state(s)

    :param service_name: the service name
    :type service_name: str
    :param expected_task_count: the expected number of tasks in the specified state(s)
    :type expected_task_count: int
    :param expected_task_states: the expected state(s) for tasks to be in, e.g. 'TASK_RUNNING'
    :type expected_task_states: [str]
    :param timeout_sec: duration to wait
    :type timeout_sec: int

    :return: the duration waited in seconds
    :rtype: int
    """
    def _enough_tasks_in_state():
        return task_states_predicate(service_name, expected_task_count, expected_task_states)

    return time_wait(_enough_tasks_in_state, timeout_seconds=timeout_sec)
def eval_add(lst):
    """Evaluate an addition expression. For addition rules, the parser will return
    [number, [[op, number], [op, number], ...]]

    Start from the leading number, then walk the (operator, operand) pairs,
    adding or subtracting according to each operator.
    """
    total = lst[0]
    for pair in lst[1]:
        if pair[0] == '+':
            total += pair[1]
        else:
            total -= pair[1]
    return total
def run_cmd(cmdStr):
    """Run a command through the shell and check its exit status.

    Inspect cmdStr carefully before calling: it is executed with
    shell=True, so untrusted input would allow shell injection.

    :param cmdStr: the shell command line to execute
    :return: the command's stdout (bytes on Python 3)
    """
    cmdProc = subprocess.Popen(
        cmdStr,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    stdout, stderr = cmdProc.communicate()
    # Treat a non-zero exit code *or* any stderr output as failure.
    if (0 != cmdProc.returncode or stderr):
        # BUG FIX: communicate() yields bytes on Python 3; the original
        # concatenated bytes with str (" ") and raised TypeError. Decode
        # only for the human-readable error message.
        parts = []
        if stdout:
            parts.append(stdout.decode(errors='replace'))
        if stderr:
            parts.append(stderr.decode(errors='replace'))
        exit_with_error(" ".join(parts))
    return stdout
def is_password_valid(plaintextpw: str, storedhash: str) -> bool:
    """
    Checks if a plaintext password matches a stored hash.

    Uses ``bcrypt``. The stored hash includes its own incorporated salt.

    # NOTE(review): modern bcrypt releases require bytes arguments on
    # Python 3; passing str here will raise TypeError unless a py2-style
    # wrapper is installed — confirm the bcrypt package/version in use.
    # NOTE(review): the final `==` is not a constant-time comparison;
    # consider hmac.compare_digest if timing attacks are a concern.
    """
    # Upon CamCOPS from MySQL 5.5.34 (Ubuntu) to 5.1.71 (CentOS 6.5), the
    # VARCHAR was retrieved as Unicode. We needed to convert that to a str.
    # For Python 3 compatibility, we just str-convert everything, avoiding the
    # unicode keyword, which no longer exists.
    if storedhash is None:
        storedhash = ""
    storedhash = str(storedhash)
    if plaintextpw is None:
        plaintextpw = ""
    plaintextpw = str(plaintextpw)
    try:
        h = bcrypt.hashpw(plaintextpw, storedhash)
    except ValueError:  # e.g. ValueError: invalid salt
        return False
    return h == storedhash
def find_item(item_to_find, items_list):
    """
    Return True if an item is found in the item list.

    :param item_to_find: (mention, id) pair to be found
    :param items_list: list of (mention, id) pairs to search in
    :return: bool
    """
    # Short-circuit on the first match instead of scanning the whole
    # list after a hit, as the original loop did.
    return any(
        item[1] == item_to_find[1] and mention_match(item[0], item_to_find[0])
        for item in items_list
    )
def init_output_masks_in_graph(graph: NNCFGraph, nodes: List):
    """
    Initialize masks in graph for the mask propagation algorithm.

    :param graph: NNCF graph whose node masks are (re)initialized
    :param nodes: list with pruned nodes
    """
    # Drop any masks left over from a previous propagation pass.
    for graph_node in graph.get_all_nodes():
        graph_node.data.pop('input_masks', None)
        graph_node.data.pop('output_mask', None)

    # Seed the output mask of every pruned node from its pruning operand.
    for pruned_info in nodes:
        target_node = graph.get_node_by_id(pruned_info.nncf_node_id)
        target_node.data['output_mask'] = pruned_info.operand.binary_filter_pruning_mask
def set_cron_job_schedule(cron_tpl: Dict[str, Any], schedule: str) -> None:
    """
    Set the cron job schedule on ``cron_tpl`` in place, if specified;
    otherwise leave the template's default schedule untouched.

    The return annotation was ``NoReturn``, which means "never returns
    normally" — this function returns ``None``, so the annotation is fixed.

    :param cron_tpl: cron job template dict, mutated in place
    :param schedule: cron schedule expression, e.g. ``"*/5 * * * *"``
    """
    # An empty/None schedule means "keep whatever the template already has".
    if not schedule:
        return
    cron_spec = cron_tpl.setdefault("spec", {})
    cron_spec["schedule"] = schedule
def validate_target_types(target_type):
    """
    Target types validation rule.

    Property: SecretTargetAttachment.TargetType

    :param target_type: candidate CloudFormation target type string
    :return: ``target_type`` unchanged, if valid
    :raises ValueError: if ``target_type`` is not a supported type
    """
    VALID_TARGET_TYPES = (
        "AWS::RDS::DBInstance",
        "AWS::RDS::DBCluster",
        "AWS::Redshift::Cluster",
        "AWS::DocDB::DBInstance",
        "AWS::DocDB::DBCluster",
    )
    if target_type in VALID_TARGET_TYPES:
        return target_type
    raise ValueError(
        "Target type must be one of : %s" % ", ".join(VALID_TARGET_TYPES)
    )
def find_prefixed(root, prefix):
    """Yield all entries of ``root`` whose names begin with ``prefix``, case insensitive."""
    wanted = prefix.lower()
    yield from (entry for entry in root.iterdir()
                if entry.name.lower().startswith(wanted))
def cprint(*objects, **kwargs):
    """Apply Color formatting to output in terminal.

    Behaves like the builtin ``print`` with an added ``color`` keyword
    argument, e.g.::

        cprint("data to print", color="red", sep="|")

    Available colors: black, red, green, yellow, blue, pink, cyan,
    white, no-color.
    """
    ansi = {
        "black": "\033[0;30m",
        "red": "\033[0;31m",
        "green": "\033[0;92m",
        "yellow": "\033[0;93m",
        "blue": "\033[0;34m",
        "pink": "\033[0;95m",
        "cyan": "\033[0;36m",
        "white": "\033[0;37m",
        "no-color": "\033[0m"
    }
    chosen = kwargs.pop('color', 'no-color')
    # Bracket the printed objects between the colour code and a reset
    # code; remaining kwargs (sep, end, file, ...) pass through to print.
    return print(ansi[chosen], *objects, ansi['no-color'], **kwargs)
def handle_authbuf(tsuite, ssh, res_type):
    """Deal with the transfer of authbuf keys.

    Returns True if the authbuf key needs to be pulled back after
    launching this object; returns False after copying an
    already-obtained key to the remote host.

    Args:
      tsuite: tsuite runtime.
      ssh: remote server connection
      res_type: slash2 resource type."""
    # Lazily initialise the tracking flag the first time a tsuite is seen.
    if not hasattr(tsuite, "authbuf_obtained"):
        tsuite.authbuf_obtained = False
    if res_type == "mds" and not tsuite.authbuf_obtained:
        # First MDS: the key does not exist locally yet, so it must be
        # pulled from this host after launch.
        log.debug("First MDS found at {0}; Copying authbuf key after launch".format(ssh.host))
        return True
    else:
        # NOTE(review): nothing in this function ever sets
        # authbuf_obtained to True, so this assert presumably relies on a
        # caller flipping the flag after the pull — TODO confirm.
        assert(tsuite.authbuf_obtained != False)
        log.debug("Authbuf key already obtained. Copying to {0}".format(ssh.host))
        location = path.join(tsuite.build_dirs["datadir"], "authbuf.key")
        try:
            # Temporarily loosen local permissions so the key can be read,
            # copy it to the same path remotely, then lock it down again
            # both locally and on the remote host.
            os.system("sudo chmod 0666 {0}".format(location))
            ssh.copy_file(location, location)
            os.system("sudo chmod 0400 {0}".format(location))
            ssh.run("sudo chmod 0400 {0}".format(location))
        except IOError:
            # Copy failure is fatal for the test suite run.
            log.critical("Failed copying authbuf key to {0}".format(ssh.host))
            tsuite.shutdown()
        return False | 28,482 |
def clean(tokens: List[str]) -> List[str]:
    """
    Return a list of unique tokens with all stopwords removed.

    Input(s):
    1) tokens - List containing all tokens.

    Output(s):
    1) unique_tokens - List of unique tokens with all stopwords removed.
    """
    # De-duplicate first, then filter.  The original implementation
    # called list.pop(word) — pop() takes an index, so passing a str
    # raises TypeError — and also mutated the list while iterating it.
    return [word for word in set(tokens) if word not in hindi_stopwords]
def current_sessions(macfile):
    """
    Show currently open sessions.

    Reads the persisted state, then prints one row per MAC address whose
    latest session record has no end time (i.e. the session is still open).

    :param macfile: path to the persisted state file
    """
    state, mac_to_name = read_state(macfile)
    data = []
    for mac in state['macs']:
        try:
            name = mac_to_name[mac]
        except KeyError:
            name = '-'  # (unknown)
        # Only the most recent record per MAC can still be open.
        info = state['macs'][mac][-1]
        # `is None` instead of `== None`: identity check is the correct
        # (and PEP 8 mandated) way to test for None.
        if info['session_end'] is None:
            data.append([mac, info['ip'], name, info['session_start']])
    headers = ['MAC', 'IP', 'name', 'session start']
    print(printutil.to_smart_columns(data, headers))
def validate_file_submission():
    """Validate the uploaded file, returning the file if present.

    :return: the uploaded file object, rewound to position 0
    :raises util.APIError: 400 if no file was sent under "sourceFile"
    """
    if "sourceFile" not in flask.request.files:
        # The form field checked above is "sourceFile"; the old message
        # incorrectly told clients to upload as "uploadFile".
        raise util.APIError(400, message="file not provided (must "
                            "provide as sourceFile).")
    # presumably the caller saves this to GCloud afterwards — the save
    # itself does not happen here (TODO confirm against callers)
    uploaded_file = flask.request.files["sourceFile"]
    uploaded_file.seek(0)
    return uploaded_file
def oauth_redirect(request, consumer_key=None, secret_key=None,
        request_token_url=None, access_token_url=None, authorization_url=None,
        callback_url=None, parameters=None):
    """
    View that starts OAuth-based authentication by redirecting the user
    to the service provider's authorization endpoint.
    """
    # Remember where to send the user once authentication completes.
    request.session['next'] = get_login_redirect_url(request)
    oauth_client = OAuthClient(request, consumer_key, secret_key,
                               request_token_url, access_token_url,
                               authorization_url, callback_url, parameters)
    return oauth_client.get_redirect()
def get_service_url():
    """Return the root URL of this service."""
    try:
        return os.environ['SERVICE_URL']
    except KeyError:
        # The default location for each service is /api/<service>.
        return '/api/{}'.format(get_service_name())
def read_stats(stats):
    """
    Parse rsync stats output and print a one-line transfer summary.

    :param stats: String
    :return:
    """
    # In verbose mode, echo the raw rsync stats for debugging.
    if system.config['verbose']:
        print(f'{output.Subject.DEBUG}{output.CliFormat.BLACK}{stats}{output.CliFormat.ENDC}')

    _file_number = parse_string(stats, r'Number of regular files transferred:\s*([\d.]+)')
    _file_size = parse_string(stats, r'Total transferred file size:\s*([\d.]+[MKG]?)')

    # Only report when both figures could be extracted from the output.
    if not _file_number or not _file_size:
        return
    output.message(
        output.Subject.INFO,
        f'Status: {_file_number[0]} file(s) transferred {output.CliFormat.BLACK}({_file_size[0]}Bytes){output.CliFormat.ENDC}'
    )
def to_hdf(data_dict, tgt, attrs=None, overwrite=True, warn=True):
    """Store a (possibly nested) dictionary to an HDF5 file or branch node
    within an HDF5 file (an h5py Group).
    This creates hardlinks for duplicate non-trivial leaf nodes (h5py Datasets)
    to minimize storage space required for redundant datasets. Duplication is
    detected via object hashing.
    NOTE: Branch nodes are sorted before storing (by name) for consistency in
    the generated file despite Python dictionaries having no defined ordering
    among keys.
    Parameters
    ----------
    data_dict : Mapping
        Dictionary, OrderedDict, or other Mapping to be stored
    tgt : str or h5py.Group
        Target for storing data. If `tgt` is a str, it is interpreted as a
        filename; a file is created with that name (overwriting an existing
        file, if present). After writing, the file is closed. If `tgt` is an
        h5py.Group, the data is simply written to that Group and it is left
        open at function return.
    attrs : Mapping
        Attributes to apply to the top-level entity being written. See
        http://docs.h5py.org/en/latest/high/attr.html
    overwrite : bool
        Set to `True` (default) to allow overwriting existing file. Raise
        exception and quit otherwise.
    warn : bool
        Issue a warning message if a file is being overwritten. Suppress
        warning by setting to `False` (e.g. when overwriting is the desired
        behaviour).
    """
    if not isinstance(data_dict, Mapping):
        raise TypeError('`data_dict` only accepts top-level'
                        ' dict/OrderedDict/etc.')
    def store_recursively(fhandle, node, path=None, attrs=None,
                          node_hashes=None):
        """Function for iteratively doing the work"""
        path = [] if path is None else path
        full_path = '/' + '/'.join(path)
        node_hashes = OrderedDict() if node_hashes is None else node_hashes
        if attrs is None:
            sorted_attr_keys = []
        else:
            if isinstance(attrs, OrderedDict):
                sorted_attr_keys = attrs.keys()
            else:
                sorted_attr_keys = sorted(attrs.keys())
        if isinstance(node, Mapping):
            logging.trace(' creating Group "%s"', full_path)
            try:
                dset = fhandle.create_group(full_path)
                for key in sorted_attr_keys:
                    dset.attrs[key] = attrs[key]
            except ValueError:
                pass
            for key in sorted(node.keys()):
                if isinstance(key, str):
                    key_str = key
                else:
                    key_str = str(key)
                    logging.warning(
                        'Making string from key "%s", %s for use as'
                        ' name in HDF5 file', key_str, type(key)
                    )
                val = node[key]
                new_path = path + [key_str]
                store_recursively(fhandle=fhandle, node=val, path=new_path,
                                  node_hashes=node_hashes)
        else:
            # Check for existing node
            node_hash = hash_obj(node)
            if node_hash in node_hashes:
                logging.trace(' creating hardlink for Dataset: "%s" -> "%s"',
                              full_path, node_hashes[node_hash])
                # Hardlink the matching existing dataset
                fhandle[full_path] = fhandle[node_hashes[node_hash]]
                return
            # For now, convert None to np.nan since h5py appears to not handle
            # None
            if node is None:
                node = np.nan
                logging.warning(
                    ' encountered `None` at node "%s"; converting to'
                    ' np.nan', full_path
                )
            # "Scalar datasets don't support chunk/filter options". Shuffling
            # is a good idea otherwise since subsequent compression will
            # generally benefit; shuffling requires chunking. Compression is
            # not done here since it is slow, but can be done by
            # post-processing the generated file(s).
            if np.isscalar(node):
                shuffle = False
                chunks = None
            else:
                shuffle = True
                chunks = True
            # Store the node_hash for linking to later if this is more than
            # a scalar datatype. Assumed that "None" has
            node_hashes[node_hash] = full_path
            # -- Handle special types -- #
            # See h5py docs at
            #
            # https://docs.h5py.org/en/stable/strings.html#how-to-store-text-strings
            #
            # where using `bytes` objects (i.e., in numpy, np.string_) is
            # deemed the most compatible way to encode objects, but apparently
            # we don't have pytables compatibility right now.
            #
            # For boolean support, see
            #
            # https://docs.h5py.org/en/stable/faq.html#faq
            # TODO: make written hdf5 files compatible with pytables
            # see docs at https://www.pytables.org/usersguide/datatypes.html
            if isinstance(node, string_types):
                node = np.string_(node)
            elif isinstance(node, bool): # includes np.bool
                node = np.bool_(node) # same as np.bool8
            elif isinstance(node, np.ndarray):
                if issubclass(node.dtype.type, string_types):
                    node = node.astype(np.string_)
                # BUGFIX: this previously tested `np.bool`, a deprecated
                # alias of the builtin `bool` that was removed in NumPy
                # 1.24 and raised AttributeError here for any non-string
                # ndarray; `np.bool_` is the actual dtype scalar type.
                elif node.dtype.type in (bool, np.bool_):
                    node = node.astype(np.bool_)
            logging.trace(' creating dataset at path "%s", hash %s',
                          full_path, node_hash)
            try:
                dset = fhandle.create_dataset(
                    name=full_path, data=node, chunks=chunks, compression=None,
                    shuffle=shuffle, fletcher32=False
                )
            except TypeError:
                try:
                    shuffle = False
                    chunks = None
                    dset = fhandle.create_dataset(
                        name=full_path, data=node, chunks=chunks,
                        compression=None, shuffle=shuffle, fletcher32=False
                    )
                except Exception:
                    logging.error(' full_path: "%s"', full_path)
                    logging.error(' chunks : %s', str(chunks))
                    logging.error(' shuffle : %s', str(shuffle))
                    logging.error(' node : "%s"', str(node))
                    raise
            for key in sorted_attr_keys:
                dset.attrs[key] = attrs[key]
    # Perform the actual operation using the dict passed in by user
    if isinstance(tgt, str):
        from pisa.utils.fileio import check_file_exists
        fpath = check_file_exists(fname=tgt, overwrite=overwrite, warn=warn)
        h5file = h5py.File(fpath, 'w')
        try:
            if attrs is not None:
                h5file.attrs.update(attrs)
            store_recursively(fhandle=h5file, node=data_dict)
        finally:
            h5file.close()
    elif isinstance(tgt, h5py.Group):
        store_recursively(fhandle=tgt, node=data_dict, attrs=attrs)
    else:
        raise TypeError('to_hdf: Invalid `tgt` type: %s' % type(tgt))
def render_template(content, context):
    """Render templates in content."""
    # Fix None issues: the last release may not exist yet.
    prerelease = (context.last_release_object.prerelease
                  if context.last_release_object is not None else False)
    # Render the template; on any failure fall back to the raw content.
    try:
        return Template(content).render(
            installed=context.installed,
            pending_update=context.pending_update,
            prerelease=prerelease,
            selected_tag=context.selected_tag,
            version_available=context.last_release_tag,
            version_installed=context.version_installed
        )
    except Exception as exception:
        context.logger.warning("Error rendering info template {}".format(exception), "template")
        return content
def isJacobianOnS256Curve(x, y, z):
    """
    Return True if the Jacobian point (x, y, z) lies on the secp256k1 curve.

    Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7
    In Jacobian coordinates, Y = y/z^3 and X = x/z^2
    Thus:
      (y/z^3)^2 = (x/z^2)^3 + 7
      y^2/z^6 = x^3/z^6 + 7
      y^2 = x^3 + 7*z^6
    """
    lhs = FieldVal()
    lhs.squareVal(y).normalize()          # y^2
    zSquared = FieldVal()
    zSquared.squareVal(z)                 # z^2
    xCubed = FieldVal()
    xCubed.squareVal(x).mul(x)            # x^3
    rhs = FieldVal()
    # (z^2)^2 * z^2 * 7 + x^3 == 7*z^6 + x^3
    rhs.squareVal(zSquared).mul(zSquared).mulInt(7).add(xCubed).normalize()
    return lhs.equals(rhs)
def decode_base85(encoded_str):
    """Decode a base85 string to its character representation.

    The input string length must be a multiple of 5, and the resulting
    output length is always a multiple of 4 (returned as a str whose
    characters carry the decoded byte values).
    """
    if len(encoded_str) % 5 != 0:
        raise ValueError('Input string length is not a multiple of 5; ' +
                         str(len(encoded_str)))
    # Lazily populate the reverse lookup table on first use.
    if not _char_to_value:
        for value, ch in enumerate(_BASE85_CHARACTERS):
            _char_to_value[ch] = value
    result = ''
    # Each group of 5 base85 digits encodes one 32-bit big-endian word,
    # which expands to 4 output characters.
    for start in range(0, len(encoded_str), 5):
        acc = 0
        for ch in encoded_str[start:start + 5]:
            if ch not in _char_to_value:
                raise ValueError('Invalid base85 character; "{}"'.format(ch))
            acc = acc * 85 + _char_to_value[ch]
        for _ in range(4):
            result += chr(acc >> 24)
            acc = (acc & 0x00ffffff) << 8
    return result
def process_doc(doc: PDFDocument):
    """Process PDF Document, return info and metadata.

    Some PDFs store information such as the title in the ``info`` field,
    while newer PDFs store it in the ``metadata`` field; the raw XMP
    packet of the latter is converted to a dictionary here.

    Parameters
    ----------
    doc : PDFDocument
        PDF Document object to process.

    Returns
    -------
    info : dict
        Field info of the doc; {} if there is no info field.
    metadata : dict
        Field metadata of the doc; {} if there is no metadata field.
    """
    # doc.info may be a list of dicts; use the first entry in that case.
    info = doc.info or {}
    if isinstance(info, list):
        info = info[0]
    # Without a Metadata entry in the catalog there is nothing to parse.
    if 'Metadata' not in doc.catalog:
        return info, {}
    # resolve1 resolves indirect objects recursively; the result is a
    # PDFStream whose raw bytes hold the XMP packet.
    raw_xmp = resolve1(doc.catalog['Metadata']).get_data()
    return info, xmp_to_dict(raw_xmp)
def are_expected_items_in_list(test_case, actual_list, *expected_items):
    """
    Assert that the given list contains the expected items and only those.

    Parameters
    ----------
    test_case : unittest.TestCase
        The test case that provides assert methods.
    actual_list : list of object
        The result list to check.
    expected_items : list of object
        The list of the expected items.
    """
    test_case.assertIsNotNone(actual_list, 'The list should not be None.')
    expected_count = len(expected_items)
    actual_count = len(actual_list)
    test_case.assertEqual(
        expected_count,
        actual_count,
        'There are different number of items in expected and in actual lists ({}/{}).'.format(
            expected_count,
            actual_count))
    for expected in expected_items:
        test_case.assertIn(expected, actual_list, 'The item \'{}\' is missing from the list.'.format(expected))
def main():
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).
    """
    logger = logging.getLogger(__name__)
    logger.info('making final data set from raw data')
    # Read the source URL from the [DATA] section of the project config.
    c = Config()
    url = c.read('DATA', 'url')
    # NOTE(review): `urls` is never used below — this function looks
    # truncated or unfinished; confirm against the original script.
    urls = get_urls(url) | 28,495 |
def score_lin_reg(est, X, y, sample_weight=None, level=1):
    """
    Score a fitted linear regression model on test data.

    Parameters
    -----------
    est:
        The fitted estimator.
    X: array-like, shape (n_samples, n_features)
        The test X data.
    y: array-like, shape (n_samples, )
        The true responses.
    sample_weight: array-like shape (n_samples,)
        Sample weights.
    level: int
        How much data to return.

    Output
    ------
    scores: dict
        Contains the scores.
    """
    preds = est.predict(X)
    scores = {'r2': r2_score(y_true=y, y_pred=preds,
                             sample_weight=sample_weight)}
    # Level >= 1 adds supplementary regression diagnostics.
    if level >= 1:
        scores.update(additional_regression_data(y_true=y, y_pred=preds,
                                                 coef=est.coef_,
                                                 sample_weight=sample_weight))
    return scores
def test_write_log_file():
    """Test writing to a log file
    """
    # Minimal Brain.Targets change-feed document (new_val/old_val pair
    # plus a timestamp, rethinkdb-changefeed style).
    sample_target = {
        'new_val': {
            'Location': '192.168.1.3',
            'Optional': {
                'init': ''
            },
            'PluginName': 'Harness',
            'Port': '9800',
            'id': '059ed5fc-0263-42b0-962d-7258003fd53a'
        },
        'old_val': None,
        'ts': 15
    }
    # Minimal Brain.Jobs change-feed document.
    sample_job = {
        'new_val': {
            'JobCommand': {
                'CommandName': 'echo',
                'Inputs': [
                    {
                        'Name': 'EchoString',
                        'Tooltip': 'This string will be echoed back',
                        'Type': 'textbox', 'Value': 'test'
                    }
                ],
                'OptionalInputs': [],
                'Output': True,
                'Tooltip': '\nEcho\n\nClient Returns this string verbatim\n\nArguments:\n1. String to Echo\n\nReturns:\nString\n'
            },
            'JobTarget': {
                'Location': '192.168.1.1',
                'PluginName': 'Harness', 'Port': 0
            },
            'StartTime': 0,
            'Status': 'Ready',
            'id': '196a6737-d866-48a4-9a01-ec4d9510d7ab'
        },
        'old_val': None,
        'ts': 15
    }
    # Write both documents into per-namespace, per-day log files in the
    # current directory (LOG_DIR set to "").
    run_audit.LOG_DIR = ""
    run_audit.write_log_file("Brain.Targets", sample_target)
    run_audit.write_log_file("Brain.Jobs", sample_job)
    # Each log line must be the JSON encoding of the new_val plus a
    # formatted UTC timestamp and the namespace; files are removed so
    # repeated runs start fresh.
    with open("{}{}.{}.log".format(run_audit.LOG_DIR, "Brain.Targets", run_audit.DAY_STRING), "r") as f:
        for line in f:
            assert line.replace("\n", "") == dumps({
                "datetime": T.asctime(T.gmtime(sample_target["ts"])).upper(),
                "namespace": "Brain.Targets",
                "log": sample_target["new_val"]
            })
    remove("{}{}.{}.log".format(run_audit.LOG_DIR, "Brain.Targets", run_audit.DAY_STRING))
    with open("{}{}.{}.log".format(run_audit.LOG_DIR, "Brain.Jobs", run_audit.DAY_STRING), "r") as f:
        for line in f:
            assert line.replace("\n", "") == dumps({
                "datetime": T.asctime(T.gmtime(sample_job["ts"])).upper(),
                "namespace": "Brain.Jobs",
                "log": sample_job["new_val"]
            })
    remove("{}{}.{}.log".format(run_audit.LOG_DIR, "Brain.Jobs", run_audit.DAY_STRING)) | 28,497 |
def listen():
    """Connect to the Twitter streaming API and print matching tweets.

    Blocks until interrupted (Ctrl-C).
    """
    api = get_api()
    printer = PrintingListener(api)
    tweet_stream = tweepy.Stream(auth=api.auth, listener=printer, tweet_mode='extended')
    print("Listening to Twitter API.. (to stop press Ctrl-C)")
    tweet_stream.filter(track=printer.keywords)
def test_help():
"""Passing -h or --help => print help text."""
assert run(["rm", "-h"]).stdout.split(" ")[0] == "Usage:"
assert run(["rm", "--help"]).stdout.split(" ")[0] == "Usage:"
assert run(["rm", "-h"]).returncode > 0
assert run(["rm", "--help"]).returncode > 0 | 28,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.