content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import itertools
def correlation_matrix(
    spark, idf, list_of_cols="all", drop_cols=[], stats_unique={}, print_impact=False
):
    """
    Compute the pairwise correlation matrix (via 2D histograms and the
    phik-matrix helper) for the selected columns of a Spark dataframe.

    :param spark: Spark Session
    :param idf: Input Dataframe
    :param list_of_cols: List of columns to analyse e.g., ["col1","col2"].
        Alternatively, columns can be specified in a string format,
        where different column names are separated by pipe delimiter "|" e.g., "col1|col2".
        "all" can be passed to include all columns for analysis.
        Please note that this argument is used in conjunction with drop_cols i.e. a column mentioned in
        drop_cols argument is not considered for analysis even if it is mentioned in list_of_cols.
    :param drop_cols: List of columns to be dropped e.g., ["col1","col2"].
        Alternatively, columns can be specified in a string format,
        where different column names are separated by pipe delimiter "|" e.g., "col1|col2".
    :param stats_unique: Takes arguments for read_dataset (data_ingest module) function in a dictionary format
        to read pre-saved statistics on unique value count i.e. if measures_of_cardinality or
        uniqueCount_computation (data_analyzer.stats_generator module) has been computed & saved before.
    :param print_impact: If True, print the resulting correlation matrix.
    :return: Dataframe [attribute,*col_names]
        Correlation between attribute X and Y can be found at an intersection of
        a) row with value X in 'attribute' column and column 'Y', or
        b) row with value Y in 'attribute' column and column 'X'.
    :raises TypeError: if no valid columns remain, or a column is missing from idf.
    """
    if list_of_cols == "all":
        num_cols, cat_cols, other_cols = attributeType_segregation(idf)
        list_of_cols = num_cols + cat_cols
    if isinstance(list_of_cols, str):
        list_of_cols = [x.strip() for x in list_of_cols.split("|")]
    if isinstance(drop_cols, str):
        drop_cols = [x.strip() for x in drop_cols.split("|")]

    # Columns with fewer than 2 unique values carry no correlation signal.
    # The unique-count stats are either computed now or read from a
    # pre-saved dataset (this deduplicates the two copy-pasted pipelines).
    if stats_unique == {}:
        unique_stats = uniqueCount_computation(spark, idf, list_of_cols)
    else:
        unique_stats = read_dataset(spark, **stats_unique)
    remove_cols = (
        unique_stats.where(F.col("unique_values") < 2)
        .select("attribute")
        .rdd.flatMap(lambda x: x)
        .collect()
    )

    excluded = set(drop_cols) | set(remove_cols)
    list_of_cols = list({e for e in list_of_cols if e not in excluded})

    # BUG FIX: use boolean `or` instead of bitwise `|` between booleans.
    if any(x not in idf.columns for x in list_of_cols) or (len(list_of_cols) == 0):
        raise TypeError("Invalid input for Column(s)")

    # Build 2D histograms for every unordered column pair (incl. self-pairs)
    # and derive the correlation matrix from them.
    combis = [list(c) for c in itertools.combinations_with_replacement(list_of_cols, 2)]
    hists = idf.select(list_of_cols).pm_make_histograms(combis)
    grids = {k: get_2dgrid(h) for k, h in hists.items()}
    odf_pd = spark_phik_matrix_from_hist2d_dict(spark.sparkContext, grids)
    odf_pd["attribute"] = odf_pd.index
    list_of_cols.sort()
    odf = (
        spark.createDataFrame(odf_pd)
        .select(["attribute"] + list_of_cols)
        .orderBy("attribute")
    )
    if print_impact:
        odf.show(odf.count())
    return odf
def _model_variable_getter(getter, name, shape=None, dtype=None,
                           initializer=None, regularizer=None, trainable=True,
                           collections=None, caching_device=None,
                           partitioner=None, rename=None, use_resource=None,
                           **_):
    """Getter that uses model_variable for compatibility with core layers.

    If `rename` maps the variable's short name (its last '/'-separated
    component) to a replacement, the name is rewritten before creation.
    """
    parts = name.split('/')
    if rename and parts[-1] in rename:
        parts[-1] = rename[parts[-1]]
        name = '/'.join(parts)
    return slim.model_variable(
        name, shape=shape, dtype=dtype, initializer=initializer,
        regularizer=regularizer, collections=collections, trainable=trainable,
        caching_device=caching_device, partitioner=partitioner,
        custom_getter=getter, use_resource=use_resource)
def password_check(value, user):
    """
    Validate a password against the configured policy.

    Rules, checked in order:
      * minimum length (settings.AUTH_PASSWORD_LENGTH)
      * at least one digit, one letter, one uppercase letter and one
        non-alphanumeric (special) character
      * not equal to any of the user's last
        settings.AUTH_PASSWORD_HISTORY_COUNT stored passwords

    Raises serializers.ValidationError on the first failed rule and
    returns the password unchanged otherwise.
    """
    if len(value) < settings.AUTH_PASSWORD_LENGTH:
        raise serializers.ValidationError('Password is too short')
    # Character-class requirements, checked in the documented order.
    rules = (
        (str.isdigit, 'Password must contain at least 1 digit.'),
        (str.isalpha, 'Password must contain at least 1 letter.'),
        (str.isupper, 'Password must contain at least 1 uppercase letter.'),
        (lambda ch: not ch.isalnum(), 'Password must contain at least 1 special character.'),
    )
    for predicate, message in rules:
        if not any(predicate(char) for char in value):
            raise serializers.ValidationError(message)
    # New users won't have a password history at this point.
    if user is not None:
        prior_passwords = PasswordHistory.objects.filter(user_id=user.id).order_by('created_at').reverse()[:settings.AUTH_PASSWORD_HISTORY_COUNT]
        for prior in prior_passwords:
            hasher = identify_hasher(prior.password)
            if hasher.verify(value, prior.password):
                raise serializers.ValidationError('Password can not have been one of the last %d passwords' % settings.AUTH_PASSWORD_HISTORY_COUNT)
    return value
def copiar_dic(origen):
    """
    Return a fresh copy of ``origen`` with as little shared state as
    possible.

    Uses `fusionar_dics` under the hood, with an empty ``base`` dict;
    see its documentation for details on the behaviour.

    .. versionadded:: 1.0
    """
    return fusionar_dics({}, origen)
def get_all_users(conn):
    """Iterator that yields all IAM users, to simplify getting all users.

    The amazon interface presents a "next" token that has to be guarded;
    this wrapper hides that bookkeeping and yields only the user dicts,
    not the wrapping response object.

    Enables this sort of search:
        In [210]: r = get_all_users(iconn)
        In [211]: for user in r:
                      if user['user_id'] == iam_user:
                          print user

    :type conn: boto.iam.connection object
    :param conn: object as returned by connect()
    """
    def _result(response):
        # Both the user list and the pagination marker live under this node.
        return response['list_users_response']['list_users_result']

    return k.aws.util.yield_aws_data(
        conn.get_all_users, 'marker',
        lambda response: _result(response)['marker'],
        lambda response: _result(response)['users'])
def to_weight(df: pd.DataFrame, renorm=True):
    """
    Convert molar quantities to mass quantities of the same order
    (e.g. mol% -> mass%, mol-ppm -> mass-ppm) by scaling each column
    by its molecular weight, optionally renormalising the result.
    """
    molecular_weights = [pt.formula(col).mass for col in df.columns]
    scaled = df.multiply(molecular_weights)
    return renormalise(scaled) if renorm else scaled
import numpy
# BUG FIX: ArrayLike was referenced in the annotations but never imported,
# which raised NameError at definition time.
from numpy.typing import ArrayLike


def sind(ang: ArrayLike) -> ArrayLike:
    """Return the sine of an angle (or array of angles) specified in degrees.

    Parameters
    ----------
    ang : array_like
        Angle(s), in degrees.

    Returns
    -------
    ndarray or scalar
        Sine of ``ang``.
    """
    return numpy.sin(numpy.radians(ang))
import numpy


def linscale(x, lb_in, ub_in, lb_out, ub_out):
    """linscale scales x from the input bounds to the output bounds.

    BUG FIX: the original did `import array` (the stdlib module, which is
    not callable) and then called `array(lb_in)`, raising TypeError.
    The intended behaviour is a numpy array conversion.

    Parameters
    ----------
    x : (N, x_dim) numpy array
    lb_in, ub_in, lb_out, ub_out : scalar or 1D list of len x_dim or
        (1,) or (x_dim,) numpy array — input/output bounds.

    Returns
    -------
    xs : (N, x_dim) numpy array
    """
    lb_in = numpy.asarray(lb_in)
    ub_in = numpy.asarray(ub_in)
    lb_out = numpy.asarray(lb_out)
    ub_out = numpy.asarray(ub_out)
    # Map [lb_in, ub_in] linearly onto [lb_out, ub_out].
    xs = (x - lb_in) / (ub_in - lb_in) * (ub_out - lb_out) + lb_out
    return xs
def sync_from(src, dest):
    """Synchronize a directory from Dropbox.

    NOTE(review): this is an unimplemented stub — it ignores both
    arguments and always returns False (nothing is synchronized).
    """
    return False
import re
def transcribe(seq: str) -> str:
    """
    Transcribe DNA to RNA by generating the complement sequence with
    T -> U replacement (A->U, T->A, C->G, G->C).

    Non-alphabetic characters (digits, spaces, punctuation) are stripped
    first; alphabetic characters other than A/T/C/G are silently dropped.
    Input is case-insensitive; output is uppercase.

    BUG FIX: the original called the undefined `length()` (NameError) and
    used `.split()` on the cleaned string, which produced a single-element
    list instead of iterating per character — so no base was ever matched.

    :param seq: DNA sequence string, possibly containing noise characters
    :return: complementary RNA sequence
    """
    complement = {"A": "U", "T": "A", "C": "G", "G": "C"}
    # Remove digits, spaces and special characters before transcription.
    cleaned = re.sub(r'[^a-zA-Z]', '', seq)
    rna_parts = []
    for base in cleaned:
        rna_parts.append(complement.get(base.upper(), ""))
    return "".join(rna_parts)
def filter_timeline_actions(tim, **filters):
    """Return the timeline actions of one TIM that satisfy every filter.

    ``tim`` (dict) holds the TIM info with its actions under 'timeline'.
    A 'time' filter is a closed interval, e.g. [0,134] or [135,150];
    any other filter field requires exact equality on that field.
    """
    actions = tim['timeline']
    for field, required_value in filters.items():
        if field == 'time':
            # Closed interval: both endpoints are acceptable.
            lo, hi = required_value[0], required_value[1]
            actions = [a for a in actions if a['time'] in range(lo, hi + 1)]
        else:
            # Keep only actions whose field equals the required value.
            actions = [a for a in actions if a[field] == required_value]
    return actions
def _make_n_rows(x, n, y=None):
"""
Multiplies or reduces the rows of x to get
exactly *n* rows.
"""
if n < x.shape[0]:
if y is None:
return x[:n].copy()
return x[:n].copy(), y[:n].copy()
if len(x.shape) < 2:
r = np.empty((n,), dtype=x.dtype)
if y is not None:
ry = np.empty((n,), dtype=y.dtype)
for i in range(0, n, x.shape[0]):
end = min(i + x.shape[0], n)
r[i:end] = x[0 : end - i]
if y is not None:
ry[i:end] = y[0 : end - i]
else:
r = np.empty((n, x.shape[1]), dtype=x.dtype)
if y is not None:
if len(y.shape) < 2:
ry = np.empty((n,), dtype=y.dtype)
else:
ry = np.empty((n, y.shape[1]), dtype=y.dtype)
for i in range(0, n, x.shape[0]):
end = min(i + x.shape[0], n)
r[i:end, :] = x[0 : end - i, :]
if y is not None:
if len(y.shape) < 2:
ry[i:end] = y[0 : end - i]
else:
ry[i:end, :] = y[0 : end - i, :]
if y is None:
return r
return r, ry | 8b29deb282a327945e73b73b0332e663040f6c40 | 28,911 |
def calculate_match_unnormd_fft(
    params_h: Binary, params_d: Binary, fs, pad_low, pad_high, S_n=S_n_LISA
):
    """
    Inner product of waveforms, maximized over Phi_c by taking absolute value
    and t_c using the fast Fourier transform.

    :param params_h: parameters of the template waveform
    :param params_d: parameters of the data waveform
    :param fs: frequency grid; assumed uniformly spaced (df is taken from
        the first two entries) — TODO confirm
    :param pad_low: padding prepended to the integrand before the IFFT
    :param pad_high: padding appended to the integrand before the IFFT
    :param S_n: one-sided noise power spectral density function
        (defaults to the LISA PSD)
    :return: max over t_c of |N * IFFT(4 * conj(h) * d / S_n * df)|
    """
    df = fs[1] - fs[0]
    # Build frequency-domain waveforms from the amplitude and phase models.
    wf_h = amp(fs, params_h) * jnp.exp(1j * Psi(fs, params_h))
    wf_d = amp(fs, params_d) * jnp.exp(1j * Psi(fs, params_d))
    Sns = S_n(fs)
    # Use IFFT trick to maximize over t_c. Ref: Maggiore's book, eq. 7.171.
    integrand = 4 * wf_h.conj() * wf_d / Sns * df
    integrand_padded = jnp.concatenate((pad_low, integrand, pad_high))
    # print(low_padding, high_padding, len(fs), N)
    return jnp.abs(len(integrand_padded) * jnp.fft.ifft(integrand_padded)).max()
def _force_merge(session, model, new_model):
    """Force merge an existing `model` with a `new_model`.

    Copies every primary-key value from `model` onto `new_model` so the
    session treats them as the same row, then delegates to
    ``session.merge(new_model)`` and returns its result.
    """
    for pk_column in mapper_primary_key(model.__class__):
        pk_value = getattr(model, pk_column.name, None)
        setattr(new_model, pk_column.name, pk_value)
    return session.merge(new_model)
def get_alignment(ra_difference, dec_difference):
    """
    Determine how the coordinate grid is aligned from the per-pixel
    RA/DEC increments.

    :param ra_difference: RA increment (expected close to +1, -1 or 0)
    :param dec_difference: DEC increment (expected close to 0, +1 or -1)
    :return: (ra_alignment, dec_alignment) where exactly one element is
        "+" or "-" and the other is None
    :raises ValueError: when the increments do not match any axis-aligned
        orientation
    """
    unclear = ValueError(
        "The orientation of the coordinate system is unclear: "
        + str(ra_difference) + " " + str(dec_difference))

    # RA increment close to +/-1 requires a DEC increment close to zero.
    if np.isclose(ra_difference, 1.0, atol=0.0001):
        if np.isclose(dec_difference, 0.0, atol=0.01):
            return "+", None
        raise unclear
    if np.isclose(ra_difference, -1.0, atol=0.0001):
        if np.isclose(dec_difference, 0.0, atol=0.01):
            return "-", None
        raise unclear
    # RA increment close to zero: the DEC increment must be close to +/-1.
    if np.isclose(ra_difference, 0.0, atol=0.01):
        if np.isclose(dec_difference, 1.0, atol=0.0001):
            return None, "+"
        if np.isclose(dec_difference, -1.0, atol=0.0001):
            return None, "-"
    raise unclear
def three_to_one_with_mods(res: str) -> str:
    """
    Converts three letter AA codes into 1 letter. Allows for modified residues.

    :param res: Three letter residue code
    :type res: str
    :return: 1-letter residue code
    :rtype: str
    :raises KeyError: if ``res`` is not present in the RESI_THREE_TO_1 lookup
    """
    return RESI_THREE_TO_1[res]
def sent2features(sent, pos_lex=None, neg_lex=None):
    """
    Converts a sentence (list of (word, pos_tag) tuples) into a list of feature
    dictionaries, one per token, by calling word2features for each position.

    :param sent: the sentence as a sequence of (word, pos_tag) tuples
    :param pos_lex: optional lexicon passed through to word2features
        (presumably positive-sentiment terms — verify against word2features)
    :param neg_lex: optional lexicon passed through to word2features
        (presumably negative-sentiment terms — verify against word2features)
    :return: list of per-token feature dicts, in token order
    """
    return [word2features(sent, i, pos_lex, neg_lex) for i in range(len(sent))]
def effect_of_travel_range_on_afv_attractiveness():
    """
    Real Name: b'Effect of travel range on AFV Attractiveness'
    Original Eqn: b'AFV travel range/travel range max'
    Units: b'Dmnl'
    Limits: (None, None)
    Type: component

    Dimensionless ratio of the AFV travel range to the maximum travel
    range. (Auto-generated system-dynamics model component; depends on
    the afv_travel_range and travel_range_max components.)
    """
    return afv_travel_range() / travel_range_max()
def serialize_read(function_code: int, transaction_id: int, unit_address: int, first_address: int, count: int) -> str:
    """
    Universal function for handling serialization of all read type messages.

    Args:
        function_code: Unique function code.
        transaction_id: Unique ID of the transaction.
        unit_address: Address of the referenced unit.
        first_address: Starting address.
        count: Number of items to be read.

    Returns:
        str: Hex representation of the message in string format.
    """
    # The remaining-length field is always 6 bytes for read requests.
    parts = [
        f'{transaction_id:04x}',
        protocol_code,
        '0006',
        f'{unit_address:02x}',
        f'{function_code:02x}',
        f'{first_address:04x}',
        f'{count:04x}',
    ]
    return ''.join(parts)
import torch
from typing import Union
from typing import Tuple
from typing import Optional
def plot_locally_connected_weights(
    weights: torch.Tensor,
    n_filters: int,
    kernel_size: Union[int, Tuple[int, int]],
    conv_size: Union[int, Tuple[int, int]],
    locations: torch.Tensor,
    input_sqrt: Union[int, Tuple[int, int]],
    wmin: float = 0.0,
    wmax: float = 1.0,
    im: Optional[AxesImage] = None,
    lines: bool = True,
    figsize: Tuple[int, int] = (5, 5),
    cmap: str = "hot_r",
) -> AxesImage:
    # language=rst
    """
    Plot a connection weight matrix of a :code:`Connection` with `locally connected
    structure <http://yann.lecun.com/exdb/publis/pdf/gregor-nips-11.pdf>_.

    :param weights: Weight matrix of Conv2dConnection object.
    :param n_filters: No. of convolution kernels in use.
    :param kernel_size: Side length(s) of 2D convolution kernels.
    :param conv_size: Side length(s) of 2D convolution population.
    :param locations: Indices of input receptive fields for convolution population
        neurons.
    :param input_sqrt: Side length(s) of 2D input data.
    :param wmin: Minimum allowed weight value.
    :param wmax: Maximum allowed weight value.
    :param im: Used for re-drawing the weights plot.
    :param lines: Whether or not to draw horizontal and vertical lines separating input
        regions.
    :param figsize: Horizontal, vertical figure size in inches.
    :param cmap: Matplotlib colormap.
    :return: Used for re-drawing the weights plot.
    """
    # Normalize scalar side lengths into (h, w) pairs.
    kernel_size = _pair(kernel_size)
    conv_size = _pair(conv_size)
    input_sqrt = _pair(input_sqrt)
    # Tile the per-filter receptive-field weights into one 2D image.
    reshaped = reshape_locally_connected_weights(
        weights, n_filters, kernel_size, conv_size, locations, input_sqrt
    )
    # Filters are arranged on an (n_sqrt x n_sqrt) grid in the image.
    n_sqrt = int(np.ceil(np.sqrt(n_filters)))
    if not im:
        # First call: build the figure, image and colorbar from scratch.
        fig, ax = plt.subplots(figsize=figsize)
        im = ax.imshow(reshaped.cpu(), cmap=cmap, vmin=wmin, vmax=wmax)
        div = make_axes_locatable(ax)
        cax = div.append_axes("right", size="5%", pad=0.05)
        if lines:
            # Green dashed separators between the tiled input regions.
            for i in range(
                n_sqrt * kernel_size[0],
                n_sqrt * conv_size[0] * kernel_size[0],
                n_sqrt * kernel_size[0],
            ):
                ax.axhline(i - 0.5, color="g", linestyle="--")
            for i in range(
                n_sqrt * kernel_size[1],
                n_sqrt * conv_size[1] * kernel_size[1],
                n_sqrt * kernel_size[1],
            ):
                ax.axvline(i - 0.5, color="g", linestyle="--")
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_aspect("auto")
        plt.colorbar(im, cax=cax)
        fig.tight_layout()
    else:
        # Subsequent calls: just refresh the pixel data in the existing image.
        im.set_data(reshaped)
    return im
import copy
def draw_box_list(img_bgr, box_list, thickness=-1, color=None,
                  is_overlap=True, is_arrow=False, is_text=True, is_show=False, is_new=False, save_name=None):
    """
    Draw a list of rectangles on a BGR image.

    Each box is (x_min, y_min, x_max, y_max). Optionally blends the boxes
    semi-transparently over the image, draws arrows between consecutive
    box centers, labels each box with its index, and shows/saves the result.

    :param img_bgr: input image (BGR)
    :param box_list: list of (x_min, y_min, x_max, y_max) boxes
    :param thickness: rectangle thickness; -1 fills the box
    :param color: single BGR color for all boxes; random per-box colors if None
    :param is_overlap: blend the drawn boxes 50/50 with the original image
    :param is_arrow: draw arrows between consecutive box centers
    :param is_text: draw the index of each box at its center
    :param is_show: display (and optionally save) the result via show_img_bgr
    :param is_new: work on a deep copy instead of drawing into img_bgr
    :param save_name: output path passed to show_img_bgr, if any
    :return: image with the boxes drawn
    """
    if is_new:
        img_bgr = copy.deepcopy(img_bgr)
    n_box = len(box_list)
    if not color:
        color_list = generate_colors(n_box)  # generate a random color per box
    else:
        color_list = [color] * n_box  # repeat the single given color
    ori_img = copy.copy(img_bgr)
    img_copy = copy.copy(img_bgr)
    # Draw the rectangle for each box.
    for idx, (box, color) in enumerate(zip(box_list, color_list)):
        # rec_arr = np.array(box)
        # ori_img = cv2.fillPoly(ori_img, [rec_arr], color_list[idx])
        x_min, y_min, x_max, y_max = box
        ori_img = cv2.rectangle(ori_img, pt1=(x_min, y_min), pt2=(x_max, y_max), color=color, thickness=thickness)
    if is_overlap:
        # Blend boxes 50/50 with the untouched copy for a transparent look.
        ori_img = cv2.addWeighted(ori_img, 0.5, img_copy, 0.5, 0)
        ori_img = np.clip(ori_img, 0, 255)
    # Draw direction arrows and index labels.
    pre_point, next_point = None, None
    pre_color = None
    for idx, box in enumerate(box_list):
        x_min, y_min, x_max, y_max = box
        point = ((x_min + x_max) // 2, (y_min + y_max) // 2)
        if is_arrow:
            pre_point = point
            if pre_point and next_point:  # arrow from previous center to current
                # NOTE(review): next_point holds the previous box's center here
                # — verify the intended arrow direction.
                cv2.arrowedLine(ori_img, next_point, pre_point, pre_color, thickness=5,
                                line_type=cv2.LINE_4, shift=0, tipLength=0.05)
            next_point = point
            pre_color = color_list[idx]
        if is_text:
            draw_text(ori_img, str(idx), point)  # draw the box index
    if is_show or save_name:
        show_img_bgr(ori_img, save_name=save_name)
    return ori_img
def adjust_protons(formula, protons):
    """
    Add or remove hydrogens (protons) from a chemical formula.

    @param formula: chemical formula as string
    @param protons: number of hydrogens to add/remove as integer
        (may be negative; a falsy value is a no-op)
    @return: tuple of (new formula as string, notes string)
    """
    if not protons:
        return (formula,"")
    protons = int(protons)
    Notes = ""
    #The whole function assumes that there is a single formula string
    #If the formula can be broken into components, it must first be merged
    #This is because the proton layer only ever has a single component
    if(len(formula.split('.'))>1):
        print("Error: you must merge the formula components into a single formula string")
        print("You can do so using Compounds.mergeFormula()")
        return formula,"Unadjustable due to multiple components"
    atoms = Compounds.parseFormula(formula)
    if "H" in atoms:
        atoms['H'] += protons
        if atoms['H'] < 0:
            # Over-removal is recorded in the notes rather than raised.
            Notes = 'Too Many Protons adjusted!'
        if atoms['H'] == 0:
            del atoms['H']
    elif(len(atoms)==0):
        #special case for the proton: an empty formula gets only hydrogens
        atoms['H']=protons
    formula = Compounds.buildFormula(atoms)
    return (formula, Notes)
def angsep(ra1,dec1,ra2,dec2):
    """Compute the angular separation between two points on a sphere.

    Parameters:
    - ra1 - Right ascension of first position (degrees)
    - dec1 - Declination of first position (degrees)
    - ra2 - Right ascension of second position (degrees)
    - dec2 - Declination of second position (degrees)

    Return value:
    - angSep - the angular separation of the two positions in degrees

    Usage:
        angSep = angsep(ra1,dec1,ra2,dec2)

    Description:
    Uses the spherical law of cosines. Floating-point round-off can push
    the computed cosine infinitesimally outside [-1, 1] when the two
    positions coincide, which would make acos raise; the cosine is
    therefore clamped into the valid domain. (The original code worked
    around this by round-tripping through a '%.10f' string, which silently
    discarded precision in every result.)
    """
    ra1*=d2r
    dec1*=d2r
    ra2*=d2r
    dec2*=d2r
    diffCosine=cos(dec1)*cos(dec2)*cos(ra1-ra2)+sin(dec1)*sin(dec2)
    # Clamp into acos's domain [-1, 1] instead of string-rounding.
    diffCosine = min(1.0, max(-1.0, diffCosine))
    return acos(diffCosine)/d2r
from typing import Callable
from typing import Dict
from typing import Any
from typing import Type
def construct_schema_function_without_choices() -> Callable:
    """
    Modifies model example and description if needed.
    Note that schema extra has to be a function, otherwise it's called too soon,
    before all the relations are expanded.

    :return: callable that will be run by pydantic to modify the schema
    :rtype: Callable
    """
    def schema_extra(schema: Dict[str, Any], model: Type["Model"]) -> None:
        # Apply both schema post-processing steps in place.
        overwrite_example_and_description(schema=schema, model=model)
        overwrite_binary_format(schema=schema, model=model)
    # Wrapped in staticmethod so it can be attached to a model Config class
    # without becoming a bound method.
    return staticmethod(schema_extra)
def is_cross_not_claimed(event_list, team):
    """Returns if event list has a cross-not-claimed Goalkeeper event; cross not successfully caught.

    Note: only the first event in the list is inspected (event_list[:1]),
    matching the original implementation.
    """
    return any(event.type_id == 53 for event in event_list[:1])
import numpy
import math
def _do_monte_carlo_run(pools, lulc_counts):
"""Do a single Monte Carlo run for carbon storage.
Returns a dict with the results, keyed by scenario, and
# including results for sequestration.
"""
# Sample carbon-per-grid-cell from the given normal distribution.
# We sample this independently for each LULC type.
lulc_carbon_samples = {}
for lulc_id, distribution in pools.items():
if not distribution['variance']:
lulc_carbon_samples[lulc_id] = distribution['total']
else:
lulc_carbon_samples[lulc_id] = numpy.random.normal(
distribution['total'],
math.sqrt(distribution['variance']))
# Compute the amount of carbon in each scenario.
results = {}
for scenario, counts in lulc_counts.items():
# Amount of carbon is the sum across all lulc types of:
# (number of grid cells) x (carbon per grid cell)
results[scenario] = sum(
count * lulc_carbon_samples[lulc_id]
for lulc_id, count in counts.items())
# Compute sequestration.
for scenario in ['fut', 'redd']:
if scenario not in results:
continue
results['sequest_%s' % scenario] = results[scenario] - results['cur']
return results | 03ddcf90c135ce02f7692557f31bb530390d2a7a | 28,925 |
def compute_partition(df):
    """Perform compute on a partition

    Partitions are perfectly aligned along 1 minute and the timestamp index is
    sorted.
    Below is a reasonably costly operation that aggregates data from
    each timestep. That dataframe would aggregate what the timestep had (i.e.,
    measurements from all the hosts) Also, this is done with *all* of the
    columns.

    Relies on module-level constants defined elsewhere (NODE, ANY_NAN,
    HOT_GPUS, ALL_TEMPS, CORE_TEMPS, MEM_TEMPS, BANDS, N_CORES_IN_BANDS,
    N_MEMS_IN_BANDS) — presumably column names and temperature band edges.
    """
    if not isinstance(df, pd.DataFrame) and not isinstance(df, dd.DataFrame):
        # Non-dataframe partitions yield an empty frame with the output schema.
        return pd.DataFrame(columns=[NODE, ANY_NAN, HOT_GPUS] + N_CORES_IN_BANDS + N_MEMS_IN_BANDS)
    # Detect NaNs in each row.
    df[ANY_NAN] = df[ALL_TEMPS].isna().any(axis=1)
    # Replace temperature with bands.
    df[ALL_TEMPS] = df[ALL_TEMPS].apply(pd.cut, bins=BANDS, right=False, labels=False)
    # Count bands for each row.
    for n_sensors_in_bands, temps in zip([N_CORES_IN_BANDS, N_MEMS_IN_BANDS], [CORE_TEMPS, MEM_TEMPS]):
        for band, n_sensors_in_band in enumerate(n_sensors_in_bands):
            df[n_sensors_in_band] = (df[temps] == band).sum(axis=1)
    # Encode hot GPUs for each node as "<node>:<per-sensor 0/1 flags>", kept
    # only for rows where at least one sensor is in a band above 1.
    are_hot_gpus = df[ALL_TEMPS] > 1
    df[HOT_GPUS] = df[NODE] + ':' + are_hot_gpus.fillna('_').astype(int).astype(str).agg(''.join, axis=1)
    df[HOT_GPUS] = df[HOT_GPUS].mask(~are_hot_gpus.any(axis=1))
    # Aggregate per timestamp: node count, NaN count, hot-GPU codes, band sums.
    agg = df.groupby(df.index).agg({NODE: 'size', ANY_NAN: 'sum', HOT_GPUS: lambda x: list(x.dropna()),
                                    **{n: 'sum' for n in N_CORES_IN_BANDS + N_MEMS_IN_BANDS}})
    return agg
def first_impressions_view(request):
    """First impressions view.

    Renders the 'about' page: detects whether the request carries an
    auth ticket cookie (and, if so, loads the admin tools), then splits
    the page rows into menu/submenu/main/steps buckets for the template.
    """
    auth = False
    try:
        # Presence of the auth ticket cookie marks an authenticated session;
        # a missing cookie raises KeyError below.
        auth = request.cookies['auth_tkt']
        auth_tools = request.dbsession.query(
            MyModel
        ).filter(MyModel.category == 'admin').all()
    except KeyError:
        # Anonymous visitor: no admin tools.
        auth_tools = []
    query = request.dbsession.query(MyModel)
    content = query.filter(MyModel.page == 'about').all()
    main_menu = query.filter(MyModel.subcategory == 'base').all()
    # Partition the page content by title/category for the template.
    submenu = [item for item in content if item.title == 'menu_place_holder']
    main = [item for item in content if item.category == 'first_impressions']
    steps = [item for item in content if item.category == 'steps']
    return {
        'auth': auth,
        'auth_tools': auth_tools,
        'main_menu': main_menu,
        'content': content,
        'submenu': submenu,
        'main': main,
        'steps': steps,
    }
def mysql_create_tables():
    """
    when using mysql database, this is the function that is used to create
    the tables in the database for the first time when you run the
    nettacker module.

    Args:
        None

    Returns:
        True if success otherwise False
    """
    try:
        connection_uri = 'mysql://{0}:{1}@{2}:{3}/{4}'.format(
            USER, PASSWORD, HOST, PORT, DATABASE)
        engine = create_engine(connection_uri)
        Base.metadata.create_all(engine)
        return True
    except Exception as _:
        # Any failure (bad credentials, unreachable host, ...) is reported
        # as False rather than raised.
        return False
def get_camera_intrinsic_parameters(stub):
    """ Get camera intrinsic params from gRPC detection server.

    :param stub: gRPC stub for the detection service
    :return: the GetCameraIntrinsicParameters response message

    NOTE: on any RpcError the error is logged and the process exits with
    status 1 instead of raising.
    """
    request = detection_server_pb2.Empty()
    try:
        response = stub.GetCameraIntrinsicParameters(request)
        return response
    except grpc.RpcError as err:
        logger.error(err.details()) #pylint: disable=no-member
        logger.error('{}, {}'.format(err.code().name, err.code().value)) #pylint: disable=no-member
        exit(1)
def version_strategy(version: str) -> AwesomeVersionStrategy:
    """Return the version strategy matching the given version string.

    Predicates are evaluated in priority order; the first match wins,
    falling back to UNKNOWN.
    """
    checks = (
        (is_buildver, AwesomeVersionStrategy.BUILDVER),
        (is_calver, AwesomeVersionStrategy.CALVER),
        (is_semver, AwesomeVersionStrategy.SEMVER),
        (is_special_container, AwesomeVersionStrategy.SPECIALCONTAINER),
        (is_simple, AwesomeVersionStrategy.SIMPLEVER),
    )
    for matches, strategy in checks:
        if matches(version):
            return strategy
    return AwesomeVersionStrategy.UNKNOWN
import os
def get_name_from_path(path):
    """ Parses a directory in 'mgmt_<name>' format into the camel case name
    used by ARM. The directory name should be in snake case.

    Walks the path from its tail looking for the first component containing
    'mgmt_'; raises RuntimeError if none is found.
    """
    remaining = path
    while remaining:
        remaining, component = os.path.split(remaining)
        if 'mgmt_' in component:
            return snake_to_camel(component.replace('mgmt_', '', 1))
    raise RuntimeError('You must specify a path with --src that includes a directory in the format \'mgmt_<name>\'')
def get_transcript_name(ids):
    """Get transcript name.

    Extracts the transcript-name field from ``ids`` by applying the
    module-level ``transcript_name_re`` pattern via ``_search``.
    """
    return _search(transcript_name_re, ids)
def rank_items(ratings, similarities):
    """
    Return a ranking as a list of (item index, predicted rating) tuples,
    sorted by predicted rating in descending order.

    Only unrated items (rating == -1) are included; each gets a rating
    predicted by predict_rating.
    """
    candidates = [
        (index, predict_rating(ratings, index, similarities))
        for index, rating in enumerate(ratings)
        if rating == -1
    ]
    return sorted(candidates, key=lambda pair: pair[1], reverse=True)
def hex2(n, uppercase=True):
    """
    Return 2 characters corresponding to the hexadecimal representation of `n`.

    BUG FIX: the `uppercase` flag was previously validated but never used —
    the output case depended solely on the hex_fig helper. It is now honored.

    :param n: 0 <= int < 256
    :param uppercase: bool — use uppercase hex digits when True
    :return: str (length == 2)
    :raises AssertionError: on out-of-range or mis-typed arguments
    """
    assert isinstance(n, int)
    assert 0 <= n < 256
    assert isinstance(uppercase, bool), type(uppercase)
    return format(n, '02X' if uppercase else '02x')
import csv
from typing import OrderedDict
def get_split_statistics(filepath):
    """
    Computes the label frequency, amount of utterances and tokens for a
    preprocessed tsv file.

    The file must have a header row with at least 'label' and 'clean'
    columns, tab-separated. Tokens are counted by splitting 'clean' on
    single spaces.

    :param filepath: path to the tsv file
    :return: (label_freq OrderedDict sorted by label, sample count, token count)
    """
    # Local import: the visible module imports did not include defaultdict,
    # which would raise NameError at call time.
    from collections import defaultdict
    label_freq = defaultdict(int)
    amount_tokens = 0
    with open(filepath, 'r') as file:
        reader = csv.DictReader(file, delimiter='\t')
        for line in reader:
            label_freq[line['label']] += 1
            text = line['clean']
            amount_tokens += len(text.split(' '))
    # sum() works directly on the values view; no intermediate list needed.
    amount_samples = sum(label_freq.values())
    label_freq = OrderedDict(sorted(label_freq.items(), key=lambda x: x[0]))
    return label_freq, amount_samples, amount_tokens
def cast_int(value):
    """
    Cast value to a signed 32-bit integer (two's complement).

    Usage:
        cast_int(0xffffffff) == -1
        cast_int(1 << 31) == -2147483648
        (where as: 1 << 31 == 2147483648)

    BUG FIX (docs): the original docstring claimed `cast_int(1 << 31) == -1`,
    which is false — that input maps to -2147483648. The arithmetic is also
    simplified to a direct two's-complement subtraction, which is equivalent
    to the previous negate-of-complement form.
    """
    value = value & 0xffffffff
    if value & 0x80000000:
        # High bit set: interpret as negative in two's complement.
        return value - 0x100000000
    return value
def dense_fast_decode(
        src_vocab_size,
        trg_vocab_size,
        max_in_len,
        n_layer,
        enc_n_layer,
        n_head,
        d_key,
        d_value,
        d_model,
        d_inner_hid,
        prepostprocess_dropout,
        attention_dropout,
        relu_dropout,
        preprocess_cmd,
        postprocess_cmd,
        weight_sharing,
        embedding_sharing,
        beam_size,
        batch_size,
        max_out_len,
        decode_alpha,
        eos_idx,
        params_type="normal"):
    """
    Use beam search to decode. Caches will be used to store states of history
    steps which can make the decoding faster.

    Returns (finished_ids, finished_scores): the best token sequences per
    beam and their scores.

    NOTE(review): `xrange` below implies this targets Python 2 — confirm the
    intended runtime before porting.
    """
    assert params_type == "normal" or params_type == "new" or params_type == "fixed"
    data_input_names = dense_encoder_data_input_fields + fast_decoder_data_input_fields
    all_inputs = make_all_inputs(data_input_names)
    # Split the flat input list into encoder inputs and decoder inputs.
    enc_inputs_len = len(encoder_data_input_fields)
    dec_inputs_len = len(fast_decoder_data_input_fields)
    enc_inputs = all_inputs[0:enc_inputs_len]
    dec_inputs = all_inputs[enc_inputs_len:enc_inputs_len + dec_inputs_len]
    # Run the encoder once; its output is reused at every decoding step.
    enc_output = wrap_encoder(src_vocab_size, max_in_len, enc_n_layer, n_head,
                              d_key, d_value, d_model, d_inner_hid,
                              prepostprocess_dropout, attention_dropout,
                              relu_dropout, preprocess_cmd, postprocess_cmd,
                              weight_sharing, embedding_sharing, enc_inputs, params_type=params_type)
    enc_bias = get_enc_bias(enc_inputs[0])
    source_length, = dec_inputs
    def beam_search(enc_output, enc_bias, source_length):
        """
        beam_search
        """
        max_len = layers.fill_constant(
            shape=[1], dtype='int64', value=max_out_len)
        step_idx = layers.fill_constant(
            shape=[1], dtype='int64', value=0)
        cond = layers.less_than(x=step_idx, y=max_len)
        while_op = layers.While(cond)
        caches_batch_size = batch_size * beam_size
        # Only the first beam of each batch starts "alive"; the remaining
        # beams are initialized to -INF so they cannot be selected yet.
        init_score = np.zeros([1, beam_size]).astype('float32')
        init_score[:, 1:] = -INF
        initial_log_probs = layers.assign(init_score)
        alive_log_probs = layers.expand(initial_log_probs, [batch_size, 1])
        # alive seq [batch_size, beam_size, 1]
        initial_ids = layers.zeros([batch_size, 1, 1], 'float32')
        alive_seq = layers.expand(initial_ids, [1, beam_size, 1])
        alive_seq = layers.cast(alive_seq, 'int64')
        # Tile the encoder output and attention bias across the beams so
        # each beam attends to the same source representation.
        enc_output = layers.unsqueeze(enc_output, axes=[1])
        enc_output = layers.expand(enc_output, [1, beam_size, 1, 1])
        enc_output = layers.reshape(enc_output, [caches_batch_size, -1, d_model])
        tgt_src_attn_bias = layers.unsqueeze(enc_bias, axes=[1])
        tgt_src_attn_bias = layers.expand(tgt_src_attn_bias, [1, beam_size, n_head, 1, 1])
        enc_bias_shape = layers.shape(tgt_src_attn_bias)
        tgt_src_attn_bias = layers.reshape(tgt_src_attn_bias, [-1, enc_bias_shape[2],
                                                               enc_bias_shape[3], enc_bias_shape[4]])
        beam_search = BeamSearch(beam_size, batch_size, decode_alpha, trg_vocab_size, d_model)
        # Per-layer key/value caches, grown one step per decoding iteration.
        caches = [{
            "k": layers.fill_constant(
                shape=[caches_batch_size, 0, d_model],
                dtype=enc_output.dtype,
                value=0),
            "v": layers.fill_constant(
                shape=[caches_batch_size, 0, d_model],
                dtype=enc_output.dtype,
                value=0)
        } for i in range(n_layer)]
        finished_seq = layers.zeros_like(alive_seq)
        finished_scores = layers.fill_constant([batch_size, beam_size],
                                               dtype='float32', value=-INF)
        finished_flags = layers.fill_constant([batch_size, beam_size],
                                              dtype='float32', value=0)
        with while_op.block():
            pos = layers.fill_constant([caches_batch_size, 1, 1], dtype='int64', value=1)
            pos = layers.elementwise_mul(pos, step_idx, axis=0)
            # Decode one step using only the most recent token of each beam.
            alive_seq_1 = layers.reshape(alive_seq, [caches_batch_size, -1])
            alive_seq_2 = alive_seq_1[:, -1:]
            alive_seq_2 = layers.unsqueeze(alive_seq_2, axes=[1])
            logits = wrap_decoder(
                trg_vocab_size, max_in_len, n_layer, n_head, d_key,
                d_value, d_model, d_inner_hid, prepostprocess_dropout,
                attention_dropout, relu_dropout, preprocess_cmd,
                postprocess_cmd, weight_sharing, embedding_sharing,
                dec_inputs=(alive_seq_2, alive_seq_2, pos, None, tgt_src_attn_bias),
                enc_output=enc_output, caches=caches, is_train=False, params_type=params_type)
            # Advance the beam-search state with this step's logits.
            alive_seq_2, alive_log_probs_2, finished_seq_2, finished_scores_2, finished_flags_2, caches_2 = \
                beam_search.inner_func(step_idx, logits, alive_seq_1, alive_log_probs, finished_seq,
                                       finished_scores, finished_flags, caches, enc_output,
                                       tgt_src_attn_bias)
            layers.increment(x=step_idx, value=1.0, in_place=True)
            finish_cond = beam_search.is_finished(step_idx, source_length, alive_log_probs_2,
                                                  finished_scores_2, finished_flags_2)
            # Write the updated state back into the loop variables.
            layers.assign(alive_seq_2, alive_seq)
            layers.assign(alive_log_probs_2, alive_log_probs)
            layers.assign(finished_seq_2, finished_seq)
            layers.assign(finished_scores_2, finished_scores)
            layers.assign(finished_flags_2, finished_flags)
            for i in xrange(len(caches_2)):
                layers.assign(caches_2[i]["k"], caches[i]["k"])
                layers.assign(caches_2[i]["v"], caches[i]["v"])
            layers.logical_and(x=cond, y=finish_cond, out=cond)
        # For batches where no beam finished, fall back to the alive beams.
        finished_flags = layers.reduce_sum(finished_flags, dim=1, keep_dim=True) / beam_size
        finished_flags = layers.cast(finished_flags, 'bool')
        mask = layers.cast(layers.reduce_any(input=finished_flags, dim=1, keep_dim=True), 'float32')
        mask = layers.expand(mask, [1, beam_size])
        mask2 = 1.0 - mask
        finished_seq = layers.cast(finished_seq, 'float32')
        alive_seq = layers.cast(alive_seq, 'float32')
        #print mask
        finished_seq = layers.elementwise_mul(finished_seq, mask, axis=0) + \
                       layers.elementwise_mul(alive_seq, mask2, axis = 0)
        finished_seq = layers.cast(finished_seq, 'int32')
        finished_scores = layers.elementwise_mul(finished_scores, mask, axis=0) + \
                          layers.elementwise_mul(alive_log_probs, mask2)
        finished_seq.persistable = True
        finished_scores.persistable = True
        return finished_seq, finished_scores
    finished_ids, finished_scores = beam_search(enc_output, enc_bias, source_length)
    return finished_ids, finished_scores
def fixed_rotation_objective(link,ref=None,local_axis=None,world_axis=None):
    """Convenience function that pins the given link at its current
    orientation in space.

    Behavior depends on which axis arguments are supplied:

    * neither ``local_axis`` nor ``world_axis``: the link's entire
      orientation is constrained.
    * only ``world_axis``: the link may only rotate about this world-space
      axis.
    * only ``local_axis``: the link may only rotate about this link-local
      axis.
    * both: unsupported; a ``ValueError`` is raised.

    Returns:
        (IKObjective or GeneralizedIKObjective)
    """
    ref_rotation = so3.identity() if ref is None else ref.getTransform()[0]
    link_rotation = link.getTransform()[0]
    # Link orientation expressed relative to the reference frame.
    Rrel = so3.mul(so3.inv(ref_rotation),link_rotation)
    obj = IKObjective()
    obj.robot = link.robot()
    if ref:
        assert link.robot()==ref.robot(),"Can't do generalized fixed rotation objectives yet"
    obj.setLinks(link.index,(-1 if ref is None else ref.index))
    if local_axis is None:
        if world_axis is None:
            # Fully constrain the relative orientation.
            obj.setFixedRotConstraint(Rrel)
        else:
            # Axis given in world coordinates; derive the link-local axis.
            local_axis = so3.apply(so3.inv(Rrel),world_axis)
            obj.setAxialRotConstraint(local_axis,world_axis)
    elif world_axis is None:
        # Axis given in link-local coordinates; derive the world-space axis.
        world_axis = so3.apply(Rrel,local_axis)
        obj.setAxialRotConstraint(local_axis,world_axis)
    else:
        raise ValueError("ik.fixed_rotation_objective does not accept both local_axis and world_axis keyword arguments")
return obj | 69f918217e5f5a310f5c81c7c0469347ccc9d068 | 28,938 |
def chinese_cut(text):
    """
    Perform Chinese word segmentation (via jieba).

    :param text: Chinese text data
    :return: the segmented text, tokens joined by single spaces
    """
return ' '.join(jieba.cut(text)) | 0e70bf1d65dbb8d01118bdb6b8f248a860a169f1 | 28,939 |
def bottleneck_block_(inputs,
                      filters,
                      is_training,
                      strides,
                      use_projection=False,
                      pruning_method='baseline',
                      init_method='baseline',
                      data_format='channels_first',
                      end_sparsity=0.,
                      weight_decay=0.,
                      clip_log_alpha=8.,
                      log_alpha_threshold=3.,
                      name=None):
  """Bottleneck block variant for residual networks with BN after convolutions.

  Args:
    inputs: Input tensor, float32 or bfloat16 of size [batch, channels, height,
      width].
    filters: Int specifying number of filters for the first two convolutions.
    is_training: Boolean specifying whether the model is training.
    strides: Int specifying the stride. If stride >1, the input is downsampled.
    use_projection: Boolean for whether the layer should use a projection
      shortcut. Often, use_projection=True for the first block of a block
      group.
    pruning_method: String that specifies the pruning method used to identify
      which weights to remove.
    init_method: ('baseline', 'sparse') Whether to use standard initialization
      or initialization that takes into the existing sparsity of the layer.
      'sparse' only makes sense when combined with pruning_method == 'scratch'.
    data_format: String that specifies either "channels_first" for [batch,
      channels, height, width] or "channels_last" for [batch, height, width,
      channels].
    end_sparsity: Desired sparsity at the end of training. Necessary to
      initialize an already sparse network.
    weight_decay: Weight for the l2 regularization loss.
    clip_log_alpha: Value at which to clip log_alpha (if pruning_method ==
      'variational_dropout') during training.
    log_alpha_threshold: Threshold at which to zero weights based on log_alpha
      (if pruning_method == 'variational_dropout') during eval.
    name: String that specifies name for model layer.

  Returns:
    The output activation tensor.
  """
  # Every convolution in the block shares this configuration.
  conv_kwargs = dict(
      pruning_method=pruning_method,
      init_method=init_method,
      data_format=data_format,
      end_sparsity=end_sparsity,
      weight_decay=weight_decay,
      clip_log_alpha=clip_log_alpha,
      log_alpha_threshold=log_alpha_threshold,
      is_training=is_training)

  shortcut = inputs
  if use_projection:
    # Projection shortcut only in first block within a group. Bottleneck
    # blocks end with 4 times the number of filters.
    shortcut = conv2d_fixed_padding(
        inputs=inputs,
        filters=4 * filters,
        kernel_size=1,
        strides=strides,
        name='bottleneck_projection_%s' % name,
        **conv_kwargs)
    shortcut = batch_norm_relu(
        shortcut, is_training, relu=False, data_format=data_format)

  # 1x1 channel reduction.
  inputs = conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=1,
      strides=1,
      name='bottleneck_1_%s' % name,
      **conv_kwargs)
  inputs = batch_norm_relu(inputs, is_training, data_format=data_format)

  # 3x3 spatial convolution; this is where the stride is applied.
  inputs = conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=3,
      strides=strides,
      name='bottleneck_2_%s' % name,
      **conv_kwargs)
  inputs = batch_norm_relu(inputs, is_training, data_format=data_format)

  # 1x1 expansion back to 4 * filters channels.
  inputs = conv2d_fixed_padding(
      inputs=inputs,
      filters=4 * filters,
      kernel_size=1,
      strides=1,
      name='bottleneck_3_%s' % name,
      **conv_kwargs)
  inputs = batch_norm_relu(
      inputs, is_training, relu=False, init_zero=True, data_format=data_format)
return tf.nn.relu(inputs + shortcut) | 3e1a5448bbaef54a1be46e2c5067c1f7603b1551 | 28,940 |
def WaitForOperation(client, operation_ref, message):
  """Polls the given operation until it terminates.

  The loop exits when the operation reports an error or reaches status
  'DONE'.

  Args:
    client: interface to the Cloud Updater API
    operation_ref: operation to poll
    message: message to be displayed by progress tracker

  Returns:
    True iff the operation finishes with success
  """
  # The GET request is identical on every poll, so build it once up front.
  request = client.MESSAGES_MODULE.ReplicapoolupdaterZoneOperationsGetRequest(
      project=operation_ref.project,
      zone=operation_ref.zone,
      operation=operation_ref.operation)
  with progress_tracker.ProgressTracker(message, autotick=False) as tracker:
    while True:
      operation = client.zoneOperations.Get(request)
      if operation.error:
        return False
      if operation.status == 'DONE':
        return True
      tracker.Tick()
time_util.Sleep(2) | d94f8db634e1fd09426461d1b412de8ea404efed | 28,941 |
def ifloordiv():
    """//=: Inplace floor divide operator."""
    class _Operand:
        """Records how ``//=`` was applied, as a formula string."""

        def __init__(self):
            # Rendered formula after ``//=`` has been applied.
            self.value = ''

        def __ifloordiv__(self, divisor):
            self.value = "int(val/{})".format(divisor)
            return self

    item = _Operand()
    item //= 2
return item.value | 351c27cffe9a919641b43f62df166e93827f468f | 28,942 |
def search_custom_results(result_id):
"""
Search a result for predictions.
request['maxPredictionSort'] - when true sort by max prediction
request['all'] - include values in download
request['page'] - which page of results to show
request['perPage'] - items per page to show
:param result_id: str: uuid of the custom_predictions/custom_preferences we want to search
:return: json response with 'result' property containing an array of predictions
"""
args = request.args
search_args = SearchArgs(g_config.binding_max_offset, args)
format = args.get('format')
sort_by_max = args.get('maxPredictionSort')
if sort_by_max == 'false':
sort_by_max = None
all_values = args.get('all')
page = get_optional_int(args, 'page')
per_page = get_optional_int(args, 'per_page')
if search_args.is_last_page():
page = CustomResultData.determine_last_page(get_db(), result_id, per_page)
offset = None
if page and per_page:
offset = (page - 1) * per_page
predictions = CustomResultData.get_predictions(get_db(), result_id, sort_by_max, per_page, offset)
if format == 'tsv' or format == 'csv':
filename = "custom_result.{}".format(format)
separator = ','
if format == 'tsv':
separator = '\t'
return download_file_response(filename, make_download_custom_result(separator, all_values, predictions))
else:
return make_ok_json_response({
'page': page,
'result': predictions}) | 2ab31458eccec466c27a3e85d84352b5919a7047 | 28,943 |
def create_1_layer_ANN_classification(
        input_shape,
        num_hidden_nodes,
        num_classes,
        activation_function='relu'
        ):
    """
    Creates a 1 layer classification ANN with the keras backend.

    Arguments:
        input_shape (int tuple): The shape of the input excluding batch.
        num_hidden_nodes (int): Number of hidden nodes in the hidden layer.
        num_classes (int): Number of classes the data contains.
        activation_function (str/tf.keras.activations): Activation function to
            use for the ANN. Default: ReLU.

    Returns:
        tf.keras.model.Sequential: A model of the ANN created.
    """
    layer_stack = [
        tf.keras.layers.Flatten(input_shape=input_shape),
        tf.keras.layers.Dense(num_hidden_nodes,
                              activation=activation_function),
        tf.keras.layers.Dense(num_classes, activation='softmax'),
    ]
    model = tf.keras.models.Sequential(layer_stack)
return model | 8e557be241f7b269cc12488dff9ecf96b3ff3fd2 | 28,944 |
def unsqueeze(x, axis, name=None):
    """
	:alias_main: paddle.unsqueeze
	:alias: paddle.unsqueeze, paddle.tensor.unsqueeze, paddle.tensor.manipulation.unsqueeze
    Insert single-dimensional entries to the shape of input Tensor ``x``. Takes one
    required argument axis, a dimension or list of dimensions that will be inserted.
    Dimension indices in axis are as seen in the output tensor.
    Args:
        x (Tensor): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.
        axis (int|list|tuple|Tensor): Indicates the dimensions to be inserted. The data type is ``int32`` .
                                    If ``axis`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
                                    If ``axis`` is a Tensor, it should be an 1-D Tensor .
                                    If ``axis`` is negative, ``axis = axis + ndim(x) + 1``.
        name (str|None): Name for this layer. Please refer to :ref:`api_guide_Name`, Default None.
    Returns:
        Tensor: Unsqueezed Tensor with the same data type as input Tensor.
    Examples:
        .. code-block:: python
            import paddle
            paddle.disable_static()
            x = paddle.rand([5, 10])
            print(x.shape)  # [5, 10]
            out1 = paddle.unsqueeze(x, axis=0)
            print(out1.shape)  # [1, 5, 10]
            out2 = paddle.unsqueeze(x, axis=[0, 2])
            print(out2.shape)  # [1, 5, 1, 10]
            axis = paddle.fluid.dygraph.to_variable([0, 1, 2])
            out3 = paddle.unsqueeze(x, axis=axis)
            print(out3.shape)  # [1, 1, 1, 5, 10]
    """
    # Thin wrapper: delegates directly to ``layers.unsqueeze`` (arguments are
    # passed positionally; the underlying signature is assumed compatible).
return layers.unsqueeze(x, axis, name) | d261f74692b8fb2ad3295aa2e621313a89b9197e | 28,945 |
def get_kills_info_player_match(player_id,match_id,prt=False):
    """
    Get ONE player's kill statistics in ONE match.

    :param player_id: player identifier
    :param match_id: match identifier
    :param prt: when True, print the result ("None" if unavailable)
    :return: dict of kill statistics, or None if the match data lacks
        'hero_kills'
    """
    info = get_player_info_match_id(player_id,match_id)
    if 'hero_kills' not in info:
        if prt is True:
            print("None")
        return None
    # NOTE: the original listed 'observer_kills' twice; a dict cannot hold
    # duplicate keys, so listing it once here leaves the output unchanged.
    kill_fields = (
        'hero_kills', 'lane_kills', 'neutral_kills', 'ancient_kills',
        'tower_kills', 'roshan_kills', 'observer_kills', 'courier_kills',
        'sentry_kills', 'necronomicon_kills',
    )
    info_kills = {field: info[field] for field in kill_fields}
    if prt is True:
        print(info_kills)
return info_kills | 7deca277a8315fa9f0f7dddbef0297a43b15b39e | 28,946 |
from typing import Dict
def update_args(args: Dict, inv_file: str, conf_file: str) -> Dict:
    """Record the inventory and config file paths on the controller args.

    Mutates ``args`` in place and returns it for convenience.

    Args:
        args (Dict): controller args (updated in place)
        inv_file (str): inventory file
        conf_file (str): config file

    Returns:
        Dict: the same ``args`` mapping, updated
    """
    args.update(inventory=inv_file, config=conf_file)
return args | c1cd377785f0af26740d5cecd73186caaa6c79b6 | 28,947 |
def _add_vessel_class(df):
"""Creates 'Class' column based on vessel LOA ft."""
df.loc[:, "Class"] = "Panamax"
post_row = (df.loc[:, "LOA ft"] > 965)
post_loc = df.loc[post_row, :].index
post_pan = df.index.isin(post_loc)
df.loc[post_pan, "Class"] = "Post-Panamax"
return df | 5abec9f0bee8d7d6c734100c64a7624fdb5fb672 | 28,948 |
def async_generate_ptr_query(ip):
    """Generate a PTR query for ``ip`` with the next random id.

    ``ip`` must expose ``reverse_pointer`` (e.g. an ``ipaddress`` address
    object); dnspython's ``make_query`` assigns the random message id.
    """
return message.make_query(ip.reverse_pointer, rdatatype.PTR) | 3b9086f7a3c13aead6c277362997968e65035b1a | 28,949 |
def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None):
  """Attention to the source and a neighborhood to the left within a block.

  The sequence is divided into blocks of length block_length. Attention for a
  given query position can only see memory positions less than or equal to the
  query position in the corresponding block.

  Args:
    q: a Tensor with shape [batch, heads, length, depth_k]
    k: a Tensor with shape [batch, heads, length, depth_k]
    v: a Tensor with shape [batch, heads, length, depth_v]
    block_length: an integer
    name: an optional string

  Returns:
    a Tensor of shape [batch, heads, length, depth_v]
  """
  with tf.variable_scope(
      name, default_name="within_local_attention_1d", values=[q, k, v]):
    batch, heads, length, depth_k = common_layers.shape_list(q)
    depth_v = common_layers.shape_list(v)[-1]
    # Prefer a static Python int for block_length when its value is known at
    # graph-construction time.
    if isinstance(block_length, tf.Tensor):
      const = tf.contrib.util.constant_value(block_length)
      if const is not None:
        block_length = int(const)
    # Pad query, key, value to ensure multiple of block length.
    original_length = length
    padding_size = tf.mod(-length, block_length)
    length += padding_size
    padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]
    q = tf.pad(q, padding)
    k = tf.pad(k, padding)
    v = tf.pad(v, padding)
    # Compute attention for all subsequent query blocks.
    num_blocks = tf.div(length, block_length)
    q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])
    k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])
    v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])
    # [batch, heads, num_blocks, block_length, block_length]
    attention = tf.matmul(q, k, transpose_b=True)
    # Lower-triangular bias enforces the "left within a block" mask.
    attention += tf.reshape(attention_bias_lower_triangle(block_length),
                            [1, 1, 1, block_length, block_length])
    attention = tf.nn.softmax(attention)
    # [batch, heads, num_blocks, block_length, depth_v]
    output = tf.matmul(attention, v)
    output = tf.reshape(output, [batch, heads, -1, depth_v])
    # Remove the padding if introduced.
    output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
    # Tensor-valued dims stay unknown (None); static ints become part of the
    # declared shape.
    output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in
                      (batch, heads, length, depth_v)])
return output | 1babc20ef4d89bda21d4b96bc61c6017087f2927 | 28,950 |
import requests
def catch_all(path):
    """
    Repeats the request to the latest version of default service by
    replacing all occurrences of the hostname to the new_host defined
    above. The replacement happens for:
    - URL and URL query string
    - Headers, including cookie headers
    - The body, if we can decode it.
    Additionally:
    - All headers starting with 'X-' are removed (to not mess-up with
    Google's reverse proxies)
    - The X-OAuth-Redirect header is added, containing the old hostname
    """
    # Only proxy secure requests.
    if flask.request.scheme == 'http':
        flask.abort(400, 'Only HTTPS is supported.')
    old_host = flask.request.host
    method = flask.request.method
    # Rewrite the target URL; '%2F' is un-escaped back to '/'.
    url = flask.request.url.replace(old_host, new_host).replace('%2F', '/')
    # Forward headers with the host rewritten; drop 'Forwarded' and 'X-*'.
    headers = {k: v.replace(old_host, new_host)
               for k, v in flask.request.headers.items()
               if not (k in ['Forwarded']
                       or k.startswith('X-'))}
    headers['X-OAuth-Redirect'] = f'https://{old_host}'
    try:
        data = flask.request.get_data(as_text=True)
        data = data.replace(old_host, new_host)
    except UnicodeDecodeError:
        # Binary request body: forward it untouched.
        data = flask.request.get_data()
    resp = requests.request(method=method, url=url, headers=headers, data=data,
                            allow_redirects=False)
    # Map the response back, dropping encoding headers that no longer apply.
    new_headers = {k: v.replace(new_host, old_host)
                   for k, v in resp.headers.items()
                   if not (k in ['Content-Encoding', 'Transfer-Encoding']
                           or k.startswith('X-'))}
    try:
        new_content = resp.content.decode().replace(new_host, old_host)
    except UnicodeDecodeError:
        # Binary response: return the raw bytes.
        new_content = resp.content
return new_content, resp.status_code, new_headers | c11a1de657c6095a77a0114a965733526f1b926f | 28,951 |
def bulk_process(
    debug = False, number_of_cycles_before_rate_analysis = 10, cell_ids = None,
):
    """
    TEMPDOC
    Re-process every stale cycling file (one whose import is newer than its
    last processing), then run per-cell-id post-processing.

    first calls process_single_file then, calls process_cell_id
    """
    stale_file_filters = {
        "database_file__deprecated": False,
        "process_time__lte": F("import_time"),
    }
    if cell_ids is None:
        # No explicit cell ids given: process everything and collect the
        # full set of current cell ids afterwards.
        all_current_cell_ids = CyclingFile.objects.filter(
            database_file__deprecated = False
        ).values_list(
            "database_file__valid_metadata__cell_id", flat = True,
        ).distinct()
    else:
        stale_file_filters[
            "database_file__valid_metadata__cell_id__in"
        ] = cell_ids
        all_current_cell_ids = cell_ids
    errors = [
        process_single_file(cycling_file, debug)
        for cycling_file in CyclingFile.objects.filter(**stale_file_filters)
    ]
    for cell_id in all_current_cell_ids:
        process_cell_id(cell_id, number_of_cycles_before_rate_analysis)
return list(filter(lambda x: x["error"], errors)) | 88042214b69bdd7a1102ceae17aceb4facf9f22d | 28,952 |
def fix_community_medium(
    tax,
    medium,
    min_growth=0.1,
    max_import=1,
    minimize_components=True,
    n_jobs=4,
):
    """Augment a growth medium so all community members can grow in it.

    Arguments
    ---------
    tax : pandas.Dataframe
        A taxonomy specification as passed to `micom.Community`.
    medium : pandas.Series
        A growth medium with exchange reaction IDs as index and positive
        import fluxes as values.
    min_growth : positive float
        The minimum biomass production required for growth.
    max_import : positive float
        The maximum import rate for added imports.
    minimize_components : boolean
        Whether to minimize the number of media components rather than the
        total flux.
    n_jobs: int
        The number of processes to use.

    Returns
    -------
    pandas.Series
        A new growth medium with the smallest amount of augmentations such
        that all members of the community can grow in it.
    """
    # Test the boolean mask directly.  The previous check,
    # ``medium[medium < 1e-6].any()``, evaluated the truthiness of the
    # *selected values*, so entries equal to 0 (falsy) failed to trigger
    # the adjustment even though they are below the threshold.
    if (medium < 1e-6).any():
        medium[medium < 1e-6] = 1e-6
        logger.info(
            "Some import rates were too small and were adjusted to 1e-6."
        )
    args = [
        (row.id, row.file, medium, min_growth, max_import, minimize_components)
        for _, row in tax.iterrows()
    ]
    res = workflow(_fix_medium, args, n_jobs=n_jobs, unit="model(s)")
return pd.concat(res, axis=1).max(axis=1) | c77cf77ec29ea160bc1c623247f103dc88d7b9b5 | 28,953 |
def _p_wa_higher_wb(k1, k2, theta1, theta2):
    """Evaluate the regularized incomplete beta function for two gamma laws.

    :param k1: Shape of wa
    :param k2: Shape of wb
    :param theta1: Scale of wa
    :param theta2: Scale of wb
    :return: betainc(k2, k1, theta1 / (theta1 + theta2))
    """
    # Note that betainc takes the two shape parameters in swapped order.
    b = k1
    a = k2
    x = theta1 / (theta1 + theta2)
return betainc(a, b, x) | a0b3934039ed480074faaba118c5a7e218a41905 | 28,954 |
def noisify_binary_asymmetric(y_train, noise, random_state=None):
    """Apply asymmetric binary label noise.

    mistakes:
        1 -> 0: noise
        0 -> 1: .05
    """
    assert 0.0 <= noise < 0.5
    P = np.eye(2)
    if noise > 0.0:
        # Row-stochastic transition matrix: P[i, j] = Pr(observed j | true i).
        P[1, 1] = 1.0 - noise
        P[1, 0] = noise
        P[0, 0] = 0.95
        P[0, 1] = 0.05
        y_train_noisy = multiclass_noisify(y_train, P=P,
                                           random_state=random_state)
        actual_noise = (y_train_noisy != y_train).mean()
        assert actual_noise > 0.0
        print('Actual noise %.2f' % actual_noise)
        y_train = y_train_noisy
return y_train, P | b0656d04d95a21108dd8c1ed76e464eed6c32b11 | 28,955 |
import json
def get_solved_problems(user_id, custom=False):
    """
    Get the solved and unsolved problems of a user

    @param user_id(Integer): user_id
    @param custom(Boolean): If the user_id is a custom user

    @return(Tuple): Iterables (maps of sets) of solved and unsolved problems
    """
    if user_id is None:
        return None

    def _settify_return_value(data):
        # Each element of data is a list of problem ids; expose them as sets.
        return map(lambda x: set(x), data)

    db = current.db
    stable = db.submission
    stopstalk_handle = get_stopstalk_handle(user_id, custom)
    redis_cache_key = "solved_unsolved_" + stopstalk_handle

    cached = current.REDIS_CLIENT.get(redis_cache_key)
    if cached:
        return _settify_return_value(json.loads(cached))

    if custom:
        base_query = (stable.custom_user_id == user_id)
    else:
        base_query = (stable.user_id == user_id)

    ac_rows = db(base_query & (stable.status == "AC")).select(
        stable.problem_id, distinct=True)
    solved_problems = set(row.problem_id for row in ac_rows)

    all_rows = db(base_query).select(stable.problem_id, distinct=True)
    unsolved_problems = set(row.problem_id for row in all_rows) - solved_problems

    data = [list(solved_problems), list(unsolved_problems)]
    current.REDIS_CLIENT.set(redis_cache_key,
                             json.dumps(data, separators=JSON_DUMP_SEPARATORS),
                             ex=1 * 60 * 60)
return _settify_return_value(data) | 5a178953246e58db33a25e65b121772192804369 | 28,956 |
import os
def ispickle(filename):
    """Return True if *filename* is an existing regular file whose extension marks a pickle archive ('.pk' or '.pkl')."""
return isfile(filename) and os.path.exists(filename) and (((fileext(filename) is not None) and fileext(filename).lower() in ['.pk', '.pkl']) or (filename[-4:] == '.pkl')) | 7cb6fc4b5f96951461dfa419faf6b4d3c0103390 | 28,957 |
import argparse
def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
        argparse.Namespace with the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Self Correction for Human Parsing")
    parser.add_argument("--dataset", type=str, default='lip', choices=['lip', 'atr', 'pascal'])
    # Plain string options: (flag, default, help text).
    string_options = (
        ("--model-restore", '', "restore pretrained model parameters."),
        ("--gpu", '0', "choose gpu device."),
        ("--input-dir", '', "path of input image folder."),
        ("--output-dir", '', "path of output image folder."),
    )
    for flag, default, help_text in string_options:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    parser.add_argument("--logits", action='store_true', default=False, help="whether to save the logits.")
return parser.parse_args() | 2411d96747a4a97008cb7433fcac380e3d535df2 | 28,958 |
import click
from typing import Optional
from typing import Union
def run_link(ctx: click.Context, link: str, language: str) -> Optional[Union[list, str]]:
    """
    Pull code from a paste link, prompt for args and stdin, and execute the
    code through the Piston api.

    Returns the output split into lines, a notice string when the program
    produced no output, or None when no code could be fetched.
    """
    console = ctx.obj["console"]
    run_args = helpers.get_args(console)
    run_stdin = helpers.get_stdin(console)
    code = get_code(console, link)
    if not code:
        return None
    payload = PistonQuery(language=language, args=run_args, stdin=run_stdin, code=code)
    data = services.query_piston(ctx, console, payload)
    if not data["output"]:
        return "Your code ran without output."
return data["output"].split("\n") | 2dbb4fcaaf0898884bee73bb8380a307a4f21f74 | 28,959 |
def cdiff(alpha, beta):
    """
    Difference between pairs :math:`x_i-y_i` around the circle,
    computed efficiently.

    :param alpha: sample of circular random variable
    :param beta: sample of circular random variable
    :return: distance between the pairs, wrapped to the circle via
        ``center_angle``
    """
return center_angle(alpha - beta) | a820bf74bb5ad0edb1f33c80a2f9f2a9723d7e25 | 28,960 |
def check_preview_exists(source_id: str, checksum: str) -> Response:
    """
    Handle request to check whether preview exists.

    Parameters
    ----------
    source_id : str
        Unique identifier for the source package.
    checksum : str
        State of the source package to which this preview corresponds.

    Returns
    -------
    dict
        Metadata about the deposit.
    int
        HTTP status code.
    dict
        Headers to add to the response.

    Raises
    ------
    :class:`.NotFound`
        Raised when a non-existant preview is requested.
    """
    session = store.PreviewStore.current_session()
    try:
        etag = session.get_preview_checksum(source_id, checksum)
    except store.DoesNotExist as exc:
        raise NotFound("No preview available") from exc
    headers = {"ETag": etag}
    logger.debug("check_preview_exists: %s", headers)
return {}, HTTPStatus.OK, headers | ab76e3731864993020f968b4b23f55c1c8bb6bd5 | 28,961 |
def split_buffer(buffer):
    """Split *buffer* into (data, separator) pairs.

    ``data`` should be highlighted, while ``separator`` should be printed
    unchanged after it.

    Args:
        buffer (str): A string to split using SPLIT_RE (even-indexed pieces
            are data, odd-indexed pieces are the separators between them).
    """
    splits = SPLIT_RE.split(buffer)
    # Append an empty separator in case of no splits or no separator at the end
    splits.append('')
    # Group all splits into format of (data, separator)
return tuple(zip(splits[0::2], splits[1::2])) | 33d344b2a91440c2e51304664a87431514fe6aff | 28,962 |
def save_verify_img(browser, id):
    """
    Save the verification code picture in the current directory.

    :param browser: selenium webdriver instance
    :param id: DOM id of the verification-code image element
    :return: verify_code, the filename of the cropped image
    """
    element = browser.find_element_by_id(id)
    # Bounding box of the element within the page.
    # NOTE(review): location/size are CSS pixels; confirm the screenshot is
    # not scaled (e.g. high-DPI displays) before relying on this crop.
    left = element.location['x']
    top = element.location['y']
    right = left + element.size['width']
    bottom = top + element.size['height']
    browser.save_screenshot("screenshot.png")
    screenshot = Image.open('screenshot.png')
    # Crop the full screenshot down to the target element.
    cropped = screenshot.crop((left, top, right, bottom))
    verify_code = 'verify_code.png'
    cropped.save(verify_code)
return verify_code | a406edadb7165512d54b613d2bd5136c2e81ea37 | 28,963 |
def write_values(value):
    """Write a `*values` line in an LTA file.

    Scalars (str/int/float) are rendered with ``str``; any other value is
    treated as a sequence and rendered as its elements joined by spaces.

    Parameters
    ----------
    value : [sequence of] int or float or str

    Returns
    -------
    str
    """
    if isinstance(value, (str, int, float)):
        return str(value)
    else:
return ' '.join([str(v) for v in value]) | 705aeb1cbbe1ef3d9deca2b5e360f4c72cb3a25e | 28,964 |
def denormalize_spectrogram(S, is_mel):
    """Undo log-magnitude spectrogram normalization, using the mel or linear statistics from hyperparameters."""
    if is_mel:
        return S * hp.mel_normalize_variance + hp.mel_normalize_mean
else: return S * hp.lin_normalize_variance + hp.lin_normalize_mean | 8e26d1b2f038c201faa473d14763bb8916a38bd3 | 28,965 |
def get_pixel(color, intensity: float):
    """Scale each channel of *color* by *intensity* and build a Pixel.

    ``color`` must be an iterable of numeric channel values (e.g. an
    (r, g, b) tuple): every component is multiplied by ``intensity`` and
    truncated to int.  The previous ``color: str`` annotation was wrong —
    a plain string channel would raise ``TypeError`` here.
    """
return Pixel(*[int(i*intensity) for i in color]) | d4198c4d003b098884eeaecffa407cb0b2066900 | 28,966 |
def findOverlapOrNearest(gs, ts, tree, start, end):
    """
    First check for direct TSS overlap in [start, end]; with no overlap,
    fall back to the nearest TSS found through the KD-tree.

    @param gs: {tss: cLoops2.ds.Gene}, tss is key and int
    @param ts: [tss]
    @param tree: KDTree built from TSSs
    @param start: query start
    @param end: query end
    return genes and distances (0 for direct overlaps)
    """
    # Step 1: collect genes whose TSS falls inside the query interval.
    overlapping = {gs[pos] for pos in range(start, end + 1) if pos in gs}
    if len(overlapping) > 0:
        hits = list(overlapping)
        return hits, [0] * len(hits)
    # Step 2: no overlap; query the KD-tree with the interval midpoint.
    else:
        d, i = tree.query([(start + end) / 2], k=1)
        g = gs[ts[i][0]]
        d = int(d)
return [g], [d] | 8c3c8c85a22063a1f8f7ffcfdb832dd4b357a485 | 28,967 |
def record_to_csv_line(record: dict,
                       schema: dict,
                       data_flattening_max_level: int = 0) -> str:
    """
    Transforms a record message to a CSV line

    Args:
        record: Dictionary that represents a csv line. Dict key is column name, value is the column value
        schema: JSONSchema of the record
        data_flattening_max_level: Max level of auto flattening if a record message has nested objects. (Default: 0)

    Returns:
        string of csv line
    """
    flat_record = flattening.flatten_record(
        record, schema, max_level=data_flattening_max_level)
    # prep_csv_row_string handles escape sequences ('\t', '\n', '\r', ...)
    # before values reach Snowflake; pipelinewise-target-snowflake used
    # json.dump() here instead.
    # Todo: This is currently a hacky way to get the behavior we want.
    csv_string = ','.join(
        prep_csv_row_string(flat_record, column) for column in schema
    )
return csv_string | 19ddcb6ed1c1aca642d61da92fde9473a8b4c12c | 28,968 |
import numpy
def get_front_types(locating_var_matrix_m01_s01,
                    warm_front_percentile=DEFAULT_FRONT_PERCENTILE,
                    cold_front_percentile=DEFAULT_FRONT_PERCENTILE):
    """Infers front type at each grid cell.

    M = number of rows in grid
    N = number of columns in grid

    :param locating_var_matrix_m01_s01: M-by-N numpy array created by
        `get_locating_variable`.
    :param warm_front_percentile: Used to locate warm fronts.  Cell [i, j] is
        part of a warm front when its locating value is <= the
        [100 - warm_front_percentile]th percentile of all non-positive values
        in the grid.
    :param cold_front_percentile: Used to locate cold fronts.  Cell [i, j] is
        part of a cold front when its locating value is >= the
        [cold_front_percentile]th percentile of all non-negative values in
        the grid.
    :return: predicted_label_matrix: M-by-N numpy array, where the value at
        each grid cell is from the list `front_utils.VALID_INTEGER_IDS`.
    """
    error_checking.assert_is_numpy_array_without_nan(
        locating_var_matrix_m01_s01)
    error_checking.assert_is_numpy_array(
        locating_var_matrix_m01_s01, num_dimensions=2)
    for this_percentile in (warm_front_percentile, cold_front_percentile):
        error_checking.assert_is_greater(this_percentile, 0.)
        error_checking.assert_is_less_than(this_percentile, 100.)

    non_positive_values = locating_var_matrix_m01_s01[
        locating_var_matrix_m01_s01 <= 0]
    warm_front_threshold_m01_s01 = numpy.percentile(
        non_positive_values, 100 - warm_front_percentile)
    non_negative_values = locating_var_matrix_m01_s01[
        locating_var_matrix_m01_s01 >= 0]
    cold_front_threshold_m01_s01 = numpy.percentile(
        non_negative_values, cold_front_percentile)

    predicted_label_matrix = numpy.full(
        locating_var_matrix_m01_s01.shape, front_utils.NO_FRONT_INTEGER_ID,
        dtype=int)
    warm_mask = locating_var_matrix_m01_s01 <= warm_front_threshold_m01_s01
    cold_mask = locating_var_matrix_m01_s01 >= cold_front_threshold_m01_s01
    predicted_label_matrix[warm_mask] = front_utils.WARM_FRONT_INTEGER_ID
    predicted_label_matrix[cold_mask] = front_utils.COLD_FRONT_INTEGER_ID
return predicted_label_matrix | 92d4bc84839c8f7f5e23697807ce01ec947d10c0 | 28,969 |
def rainfall_rate(radar, gatefilter, kdp_name, zdr_name, refl_name='DBZ_CORR',
                  hydro_name='radar_echo_classification', temperature_name='temperature'):
    """
    Rainfall rate algorithm from csu_radartools.

    Parameters:
    ===========
    radar:
        Py-ART radar structure.
    gatefilter:
        Gate filter; excluded gates are zeroed in the output.
    refl_name: str
        Reflectivity field name.
    zdr_name: str
        ZDR field name.
    kdp_name: str
        KDP field name.
    hydro_name: str
        Hydrometeor classification field name.
    temperature_name: str
        Temperature field name (optional; used to zero sub-freezing gates).

    Returns:
    ========
    rainrate: dict
        Rainfall rate.
    """
    # Use np.nan: the np.NaN alias was removed in NumPy 2.0.
    dbz = radar.fields[refl_name]['data'].filled(np.nan)
    zdr = radar.fields[zdr_name]['data'].filled(np.nan)
    fhc = radar.fields[hydro_name]['data']
    try:
        kdp = radar.fields[kdp_name]['data'].filled(np.nan)
    except AttributeError:
        # The KDP field may be a plain ndarray rather than a masked array.
        kdp = radar.fields[kdp_name]['data']
    rain, _ = csu_blended_rain.calc_blended_rain_tropical(dz=dbz, zdr=zdr, kdp=kdp, fhc=fhc, band='C')
    rain[(gatefilter.gate_excluded) | np.isnan(rain) | (rain < 0)] = 0
    try:
        temp = radar.fields[temperature_name]['data']
        # Zero out rainfall where temperature is below freezing.
        rain[temp < 0] = 0
    except Exception:
        # Best effort: the temperature field may be absent; keep rain as-is.
        pass
    rainrate = {"long_name": 'Blended Rainfall Rate',
                "units": "mm h-1",
                "standard_name": "rainfall_rate",
                '_Least_significant_digit': 2,
                "description": "Rainfall rate algorithm based on Thompson et al. 2016.",
                "data": rain.astype(np.float32)}
return rainrate | 6af6b29cc635659cef127f2fa780f2feb2f5926a | 28,970 |
def get_wrestlers():
    """Fetch every wrestler record from the database.

    @return A lazy ``map`` of Wrestler objects built from each stored
        record; empty when no wrestlers have been created.
    """
    wrestlers = db.get_wrestlers()
return map(lambda w: Wrestler(**w), wrestlers) | d0440780b0cd3e7fbb726718703ba6e81bf1ab1a | 28,971 |
import os
import logging
import re
def get_user_check_cls_def(user_def_file):
    """Get 'class UserCheck(object):' statement from userCheck.py.

    Args:
        user_def_file: The path of userCheck.py.

    Returns:
        A leading newline plus the 'class UserCheck(object):' header and its
        indented body, or None when user_def_file is not an existing file.
    """
    if not os.path.isfile(user_def_file):
        logging.error("%s is not exist or is not file", user_def_file)
        return
    logging.info("merge user check definitions from script %s", user_def_file)
    cls_str = "\n"
    capturing = False
    try:
        with open(user_def_file, encoding="utf-8") as source:
            for line in source:
                if not capturing:
                    # Skip everything until the class header is found.
                    if re.match(r"^class UserCheck\(object\):\s+$", line):
                        capturing = True
                        cls_str += line
                    continue
                # The class body ends at the first non-indented line.
                if not re.match(r"\s+", line):
                    break
                cls_str += line
    except IOError as error:
        logging.error("can't process user define file %s, because %s", user_def_file, error)
return cls_str | 258379602cb8188cd1191ad59473f764287ad9e9 | 28,972 |
def masterUniversalTransferSubacc(fromAccountType, toAccountType, asset, amount, fromEmail="", toEmail="", recvWindow=""):
    """# Universal Transfer (For Master Account)

    `POST /sapi/v1/sub-account/universalTransfer (HMAC SHA256)` -- weight: 1

    Required parameters: fromAccountType and toAccountType (one of
    "SPOT", "USDT_FUTURE", "COIN_FUTURE"), asset (STRING) and amount
    (DECIMAL). Optional parameters, sent only when non-empty: fromEmail,
    toEmail, recvWindow. The mandatory timestamp is presumably appended
    by the signed-request helper.
    """
    endpoint = '/sapi/v1/sub-account/universalTransfer'
    payload = {
        "fromAccountType": fromAccountType,
        "toAccountType": toAccountType,
        "asset": asset,
        "amount": amount,
    }
    # Optional fields are included only when the caller supplied a value,
    # in the same order as before (signing may depend on key order).
    for field, value in (("fromEmail", fromEmail),
                         ("toEmail", toEmail),
                         ("recvWindow", recvWindow)):
        if value:
            payload[field] = value
    return postbinancedata_sig(endpoint, payload)
def allpass(source, g):
    """First-order Schroeder all-pass filter.

    Realizes y[n] + g y[n-1] = g.conjugate() x[n] + x[n-1].
    """
    feedforward = g.conjugate()
    return polezero(source, 1.0, feedforward, g, 1.0)
def read_data_and_split(train_csv_path='../data/clean/bank_train.csv',
                        test_csv_path='../data/clean/bank_test.csv'):
    """
    Reads the data from the given paths and returns predictors and response
    variables separately for train and test sets.

    Parameters
    ----------
    train_csv_path: str containing the path of train csv. Default: '../data/clean/bank_train.csv'
    test_csv_path: str containing the path of test csv. Default: '../data/clean/bank_test.csv'

    Returns
    -------
    tuple: (X_train, y_train, X_test, y_test)

    Raises
    ------
    FileNotFoundError: if either csv path does not exist.
    KeyError: if either csv lacks the 'y_yes' response column.
    """
    try:
        train_ds = pd.read_csv(train_csv_path)
        test_ds = pd.read_csv(test_csv_path)
    except FileNotFoundError:
        print('Please check train and test filepaths')
        # Bare raise re-raises the original exception with its traceback
        # intact (the old `raise(e)` worked but was unidiomatic).
        raise
    try:
        X_train, y_train = train_ds.drop('y_yes', axis=1), train_ds['y_yes']
        X_test, y_test = test_ds.drop('y_yes', axis=1), test_ds['y_yes']
    except KeyError:
        print('Corrupted csv files. Please check the columns')
        # Previously `raise KeyError` raised a brand-new, message-less
        # KeyError, discarding which column was actually missing.
        raise
    return X_train, y_train, X_test, y_test
def get_bro(fn_config, readonly=True):
    """Build a bro backed by a datastore initialized from a config file.

    Args:
        fn_config: path to the config file
        readonly: load database readonly?

    Returns:
        A true bro
    """
    store = datastore.DataStore()
    store.read_config(fn_config)
    store.resume_data(readonly=readonly)
    return store.bro
def get_sample_metadata_token() -> str:
    """Get a sample-metadata Bearer auth token.

    Issues an Azure or GCP token depending on the deployment config's
    `cloud` field; any other value is a configuration error.
    """
    cfg = get_deploy_config()
    if cfg.cloud == "azure":
        return get_azure_auth_token(f"api://smapi-{cfg.sample_metadata_project}/.default")
    assert cfg.cloud == "gcp"
    return get_google_auth_token(cfg.sample_metadata_host)
def replace_module_params(source, target, modules):
    """Replace selected module params in target with the matching source values."""
    # Keep only the parameters whose module is in the requested set ...
    selected, _ = hk.data_structures.partition(
        lambda module, name, value: module in modules,
        source)
    # ... then overlay them on top of the target structure.
    return hk.data_structures.merge(target, selected)
import subprocess
def get_cdk_stacks(module_path, env_vars, context_opts):
    """Return the list of stack names in the CDK app.

    Runs ``cdk list`` (via the generated node command) and splits its
    output into one stack name per line.

    Args:
        module_path: path passed to the node command generator.
        env_vars: environment mapping for the subprocess.
        context_opts: extra CLI options appended to ``cdk list``.
    """
    LOGGER.debug('Listing stacks in the CDK app prior to '
                 'diff')
    # universal_newlines=True makes check_output return str instead of
    # bytes; without it, .strip().split('\n') raises TypeError on
    # Python 3 (str separator applied to a bytes object).
    return subprocess.check_output(
        generate_node_command(
            command='cdk',
            command_opts=['list'] + context_opts,
            path=module_path),
        env=env_vars,
        universal_newlines=True
    ).strip().split('\n')
def get_knmi_at_locations(model_ds,
                          start='2010', end=None,
                          nodata=-999):
    """Collect KNMI data at the active grid cells of ``model_ds``.

    Parameters
    ----------
    model_ds : xr.DataSet
        dataset containing relevant model grid information
    start : str or datetime, optional
        start date of measurements that you want. The default is '2010'.
    end : str or datetime, optional
        end date of measurements that you want. The default is None.
    nodata : int, optional
        cells whose first_active_layer equals this value are treated as
        inactive in all layers. The default is -999.

    Raises
    ------
    ValueError
        wrong grid type specified.

    Returns
    -------
    locations : pandas DataFrame
        locations of all active grid cells.
    oc_knmi_prec : hydropandas.ObsCollection
        knmi data of the precipitation stations.
    oc_knmi_evap : hydropandas.ObsCollection
        knmi data of the evaporation stations.
    """
    gridtype = model_ds.gridtype
    if gridtype == 'structured':
        locations = get_locations_structured(model_ds, nodata=nodata)
    elif gridtype == 'vertex':
        locations = get_locations_vertex(model_ds, nodata=nodata)
    else:
        raise ValueError('gridtype should be structured or vertex')
    # Fetch the nearest KNMI precipitation and evaporation series for
    # every active cell location.
    oc_knmi_prec = hpd.ObsCollection.from_knmi(
        locations=locations, start=[start], end=[end], meteo_vars=["RD"])
    oc_knmi_evap = hpd.ObsCollection.from_knmi(
        locations=locations, start=[start], end=[end], meteo_vars=["EV24"])
    return locations, oc_knmi_prec, oc_knmi_evap
def coordinate_x_and_y_as_ndarray(x, y):
    """
    Coordinate the two scalar/np.ndarray variables x and y as NumPy
    arrays of matching shape. Four cases are handled:

    1) x array of length n, y array of length m:
       both become (m, n) mesh-grid arrays (see np.meshgrid).
    2) x array of length n, y scalar:
       y becomes a length-n array repeating its value.
    3) x scalar, y array of length m:
       x becomes a length-m array repeating its value.
    4) both scalar:
       each becomes a length-1 array holding its own value.
    """
    x_is_seq = is_iterable(x)
    y_is_seq = is_iterable(y)
    if x_is_seq and y_is_seq:
        # case 1: build a mesh-grid combining x and y
        x, y = np.meshgrid(x, y)
    elif x_is_seq:
        # case 2: broadcast scalar y to x's length
        y = np.repeat(y, repeats=len(x))
    elif y_is_seq:
        # case 3: broadcast scalar x to y's length
        x = np.repeat(x, repeats=len(y))
    else:
        # case 4: wrap both scalars in length-1 arrays
        x = np.array([x])
        y = np.array([y])
    return x, y
def cos_sim(vec1, vec2):
    """
    Return the cosine similarity between the given two vectors.
    """
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    return vec1.dot(vec2) / denom
def makeTrans(tup: tuple):
    """Build a dict-form word definition from a database row tuple.

    Args:
        tup: (word, phonetic, translation, exchange), each entry either
            a str or UTF-8 encoded bytes (as returned by the DB driver).

    Returns:
        dict with keys "word"/"phonetic"/"translation"/"exchange", or
        None when the tuple has fewer than 4 entries.
    """
    def toStr(s):
        # Pass str through; decode UTF-8 bytes from the database.
        return s if isinstance(s, str) else str(s, encoding="utf-8")

    if len(tup) < 4:
        return None
    return {
        "word": toStr(tup[0]),
        "phonetic": toStr(tup[1]),
        "translation": toStr(tup[2]),
        "exchange": toStr(tup[3]),
    }
def _keepbits_interface(da, keepbits):
    """Common interface to allowed keepbits types

    Parameters
    ----------
    da : :py:class:`xarray.DataArray`
        Input data to bitround
    keepbits : int, dict of {str: int}, :py:class:`xarray.DataArray` or :py:class:`xarray.Dataset`
        How many bits to keep as int

    Returns
    -------
    keep : int
        Number of keepbits for variable given in ``da``

    Raises
    ------
    ValueError
        If ``da.name`` is missing from a dict/Dataset ``keepbits``.
    KeyError
        If a DataArray ``keepbits`` is named differently from ``da``.
    TypeError
        If ``keepbits`` is none of the supported types.
    """
    assert isinstance(da, xr.DataArray)
    if isinstance(keepbits, int):
        keep = keepbits
    elif isinstance(keepbits, dict):
        v = da.name
        # direct membership test instead of `.keys()`
        if v in keepbits:
            keep = keepbits[v]
        else:
            # fixed message typo ("not for in" -> "not found in")
            raise ValueError(f"name {v} not found in keepbits: {keepbits.keys()}")
    elif isinstance(keepbits, xr.Dataset):
        assert keepbits.coords["inflevel"].shape <= (
            1,
        ), "Information content is only allowed for one 'inflevel' here. Please make a selection."
        if "dim" in keepbits.coords:
            assert keepbits.coords["dim"].shape <= (
                1,
            ), "Information content is only allowed along one dimension here. Please select one `dim`. To find the maximum keepbits, simply use `keepbits.max(dim='dim')`"
        v = da.name
        if v in keepbits.keys():
            keep = int(keepbits[v])
        else:
            raise ValueError(f"name {v} not found in keepbits: {keepbits.keys()}")
    elif isinstance(keepbits, xr.DataArray):
        assert keepbits.coords["inflevel"].shape <= (
            1,
        ), "Information content is only allowed for one 'inflevel' here. Please make a selection."
        assert keepbits.coords["dim"].shape <= (
            1,
        ), "Information content is only allowed along one dimension here. Please select one `dim`. To find the maximum keepbits, simply use `keepbits.max(dim='dim')`"
        v = da.name
        if v == keepbits.name:
            keep = int(keepbits)
        else:
            raise KeyError(f"no keepbits found for variable {v}")
    else:
        raise TypeError(f"type {type(keepbits)} is not a valid type for keepbits.")
    return keep
def calc_fraction(i, q, u, transmission_correction=1):
    """
    Determine the fractional polarization from Stokes parameters.

    Parameters
    ----------
    i : float
        Stokes I parameter.
    q : float
        Stokes Q parameter.
    u : float
        Stokes U parameter.
    transmission_correction : float (Default = 1)
        Correction factor to account for the leak of photons with
        non-parallel electric field position angles. See Section 5.3.4
        of the ACS Data Handbook.

    Returns
    -------
    pol_fraction : float
        Polarization fraction.
    """
    polarized_intensity = np.sqrt(q ** 2 + u ** 2)
    return transmission_correction * polarized_intensity / i
from typing import Union
from re import S
def label(node: Union["SQLNode", None]) -> Symbol:
    """Returns a default alias for the node; to use when rendering to SQL.
    The implementation for subclasses of SQLNodes is defined in `nodedefs.py`.
    """
    # NOTE(review): `S` is imported from `re` above (i.e. the re.S regex
    # flag) but is called here like a Symbol constructor -- presumably a
    # project-local Symbol factory was intended. TODO confirm the import.
    if node is None:
        # Fallback alias used when no node is supplied.
        return S("_")
    else:
        # Concrete SQLNode subclasses override this elsewhere; reaching
        # this branch means the dispatch was not implemented for the type.
        raise NotImplementedError(
            f"label isn't implemented for the SQLNode type - {type(node)}"
        ) | a97295329dab1d54052b7f23656d23e4e0dc040e | 28,986
def mean(list):
    """Return the arithmetic mean of a list of numbers.

    Args:
        list: a non-empty sequence of numbers.

    Returns:
        The mean (true division, so a float for int inputs).

    Raises:
        ZeroDivisionError: if the sequence is empty.
    """
    # Use the builtin sum(); the previous manual loop shadowed the
    # builtin with a local variable of the same name.
    return sum(list) / len(list)
import six
def replace(
    key,
    value,
    host=DEFAULT_HOST,
    port=DEFAULT_PORT,
    time=DEFAULT_TIME,
    min_compress_len=DEFAULT_MIN_COMPRESS_LEN,
):
    """
    Replace a key on the memcached server. This only succeeds if the key
    already exists. This is the opposite of :mod:`memcached.add
    <salt.modules.memcached.add>`
    CLI Example:
    .. code-block:: bash
        salt '*' memcached.replace <key> <value>
    """
    # Validate integer-typed options up front so the client library is
    # never called with bad argument types.
    if not isinstance(time, six.integer_types):
        raise SaltInvocationError("'time' must be an integer")
    if not isinstance(min_compress_len, six.integer_types):
        raise SaltInvocationError("'min_compress_len' must be an integer")
    conn = _connect(host, port)
    # NOTE(review): `stats` is never used. This looks like a vestigial
    # server-connectivity check (get_stats() performs a round-trip);
    # confirm whether a "server is down" error was meant to be raised
    # here before removing the call.
    stats = conn.get_stats()
    return conn.replace(key, value, time=time, min_compress_len=min_compress_len)
import torch
def cpu():
    """Return the CPU torch device (defined in :numref:`sec_use_gpu`)."""
    return torch.device("cpu")
def get(name):
    """Look up and return the configuration registered under ``name``.

    Raises KeyError if no configuration with that name exists.
    """
    return CONFIGS[name]
def gpu_layout_factory(op):
    """
    Generates a list of possible layouts given an op
    Arguments:
        op: Computation graph op which runs on the device
    Returns:
        List of possible layout assignment descriptors
    """
    # Dispatch on the concrete op class. NOTE(review): isinstance() also
    # matches subclasses, so the branch order below can be significant --
    # do not reorder or merge branches without checking the op class
    # hierarchy.
    # NOTE(review): the integer argument (2 or 3) is forwarded to the
    # layout generators; its exact meaning is defined there, not here.
    if isinstance(op, AssignOp):
        # Elementwise layouts derived from the first argument's axes.
        return GPULayoutAssignment.generate_ew_layouts(op.args[0].axes, len(op.args[0].axes))
    elif isinstance(op, UnaryElementWiseOp):
        return GPULayoutAssignment.generate_ew_layouts(op.args[0].axes, 3)
    elif isinstance(op, BinaryElementWiseOp):
        return GPULayoutAssignment.generate_ew_layouts(op.args[0].axes, 3)
    elif isinstance(op, ReductionOp):
        return GPULayoutAssignment.generate_ew_layouts(op.axes, 2)
    elif isinstance(op, OneHotOp):
        return GPULayoutAssignment.generate_default_onehot_layout(op)
    elif isinstance(op, TensorSizeOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, Fill):
        return GPULayoutAssignment.generate_default_layout(op.args[0].axes, 3)
    elif isinstance(op, DotOp):
        return GPULayoutAssignment.generate_default_dot_layout(op)
    # Convolution / pooling family: default layout over the op's own axes.
    elif isinstance(op, ConvolutionOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, bprop_conv):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, update_conv):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, DeconvolutionOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, DeconvDerivOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, PoolingOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, BpropPoolOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    # Tensor-valued ops use the axes of the underlying tensor.
    elif isinstance(op, TensorValueOp):
        return GPULayoutAssignment.generate_default_layout(op.tensor.axes, 3)
    elif isinstance(op, AssignableTensorOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, LookupTableOp):
        return GPULayoutAssignment.generate_default_lut_layout(op)
    elif isinstance(op, (update_lut, bprop_lut)):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, RngOp):
        return GPULayoutAssignment.generate_default_layout(op.tensor.axes, 3)
    # Communication ops (send/recv/scatter/gather/allreduce).
    elif isinstance(op, (GPUCudaSendOp, GPUCudaRecvOp)):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, (GPUCudaScatterSendOp, GPUCudaScatterRecvOp, GPUCudaGatherRecvOp)):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, GPUCudaGatherSendOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, (GPUCudaAllReduceOp)):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    elif isinstance(op, CTCOp):
        return GPULayoutAssignment.generate_default_layout(op.axes, 3)
    else:
        # Unsupported op type: fail loudly rather than guess a layout.
        raise ValueError("Layouts not implemented for op type {}".format(op)) | fa9363368ec41454dcdd7ded508dc21b638bf985 | 28,991
def dropout(tensor,
            noise_shape=None,
            random_mask=None,
            probability=0.1,
            scale=True,
            seed=None,
            return_mask=False,
            name="dropout"):
    """ With probability `probability`, outputs `0` otherwise outputs the input element. If ``scale`` is True, the
    input elements are scaled up by `1 / (1-probability)` so that the expected sum of the activations is unchanged.
    By default, each element is kept or dropped independently. If `noise_shape`
    is specified, it must be [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
    will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
    and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
    kept independently and each row and column will be kept or not kept together.
    Args:
        tensor (`Tensor`): an input tensor
        noise_shape (`Tensor`): A 1-D `Tensor` of type `int32`, representing the shape for randomly generated drop flags
        return_mask (`bool`): if `True`, returns the random mask used
        random_mask (`Tensor`): a tensor used to create the random bernoulli mask
        probability (`float` or `Tensor`): A scalar `Tensor` with the same type as x. The probability that each element
            is kept.
        scale (`bool`): if true rescales the non-zero elements to 1 / (1-drop_probability)
        seed (`int`): A Python integer with the random number generator seed
        name (`str`): a name for this operation
    Returns:
        tensor (`Tensor`): output tensor with the same `DType` as the input
    Raises:
        ValueError: if `probability` is not in `[0, 1]` or if `x` is not a floating point tensor.
    """
    with tf.name_scope(name):
        tensor = tf.convert_to_tensor(tensor, name="x")
        # A caller-supplied mask is cast to the input's dtype so the later
        # multiply is type-consistent.
        if random_mask is not None:
            random_mask = as_tensor(random_mask, tensor.dtype)
        # Scaling requires floating point; try to cast non-float inputs.
        if not tensor.dtype.is_floating:
            try:
                tensor = tf.cast(tensor, tf.float32)
            except Exception:
                raise ValueError("x has to be a floating point tensor since it might be scaled"
                                 "Got a %s tensor instead. and could not cast it" % tensor.dtype)
        if not 0 <= probability < 1:
            raise ValueError("drop probability must be a scalar tensor or a float in the "
                             "range [0, 1), got %g" % probability)
        # Early return if nothing needs to be dropped.
        # (Python-float fast path: avoids building any graph ops at all.)
        if isinstance(probability, float) and probability == 0:
            if return_mask:
                return tensor, None
            else:
                return tensor
        elif isinstance(probability, float) and probability == 1:
            zeros = tf.zeros_like(tensor)
            if return_mask:
                return zeros, None
            else:
                return zeros
        probability = tf.convert_to_tensor(
            probability, dtype=tensor.dtype, name="drop_probability")
        probability.get_shape().assert_is_compatible_with(tf.TensorShape([]))
        # Do nothing if we know drop_probability == 0
        # (graph-mode path: the constant value may only be known after
        # conversion, hence this second round of early returns).
        const_val = tensor_util.constant_value(probability)
        if const_val == 0:
            if return_mask:
                return tensor, None
            else:
                return tensor
        elif const_val == 1:
            zeros = tf.zeros_like(tensor)
            if return_mask:
                return zeros, None
            else:
                return zeros
        noise_shape = _get_noise_shape(tensor, noise_shape)
        if random_mask is None:
            with tf.name_scope(name="random_mask"):
                # floor(keep_prob + U[0,1)) is 1 with probability keep_prob
                # and 0 otherwise -- a Bernoulli(keep_prob) mask.
                keep_prob = 1 - probability
                random_state = tf.random.uniform(noise_shape, seed=seed, dtype=tensor.dtype)
                mask = keep_prob + random_state
                random_mask = tf.math.floor(mask, name="binary_mask")
        if scale:
            # The 1e-10 floor guards against division by zero when
            # probability is (numerically) 1.
            ret = tf.math.divide(tensor, tf.math.maximum(1 - probability, 1e-10)) * random_mask
        else:
            ret = tensor * random_mask
        if not tf.executing_eagerly():
            # Restore the static shape lost by the mask multiply in graph mode.
            ret.set_shape(tensor.get_shape())
        if return_mask:
            return ret, random_mask
        else:
            return ret | bade8e9a66a288097860bb33e33a1503ba822262 | 28,992
def get_words_for_board_optimize_fast(word_tuples, board_size, packing_constant=1.1):
    """Try different combinations of words to maximize the overlap_avoidance_probability metric.

    First try to maximize the minimum, and if that can be easily achieved, then maximize the mean.

    Args:
        word_tuples: candidate word tuples; each is expected to expose a
            `.board` string (letters placed on the board) and a `.pretty`
            display name.
        board_size: side length of the square board.
        packing_constant: overfill factor -- words are admitted while their
            cumulative letter count stays under packing_constant * board_size**2.

    Returns:
        The optimized list of word tuples.

    Raises:
        ValueError: if too few words are available to pack the board.
    """
    # Identify the longest words included in the word set, call it len_class
    # Identify all left-out words in the same len_class
    #
    # For each included word in the length-class, check what the metric would be
    # if we swapped it with one of the words not in the included set.
    #
    # If the swap reslts in a higher metric value, make the swap, and go back to iterating from the beginning
    #
    # If we make it all the way through the list, or hit some max # of re-loops, terminate
    word_tuples = sorted(word_tuples, key=lambda wt: len(wt.board))
    # Greedy baseline: shortest words first, until the packing budget is used up.
    max_word_tuple_idx_naive = (np.cumsum([len(wt.board) for wt in word_tuples]) < packing_constant * board_size**2).sum()
    if max_word_tuple_idx_naive == len(word_tuples):
        raise ValueError(f"Too few semantic neighbor words to pack a {board_size}x{board_size} board.")
    word_tuples_naive = word_tuples[:max_word_tuple_idx_naive]
    # Only words of the longest included length are candidates for swapping.
    len_class = max(len(wt.board) for wt in word_tuples_naive)
    word_tuples_sub = [wt for wt in word_tuples_naive if len(wt.board) < len_class]
    word_tuples_incl = [wt for wt in word_tuples_naive if len(wt.board) == len_class]
    word_tuples_excl = [wt for wt in word_tuples[max_word_tuple_idx_naive:] if len(wt.board) == len_class]
    num_words, packing_level, mean_word_len, max_word_len, best_min_collision_avoidance_prob, best_mean_collision_avoidance_prob = get_word_set_stats(word_tuples_naive, board_size)
    print("\nPre-optimization word stats:")
    print(f"  num_words: {num_words}")
    print(f"  packing_level: {packing_level:.3f}")
    print(f"  word_len (mean/max): {mean_word_len:.2f} / {max_word_len}")
    print(f"  collision_avoidance_prob (min/mean): {best_min_collision_avoidance_prob:.6f} / {best_mean_collision_avoidance_prob:.6f}")
    # Local search: keep trying single swaps until no swap improves the
    # metric or the iteration budget is spent.
    loop_cnt, max_loops = 0, 2e3
    while loop_cnt < max_loops:
        better_set_found = False
        for wt1 in word_tuples_incl:
            for wt2 in word_tuples_excl:
                loop_cnt += 1
                word_tuples_incl_new = [wt if wt != wt1 else wt2 for wt in word_tuples_incl]  # swap out wt1 for wt2
                word_tuples_excl_new = [wt if wt != wt2 else wt1 for wt in word_tuples_excl]  # swap out wt2 for wt1
                _, _, _, _, min_collision_avoidance_prob, mean_collision_avoidance_prob = get_word_set_stats(word_tuples_sub+word_tuples_incl_new, board_size)
                # Compare using a lexical ordering, with min prioritized over mean
                if (min_collision_avoidance_prob, mean_collision_avoidance_prob) > (best_min_collision_avoidance_prob, best_mean_collision_avoidance_prob):
                    best_min_collision_avoidance_prob, best_mean_collision_avoidance_prob = min_collision_avoidance_prob, mean_collision_avoidance_prob
                    word_tuples_incl, word_tuples_excl = word_tuples_incl_new, word_tuples_excl_new
                    better_set_found = True
                    break
            if better_set_found:
                break
        if not better_set_found:
            # Made it outside the double-loop without short-cutting
            # Means we checked all combinations without finding any improvements, therfore we're done
            break
    if loop_cnt < max_loops:
        print(f"\nEnded optimization with loop_cnt={loop_cnt}")
    else:
        print(f"\nOptimization terminated early because exceeded max_loops={int(max_loops)}")
    word_tuples_opt = word_tuples_sub + word_tuples_incl
    removed_word_tuples = set(word_tuples_naive) - set(word_tuples_opt)
    added_word_tuples = set(word_tuples_opt) - set(word_tuples_naive)
    print(f"\nWords removed: {', '.join([wt.pretty for wt in removed_word_tuples])}")
    print(f"Words added: {', '.join([wt.pretty for wt in added_word_tuples])}")
    num_words, packing_level, mean_word_len, max_word_len, min_collision_avoidance_prob, mean_collision_avoidance_prob = get_word_set_stats(word_tuples_opt, board_size)
    print("\nPost-optimization word stats:")
    print(f"  num_words: {num_words}")
    print(f"  packing_level: {packing_level:.3f}")
    print(f"  word_len (mean/max): {mean_word_len:.2f} / {max_word_len}")
    print(f"  collision_avoidance_prob (min/mean): {min_collision_avoidance_prob:.6f} / {mean_collision_avoidance_prob:.6f}")
    return word_tuples_opt | 4c671ed58da3cb10f8493d28b3e4eda0aa265f73 | 28,993
import re
def parse_timedelta(time_str: str) -> timedelta:
    """
    Parse a duration string, e.g. (2h13m), into a timedelta object. Stolen on the web
    """
    pattern = re.compile(
        r"^((?P<days>[\.\d]+?)d)?((?P<hours>[\.\d]+?)h)?((?P<minutes>[\.\d]+?)m)?((?P<seconds>[\.\d]+?)s)?$"
    )
    # Normalize long-form unit names down to the single-letter units the
    # regex understands.
    normalized = replace(
        time_str,
        {
            "sec": "s",
            "second": "s",
            "seconds": "s",
            "minute": "m",
            "minutes": "m",
            "min": "m",
            "mn": "m",
            "days": "d",
            "day": "d",
            "hours": "h",
            "hour": "h",
        },
    )
    match = pattern.match(normalized)
    if match is None:
        raise ValueError(
            f"Could not parse any time information from '{time_str}'. Examples of valid strings: '8h', '2d8h5m20s', '2m4s'"
        )
    time_params = {
        unit: float(amount)
        for unit, amount in match.groupdict().items()
        if amount
    }
    return timedelta(**time_params)
from vrml import node
def delSceneGraph(cls):
    """Delete the scenegraph associated with a prototype class."""
    # Delegate to the scenegraph property's deleter on the prototype.
    return node.PrototypedNode.scenegraph.fdel(cls)
from typing import Any
def get_node_active_learning_heuristic_by_name(
name: str,
**kwargs: Any,
) -> NodeActiveLearningHeuristic:
"""
Factory method for node active learning heuristics.
:param name:
The name of the heuristic.
:param kwargs:
The keyword-based arguments.
:return:
An instance of a node active learning heuristic.
"""
# Get heuristic class
cls = get_node_active_learning_heuristic_class_by_name(name=name)
# instantiate
return cls(**kwargs) | aab06e172b15ee64b410cdd8ca90f9f137143078 | 28,996 |
from skimage.exposure import rescale_intensity
def apply_elastic_transform_intensity(imgs, labels, strength=0.08, shift=0.3, sigma_max=0.2, N=20, random_state=None):
    """This function wraps the elastic transform as well as adding random noise and gamma adjustment to augment a batch of images.
    Parameters
    ----------
    imgs : numpy array
        array of input gray or RGB images:
            (n_imgs x n_cols): gray image.
            (n_imgs x n_cols x 3): RGB image.
    labels : numpy array
        array of corresponding annotation images for n different tasks, as represented by the number of image channels.
            (n_imgs x n_cols x n_tasks): for n_tasks.
    strength : float
        the strength of the stretching in the elastic transform, see :meth:`elastic_transform`
    shift : float
        the maximum shift in pixel intensity in addition to addition of Gaussian noise.
    sigma_max : float
        defines the maximum standard deviation of the Gaussian noise corruption. The noise level added is a uniform variable on the range [0, sigma_max]
    N : int
        number of random deformations.
    random_state : int or None
        optionally set a random seed for the random generation.
    Returns
    -------
    aug_imgs : numpy array
        augmented image dataset, expanded N times.
    aug_labels : numpy array
        corresponding annotation image dataset, expanded N times.
    """
    aug_imgs = []
    aug_labels = []
    n_imgs = len(imgs)
    for i in range(n_imgs):
        im = rescale_intensity(imgs[i])
        lab = labels[i]
        # Stack image and label channels so both are deformed by the same
        # elastic field below.
        im_ = np.dstack([im, lab])
        if len(lab.shape) == 3:
            n_label_channels = lab.shape[-1]
        if len(lab.shape) == 2:
            n_label_channels = 1
        n_img_channels = im_.shape[-1] - n_label_channels
        for j in range(N):
            # One shared elastic deformation per iteration; then three
            # variants are emitted: plain, noisy, and gamma-adjusted
            # (each with the identical deformed label) -- so the output is
            # expanded 3*N-fold per input image.
            im_out = elastic_transform(im_, im_.shape[1] * strength, im_.shape[1] * strength, im_.shape[1] * strength, random_state=random_state)
            aug_imgs.append(im_out[:,:,:n_img_channels][None,:]) # no noise
            aug_labels.append(im_out[:,:,n_img_channels:n_img_channels+n_label_channels][None,:]) # with noise.
            aug_imgs.append(add_noise(im_out[:,:,:n_img_channels], shift=shift, sigma=np.random.uniform(0,sigma_max,1))[None,:])
            aug_labels.append(im_out[:,:,n_img_channels:n_img_channels+n_label_channels][None,:])
            aug_imgs.append(add_gamma(im_out[:,:,:n_img_channels], gamma=0.3)[None,:]) # random gamma enhancement.
            aug_labels.append(im_out[:,:,n_img_channels:n_img_channels+n_label_channels][None,:])
    aug_imgs = np.concatenate(aug_imgs, axis=0)
    aug_labels = np.concatenate(aug_labels, axis=0)
    return aug_imgs, aug_labels | 51af4c8dd8e5e76cf6ababc9096bbd1dd7f57a08 | 28,997
def byte_size(num, suffix='B'):
    """
    Return a formatted string indicating the size in bytes, with the proper
    unit, e.g. KB, MB, GB, TB, etc.
    :arg num: The number of bytes
    :arg suffix: An arbitrary suffix, like `Bytes`
    :rtype: str
    """
    # Docstring fixed: the function returns a formatted string, not a float.
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    # Anything past zettabytes is reported in yottabytes.
    return "%.1f%s%s" % (num, 'Y', suffix)
def reduce_grid_info(grid_fnames, noise_fnames=None, nprocs=1, cap_unique=1000):
    """
    Computes the total minimum and maximum of the necessary quantities
    across all the subgrids. Can run in parallel.
    Parameters
    ----------
    grid_fnames: list of str
        subgrid file paths
    noise_fnames: list of str (optional)
        noise file for each subgrid
    nprocs: int
        Number of processes to use
    cap_unique: int
        Stop keeping track of the number of unique values once it
        reaches this cap. This reduces the memory usage. (Typically, for
        the fluxes, there are as many unique values as there are grid
        points. Since we need to store all these values to check if
        they're unique, a whole column of the grid is basically being
        stored. This cap fixes this, and everything should keep working
        in the rest of the code as long as cap_unique is larger than
        whatever number of bins is being used.).
    Returns
    -------
    info_dict: dictionary
        {name of quantity: (min, max), ...}
    """
    # Gather the mins and maxes for the subgrid
    if noise_fnames is None:
        arguments = [(g, None) for g in grid_fnames]
    else:
        arguments = list(zip(grid_fnames, noise_fnames))
    # Use generators here for memory efficiency
    parallel = nprocs > 1
    # NOTE(review): the worker pool is never close()d or join()ed after
    # the generator is exhausted -- confirm whether leaking it until
    # garbage collection is acceptable here.
    if parallel:
        p = Pool(nprocs)
        info_dicts_generator = p.imap(unpack_and_subgrid_info, arguments)
    else:
        info_dicts_generator = (subgrid_info(*a) for a in arguments)
    # Assume that all info dicts have the same keys
    # (consuming the first result here; the loop below continues from the
    # second one).
    first_info_dict = next(info_dicts_generator)
    qs = [q for q in first_info_dict]
    union_min = {}
    union_max = {}
    union_unique = {}
    # This last field can take up a lot of memory. A solution would be
    # to allow a maximum number of values (50 is the default maximum
    # number of bins anyway, and this value is needed to determine the
    # number of bins).
    for q in qs:
        # Combine the values of the first subgrid
        union_min[q] = first_info_dict[q]["min"]
        union_max[q] = first_info_dict[q]["max"]
        union_unique[q] = first_info_dict[q]["unique"]
    # And all the other subgrids (the generator just continues)
    for individual_dict in info_dicts_generator:
        for q in qs:
            union_min[q] = min(union_min[q], individual_dict[q]["min"])
            union_max[q] = max(union_max[q], individual_dict[q]["max"])
            # Once the cap is hit we stop merging, so num_unique below
            # saturates at (roughly) cap_unique.
            if len(union_unique[q]) < cap_unique:
                union_unique[q] = np.union1d(
                    union_unique[q], individual_dict[q]["unique"]
                )
    result_dict = {}
    for q in qs:
        result_dict[q] = {
            "min": union_min[q],
            "max": union_max[q],
            "num_unique": len(union_unique[q]),
        }
    return result_dict | ceaad828396d8894def288d29357799b0bd8a10d | 28,999
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.