content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def HVA_TFIM_2D_data(weights, x, wires, n_layers=1, types=1):
    """
    2-D Ising-coupling HVA-TFIM feature map (cf. arXiv:2008.02941).

    :param weights: trainable weights; required length depends on ``types``
        (4 * n_layers for types == 1, 2 * n_layers for types == 2,
        6 * n_layers otherwise)
    :param x: 1-d input; x[0] and x[1] are encoded on the circuit and
        len(x) must be <= len(wires)
    :param wires: sequence of exactly four wires on which the feature map acts
    :param n_layers: number of repetitions of the first layer
    :param types: selects the entangler/weight layout (1, 2, or other)
    :raises ValueError: if the wrong number of wires, features, or weights
        is supplied
    """
    # BUG FIX: the original body unconditionally reassigned
    # ``wires = range(0, 4)``, silently discarding the caller-supplied
    # wires. The circuit below addresses wires[0..3], so validate the
    # argument instead of overriding it.
    wires = list(wires)
    n_wires = len(wires)
    if n_wires != 4:
        raise ValueError("Feat map acts on exactly 4 wires, got {}.".format(n_wires))
    if types == 1:
        n_weights_needed = 4 * n_layers
    elif types == 2:
        n_weights_needed = 2 * n_layers
    else:
        n_weights_needed = 6 * n_layers
    if len(x) > n_wires:
        raise ValueError("Feat map can encode at most {} features (which is the "
                         "number of wires), got {}.".format(n_wires, len(x)))
    if len(weights) != n_weights_needed:
        raise ValueError("Feat map needs {} weights, got {}."
                         .format(n_weights_needed, len(weights)))
    for l in range(n_layers):
        # Hadamard layer on every wire before the entanglers.
        for i in range(n_wires):
            qml.Hadamard(wires=wires[i])
        if types == 1:
            # Features drive two ZZ couplings, trainable weights the other two.
            _entanglerZ(x[0], wires[0], wires[1])
            _entanglerZ(x[1], wires[2], wires[3])
            _entanglerZ(weights[l * 4], wires[0], wires[3])
            _entanglerZ(weights[l * 4 + 1], wires[1], wires[2])
        elif types == 2:
            # A single shared weight drives all four couplings.
            _entanglerZ(weights[l * 2], wires[0], wires[1])
            _entanglerZ(weights[l * 2], wires[2], wires[3])
            _entanglerZ(weights[l * 2], wires[0], wires[3])
            _entanglerZ(weights[l * 2], wires[1], wires[2])
        else:
            # Independent weight per coupling.
            _entanglerZ(weights[l * 6], wires[0], wires[1])
            _entanglerZ(weights[l * 6 + 1], wires[2], wires[3])
            _entanglerZ(weights[l * 6 + 2], wires[0], wires[3])
            _entanglerZ(weights[l * 6 + 3], wires[1], wires[2])
        # Repeat the feature encoding at the end of the layer:
        # features on wires 0 and 2, trainable rotations on wires 1 and 3.
        qml.RX(x[0], wires=wires[0])
        qml.RX(x[1], wires=wires[2])
        if types == 1:
            qml.RX(weights[l * 4 + 2], wires=wires[1])
            qml.RX(weights[l * 4 + 3], wires=wires[3])
        elif types == 2:
            qml.RX(weights[l * 2 + 1], wires=wires[1])
            qml.RX(weights[l * 2 + 1], wires=wires[3])
        else:
            qml.RX(weights[l * 6 + 4], wires=wires[1])
            qml.RX(weights[l * 6 + 5], wires=wires[3])
def _get_jones_types(name, numba_ndarray_type, corr_1_dims, corr_2_dims):
    """
    Classify a Jones-matrix array into one of three cases.

    1. The array is absent (None): no Jones matrices.
    2. Dimensionality matches ``corr_1_dims``: (1,) or (2,) correlations.
    3. Dimensionality matches ``corr_2_dims``: full (2, 2) correlation.

    Parameters
    ----------
    name : str
        Array name, used in the error message.
    numba_ndarray_type : numba.type
        Numba type of the array (may encode None).
    corr_1_dims : int
        Expected ndim for the (1,)/(2,) correlation case.
    corr_2_dims : int
        Expected ndim for the (2, 2) correlation case.

    Returns
    -------
    int
        One of JONES_NOT_PRESENT, JONES_1_OR_2 or JONES_2X2.

    Raises
    ------
    ValueError
        If the array's ndim matches neither expected dimensionality.
    """
    if is_numba_type_none(numba_ndarray_type):
        return JONES_NOT_PRESENT
    ndim = numba_ndarray_type.ndim
    if ndim == corr_1_dims:
        return JONES_1_OR_2
    if ndim == corr_2_dims:
        return JONES_2X2
    raise ValueError("%s.ndim not in (%d, %d)" %
                     (name, corr_1_dims, corr_2_dims))
def editing_passport_serial_handler(update: Update,
                                    context: CallbackContext) -> int:
    """Get and save passport serial, returning the next conversation state."""
    return editing_pd(
        update,
        context,
        validator=validators.passport_serial_validator,
        attribute='passport_serial',
        state=PASSPORT_SERIAL,
    )
def get_overview(ticker: str) -> pd.DataFrame:
    """Get Alpha Vantage company overview.

    Parameters
    ----------
    ticker : str
        Stock ticker

    Returns
    -------
    pd.DataFrame
        Dataframe of fundamentals; empty on HTTP failure or when the API
        returns a throttling "Note" payload.
    """
    # Request OVERVIEW data from Alpha Vantage API
    s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
    result = requests.get(s_req, stream=True)
    if result.status_code != 200:
        return pd.DataFrame()
    # FIX: parse the response body once; the original re-invoked
    # result.json() four times, re-parsing the payload on each call.
    payload = result.json()
    # A "Note" key signals API rate limiting rather than real data.
    if "Note" in payload:
        console.print(payload["Note"], "\n")
        return pd.DataFrame()
    df_fa = pd.json_normalize(payload)
    # Keep json data sorting in dataframe
    df_fa = df_fa[list(payload.keys())].T
    # Humanize large numbers; the first 5 rows are textual metadata.
    df_fa.iloc[5:] = df_fa.iloc[5:].applymap(lambda x: long_number_format(x))
    clean_df_index(df_fa)
    # Repair acronyms that clean_df_index split into single letters.
    df_fa = df_fa.rename(
        index={
            "E b i t d a": "EBITDA",
            "P e ratio": "PE ratio",
            "P e g ratio": "PEG ratio",
            "E p s": "EPS",
            "Revenue per share t t m": "Revenue per share TTM",
            "Operating margin t t m": "Operating margin TTM",
            "Return on assets t t m": "Return on assets TTM",
            "Return on equity t t m": "Return on equity TTM",
            "Revenue t t m": "Revenue TTM",
            "Gross profit t t m": "Gross profit TTM",
            "Diluted e p s t t m": "Diluted EPS TTM",
            "Quarterly earnings growth y o y": "Quarterly earnings growth YOY",
            "Quarterly revenue growth y o y": "Quarterly revenue growth YOY",
            "Trailing p e": "Trailing PE",
            "Forward p e": "Forward PE",
            "Price to sales ratio t t m": "Price to sales ratio TTM",
            "E v to revenue": "EV to revenue",
            "E v to e b i t d a": "EV to EBITDA",
        }
    )
    return df_fa
def effective_sample_size(samples):
    """
    Return the effective sample size for each parameter column of the
    2-d ``samples`` matrix (rows = draws, columns = parameters).
    """
    try:
        n_samples, n_params = samples.shape
    except (ValueError, IndexError):
        raise ValueError('Samples must be given as a 2d array.')
    if n_samples < 2:
        raise ValueError('At least two samples must be given.')
    ess_per_param = []
    for param in range(n_params):
        ess_per_param.append(ess_single_param(samples[:, param]))
    return ess_per_param
def draw_windows():
    """
    Draw a row of three filled 13x20 rectangular windows, spaced 30 units
    apart, then hide the turtle.
    :return: None
    """
    def _draw_pane():
        # One filled 13x20 rectangle: two half-perimeter passes.
        martin.begin_fill()
        for _ in range(2):
            martin.pendown()
            martin.forward(13)
            martin.right(90)
            martin.forward(20)
            martin.right(90)
            martin.penup()
        martin.end_fill()

    # FIX: the original repeated the pane-drawing code three times verbatim;
    # draw the three panes in a loop, moving 30 units between adjacent panes.
    for pane in range(3):
        _draw_pane()
        if pane < 2:
            martin.forward(30)
    martin.hideturtle()
def test_left_context_third_w():
    """
    Left-context extraction with the pivot word at index 2:
    the result should be a string of the two preceding tokens.
    """
    sent = "Imagine un monde où les animaux nous mangeraient Ils feraient des ragoûts de nous, des soupes de nous, des burgers de nous"
    words = sent.split()
    left = concordance_zola.find_l_context(words, 2, 5)
    assert left == "Imagine un"
def get_credentials(credentials_path):
    """
    Get valid user credentials from storage.

    Returns the stored Credentials object, or None when nothing has been
    stored or the stored credentials are invalid.
    """
    credentials = Storage(credentials_path).get()
    if credentials and not credentials.invalid:
        return credentials
    return None
def setup(hass, config):
    """Set up this component."""
    conf_track = config[DOMAIN][CONF_TRACK]
    _LOGGER.info('version %s is starting, if you have ANY issues with this, please report'
                 ' them here: https://github.com/custom-components/custom_updater', __version__)
    ha_conf_dir = str(hass.config.path())
    card_controller = CustomCards(hass, ha_conf_dir)
    components_controller = CustomComponents(hass, ha_conf_dir)
    # An empty track list means "track everything"; otherwise each category
    # is only handled when explicitly listed.
    track_cards = not conf_track or 'cards' in conf_track
    track_components = not conf_track or 'components' in conf_track

    def check_all_service(call):
        """Set up service for manual trigger."""
        if track_cards:
            card_controller.cache_versions(call)
        if track_components:
            components_controller.cache_versions(call)

    def update_all_service(call):
        """Set up service for manual trigger."""
        if track_cards:
            card_controller.update_all()
        if track_components:
            components_controller.update_all()

    if track_cards:
        def upgrade_card_service(call):
            """Set up service for manual trigger."""
            card_controller.upgrade_single(call.data.get(ATTR_CARD))
        hass.services.register(DOMAIN, 'upgrade_single_card', upgrade_card_service)
    if track_components:
        def upgrade_component_service(call):
            """Set up service for manual trigger."""
            components_controller.upgrade_single(call.data.get(ATTR_COMPONENT))
        hass.services.register(DOMAIN, 'upgrade_single_component', upgrade_component_service)
    track_time_interval(hass, card_controller.cache_versions, INTERVAL)
    track_time_interval(hass, components_controller.cache_versions, INTERVAL)
    hass.services.register(DOMAIN, 'check_all', check_all_service)
    hass.services.register(DOMAIN, 'update_all', update_all_service)
    return True
def coset_enumeration_r(fp_grp, Y, max_cosets=None):
    """
    Relator-based (HLT) coset enumeration, after Hazelgrove, Leech, Trotter.

    This is the easier of the two implemented methods of coset enumeration.
    ``scan_and_fill`` makes new definitions whenever a scan is incomplete so
    that the scan can complete; this way the gaps in the scan of each relator
    or subgroup generator are filled in — hence "relator-based" method.

    See Also
    ========
    scan_and_fill,

    References
    ==========
    [1] Holt, D., Eick, B., O'Brien, E.
    "Handbook of computational group theory"

    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
    >>> F, x, y = free_group("x, y")

    # Example 5.1 from [1]
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_r(f, [x])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 1, 2]
    [1, 1, 2, 0]
    [2, 2, 0, 1]
    >>> C.p
    [0, 1, 2, 1, 1]

    # Example from exercises Q2 [1]
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> C = coset_enumeration_r(f, [])
    >>> C.compress(); C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]

    # Example 5.2
    >>> f = FpGroup(F, [x**2, y**3, (x*y)**3])
    >>> Y = [x*y]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 1, 2, 1]
    [0, 0, 0, 2]
    [3, 3, 1, 0]
    [2, 2, 3, 3]

    # Example 5.3
    >>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 3, 1, 3]
    [2, 0, 2, 0]
    [3, 1, 3, 1]
    [0, 2, 0, 2]

    # Example 5.4
    >>> F, a, b, c, d, e = free_group("a, b, c, d, e")
    >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1])
    >>> Y = [a]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    # example of "compress" method
    >>> C.compress()
    >>> C.table
    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]

    # Exercises Pg. 161, Q2.
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> C.compress()
    >>> C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]

    # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
    # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490
    # from 1973chwd.pdf
    # Table 1. Ex. 1
    >>> F, r, s, t = free_group("r, s, t")
    >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2])
    >>> C = coset_enumeration_r(E1, [r])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0]

    Ex. 2
    >>> F, a, b = free_group("a, b")
    >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5])
    >>> C = coset_enumeration_r(Cox, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    500

    # Ex. 3
    >>> F, a, b = free_group("a, b")
    >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \
            (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4])
    >>> C = coset_enumeration_r(B_2_4, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    1024

    """
    # 1. Initialize a coset table C for < X|R >
    C = CosetTable(fp_grp, Y, max_cosets=max_cosets)
    R = fp_grp.relators
    A_dict = C.A_dict
    # NOTE(review): A_dict_inv is bound but never used below — presumably
    # kept for symmetry with related routines; confirm before removing.
    A_dict_inv = C.A_dict_inv
    # p maps each coset to its canonical representative; p[i] == i means
    # coset i is live (not yet eliminated by a coincidence).
    p = C.p
    # 2. Scan each subgroup generator under coset 0, defining new cosets
    # as needed to complete the scan.
    for w in Y:
        C.scan_and_fill(0, w)
    # 3. Process every live coset in order of definition.
    alpha = 0
    while alpha < C.n:
        if p[alpha] == alpha:
            # Scan alpha under every relator.
            for w in R:
                C.scan_and_fill(alpha, w)
                # if α was eliminated during the scan then break
                if p[alpha] < alpha:
                    break
            if p[alpha] == alpha:
                # Fill any remaining undefined table entries for alpha
                # so the row is complete.
                for x in A_dict:
                    if C.table[alpha][A_dict[x]] is None:
                        C.define(alpha, x)
        alpha += 1
    return C
def get_model(**kwargs):
    """Build and return a ShuffleNetV2 model configured with ``kwargs``."""
    return ShuffleNetV2(**kwargs)
def absolute_filter_change(baseline_state_dict, target_state_dict):
    """Calculate sum(abs(K2 - K1)) / sum(abs(K1)) per 3x3 kernel.

    Args:
        baseline_state_dict (dict): state_dict of ori_net
        target_state_dict (dict): state_dict of finetune_net

    Returns:
        numpy.ndarray: per-kernel relative change, one value for each
        3x3 kernel across all 'weight' entries (flattened, in state_dict
        iteration order).

    Note:
        Assumes both state_dicts contain the same 'weight' keys in the
        same order, with element counts divisible by 9.
    """
    def _stack_kernels(state_dict):
        # Gather every 'weight' tensor, viewed as a stack of 3x3 kernels.
        kernels = [value.reshape(-1, 3, 3)
                   for key, value in state_dict.items()
                   if 'weight' in key]
        # [-1, 3, 3]
        return torch.cat(kernels, dim=0)

    # FIX: the original duplicated the extraction loop for both
    # state_dicts; its docstring also described (sorted_diff, sorted_index)
    # returns that the code never produced.
    baseline = _stack_kernels(baseline_state_dict)
    target = _stack_kernels(target_state_dict)
    # Per-kernel L1 norm of the baseline, shaped (-1, 1, 1) to broadcast
    # over the 3x3 kernel elements.
    denom = torch.sum(torch.sum(torch.abs(baseline), dim=1), dim=1)
    denom = denom.unsqueeze(1).unsqueeze(1)
    diff = torch.sum(torch.sum(torch.abs(target - baseline) / denom, dim=1), dim=1)
    return diff.cpu().numpy()
def run_as_admin(argv=None, debug=False):
    """
    Helper to relaunch a Python script with admin privileges (Windows).

    Returns True when already running elevated (argv is None and the
    IsUserAnAdmin check passes), False when the elevation request was
    refused, and None after a new elevated process has been spawned.
    """
    shell32 = ctypes.windll.shell32
    if argv is None:
        if shell32.IsUserAnAdmin():
            return True
        argv = sys.argv
    # PyInstaller-wrapped programs set sys._MEIPASS; the executable already
    # embeds the script, so drop argv[0] in that case.
    arguments = argv[1:] if hasattr(sys, '_MEIPASS') else argv
    argument_line = u' '.join(arguments)
    executable = sys.executable
    ret = shell32.ShellExecuteW(None, u"runas", executable, argument_line, None, 1)
    # ShellExecuteW returns a value <= 32 on failure.
    return False if int(ret) <= 32 else None
def global_function(a, b, c):
    """global_function documentation.

    Placeholder: accepts three arguments and returns None.
    """
    return None
def load_checkpoints(checkpoint_name):
    """
    Load a pretrained checkpoint.

    :param checkpoint_name: checkpoint filename
    :return: tuple of (model_state_dict, optimizer_state_dict, epoch,
        time_elapsed, src_vocabulary, tgt_vocabulary)
    """
    # Load on CPU so checkpoints saved from a GPU run still deserialize
    # on machines without CUDA.
    checkpoint = torch.load(checkpoint_name, map_location=torch.device('cpu'))
    # The epoch when training has been left
    epoch = checkpoint['epoch']
    # The time elapsed during training
    time_elapsed = checkpoint['time_elapsed']
    # Get state_dict of the model
    model_state_dict = checkpoint['model_state_dict']
    # Get the state_dict of the optimizer
    optimizer_state_dict = checkpoint['optimizer_state_dict']
    # Get source and target language vocabularies
    src_vocabulary = checkpoint['src_vocabulary']
    tgt_vocabulary = checkpoint['tgt_vocabulary']
    return model_state_dict, optimizer_state_dict, epoch, time_elapsed, src_vocabulary, tgt_vocabulary
def test_read_latest_all_gsp_historic(db_session):
    """Check main GB/pv/gsp route works"""
    fake_forecasts = make_fake_forecasts(
        gsp_ids=list(range(0, 10)),
        session=db_session,
        t0_datetime_utc=datetime.now(tz=timezone.utc),
    )
    db_session.add_all(fake_forecasts)
    update_all_forecast_latest(forecasts=fake_forecasts, session=db_session)
    # Route the API's DB dependency to the test session.
    app.dependency_overrides[get_session] = lambda: db_session
    response = client.get("/v0/GB/solar/gsp/forecast/all/?historic=True")
    assert response.status_code == 200
    many_forecasts = ManyForecasts(**response.json())
    assert len(many_forecasts.forecasts) == 10
    first_value = many_forecasts.forecasts[0].forecast_values[0]
    assert first_value.expected_power_generation_megawatts <= 1
def __update_background():
    """Tile the backdrop sprite's surface across the whole screen."""
    backdrop = get_sprite('__backdrop__')
    if backdrop and backdrop._surf:
        tile_h = backdrop._surf.get_height()
        tile_w = backdrop._surf.get_width()
        screen_w, screen_h = this.size
        # Blit the backdrop repeatedly to cover every screen region.
        for ty in range(0, screen_h, tile_h):
            for tx in range(0, screen_w, tile_w):
                this.__screen.blit(backdrop._surf, (tx, ty))
def _extract_data(prices, n_markets):
""" Extract the open, close, high and low prices from the price matrix. """
os = prices[:, :, :n_markets]
cs = prices[:, :, n_markets:2*n_markets]
hs = prices[:, :, 2*n_markets:3*n_markets]
ls = prices[:, :, 3*n_markets:4*n_markets]
return os, cs, hs, ls | 25,817 |
def active(run_dir: str = './run',
datasets_dir: str = './data', dataset: str = 'mnist', dataset_size: int=60000,
augmentation: bool = False, validation: int = 0, shuffle: bool = False,
initial_balance : bool = False,
initial_num_per_class: int = 100,
subset_bias: int=0,
coreset_path: str=None,
coreset_loss_path: str=None,
weighted_loss: bool = False,
enable_intersect: bool = False,
intersect_method: str ='naive',
intersect_rate: float=0.5,
runs: int=0,
cross_test: str='none',
arch: str = 'preact20', optimizer: str = 'sgd',
epochs: Tuple[int, ...] = (1, 90, 45, 45),
learning_rates: Tuple[float, ...] = (0.01, 0.1, 0.01, 0.001),
momentum: float = 0.9, weight_decay: float = 5e-4,
batch_size: int = 128, eval_batch_size: int = 128,
proxy_arch: str = 'preact20', proxy_optimizer: str = 'sgd',
proxy_epochs: Tuple[int, ...] = (1, 90, 45, 45),
proxy_learning_rates: Tuple[float, ...] = (0.01, 0.1, 0.01, 0.001),
proxy_momentum: float = 0.9, proxy_weight_decay: float = 5e-4,
proxy_batch_size: int = 128, proxy_eval_batch_size: int = 128,
initial_subset: int = 1_000,
rounds: Tuple[int, ...] = (4_000, 5_000, 5_000, 5_000, 5_000),
selection_method: str = 'least_confidence',
precomputed_selection: Optional[str] = None,
train_target: bool = True,
eval_target_at: Optional[Tuple[int, ...]] = None,
cuda: bool = True,
device_ids: Tuple[int, ...] = tuple(range(cuda.device_count())),
num_workers: int = 0, eval_num_workers: int = 0,
seed: Optional[int] = None, checkpoint: str = 'best',
track_test_acc: bool = True):
"""
Perform active learning on CIFAR10 and CIFAR100.
If the model architectures (`arch` vs `proxy_arch`) or the learning rate
schedules don't match, "selection via proxy" (SVP) is performed and two
separate models are trained. The proxy is used for selecting which
examples to label, while the target is only used for evaluating the
quality of the selection. By default, the target model (`arch`) is
trained and evaluated after each selection round. To change this behavior
set `eval_target_at` to evaluate at a specific labeling budget(s) or set
`train_target` to False to skip evaluating the target model. You can
evaluate a series of selections later using the `precomputed_selection`
option.
Parameters
----------
run_dir : str, default './run'
Path to log results and other artifacts.
datasets_dir : str, default './data'
Path to datasets.
dataset : str, default 'cifar10'
Dataset to use in experiment (i.e., CIFAR10 or CIFAR100)
augmentation : bool, default True
Add data augmentation (i.e., random crop and horizontal flip).
validation : int, default 0
Number of examples from training set to use for valdiation.
shuffle : bool, default True
Shuffle training data before splitting into training and validation.
arch : str, default 'preact20'
Model architecture for the target model. `preact20` is short for
ResNet20 w/ Pre-Activation.
optimizer : str, default = 'sgd'
Optimizer for training the target model.
epochs : Tuple[int, ...], default (1, 90, 45, 45)
Epochs for training the target model. Each number corresponds to a
learning rate below.
learning_rates : Tuple[float, ...], default (0.01, 0.1, 0.01, 0.001)
Learning rates for training the target model. Each learning rate is
used for the corresponding number of epochs above.
momentum : float, default 0.9
Momentum for SGD with the target model.
weight_decay : float, default 5e-4
Weight decay for SGD with the target model.
batch_size : int, default 128
Minibatch size for training the target model.
eval_batch_size : int, default 128
Minibatch size for evaluation (validation and testing) of the target
model.
proxy_arch : str, default 'preact20'
Model architecture for the proxy model. `preact20` is short for
ResNet20 w/ Pre-Activation.
proxy_optimizer : str, default = 'sgd'
Optimizer for training the proxy model.
proxy_epochs : Tuple[int, ...], default (1, 90, 45, 45)
Epochs for training the proxy model. Each number corresponds to a
learning rate below.
proxy_learning_rates : Tuple[float, ...], default (0.01, 0.1, 0.01, 0.001)
Learning rates for training the proxy model. Each learning rate is
used for the corresponding number of epochs above.
proxy_momentum : float, default 0.9
Momentum for SGD with the proxy model.
proxy_weight_decay : float, default 5e-4
Weight decay for SGD with the proxy model.
proxy_batch_size : int, default 128
Minibatch size for training the proxy model.
proxy_eval_batch_size : int, default 128
Minibatch size for evaluation (validation and testing) of the model
proxy.
initial_subset : int, default 1,000
Number of randomly selected training examples to use for the initial
labeled set.
rounds : Tuple[int, ...], default (4,000, 5,000, 5,000, 5,000, 5,000)
Number of unlabeled exampels to select in a round of labeling.
selection_method : str, default least_confidence
Criteria for selecting unlabeled examples to label.
precomputed_selection : str or None, default None
Path to timestamped run_dir of precomputed indices.
train_target : bool, default True
If proxy and target are different, train the target after each round
of selection or specific rounds as specified below.
eval_target_at : Tuple[int, ...] or None, default None
If proxy and target are different and `train_target`, limit the
evaluation of the target model to specific labeled subset sizes.
cuda : bool, default True
Enable or disable use of available GPUs
device_ids : Tuple[int, ...], default True
GPU device ids to use.
num_workers : int, default 0
Number of data loading workers for training.
eval_num_workers : int, default 0
Number of data loading workers for evaluation.
seed : Optional[int], default None
Random seed for numpy, torch, and others. If None, a random int is
chosen and logged in the experiments config file.
checkpoint : str, default 'best'
Specify when to create a checkpoint for the model: only checkpoint the
best performing model on the validation data or the training data if
`validation == 0` ("best"), after every epoch ("all"), or only the last
epoch of each segment of the learning rate schedule ("last").
track_test_acc : bool, default True
Calculate performance of the models on the test data in addition or
instead of the validation dataset.'
"""
# Set seeds for reproducibility.
seed = utils.set_random_seed(seed)
# Capture all of the arguments to save alongside the results.
config = utils.capture_config(**locals())
# Create a unique timestamped directory for this experiment.
run_dir = utils.create_run_dir(run_dir, timestamp=config['timestamp'])
utils.save_config(config, run_dir)
# Update the computing arguments based on the runtime system.
use_cuda, device, device_ids, num_workers = utils.config_run_env(
cuda=cuda, device_ids=device_ids, num_workers=num_workers)
# Create the training dataset.
train_dataset = create_dataset(dataset, datasets_dir, train=True,
augmentation=augmentation)
# Verify there is enough training data for validation,
# the initial subset, and the selection rounds.
validate_splits(train_dataset, validation, initial_subset, rounds)
# Create the test dataset.
test_dataset = None
if track_test_acc:
test_dataset = create_dataset(dataset, datasets_dir, train=False,
augmentation=False)
#test_dataset = create_dataset(dataset, datasets_dir, train=True,
# augmentation=False)
# Calculate the number of classes (e.g., 10 or 100) so the model has
# the right dimension for its output.
#num_classes = len(set(train_dataset.labels)) # type: ignore
num_classes = 10
# Split the training dataset between training and validation.
unlabeled_pool, dev_indices = utils.split_indices(
train_dataset, validation, run_dir, shuffle=shuffle)
# Create the proxy to select which data points to label. If the
# selections were precomputed in another run or elsewhere, we can
# ignore this step.
##############################################################################################
########################### immedately remove this part #######################################
################################################################################################
# core_indices = np.loadtxt(coreset_path,dtype=int)
# sim_loss = np.loadtxt(coreset_loss_path,dtype=float)
# norm_sim_loss = (2000-sim_loss)/2000
# sim_min = np.min(sim_loss)
# sim_max = np.max(sim_loss)
# sim_loss = (sim_loss-sim_min)/(sim_max-sim_min)
# core_loss = np.zeros(dataset_size) # subset core loss in CIFAR-10 indices order
# sim_wloss = np.zeros(dataset_size)
# norm_sim_wloss = np.zeros(dataset_size)
# sim_wloss[core_indices] = sim_loss # Reorder sim_loss in CIFAR-10 indices order # Map back to original indices
# norm_sim_wloss[core_indices] = norm_sim_loss
# test_subset_size=18000
# if cross_test == 'reverse':
# print('############## Reverse cross test activated ##################')
# test_indices = np.argsort(norm_sim_wloss)[:test_subset_size]
# else:
# test_indices = np.argsort(norm_sim_wloss)[-test_subset_size:]
##################################################################################################
###################################################################################################
###################################################################################################
if precomputed_selection is None:
# Use a partial so the appropriate model can be created without
# arguments.
proxy_partial = partial(create_model_and_optimizer,
arch=proxy_arch,
num_classes=num_classes,
optimizer=proxy_optimizer,
learning_rate=proxy_learning_rates[0],
momentum=proxy_momentum,
weight_decay=proxy_weight_decay)
# Create a directory for the proxy results to avoid confusion.
proxy_run_dir = os.path.join(run_dir, 'proxy')
os.makedirs(proxy_run_dir, exist_ok=True)
# Create data loaders for validation and testing. The training
# data loader changes as labeled data is added, so it is
# instead a part of the proxy model generator below.
_, proxy_dev_loader, proxy_test_loader = create_loaders(
train_dataset,
batch_size=proxy_batch_size,
eval_batch_size=proxy_eval_batch_size,
test_dataset=test_dataset,
use_cuda=use_cuda,
num_workers=num_workers,
eval_num_workers=eval_num_workers,
indices=(unlabeled_pool, dev_indices))
# Create the proxy model generator (i.e., send data and get a
# trained model).
proxy_generator = generate_models(
proxy_partial, proxy_epochs, proxy_learning_rates,
train_dataset, proxy_batch_size,
device, use_cuda,
num_workers=num_workers,
device_ids=device_ids,
dev_loader=proxy_dev_loader,
test_loader=proxy_test_loader,
run_dir=proxy_run_dir,
checkpoint=checkpoint)
# Start the generator
next(proxy_generator)
# Check that the proxy and target are different models
are_different_models = check_different_models(config)
# Maybe create the target.
if train_target:
# If the proxy and target models aren't different, we don't
# need to create a separate model generator*.
# * Unless the proxy wasn't created because the selections were
# precomputed (see above).
if are_different_models or precomputed_selection is not None:
# Use a partial so the appropriate model can be created
# without arguments.
target_partial = partial(create_model_and_optimizer,
arch=arch,
num_classes=num_classes,
optimizer=optimizer,
learning_rate=learning_rates[0],
momentum=momentum,
weight_decay=weight_decay)
# Create a directory for the target to avoid confusion.
target_run_dir = os.path.join(run_dir, 'target')
os.makedirs(target_run_dir, exist_ok=True)
# Create data loaders for validation and testing. The training
# data loader changes as labeled data is added, so it is
# instead a part of the target model generator below.
_, target_dev_loader, target_test_loader = create_loaders(
train_dataset,
batch_size=batch_size,
eval_batch_size=eval_batch_size,
test_dataset=test_dataset,
use_cuda=use_cuda,
num_workers=num_workers,
eval_num_workers=eval_num_workers,
indices=(unlabeled_pool, dev_indices))
# Create the target model generator (i.e., send data and
# get a trained model).
target_generator = generate_models(
target_partial, epochs, learning_rates,
train_dataset, batch_size,
device, use_cuda,
num_workers=num_workers,
device_ids=device_ids,
dev_loader=target_dev_loader,
test_loader=target_test_loader,
run_dir=target_run_dir,
checkpoint=checkpoint)
# Start the generator
next(target_generator)
else:
# Proxy and target are the same, so we can just symlink
symlink_target_to_proxy(run_dir)
# Perform active learning.
if precomputed_selection is not None:
assert train_target, "Must train target if selection is precomuted"
assert os.path.exists(precomputed_selection)
# Collect the files with the previously selected data.
files = glob(os.path.join(precomputed_selection, 'proxy',
'*', 'labeled_*.index'))
indices = [np.loadtxt(file, dtype=np.int64) for file in files]
# Sort selections by length to replicate the order data was
# labeled.
selections = sorted(
zip(files, indices),
key=lambda selection: len(selection[1])) # type: ignore
# Symlink proxy directories and files for convenience.
symlink_to_precomputed_proxy(precomputed_selection, run_dir)
# Train the target model on each selection.
for file, labeled in selections:
print('Load labeled indices from {}'.format(file))
# Check whether the target model should be trained. If you
# have a specific labeling budget, you may not want to
# evaluate the target after each selection round to save
# time.
should_eval = (eval_target_at is None or
len(eval_target_at) == 0 or
len(labeled) in eval_target_at)
if should_eval:
# Train the target model on the selected data.
_, stats = target_generator.send(labeled)
utils.save_result(stats, os.path.join(run_dir, "target.csv"))
else: # Select which points to label using the proxy.
# Create initial random subset to train the proxy (warm start).
labeled = []
sim_loss = np.array([])
sim_wloss = np.array([])
core_indices = np.array([])
half_core_indices = np.array([]) # half core indices
isCoreLoss = False
if coreset_loss_path is not None:
isCoreLoss = True # Core Loss is/isn't defined
if coreset_path is not None:
isCoreset = True # Core Set is/isn't defined
core_indices = np.loadtxt(coreset_path,dtype=int) # Load core Indices resulting from self-supervised learning
# size : 50,000
if isCoreLoss:
sim_loss = np.loadtxt(coreset_loss_path,dtype=float) # Load core(simmmiarity) Loss resulting
# from self-supervised learning
# size : 50,000
norm_sim_loss = (1200-sim_loss)/1200 # extract cossim from simloss
### normalize scale between 0 and 1 ###
sim_min = np.min(sim_loss)
sim_max = np.max(sim_loss)
sim_loss = (sim_loss-sim_min)/(sim_max-sim_min)
### normalize mean and std ####
core_loss = np.zeros(dataset_size) # subset core loss in CIFAR-10 indices order
sim_wloss = np.zeros(dataset_size)
norm_sim_wloss = np.zeros(dataset_size)
sim_wloss[core_indices] = sim_loss # Reorder sim_loss in CIFAR-10 indices order # Map back to original indices
norm_sim_wloss[core_indices] = norm_sim_loss
if 'noncore' in coreset_path:
isCoreset = False
if initial_balance:
labeled = utils.split_balanced_core_indices(train_dataset, core_indices, core_loss, isCoreset,
initial_num_per_class, num_classes)
else:
if isCoreset: # core_subset
#labeled = core_indices[-initial_subset:]
half_core_indices = core_indices[-25000:]
labeled = np.argsort(norm_sim_wloss)[subset_bias:subset_bias+initial_subset]
#if cross_test == 'reverse':
# labeled = np.argsort(norm_sim_wloss)[-initial_subset:]
#else:
# labeled = np.argsort(norm_sim_wloss)[subset_bias:subset_bias+initial_subset]
#labeled = np.argsort(norm_sim_wloss)[subset_bias:subset_bias+initial_subset]
#labeled = np.argsort(norm_sim_wloss)[-initial_subset:]
if isCoreLoss:
core_loss[labeled] = sim_wloss[labeled]
else : # noncore_subset
print('############# non coreset ###############')
#labeled = core_indices[:initial_subset]
#half_core_indices = core_indices[:25000]
#if isCoreLoss:
# core_loss[labeled] = sim_loss[:initial_subset]
if weighted_loss:
core_arg = [labeled, core_loss] # For per-sample weighted loss
print("############ Weighted Per-sample Loss ###############")
else:
core_arg = labeled # For uniform loss
print("############ Uniform Per-sample Loss ###############")
# Save the index of the initial random subset
utils.save_index(labeled, run_dir,
'initial_subset_{}.index'.format(len(labeled)))
# Train the proxy on the initial random subset
model, stats = proxy_generator.send(core_arg)
else:
if initial_balance:
print("initial random subset with balanced data")
labeled=utils.split_balanced_indices(train_dataset, initial_num_per_class, num_classes)
else:
print("initial random subset with no balanced data")
labeled = np.random.permutation(unlabeled_pool)[:initial_subset]
# Save the index of the initial random subset
utils.save_index(labeled, run_dir,
'initial_subset_{}.index'.format(len(labeled)))
# Train the proxy on the initial random subset
model, stats = proxy_generator.send(labeled)
stats['weighted_loss'] = weighted_loss
stats['self-supervised'] = coreset_path
stats['runs'] = runs
stats['dataset'] = dataset
stats['initial size'] = len(labeled)
stats['subset bias'] = subset_bias
utils.save_result(stats, os.path.join(run_dir, "proxy.csv"))
for selection_size in rounds:
# Select additional data to label from the unlabeled pool
labeled, stats = select(model, train_dataset,
enable_intersect=enable_intersect,
intersect_method=intersect_method,
half_core_indices=half_core_indices,
sim_loss = sim_wloss,
mixing_rate = intersect_rate,
current=labeled,
pool=unlabeled_pool,
budget=selection_size,
method=selection_method,
batch_size=proxy_eval_batch_size,
device=device,
device_ids=device_ids,
num_workers=num_workers,
use_cuda=use_cuda)
utils.save_result(stats, os.path.join(run_dir, 'selection.csv'))
# Train the proxy on the newly added data.
if weighted_loss : # For per-sample weighted loss
core_loss = np.zeros(dataset_size)
core_loss[labeled] = sim_wloss[labeled]
core_arg = [labeled, core_loss]
model, stats = proxy_generator.send(core_arg)
else: # For uniform loss
print("############ Uniform Per-sample Loss ###############")
model, stats = proxy_generator.send(labeled)
utils.save_result(stats, os.path.join(run_dir, 'proxy.csv'))
# Check whether the target model should be trained. If you
# have a specific labeling budget, you may not want to
# evaluate the target after each selection round to save
# time.
should_eval = (eval_target_at is None or
len(eval_target_at) == 0 or
len(labeled) in eval_target_at)
if train_target and should_eval and are_different_models:
# Train the target model on the selected data.
_, stats = target_generator.send(labeled)
utils.save_result(stats, os.path.join(run_dir, "target.csv")) | 25,818 |
def make_screen():
    """Initialize and return the main 800x600 display surface."""
    width, height = 800, 600
    return pygame.display.set_mode((width, height))
def listFiles(sDir, ext="_du.mpxml"):
    """
    Return the sorted list of filenames directly inside ``sDir`` whose
    lower-cased name ends with ``ext``.

    :param sDir: directory to scan (non-recursive)
    :param ext: filename suffix matched against the lower-cased name
    :return: sorted list of matching filenames (names only, not full paths)
    """
    # The original condition tested ``endswith(ext) or endswith(ext)``:
    # the second clause was a copy/paste duplicate, so one test suffices.
    lsFile = sorted(_fn
                    for _fn in os.listdir(sDir)
                    if _fn.lower().endswith(ext))
    return lsFile
def flip_vert(r, c, row, col, reversed):
    """Operation 1: mirror the point (r, c) across the horizontal axis."""
    if reversed:
        # A prior rotation swapped the grid's dimensions, so swap them back
        # before computing the mirrored row.
        row, col = col, row
    mirrored_row = row - 1 - r
    return mirrored_row, c, reversed
def periodic_forecast(
    covariate,
    output_array,
    period,
):
    """Fills output_array by cyclically repeating the tail of covariate.

    Args:
      covariate: Assumed to have dimensions [# locations x # days].
      output_array: The output array where the results will be written.
      period: The period with which to repeat the data.
    """
    horizon = output_array.shape[1]
    history_len = covariate.shape[1]
    # The repeating unit is the final `period` columns of the history.
    tail = covariate[:, -period:]
    if history_len < period:
        # Not enough history for one full period: tile what exists and
        # keep only the trailing `period` columns.
        reps = int(np.ceil(period / history_len))
        tail = np.tile(tail, (1, reps))[:, -period:]
    cycles = int(np.ceil(horizon / float(period)))
    output_array[:, :] = np.tile(tail, (1, cycles))[:, :horizon]
def binary_classification():
    """
    Logistic-regression style demo: trains a tiny dense network with a
    sigmoid output on random data.
    :return:
    """
    model = Sequential()
    model.add(Dense(32, activation='relu', input_dim=100))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    features = np.random.random((10000, 100))
    targets = np.random.randint(2, size=(10000, 1))
    # Train the model, iterating on the data in batches of 32 samples
    model.fit(features, targets, epochs=10, batch_size=32, verbose=1)
def get_orders(self, **kwargs):
    """
    |
    | **Current All Open Orders (USER_DATA)**
    | *Get all open orders on a symbol. Careful when accessing this with no symbol.*
    | *If the symbol is not sent, orders for all symbols will be returned in an array.*
    :API endpoint: ``GET /dapi/v1/openOrders``
    :API doc: https://binance-docs.github.io/apidocs/delivery/en/#current-all-open-orders-user_data
    :parameter symbol: string
    :parameter recvWindow: optional int, the value cannot be greater than 60000
    |
    """
    # All optional query parameters are forwarded verbatim.
    return self.sign_request("GET", "/dapi/v1/openOrders", dict(kwargs))
def update_amount(amount_id: int):
    """Update an existing amount record.

    Validates the JSON payload, enforces the business rules (amount
    exists, project not closed, one amount per year per receipt, sum
    constraint), then persists the update.

    Args:
        amount_id (int): id of amount

    Returns:
        Response: the updated amount as JSON (200), a validation error
        payload, or a generic 500 error message.
    """
    current_app.logger.debug('In PUT /api/amounts/<int:amount_id>')
    response = None
    try:
        # Load data
        data = request.get_json()
        # Default the payload id to the URL id so validation sees a full record.
        if 'id_ma' not in data:
            data['id_ma'] = amount_id
        # Validate fields to update
        AmountValidationService.validate(data)
        # Checks amount exist
        AmountDBService.get_amount_by_id(amount_id)
        # Check project not solde
        AmountDBService.is_project_solde(receipt_id = data['id_r'])
        # Check year unique by receipt
        AmountDBService.check_unique_amount_by_year_and_receipt_id(data['annee_ma'], data['id_r'], amount_id)
        # Check sum amount value
        AmountDBService.check_sum_value(data, amount_id)
        response = AmountDBService.update(data)
        response = jsonify(response), 200
    except ValueError as error:
        # Business-rule violations arrive as ValueError(payload, http_status).
        current_app.logger.error(error)
        response = jsonify(error.args[0]), error.args[1]
    except Exception as e:
        current_app.logger.error(e)
        response = jsonify({'message': 'Une erreur est survenue lors de la modification du montant affecté'}), 500
    finally:
        # NOTE: returning from `finally` would also swallow any in-flight
        # exception; here both handlers above already set `response`.
        return response
def en_13757(data: bytes) -> int:
    """
    Compute a CRC-16 checksum of ``data`` using the EN 13757 algorithm.

    :param bytes data: The data to be computed
    :return: The checksum
    :rtype: int
    :raises TypeError: if the data is not a bytes-like object
    """
    # Reject non bytes-like input up front with a clear TypeError.
    _ensure_bytes(data)
    return _crc_16_en_13757(data)
def DEFINE_multi_enum(name, default, enum_values, help=None, flag_values=_flagvalues.FLAGS, case_sensitive=True,
                      **args):
    """Registers a flag whose value can be a list strings from enum_values.

    Use the flag on the command line multiple times to place multiple
    enum values into the list. The 'default' may be a single string
    (which will be converted into a single-element list) or a list of
    strings.

    Args:
      name: str, the flag name.
      default: Union[Iterable[Text], Text, None], the default value of the flag;
          see `DEFINE_multi`.
      enum_values: [str], a non-empty list of strings with the possible values for
          the flag.
      help: str, the help message.
      flag_values: FlagValues, the FlagValues instance with which the flag will
          be registered. This should almost never need to be overridden.
      case_sensitive: Whether or not the enum is to be case-sensitive.
      **args: Dictionary with extra keyword args that are passed to the
          Flag __init__.
    """
    DEFINE_multi(
        _argument_parser.EnumParser(enum_values, case_sensitive),
        _argument_parser.ArgumentSerializer(),
        name, default, help, flag_values, **args)
def get_agent_supported_features_list_for_extensions():
    """
    List of features that the GuestAgent currently supports (like Extension Telemetry Pipeline, etc) needed by Extensions.
    We need to send this list as environment variables when calling extension commands to inform Extensions of all the
    features the agent supports.
    :return: Dict containing all Extension supported features with the key as their names and the AgentFeature object as
    the value if the feature is supported by the Agent.
    Eg: {
        CRPSupportedFeatureNames.ExtensionTelemetryPipeline: _ETPFeature()
    }
    """
    return {
        name: feature
        for name, feature in __EXTENSION_ADVERTISED_FEATURES.items()
        if feature.is_supported
    }
def test_wps_ext_proto_nack_m3_no_msg_type(dev, apdev):
    """WPS and NACK M3 no Message Type"""
    eap_id, e_nonce, r_nonce, bssid = wps_nack_m3(dev, apdev)
    logger.debug("Send NACK to STA")
    frame, _attrs = build_nack(eap_id, e_nonce, r_nonce, msg_type=None)
    send_wsc_msg(dev[0], bssid, frame)
    station = dev[0]
    station.request("WPS_CANCEL")
    station.wait_disconnected()
    station.flush_scan_cache()
def GCMV(image, mask=None):
    """
    Gamma correction via grid search ("GCMV"): picks the gamma whose
    transformed image best matches fixed row/column mean/variance targets,
    then applies it.

    :param image: input image, color (3 channels) or gray (1 channel);
    :param mask: calc gamma value in the mask area, default is the whole image;
    :return: gamma, and output
    """
    # Step 1. Check the inputs: image
    if np.ndim(image) == 3 and image.shape[-1] == 3: # color image
        # Work on the V (value) channel only so hue/saturation are preserved.
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        img = hsv[:, :, 2]
        color_flag = True
    elif np.ndim(image) == 2: # gray image
        img = image
        color_flag = False
    else:
        print("ERROR:check the input image of AGT function...")
        return 1, None
    # NOTE(review): `mask` is computed here but never referenced afterwards
    # (the masked stretch below is commented out) — confirm whether masking
    # is still intended.
    if mask is not None:
        mask = mask<255
    else:
        mask = np.ones_like(img)
    # Step 2. Main steps of GCMV
    n_img = img/255.0
    mean = np.mean(n_img)
    # Dark images search gamma in (0, 1]; bright images in (1, 10].
    gamma_list = np.arange(0.01,1.01,0.01) if mean<=0.5 else np.arange(1.1,10.1,0.1)
    score = np.zeros_like(gamma_list)
    for k, gamma in enumerate(gamma_list):
        t_img = np.power(n_img, gamma)
        # Per-column (m1, v1) and per-row (m2, v2) mean and variance.
        m1, v1 = np.mean(t_img, axis=0), np.var(t_img, axis=0)
        m2, v2 = np.mean(t_img, axis=1), np.var(t_img, axis=1)
        # Distance to target mean 0.5077 and variance 0.0268 — presumably
        # constants from the GCMV reference paper; TODO confirm.
        score[k] = np.mean(np.power(m1-0.5077,2)) + np.mean(np.power(m2-0.5077,2))+np.mean(np.power(v1-0.0268,2)) + np.mean(np.power(v2-0.0268,2))
    # grid search for the optimal gamma
    ind = np.argmin(score)
    best_gamma =gamma_list[ind]
    # print(best_gamma)
    # Step 2.4 apply gamma transformation
    # Re-normalize with a half-level offset to avoid pure zeros/ones.
    n_img = (img+0.5)/256
    output = np.power(n_img, best_gamma)
    # Step 3.0 stretch back and post-process
    # if mask is not None:
    #     output = (output * 256 - 0.5) * mask / 255.0
    # else:
    output = (output * 256 - 0.5)
    output = output.round().astype(np.uint8)
    if color_flag:
        # Re-insert the corrected V channel and convert back to BGR.
        hsv[:, :, 2] = output
        output = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return best_gamma, output
def downloadStaffFile(request: HttpRequest, filename: str) -> Union[HttpResponse, FileResponse]:
    """Serve `filename` from the staff storage backend, validating that the
    requesting user is logged in and a staff user."""
    storage = storages.StaffStorage()
    return _downloadFileFromStorage(storage, filename)
def get_view_renderer_type(*args):
    """
    get_view_renderer_type(v) -> tcc_renderer_type_t

    Get the type of renderer currently in use in the given view (
    'ui_get_renderer_type' )

    @param v (C++: TWidget *)
    """
    # Thin pass-through to the SDK binding.
    result = _ida_kernwin.get_view_renderer_type(*args)
    return result
async def create(req):
    """
    Add a new label to the labels database.
    """
    data = req["data"]
    async with AsyncSession(req.app["pg"]) as session:
        new_label = Label(
            name=data["name"], color=data["color"], description=data["description"]
        )
        session.add(new_label)
        try:
            # Flush first so a duplicate name surfaces before commit.
            await session.flush()
            document = new_label.to_dict()
            await session.commit()
        except IntegrityError:
            raise HTTPBadRequest(text="Label name already exists")
    document = await apply_transforms(document, [SampleCountTransform(req.app["db"])])
    return json_response(
        document,
        status=201,
        headers={"Location": f"/labels/{document['id']}"},
    )
def process_radial_velocity(procstatus, dscfg, radar_list=None):
    """
    Estimates the radial velocity respect to the radar from the wind velocity

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted Configuration Keywords::

        datatype : string. Dataset keyword
            The input data type
        latitude, longitude : float
            arbitrary coordinates [deg] from where to compute the radial
            velocity. If any of them is None it will be the radar position
        altitude : float
            arbitrary altitude [m MSL] from where to compute the radial
            velocity. If None it will be the radar altitude
    radar_list : list of Radar objects
        Optional. list of radar objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index
    """
    if procstatus != 1:
        return None, None
    # Resolve the pyart field names of the required input datatypes.
    v_speed_field = None
    h_speed_field = None
    h_dir_field = None
    for datatypedescr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
        if datatype == 'wind_vel_v':
            v_speed_field = get_fieldname_pyart(datatype)
        if datatype == 'WIND_SPEED':
            h_speed_field = get_fieldname_pyart(datatype)
        if datatype == 'WIND_DIRECTION':
            h_dir_field = get_fieldname_pyart(datatype)
    if h_speed_field is None or h_dir_field is None:
        warn('Horizontal wind speed and direction fields required'
             ' to estimate radial velocity')
        return None, None
    # NOTE: radarnr comes from the last datatype descriptor processed.
    ind_rad = int(radarnr[5:8])-1
    if radar_list[ind_rad] is None:
        warn('No valid radar')
        return None, None
    radar = radar_list[ind_rad]
    if h_speed_field not in radar.fields or h_dir_field not in radar.fields:
        warn('Unable to estimate radial velocity. '
             'Missing horizontal wind')
        return None, None
    h_speed = radar.fields[h_speed_field]['data']
    h_dir = radar.fields[h_dir_field]['data']
    if v_speed_field is None or v_speed_field not in radar.fields:
        warn('Unknown vertical wind speed. Assumed 0')
        if v_speed_field is None:
            # BUGFIX: this line used '==' (a no-op comparison) instead of
            # assignment, so the default field name was never recorded.
            v_speed_field = 'vertical_wind_component'
        v_speed = np.ma.zeros((radar.nrays, radar.ngates))
    else:
        v_speed = radar.fields[v_speed_field]['data']
    # user defined parameters
    lat = dscfg.get('latitude', None)
    lon = dscfg.get('longitude', None)
    alt = dscfg.get('altitude', None)
    # get u and v wind components
    h_dir_rad = np.deg2rad(h_dir)
    speed_h_u = h_speed*np.sin(h_dir_rad)  # eastward component
    speed_h_v = h_speed*np.cos(h_dir_rad)  # northward component
    if lat is not None or lon is not None or alt is not None:
        # get antenna coordinates respect to new radar location
        if lat is None:
            lat = radar.latitude['data'][0]
        if lon is None:
            lon = radar.longitude['data'][0]
        if alt is None:
            alt = radar.altitude['data'][0]
        x, y = pyart.core.geographic_to_cartesian_aeqd(
            radar.gate_longitude['data'], radar.gate_latitude['data'], lon,
            lat)
        z = radar.gate_altitude['data'] - alt
        _, azimuths, elevations = pyart.core.cartesian_to_antenna(
            x, y, z)
        azi_2D_rad = np.deg2rad(azimuths)
        ele_2D_rad = np.deg2rad(elevations)
    else:
        # Viewpoint is the radar itself: broadcast per-ray angles per gate.
        azi_2D_rad = np.broadcast_to(
            np.deg2rad(radar.azimuth['data'])[:, np.newaxis],
            (radar.nrays, radar.ngates))
        ele_2D_rad = np.broadcast_to(
            np.deg2rad(radar.elevation['data'])[:, np.newaxis],
            (radar.nrays, radar.ngates))
    r_speed = pyart.config.get_metadata('velocity')
    # assuming no vertical velocity
    # r_speed['data'] = h_speed*np.cos(h_dir_rad-azi_2D_rad)*np.cos(ele_2D_rad)
    # with vertical velocity included
    r_speed['data'] = (
        (speed_h_u*np.sin(azi_2D_rad)+speed_h_v*np.cos(azi_2D_rad)) *
        np.cos(ele_2D_rad)+np.sin(ele_2D_rad)*v_speed)
    # prepare for exit
    new_dataset = {'radar_out': deepcopy(radar)}
    new_dataset['radar_out'].fields = dict()
    new_dataset['radar_out'].add_field('velocity', r_speed)
    return new_dataset, ind_rad
def brute_force_diagonalize(answers, wordlist=WORDS, quiet=False):
    """
    Find the most cromulent diagonalization for a set of answers, trying all
    possible orders. See README.md for a cool example of this with 10 answers.

    As a somewhat artificial example, let's suppose we have these seven
    answers from the 2000 metas, but don't remember their order:

    >>> metas = ['benjamins', 'billgates', 'donors', 'luxor', 'mansion', 'miserly', 'realty']
    >>> brute_force_diagonalize(metas)[0]   # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    Cromulence  Text    Info
    9.5   RUN EAST
    9.2   MIX LAST
    9.1   MAX LAST
    9.1   BE NOISY
    8.8   LINE TO I
    ...
    (9.5, 'RUN EAST', None)

    Of course we were looking for the famous red herring "BE NOISY", but
    "RUN EAST" sounds like a good way to find the coin also.
    """
    results = []
    seen_slugs = set()
    cells = [parse_cell(word) for word in answers]
    for idx, ordering in enumerate(permutations(cells)):
        if not quiet and idx > 0 and idx % 10000 == 0:
            print("Tried %d permutations" % idx)
        try:
            diagonal = diagonalize(ordering)
        except IndexError:
            # Some orderings are too short to have a full diagonal.
            continue
        matches = wordlist.search(diagonal, count=1, use_cromulence=True)
        if not matches:
            continue
        logprob, text = matches[0]
        slug = slugify(text)
        if slug not in seen_slugs:
            results.append((logprob, text, None))
            seen_slugs.add(slug)
    return wordlist.show_best_results(results)
def train():
    """
    MNIST training set creator.

    It returns a reader creator; each sample in the reader is image pixels in
    [-1, 1] and label in [0, 9].

    :return: Training reader creator
    :rtype: callable
    """
    images_path = paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist',
                                                 TRAIN_IMAGE_MD5)
    labels_path = paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist',
                                                 TRAIN_LABEL_MD5)
    return reader_creator(images_path, labels_path, 100)
def f_setup():
    """Creates (or truncates) the test file ``../data/test.txt``."""
    # Use a context manager so the handle is closed deterministically
    # instead of leaking until garbage collection.
    with open("../data/test.txt", "w"):
        pass
def sign_v2(key, msg):
"""
AWS version 2 signing by sha1 hashing and base64 encode.
"""
return base64.b64encode(hmac.new(key, msg.encode("utf-8"), hashlib.sha1).digest()) | 25,838 |
def graph_apply(fun, *args):
    """Currying wrapper around APP(-,-)."""
    # Fold APP over the arguments left-to-right, coercing each to a graph.
    applied = fun
    for raw_arg in args:
        applied = APP(applied, as_graph(raw_arg))
    return applied
def find_hcf(a, b) :
    """ Finds the Highest Common Factor among two numbers """
    # Iterative Euclidean algorithm (equivalent to the recursive form).
    while b != 0:
        a, b = b, a % b
    return a
def GAU_pdf(x: np.ndarray, mu: float, var: float) -> np.ndarray:
    """
    Gaussian probability density, evaluated element-wise.

    :param x: ndarray input parameters
    :param mu: float mean of the distribution
    :param var: float variance of the distribution
    :return: ndarray probability of each sample
    """
    coeff = 1 / np.sqrt(2 * np.pi * var)
    exponent = -np.power(x - mu, 2) / (2 * var)
    return coeff * np.exp(exponent)
def version_compare(a, b): # real signature unknown; restored from __doc__
    """
    version_compare(a: str, b: str) -> int

    Compare the given versions; return a strictly negative value if 'a' is
    smaller than 'b', 0 if they are equal, and a strictly positive value if
    'a' is larger than 'b'.
    """
    # Auto-generated stub: the real implementation lives in a native
    # extension. This placeholder always returns 0 and exists only for
    # introspection/IDE support — do not call it for real comparisons.
    return 0
def line_status():
    """
    Device line details.
    :return:
    """
    device_id = request.args.get("device_id")
    device_lines = Line.objects(device_id=device_id).all()
    status = Monitor.device_status(device_id, device_lines)
    # Drop the first entry (summary row) before returning.
    status.pop(0)
    return Success(status)
def namedPositionals(func, args):
    """Given a function, and a sequence of positional arguments destined
    for that function, identifies the name for each positional argument.

    Variable positional arguments are given an automatic name.

    :arg func: Function which will accept ``args`` as positionals.
    :arg args: Tuple of positional arguments to be passed to ``func``.
    """
    # Caveat: automatic vararg naming can collide with an explicit
    # parameter literally named e.g. ``args0`` — an unlikely signature,
    # so we accept the risk.

    # Strip decorator wrappers before inspecting the signature.
    target = _unwrap(func)

    if sys.version_info[0] < 3:
        # getargspec is the only option on Python 2.x ...
        spec = inspect.getargspec(target)
    else:
        # ... but it is deprecated on Python 3, so use getfullargspec,
        # silencing the DeprecationWarning it emits on python 3.5.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            spec = inspect.getfullargspec(target)

    posNames = spec.args
    varargs  = spec.varargs

    # Only name as many arguments as are actually being passed in.
    posNames = posNames[:len(args)]

    # Invent names for the overflow consumed by *varargs.
    overflow = len(args) - len(posNames)
    if varargs is not None and overflow > 0:
        posNames += ['{}{}'.format(varargs, n) for n in range(overflow)]

    return posNames
def _hist_fig(df, pred, c):
    """
    Build a two-panel histogram figure for category ``c``: model scores on
    the training split (top) and the validation split (bottom), each
    overlaying labeled-positive, labeled-negative and unlabeled samples.

    :param df: DataFrame with label column ``c`` (1/0/NaN) and a boolean
        ``validation`` column
    :param pred: DataFrame of predicted probabilities, same index as df
    :param c: category/column name to plot
    :return: the (closed) matplotlib Figure
    """
    bins = np.linspace(0, 1, 15)
    # Rows with no label at all (NaN in column c).
    unlabeled = pred[c][pd.isnull(df[c])].values
    fig, (ax1, ax2) = plt.subplots(2,1)
    # top plot: training data
    pos_labeled = pred[c][(df[c] == 1)&(df["validation"] == False)].values
    neg_labeled = pred[c][(df[c] == 0)&(df["validation"] == False)].values
    train_auc = _auc(pos_labeled, neg_labeled)
    if len(pos_labeled) > 0:
        ax1.hist(pos_labeled, bins=bins, alpha=0.5,
                 label="labeled positive (train)", density=True)
    if len(neg_labeled) > 0:
        ax1.hist(neg_labeled, bins=bins, alpha=0.5,
                 label="labeled negative (train)", density=True)
    if len(unlabeled) > 0:
        ax1.hist(unlabeled, bins=bins, alpha=1., label="unlabeled",
                 density=True, histtype="step", lw=2)
    # bottom plot: validation data
    pos_labeled = pred[c][(df[c] == 1)&(df["validation"] == True)].values
    neg_labeled = pred[c][(df[c] == 0)&(df["validation"] == True)].values
    test_auc = _auc(pos_labeled, neg_labeled)
    if len(pos_labeled) > 0:
        ax2.hist(pos_labeled, bins=bins, alpha=0.5,
                 label="labeled positive (val)", density=True)
    if len(neg_labeled) > 0:
        ax2.hist(neg_labeled, bins=bins, alpha=0.5,
                 label="labeled negative (val)", density=True)
    if len(unlabeled) > 0:
        ax2.hist(unlabeled, bins=bins, alpha=1., label="unlabeled",
                 density=True, histtype="step", lw=2)
    for a in [ax1, ax2]:
        a.legend(loc="upper left")
        a.set_xlabel("assessed probability", fontsize=14)
        a.set_ylabel("frequency", fontsize=14)
    title = "model outputs for '%s'\nAUC train %s, test AUC %s"%(c, train_auc, test_auc)
    ax1.set_title(title, fontsize=14)
    # Close so the figure is not shown interactively; caller keeps the handle.
    plt.close(fig)
    return fig
def is_responsive(url, code=200, timeout=10):
    """Check if something responds to ``url`` synchronously.

    :param url: URL to probe with a GET request
    :param code: HTTP status code treated as "responsive"
    :param timeout: seconds before the probe is abandoned; the original
        call had no timeout and could hang forever on a stalled server
    :return: True iff the request succeeded with the expected status
    """
    try:
        response = requests.get(url, timeout=timeout)
        if response.status_code == code:
            return True
    except requests.exceptions.RequestException:
        # Connection errors and timeouts simply mean "not responsive".
        pass
    return False
def run_ranking_module():
    """
    Ranks the severity of the outliers based on three metrics.
    Metric1 - Threshold based.
    Metric2 - Uses the Pagerank Algorithm.
    Metric3 - By building a feature dependency graph.
    final_score of outlier = node_score of outlier * prob_score of outlier.

    Input:
        The directory path to perform the ranking analysis on.
    Output:
        A ranking hierarchy of all the bugs found.
    """
    import subprocess

    printINFO("\nEnter the Network Directory path for which you want to run Severity Module")
    network_directory = input("=> ").strip()
    node_rank_file = network_directory+"/node_ranks.json"
    curr_directory = os.path.dirname(os.path.abspath(__file__))
    outlier_directory_path = curr_directory+"/"+network_directory.split("/")[-1]
    ranking_file_path = str(Path(curr_directory).parent)+'/ranking-severity/ranking.py'
    print(ranking_file_path, node_rank_file, outlier_directory_path)
    # Run the ranking script without a shell so a user-supplied path
    # containing spaces or shell metacharacters cannot inject commands
    # (the original os.system call interpolated raw input into a shell
    # string).
    subprocess.run(['python3', ranking_file_path, node_rank_file,
                    outlier_directory_path], check=False)
def train(dataset_dir):
    """
    Use provided dataset to train a classifier and calculate its performance.
    Creates pickle files to store the intermediate results along with the
    trained classifier.

    Parameters
    ----------
    dataset_dir : string
        The relative path of the dataset to load. Must be an existing directory and it must end with `/`.
    """
    frame = load_dataset(dataset_dir)
    # Chronological 80/20 split (no shuffling).
    train_frame, test_frame = train_test_split(frame, test_size=0.2, shuffle=False)
    x_train, y_test = reduce_dimension(train_frame, test_frame)
    classifier = train_classifier(x_train, train_frame['Label'])
    benchmark_classifier(classifier, y_test, test_frame['Label'],
                         ['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'])
def fit_and_report(model, X, y, X_valid, y_valid):
    """
    It fits a model and returns train and validation scores.

    Parameters:
    model (sklearn classifier model): The sklearn model
    X (numpy.ndarray): The X part of the train set
    y (numpy.ndarray): The y part of the train set
    X_valid (numpy.ndarray): The X part of the validation set
    y_valid (numpy.ndarray): The y part of the validation set

    Returns:
    scores (list): The list of scores of train and validation
    """
    model.fit(X, y)
    # Positive-class probabilities for each split.
    train_probs = model.predict_proba(X)[:, 1]
    valid_probs = model.predict_proba(X_valid)[:, 1]
    # calculate scores
    return [roc_auc_score(y, train_probs), roc_auc_score(y_valid, valid_probs)]
def merge(vis: plt.bar,
          array: np.ndarray,
          array_size: int,
          pause_short: float,
          i: int = 0,
          key: int = -1) \
        -> None:
    """Merge sort visualization: recursively halves [i, key], then merges
    the sorted halves bottom-up via ``_merge``.

    Complexity: Time - O(nlog(n)), Space - O(n), Stable
    """
    if key == -1:
        # Initial call: sort the whole array.
        key = array_size - 1
    if i < key:
        mid: int = (i + key) // 2
        merge(vis, array, array_size, pause_short, i, mid)
        merge(vis, array, array_size, pause_short, mid + 1, key)
        _merge(vis, array, array_size, pause_short, i, mid, key)
    if i == 0 and key == array_size - 1:
        # Final redraw once the outermost call completes.
        plt.draw()
def move_all_generation_to_high_voltage(data):
    """Move all generation sources to the high voltage market.

    Uses the relative shares in the low voltage market, **ignoring transmission losses**. In theory, using the production volumes would be more correct, but these numbers are no longer updated since ecoinvent 3.2.

    Empties out the medium and low voltage mixes."""
    MIXES = {low_voltage_mix, medium_voltage_mix, high_voltage_mix}
    mix_filter = lambda ds: ds['name'] in MIXES
    for group in toolz.groupby("location", filter(mix_filter, data)).values():
        # Each location must have exactly the three voltage-level mixes.
        assert len(group) == 3
        # NOTE(review): relies on the mix names sorting alphabetically as
        # high < low < medium — confirm against the actual name constants.
        high, low, medium = sorted(group, key=lambda x: x['name'])
        # Share of the medium-voltage mix feeding the low-voltage market.
        medium_in_low = [ex for ex in low['exchanges']
                         if ex['name'] == medium_voltage_transformation][0]['amount']
        # Share of high voltage in low voltage: high->medium scaled by
        # medium->low (transmission losses deliberately ignored).
        high_in_low = [ex for ex in medium['exchanges']
                       if ex['name'] == high_voltage_transformation][0]['amount'] * \
            medium_in_low
        for exc in high['exchanges']:
            if (exc['name'] in high_voltage_providers or (
                    "electricity" in exc['name'] and
                    "import from" in exc['name'])):
                rescale_exchange(exc, high_in_low)
        # Pull medium- and low-voltage providers up into the high-voltage
        # mix, rescaling the medium ones by their low-voltage share.
        high['exchanges'].extend([rescale_exchange(exc, medium_in_low)
                                  for exc in medium['exchanges']
                                  if exc['name'] in medium_voltage_providers])
        high['exchanges'].extend([exc
                                  for exc in low['exchanges']
                                  if exc['name'] in low_voltage_providers])
    data = empty_medium_voltage_markets(data)
    data = empty_low_voltage_markets(data)
    return data
def basic_meas(
        user_config_dict,
        simulation_config_dict,
        btk_input,
):
    """Checks if detection output from the default meas generator matches
    the pre-computed value .

    The outputs from basic meas generator were visually checked and verified.
    This function makes sure that and changes made to the btk pipeline will not
    affect the detection results.

    Args:
        user_config_dict: Dictionary with information to run user defined
            functions (filenames, file location of user algorithms).
        simulation_config_dict (dict): Dictionary which sets the parameter
        btk_input : Module that runs btk for an input config file.
    """
    # Fixed seed so the draw/measure pipeline is deterministic.
    np.random.seed(int(simulation_config_dict["seed"]))
    # Expected peak positions for each of the four test blends
    # (visually verified reference values).
    test_detect_centers = [
        [[66, 69]],
        [[48, 67]],
        [[56, 54]],
        [[53, 57]],
    ]
    # Per-blend (x, y) shifts applied to the sources.
    shifts = [
        [[-2.4, -0.8, 0.9, 1.4], [-2.3, -0.4, 2.3, 1.9]],
        [[-2.3, 2.0, 0.0, 0.4, 0.7], [1.6, 0.1, 0.7, 0.9, 2.3]],
        [[0.6, -0.6, 1.7, 0.4, 2.3, 0.2], [-1.7, -1.1, -1.6, 0.7, 1.0, -1.5]],
        [[-1.3, -1.0, 1.2, -2.3], [-0.2, -0.9, -1.8, 1.4]],
    ]
    # Catalog row indices of the sources in each blend.
    indexes = [
        [
            3,
            1,
            9,
            6,
        ],
        [6, 10, 3, 7, 4],
        [10, 0, 7, 1, 9, 4],
        [1, 3, 2, 8],
    ]
    draw_blend_generator = btk_input.make_draw_generator(
        user_config_dict, simulation_config_dict, shifts=shifts, indexes=indexes
    )
    measure_generator = btk_input.make_measure_generator(
        user_config_dict, draw_blend_generator
    )
    # Take one batch and compare detected peaks against the references.
    output, deb, _ = next(measure_generator)
    for i in range(len(output["blend_list"])):
        detected_centers = deb[i]["peaks"]
        np.testing.assert_array_almost_equal(
            detected_centers,
            test_detect_centers[i],
            decimal=3,
            err_msg="Did not get desired detected_centers",
        )
    pass
def parser_first_text_or_content_if_could(html: etree._Element,
                                          query_path: str) -> Union[str, None]:
    """
    Evaluate the XPath query and return the first hit as a string.

    If the query yields a list, the first element is taken; element nodes
    are reduced to their ``text``, other results are stringified.
    """
    nodes = html.xpath(query_path)
    if not nodes:
        return None
    first = nodes[0]
    if hasattr(first, 'text'):
        return str(first.text)
    return str(first)
def get_file_paths_from(dir: Text) -> List[Text]:
    """
    list all file paths inside a directory.

    :param dir: a directory path that need to list.
    :return: a string list of file paths, or None when the directory
        does not exist.
    """
    if not os.path.exists(dir):
        logging.info('{} does not exist.'.format(dir))
        return None
    return ["{}/{}".format(dir, entry) for entry in os.listdir(dir)]
def get_all_quantity(results, q_func=None):
    """
    Collect the minimum quantity of every result, optionally swapping in a
    new quantity function first.
    """
    minima = []
    for key in results:
        result = results[key]
        if q_func is not None:
            # We change the quantity function
            result.q_func = q_func
        minima.append(result.min_quantity)
    return minima
def test_driver_get_latest_version(index_driver, database_conn):
    """
    Tests retrieval of the latest record version
    """
    baseid = str(uuid.uuid4())
    # Insert 10 records; each iteration uses a fresh baseid, so every
    # record is the sole (and therefore latest) version of its base.
    for _ in range(10):
        did = str(uuid.uuid4())
        rev = str(uuid.uuid4())[:8]
        size = 512
        form = 'object'
        baseid = str(uuid.uuid4())
        created_date = datetime.now()
        updated_date = datetime.now()
        database_conn.execute(make_sql_statement("""
            INSERT INTO index_record(did, baseid, rev, form, size, created_date, updated_date) VALUES (?,?,?,?,?,?,?)
        """, (did, baseid, rev, form, size, created_date, updated_date)))
    # The last inserted record is queried; all its fields must round-trip,
    # with dates serialized in ISO-8601 form.
    record = index_driver.get_latest_version(did)
    assert record['did'] == did, 'record id does not match'
    assert record['rev'] == rev, 'record revision does not match'
    assert record['size'] == size, 'record size does not match'
    assert record['form'] == form, 'record form does not match'
    assert record['created_date'] == created_date.isoformat(), 'created date does not match'
    assert record['updated_date'] == updated_date.isoformat(), 'updated date does not match'
def generate_example_type_2a(problem, one_step_inferences):
    """Generates a type 2a training example.

    Args:
      problem: a lib.InferenceProblem instance.
      one_step_inferences: the list of one step inferences that can be reached
        from the premises.

    Returns:
      An instance of "Example", or None if any issue was found.
    """
    example_type = "2a"
    name_rule = random.choice([True, False])
    inputs = ("What can be inferred from the following premises in a single "
              "inference step (ignoring inferences that add new predicates or "
              "constants)? ")
    if name_rule:
        inputs += "Name the inference rule being used: "
    inputs += (". ".join(rules.render_logic_clause(p)
                         for p in problem.premises)) + "."
    rendered = []
    for rule_inference, rule in one_step_inferences:
        line = rules.render_logic_clause(rule_inference)
        if name_rule:
            line += f" can be inferred via the {rule.rule_name} rule"
        rendered.append(line)
    targets = (". ".join(rendered)) + "."
    if not rendered:
        example_type = "2a-empty"
        targets = "Nothing can be inferred from these premises."
    elif problem.contains_contradiction:
        # Ex falso quodlibet: contradictory premises entail everything.
        example_type = "2a-cont"
        targets = ("Since the premises are contradictory, we can infer anything "
                   "from them.")
    return lib.Example(inputs, targets, example_type, problem)
def name_has_type_hint(name: str, frame: types.FrameType) -> str:
    """Identifies if a variable name has a type hint associated with it.

    This can be useful if a user write something like::

        name : something
        use(name)

    instead of::

        name = something
        use(name)

    and sees a NameError.

    HOWEVER, when an exception is raised, it seems that the only type hints
    that are picked up correctly are those found in the global scope.

    Returns the explanatory (translated) message, or "" when no annotation
    for ``name`` is found in any scope.
    """
    type_hint_found_in_scope = _(
        "A type hint found for `{name}` in the {scope} scope.\n"
        "Perhaps you had used a colon instead of an equal sign and wrote\n\n"
        "    {name} : {hint}\n\n"
        "instead of\n\n"
        "    {name} = {hint}\n"
    )
    nonlocals = get_variables_in_frame_by_scope(frame, "nonlocal")
    # Search order mirrors name resolution: local, then global, then nonlocal.
    scopes = (
        ("local", frame.f_locals),
        ("global", frame.f_globals),
        ("nonlocal", nonlocals),
    )
    for scope, scope_dict in scopes:
        if "__annotations__" in scope_dict and name in scope_dict["__annotations__"]:
            hint = scope_dict["__annotations__"][name]
            # For Python 3.10+, all type hints are strings
            if (
                isinstance(hint, str)
                and sys.version_info.major == 3
                and sys.version_info.minor < 10
            ):
                # Pre-3.10 a string annotation was a literal; quote it so the
                # suggestion shows what the user actually typed.
                hint = repr(hint)
            return type_hint_found_in_scope.format(name=name, scope=scope, hint=hint)
    return ""
def user_input(address, interface=None, name=None, filename='config.yaml'):
    """
    Gather user input for adding an instrument to the YAML configuration file

    Parameters
    ----------
    address : dict or str
        The address of the instrument, stored under the given interface key
    interface : str, optional
        Communication interface name; defaults to 'pyvisa'
    name : str, optional
        Instrument name (as the top node) used in the YAML; prompted for
        interactively when not supplied
    filename : str, optional
        Name of the YAML configuration file (looked up within ``home``)

    Returns
    -------
    dict
        The configuration dictionary that will be used to append the YAML,
        or an empty dict when the class or CSV-folder selection is invalid
    """
    # read current YAML; with-block closes the handle instead of leaking it
    with open(os.path.join(home, filename), 'r') as yaml_config:
        current_configs = yaml.safe_load(yaml_config)
    ok = False
    if name is None:
        while not ok:
            name = input('Enter your desired name for the instrument:')
            # input() always returns str, so only emptiness needs checking
            if len(name) == 0:
                print('Bad input, try again')
            else:
                ok = True
    config = {name: {}}
    if interface is None:
        interface = 'pyvisa'
    config[name] = {'address': {interface: address}}
    # determine the class to assign
    instrument_classes = find_instrument_classes()
    print('What class to assign to this instrument?')
    for num, ic in enumerate(instrument_classes):
        print('({}) {}'.format(num, ic))
    try:
        class_num = int(input(' Enter the number associated with the class: '))
    except ValueError:
        # non-numeric input previously crashed with an unhandled ValueError
        print('Bad selection of class')
        return {}
    if class_num < 0 or class_num >= len(instrument_classes):
        # >= (not >): valid indexes are 0 .. len-1; the old bound accepted
        # class_num == len and raised IndexError on the lookup below
        print('Bad selection of class')
        return {}
    config[name]['python_class'] = instrument_classes[class_num]
    # get location of CSV files
    print('The instrument command CSV files are within:\n {}/'.format(current_configs['csv_directory']))
    print('Enter where (within the directory above) this instruments CSV files are')
    csv_loc = input(' An example is keysight/oscilloscope/MSOX3000 : ')
    print(current_configs['csv_directory'])
    csv_dir = os.path.join(current_configs['csv_directory'], csv_loc)
    if not os.path.isdir(csv_dir):
        print('Directory {} does not exist. Exiting'.format(csv_dir))
        return {}
    config[name]['csv_folder'] = csv_loc
    return config
def matchings(A, B):
    """
    Iterate through all matchings of the sets `A` and `B`.

    Each yielded matching is a list of blocks, where a block is either a
    singleton from one set or a pair with one element from each set.

    EXAMPLES::

        sage: from sage.combinat.ncsym.ncsym import matchings
        sage: list(matchings([1, 2, 3], [-1, -2]))
        [[[1], [2], [3], [-1], [-2]],
         [[1], [2], [3, -1], [-2]],
         [[1], [2], [3, -2], [-1]],
         [[1], [2, -1], [3], [-2]],
         [[1], [2, -1], [3, -2]],
         [[1], [2, -2], [3], [-1]],
         [[1], [2, -2], [3, -1]],
         [[1, -1], [2], [3], [-2]],
         [[1, -1], [2], [3, -2]],
         [[1, -1], [2, -2], [3]],
         [[1, -2], [2], [3], [-1]],
         [[1, -2], [2], [3, -1]],
         [[1, -2], [2, -1], [3]]]
    """
    items_a = list(A)
    items_b = list(B)
    # Base cases: once one side is exhausted, every leftover element of the
    # other side stands alone as a singleton block.  (The comprehension is
    # empty when both sides are empty, yielding the empty matching.)
    if not items_a:
        yield [[b] for b in items_b]
        return
    if not items_b:
        yield [[a] for a in items_a]
        return
    first, *tail_a = items_a
    # Either `first` stays unmatched ...
    for partial in matchings(tail_a, items_b):
        yield [[first]] + partial
    # ... or it pairs with each element of B in turn.
    for idx in range(len(items_b)):
        remaining_b = items_b[:idx] + items_b[idx + 1:]
        partner = items_b[idx]
        for partial in matchings(tail_a, remaining_b):
            yield [[first, partner]] + partial
def query_title_bar_text(shared_state):
    """Build the title-bar text from the currently active collection.

    Re-evaluated whenever the screen changes, so the bar tracks the
    collection stored in ``shared_state["active_collection"]``.
    """
    active = shared_state["active_collection"]
    return f"QUERY SOURCE: {active.name}"
def input_handler2():
    """Run the wx event loop by processing pending events only.

    This is like inputhook_wx1, but it keeps processing pending events
    until stdin is ready. After processing all pending events, a call to
    time.sleep is inserted. This is needed, otherwise, CPU usage is at 100%.
    This sleep time should be tuned though for best performance.

    Returns:
        int: always 0.
    """
    app = wx.GetApp()
    global POLLTIME, ON_INTERRUPT
    # Do nothing when no wx application has been created yet.
    if app is not None:
        if not wx.Thread_IsMain():
            raise Exception('wx thread is not the main thread')
        evtloop = wx.EventLoop()
        activator = wx.EventLoopActivator(evtloop)
        # Poll until stdin has input, dispatching queued wx events and
        # idle processing on each pass.
        while not stdin_ready():
            while evtloop.Pending():
                evtloop.Dispatch()
            app.ProcessIdle()
            try:
                # Sleep briefly so this poll loop does not spin at 100% CPU.
                sleep(POLLTIME)
            except KeyboardInterrupt:
                # ON_INTERRUPT is an optional callback installed elsewhere.
                if hasattr(ON_INTERRUPT, '__call__'):
                    ON_INTERRUPT()
        # Drop the reference instead of `del activator`; presumably this
        # deactivates the loop and restores the previous one — TODO confirm
        # wx EventLoopActivator semantics.
        activator = None
        # del activator
    return 0
def create_upload_record(env, source_id, headers, cookies):
    """Creates an upload resource via the G.h Source API.

    Args:
        env: Deployment environment name used to build the API base URL.
        source_id: ID of the source the upload belongs to.
        headers: HTTP headers forwarded with the POST request.
        cookies: Cookies forwarded with the POST request.

    Returns:
        The ``_id`` field of the created upload record (from the JSON
        response). On any non-201 response this function does not return;
        it delegates to ``complete_with_error``.
    """
    post_api_url = f"{get_source_api_url(env)}/sources/{source_id}/uploads"
    print(f"Creating upload via {post_api_url}")
    res = requests.post(post_api_url,
                        json={"status": "IN_PROGRESS", "summary": {}},
                        cookies=cookies,
                        headers=headers)
    # 201 Created is the only status treated as success here.
    if res and res.status_code == 201:
        res_json = res.json()
        return res_json["_id"]
    e = RuntimeError(
        f"Error creating upload record, status={res.status_code}, response={res.text}")
    complete_with_error(e)
def get_versions(sys):
    """Collect version strings for every imported top-level module.

    Parameters
    ----------
    sys : module
        The sys module object (passed in so its ``modules`` dict is read).

    Returns
    -------
    module_versions : dict
        Maps each top-level, non-underscore module name to its version
        string (or None when the module exposes no usable ``__version__``).
    """
    module_versions = {}
    for mod_name, mod in sys.modules.items():
        # Skip submodules — only top-level packages are reported.
        if '.' in mod_name:
            continue
        # Skip private/internal modules such as ``_abc``.
        if isinstance(mod_name, str) and mod_name and mod_name.startswith('_'):
            continue
        loose = LooseVersion(getattr(mod, '__version__', None))
        version_str = getattr(loose, 'vstring', None)
        # Development installs get the current git HEAD appended so the
        # exact revision is recorded.
        if version_str is not None and ('git' in version_str or '.dev' in version_str):
            repo_dir = op.dirname(op.realpath(mod.__file__))
            version_str += '-HEAD:{}'.format(_get_git_head(repo_dir))
        module_versions[mod_name] = version_str
    return module_versions
def day05_part1(file: str) -> int:
    """Solves advent of code: day05 part1 — highest seat number in *file*."""
    with open(file) as handle:
        seat_numbers = [Seat(row.strip()).number for row in handle]
    return max(seat_numbers)
def GetInput():
    """Get player inputs and lower-case the input"""
    prompt = "{:>20s}".format("")
    answer = str(input(prompt))
    # Push the previous screen content out of view.
    print("\n \n \n \n \n")
    return answer.lower()
def validate_forward():
    """
    Validate ports are forwarded

    Deploys an nginx pod, forwards local port 5123 to it and polls until
    the forwarded port answers with HTTP 200 (up to ~20 seconds).
    """
    here = os.path.dirname(os.path.abspath(__file__))
    manifest = os.path.join(here, "templates", "nginx-pod.yaml")
    kubectl("apply -f {}".format(manifest))
    wait_for_pod_state("", "default", "running", label="app=nginx")
    os.system("killall kubectl")
    os.system("/snap/bin/microk8s.kubectl port-forward pod/nginx 5123:80 &")
    # Fix: resp was previously unbound when every request raised, turning
    # the final assert into a NameError instead of a clean test failure.
    resp = None
    attempt = 10
    while attempt >= 0:
        try:
            resp = requests.get("http://localhost:5123")
            if resp.status_code == 200:
                break
        except requests.RequestException:
            # The port-forward may not be up yet; retry after a pause.
            pass
        attempt -= 1
        time.sleep(2)
    assert resp is not None and resp.status_code == 200
def ldns_fskipcs_l(*args):
    """LDNS buffer.

    Thin wrapper: forwards all positional arguments to the C-level
    ``_ldns.ldns_fskipcs_l`` binding and returns its result unchanged.
    """
    return _ldns.ldns_fskipcs_l(*args)
def specific_parser(parser, log=False, run_folder=None, mode=None, tot_epochs=None, restoring_rep_path=None,
                    start_from_epoch=None, pretrained_GAN=None, GAN_epoch=None, data_dir_train=None, data_dir_train2=None,
                    data_dir_test=None, data_dir_test2=None, images_log_freq=None, batch_size=None, batch_size_SN=None,
                    acc_log_freq=None, loss_log_freq=None, experiment_name=None, run_description=None, prc_train=None,
                    prc_test=None, prc_val=None, sar_c=None, optical_c=None, N_classes=None, patch_size=None, SN_log_freq=None,
                    save_model_freq=None, lambda_identity=None, D_training_ratio=None, lambda_A=None, loss_type=None,
                    lambda_gp=None, res_block_N=None, pool_prc_O=None, pool_prc_S=None, buff_dim=None, th_low=None, th_high=None,
                    pool=None, conditioned=None, dropping=None, th_b_h_ratio=None, th_b_l_ratio=None, th_b_h_pool=None,
                    th_b_l_pool=None, drop_prc=None, seed=None):
    """
    Intermediate layer between the general parser and the config routine,
    letting callers easily override individual parsed arguments when
    building an experiment.

    Every keyword argument (except ``parser`` and ``log``) overrides the
    attribute of the same name on the parsed args via ``update_arg``;
    ``run_folder`` is the one exception — it overrides ``args.log_dir``.

    :param parser: argparse parser whose ``parse_args()`` supplies defaults
    :param log: when True, ``update_arg`` prints each applied override
    :return: the parsed ``args`` namespace with overrides applied
    """
    # Snapshot the keyword overrides before any other locals are created.
    overrides = dict(locals())
    args = parser.parse_args()
    print('SPECIFIC CONFIG')
    # `run_folder` is the only override whose target attribute has a
    # different name (`log_dir`), so it is handled explicitly first.
    args.log_dir = update_arg(args.log_dir, run_folder, 'log_dir', log)
    # Remaining overrides match their target attribute names one-to-one;
    # the order below reproduces the original update (and logging) order.
    for field in (
        'tot_epochs', 'mode', 'restoring_rep_path', 'start_from_epoch',
        'pretrained_GAN', 'GAN_epoch', 'data_dir_train', 'data_dir_train2',
        'data_dir_test', 'data_dir_test2', 'images_log_freq', 'batch_size',
        'batch_size_SN', 'acc_log_freq', 'loss_log_freq', 'experiment_name',
        'run_description', 'prc_train', 'prc_test', 'prc_val', 'sar_c',
        'optical_c', 'N_classes', 'patch_size', 'SN_log_freq',
        'save_model_freq', 'lambda_identity', 'D_training_ratio', 'lambda_A',
        'loss_type', 'lambda_gp', 'res_block_N', 'pool_prc_O', 'pool_prc_S',
        'buff_dim', 'th_low', 'th_high', 'pool', 'conditioned', 'dropping',
        'th_b_h_ratio', 'th_b_l_ratio', 'th_b_h_pool', 'th_b_l_pool',
        'drop_prc', 'seed',
    ):
        setattr(args, field, update_arg(getattr(args, field), overrides[field], field, log))
    return args
def test_to_dataframe_dir_and_list_raises(
    spect_dir_mat, spect_list_mat, annot_list_yarden
):
    """test that calling ``to_dataframe`` with both dir and list raises a ValueError"""
    # ``spect_dir`` and ``spect_files`` are mutually exclusive sources of
    # spectrogram input; supplying both must be rejected.
    with pytest.raises(ValueError):
        vak.io.spect.to_dataframe(
            spect_format="mat",
            spect_dir=spect_dir_mat,
            spect_files=spect_list_mat,
            annot_list=annot_list_yarden,
            annot_format="yarden",
        )
def to_identifier(text):
    """Converts text to a valid Python identifier by replacing all
    whitespace and punctuation (via TRANS_TABLE) and prefixing an
    underscore when the text starts with a digit; runs of underscores
    are collapsed to a single one."""
    prefixed = '_' + text if text[:1].isdigit() else text
    translated = str(prefixed).translate(TRANS_TABLE)
    return re.sub('_+', '_', translated)
def scan(device, pullup, frequencies):
    """
    Scan for connected I2C devices

    Standard mode: 100kHz
    Fast mode: 400kHz
    Fast mode plus: 1MHz
    high speed mode: 3.2MHz
    """
    # Map of responding address -> list of clock frequencies it answered at.
    addr_to_freqs = {}
    for clock_hz in frequencies:
        bus = I2CBus(device, clock_frequency=clock_hz, enable_pullups=pullup)
        for address in bus.scan():
            addr_to_freqs.setdefault(address, []).append(clock_hz)
    # NOTE(review): presumably this leaves the bus configured back at the
    # standard 400kHz fast-mode speed — confirm this side effect is needed.
    bus = I2CBus(device, clock_frequency=400000, enable_pullups=pullup)
    # list output
    print("I2C Bus Scan Report: (Address, Clock Frequency)")
    if not addr_to_freqs:
        print("No devices found!")
        return
    print("Discovered %s I2C devices" % len(addr_to_freqs))
    print()
    for address, clock_list in addr_to_freqs.items():
        for clock_hz in clock_list:
            print("%s @ %dkHz" % (hex(address), clock_hz / 1000))
    print()
    print()
def estimate_next_pos(measurement, OTHER = None):
    """Estimate the next (x, y) position of the wandering Traxbot
    based on noisy (x, y) measurements.

    Parameters
    ----------
    measurement : tuple
        Noisy (x, y) observation of the robot.
    OTHER : dict or None
        Filter state carried between calls; None on the first call.

    Returns
    -------
    tuple
        ``(est_xy, OTHER)`` — the predicted next (x, y) position and the
        updated filter state, in this order (required for grading).
    """
    # Removed the large blocks of commented-out seeding code that were
    # left over from an earlier three-measurement initialization scheme.
    if OTHER is None:
        # First call: build the EKF matrices and seed the 5-dimensional
        # state [x, y, 0, 0, 0] from the first measurement.
        [u, P, H, R] = setup_kalman_filter()
        x = matrix([[measurement[0]], [measurement[1]], [0], [0], [0]])
        OTHER = {'z_list': deque([]), 'x': x,
                 'P': P, 'u': u, 'matrices': [H, R], 'step': 1}
        # NOTE(review): on the first call the measurement is appended here
        # AND again below, so it appears twice in z_list.  Preserved from
        # the original code — confirm before relying on z_list contents.
        OTHER['z_list'].append(np.array(measurement))
    OTHER['z_list'].append(np.array(measurement))
    # One predict/update cycle of the extended Kalman filter.
    x, P = extended_kalman_filter(measurement, OTHER['x'], OTHER['u'],
                                  OTHER['P'], robot_F_fn, robot_x_fn, *OTHER['matrices'])
    OTHER['x'] = x
    OTHER['P'] = P
    # Propagate the filtered state one step through the motion model to
    # predict the robot's next position.
    next_state = robot_x_fn(x)
    est_xy = (next_state.value[0][0], next_state.value[1][0])
    return est_xy, OTHER
def AutoBusList(*args):
    """List of Buses or (File=xxxx) syntax for the AutoAdd solution mode.

    Called with no arguments it acts as a getter; called with one argument
    it acts as a setter (str values are encoded with the module codec).
    """
    if not args:
        # Getter
        return get_string(lib.Settings_Get_AutoBusList())
    # Setter
    (value,) = args
    if type(value) is not bytes:
        value = value.encode(codec)
    lib.Settings_Set_AutoBusList(value)
def validate_inputs(*, input_data: pd.DataFrame) -> Tuple[pd.DataFrame, Optional[dict]]:
    """Check model inputs for unprocessable values.

    Returns the validated (NaN-dropped) feature frame together with the
    pydantic validation errors as a JSON string, or None when valid.
    """
    # convert syntax error field names (beginning with numbers)
    # input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
    # Coerce "TotalCharges" to numeric; entries that fail to parse become NaN.
    input_data["TotalCharges"] = pd.to_numeric(
        input_data["TotalCharges"], errors="coerce"
    )
    # Keep only the configured feature columns, then drop rows the model
    # cannot process.
    relevant_data = input_data[config.model_config.features].copy()
    validated_data = drop_na_inputs(input_data=relevant_data)
    errors = None
    try:
        # replace numpy nans so that pydantic can validate
        MultipleChurnDataInputs(
            inputs=validated_data.replace({np.nan: None}).to_dict(orient="records")
        )
    except ValidationError as error:
        errors = error.json()
    return validated_data, errors
def params(chrom1, simtype, outdir, alpha, beta,
           p_a, p_b, gamma_a, gamma_b, gamma_inter,
           seed, chrom2, centerdis, localinds, radius, diffd):
    """
    Sample simulation parameters.

    Thin command wrapper: forwards every argument unchanged to
    ``simulateyeast.cmd_sample_params`` and returns its result (None).
    """
    simulateyeast.cmd_sample_params(chrom1, simtype, outdir,
                                    alpha=alpha, beta=beta,
                                    p_a=p_a, p_b=p_b,
                                    gamma_a=gamma_a, gamma_b=gamma_b, gamma_inter=gamma_inter,
                                    seed=seed, chrom2=chrom2, centerdis=centerdis,
                                    localinds=localinds, radius=radius, diffd=diffd)
def create_env(n_envs, eval_env=False, no_log=False):
    """
    Create the environment and wrap it if necessary
    :param n_envs: (int)
    :param eval_env: (bool) Whether is it an environment used for evaluation or not
    :param no_log: (bool) Do not log training when doing hyperparameter optim
    (issue with writing the same file)
    :return: (Union[gym.Env, VecEnv])
    """
    # NOTE(review): relies on module-level state (hyperparams, env_kwargs,
    # args, save_path, env_id, env_wrapper, normalize, normalize_kwargs)
    # being initialized by the surrounding script before this is called.
    global hyperparams
    global env_kwargs
    # Do not log eval env (issue with writing the same file)
    log_dir = None if eval_env or no_log else save_path
    if n_envs == 1:
        env = DummyVecEnv([make_env(env_id, 0, args.seed,
                                    wrapper_class=env_wrapper, log_dir=log_dir,
                                    env_kwargs=env_kwargs)])
    else:
        # env = SubprocVecEnv([make_env(env_id, i, args.seed) for i in range(n_envs)])
        # On most env, SubprocVecEnv does not help and is quite memory hungry
        env = DummyVecEnv([make_env(env_id, i, args.seed, log_dir=log_dir, env_kwargs=env_kwargs,
                                    wrapper_class=env_wrapper) for i in range(n_envs)])
    if normalize:
        # Copy to avoid changing default values by reference
        local_normalize_kwargs = normalize_kwargs.copy()
        # Do not normalize reward for env used for evaluation
        if eval_env:
            if len(local_normalize_kwargs) > 0:
                local_normalize_kwargs['norm_reward'] = False
            else:
                local_normalize_kwargs = {'norm_reward': False}
        if args.verbose > 0:
            if len(local_normalize_kwargs) > 0:
                print(f"Normalization activated: {local_normalize_kwargs}")
            else:
                print("Normalizing input and reward")
        env = VecNormalize(env, **local_normalize_kwargs)
    # Optional Frame-stacking
    if hyperparams.get('frame_stack', False):
        n_stack = hyperparams['frame_stack']
        env = VecFrameStack(env, n_stack)
        print(f"Stacking {n_stack} frames")
    # Image observations need channel-first layout for the CNN policies.
    if is_image_space(env.observation_space):
        if args.verbose > 0:
            print("Wrapping into a VecTransposeImage")
        env = VecTransposeImage(env)
    return env
def ellipse(pts, pc=None, ab=None):
    """ Distance function for the ellipse
        centered at pc = [xc, yc], with a, b = [a, b]
    """
    # None defaults (instead of mutable list defaults) avoid the shared
    # default-argument pitfall.
    center = [0, 0] if pc is None else pc
    semi_axes = [1., 2.] if ab is None else ab
    return dist((pts - center) / semi_axes) - 1.0
def as_root(ctx):
    """Instruct fabric to use the root user.

    ``ctx`` is accepted for fabric's task signature but is not used here;
    the connections come from the module-level ``settings['hosts']``.
    """
    for conn in settings['hosts']:
        conn.user = 'root'
        # Reconnect as the root user
        conn.close()
        conn.open()
def test_ReadFHD_select():
    """
    test select on read with FHD files.
    Read in FHD files with generic read & select on read, compare to read fhd
    files then do select
    """
    fhd_uv = UVData()
    fhd_uv2 = UVData()
    # Select-on-read path: only the first two frequency channels are kept
    # while reading.
    uvtest.checkWarnings(fhd_uv2.read, [testfiles], {'freq_chans': np.arange(2)},
                         message=['Warning: select on read keyword set',
                                  'Telescope location derived from obs'],
                         nwarnings=2)
    # Reference path: full read, then the same selection applied afterwards.
    uvtest.checkWarnings(fhd_uv.read, [testfiles], known_warning='fhd')
    fhd_uv.select(freq_chans=np.arange(2))
    # Both paths must produce identical objects.
    nt.assert_equal(fhd_uv, fhd_uv2)
def _transform(
    parsed_date_data: ParsedDate,
    parsed_output_format_data: ParsedTargetFormat,
    output_format: str,
    output_timezone: str,
) -> str:
    """
    This function transform parsed result into target format

    Parameters
    ----------
    parsed_date_data
        generated year, month, day, hour, minute, second
    parsed_output_format_data
        generated year token, month token, day token, hour token,
        minute token, second token of target format
    output_format
        target format string
    output_timezone
        target timezone string

    Returns
    -------
    str
        ``output_format`` with each recognized token replaced by the
        corresponding value from ``parsed_date_data``.
    """
    # Work on a copy so the caller's format string stays untouched.
    result = deepcopy(output_format)
    if output_timezone != "":
        parsed_date_data = _change_timezone(parsed_date_data, output_timezone)
    # Tokens are substituted one category at a time; the replacement order
    # below is deliberate (e.g. month is handled after the numeric fields).
    # Handle year
    result = _transform_year(
        result, parsed_output_format_data.ymd_token["year_token"], parsed_date_data.ymd["year"]
    )
    # Handle day
    result = _transform_day(
        result, parsed_output_format_data.ymd_token["day_token"], parsed_date_data.ymd["day"]
    )
    # Handle hours
    result = _transform_hms(
        result,
        str(parsed_output_format_data.hms_token["hour_token"]),
        bool(parsed_output_format_data.hms_token["ispm"]),
        parsed_date_data.hms["hour"],
    )
    # Handle minutes
    result = _transform_hms(
        result,
        str(parsed_output_format_data.hms_token["minute_token"]),
        False,
        parsed_date_data.hms["minute"],
    )
    # Handle seconds
    result = _transform_hms(
        result,
        str(parsed_output_format_data.hms_token["second_token"]),
        False,
        parsed_date_data.hms["second"],
    )
    # Handle month
    result = _transform_month(
        result, parsed_output_format_data.ymd_token["month_token"], parsed_date_data.ymd["month"]
    )
    # Handle weekday
    result = _transform_weekday(
        result, parsed_output_format_data.weekday_token, parsed_date_data.weekday
    )
    # Handle timezone
    result = _transform_timezone(
        result,
        parsed_output_format_data.timezone_token,
        str(parsed_date_data.tzinfo["timezone"]),
        str(parsed_date_data.tzinfo["utc_add"]),
        int(parsed_date_data.tzinfo["utc_offset_hours"]),
        int(parsed_date_data.tzinfo["utc_offset_minutes"]),
    )
    return result
def volunteer_dict_from_request(request: flask.Request, actor: str) -> dict:
    """Creates and returns a dict of volunteer info from the request.

    `actor` is the ID/email of the person or entity that is triggering this.

    Aborts the request with HTTP 400 when the form sets fields it must
    not, supplies disallowed values, or fails volunteer/geoposition
    validation.
    """
    logging.debug('gapps.volunteer_dict_from_request: %s', list(request.values.items()))
    # Make sure the user/form/request isn't trying to mess with fields that it
    # shouldn't be.
    for name, field in _S.volunteer.fields._asdict().items():
        if not field.form_field and request.values.get(name) is not None:
            # This causes the request processing to stop
            flask.abort(400, description='invalid field')
        # Multi-valued submissions must be a subset of the allowed values.
        if field.values is not None and request.values.get(name) is not None \
            and not set(request.values.get(name).split(config.MULTIVALUE_DIVIDER)).issubset(field.values):
            # This causes the request processing to stop
            flask.abort(400, description='invalid field value')
    volunteer = config.validate_volunteer(request.values)
    if not volunteer:
        logging.warning('gapps.volunteer_dict_from_request: config.validate_volunteer failed')
        # This causes the request processing to stop
        flask.abort(400, description='invalid input')
    # We didn't validate the geoposition above, so do it now
    geoposition = request.values.get(_GEOPOSITION_VALUE_KEY, '')
    geoposition_required = _S.volunteer.fields.joined_latlong.required
    if not utils.latlong_validator(geoposition, geoposition_required):
        logging.warning('gapps.volunteer_dict_from_request: utils.latlong_validator failed')
        flask.abort(400, description='invalid input')
    geoaddress = helpers.address_from_latlong(geoposition)
    # Set the GUID field
    volunteer[_S.volunteer.fields.id.name] = str(uuid.uuid4())
    # Set the timestamps
    volunteer[_S.volunteer.fields.joined.name] = utils.current_datetime()
    volunteer[_S.volunteer.fields.joined_by.name] = actor
    volunteer[_S.volunteer.fields.joined_latlong.name] = geoposition
    volunteer[_S.volunteer.fields.joined_address.name] = geoaddress
    volunteer[_S.volunteer.fields.address_latlong.name] = \
        helpers.latlong_for_record(_S.volunteer.fields, volunteer)
    return volunteer
def stop_next_turn():
    """
    Dirty way to stop the MCTS in a clean way (without SIGINT or SIGTERM)...
    the mcts finish current turn save data and stop (if you are using dft it can take some time...)
    write "stop" in the file MCTS/stop_mcts
    :return: True when the stop file contains "stop", False otherwise
    """
    # p.f_stop is the path of the sentinel file polled between turns.
    with open(p.f_stop) as f:
        stop = f.read()
    if "stop" in stop:
        print("MCTS stopped with signal 'stop' in '%s' file" % p.f_stop)
        return True
    return False
def inner(X):
    """
    >>> X = [1, 2, 3, 4, 5]
    >>> list(inner(X))
    [1, 2, 3, 4, 5]
    """
    # Delegate iteration directly to the underlying iterable.
    yield from X
def test_clear_request(state, requested_sls_key):
    """
    verify clearing a state request sent to the minion(s)
    """
    # A pending request reports a ``result`` of None until it is applied.
    ret = state.request("requested")
    assert ret[requested_sls_key]["result"] is None
    # Clearing the stored request returns a plain True.
    ret = state.clear_request()
    assert ret is True
async def flip_next(user_id: int, state: FSMContext):
    """Handles flipping mode - when user views his bookmarks or found comics.

    Pops the next comic id from the FSM-stored queue and sends it; the
    keyboard switches from flipping to navigation on the last comic.
    """
    fsm_list = (await state.get_data()).get('fsm_list')
    if fsm_list:
        # Language preference stored alongside the queue; default to English.
        fsm_lang = (await state.get_data()).get('fsm_lang')
        comic_lang = 'en' if not fsm_lang else fsm_lang
        # Take the next comic and persist the shrunken queue back to FSM.
        comic_id = fsm_list.pop(0)
        await state.update_data(fsm_list=fsm_list)
        if fsm_list:
            await send_comic(user_id, comic_id=comic_id, keyboard=kboard.flipping, comic_lang=comic_lang)
        else:
            # Queue exhausted: announce the last item and drop back to the
            # regular navigation keyboard.
            await bot.send_message(user_id, text=_("❗ <b>The last one:</b>"))
            await send_comic(user_id, comic_id=comic_id, keyboard=kboard.navigation, comic_lang=comic_lang)
    else:
        # Bot uses a memory cache and sometimes reloaded, losing some data. Perfect crutch!
        await bot.send_message(user_id,
                               text=_("❗ <b>Sorry, I was rebooted and forgot all the data... 😢\n"
                                      "Please repeat your request.</b>"),
                               reply_markup=await kboard.menu_or_xkcding(user_id))
def pull_request_average_time_between_responses(self, repo_group_id, repo_id=None, group_by='month', time_unit='hours', begin_date=None, end_date=None):
        """ Average time between responses with merged_status and the time frame

        :param repo_group_id: The repository's repo_group_id
        :param repo_id: The repository's repo_id, defaults to None
        :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month'
        :param time_unit: Unit of time for data, options are: 'minutes', or 'hours', defaults to 'hours'
        :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
        :param end_date: Specifies the end date, defaults to datetime.now()
        :return: DataFrame of average time between responses
        """
        if not begin_date:
            begin_date = '1970-1-1'
        if not end_date:
            end_date = datetime.datetime.now().strftime('%Y-%m-%d')
        # Build the list of closed_* columns to group by: from 'closed_year'
        # down to the chosen `group_by` granularity.
        unit_options = ['year', 'month', 'week', 'day']
        time_group_bys = []
        for unit in unit_options.copy():
            if group_by not in unit_options:
                continue
            time_group_bys.append('closed_{}'.format(unit))
            del unit_options[0]
        # Group-level query (all repos in the group) vs single-repo query.
        if not repo_id:
            pr_all_SQL = s.sql.text("""
            SELECT
                repo_id,
                repo_name,
                repo_group_id,
                rg_name AS repo_group_name,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                (EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses,
                (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                count(*) AS num_pull_requests
            FROM (
                SELECT
                    repo_name,
                    repo_groups.repo_group_id,
                    rg_name,
                    pull_requests.repo_id,
                    pull_requests.pull_request_id,
                    pr_closed_at,
                    pr_created_at,
                    pr_merged_at,
                    (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses
                FROM pull_request_message_ref, message, repo_groups,
                    pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
                WHERE pull_requests.repo_id IN
                    (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND repo_groups.repo_group_id = repo.repo_group_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id, repo.repo_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name
            ) time_between_responses
            GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses, time_between_responses.repo_id, time_between_responses.repo_name, time_between_responses.repo_group_id, time_between_responses.rg_name
            """)
        else:
            pr_all_SQL = s.sql.text("""
            SELECT
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                (EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses,
                (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                count(*) AS num_pull_requests
            FROM (
                SELECT pull_requests.pull_request_id,
                    pr_closed_at,
                    pr_created_at,
                    pr_merged_at,
                    (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses
                FROM pull_requests, repo, pull_request_message_ref, message
                WHERE repo.repo_id = :repo_id
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id
            ) time_between_responses
            GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses
            """)
        pr_all = pd.read_sql(pr_all_SQL, self.database,
                             params={'repo_id': repo_id, 'repo_group_id':repo_group_id,
                                     'begin_date': begin_date, 'end_date': end_date})
        # Average the per-PR response gaps within each (status, time-bucket)
        # group, keeping only the column for the requested time unit.
        if not repo_id:
            pr_avg_time_between_responses = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_between_responses'.format(time_unit)]]
        else:
            pr_avg_time_between_responses = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_between_responses'.format(time_unit)]]
        return pr_avg_time_between_responses
def create_money(request):
    """Create money object.

    GET renders the creation form pre-filled with the current timestamp;
    POST validates the form, assigns the current user as owner and
    redirects to the new object on success.
    """
    if request.method == 'POST':
        form = MoneyForm(request.POST, request.FILES)
        if form.is_valid():
            money = form.save(commit=False)
            # The owner is always the session user, never a form value.
            money.owner = request.user
            money.save()
            return redirect(money)
        else:
            return render(request, 'nec_bank/create_money.html', {'money_form': form})
    else:
        # HACK: QueryDicts are immutable; temporarily unlock request.GET to
        # inject a default created_date before binding it to the form.
        request.GET._mutable = True
        request.GET['created_date'] = timezone.now().astimezone().strftime('%Y-%m-%d %H:%M:%S')
        request.GET._mutable = False
        form = MoneyForm(request.GET)
        return render(request, 'nec_bank/create_money.html', {'money_form': form})
def stripper(reply: str, prefix=None, suffix=None) -> str:
    """This is a helper function used to strip off reply prefix and
    terminator. Standard Python str.strip() doesn't work reliably because
    it operates on character-by-character basis, while prefix/terminator
    is usually a group of characters.

    Args:
        reply: String to be stripped.
        prefix: Substring to remove from the beginning of the line.
        suffix: Substring to remove from the end of the line.

    Returns:
        (str): Naked reply.
    """
    # Truthiness (not just `is not None`) guards against empty strings:
    # reply[:-len("")] is reply[:0], which would wipe the whole reply.
    if prefix and reply.startswith(prefix):
        reply = reply[len(prefix):]
    if suffix and reply.endswith(suffix):
        reply = reply[:-len(suffix)]
    return reply
def resnet_50_generator(block_fn,
                        lst_layers,
                        num_classes,
                        pruning_method=None,
                        data_format='channels_first',
                        name=None):
    """Generator for ResNet v1 models.

    Args:
        block_fn: String that defines whether to use a `residual_block` or
            `bottleneck_block`.
        lst_layers: List of four ints, the number of blocks in each block
            group; each group consists of blocks taking inputs of the same
            resolution.
        num_classes: Int number of possible classes for image classification.
        pruning_method: String that specifies the pruning method used to
            identify which weights to remove.
        data_format: Either "channels_first" for `[batch, channels, height,
            width]` or "channels_last" for `[batch, height, width, channels]`.
        name: String that specifies a name for the model's variable scope.

    Returns:
        Model `function` that takes in `inputs` and `is_training` and returns
        the output `Tensor` of the ResNet model.
    """
    # (filters, blocks, strides, scope name) for the four residual stages.
    group_specs = (
        (64, lst_layers[0], 1, 'block_group1'),
        (128, lst_layers[1], 2, 'block_group2'),
        (256, lst_layers[2], 2, 'block_group3'),
        (512, lst_layers[3], 2, 'block_group4'),
    )

    def model(inputs, is_training):
        """Creation of the model graph."""
        with tf.variable_scope(name, 'resnet_model'):
            # Stem: 7x7/2 convolution, BN+ReLU, then 3x3/2 max pool.
            inputs = conv2d_fixed_padding(
                inputs=inputs,
                filters=64,
                kernel_size=7,
                strides=2,
                pruning_method=pruning_method,
                data_format=data_format,
                name='initial_conv')
            inputs = tf.identity(inputs, 'initial_conv')
            inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
            inputs = tf.layers.max_pooling2d(
                inputs=inputs,
                pool_size=3,
                strides=2,
                padding='SAME',
                data_format=data_format,
                name='initial_max_pool')
            inputs = tf.identity(inputs, 'initial_max_pool')

            # The four residual stages, driven by the spec table above.
            for filters, blocks, strides, group_name in group_specs:
                inputs = block_group(
                    inputs=inputs,
                    filters=filters,
                    block_fn=block_fn,
                    blocks=blocks,
                    strides=strides,
                    is_training=is_training,
                    name=group_name,
                    pruning_method=pruning_method,
                    data_format=data_format)

            # NOTE(review): with 'channels_first', shape[1] is the channel
            # axis, so this pool size is (channels, height) — looks
            # suspicious for that layout; confirm the intended data_format.
            pool_size = (inputs.shape[1], inputs.shape[2])
            inputs = tf.layers.average_pooling2d(
                inputs=inputs,
                pool_size=pool_size,
                strides=1,
                padding='VALID',
                data_format=data_format,
                name='final_avg_pool')
            inputs = tf.identity(inputs, 'final_avg_pool')
            # Flatten to the 2048-wide ResNet-50 feature vector, then classify.
            inputs = tf.reshape(inputs, [-1, 2048])
            inputs = tf.layers.dense(
                inputs=inputs,
                units=num_classes,
                kernel_initializer=tf.random_normal_initializer(stddev=.01),
                name='final_dense')
            inputs = tf.identity(inputs, 'final_dense')
            return inputs

    model.default_image_size = 224
    return model
def bracketBalanced(expression):
    """Check if an expression is balanced.

    An expression is balanced if all the opening brackets (i.e. '(, {, [')
    have a corresponding closing bracket (i.e. '), }, ]') in the correct
    nesting order. Non-bracket characters are ignored.

    Args:
        expression (str): The expression to be checked.

    Returns:
        bool: True if expression is balanced. False if not balanced.
    """
    pairs = {'(': ')', '{': '}', '[': ']'}
    closers = set(pairs.values())
    # A plain list is a perfectly good stack; no custom Stack class needed.
    stack = []
    for ch in expression:
        if ch in pairs:
            stack.append(ch)
        elif ch in closers:
            # A closer with no opener, or the wrong opener, is unbalanced.
            if not stack or ch != pairs[stack.pop()]:
                return False
    # Any leftover openers mean an unmatched bracket.
    return not stack
def create_index_and_alias(index_name, update_alias=False):
    """
    Create an empty dated index and optionally point the alias at it.

    The concrete index is named ``{index_name}-{YYYY-MM-DD}``. Fix and fix
    proposal stores get an explicit mapping so that ``value`` is indexed as
    text. When ``update_alias`` is True the ``index_name`` alias is moved to
    the newly created index (default is False).
    """
    date = datetime.today().strftime("%Y-%m-%d")
    dated_index = f"{index_name}-{date}"
    exists = es.indices.exists(dated_index)
    if not exists:
        # Shared leaf mapping: 'value' must be indexed as text.
        operands_mapping = {"properties": {"value": {"type": "text"}}}
        # Check "fix-prop" first: plain "fix" is a substring of it.
        if "fix-prop" in index_name:
            # Proposal docs nest the operands one level deeper, under 'fix'.
            body = {
                "mappings": {
                    "properties": {
                        "fixes": {
                            "properties": {
                                "fix": {
                                    "properties": {"operands": operands_mapping}
                                }
                            }
                        }
                    }
                }
            }
            es.indices.create(dated_index, body=body)
        elif "fix" in index_name:
            body = {
                "mappings": {
                    "properties": {
                        "fixes": {"properties": {"operands": operands_mapping}}
                    }
                }
            }
            es.indices.create(dated_index, body=body)
        else:
            # Other stores take the default dynamic mapping.
            es.indices.create(dated_index)
        print(f"Created index {dated_index}")
        if update_alias:
            update_index_alias(dated_index)
            # Only announce an alias when one was actually set; the previous
            # unconditional message claimed an alias even when update_alias
            # was False or the index already existed.
            print(f"Created index {dated_index} with alias {index_name}")
def load_charachips(dir, file):
    """Load character chips listed in a CSV file into Character.images.

    Each line of ``dir/file`` is ``id,name``; for every entry the image
    ``charachip/<name>.png`` is loaded, split into tiles, and stored in
    ``Character.images`` keyed by the character name.
    """
    path = os.path.join(dir, file)
    # 'with' guarantees the file is closed even if an image fails to load
    # (the previous explicit close() leaked the handle on exceptions).
    with open(path, "r") as fp:
        for line in fp:
            data = line.rstrip().split(",")
            # data[0] is the numeric character id; only the name is used here.
            chara_name = data[1]
            Character.images[chara_name] = split_image(
                load_image("charachip", "%s.png" % chara_name))
def Leq(pressure, reference_pressure=REFERENCE_PRESSURE, axis=-1):
    """
    Time-averaged sound pressure level :math:`L_{p,T}` or equivalent-continuous sound pressure level :math:`L_{p,eqT}` in dB.

    :param pressure: Instantaneous sound pressure :math:`p`.
    :param reference_pressure: Reference value :math:`p_0`.
    :param axis: Axis over which the time average is taken.

    .. math:: L_{p,T} = L_{p,eqT} = 10.0 \\log_{10}{ \\left( \\frac{\\frac{1}{T} \\int_{t_1}^{t_2} p^2 (t) \\mathrm{d} t }{p_0^2} \\right)}

    See section 2.3.
    """
    # Mean-square pressure along the requested axis, referenced to p_0^2.
    mean_square = (pressure ** 2.0).mean(axis=axis)
    ratio = mean_square / reference_pressure ** 2.0
    return 10.0 * np.log10(ratio)
def trimBody(body):
    """Strip reply scaffolding from an email body.

    Removes "On <date>, <someone> wrote:" intros, "sent from my ..."
    signatures, quoted blocks, any remaining '>'-prefixed lines,
    hyperlinks, and email addresses.
    """
    # One multi-line, case-insensitive pass for intros/signatures/quote runs.
    body = re.sub(
        r"(((?:\r?\n|^)((on .+ wrote:[\r\n]+)|(sent from my .+)|(>+[ \t]*[^\r\n]*\r?\n[^\n]*\n*)+)+)+)",
        "",
        body,
        flags=re.I | re.M,
    )
    # Drop any '>'-quoted lines the pass above did not catch.
    unquoted = [line for line in body.split("\n") if not line.startswith(">")]
    body = "\n".join(unquoted)
    # Hyperlinks, then bracketed and bare e-mail addresses.
    body = re.sub(r"[a-z]+://\S+", "", body)
    body = re.sub(r"(<[^>]+>\s*\S+@\S+)", "", body)
    return re.sub(r"(\S+@\S+)", "", body)
def check_host_arp_table_deleted(host, asic, neighs):
    """
    Verify that the given neighbor entries are gone from the ARP table.

    Args:
        host: instance of SonicHost to run the arp show.
        asic: ASIC whose namespace is queried on multi-asic hosts.
        neighs: iterable of neighbor IP strings expected to be deleted.

    Returns:
        bool: True when none of the neighbors remain in the table.
    """
    if host.is_multi_asic:
        facts = host.switch_arptable(namespace=asic.namespace)['ansible_facts']
    else:
        facts = host.switch_arptable()['ansible_facts']
    v4_table = facts['arptable']['v4']
    v6_table = facts['arptable']['v6']
    # A ':' in the address distinguishes IPv6 from IPv4.
    leftovers = [
        ip for ip in neighs
        if ip in (v6_table if ':' in ip else v4_table)
    ]
    logger.debug("On host {} asic {}, found neighbors {} that were supposed to be deleted".format(host, asic.asic_index, leftovers))
    return len(leftovers) == 0
def fit_ellipses(contours):
    """
    Fit ellipses to contour(s).

    Parameters
    ----------
    contours : ndarray or list
        Contour(s) to fit ellipses to.

    Returns
    -------
    ellipses : ndarray or list
        Fitted ellipse(s), matching the container type of the input.
    """
    # A single contour yields a single ellipse; a list yields one per entry.
    if not isinstance(contours, list):
        return cv2.fitEllipse(contours)
    return [cv2.fitEllipse(contour) for contour in contours]
def get_argument_parser() -> ArgumentParser:
    """
    Build the command line parser with its single 'say-hello' subcommand.
    """
    parser = ArgumentParser(description="Say Hello")
    subparsers = parser.add_subparsers(title="subcommands")
    # 'say-hello' takes an optional name and dispatches to do_say_hello.
    say_hello_parser = subparsers.add_parser("say-hello")
    say_hello_parser.add_argument('-n', '--name', help="a name")
    say_hello_parser.set_defaults(func=do_say_hello)
    return parser
def custom_client_datalist_json_path(datalist_json_path: str, client_id: str, prefix: str) -> str:
    """
    Customize datalist_json_path for each client.

    Args:
        datalist_json_path: default datalist_json_path
        client_id: client identifier, e.g. ``site-2``
        prefix: filename prefix for the per-client file

    Returns:
        Path ``<same directory>/<prefix>_<client_id>.json``.
    """
    # Keep the default file's directory, swap the filename for the
    # client-specific one.
    directory = os.path.dirname(datalist_json_path)
    filename = "{}_{}.json".format(prefix, client_id)
    return os.path.join(directory, filename)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.