def get_input(label, default=None):
"""Prompt the user for input.
:param label: The label of the prompt.
    :type label: str
:param default: The default value.
:rtype: str | None
"""
if default:
_label = "%s [%s]: " % (label, default)
else:
_label = "%s: " % label
print("")
value = input(_label)
if not value:
return default
return value
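# A minimal interactive usage sketch (the prompt text and default are illustrative);
# input() blocks until the user responds in a terminal.
project = get_input("Project name", default="demo")
print(project)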
| 16,000
|
def get_pretrain_data_text(data, batch_size, num_ctxes, shuffle,
num_buckets, vocab, tokenizer, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, whole_word_mask,
num_parts=1, part_idx=0, num_workers=1):
"""Get a data iterator from raw text documents.
Parameters
    ----------
    data : str
        The glob pattern matching the raw text files.
    batch_size : int
The batch size per GPU.
num_ctxes : int
The number of GPUs.
shuffle : bool
Whether to shuffle the data.
num_buckets : int
The number of buckets for the FixedBucketSampler for training.
vocab : BERTVocab
The vocabulary.
tokenizer : BERTTokenizer or BERTSPTokenizer
The tokenizer.
max_seq_length : int
The hard limit of maximum sequence length of sentence pairs.
short_seq_prob : float
The probability of sampling sequences shorter than the max_seq_length.
masked_lm_prob : float
The probability of replacing texts with masks/random words/original words.
max_predictions_per_seq : int
The hard limit of the number of predictions for masked words
whole_word_mask : bool
Whether to use whole word masking.
num_parts : int
The number of partitions for the dataset.
part_idx : int
The index of the partition to read.
num_workers : int
        The number of worker processes for dataset construction.
"""
num_files = len(nlp.utils.glob(data))
logging.info('%d files are found.', num_files)
assert num_files >= num_parts, \
'The number of training text files must be no less than the number of ' \
'workers/partitions (%d). Only %d files at %s are found.'%(num_parts, num_files, data)
dataset_params = {'tokenizer': tokenizer, 'max_seq_length': max_seq_length,
'short_seq_prob': short_seq_prob, 'masked_lm_prob': masked_lm_prob,
'max_predictions_per_seq': max_predictions_per_seq, 'vocab':vocab,
'whole_word_mask': whole_word_mask}
dataset_fn = SimpleDatasetFn(BERTPretrainDataset, dataset_params)
sampler_fn = BERTSamplerFn(batch_size, shuffle, num_ctxes, num_buckets)
dataloader_fn = BERTDataLoaderFn(num_ctxes, vocab)
split_sampler = nlp.data.SplitSampler(num_files, num_parts=num_parts, part_index=part_idx)
dataloader = DatasetLoader(data, split_sampler, dataset_fn, sampler_fn, dataloader_fn,
num_dataset_workers=num_workers)
return dataloader
| 16,001
|
def define_app_flags(scenario_num):
""" Define the TensorFlow application-wide flags
Returns:
FLAGS: TensorFlow flags
"""
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean('save_model', False, 'save model to disk')
tf.app.flags.DEFINE_string('summaries_dir', './logs', 'tensorboard summaries')
    tf.app.flags.DEFINE_string('ckpt_dir', './saved_models/', 'checkpoint dir')
tf.app.flags.DEFINE_string('scenario_num', scenario_num, 'Scenario number')
tf.app.flags.DEFINE_string('errors_dir', './errors/', 'Errors dir')
return FLAGS
| 16,002
|
def longitude_validator(value):
"""Perform longitude validation.
"""
valid = -180 < value < 180
if not valid:
        raise ValidationError(_('longitude not in range of -180 < value < 180'))
return value
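# A minimal usage sketch: values inside (-180, 180) are returned unchanged,
# anything outside raises the ValidationError referenced above (e.g. from django.core.exceptions).
print(longitude_validator(-73.9857))  # -73.9857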
| 16,003
|
def getVariables():
"""
Retrieves the variables.json file.
"""
if os.path.exists('variables.json'):
with open('variables.json') as jsonFile:
variables = json.loads(jsonFile.read())
return variables
else:
variables = {}
variables['path'] = ''
return variables
| 16,004
|
def stop_random_tasks(
cluster: str,
task_count: int = None,
task_percent: int = None,
service: str = None,
reason: str = "Chaos Testing",
configuration: Configuration = None,
secrets: Secrets = None,
) -> List[AWSResponse]:
"""
Stop a random number of tasks based on given task_count or task_percent
You can specify a cluster by its ARN identifier or, if not provided, the
default cluster will be picked up.
:param cluster: The ECS cluster Name
:param task_count: The number of tasks to stop
:param task_percent: The percentage of total tasks to stop
:param service: The ECS service name
:param reason: An explanation of why the service was stopped
:param configuration: access values used by actions/probes
:param secrets: values that need to be passed on to actions/probes
:return: List[Dict[str, Any]]
"""
if not any([task_count, task_percent]) or all([task_count, task_percent]):
raise FailedActivity('Must specify one of "task_count", "task_percent"')
client = aws_client("ecs", configuration, secrets)
validate(client, cluster, service)
tasks = list_running_tasks_in_cluster(
cluster=cluster, client=client, service=service
)
if task_percent:
task_count = int(float(len(tasks) * float(task_percent)) / 100)
if len(tasks) < task_count:
raise FailedActivity(
"Not enough running tasks in {} to satisfy "
"stop count {} ({})".format(cluster, task_count, len(tasks))
)
tasks = random.sample(tasks, task_count)
results = []
for task in tasks:
logger.debug(f"Stopping ECS task: {task}")
response = client.stop_task(cluster=cluster, task=task, reason=reason)
results.append(
{
"Task_Id": response["task"]["taskArn"],
"Desired_Status": response["task"]["desiredStatus"],
}
)
return results
| 16,005
|
def test_create_batch_multi_record_update_fails(shared_zone_test_context):
"""
Test recordsets with multiple records cannot be edited in batch (relies on config, skip-prod)
"""
client = shared_zone_test_context.ok_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
# record sets to setup
a_update_name = generate_record_name()
a_update_fqdn = a_update_name + ".ok."
a_update = get_recordset_json(ok_zone, a_update_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_update_name = generate_record_name()
txt_update_fqdn = txt_update_name + ".ok."
txt_update = get_recordset_json(ok_zone, txt_update_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
a_delete_name = generate_record_name()
a_delete_fqdn = a_delete_name + ".ok."
a_delete = get_recordset_json(ok_zone, a_delete_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_delete_name = generate_record_name()
txt_delete_fqdn = txt_delete_name + ".ok."
txt_delete = get_recordset_json(ok_zone, txt_delete_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
batch_change_input = {
"comments": "this is optional",
"changes": [
get_change_A_AAAA_json(a_update_fqdn, change_type="DeleteRecordSet"),
get_change_A_AAAA_json(a_update_fqdn, address="1.2.3.4"),
get_change_A_AAAA_json(a_update_fqdn, address="4.5.6.7"),
get_change_TXT_json(txt_update_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(txt_update_fqdn, text="some-multi-text"),
get_change_TXT_json(txt_update_fqdn, text="more-multi-text"),
get_change_A_AAAA_json(a_delete_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(txt_delete_fqdn, change_type="DeleteRecordSet"),
# adding an HVD so this will fail if accidentally run against wrong config
get_change_A_AAAA_json("high-value-domain")
]
}
to_delete = []
try:
for rs in [a_update, txt_update, a_delete, txt_delete]:
create_rs = client.create_recordset(rs, status=202)
to_delete.append(client.wait_until_recordset_change_status(create_rs, 'Complete'))
response = client.create_batch_change(batch_change_input, status=400)
def existing_err(name, type):
return 'RecordSet with name {} and type {} cannot be updated in a single '.format(name, type) + \
'Batch Change because it contains multiple DNS records (2).'
def new_err(name, type):
return 'Multi-record recordsets are not enabled for this instance of VinylDNS. ' \
'Cannot create a new record set with multiple records for inputName {} and type {}.'.format(name,
type)
assert_error(response[0], error_messages=[existing_err(a_update_fqdn, "A")])
assert_error(response[1], error_messages=[existing_err(a_update_fqdn, "A"), new_err(a_update_fqdn, "A")])
assert_error(response[2], error_messages=[existing_err(a_update_fqdn, "A"), new_err(a_update_fqdn, "A")])
assert_error(response[3], error_messages=[existing_err(txt_update_fqdn, "TXT")])
assert_error(response[4],
error_messages=[existing_err(txt_update_fqdn, "TXT"), new_err(txt_update_fqdn, "TXT")])
assert_error(response[5],
error_messages=[existing_err(txt_update_fqdn, "TXT"), new_err(txt_update_fqdn, "TXT")])
assert_error(response[6], error_messages=[existing_err(a_delete_fqdn, "A")])
assert_error(response[7], error_messages=[existing_err(txt_delete_fqdn, "TXT")])
finally:
clear_recordset_list(to_delete, client)
| 16,006
|
def usage():
"""
    Print short help message.
"""
print('Usage:')
print(' ' + sys.argv[0] + ' --help')
print(' ' + sys.argv[0] + ' [options] /src/dir/path /dst/dir/path')
print('Options:')
print(' --move move files (will remove source files);')
print(' --quiet be quiet;')
print(' --dry-run do nothing, only report files and dirs processing;')
print(' --dnrcd do not remove cleared directories;')
print(' --chmod=Octal permissions for new files. Default is 0644.')
print('Time shifting options:')
print(' --year-shift=Integer')
print(' --month-shift=Integer')
print(' --day-shift=Integer')
print(' --hour-shift=Integer')
print(' --minute-shift=Integer')
print(' --second-shift=Integer')
sys.exit(1)
| 16,007
|
def plot_confusion_matrix(conf_matrix: np.ndarray, cmap: str = 'bwr',
ax=None, show: bool = False,
title: str = 'Confusion matrix') -> None:
"""Plots a confusion matrix.
Args:
conf_matrix (np.ndarray): confusion matrix.
cmap (str, optional): colormap recognized by matplotlib.
Defaults to 'bwr'.
ax (optional): matplotlib ax. Defaults to None.
        show (bool, optional): True to call plt.show. Defaults to False.
        title (str, optional): title of the confusion matrix.
            Defaults to 'Confusion matrix'.
"""
if ax is None:
_, ax = plt.subplots(figsize=(5, 5))
# Plot matrix
ax.matshow(conf_matrix, cmap=cmap, alpha=0.3)
# Add values as text
for i in range(conf_matrix.shape[0]):
for j in range(conf_matrix.shape[1]):
ax.text(x=j, y=i, s=conf_matrix[i, j], va='center', ha='center',
size='x-large')
# Set labels
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title(title)
# Show plot
if show:
plt.show()
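# Usage sketch, assuming matplotlib (as plt) and scikit-learn are available; labels are illustrative.
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
y_true = [0, 1, 1, 0, 1, 1]
y_pred = [0, 1, 0, 0, 1, 1]
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, cmap='Blues', show=True)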
| 16,008
|
def _build_colormap(data, hue, palette, order):
"""Builds a colormap."""
if hue is None:
color_map = {}
else:
if palette is None:
palette = sns.color_palette()
if order is None:
order = data[hue].unique()
color_map = OrderedDict(zip(order, palette))
return color_map
| 16,009
|
def f_is_oword(*args):
"""
f_is_oword(F, arg2) -> bool
See 'is_oword()'
@param F (C++: flags_t)
"""
return _ida_bytes.f_is_oword(*args)
| 16,010
|
def aggregate_CSV_files(data_path):
""" Aggregate the data in CSV files, specified in the config file, into a
single pandas DataFrame object. """
merge_queue = []
for path in data_path:
        data_df = pd.read_csv(path, na_values = ['.'])
data_df.index = pd.to_datetime(data_df['DATE'], format='%Y-%m-%d')
data_df = data_df[data_df.index > c.START_DATE]
del data_df['DATE']
merge_queue.append(data_df)
aggregate_df = pd.concat(merge_queue, sort = True, axis = 1)
aggregate_df.sort_index(inplace = True)
return aggregate_df
| 16,011
|
def test_replace_in_list_single():
"""Test the function with a list of strings"""
x = ['aa', 'bb', 'cc']
replace_in_list(x, ['aa', 'bb'], ['dd', 'ee'])
assert x == ['dd', 'ee', 'cc']
| 16,012
|
def dR2(angle: np_float) -> np.ndarray:
"""Derivative of a rotation matrix around the second axis with respect to the rotation angle
Args:
angle: Scalar, list or numpy array of angles in radians.
Returns:
Numpy array: Rotation matrix or array of rotation matrices.
"""
zero = _zero(angle)
cosA, sinA = np.cos(angle), np.sin(angle)
return _roll_axes(np.array([[-sinA, zero, -cosA], [zero, zero, zero], [cosA, zero, -sinA]]))
| 16,013
|
def build_symm_filter_commands(chainfiles, chromref, outpath, cmd, jobcall):
"""
    Build per-chromosome symmetry-filter commands for each chain file.
    :return: list of [chainfile, outputfile, command, jobcall] parameter lists
"""
chromfiles = collect_full_paths(chromref, '*.tsv')
assert chromfiles, 'No chromosome files found at location: {}'.format(chromref)
assm_chrom = dict()
for chrf in chromfiles:
assm = os.path.basename(chrf).split('_')[0]
sizes = read_chromsizes(chrf)
assm_chrom[assm] = list(sizes.keys())
params = []
for chf in chainfiles:
fn = os.path.basename(chf)
target, query = fn.split('.', 1)[0].split('_to_')
chroms = assm_chrom[query]
for c in chroms:
outname = '{}_to_{}.{}.symmap.tsv.gz'.format(target, query, c)
outfull = os.path.join(outpath, outname)
tmp = cmd.format(**{'chrom': c})
params.append([chf, outfull, tmp, jobcall])
if len(chainfiles) > 0:
assert params, 'No parameters created for chain symmetry filtering'
return params
| 16,014
|
def clear_predecessor(n):
"""
Sets n's predecessor to None
:param n: node on which to call clear_predecessor
:return: string of response
"""
def clear(node):
node.predecessor = None
n.event_queue.put(clear)
resp_header = {"status": STATUS_OK}
return utils.create_request(resp_header, {})
| 16,015
|
def resource_file():
"""
Create an empty resource file
:return:
"""
def _resource_file(dirname, filename):
filepath = os.path.join(dirname, filename)
open(filepath, 'a').close()
return filepath
return _resource_file
| 16,016
|
def batch_generator(X, y, batch_size, samples_per_epoch):
"""Generate mini-batches."""
number_of_batches = int(samples_per_epoch / batch_size)
shuffle_index = np.arange(np.shape(y)[0])
np.random.shuffle(shuffle_index)
X = X[shuffle_index, :]
y = y[shuffle_index]
for i in range(number_of_batches):
index_batch = shuffle_index[batch_size * i:batch_size * (i + 1)]
X_batch = X[index_batch, :]
if isinstance(X_batch, csr_matrix):
X_batch = X_batch.todense()
y_batch = y[index_batch]
yield np.array(X_batch), y_batch
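# Usage sketch with dense numpy data; assumes numpy (np) and scipy.sparse.csr_matrix
# are importable, since the generator body references both.
import numpy as np
from scipy.sparse import csr_matrix
X = np.random.rand(100, 20)
y = np.random.randint(0, 2, size=100)
for X_batch, y_batch in batch_generator(X, y, batch_size=32, samples_per_epoch=96):
    print(X_batch.shape, y_batch.shape)  # (32, 20) (32,)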
| 16,017
|
def get_local_variable_influence(model, form_data):
"""
"""
row = format_data_to_row(form_data)
model_obj = read_model(model.path, model.file_type)
df = load_dataset_sample(model.dataset, nrows=50)
df = df[model.dataset.model_columns]
explainer = load_model_explainer_from_obj(model_obj, df)
prediction = list()
prediction.append(model_obj.predict(row)[0])
if hasattr(model_obj, 'predict_proba'):
prediction.append(model_obj.predict_proba(row)[0])
base_value = explainer.explainer.expected_value
variable_influence = compute_local_influence(explainer, row)
return variable_influence, prediction, base_value
| 16,018
|
def Print(text='', newline=True, colour=None):
"""Handle a line of output to the terminal.
In test mode this is recorded in a list. Otherwise it is output to the
terminal.
Args:
text: Text to print
newline: True to add a new line at the end of the text
colour: Colour to use for the text
"""
if print_test_mode:
print_test_list.append(PrintLine(text, newline, colour))
else:
if colour:
col = Color()
text = col.Color(colour, text)
print(text, end='')
if newline:
print()
else:
sys.stdout.flush()
| 16,019
|
def submit_experiment(body, **kwargs):
"""Submit an experiment
:param body: experiment payload
:type body: dict | bytes
:rtype: StatusSerializer
"""
serializer = ExperimentSerializer.from_dict(body)
check_experiment_permission(serializer, kwargs["token_info"])
stub = get_experiments_services_stub()
response = stub.Submit(job_pb2.Experiment(**body))
if response.status != 200:
return ErrorSerializer(status=response.status, title="Api Error",
detail=response.message), response.status
return StatusSerializer.from_dict(util.deserialize_protobuf(response))
| 16,020
|
def obj_assert_check(cls):
"""
The body of the assert check for an accessor
We allow all versions of add/delete/modify to use the same accessors
"""
if cls in ["of_flow_modify", "of_flow_modify_strict",
"of_flow_delete", "of_flow_delete_strict",
"of_flow_add"]:
return "IS_FLOW_MOD_SUBTYPE(obj->object_id)"
else:
return "obj->object_id == %s" % cls.upper()
| 16,021
|
def ripemd160(data: List[int]) -> List[int]:
"""
    :param data: the input bytes given as a list of integers.
    :return: the RIPEMD-160 digest left-padded with 12 zero bytes, as a list of integers.
"""
try:
bytes_data = bytes(data)
except TypeError:
raise NativeContractException
digest = hashlib.new("ripemd160", bytes_data).digest()
padded = 12 * [0] + list(digest)
return list(bytearray(bytes(padded)))
| 16,022
|
def has_duplicates(s: list) -> bool:
"""Returns True if any element appears more than once in a sequence."""
d = dict()
for char in s:
if char in d:
return True
d[char] = 1
return False
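# Quick sanity check with a string and a list.
print(has_duplicates("abca"))     # True
print(has_duplicates([1, 2, 3]))  # False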
| 16,023
|
def test_slicing_on_instances_3():
"""
Like `test_slicing_on_instances_2` but uses a compound model that does not
have any invalid slices due to the resulting model being invalid
(originally test_slicing_on_instances_2 passed without any
ModelDefinitionErrors being raised, but that was before we prevented
invalid models from being created).
"""
model_a = Shift(1, name='a')
model_b = Shift(2, name='b')
model_c = Gaussian1D(3, 0, 0.1, name='c')
model_d = Scale(2, name='d')
model_e = Scale(3, name='e')
m = (model_a + model_b) | model_c | (model_d + model_e)
assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['c':'d'].submodel_names == ('c', 'd')
assert m[1:2].name == 'b'
assert m[2:7].submodel_names == ('c', 'd', 'e')
with pytest.raises(IndexError):
m['x']
with pytest.raises(IndexError):
m['a': 'r']
assert m[-4:4].submodel_names == ('b', 'c', 'd')
assert m[-4:-2].submodel_names == ('b', 'c')
| 16,024
|
def get_from_chain(J, domain, nof_coefficients, ncap=10000, disc_type='sp_quad', interval_type='lin',
mapping_type='lan_bath', permute=None, residual=True, low_memory=True, stable=False,
get_trafo=False, force_sp=False, mp_dps=30, sort_by=None, **kwargs):
"""
Returns star coefficients, constructed from chain coefficients via diagonalization
see chain.get and convert_chain_to_star for an explanation of the arguments.
Sort_by sorts the couplings and energies (if passed and not None), see utils.sorting.sort_star_coefficients
for details on the parameters.
:returns: gamma (couplings), xi (energies), info dict from both the conversion and the chain mapping
if get_trafo is set True, the dict only contains the latest transformation (from chain to star here)
"""
c0, omega, t, info = get_chain(J, domain, nof_coefficients, ncap=ncap, disc_type=disc_type,
interval_type=interval_type, mapping_type=mapping_type, permute=permute,
residual=residual, low_memory=low_memory, stable=stable,
get_trafo=False, **kwargs)
gamma, xi, trafo_info = convert_chain_to_star(c0, omega, t, force_sp=force_sp, mp_dps=mp_dps, get_trafo=get_trafo)
gamma, xi = sort_star_coefficients(gamma, xi, sort_by)
    info.update(trafo_info)
    return gamma, xi, info
| 16,025
|
def factory(kernel_type, cuda_type=None, gpu_mode=None, *args, **kwargs):
"""Return an instance of a kernel corresponding to the requested kernel_type"""
if cuda_type is None:
cuda_type = default.dtype
if gpu_mode is None:
gpu_mode = default.gpu_mode
# turn enum string to enum object
if isinstance(kernel_type, str):
try:
for c in [' ', '-']: # chars to be replaced for normalization
kernel_type = kernel_type.replace(c, '_')
kernel_type = Type[kernel_type.upper()]
except:
raise TypeError('kernel_type ' + kernel_type + ' could not be found')
if not isinstance(kernel_type, Type):
raise TypeError('kernel_type must be an instance of KernelType Enum')
if kernel_type in [Type.UNDEFINED, Type.NO_KERNEL]:
return None
res = None
hash = AbstractKernel.hash(kernel_type, cuda_type, gpu_mode, *args, **kwargs)
if hash not in instance_map:
res = kernel_type.value(gpu_mode=gpu_mode, cuda_type=cuda_type, *args, **kwargs) # instantiate
instance_map[hash] = res
else:
res = instance_map[hash]
assert res is not None
return res
| 16,026
|
def init_scaler(
scaler_parameters: Dict, fit_data: np.ndarray,
) -> Union[MinMaxScaler, StandardScaler, RobustScaler]:
"""Initialize and return scaler.
Args:
scaler_parameters: Parameters of scaler.
fit_data: Data to be fit.
Returns:
Selected scaler.
"""
scaler_type = scaler_parameters["scaler_type"]
if scaler_type == "RobustScaler":
scaler = RobustScaler()
elif scaler_type == "StandardScaler":
scaler = StandardScaler()
else:
scaler = MinMaxScaler()
scaler.fit(fit_data)
return scaler
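# Usage sketch, assuming scikit-learn is installed; the parameter dict mirrors the expected schema.
import numpy as np
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
data = np.array([[1.0], [2.0], [3.0], [100.0]])
scaler = init_scaler({"scaler_type": "RobustScaler"}, fit_data=data)
print(scaler.transform(data).ravel())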
| 16,027
|
def get_dynamic_resource(previous_length: str):
"""Get the job with job_name.
Returns:
None.
"""
name_to_node_usage = redis_controller.get_resource_usage(
previous_length=int(previous_length)
)
return name_to_node_usage
| 16,028
|
def MoveAndMerge(src_dir, dst_dir):
"""Moves data files from src_dir and merges with data files on dst_dir"""
file_utils.TryMakeDirs(os.path.join(dst_dir, ATT_DIR_NAME))
for att_name in os.listdir(os.path.join(src_dir, ATT_DIR_NAME)):
att_src_path = os.path.join(src_dir, ATT_DIR_NAME, att_name)
att_dst_path = os.path.join(dst_dir, ATT_DIR_NAME, att_name)
if not os.path.isfile(att_dst_path):
shutil.move(att_src_path, att_dst_path)
file_utils.SyncDirectory(os.path.join(dst_dir, ATT_DIR_NAME))
with open(os.path.join(dst_dir, EVENT_FILE_NAME), 'a') as dst_f:
with open(os.path.join(src_dir, EVENT_FILE_NAME), 'r') as src_f:
shutil.copyfileobj(src_f, dst_f)
dst_f.flush()
os.fdatasync(dst_f.fileno())
file_utils.SyncDirectory(dst_dir)
| 16,029
|
def _margo_bin(exe=""):
"""Returns the path of the margo executable.
"""
return gs.home_path("bin", exe or INSTALL_EXE)
| 16,030
|
def csl_density(basis, mini_cell, plane):
"""
returns the CSL density of a given plane and its d_spacing.
"""
plane = np.array(plane)
c = csl_vec(basis, mini_cell)
h = np.dot(c.T, plane)
h = smallest_integer(h)[0]
h = common_divisor(h)[0]
g = np.linalg.inv(np.dot(c.T, c))
h_norm = np.sqrt(np.dot(h.T, np.dot(g, h)))
density = 1/(h_norm * np.linalg.det(c))
return abs(density), 1 / h_norm
| 16,031
|
def _concatenate_multiple_freq(at_cache, multi_rt_array, multi_time_array,
inplace=False, tick_time_field='tradeTime'):
"""
Concatenate multiple frequency data.
Args:
at_cache(dict): current at multiple cache data
multi_rt_array(matrix): multiple real-time data array
multi_time_array(matrix): multiple real-time time array
inplace(Boolean): whether to replace the latest bar
tick_time_field(string): tick time field name
"""
column_size = at_cache[tick_time_field].size
total_column_size = column_size + len(multi_time_array)
if inplace:
total_column_size -= 1
increment_column_size = max(total_column_size - column_size, 1)
matrix = np.zeros((len(EQUITY_RT_VALUE_FIELDS), total_column_size))
for _, field in enumerate(EQUITY_RT_VALUE_FIELDS):
if inplace:
matrix[_, :(column_size - 1)] = at_cache[field][:-1]
else:
matrix[_, :column_size] = at_cache[field]
matrix[:, -increment_column_size:] = multi_rt_array.T
for i, _ in enumerate(matrix):
at_cache[EQUITY_RT_VALUE_FIELDS[i]] = _
matrix_time = np.empty((len(EQUITY_RT_TIME_FIELDS), total_column_size), dtype='|S16')
for _, field in enumerate(EQUITY_RT_TIME_FIELDS):
if inplace:
matrix_time[_, :(column_size - 1)] = at_cache[field][:-1]
else:
matrix_time[_, :column_size] = at_cache[field]
matrix_time[:, -increment_column_size:] = multi_time_array.T
for i, _ in enumerate(matrix_time):
at_cache[EQUITY_RT_TIME_FIELDS[i]] = _
| 16,032
|
def mw_Av():
"""Build the A_V attenuation by the MW towards M31."""
curve = SF11ExtinctionCurve()
ratio = curve['Landolt V'] # A_V / E(B-V) from T6 of SF2011
return ratio * 0.07
| 16,033
|
def load_model(model_uri):
"""
Load an H2O model from a local file (if ``run_id`` is ``None``) or a run.
This function expects there is an H2O instance initialised with ``h2o.init``.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:return: An `H2OEstimator model object
<http://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/intro.html#models>`_.
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
# Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a
# `data` key; in this case, we assume the model artifact path to be `model.h2o`
h2o_model_file_path = os.path.join(local_model_path, flavor_conf.get("data", "model.h2o"))
return _load_model(path=h2o_model_file_path)
| 16,034
|
def load_data(filenames):
"""Load a single file or sequence of files using skimage.io"""
filenames = [filenames, ] if isinstance(filenames, str) else filenames
loadfunc = tifffile.imread if all(f.lower().endswith("tif")
for f in filenames) else skio.imread
if len(filenames) > 1:
return np.array([loadfunc(f) for f in filenames], dtype=float)
elif len(filenames) == 1:
return loadfunc(filenames[0]).astype(float)
else:
raise Exception("load_data received an empty list")
| 16,035
|
def GetInverseMatrix(matrix):
"""
:param matrix: the matrix which will get its inverse matrix
:return: the inverse matrix(two dimensions only)
"""
matrix[0, 0], matrix[1, 1] = -matrix[1, 1], -matrix[0, 0]
matrix = matrix / -(matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0])
return matrix
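# Verification sketch against plain matrix multiplication (2x2 only, as documented).
import numpy as np
m = np.array([[4.0, 7.0], [2.0, 6.0]])
inv = GetInverseMatrix(m.copy())  # pass a copy: the element swap mutates the argument
print(np.allclose(inv @ np.array([[4.0, 7.0], [2.0, 6.0]]), np.eye(2)))  # True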
| 16,036
|
def petsc_memory_stats(log):
"""Return the memory stats section of PETSc's -log_view output as a dictionary."""
# first search for the 'Memory usage' header, then match anything that follows
# after the first line starting with --- up until the first line starting with =====
# re.DOTALL makes . match newlines as well
try:
memory_profile = re.finditer('Memory usage is given in bytes:.*?\n---[^\n].*?\n(.*?)\n===========', log, re.DOTALL).next().group(1)
except StopIteration:
# no memory stats section found (did you run with -log_view ?)
return None
stats = {}
for line in memory_profile.split('\n'):
try:
(object, profile) = re.finditer('(\s.*?)([0-9]+.*)', line).next().groups()
except StopIteration:
continue
profile = profile.split()
stats[object.strip()] = [int(x) for x in profile[0:3]] + [float(profile[3]),]
return stats
| 16,037
|
def test_tile_valid_default():
"""Should return a 3 bands array and a full valid mask."""
with COGReader(COG_NODATA) as cog:
# Full tile
data, mask = cog.tile(43, 24, 7)
assert data.shape == (1, 256, 256)
assert mask.all()
tile_bounds = WEB_MERCATOR_TMS.xy_bounds(43, 24, 7)
data_part, _ = cog.part(
tile_bounds,
bounds_crs=WEB_MERCATOR_TMS.crs,
width=256,
height=256,
max_size=None,
)
assert numpy.array_equal(data, data_part)
# Partial tile
data, mask = cog.tile(42, 24, 7)
assert data.shape == (1, 256, 256)
assert not mask.all()
# Expression
data, mask = cog.tile(43, 24, 7, expression="b1*2,b1-100")
assert data.shape == (2, 256, 256)
with pytest.warns(ExpressionMixingWarning):
data, _ = cog.tile(43, 24, 7, indexes=(1, 2, 3), expression="b1*2")
assert data.shape == (1, 256, 256)
data, mask = cog.tile(43, 24, 7, indexes=1)
assert data.shape == (1, 256, 256)
data, mask = cog.tile(43, 24, 7, indexes=(1, 1,))
assert data.shape == (2, 256, 256)
# We are using a file that is aligned with the grid so no resampling should be involved
with COGReader(COG_WEB) as cog:
img = cog.tile(147, 182, 9)
img_buffer = cog.tile(147, 182, 9, tile_buffer=10)
assert img_buffer.width == 276
assert img_buffer.height == 276
assert not img.bounds == img_buffer.bounds
assert numpy.array_equal(img.data, img_buffer.data[:, 10:266, 10:266])
| 16,038
|
def track2result(bboxes, labels, ids, num_classes):
"""Convert tracking results to a list of numpy arrays.
Args:
bboxes (torch.Tensor | np.ndarray): shape (n, 5)
labels (torch.Tensor | np.ndarray): shape (n, )
ids (torch.Tensor | np.ndarray): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): tracking results of each class.
"""
valid_inds = ids > -1
bboxes = bboxes[valid_inds]
labels = labels[valid_inds]
ids = ids[valid_inds]
if bboxes.shape[0] == 0:
return [np.zeros((0, 6), dtype=np.float32) for i in range(num_classes)]
else:
if isinstance(bboxes, torch.Tensor):
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
ids = ids.cpu().numpy()
return [
np.concatenate((ids[labels == i, None], bboxes[labels == i, :]),
axis=1) for i in range(num_classes)
]
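# Usage sketch with toy tensors; assumes torch and numpy (np) are importable as in the function body.
import numpy as np
import torch
bboxes = torch.tensor([[0., 0., 10., 10., 0.9], [5., 5., 15., 15., 0.8]])
labels = torch.tensor([0, 1])
ids = torch.tensor([3, -1])  # the second detection carries no track id and is dropped
results = track2result(bboxes, labels, ids, num_classes=2)
print([r.shape for r in results])  # [(1, 6), (0, 6)]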
| 16,039
|
def build_result_dataframe(gh, pred, df):
""" Construct a datarame that contain the prediction.
:param gh: the geohas6 code of the prediction
:param pred: numpy array of prediction
:param df: the dataframe used for prediction
:returns: prediction dataframe
:rtype: pandas.core.frame.DataFrame
"""
# generate a sequence of timestamp
start_time = df.timestamp.values.max() + np.timedelta64(15, 'm')
timestamps = pd.date_range(start_time, periods=len(pred), freq='15T')
    # calculate the 'day' column of the dataframe
dtdelta = (timestamps.date - df.timestamp.max().date())
dtdelta = list(map(lambda x: x.days, dtdelta))
days = dtdelta + df.day.max()
    # calculate time of day
tod = list(map(lambda x: x.strftime('%H:%M'), timestamps.time))
# construct the result dictionary
res = {'geohash6': [gh] * len(pred),
'day': days,
'timestamp': tod,
'demand': pred
}
return pd.DataFrame(res)
| 16,040
|
def find_module(module_name: str, search_paths: Sequence[str | Path] | None = None) -> Path: # noqa: WPS231
"""Find a module in a given list of paths or in `sys.path`.
Parameters:
module_name: The module name.
search_paths: The paths to search into.
Raises:
ModuleNotFoundError: When the module cannot be found.
Returns:
The module file path.
"""
# optimization: pre-compute Paths to relieve CPU when joining paths
search = [path if isinstance(path, Path) else Path(path) for path in search_paths or sys.path]
parts = module_name.split(".")
# always search a .pth file first using the first part
for path in search:
top_pth = Path(f"{parts[0]}.pth")
abs_top_pth = path / top_pth
if abs_top_pth.exists():
with suppress(UnhandledPthFileError):
location = _handle_pth_file(abs_top_pth)
if location.suffix:
location = location.parent
search = [location.parent]
# TODO: possible optimization
# always break if exists?
break
# resume regular search
filepaths = [
# TODO: handle .py[cod] and .so files?
Path(*parts, "__init__.py"),
Path(*parts[:-1], f"{parts[-1]}.py"),
Path(*parts[:-1], f"{parts[-1]}.pth"),
Path(*parts), # namespace packages, try last
]
for path in search: # noqa: WPS440
for choice in filepaths:
abs_path = path / choice
# optimization: just check if the file exists,
# not if it's an actual file
if abs_path.exists():
if abs_path.name.endswith(".pth"):
try:
return _handle_pth_file(abs_path)
except UnhandledPthFileError as error:
raise ModuleNotFoundError(module_name) from error
return abs_path
raise ModuleNotFoundError(module_name)
| 16,041
|
def bandwidth_limited_write(in_file, out_file, kbits_per_sec_str,
post_delay_compression):
"""Bandwidth limited writing.
Args:
in_file: file, the file to read the data from.
out_file: file, the file to write the data to.
kbits_per_sec_str: string, the bandwidth speed.
post_delay_compression: bool, a hack to compensate for compressed data
vs uncompressed data.
"""
try:
kbits_per_sec = float(kbits_per_sec_str)
except ValueError:
kbits_per_sec = 0
if not kbits_per_sec:
while True:
data = in_file.read()
if not data:
return
out_file.write(data)
kbytes_per_sec = kbits_per_sec / 8
ms_per_k = 1000 / kbytes_per_sec
if post_delay_compression:
ms_per_k /= 2
chunk_size = 512
t0 = time()
chunks_sent = 0
delay_per_chunk = (ms_per_k / 1024.0) / 1024 * chunk_size
while True:
chunk = in_file.read(chunk_size)
if not chunk:
break
chunks_sent += 1
needed_sleep_time = chunks_sent * delay_per_chunk - (time() - t0)
if needed_sleep_time > 0:
sleep(needed_sleep_time)
out_file.write(chunk)
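# Usage sketch with in-memory streams; assumes time() and sleep() from the time module
# are imported at module level, since the function body uses them.
import io
from time import sleep, time
src = io.BytesIO(b"x" * 4096)
dst = io.BytesIO()
bandwidth_limited_write(src, dst, "64", post_delay_compression=False)  # ~64 kbit/s
print(len(dst.getvalue()))  # 4096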
| 16,042
|
def test_logsources_type(self):
"""
        Check that the type of the log source matches its associated value.
Returns:
"""
log_source = LogSources.objects.get(Type="Iptables")
self.assertEqual(log_source.get_type(), "Iptables")
| 16,043
|
def day_1_puzzle_1_solution() -> int:
"""Use this function to return the total fuel requirements for all of the modules.
This function is used for reading the text file of puzzle data and returning the
total amount of fuel that is required for the modules.
:return: the total fuel requirement.
"""
return sum([calculate_fuel(int(mass)) for mass in get_puzzle_input()])
| 16,044
|
def printc(*args, **kwargs):
""" Analog to the print() function, but accepts Color objects to change colors
Any Color objects will cause the output color to change for subsequent text.
Other objects will be printed as usual.
end is always printed without color, this avoids common problems if the trailing
return is printed with color attributes.
If color is off, the call is equivalent to
print(*[s for s in args if type(s) is not Color], **kwargs)
"""
file = kwargs.get('file', _sys.stdout)
use = willPrintColor(file)
if not use:
# strip all color objects and print
ss = [s for s in args if type(s) is not Color]
print(*ss, **kwargs)
return
sep0 = str(kwargs.get('sep', ' '))
end = str(kwargs.get('end', '\n'))
try:
if _need_flush: file.flush()
sep = None
for s in args:
if type(s) is Color:
_set_color(s, file)
else:
# handle separators. Colors do not trigger
# separators
if sep is not None:
_print_el(file, sep)
sep = None
_print_el(file, str(s))
sep = sep0
finally:
_set_color(C_RESET, file)
_print_el(file, end)
| 16,045
|
def is_description_style(style):
""" True if this is a style used for Relationships paragraph text """
return is_style(style, 'Normal') or is_style(style, 'Note')
| 16,046
|
def inject_content_head_last(html, content):
"""
    Insert the text content at the end of the head element.
:type html: str
:type content: str
:rtype: str
"""
    head_end_pos = html.find("</head")  # find the position where the </head> tag begins
    if head_end_pos == -1:
        # no </head> tag found, so skip the insertion
return html
return html[:head_end_pos] + content + html[head_end_pos:]
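# Quick usage sketch with an inline HTML string.
html = "<html><head><title>demo</title></head><body></body></html>"
print(inject_content_head_last(html, "<script src='app.js'></script>"))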
| 16,047
|
def download(ticker: str,
start: Union[pd.Timestamp, str] = None,
end: Union[pd.Timestamp, str] = None,
frequency: str = "day") -> pd.DataFrame:
"""
Download market data from yahoo finance using the yfinance library from ticker `ticker` from `start` to `end`
at a specific frequency (day, hour or minute).
:param str ticker: Ticker, e.g. "AAPL" or "GOOG".
:param pd.Timestamp,str start: Starting date for fetching the data as a pd.Timestamp or a "YYYY-MM-DD HH:MM:SS" str.
If None, the oldest possible date is used by yfinance. `start` is **always** truncated to max 730 days
from today for `frequency="1h"`and to max 30 days for `frequency="1m"`. Default is None.
:param pd.Timestamp,str end: End date for fetching the data as a pd.Timestamp or a "YYYY-MM-DD HH:MM:SS" str.
If None, today is used ( `pd.Timestamp.today().floor("D")` ). Default is None.
:param str frequency: Frequency at which the data is sampled, can be daily ("day", "daily", "d", "1d"), hourly
("hour", "hourly", "h", "1h") or every minute ("minute", "m", "1m"). Default is "day".
:return: market data as a pd.DataFrame with columns "Open", "High", "Low", "Close", "Adj Close", "Volume".
"""
today = pd.Timestamp.today().floor('D')
if end is None:
end = today
elif isinstance(end, str):
end = pd.Timestamp(end)
day_set = {"day", "daily", "d", "1d"}
hour_set = {"hour", "hourly", "h", "1h"}
minute_set = {"minute", "m", "1m"}
if frequency.lower() in day_set:
df = yf.download(ticker, start=start, end=end, interval="1d")
elif frequency.lower() in hour_set.union(minute_set):
        if frequency.lower() in hour_set:
frequency = "1h"
# Range is limited to 730 days max (including today so 729)
limit = pd.Timedelta(days=729)
# Dummy limit for the download
batchlimit = pd.Timedelta(days=1000)
else:
frequency = "1m"
# Range is limited to 30 days max (including today)
limit = pd.Timedelta(days=29)
# Limit of 7 days for the download of minute data
batchlimit = pd.Timedelta(days=7)
        # Check the start point and clamp it to the allowed range
        if start is None:
            start = today - limit
        elif isinstance(start, str):
            start = pd.Timestamp(start)
        start = max(start, today - limit)
# Download by batches (effective only for minute data)
local_start = start
local_end = min(local_start + batchlimit, end)
df = yf.download(ticker, start=local_start, end=local_end, interval=frequency)
while local_end < end:
local_start = local_end
local_end = min(local_start + batchlimit, end)
df = pd.concat((df, yf.download(ticker, start=local_start, end=local_end, interval=frequency)))
else:
raise ValueError(f"Wrong `frequency` argument ({frequency}). "
f"Should be in {day_set}, {hour_set} or {minute_set}.")
if df is None:
raise EmptyDataError
elif not isinstance(df, pd.DataFrame):
raise EmptyDataError
else:
if len(df) == 0:
raise EmptyDataError
if df.columns.nlevels == 2:
df = df.swaplevel(axis=1)
df.sort_index(axis=1, inplace=True)
return df
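# Hypothetical call; requires network access, the yfinance package (as yf) and pandas (as pd).
# Ticker and dates are illustrative only.
import pandas as pd
import yfinance as yf
df = download("AAPL", start="2021-01-04", end="2021-02-01", frequency="day")
print(df.head())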
| 16,048
|
def sortUrlList(urlList):
"""Return ordered url list (localFile, DAP, HTTP, FTP)."""
#localList = [url for url in urlList if os.path.exists(url)]
#dodsList = [url for url in urlList if sciflo.utils.isDODS(url)]
#httpList = [url for url in urlList if not sciflo.utils.isDODS(url) and url.startswith('http')]
#ftpList = [url for url in urlList if url.startswith('ftp')]
#localList.extend(dodsList); localList.extend(httpList); localList.extend(ftpList)
fileUrlList = []
localList = []
dodsList = []
httpList = []
ftpList = []
allList = []
for url in urlList:
if isinstance(url, types.StringTypes) and '.xfr' in url: continue
if os.path.exists(url): localList.insert(0,url)
elif url.startswith('file://'): fileUrlList.insert(0, url)
elif url.startswith('http') and re.search(r'(dods|opendap)',url,re.IGNORECASE): dodsList.insert(0,url)
elif url.startswith('http'):
if '.ecs.nasa.gov' in url: httpList.insert(0,url)
else: httpList.append(url)
else: ftpList.append(url)
localList.sort(); localList.reverse()
#allList.extend(dodsList); allList.extend(ftpList); allList.extend(httpList)
#allList.extend(localList); allList.extend(fileUrlList)
allList.extend(ftpList); allList.extend(httpList); allList.extend(dodsList)
allList.extend(localList); allList.extend(fileUrlList)
return allList
| 16,049
|
def main(args):
""" Main method
"""
# await/async requires python >= 3.5
    if sys.version_info < (3, 5):
print("Error, language features require the latest python version.")
print("Please install python 3.8 or greater")
return 1
    # Force tiered compilation off. It will affect both collection and replay
os.environ["COMPlus_TieredCompilation"] = "0"
coreclr_args = setup_args(args)
success = True
if coreclr_args.mode == "collect":
# Start a new SuperPMI Collection.
begin_time = datetime.datetime.now()
print("SuperPMI Collect")
print("------------------------------------------------------------")
print("Start time: {}".format(begin_time.strftime("%H:%M:%S")))
collection = SuperPMICollect(coreclr_args)
success = collection.collect()
print("Finished SuperPMI collect")
        if coreclr_args.output_mch_path is not None:
print("mch path: {}".format(coreclr_args.output_mch_path))
end_time = datetime.datetime.now()
print("Finish time: {}".format(end_time.strftime("%H:%M:%S")))
elif coreclr_args.mode == "replay":
# Start a new SuperPMI Replay
begin_time = datetime.datetime.now()
print("SuperPMI Replay")
print("------------------------------------------------------------")
print("Start time: {}".format(begin_time.strftime("%H:%M:%S")))
mch_file = coreclr_args.mch_file
jit_path = coreclr_args.jit_path
print("")
print("MCH Path: {}".format(mch_file))
print("JIT Path: {}".format(jit_path))
replay = SuperPMIReplay(coreclr_args, mch_file, jit_path)
success = replay.replay()
print("Finished SuperPMI replay")
end_time = datetime.datetime.now()
print("Finish time: {}".format(end_time.strftime("%H:%M:%S")))
elif coreclr_args.mode == "asmdiffs":
# Start a new SuperPMI Replay with AsmDiffs
begin_time = datetime.datetime.now()
print("SuperPMI ASM diffs")
print("------------------------------------------------------------")
print("Start time: {}".format(begin_time.strftime("%H:%M:%S")))
mch_file = coreclr_args.mch_file
base_jit_path = coreclr_args.base_jit_path
diff_jit_path = coreclr_args.diff_jit_path
print("")
print("MCH Path: {}".format(mch_file))
print("Base JIT Path: {}".format(base_jit_path))
print("Diff JIT Path: {}".format(diff_jit_path))
asm_diffs = SuperPMIReplayAsmDiffs(coreclr_args, mch_file, base_jit_path, diff_jit_path)
success = asm_diffs.replay_with_asm_diffs(coreclr_args.previous_temp_location)
print("Finished SuperPMI replay")
end_time = datetime.datetime.now()
print("Finish time: {}".format(end_time.strftime("%H:%M:%S")))
elif coreclr_args.mode == "upload":
begin_time = datetime.datetime.now()
print("SuperPMI upload")
print("------------------------------------------------------------")
print("Start time: {}".format(begin_time.strftime("%H:%M:%S")))
upload_mch(coreclr_args)
print("Finished SuperPMI upload")
end_time = datetime.datetime.now()
print("Finish time: {}".format(end_time.strftime("%H:%M:%S")))
elif coreclr_args.mode == "list-collections":
index = download_index(coreclr_args)
index_count = len(index)
print("SuperPMI list-collections")
print("")
print("{} different collections".format(index_count))
print("")
for item in index:
print(item)
print("")
else:
raise NotImplementedError(coreclr_args.mode)
return 0 if success else 1
| 16,050
|
def _ps_run_one_reset_kwargs(G, reset_kwargs: tuple, eval: bool):
"""
Sample one rollout with given init state and domain parameters, passed as a tuple for simplicity at the other end.
This function is used when a minimum number of rollouts was given.
"""
if len(reset_kwargs) != 2:
raise pyrado.ShapeErr(given=reset_kwargs, expected_match=(2,))
if not isinstance(reset_kwargs[0], np.ndarray):
raise pyrado.TypeErr(given=reset_kwargs[0], expected_type=np.ndarray)
if not isinstance(reset_kwargs[1], dict):
raise pyrado.TypeErr(given=reset_kwargs[1], expected_type=dict)
return rollout(
G.env, G.agent, eval=eval, reset_kwargs=dict(init_state=reset_kwargs[0], domain_param=reset_kwargs[1])
)
| 16,051
|
def k8s_config(monkeypatch):
"""Configure k8s for test-runs"""
monkeypatch.setattr(config, "api_server", "http://localhost:8080")
monkeypatch.setattr(config, "verify_ssl", False)
| 16,052
|
def test_compiling_a_sequence_not_compiling2(workspace, root, monkeypatch,
exopy_qtbot, dialog_sleep):
"""Test compiling a sequence that can be evaluated but not compiled.
"""
def __raise(*args, **kwargs):
return False, {}, {'test': False}
from exopy_pulses.testing.context import TestContext
monkeypatch.setattr(TestContext, 'compile_and_transfer_sequence',
__raise)
workbench = workspace.workbench
ui = workbench.get_plugin('enaml.workbench.ui')
ui.show_window()
exopy_qtbot.wait(10 + dialog_sleep)
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{4_start} + 0.5',
def_2='{4_start}+{4_duration}-0.5')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(time_constrained=True,
def_1='{3_stop} + 0.5', def_2='6')
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
workspace.state.sequence = root
dial = CompileDialog(workspace=workspace)
dial.show()
wait_for_window_displayed(exopy_qtbot, dial)
comp_widget = dial.central_widget().widgets()[0]
comp_widget.widgets()[-1].clicked = True
def assert_exec():
assert comp_widget.elapsed_time
assert comp_widget.errors
assert comp_widget.widgets()[-2].background == parse_color('red')
exopy_qtbot.wait_until(assert_exec)
| 16,053
|
def deprecated(message, exception=PendingDeprecationWarning):
"""Throw a warning when a function/method will be soon deprecated
Supports passing a ``message`` and an ``exception`` class
(uses ``PendingDeprecationWarning`` by default). This is useful if you
want to alternatively pass a ``DeprecationWarning`` exception for already
deprecated functions/methods.
Example::
>>> import warnings
>>> from functools import wraps
>>> message = "this function will be deprecated in the near future"
>>> @deprecated(message)
... def foo(n):
... return n+n
>>> with warnings.catch_warnings(record=True) as w:
... warnings.simplefilter("always")
... foo(4)
... assert len(w) == 1
... assert issubclass(w[-1].category, PendingDeprecationWarning)
... assert message == str(w[-1].message)
... assert foo.__name__ == 'foo'
8
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(message, exception, stacklevel=2)
return func(*args, **kwargs)
return wrapper
return decorator
| 16,054
|
def check(config, content, filename):
"""
Run flake8 with the given ``config`` against the passed file.
Returns a ``list`` of :py:class:`flake.Violation`.
"""
with environment(config, content, filename) as env:
out = subprocess.check_output(['flake8',
'--exit-zero',
'--config',
env.config_filename,
'--format',
FLAKE8_REPORT_FORMAT,
env.filename],
universal_newlines=True)
return parse(out)
| 16,055
|
def x_dot(y):
"""x_dot(y)
Describes the differential equation for position as given in CW 12.
"""
return y
| 16,056
|
def get_comp_depends(comp_info, comps):
""" Get comp depends from comp index """
depends = []
for comp in comps:
if comp in comp_info:
depends += comp_info[comp]["dependencies"]
if depends:
depends += get_comp_depends(comp_info, depends)
return list(set(depends))
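# Usage sketch with a toy component index.
comp_info = {
    "app": {"dependencies": ["net", "log"]},
    "net": {"dependencies": ["log"]},
    "log": {"dependencies": []},
}
print(sorted(get_comp_depends(comp_info, ["app"])))  # ['log', 'net']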
| 16,057
|
def get_naiveb_model(x_train: pd.DataFrame, y_train: pd.Series) -> GaussianNB:
"""
Trains and returns a naive Bayes model
Data must all be on the same scale in order to use naive Bayes
"""
gnb = GaussianNB(priors=None)
gnb.fit(x_train, y_train)
return gnb
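# Usage sketch, assuming scikit-learn >= 0.23 for load_iris(as_frame=True).
from sklearn.datasets import load_iris
from sklearn.naive_bayes import GaussianNB
iris = load_iris(as_frame=True)
model = get_naiveb_model(iris.data, iris.target)
print(round(model.score(iris.data, iris.target), 3))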
| 16,058
|
def bakeClip(blend: Tuple[int, int] = (1, 1), clipIndex: int = 1, keepOriginals: bool = False, name: str = "") -> None:
"""
    Used to bake clips and blend them into a single clip.
    -----------------------------------------
    Flags:
    -----------------------------------------
    blend ([uint, uint]): Specifies the indices of the clips to be blended.
    -----------------------------------------
    clipIndex (uint): Specifies the index of the clip to bake.
    -----------------------------------------
    keepOriginals (boolean): Keeps the original clips in the Trax Editor and places the merged clip in the Visor. By default, the merged clip is scheduled and the original clips are kept in the Visor.
    -----------------------------------------
    name (string): Specifies the name of the new clip to create.
    -----------------------------------------
    Return Value:
    None: string clip name
"""
pass
| 16,059
|
def updateDF(df, fields, id_patient):
"""
fields is a dictionary of column names and values.
The function updates the row of id_patient with the values in fields.
"""
for key in fields:
df.loc[df["id_patient"] == id_patient, key] = fields[key][0]
return df
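# Usage sketch; note that each value in `fields` is a list and only its first element is written.
import pandas as pd
df = pd.DataFrame({"id_patient": [1, 2], "age": [40, 50]})
df = updateDF(df, {"age": [41]}, id_patient=1)
print(df)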
| 16,060
|
def gcd(num1: int, num2: int) -> int:
"""Computes the greatest common divisor of integers a and b using
Euclid's Algorithm.
"""
while num2 != 0:
num1, num2 = num2, num1 % num2
return num1
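# Quick sanity check.
print(gcd(48, 36))  # 12
print(gcd(17, 5))   # 1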
| 16,061
|
def application(request):
"""
To use this application, the user must send a POST request with
base64 or form encoded encoded HTML content and the wkhtmltopdf Options in
request data, with keys 'base64_html' and 'options'.
The application will return a response with the PDF file.
"""
if request.method != 'POST':
return
hasHeader = False
hasFooter = False
images = []
request_is_json = request.content_type.endswith('json')
with tempfile.NamedTemporaryFile(suffix='.html') as footer_file:
with tempfile.NamedTemporaryFile(suffix='.html') as header_file:
with tempfile.NamedTemporaryFile(suffix='.html') as source_file:
if request_is_json:
# If a JSON payload is there, all data is in the payload
payload = json.loads(request.data)
source_file.write(payload['contents'].decode('base64'))
if payload.has_key('header'):
header_file.write(payload['header'].decode('base64'))
hasHeader = True
if payload.has_key('footer'):
footer_file.write(payload['footer'].decode('base64'))
hasFooter = True
if payload.has_key('images'):
for image in payload['images']:
if image.has_key('path') and image.has_key('contents'):
path = "/tmp/" +image['path']
if os.path.isdir(os.path.dirname(path))==False:
os.makedirs(os.path.dirname(path))
f = open(path, "w")
f.write(image['contents'].decode('base64'))
f.close()
images.append(path)
options = payload.get('options', {})
elif request.files:
# First check if any files were uploaded
source_file.write(request.files['file'].read())
# Load any options that may have been provided in options
options = json.loads(request.form.get('options', '{}'))
source_file.flush()
header_file.flush()
footer_file.flush()
# Evaluate argument to run with subprocess
args = ['wkhtmltopdf']
if hasHeader:
args.append('--header-html "file://%s"' % header_file.name)
if hasFooter:
args.append('--footer-html "file://%s"' % footer_file.name)
# Add Global Options
if options:
for option, value in options.items():
args.append('--%s' % option)
if value:
args.append('"%s"' % value)
# Add source file name and output file name
file_name = source_file.name
args += [file_name, file_name + ".pdf"]
# Execute the command using executor
execute(' '.join(args))
for image in images:
os.remove(image)
return Response(
wrap_file(request.environ, open(file_name + '.pdf')),
mimetype='application/pdf',
)
| 16,062
|
async def check_user_name(request):
"""Check if a user exists with provided username."""
log_request(request)
conn = await create_connection()
response = await users_query.users_search_duplicate(
conn, request.args.get("username")
)
conn.close()
return json({"exists": bool(response)})
| 16,063
|
def azip_longest(*aiterables, fillvalue=None):
"""async version of izip_longest with parallel iteration"""
return _azip(*aiterables, fillvalue=fillvalue, stop_any=False)
| 16,064
|
def stop_instance(args):
"""
Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time
parameter: (string) zone
Name of the zone for request.
parameter: (string) instance
Name of the instance scoping this request.
"""
project = SERVICE_ACT_PROJECT_ID
instance = args.get('instance')
zone = args.get('zone')
request = compute.instances().stop(project=project, zone=zone, instance=instance)
response = request.execute()
data_res = {
'status': response.get('status'),
'kind': response.get('kind'),
'name': response.get('name'),
'id': response.get('id'),
'progress': response.get('progress'),
'operationType': response.get('operationType'),
}
ec = {'GoogleCloudCompute.Operations(val.id === obj.id)': response}
return_outputs(
tableToMarkdown('Google Cloud Compute Operations', data_res, removeNull=True),
ec,
response,
)
| 16,065
|
def drop_table(name, con):
"""
drop table from database
Parameters
----------
name : string, name of SQL table
con : sqlalchemy.engine.Engine or sqlite3.Connection
Returns
-------
True
Examples
--------
>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> from tidyframe import drop_table
>>>
>>> engine = create_engine("sqlite:///raw_table.db")
>>> df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
>>> df.to_sql("raw_table", engine)
>>> drop_table("raw_table", engine)
True
"""
table = load_table_schema(name, con)
table.drop()
return True
| 16,066
|
def class_info_interface(**class_name):
"""
Set Class_Name, Class_Index, and DNN Model
\nclass_name (kwargs) : Input Class Name with list type,
if want to set class number, add tuple parameters
like 'class_info_interface(class_name = [list], class_number = [list])'
\nclass_number : Default the number of class_name
"""
global window
window = Tk()
window.title("Auto Labeling Input Class Name")
global entry_num
global entry_name
global entry_model
# 1. DNN Model Interface
ttk.Label(window, text = "DNN Model : ").grid(row = 0,
column = 0,
padx = 10,
pady = 10)
entry_model = ttk.Entry(window)
entry_model.grid(row = 0,
column = 1,
padx = 10,
pady = 10)
# 2. Class name Interface
ttk.Label(window, text = "Class name : ").grid(row = 1,
column = 0,
padx = 10,
pady = 10)
entry_name = ttk.Entry(window)
entry_name.grid(row = 1,
column = 1,
padx = 10,
pady = 10)
# 3. Class number Interface
ttk.Label(window, text = "Class number : ").grid(row = 2,
column = 0,
padx = 10,
pady = 10)
entry_num = ttk.Entry(window)
entry_num.grid(row = 2,
column = 1,
padx = 10,
pady = 10)
ttk.Button(window, text="OK", command=get_class_info).grid(row = 2,
column = 2,
padx = 10,
pady = 10)
# 4. User Name Guide Interface
    if not class_name or len(class_name) == 0: # isEmpty == True
ttk.Label(window, text = "Username \n\n" +
"professor_seo \n" +
"jaeseok \n" +
"hun \n").grid(row = 3,
column = 1,
padx = 10,
pady = 10)
ttk.Label(window, text = "| Class Name\n\n" +
"| 0\n| 1\n| 2\n").grid(row = 3,
column = 2,
padx = 10,
pady = 10)
    elif len(class_name) != 0: # tuple variable
class_name_list = list()
for key, value in class_name.items():
print(key, value)
class_name_list.append(value)
# Class Name [0]
name_value = str()
index_value = str()
for i, name in enumerate(class_name_list[0]):
name_value = name_value + name + ' \n'
index_value = index_value + '| ' + str(i) + ' \n'
ttk.Label(window, text = "Username \n\n" +
name_value).grid(row = 3,
column = 1,
padx = 10,
pady = 10)
# Class Index [1]
if len(class_name) == 2:
index_value = str()
for index in class_name_list[1]:
index_value = index_value + '|' + \
str(index) + ' \n'
ttk.Label(window, text = "| Class Name\n\n" +
index_value).grid(row = 3,
column = 2,
padx = 10,
pady = 10)
print("list")
else:
raise ValueError("Not Supported value. See function docstring")
window.mainloop()
return user_name, user_num, dnn_model
| 16,067
|
def uint8(value):
"""
Create an SPL ``uint8`` value.
Returns:
Expression: Expression representing the value.
"""
return streamsx.spl.op.Expression('UINT8', int(value))
| 16,068
|
def normalize_data(x_train, x_test, x_val=None):
"""normalize input to zero mean one std assuming x_train, x_test are torch Tensors"""
m = x_train.mean(0)
s = x_train.std(0)
x_train -= m
x_test -= m
    if x_val is not None: x_val -= m
cols = []
cols0 = []
for i in range(s.size(0)):
if s[i] > 1e-9:
cols.append(i)
else:
cols0.append(i)
if not cols0: # no column has near zero std
x_train /= s
x_test /= s
        if x_val is not None: x_val /= s
elif cols: # some columns have near zero std
x_train[:, cols] /= s[cols]
x_test[:, cols] /= s[cols]
        if x_val is not None: x_val[:, cols] /= s[cols]
if cols0: # for columns with std ~ zero we just squash them
        if x_val is not None:
squash_data(x_train[:, cols0], x_test[:, cols0], x_val[:, cols0])
else:
squash_data(x_train[:, cols0], x_test[:, cols0])
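# Usage sketch, assuming PyTorch; standardizes both sets in place with statistics from x_train.
import torch
x_train = torch.randn(200, 4) * 3.0 + 2.0
x_test = torch.randn(50, 4) * 3.0 + 2.0
normalize_data(x_train, x_test)
print(x_train.mean(0), x_train.std(0))  # roughly zeros and ones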
| 16,069
|
def print_table(log_results,
platform_width = 0,
build_failures_width = 0,
test_failures_width = 0,
successful_width = 0,
space_char = " ",
list_separator = DEFAULT_LIST_SEPARATOR):
"""Print out a table in the requested format (text or markdown)."""
# Print table header
output_lines = list()
headers = [
re.sub(r'\b \b', space_char, PLATFORM_HEADER.ljust(platform_width)),
re.sub(r'\b \b', space_char,BUILD_FAILURES_HEADER.ljust(build_failures_width)),
re.sub(r'\b \b', space_char,TEST_FAILURES_HEADER.ljust(test_failures_width))
] + (
[re.sub(r'\b \b', space_char,SUCCESSFUL_TESTS_HEADER.ljust(successful_width))]
if FLAGS.include_successful else []
)
# Print header line.
output_lines.append(("|" + " %s |" * len(headers)) % tuple(headers))
# Print a |-------|-------|---------| line.
output_lines.append(("|" + "-%s-|" * len(headers)) %
tuple([ re.sub("[^|]","-", header) for header in headers ]))
# Iterate through platforms and print out table lines.
for platform in sorted(log_results.keys()):
if log_results[platform]["build_failures"] or log_results[platform]["test_failures"] or FLAGS.include_successful:
columns = [
re.sub(r'\b \b', space_char, platform.ljust(platform_width)),
format_result(log_results[platform]["build_failures"], justify=build_failures_width, list_separator=list_separator),
format_result(log_results[platform]["test_failures"], justify=test_failures_width, list_separator=list_separator),
] + (
[format_result(log_results[platform]["successful"], justify=successful_width, list_separator=list_separator)]
if FLAGS.include_successful else []
)
output_lines.append(("|" + " %s |" * len(headers)) % tuple(columns))
return output_lines
| 16,070
|
def test_two_tags_unshared_images(default_tag_policy, initialized_db):
"""
Repository has two tags with no shared images between them.
"""
with assert_gc_integrity():
repository = create_repository(latest=["i1", "i2", "i3"], other=["f1", "f2"])
delete_tag(repository, "latest")
assert_deleted(repository, "i1", "i2", "i3")
assert_not_deleted(repository, "f1", "f2")
| 16,071
|
def is_valid(filepath, digest, hashAlgo='md5'):
"""Verify the integrity of a file against a hash value."""
assert(isinstance(digest, str))
res = calculate(filepath, hashAlgo)
LOG.debug('Calculated digest: '+res)
LOG.debug(' Original digest: '+digest)
return res is not None and res == digest
| 16,072
|
def align_reconstruction_to_pdr(reconstruction, data):
"""
leveling and scaling the reconstructions to pdr
"""
if reconstruction.alignment.aligned:
return reconstruction
if not data.pdr_shots_exist():
return reconstruction
pdr_shots_dict = data.load_pdr_shots()
X, Xp = [], []
onplane, verticals = [], []
for shot_id in reconstruction.shots.keys():
X.append(reconstruction.shots[shot_id].pose.get_origin())
Xp.append(pdr_shots_dict[shot_id][0:3])
R = reconstruction.shots[shot_id].pose.get_rotation_matrix()
onplane.append(R[0,:])
onplane.append(R[2,:])
verticals.append(R[1,:])
X = np.array(X)
Xp = np.array(Xp)
# Estimate ground plane.
p = multiview.fit_plane(X - X.mean(axis=0), onplane, verticals)
Rplane = multiview.plane_horizontalling_rotation(p)
X = Rplane.dot(X.T).T
# Estimate 2d similarity to align to pdr predictions
T = tf.affine_matrix_from_points(X.T[:2], Xp.T[:2], shear=False)
s = np.linalg.det(T[:2, :2]) ** 0.5
A = np.eye(3)
A[:2, :2] = T[:2, :2] / s
A = A.dot(Rplane)
b = np.array([
T[0, 2],
T[1, 2],
Xp[:, 2].mean() - s * X[:, 2].mean() # vertical alignment
])
# Align points.
for point in reconstruction.points.values():
p = s * A.dot(point.coordinates) + b
point.coordinates = p.tolist()
# Align cameras.
for shot in reconstruction.shots.values():
R = shot.pose.get_rotation_matrix()
t = np.array(shot.pose.translation)
Rp = R.dot(A.T)
tp = -Rp.dot(b) + s * t
try:
shot.pose.set_rotation_matrix(Rp)
shot.pose.translation = list(tp)
        except Exception:
logger.debug("unable to transform reconstruction!")
return reconstruction
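The pose update above applies a similarity (scale s, rotation A, translation b): points become s*A*p + b, rotations become R*A.T, and translations become s*t - R*A.T*b. A self-contained NumPy sketch with toy values, independent of the reconstruction data structures, showing why this leaves camera-frame coordinates unchanged up to the global scale (so image projections are preserved):

import numpy as np

s = 2.0
theta = 0.3
A = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
b = np.array([1.0, -2.0, 0.5])

p = np.array([0.4, 1.2, -0.7])               # a toy 3D point
R, t = np.eye(3), np.array([0.1, 0.2, 0.3])  # a toy world-to-camera pose

p_new = s * A.dot(p) + b        # point update
R_new = R.dot(A.T)              # rotation update
t_new = -R_new.dot(b) + s * t   # translation update

# Camera-frame coordinates before and after differ only by the scale s.
assert np.allclose(R_new.dot(p_new) + t_new, s * (R.dot(p) + t))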
| 16,073
|
def test_validation_check_service_account_removed(
invalid_service_account_not_exist,
valid_google_project_patcher,
db_session,
cloud_manager,
):
"""
Test that an invalid service account whose policy does not exist is
removed from the database
"""
(
fence.scripting.google_monitor._get_user_email_list_from_google_project_with_owner_role
) = MagicMock()
(
fence.scripting.google_monitor._send_emails_informing_service_account_removal
) = MagicMock()
(fence.scripting.google_monitor._get_service_account_removal_reasons) = MagicMock()
validation_check(db=None)
assert (
fence.scripting.google_monitor._send_emails_informing_service_account_removal.call_count
== 1
)
assert (
db_session.query(UserServiceAccount)
.filter_by(email=invalid_service_account_not_exist["service_account"].email)
.count()
) == 0
| 16,074
|
def checkLengthSmaller(op, graph, frm, to):
"""
    Confirm that the resulting video has fewer frames than the source.
:param op:
:param graph:
:param frm:
:param to:
:return:
@type op: Operation
@type graph: ImageGraph
@type frm: str
@type to: str
"""
edge = graph.get_edge(frm, to)
durationChangeTuple = getValue(edge, 'metadatadiff.video.nb_frames')
if durationChangeTuple is None or \
(durationChangeTuple[0] == 'change' and int(durationChangeTuple[1]) < int(durationChangeTuple[2])):
return (Severity.ERROR,"Length of video is not shorter")
| 16,075
|
def enable_log(fmt='[%(asctime)s] [%(process)5s] %(levelname)s %(module)s %(name)s %(message)s',
enable_color=True, filename=None):
"""
Clears all log handlers, and adds color handler and/or file handlers
:param fmt: logging format string
:param enable_color: True to enable
:param filename: log file location
    :return: True
    """
    lgr = logging.getLogger()
    lgr.handlers.clear()
    # If there are no special requirements for logging
    # (no color and no file), we still want basic formatting.
    if not enable_color and not filename:
loghandler = logging.StreamHandler()
logfmt = logging.Formatter(fmt)
loghandler.setFormatter(logfmt)
lgr.addHandler(loghandler)
return True
if enable_color:
loghandler = logging.StreamHandler()
logfmt = ColorLogFormatter(fmt)
loghandler.setFormatter(logfmt)
lgr.addHandler(loghandler)
if filename is not None and filename != '':
logfilename = abspath(filename)
fhandler = logging.FileHandler(logfilename)
logfmt = logging.Formatter(fmt)
fhandler.setFormatter(logfmt)
lgr.addHandler(fhandler)
return True
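A brief usage sketch (the log file path is a placeholder, and `ColorLogFormatter` is assumed to be defined elsewhere in this module):

import logging

enable_log(enable_color=True, filename='/tmp/app.log')  # placeholder path
logging.getLogger(__name__).info('logging configured')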
| 16,076
|
def build_con_and_ds(dataset: str):
"""
Builds test connector and test datasource for testing with API key
    Leave this function in if you ever want to run tests without skipping
    them due to missing Bearer tokens
How to use:
Replace build_ds function with this one in test_aircall file
Be sure to also replace the endpoints inside the aircall connector file
"""
con = AircallConnector(name='mah_test', bearer_auth_id='abc123efg')
ds = AircallDataSource(name='mah_ds', domain='test_domain', dataset=dataset, limit=1,)
return con, ds
| 16,077
|
def _get_search_str_regex_main_body(join_with, last_date):
    r"""Returns something like:
    (t1[0-5]\d\d\d\d|t160[0-2]\d\d|t16030\d|t16031[0-3])"""
todo_date = _get_todo_date(last_date + timedelta(1))
# yrs = _make_last_digit_all_values_less_last_digit(todo_date[:3])
# search_substrs = [yrs[-1]] #Only go back to the previous year
search_substrs = []
for i in range(2, 7):
regexed_date_i = _make_given_digit_all_values_less_than_current_val_regex(todo_date, i)
if regexed_date_i is not None:
search_substrs.append(regexed_date_i)
# search_substrs.append(todo_date)
search_str = join_with.join(search_substrs)
search_str = "(%s)" % search_str
return search_str
| 16,078
|
def _remove_existing_tmp_file(tmp_file):
"""Make sure the temporary file is removed."""
if os.path.isfile(tmp_file):
os.remove(tmp_file)
| 16,079
|
def test_misc():
"""Generic tests for exceptional cases that the parser needs to take into
consideration.
"""
# The atomic number is given in the NAT convention
filepath = "tests/data/misc/nat/HfS2_PBE0D3_ZD_fc3_supercell-00497.o"
archive = parse(filepath)
asserts_basic(archive)
asserts_basic_code_specific(archive)
system = archive.section_run[0].section_system[0]
assert set(system.atom_species) == set((16, 72))
# Tests that ghost atoms are ignored in the system. Maybe they need their
# own metainfo?
filepath = "tests/data/misc/ghosts/fevo46_sngt_ti_zero.cryst.out"
archive = parse(filepath)
asserts_basic(archive)
asserts_basic_code_specific(archive)
system = archive.section_run[0].section_system[0]
assert set(system.atom_species) == set((8, 26, 22, 38))
# Tests that substitutions are handled correctly
filepath = "tests/data/misc/substitution/neutral.cryst.out"
archive = parse(filepath)
asserts_basic(archive)
asserts_basic_code_specific(archive)
system = archive.section_run[0].section_system[0]
assert set(system.atom_species) == set((8, 26, 22, 38))
# Geometry optimization with constraints
filepath = "tests/data/misc/constraints/ionic1_fullspin_spinfx_2.cryst.out"
archive = parse(filepath)
asserts_basic(archive)
asserts_basic_code_specific(archive)
# Displacement of atoms
filepath = "tests/data/misc/displacement/fe50_x8_l0_re.cryst.out"
archive = parse(filepath)
asserts_basic(archive)
asserts_basic_code_specific(archive)
| 16,080
|
def is_palindrome_recursive(text, left=None, right=None):
    """Recursively check whether text is a palindrome, ignoring non-letters.
    Time complexity: O(n), since each call compares one pair of characters.
    text: str
    left: int
    right: int"""
    if len(text) == 0:
        return True
    given = get_letters(text)
    if left is None and right is None:
        left = 0
        right = len(given) - 1
    if given[left] != given[right]:
        return False
    elif left >= right:
        return True
    else:
        return is_palindrome_recursive(given, left+1, right-1)
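Quick usage sketches; the exact results depend on how `get_letters` normalizes its input (it is assumed to strip non-letter characters and ignore case):

print(is_palindrome_recursive("racecar"))   # expected: True
print(is_palindrome_recursive("hello"))     # expected: False
print(is_palindrome_recursive(""))          # True by definition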
| 16,081
|
def bcSet1():
"""
set boundary condition
"""
| 16,082
|
def folder_command(args):
"""
Edit the folder structure of the todo list.
usage: todone folder <command> <folder(s)>
Valid commands are:
new create a new folder with the given name
rename rename an existing folder
delete remove a folder
list list all folders
"""
parsed_args = parse_args(args)
command = parsed_args['command']
folders = parsed_args['folders']
if len(folders) < MIN_FOLDERS[command]:
raise pe.ArgumentError(
'Not enough folders provided (expected {})'.format(
MIN_FOLDERS[command]
)
)
elif len(folders) > MAX_FOLDERS[command]:
raise pe.ArgumentError(
'Too many folders provided'
)
FOLDER_DISPATCH[command](*folders)
print(COMMAND_MESSAGE[command].format(*folders))
| 16,083
|
def flip(m, axis=None):
"""Reverses the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default, axis=None, will flip over
all of the axes of the input array. If axis is negative it counts from the
last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
Returns
-------
out : ndarray
A view of m with the entries of axis reversed. Since a view is returned, this
operation is done in constant time.
Note
----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all positions.
flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at position 0 and
position 1.
See Also
--------
flipud : Flips array in the up/down direction.
fliplr : Flips array in the left/right direction.
Examples
--------
>>> import nlcpy as vp
>>> A = vp.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]])
>>> vp.flip(A, 0)
array([[[4, 5],
[6, 7]],
<BLANKLINE>
[[0, 1],
[2, 3]]])
>>> vp.flip(A, 1)
array([[[2, 3],
[0, 1]],
<BLANKLINE>
[[6, 7],
[4, 5]]])
>>> vp.flip(A)
array([[[7, 6],
[5, 4]],
<BLANKLINE>
[[3, 2],
[1, 0]]])
>>> vp.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
<BLANKLINE>
[[1, 0],
[3, 2]]])
>>> A = vp.random.randn(3, 4, 5)
>>> vp.all(vp.flip(A, 2) == A[:, :, ::-1, ...])
array(True)
"""
m = nlcpy.asanyarray(m)
if axis is None:
indexer = (slice(None, None, -1),) * m.ndim
else:
if type(axis) is nlcpy.ndarray:
axis = axis.get()
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
_axis = []
for ax in axis:
if type(ax) is nlcpy.ndarray:
ax = ax.get()
if type(ax) is numpy.ndarray:
if ax.size > 1:
raise TypeError(
'only size-1 arrays can be converted to Python scalars')
else:
ax = ax.item()
_axis.append(ax + m.ndim if ax < 0 else ax)
axis = _axis
if len(axis) != len(set(axis)):
raise ValueError('repeated axis')
indexer = [slice(None) for i in range(m.ndim)]
for ax in axis:
if ax >= m.ndim or ax < 0:
raise AxisError(
'axis {0} is out of bounds for array of dimension {1}'
.format(ax, m.ndim))
indexer[ax] = slice(None, None, -1)
indexer = tuple(indexer)
return m[indexer]
| 16,084
|
def radius_of_gyration(pos):
"""
Radius of gyration of a group of positions.
Does not account for periodic boundaries.
"""
com = np.mean(pos, axis = 0)
delta = pos - com
rgv = np.sqrt(np.sum(delta**2, axis = 0) / len(pos))
return np.linalg.norm(rgv)
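A short usage sketch with toy coordinates:

import numpy as np

pos = np.array([[0.0, 0.0, 0.0],   # four corners of a unit square, z = 0
                [1.0, 0.0, 0.0],
                [1.0, 1.0, 0.0],
                [0.0, 1.0, 0.0]])
print(radius_of_gyration(pos))  # sqrt(0.5) ~= 0.707; each point is sqrt(0.5) from the centroid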
| 16,085
|
def get_metadata_for_list(commit_range, git_dir=None, count=None,
series=None, allow_overwrite=False):
"""Reads out patch series metadata from the commits
This does a 'git log' on the relevant commits and pulls out the tags we
are interested in.
Args:
commit_range (str): Range of commits to count (e.g. 'HEAD..base')
        git_dir (str): Path to git repository (None to use default)
count (int): Number of commits to list, or None for no limit
series (Series): Object to add information into. By default a new series
is started.
allow_overwrite (bool): Allow tags to overwrite an existing tag
Returns:
Series: Object containing information about the commits.
"""
if not series:
series = Series()
series.allow_overwrite = allow_overwrite
stdout = get_list(commit_range, git_dir, count)
pst = PatchStream(series, is_log=True)
for line in stdout.splitlines():
pst.process_line(line)
pst.finalise()
return series
| 16,086
|
def side_seperator(lsep,rsep):
"""
    Configure a custom side-lined formatter.
A side-lined formatter is:
`[DATE] SEP "L_SEP" EVENT "R_SEP" LOG`
`loggy.side_seperator(lsep="||",rsep="||") # Default vals`
"""
fmt['ls']=lsep
fmt['rs']=rsep
return fmt
| 16,087
|
def clear():
"""Clear the terminal."""
os.system("cls" if os.name == "nt" else "clear")
| 16,088
|
def _semi_implicit_euler(ode_fun, jac_fun, y_olds, t_old,
f_old,dt, args, solver_parameters, J00, I):
"""
Calculate solution at t_old+dt using the semi-implicit Euler method.
Based on Section IV.9.25 of Ref II.
"""
y_older, y_old = y_olds
je_tot = 0
if(f_old is None):
f_yj = ode_fun(*(y_old, t_old)+args)
fe_tot = 1
else:
f_yj = f_old
fe_tot = 0
b = dt*f_yj
A = I-dt*J00
if(solver_parameters['initialGuess']):
# TODO: Using explicit Euler as a predictor doesn't seem to be
# effective (maybe because with extrapolation we are taking too big
# steps for the predictor be close to the solution).
# x0, f_yj, fe_tot_,je_tot=_explicit_euler(ode_fun, jac_fun,
# y_olds, t_old, f_yj, dt, args, solver_parameters)
# fe_tot += fe_tot_
x0 = y_old
else:
x0 = None
dy = linear_solve(A, b, iterative=solver_parameters['iterative'],
tol=solver_parameters['min_tol'], x0=x0)
y_new = y_old + dy
return (y_new, f_yj, fe_tot, je_tot)
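For reference, the step implemented above solves (I - dt*J)*dy = dt*f(y_n) and sets y_{n+1} = y_n + dy. A self-contained NumPy sketch of the same update for a scalar test problem, independent of the `linear_solve` helper and solver-parameter machinery used here:

import numpy as np

def semi_implicit_euler_step(f, jac, y, t, dt):
    """One semi-implicit Euler step: solve (I - dt*J) dy = dt*f(y, t)."""
    I = np.eye(y.size)
    J = np.atleast_2d(jac(y, t))
    dy = np.linalg.solve(I - dt * J, dt * f(y, t))
    return y + dy

# Test problem y' = -5*y with exact solution exp(-5*t).
f = lambda y, t: -5.0 * y
jac = lambda y, t: np.array([[-5.0]])

y, t, dt = np.array([1.0]), 0.0, 0.01
for _ in range(100):
    y = semi_implicit_euler_step(f, jac, y, t, dt)
    t += dt
print(y[0], np.exp(-5.0 * t))  # ~0.0076 vs exact ~0.0067 (the method is first-order accurate)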
| 16,089
|
def make_sequential(layer_configs, input):
"""Makes sequential layers automatically.
Arguments:
layer_configs: An OrderedDict that contains the configurations of a
sequence of layers. The key is the layer_name while the value is a dict
contains hyper-parameters needed to instantiate the corresponding
layer. The key of the inner dict is the name of the hyper-parameter and
the value is the value of the corresponding hyper-parameter. Note that
the key "layer_type" indicates the type of the layer.
input: A tensor that mimics the batch input of the model. The first dim
is the batch size. All other dims should be exactly the same as the
real input shape in the later training.
Returns:
A sequence of layers organized by nn.Sequential.
"""
layers = OrderedDict()
for layer_name in layer_configs:
arguments = deepcopy(layer_configs[layer_name])
layer_type = arguments.pop("layer_type")
input_shape = [int(j) for j in input.data.size()]
arguments["input_shape"] = input_shape
layers.update({layer_name: make_layer(layer_type, **arguments)})
input = layers[layer_name](input)
return nn.Sequential(layers)
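The key idea is that a dummy batch is threaded through the layers as they are constructed, so each layer can be built from the shape it will actually receive. A self-contained PyTorch sketch of that pattern; it does not use the project's `make_layer` factory or its config schema, which are not shown here:

from collections import OrderedDict
import torch
import torch.nn as nn

def build_sequential(layer_factories, dummy_input):
    """Build nn.Sequential from factories that receive the incoming shape."""
    layers = OrderedDict()
    x = dummy_input
    for name, factory in layer_factories.items():
        layers[name] = factory(list(x.shape))  # factory sees the real input shape
        x = layers[name](x)                    # thread the dummy batch through
    return nn.Sequential(layers)

factories = OrderedDict([
    ("fc1", lambda shape: nn.Linear(shape[-1], 32)),
    ("relu1", lambda shape: nn.ReLU()),
    ("fc2", lambda shape: nn.Linear(shape[-1], 10)),
])
model = build_sequential(factories, torch.zeros(4, 128))  # batch of 4, 128 features
print(model)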
| 16,090
|
def Pol_Dyn_ExploreWithNUTS(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
    Grab intermediate and end node distributions via NUTS. Identify intermediate node
    sample variances. Pick an intermediate node, weighted towards picking those
    with higher sample variances. Pick an outlet from this intermediate node's
column in the transition matrix A, again by a weighting (where 0% nodes
have a non-zero probability of being selected). [log((p/1-p) + eps)?]
policyParamList = [number days to plan for, sensitivity, specificity, M,
Madapt, delta]
(Only enter the number of days to plan for in the main simulation code,
as the other parameters will be pulled from the respective input areas)
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
# How many days to plan for?
numDaysToSched = min(policyParamList[0],numDaysRemain)
usedBudgetSoFar = 0
firstTestDay = totalSimDays - numDaysRemain
    if numDaysRemain == totalSimDays: # Our initial schedule should just be a distributed exploration
currNode = resultsList[0][0]
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest): # Iterate through our end nodes
if currNode > resultsList[len(resultsList)-1][0]:
currNode = resultsList[0][0]
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
else:
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
usedBudgetSoFar += 1
else: # Generate NUTS sample using current results and use it to generate a new schedule
ydata = []
nSamp = []
for rw in resultsList:
ydata.append(rw[2])
nSamp.append(rw[1])
A = simHelpers.GenerateTransitionMatrix(resultsList)
sens, spec, M, Madapt, delta = policyParamList[1:]
NUTSsamples = simEst.GenerateNUTSsamples(ydata,nSamp,A,sens,spec,M,Madapt,delta)
# Store sample variances for intermediate nodes
NUTSintVars = []
for intNode in range(A.shape[1]):
currVar = np.var(sps.expit(NUTSsamples[:,intNode]))
NUTSintVars.append(currVar)
# Normalize sum of all variances to 1
NUTSintVars = NUTSintVars/np.sum(NUTSintVars)
# Now pick from these samples to generate projections
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest):
# Pick an intermediate node to "target", with more emphasis on higher sample variances
rUnif = random.uniform(0,1)
for intInd in range(A.shape[1]):
if rUnif < np.sum(NUTSintVars[0:(intInd+1)]):
targIntInd = intInd
break
# Go through the same process with the column of A
# pertaining to this target intermediate node
AtargCol = [row[targIntInd] for row in A]
# Add a small epsilon, for 0 values, and normalize
AtargCol = np.add(AtargCol,1e-3)
AtargCol = AtargCol/np.sum(AtargCol)
rUnif = random.uniform(0,1)
for intEnd in range(A.shape[0]):
if rUnif < np.sum(AtargCol[0:(intEnd+1)]):
currInd = intEnd
break
currNode = resultsList[currInd][0]
sampleSchedule.append([firstTestDay+currDay,currNode])
usedBudgetSoFar += 1
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
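The node-selection logic above is cumulative-sum sampling from a normalized weight vector. A standalone NumPy sketch of the same idea with illustrative weights:

import numpy as np

rng = np.random.default_rng(0)
weights = np.array([0.1, 0.5, 0.3, 0.1])  # e.g. normalized sample variances

def weighted_pick(weights, rng):
    """Pick an index with probability proportional to its weight."""
    return int(np.searchsorted(np.cumsum(weights), rng.uniform(0.0, 1.0)))

picks = [weighted_pick(weights, rng) for _ in range(10000)]
print(np.bincount(picks) / len(picks))  # roughly [0.1, 0.5, 0.3, 0.1]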
| 16,091
|
def calc_cos_t(hb_ratio, d, theta_s_i, theta_v_i, relative_azimuth):
"""Calculate t cossine.
Args:
hb_ratio (int): h/b.
d (numpy array): d.
theta_s_i (numpy array): theta_s_i.
theta_v_i (numpy array): theta_v_i.
relative_azimuth (numpy array): relative_azimuth.
Returns:
cos_t : numpy.array.
"""
return hb_ratio * numpy.sqrt(d*d + numpy.power(numpy.tan(theta_s_i)*numpy.tan(theta_v_i)*numpy.sin(relative_azimuth), 2)) / (sec(theta_s_i) + sec(theta_v_i))
| 16,092
|
def step_impl(context):
"""Go through responses and store any with HTTP protocol errors
(as caught by Requests) into the database
"""
new_findings = 0
for response in context.responses:
if response.get('server_protocol_error') is not None:
if fuzzdb.known_false_positive(context, response) is False:
fuzzdb.add_false_positive(context, response)
new_findings += 1
if new_findings > 0:
context.new_findings += new_findings
assert True
| 16,093
|
def guild_only() -> Callable:
"""A decorator that limits the usage of a slash command to guild contexts.
The command won't be able to be used in private message channels.
Example
---------
.. code-block:: python3
from discord import guild_only
@bot.slash_command()
@guild_only()
async def test(ctx):
await ctx.respond('You\'re in a guild.')
"""
def inner(command: Callable):
if isinstance(command, ApplicationCommand):
command.guild_only = True
else:
command.__guild_only__ = True
return command
return inner
| 16,094
|
def truncate(text, words=25):
"""Remove tags and truncate text to the specified number of words."""
return " ".join(re.sub("(?s)<.*?>", " ", text).split()[:words])
| 16,095
|
def _run_query_create_log(query, client, destination_table=None):
"""
    Runs a BigQuery query job.
    :param query: Query to run as a string
    :param client: BigQuery client object
    :param destination_table: Optional destination table; when omitted, a
        timestamped table in the cmap-big-table.cmap_query dataset is used
    :return: QueryJob object
"""
# Job config
job_config = bigquery.QueryJobConfig()
if destination_table is not None:
job_config.destination = destination_table
else:
timestamp_name = datetime.now().strftime("query_%Y%m%d%H%M%S")
project = "cmap-big-table"
dataset = "cmap_query"
dest_tbl = ".".join([project, dataset, timestamp_name])
job_config.destination = dest_tbl
job_config.create_disposition = "CREATE_IF_NEEDED"
return client.query(query, job_config=job_config)
| 16,096
|
def sys_update_char(
asciiCode: int,
fontx: int,
fonty: int,
img: tcod.image.Image,
x: int,
y: int,
) -> None:
"""Dynamically update the current font with img.
All cells using this asciiCode will be updated
at the next call to :any:`tcod.console_flush`.
Args:
asciiCode (int): Ascii code corresponding to the character to update.
fontx (int): Left coordinate of the character
in the bitmap font (in tiles)
fonty (int): Top coordinate of the character
in the bitmap font (in tiles)
img (Image): An image containing the new character bitmap.
x (int): Left pixel of the character in the image.
y (int): Top pixel of the character in the image.
"""
lib.TCOD_sys_update_char(_int(asciiCode), fontx, fonty, img, x, y)
| 16,097
|
def read_disparity_gt(filename: str) -> np.ndarray:
"""
reads the disparity files used for training/testing.
:param filename: name of the file.
:return: data points.
"""
points = []
with open(filename, 'r') as file:
for line in file:
line = line.split(' ')
frame = int(line[0])
x_rgb = int(line[1])
y = int(line[2])
x_ir = int(line[3])
points.append([frame, x_rgb, y, x_ir])
return np.array(points, dtype=np.int32)
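A usage sketch that writes a tiny ground-truth file in the expected space-separated format (frame x_rgb y x_ir) and reads it back; the file contents are illustrative:

import os
import tempfile
import numpy as np

lines = ["0 100 240 95", "0 180 240 171", "1 100 260 96"]
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write("\n".join(lines))
    path = f.name

points = read_disparity_gt(path)
print(points.shape, points.dtype)  # (3, 4) int32
os.remove(path)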
| 16,098
|
def test_q_as_field():
"""
Test that passing in water discharge as a grid field results in self.q
holding correct values
"""
#set up a 5x5 grid with one open outlet node and low initial elevations.
nr = 5
nc = 5
mg = RasterModelGrid((nr, nc), 10.0)
mg.add_zeros('node', 'topographic__elevation')
q = mg.add_zeros('node', 'user_imposed_discharge')
q[:] += 1.0 #add 1.0 m3/yr of water
mg['node']['topographic__elevation'] += mg.node_y / 100000 \
+ mg.node_x / 100000 \
+ np.random.rand(len(mg.node_y)) / 10000
mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True)
mg.set_watershed_boundary_condition_outlet_id(0,
mg['node']['topographic__elevation'],
-9999.)
# Create a D8 flow handler
fa = FlowAccumulator(mg, flow_director='D8',
depression_finder='DepressionFinderAndRouter')
# Instantiate the ErosionDeposition component...
ed = ErosionDeposition(mg, K=0.01, F_f=0.0, phi=0.0, v_s=0.001, m_sp=0.5,
n_sp=1.0, sp_crit=0.0,
discharge_field='user_imposed_discharge',
solver='basic')
#ensure that ed.q is everywhere equal to 1.0 m3/yr.
testing.assert_array_equal(np.ones(mg.number_of_nodes),
ed.q,
err_msg='E/D discharge field test failed',
verbose=True)
| 16,099
|