| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def AllenAtlas(res_um=25, par=None):
"""
Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution
using the IBL Bregma and coordinate system
:param res_um: 25 or 50 um
:return: atlas.BrainAtlas
"""
if par is None:
# Bregma indices for the 10um Allen Brain Atlas, mlapdv
pdefault = {
'PATH_ATLAS': '/datadisk/BrainAtlas/ATLASES/Allen/',
'FILE_REGIONS': str(Path(__file__).parent.joinpath('allen_structure_tree.csv')),
'INDICES_BREGMA': list(np.array([1140 - (570 + 3.9), 540, 0 + 33.2]))
}
par = params.read('ibl_histology', default=pdefault)
        if not Path(par.PATH_ATLAS).exists():
            # TODO: mock atlas to get only the coordinate framework
            raise NotImplementedError("Atlas doesn't exist! Mock option not implemented yet")
params.write('ibl_histology', par)
else:
par = Bunch(par)
# file_image = Path(path_atlas).joinpath(f'ara_nissl_{res_um}.nrrd')
file_image = Path(par.PATH_ATLAS).joinpath(f'average_template_{res_um}.nrrd')
file_label = Path(par.PATH_ATLAS).joinpath(f'annotation_{res_um}.nrrd')
image, header = nrrd.read(file_image, index_order='C') # dv, ml, ap
image = np.swapaxes(np.swapaxes(image, 2, 0), 1, 2) # image[iap, iml, idv]
label, header = nrrd.read(file_label, index_order='C') # dv, ml, ap
label = np.swapaxes(np.swapaxes(label, 2, 0), 1, 2) # label[iap, iml, idv]
# resulting volumes origin: x right, y front, z top
df_regions = pd.read_csv(par.FILE_REGIONS)
regions = BrainRegions(id=df_regions.id.values,
name=df_regions.name.values,
acronym=df_regions.acronym.values)
xyz2dims = np.array([1, 0, 2])
dims2xyz = np.array([1, 0, 2])
dxyz = res_um * 1e-6 * np.array([-1, -1, -1])
ibregma = (np.array(par.INDICES_BREGMA) * 10 / res_um)
return BrainAtlas(image, label, regions, dxyz, ibregma, dims2xyz=dims2xyz, xyz2dims=xyz2dims)
| 13,900
|
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
    warnings.filterwarnings("ignore", r"Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
#ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
| 13,901
|
def nsd_delete1(ctx, name, force):
"""deletes a NSD/NSpkg
NAME: name or ID of the NSD/NSpkg to be deleted
"""
nsd_delete(ctx, name, force)
| 13,902
|
def get_devices_properties(device_expr,properties,hosts=[],port=10000):
"""
Usage:
get_devices_properties('*alarms*',props,
hosts=[get_bl_host(i) for i in bls])
props must be an string as passed to Database.get_device_property();
regexp are not enabled!
get_matching_device_properties enhanced with multi-host support
@TODO: Compare performance of this method with
get_matching_device_properties
"""
expr = device_expr
if not isSequence(properties): properties = [properties]
get_devs = lambda db, reg : [d for d in db.get_device_name('*','*')
if not d.startswith('dserver') and matchCl(reg,d)]
if hosts: tango_dbs = dict(('%s:%s'%(h,port),PyTango.Database(h,port))
for h in hosts)
else: tango_dbs = {get_tango_host():get_database()}
return dict(('/'.join((host,d) if hosts else (d,)),
db.get_device_property(d,properties))
for host,db in tango_dbs.items() for d in get_devs(db,expr))
| 13,903
|
def benchmark_classifier(cls, y_test, labels, scoring):
"""
Use 10-fold cross validation to benchmark the performance of the provided classifier.
Parameters
----------
cls : estimator object implementing `fit`
y_test : array
labels : list (of strings)
List containing the labels that match the elements of `y_test`.
scoring : list (of strings)
"""
print('Calculating classifier performance...')
for scr in scoring:
print(scr, ':', str(np.mean(cross_val_score(cls, y_test, labels, cv=10, scoring=scr))))
| 13,904
|
def atexit_shutdown_grace_period(grace_period=-1.0):
"""Return and optionally set the default worker cache shutdown grace period.
This only affects the `atexit` behavior of the default context corresponding to
:func:`trio_parallel.run_sync`. Existing and future `WorkerContext` instances
are unaffected.
Args:
grace_period (float): The time in seconds to wait for workers to
exit before issuing SIGKILL/TerminateProcess and raising `BrokenWorkerError`.
Pass `math.inf` to wait forever. Pass a negative value or use the default
value to return the current value without modifying it.
Returns:
float: The current grace period in seconds.
.. note::
This function is subject to threading race conditions."""
global ATEXIT_SHUTDOWN_GRACE_PERIOD
if grace_period >= 0.0:
ATEXIT_SHUTDOWN_GRACE_PERIOD = grace_period
return ATEXIT_SHUTDOWN_GRACE_PERIOD
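# Usage sketch (illustrative, not part of the original module): calling with the default
# negative value only reads the setting; passing a non-negative float sets it.
current = atexit_shutdown_grace_period()   # read without modifying
atexit_shutdown_grace_period(30.0)         # wait up to 30 seconds for workers at exit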
| 13,905
|
def combine_files(root, pattern=None):
"""Combine all files in root path directory
Parameters:
root (str) : file path to directory of files
pattern (str) : optional file pattern to search for in directory
Returns:
combined files
"""
if pattern is not None:
files = [PurePath(path, name) for path, subdirs, files in os.walk(root) for name in files if fnmatch(name, pattern)]
combined_files = pd.concat([pd.read_csv(f) for f in files])
else:
files = [PurePath(path, name) for path, subdirs, files in os.walk(root) for name in files]
combined_files = pd.concat([pd.read_csv(f) for f in files])
run_date_transformation(combined_files)
return combined_files.sort_values(by="date")
| 13,906
|
def get_point(points, cmp, axis):
""" Get a point based on values of either x or y axys.
:cmp: Integer less than or greater than 0, representing respectively
< and > singhs.
:returns: the index of the point matching the constraints
"""
index = 0
for i in range(len(points)):
if cmp < 0:
if points[i][axis] < points[index][axis]:
index = i
else:
if points[i][axis] > points[index][axis]:
index = i
return index
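# Usage sketch (illustrative): with (x, y) tuples, a negative cmp selects the minimum and
# a non-negative cmp selects the maximum along the chosen axis (0 = x, 1 = y).
points = [(3, 2), (1, 5), (4, 1)]
leftmost = get_point(points, -1, 0)   # index 1, the point with the smallest x
topmost = get_point(points, 1, 1)     # index 1, the point with the largest y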
| 13,907
|
def benchmark_parser_header_16(nb_headers, nb_fields, do_checksum=False):
"""
    This method generates the P4 program to benchmark the P4 parser
    :param nb_headers: the number of generic headers included in the program
    :type nb_headers: int
    :param nb_fields: the number of fields (16 bits) in each header
    :type nb_fields: int
    :param do_checksum: whether checksum computation is included in the generated program
    :returns: bool -- True if there is no error
"""
output_dir = 'output'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
program = add_headers_and_parsers_16(nb_headers, nb_fields, do_checksum)
program += add_ingress_block_16()
arguments = 'inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata'
program += add_control_block_16('egress', '', '', '', arguments)
applies = '\t\tpacket.emit(hdr.ethernet);\n'
applies += '\t\tpacket.emit(hdr.ptp);\n'
for i in range(nb_headers):
applies += '\t\tpacket.emit(hdr.header_%d);\n' % i
program += add_control_block_16('DeparserImpl', '', '', applies, 'packet_out packet, in headers hdr')
program += add_control_block_16('verifyChecksum', '', '', '', 'inout headers hdr, inout metadata meta')
program += add_control_block_16('computeChecksum', '', '', '', 'inout headers hdr, inout metadata meta')
program += add_main_module()
fwd_tbl = 'forward_table'
commands = cli_commands(fwd_tbl)
with open ('%s/commands.txt' % output_dir, 'w') as out:
out.write(commands)
write_output(output_dir, program)
get_parser_header_pcap(nb_fields, nb_headers, output_dir)
generate_pisces_command(output_dir, nb_headers, nb_fields, do_checksum)
return True
| 13,908
|
def check_result(reference, result_list, enable_assertion):
"""Checks result, warns when latency is abnormal.
Args:
reference: { environment : reference_time}, environment is a string tuple
while reference_time is a float number.
result_list: a list of tuple.
        enable_assertion: bool, throw an assertion error when unexpected latency is detected.
"""
# Allow 30% variance.
variance_threshold = 0.30
print('******************** Check results *********************')
cnt = 0
# Drop first line(column name).
for result in result_list[1:]:
environment = result[:-1]
inference_time = result[-1]
        if environment not in reference:
            print(' * No matching record for [%s].' % (','.join(environment)))
            cnt += 1
            continue
reference_latency = reference[environment]
up_limit = reference_latency * (1 + variance_threshold)
down_limit = reference_latency * (1 - variance_threshold)
if inference_time > up_limit:
msg = ((' * Unexpected high latency! [%s]\n'
' Inference time: %s ms Reference time: %s ms') %
(','.join(environment), inference_time, reference_latency))
print(msg)
cnt += 1
if inference_time < down_limit:
msg = ((' * Unexpected low latency! [%s]\n'
' Inference time: %s ms Reference time: %s ms') %
(','.join(environment), inference_time, reference_latency))
print(msg)
cnt += 1
print('******************** Check finished! *******************')
if enable_assertion:
assert cnt == 0, 'Benchmark test failed!'
| 13,909
|
def create_jobs_list(chunks, outdir, *filters):
# TO DO
# Figure out the packing/unpacking
"""
Create a list of dictionaries that hold information for the given
chunks
Arguments:
chunks: list: A list of lists. Each nested list contains the
filepaths to be processed
outdir: Path object: The directory where results will be written
filters: Callables
Return:
jobs_list: list: A list of dictionaries that holds information for
the execution of each chunk. Of the form
[
{'chunk_id' : int, (0,1,2,...)
'out_fp' : Path object, (outdir/chunk_<chunk_id>.fa.gz)
'fastas' : list of Path objects,
([PosixPath('path/to/PATRIC.faa'),...])
'filters' : list of functions
}
]
"""
jobs_list = []
for i, chunk in enumerate(chunks):
chunk_id = f"chunk_{i}"
chunk_out = f"{chunk_id}.fa.gz"
out_fp = outdir / pathlib.Path(chunk_out)
# chunk_skipped = f"{chunk_id}.skipped.txt"
chunk_fastas = chunk
chunk_dict = {
"chunk_id": chunk_id,
"fastas": chunk_fastas,
"out_fp": out_fp,
# Should there be an if filters or if len(filters) != 0 ?
"filters": [f for f in filters],
}
jobs_list.append(chunk_dict)
return jobs_list
| 13,910
|
def per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx):
""" Gather per-symbol probabilities into per-seq probabilities """
# per_symbol_probs shape: batch_size, seq_len, candidate_size
# tgt_out_idx shape: batch_size, seq_len
# output shape: batch_size, 1
return torch.prod(
torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(-1)).squeeze(2),
dim=1,
keepdim=True,
)
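# Usage sketch (illustrative, assumes torch is imported): with uniform per-symbol
# probabilities of 0.25 over 4 candidates and a sequence length of 3, every
# per-sequence probability is 0.25 ** 3.
per_symbol = torch.full((2, 3, 4), 0.25)        # batch_size=2, seq_len=3, candidate_size=4
tgt_idx = torch.tensor([[0, 1, 2], [3, 2, 1]])  # batch_size=2, seq_len=3
per_seq = per_symbol_to_per_seq_probs(per_symbol, tgt_idx)  # shape (2, 1), values 0.015625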
| 13,911
|
def append_open_buildinfo(buildinfo_path, files=open_buildinfo_files):
"""Append BUILD-INFO.txt with open section for open_buildinfo_files"""
if os.path.exists(os.path.join(buildinfo_path, BUILDINFO)):
try:
bifile = open(os.path.join(buildinfo_path, BUILDINFO), "a")
try:
bifile.write(open_buildinfo % ', '.join(files))
finally:
bifile.close()
except IOError:
print "Unable to write to BUILD-INFO.txt"
pass
| 13,912
|
def solve_game(payoffs):
""" given payoff matrix for a zero-sum normal-form game,
return first mixed equilibrium (may be multiple)
returns a tuple of numpy arrays """
# .vertex_enumeration()
# .lemke_howson(initial_dropped_label=0) - does not return *all* equilibrium
game = nash.Game(payoffs)
equilibria = game.lemke_howson_enumeration()
# equilibria = game.support_enumeration() # non_degenerate=False, tol=10e-16
equilibrium = next(equilibria, None)
# Lemke-Howson couldn't find equilibrium OR
# Lemke-Howson return error - game may be degenerate. try other approaches
    if equilibrium is not None:
        print(equilibrium)
        print(equilibrium[0])
        print(equilibrium[1])
if equilibrium is None or np.isnan(equilibrium[0]).any() or np.isnan(equilibrium[1]).any() or (equilibrium[0].shape != (payoffs.shape[0],) or equilibrium[1].shape != (payoffs.shape[1],)):
# try other
print('\n\n\n\n\nuh oh! degenerate solution')
print('payoffs are\n', payoffs)
equilibria = game.vertex_enumeration()
equilibrium = next(equilibria, None)
if equilibrium is None:
print('\n\n\n\n\nuh oh x2! degenerate solution again!!')
print('payoffs are\n', payoffs)
equilibria = game.support_enumeration() # non_degenerate=False, tol=10e-16
equilibrium = next(equilibria, None)
assert equilibrium is not None
return equilibrium
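# Usage sketch (illustrative, assumes `import nashpy as nash` and `import numpy as np`
# at module level): for matching pennies the unique mixed equilibrium is (0.5, 0.5)
# for both players.
payoffs = np.array([[1, -1], [-1, 1]])
row_strategy, col_strategy = solve_game(payoffs)   # both approximately [0.5, 0.5]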
| 13,913
|
def emit_settings_changed_event(user, db_table, changed_fields: Dict[str, Tuple[Any, Any]]):
"""Emits an event for a change in a setting.
Args:
user (User): the user that this setting is associated with.
db_table (str): the name of the table that we're modifying.
changed_fields: all changed settings, with both their old and new values
Returns:
None
"""
for (setting_name, (old_value, new_value)) in changed_fields.items():
truncated_fields = truncate_fields(old_value, new_value)
truncated_fields['setting'] = setting_name
truncated_fields['user_id'] = user.id
truncated_fields['table'] = db_table
tracker.emit(
USER_SETTINGS_CHANGED_EVENT_NAME,
truncated_fields
)
# Announce field change
USER_FIELDS_CHANGED.send(sender=None, user=user, table=db_table, changed_fields=changed_fields)
| 13,914
|
def zone_features(df, zfeatures, aufeatures):
"""Create zone features from the data
Args:
df (DataFrame): Input dataframe
zfeatures (list): List of zone median features
aufeatures (list): List of zone autocorr features
Return: 2 dataframes
"""
# Medians from the last 1,3,6,12 months
zones_1y = df[(df['ds'] >= '2018-03-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1y.columns = ['zone_code','median_user_1y','median_bw_1y']
zones_1m = df[(df['ds'] >= '2019-02-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1m.columns = ['zone_code','median_user_1m','median_bw_1m']
zones_3m = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_3m.columns = ['zone_code','median_user_3m','median_bw_3m']
zones_6m = df[(df['ds'] >= '2018-09-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_6m.columns = ['zone_code','median_user_6m','median_bw_6m']
# Autocorrelation features
zones_autocorr = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': {
'lag_user_1d' :lambda x: pd.Series.autocorr(x, 24),
'lag_user_3d' :lambda x: pd.Series.autocorr(x, 3*24),
'lag_user_1w' :lambda x: pd.Series.autocorr(x, 24*7),
},
'bandwidth_total': {
'lag_bw_1d' :lambda x: pd.Series.autocorr(x, 24),
'lag_bw_3d' :lambda x: pd.Series.autocorr(x, 3*24),
'lag_bw_1w' :lambda x: pd.Series.autocorr(x, 24*7),
}
}).fillna(0)
    # The nested aggregation produces MultiIndex columns; they are flattened by the explicit column assignment below
zones_autocorr.columns = ['zone_code','lag_user_1d','lag_user_3d','lag_user_1w','lag_bw_1d','lag_bw_3d','lag_bw_1w']
zones = reduce(lambda x,y: pd.merge(x,y, on='zone_code', how='inner'), [zones_1m, zones_3m, zones_6m, zones_1y])
# Scale the zone features
scale1, scale2 = MinMaxScaler(), MinMaxScaler()
zones[zfeatures] = scale1.fit_transform(zones[zfeatures])
zones_autocorr[aufeatures] = scale2.fit_transform(zones_autocorr[aufeatures])
return zones, zones_autocorr
| 13,915
|
def get_active_validator_indices(validators: List[ValidatorRecord]) -> List[int]:
"""
Gets indices of active validators from ``validators``.
"""
return [i for i, v in enumerate(validators) if is_active_validator(v)]
| 13,916
|
def test_tag_links_render_on_bookmarks_list(client, db_setup):
"""When a user views their bookmark list - links to the tags should be
incldued in the respose, but it should NOT include links to tags
associated with other user's bookmarks.
"""
user = db_setup.get("homer")
login = client.login(username=user.email, password=USER_PASSWORD)
assert login is True
url = reverse("bookmark_it:bookmark_list")
response = client.get(url)
assert response.status_code == 200
assertTemplateUsed(response, "bookmark_it/bookmark_list.html")
keywords = ["history", "beer"]
for word in keywords:
expected = "?tag={}".format(word)
assertContains(response, expected)
| 13,917
|
def parse_decodes(sentences, predictions, lengths, label_vocab):
"""Parse the padding result
Args:
sentences (list): the tagging sentences.
predictions (list): the prediction tags.
lengths (list): the valid length of each sentence.
label_vocab (dict): the label vocab.
Returns:
outputs (list): the formatted output.
"""
predictions = [x for batch in predictions for x in batch]
lengths = [x for batch in lengths for x in batch]
id_label = dict(zip(label_vocab.values(), label_vocab.keys()))
outputs = []
for idx, end in enumerate(lengths):
sent = sentences[idx][:end]
tags = [id_label[x] for x in predictions[idx][:end]]
sent_out = []
tags_out = []
words = ""
for s, t in zip(sent, tags):
if t.endswith('-B') or t == 'O':
if len(words):
sent_out.append(words)
tags_out.append(t.split('-')[0])
words = s
else:
words += s
if len(sent_out) < len(tags_out):
sent_out.append(words)
outputs.append(''.join(
[str((s, t)) for s, t in zip(sent_out, tags_out)]))
return outputs
| 13,918
|
def get_weekday(start_date, end_date, weekday_nums, repeat=None):
"""
获取一段时间范围内每个周天对应的日期
:param start_date:
:param end_date:
:param weekday_nums: list, 星期对应数字 0 ~ 6
:param repeat:
:return:
"""
sdate = datetime.datetime.strptime(start_date, date_pattern1)
edate = datetime.datetime.strptime(end_date, date_pattern1)
if not repeat:
edate += datetime.timedelta(days=1)
weekdays = []
for weekday_num in weekday_nums:
tmp_date = sdate
while tmp_date < edate:
now_weekday = tmp_date.weekday()
tmp_date += datetime.timedelta(days=(((int(weekday_num)+6) % 7 - now_weekday + 7) % 7))
if tmp_date < edate:
weekdays.append(tmp_date.strftime(date_pattern1))
tmp_date += datetime.timedelta(days=7)
else:
break
return weekdays
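# Usage sketch (illustrative): assuming the module-level constant date_pattern1 is a
# "%Y-%m-%d" style format, weekday number 1 selects Mondays under this function's
# convention, so the call below returns ['2023-05-01', '2023-05-08'].
mondays = get_weekday('2023-05-01', '2023-05-14', [1])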
| 13,919
|
def context_processor(target):
"""
Decorator that allows context processors with parameters to be assigned
(and executed properly) in a RequestContext
Example::
return render_to_response(
template_name,
context_instance=RequestContext(
request,
processors=[
test_processor1,
test_processor2(val1=test_val1, val2=test_val2),
]
)
)
"""
def cp_wrapper(*args, **kwargs):
if (len(args) == 1 and len(kwargs) == 0) \
or (len(args) == 0 and len(kwargs) == 1 and 'request' in kwargs):
return target(*args, **kwargs)
else:
def get_processor(request):
return target(request, *args, **kwargs)
return get_processor
return cp_wrapper
| 13,920
|
def write_checkgroups(groups, path):
""" Write the current checkgroups file.
Arguments: groups (a dictionary representing a checkgroups)
path (path of the checkgroups file)
No return value
"""
    keys = sorted(groups.keys())
    checkgroups_file = open(path, 'w')
for key in keys:
if len(key) < 8:
checkgroups_file.write(key + '\t\t\t' + groups[key] + '\n')
elif len(key) < 16:
checkgroups_file.write(key + '\t\t' + groups[key] + '\n')
else:
checkgroups_file.write(key + '\t' + groups[key] + '\n')
checkgroups_file.close()
    print('Checkgroups file written.')
    print()
| 13,921
|
def test_put_object_from_filelike(repository, generate_directory):
"""Test the ``Repository.put_object_from_filelike`` method."""
directory = generate_directory({'file_a': None, 'relative': {'file_b': None}})
with open(directory / 'file_a', 'rb') as handle:
repository.put_object_from_filelike(handle, 'file_a')
assert repository.has_object('file_a')
with open(directory / 'relative/file_b', 'rb') as handle:
repository.put_object_from_filelike(handle, 'relative/file_b')
assert repository.has_object('relative/file_b')
with io.BytesIO(b'content_stream') as stream:
repository.put_object_from_filelike(stream, 'stream')
| 13,922
|
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete
removes tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
| 13,923
|
def BuildWagonCommand(state):
"""Build wagon command to build MCEP model for the given state.
Example command -
$ESTDIR/bin/wagon
-track_feats 1-50
-vertex_output mean
-desc festival/clunits/mcep.desc
-data 'festival/feats/w_3.feats'
-test 'festival/feats/w_3.feats'
-balance 0
-track 'festival/disttabs/w_3.mcep'
-stop 50
-output 'festival/trees/w_3_mcep.tree
Args :
state: HMM state for which the MCEP tree should be generated for.
"""
cmd = "%s -desc %s " % (WAGON_BIN, F0_DESCRIPTION_FILE)
cmd += "-track_feats %s -vertex_output %s %s %s " % (TRACK_FEATS,
VERTEX_OUTPUT, BALANCE,
STOP)
cmd += "-data %s%s%s " % (FESTIVAL_FEATS_PATH, state, FEATS_FILE_EXTENSION)
cmd += "-track %s%s%s " % (DISTLABS_PATH, state, MCEP_FILE_EXTENSION)
cmd += "-test %s%s%s " % (FESTIVAL_FEATS_PATH, state, FEATS_FILE_EXTENSION)
cmd += "-output %s%s%s%s " % (OUTPUT_TREE_PATH, state, MCEP_FILE_SUFFIX,
OUTPUT_TREE_FILE_EXTENSION)
STDOUT.write("[INFO] Generating tree for state %s\n" % (state))
STDOUT.write("[INFO] \n %s \n" % (cmd))
os.system(cmd)
return
| 13,924
|
def decode_messages(fit_bytes: bytes) -> typing.List[typing.Dict]:
"""Decode serialized messages.
Arguments:
fit_bytes: Encoded messages
Returns:
Decoded messages
"""
messages = []
for line in fit_bytes.splitlines():
payload = json.loads(line)
messages.append(schemas.WriterMessage().load(payload))
return messages
| 13,925
|
def main() :
"""
Main Function: to compute performance of Two Models with different non-linear functions for different losses
Namely, this will generate 4 graphs, for losses MSE and MAE, for each non-linear type Relu and Tanh
:return: nothing but four graphs and write the number of errors in the terminal
"""
### To Set Constants and Parameters ###
train_size = 1000
mini_batch_size = 100
with_one_hot_encoding = True
label_1_in_center = True
early_stoping = True
epoch_count = 250
look_back_count_early_stoping = 100
learning_rate = 100/(epoch_count * train_size)
loss_type_array = ['MSE','MAE']
mini_batch_size = min(train_size,mini_batch_size)
plot_inputs = False
### Generate training and testing data ###
train_input, train_target = generate_disc_set(train_size,one_hot_encoding=with_one_hot_encoding,label_1_in_center=label_1_in_center)
test_input, test_target = generate_disc_set(train_size, one_hot_encoding=with_one_hot_encoding,label_1_in_center=label_1_in_center)
if plot_inputs:
        plot_data(train_input, train_target, test_input, test_target, title="Train and Test Samples before Normalization")  # plots generated test and train inputs and targets when plot_inputs is True
### Normalize the input data ###
mean, std = train_input.mean(), train_input.std()
train_input.sub_(mean).div_(std)
test_input.sub_(mean).div_(std)
if plot_inputs:
        plot_data(train_input, train_target, test_input, test_target, title="Train and Test Samples after Normalization")  # plots normalized test and train inputs and targets when plot_inputs is True
in_feature_size = len(train_input[0])
out_feature_size = len(train_target[0])
### Creating and analysing the models ###
nnmodel_with_Tanh = NNSequential(NNLinear(in_feature_size,25),NNTanh(),NNLinear(25,25),NNTanh(),NNLinear(25,25),NNTanh(),NNLinear(25,out_feature_size) )
nnmodel_with_Relu = NNSequential(NNLinear(in_feature_size,25),NNRelu(),NNLinear(25,25),NNRelu(),NNLinear(25,25),NNRelu(),NNLinear(25,out_feature_size) )
model_Tanh_tupel = (nnmodel_with_Tanh, "Model with three hidden layers of 25 units with Tanh")
model_Relu_tupel = (nnmodel_with_Relu, "Model with three hidden layers of 25 units with Relu")
for model_iter in [model_Tanh_tupel, model_Relu_tupel]: ## for each model
print(f'\nFor the {model_iter[1]}')
for loss_type in loss_type_array: ## for each loss type
nnmodel = model_iter[0]
nnmodel.set_learning_rate(learning_rate)
nnmodel.set_loss_function(loss_type)
nnmodel.train_network(train_input, train_target, epoch_count= epoch_count, mini_batch_size=mini_batch_size, early_stoping=early_stoping, look_back_count_early_stoping=look_back_count_early_stoping)
## to analyze and plot the results
analyse_model(nnmodel,train_input,train_target,test_input,test_target, mini_batch_size,with_one_hot_encoding,title=f'Final Results with {loss_type} loss for the \n{model_iter[1]}')
| 13,926
|
def test_parse_no_pint_objects_in_df():
"""Test that there are no Pint quantities in dataframes created by parser."""
input_file = get_test_data('metar_20190701_1200.txt', mode='rt')
metar_str = ('KSLK 011151Z AUTO 21005KT 1/4SM FG VV002 14/13 A1013 RMK AO2 SLP151 70043 '
'T01390133 10139 20094 53002=')
for df in (parse_metar_file(input_file), parse_metar_to_dataframe(metar_str)):
for column in df:
assert not isinstance(df[column][0], units.Quantity)
| 13,927
|
def read_DELETE(msg, hosts):
"""Parse the DELETE request and send data to the response generator function
Args:
msg (String): The request message to parse
hosts (List): The array of hosts
Returns:
List: An array of information about the request, including status code,
filename, file length, file type and connection type
"""
request_line = [i.strip() for i in msg.split("\n")[0].split(" ")]
headers = [i.rstrip() for i in msg.split("\n")[1:]]
tmp_host = ""
tmp_file = request_line[1][1:]
for i in headers:
if i.split(":")[0] == "Host":
tmp_host = i.split(": ")[1]
for i in hosts:
if tmp_host == f"localhost:{PORT}":
# CHANGE THIS LINE IN ORDER TO SEE A STUDENT'S WEBSITE IN THE BROWSER
HOST = hosts[0][0]
break
if i[0] == tmp_host:
HOST = tmp_host
break
else:
return [404, request_line[2]]
if os.path.exists(f"./{HOST}/{tmp_file}"):
if os.path.isdir(f"./{HOST}/{tmp_file}"):
os.rmdir(f"./{HOST}/{tmp_file}")
else:
os.remove(f"./{HOST}/{tmp_file}")
return [204, request_line[2], f"./{HOST}/{tmp_file}"]
else:
return [404, request_line[2]]
| 13,928
|
def iresnet101(pretrained=False, progress=True, **kwargs):
"""
Constructs the IResNet-101 model trained on Glint360K(https://github.com/deepinsight/insightface/tree/master/recognition/partial_fc#4-download).
.. note::
The required input size of the model is 112x112.
Args:
pretrained (bool): Whether to download the pre-trained model on Glint360K. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> iresnet101 = flowvision.models.face_recognition.iresnet101(pretrained=False, progress=True)
"""
return _iresnet(
"iresnet101", IBasicBlock, [3, 13, 30, 3], pretrained, progress, **kwargs
)
| 13,929
|
def get_full_jwt(user: User) -> Dict:
"""
Get a full jwt response from the username and uid token.
"""
return {
'access_token': create_access_token(identity=user, fresh=True),
'refresh_token': create_refresh_token(identity=user)
}
| 13,930
|
def delete():
""" (Local command) Deletes the current note.
"""
path = vim.current.buffer.name
if exists(path):
confirm = vim.eval('input("really delete? (y/n): ")')
if confirm in ("y", "Y"):
remove(path)
vim.command("bd!")
vim.command("redraw!")
| 13,931
|
def GridSearch_Prophet(prophet_grid, metric='mape'):
"""
GridSearch tool to determine the optimal parameters for prophet
Args:
        - prophet_grid: List of parameters. Enter it as list(ParameterGrid(prophet_grid))
- metric: String. Not used yet. May be used to change the metric used to sort
the tested models.
Return:
- mape_table: Pandas dataframe. Show the tested parameters and median of Mean
Absolute Percentage Error calculated over 1 day.
"""
# mape_table summarizes the mean of mape according to tested parameters
mape_table = pd.DataFrame.from_dict(prophet_grid)
mape_table = mape_table[['device',
'parameter',
'begin',
'end',
'sampling_period_min',
'interval_width',
'daily_fo',
'changepoint_prior_scale']]
mape_table['mape'] = np.nan
# Loop Prophet over the prophet_grid and store the data
a = 0
name = re.sub("[']", '', str(mape_table.iloc[0, 0])) + '_Prediction_' + \
str(mape_table.iloc[a, 1])
for prophet_instance in prophet_grid:
print('\nprophet_instance nb ' + str(a))
# Run Prophet
df_pred, mape = prophet(**prophet_instance)
# store the mape
mape_table.iloc[a, 8] = mape
# Save the df_pred and figure if the mape_table has 1 row (best model)
if mape_table.shape[0] == 1:
# calculate diff between begin and end
begin_str = mape_table.iloc[a, 2]
end_str = mape_table.iloc[a, 3]
d1 = datetime.strptime(begin_str, "%Y-%m-%d")
d2 = datetime.strptime(end_str, "%Y-%m-%d")
pred_duration = abs((d2 - d1).days)
# Generate the generic name
model_name = re.sub("[']", '', str(mape_table.iloc[0, 0])) + '_' + str(mape_table.iloc[a, 3]) + \
'_cps_' + str(mape_table.iloc[a, 7]) + '_fo_' + str(mape_table.iloc[a, 6]) + '_' + \
str('{:02d}'.format(pred_duration)) + 'td'
# Save the figure
folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/figures/best/'
fig_name = folder_name + model_name + '.png'
plt.savefig(fig_name, bbox_inches="tight")
# Save the df_pred (prediction and actual values) as a csv
folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/data/processed/'
csv_name = folder_name + model_name + '.csv'
df_pred.to_csv(csv_name)
# elif a+1 == mape_table.shape[0]:
# # Store the complete mape_table if this is the last prediction
# folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/data/processed/'
# mape_table_name = folder_name + re.sub("[']", '', str(
# mape_table.iloc[0, 0])) + '_' + str(mape_table.iloc[a, 3]) + '_mape_table.csv'
mape_table = mape_table.sort_values('mape')
# mape_table.to_csv(mape_table_name)
a += 1
return mape_table
| 13,932
|
def test_reference_links_548():
"""
Test case 548: Unicode case fold is used:
"""
# Arrange
source_markdown = """[ẞ]
[SS]: /url"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:/url:::::ẞ:::::]",
"[text(1,2):ẞ:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ss:SS: :/url:::::]",
]
expected_gfm = """<p><a href="/url">ẞ</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 13,933
|
def _check_max_features(importances, max_features):
"""Interpret the max_features value"""
n_features = len(importances)
if max_features is None:
max_features = n_features
elif isinstance(max_features, int):
max_features = min(n_features, max_features)
elif isinstance(max_features, float):
max_features = int(n_features * max_features)
return max_features
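# Usage sketch (illustrative): None keeps all features, an int is capped at the number
# of features, and a float is interpreted as a fraction of the features.
importances = list(range(10))
assert _check_max_features(importances, None) == 10
assert _check_max_features(importances, 25) == 10
assert _check_max_features(importances, 0.5) == 5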
| 13,934
|
def map_blocks(func, data):
"""Curried version of Dask's map_blocks
Args:
func: the function to map
data: a Dask array
Returns:
a new Dask array
>>> f = map_blocks(lambda x: x + 1)
>>> f(da.arange(4, chunks=(2,)))
dask.array<lambda, shape=(4,), dtype=int64, chunksize=(2,)>
"""
return da.map_blocks(func, data)
| 13,935
|
def download_and_extract(
package: str,
directory: Path,
version: Optional[str] = None,
remove_after: bool = False
) -> Path:
"""Modified to allow avoiding removing files after.
Parameters
----------
package
directory
version
remove_after
Returns
-------
Examples
--------
>>> import src.constants as cte
>>> download_and_extract('six', cte.RAW)
PosixPath('/home/agustin/github_repos/top_pypi_source_code_stats/data/raw/six-1.16.0')
"""
try:
source = get_package_source(package, version)
except ValueError:
return None
print(f"Downloading {package}.")
local_file, _ = urlretrieve(source, directory / f"{package}-src")
with get_archive_manager(local_file) as archive:
print(f"Extracting {package}")
archive.extractall(path=directory)
result_dir = get_first_archive_member(archive)
if remove_after:
os.remove(local_file)
return directory / result_dir
| 13,936
|
def test_case_3_b(setup_leveldb, setup_sqlite):
""" Test case 3.b:
3. Incoming record contains 2+ OCNs that resolve to two Concordance Table primary record
b. Record OCNs + OCLC OCNs match one CID
Test datasets:
Zephir cluster: one match
CID: 008648991
OCNs: 4912741, 5066412, 23012053
OCLC OCNs:
[200, 1078101879, 1102728950, etc.] (only selected a subset for testing)
[4912741, 5066412, 23012053, 228676186, 315449541, etc.] (only selected a subset for testing)
Incoming OCN for test case:
[200, 228676186]
"""
primary_db_path = setup_leveldb["primary_db_path"]
cluster_db_path = setup_leveldb["cluster_db_path"]
db_conn_str = setup_sqlite["db_conn_str"]
incoming_ocns = [200, 228676186]
expected_oclc_clusters = [[4912741, 5066412, 23012053, 228676186, 315449541], [200, 1078101879, 1102728950]]
inquiry_ocns_zephir = [200, 4912741, 5066412, 23012053, 228676186, 315449541, 1078101879, 1102728950]
expected_cid_ocn_list = [
{"cid": '008648991', "ocn": '23012053'},
{"cid": '008648991', "ocn": '4912741'},
{"cid": '008648991', "ocn": '5066412'}]
expected_zephir_clsuters = {"008648991": ['23012053', '4912741', '5066412']}
expected_min_cid = "008648991"
result = cid_inquiry(incoming_ocns, db_conn_str, primary_db_path, cluster_db_path)
print(result)
assert result["inquiry_ocns"] == incoming_ocns
assert result["matched_oclc_clusters"] == expected_oclc_clusters
assert result["num_of_matched_oclc_clusters"] == 2
assert result["inquiry_ocns_zephir"] == inquiry_ocns_zephir
assert result["cid_ocn_list"] == expected_cid_ocn_list
assert result["cid_ocn_clusters"] == expected_zephir_clsuters
assert result["num_of_matched_zephir_clusters"] == 1
assert result["min_cid"] == expected_min_cid
| 13,937
|
def dump_config(exp_dir: str, config: Union[ConfigDict, FrozenConfigDict]) -> None:
"""Dump a config to disk.
Args:
exp_dir (str): Path to the experiment directory.
config (Union[ConfigDict, FrozenConfigDict]): The config to dump.
"""
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# Note: No need to explicitly delete the previous config file as "w" will overwrite
# the file if it already exists.
with open(os.path.join(exp_dir, "config.yaml"), "w") as fp:
yaml.dump(config.to_dict(), fp)
| 13,938
|
def premises_to_syllogism(premises):
"""
>>> premises_to_syllogism(["Aab", "Ebc"])
'AE1'
"""
figure = {"abbc": "1", "bacb": "2", "abcb": "3", "babc": "4"}[premises[0][1:] + premises[1][1:]]
return premises[0][0] + premises[1][0] + figure
| 13,939
|
def fix_fits_keywords(header):
"""
Update header keyword to change '-' by '_' as columns with '-' are not
allowed on SQL
"""
new_header = {}
for key in header.keys():
new_key = key.replace('-', '_')
new_header[new_key] = header[key]
    # Temporary fix - needs to be removed
    # Making it backwards compatible with older files.
    # Check that FILETYPE is present; if not, get it from the filename
    if 'FILETYPE' not in header.keys():
        logger.warning("Adding FILETYPE from FITSNAME pattern to header for compatibility")
# Try to get it from the filename
if re.search('_passthrough.fits', header['FITSNAME']):
new_header['FILETYPE'] = 'psth'
elif re.search('_fltd.fits', header['FITSNAME']):
new_header['FILETYPE'] = 'filtered'
# For headers without FILETYPE (i.e.: yearly) we set it to raw
else:
raise Exception("ERROR: Cannot provide suitable FILETYPE from header or pattern")
logger.warning(f"Added FILETYPE {new_header['FILETYPE']} from pattern")
return new_header
| 13,940
|
def request_password(email: str, mailer: Mailer, _tn: Translator):
"""
    Create a new hashed password and send it by mail.
:param email: Mail-address which should be queried
:param mailer: pyramid Mailer
:param _tn: Translator
:return: dict with info about mailing
"""
db_user = DBDiscussionSession.query(User).filter(func.lower(User.email) == func.lower(email)).first()
if not db_user:
LOG.debug("User could not be found for mail %s", email)
return {
'success': False,
'message': _tn.get(_.emailSentGeneric)
}
if checks_if_user_is_ldap_user(db_user):
LOG.debug("User is no LDAP user")
return {
'success': False,
'message': _tn.get(_.passwordRequestLDAP)
}
rnd_pwd = get_rnd_passwd()
hashed_pwd = get_hashed_password(rnd_pwd)
db_user.password = hashed_pwd
DBDiscussionSession.add(db_user)
db_language = DBDiscussionSession.query(Language).get(db_user.settings.lang_uid)
body = _tn.get(_.nicknameIs) + db_user.nickname + '\n'
body += _tn.get(_.newPwdIs) + rnd_pwd + '\n\n'
body += _tn.get(_.newPwdInfo)
subject = _tn.get(_.dbasPwdRequest)
success, _success_message, message = send_mail(mailer, subject, body, email, db_language.ui_locales)
return {
'success': success,
'message': _tn.get(_.emailSentGeneric)
}
| 13,941
|
def is_sum_lucky(x, y):
"""This returns a string describing whether or not the sum of input is lucky
This function first makes sure the inputs are valid and then calculates the
sum. Then, it will determine a message to return based on whether or not
that sum should be considered "lucky"
"""
    if x is not None:
        if y is not None:
            result = x + y
            if result == 7:
                return 'a lucky number!'
            else:
                return 'an unlucky number!'
    return 'just a normal number'
| 13,942
|
def unsync_function(func, *args, **kwargs):
"""Runs an async function in a standard blocking way and returns output"""
return asyncio.run(func(*args, **kwargs))
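# Usage sketch (illustrative): run a coroutine function synchronously.
async def add(a, b):
    return a + b

result = unsync_function(add, 1, 2)   # returns 3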
| 13,943
|
def zip_results(name: str, recipes: Iterable[Recipe], cache=CacheType.Auto) \
-> Recipe[Union[List[Tuple[Any, ...]], Dict[Any, Tuple[Any, ...]]]]:
"""
Create a Recipe that zips the outputs from a number of recipes into elements, similar to Python's built-in zip().
Notably, dictionaries are handled a bit differently, in that a dictionary is returned with keys mapping to tuples
from the different inputs, i.e.::
{"1": 1} zip {"1", "one"} -> {"1", (1, "one")}
:param name: The name to give the created Recipe
:param recipes: The recipes to zip. These must return lists or dictionaries
:param cache: The type of caching to use for this Recipe
:return: The created Recipe
"""
def _zip_results(*iterables: Union[List, Dict]) \
-> Union[List[Tuple[Any, ...]], Dict[Any, Tuple[Any, ...]]]:
# Sanity checks
if not iterables or len(iterables) == 0:
return []
if any(not isinstance(iterable, Iterable) for iterable in iterables):
raise ValueError("Cannot zip non-iterable inputs")
first_iterable = iterables[0]
if any(not isinstance(iterable, type(first_iterable)) for iterable in iterables):
raise ValueError("Cannot zip inputs of different types")
num_items = len(first_iterable)
if any(len(iterable) != num_items for iterable in iterables):
raise ValueError("Cannot zip inputs of different length")
# Handle the actual zipping operation
if isinstance(first_iterable, list):
return list(zip(*iterables))
elif isinstance(first_iterable, dict):
return {
key: tuple(iterable[key] for iterable in iterables)
for key in first_iterable.keys()
}
else:
raise ValueError("Type: {} not supported in _zip_results()".format(type(first_iterable)))
return Recipe(_zip_results, recipes, name, transient=False, cache=cache)
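# Illustrative sketch of the dictionary-zip semantics described in the docstring,
# shown with plain dictionaries rather than Recipe objects (which require the
# surrounding framework to evaluate):
numbers = {"1": 1, "2": 2}
words = {"1": "one", "2": "two"}
zipped = {key: (numbers[key], words[key]) for key in numbers}
# zipped == {"1": (1, "one"), "2": (2, "two")}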
| 13,944
|
def get_muscle_reference_dictionary():
"""
    Reference values taken from:
    @article{bashkatov2011optical,
title={Optical properties of skin, subcutaneous, and muscle tissues: a review},
author={Bashkatov, Alexey N and Genina, Elina A and Tuchin, Valery V},
journal={Journal of Innovative Optical Health Sciences},
volume={4},
number={01},
pages={9--38},
year={2011},
publisher={World Scientific}
}
"""
reference_dict = dict()
values650nm = TissueProperties()
values650nm[Tags.DATA_FIELD_ABSORPTION_PER_CM] = 1.04
values650nm[Tags.DATA_FIELD_SCATTERING_PER_CM] = 87.5
values650nm[Tags.DATA_FIELD_ANISOTROPY] = 0.9
values650nm[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
values650nm[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
values650nm[Tags.DATA_FIELD_OXYGENATION] = 0.175
values650nm[Tags.DATA_FIELD_DENSITY] = 1090.4
values650nm[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
values650nm[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
values700nm = TissueProperties()
values700nm[Tags.DATA_FIELD_ABSORPTION_PER_CM] = 0.48
values700nm[Tags.DATA_FIELD_SCATTERING_PER_CM] = 81.8
values700nm[Tags.DATA_FIELD_ANISOTROPY] = 0.9
values700nm[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
values700nm[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
values700nm[Tags.DATA_FIELD_OXYGENATION] = 0.175
values700nm[Tags.DATA_FIELD_DENSITY] = 1090.4
values700nm[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
values700nm[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
values750nm = TissueProperties()
values750nm[Tags.DATA_FIELD_ABSORPTION_PER_CM] = 0.41
values750nm[Tags.DATA_FIELD_SCATTERING_PER_CM] = 77.1
values750nm[Tags.DATA_FIELD_ANISOTROPY] = 0.9
values750nm[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
values750nm[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
values750nm[Tags.DATA_FIELD_OXYGENATION] = 0.175
values750nm[Tags.DATA_FIELD_DENSITY] = 1090.4
values750nm[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
values750nm[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
values800nm = TissueProperties()
values800nm[Tags.DATA_FIELD_ABSORPTION_PER_CM] = 0.28
values800nm[Tags.DATA_FIELD_SCATTERING_PER_CM] = 70.4
values800nm[Tags.DATA_FIELD_ANISOTROPY] = 0.9
values800nm[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
values800nm[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
values800nm[Tags.DATA_FIELD_OXYGENATION] = 0.175
values800nm[Tags.DATA_FIELD_DENSITY] = 1090.4
values800nm[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
values800nm[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
values850nm = TissueProperties()
values850nm[Tags.DATA_FIELD_ABSORPTION_PER_CM] = 0.3
values850nm[Tags.DATA_FIELD_SCATTERING_PER_CM] = 66.7
values850nm[Tags.DATA_FIELD_ANISOTROPY] = 0.9
values850nm[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
values850nm[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
values850nm[Tags.DATA_FIELD_OXYGENATION] = 0.175
values850nm[Tags.DATA_FIELD_DENSITY] = 1090.4
values850nm[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
values850nm[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
values900nm = TissueProperties()
values900nm[Tags.DATA_FIELD_ABSORPTION_PER_CM] = 0.32
values900nm[Tags.DATA_FIELD_SCATTERING_PER_CM] = 62.1
values900nm[Tags.DATA_FIELD_ANISOTROPY] = 0.9
values900nm[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
values900nm[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
values900nm[Tags.DATA_FIELD_OXYGENATION] = 0.175
values900nm[Tags.DATA_FIELD_DENSITY] = 1090.4
values900nm[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
values900nm[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
values950nm = TissueProperties()
values950nm[Tags.DATA_FIELD_ABSORPTION_PER_CM] = 0.46
values950nm[Tags.DATA_FIELD_SCATTERING_PER_CM] = 59.0
values950nm[Tags.DATA_FIELD_ANISOTROPY] = 0.9
values950nm[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
values950nm[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
values950nm[Tags.DATA_FIELD_OXYGENATION] = 0.175
values950nm[Tags.DATA_FIELD_DENSITY] = 1090.4
values950nm[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
values950nm[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
reference_dict[650] = values650nm
reference_dict[700] = values700nm
reference_dict[750] = values750nm
reference_dict[800] = values800nm
reference_dict[850] = values850nm
reference_dict[900] = values900nm
reference_dict[950] = values950nm
return reference_dict
| 13,945
|
def flake8_entrypoint(physical_line: str) -> Optional[Tuple[int, str]]:
"""Flake8 plugin entrypoint that operates on physical lines."""
match = RX_TODO_OR_ELSE.search(physical_line)
if match:
by = match.group(2)
pact = match.group(3).strip()
try:
TodoOrElse().by(pact, by=by)
except PactViolatedException as e:
return match.start(), f"{CODE} {e.short()}"
return None
| 13,946
|
def test_to_config_dict_given_non_configurable() -> None:
"""
Test that ``to_config_dict`` errors when passed an instance that does not
descend from configurable.
"""
class SomeOtherClassType (object):
pass
inst = SomeOtherClassType()
with pytest.raises(ValueError,
match="c_inst must be an instance and its type must "
r"subclass from Configurable\."):
# noinspection PyTypeChecker
to_config_dict(inst)
| 13,947
|
def smartquotes(text):
"""
Runs text through pandoc for smartquote correction.
This script accepts a paragraph of input and outputs typographically correct
text using pandoc. Note line breaks are not retained.
"""
command = shlex.split('pandoc --smart -t plain')
com = Popen(command, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = com.communicate(text.encode('utf-8'))
com_out = out.decode('utf-8')
text = com_out.replace('\n', ' ').strip()
return text
| 13,948
|
def get_nearest_stations_xy(x, y, variable, n=1, stations=None, ignore=None):
"""find the KNMI stations that measure 'variable' closest to the
x, y coordinates
Parameters
----------
x : int or float
x coordinate in RD
y : int or float
x coordinate in RD
variable : str
measurement variable e.g. 'RD' or 'EV24'
n : int, optional
number of stations you want to return. The default is 1.
stations : pd.DataFrame, optional
if None stations will be obtained using the get_stations function.
The default is None.
ignore : list, optional
list of stations to ignore. The default is None.
Returns
-------
list
station numbers.
"""
if stations is None:
stations = get_stations(variable=variable)
if ignore is not None:
stations.drop(ignore, inplace=True)
if stations.empty:
return None
d = np.sqrt((stations.x - x)**2 + (stations.y - y)**2)
return d.nsmallest(n).index.to_list()
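# Usage sketch (illustrative, assumes pandas as pd and numpy as np are imported):
# passing a small stations DataFrame avoids the call to get_stations().
stations = pd.DataFrame({'x': [0.0, 1000.0, 5000.0],
                         'y': [0.0, 1000.0, 5000.0]},
                        index=[260, 310, 344])
nearest = get_nearest_stations_xy(900.0, 900.0, 'EV24', n=2, stations=stations)
# nearest == [310, 260]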
| 13,949
|
def parseStdInput():
"""Obtain a graph by parsing the standard input
as per the format specified in the PACE Challange.
"""
edges = [(1,2),(2,3),(3,4),(4,1)]
G = nx.Graph()
G.add_edges_from(edges)
return G
| 13,950
|
def train_worker(dpw,
reverb_client,
config):
"""Train worker loop.
Collects rollouts and writes sampled discounted state visitation to reverb
indefinitely (xmanager should kill this job when the learner job ends).
Args:
dpw: The discretized puddle world to use.
reverb_client: The reverb client for writing data to reverb.
config: Experiment configuration.
"""
while True:
rollout = pw_utils.generate_rollout(dpw, config.env.pw.rollout_length)
sr = pw_utils.calculate_empricial_successor_representation(
dpw, rollout, config.env.pw.gamma)
pos = np.asarray(
[rollout[0].state.true_state.x, rollout[0].state.true_state.y],
dtype=np.float32)
reverb_client.insert((pos, rollout[0].state.bin_idx, sr),
priorities={_TRAIN_TABLE: 1.0})
| 13,951
|
def strfdelta(tdelta, fmt):
""" Get a string from a timedelta.
"""
f, d = Formatter(), {}
l = {"D": 86400, "H": 3600, "M": 60, "S": 1}
k = list(map(lambda x: x[1], list(f.parse(fmt))))
rem = int(tdelta.total_seconds())
for i in ("D", "H", "M", "S"):
if i in k and i in l.keys():
d[i], rem = divmod(rem, l[i])
return f.format(fmt, **d)
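# Usage sketch (illustrative, assumes `from string import Formatter` at module level):
from datetime import timedelta
print(strfdelta(timedelta(days=1, hours=2, minutes=3, seconds=4), "{D}d {H}h {M}m {S}s"))
# -> "1d 2h 3m 4s"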
| 13,952
|
def get_file_list_from_dir(parent_dir: Path, file_mask: str = "*") -> list:
"""
Recursively gets a list of files in a Path directory with the specified name mask
and return absolute string paths for files
"""
get_logger(__name__).debug("Iterating for files in '{}'".format(parent_dir.absolute()))
src_glob = parent_dir.rglob(file_mask)
src_files = [str(f.absolute()) for f in src_glob if f.is_file()]
get_logger(__name__).debug("Iterated and found {} files in '{}'".format(len(src_files), parent_dir.absolute()))
return src_files
| 13,953
|
def imputation_Y(X, model):
"""Perform imputation. Don't normalize for depth.
Args:
X: feature matrix from h5.
model: a trained scBasset model.
Returns:
array: a peak*cell imputed accessibility matrix. Sequencing depth
isn't corrected for.
"""
Y_impute = model.predict(X)
return Y_impute
| 13,954
|
def format_percent(x, _pos=None):
"""
plt.gca().yaxis.set_major_formatter(format_percent)
"""
x = 100 * x
if abs(x - round(x)) > 0.05:
return r"${:.1f}\%$".format(x)
else:
return r"${:.0f}\%$".format(x)
| 13,955
|
def is_row_and_col_balanced(T1, T2):
"""
Partial latin squares T1 and T2 are balanced if the symbols
appearing in row r of T1 are the same as the symbols appearing in
row r of T2, for each r, and if the same condition holds on
columns.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: T1 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
sage: T2 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
sage: is_row_and_col_balanced(T1, T2)
True
sage: T2 = matrix([[0,3,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
sage: is_row_and_col_balanced(T1, T2)
False
"""
for r in range(T1.nrows()):
val1 = set(x for x in T1.row(r) if x >= 0)
val2 = set(x for x in T2.row(r) if x >= 0)
if val1 != val2: return False
for c in range(T1.ncols()):
val1 = set(x for x in T1.column(c) if x >= 0)
val2 = set(x for x in T2.column(c) if x >= 0)
if val1 != val2: return False
return True
| 13,956
|
def submit(ds, entry_name, molecule, index):
"""
Submit an optimization job to a QCArchive server.
Parameters
----------
ds : qcportal.collections.OptimizationDataset
The QCArchive OptimizationDataset object that this calculation
belongs to
entry_name : str
The base entry name that the conformation belongs to. Usually,
this is a canonical SMILES, but can be anything as it is represents
a key in a dictionary-like datastructure. This will be used as an
entry name in the dataset
molecule : QCMolecule
The JSON representation of a QCMolecule, which has geometry
and connectivity present, among others
index : int
The conformation identifier of the molecule. This is used to make
the entry names unique, since each conformation must have its own
unique entry in the dataset in the dataset
Returns
-------
(unique_id, success): tuple
unique_id : str
The unique_id that was submitted to the dataset. This is the name
of the new entry in the dataset.
success : bool
Whether the dataset was able to successfully add the entry. If this
is False, then the entry with the name corresponding to unique_id
was already present in the dataset.
"""
# This workaround prevents cmiles from crashing if OE is installed but has
# no license. Even though rdkit is specified, protomer enumeration is OE-
# specific and still attempted.
# oe_flag = cmiles.utils.has_openeye
# cmiles.utils.has_openeye = False
# attrs = cmiles.generator.get_molecule_ids(molecule, toolkit="rdkit")
# cmiles.utils.has_openeye = oe_flag
CIEHMS = "canonical_isomeric_explicit_hydrogen_mapped_smiles"
molecule["extras"] = {CIEHMS: entry_name}
attrs = {CIEHMS: entry_name}
unique_id = entry_name + f"-{index}"
success = False
try:
ds.add_entry(unique_id, molecule, attributes=attrs, save=False)
success = True
except KeyError:
pass
return unique_id, success
| 13,957
|
def db():
"""Database queries (including initialization)."""
| 13,958
|
def list_isos(apiclient, **kwargs):
"""Lists all available ISO files."""
cmd = listIsos.listIsosCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listIsos(cmd))
| 13,959
|
async def edit_message_live_location(
token: str = TOKEN_VALIDATION,
latitude: float = Query(..., description='Latitude of new location'),
longitude: float = Query(..., description='Longitude of new location'),
chat_id: Optional[Union[int, str]] = Query(None, description='Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)'),
message_id: Optional[int] = Query(None, description='Required if inline_message_id is not specified. Identifier of the message to edit'),
inline_message_id: Optional[str] = Query(None, description='Required if chat_id and message_id are not specified. Identifier of the inline message'),
horizontal_accuracy: Optional[float] = Query(None, description='The radius of uncertainty for the location, measured in meters; 0-1500'),
heading: Optional[int] = Query(None, description='Direction in which the user is moving, in degrees. Must be between 1 and 360 if specified.'),
proximity_alert_radius: Optional[int] = Query(None, description='Maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified.'),
reply_markup: Optional[Json['InlineKeyboardMarkupModel']] = Query(None, description='A JSON-serialized object for a new inline keyboard.'),
) -> JSONableResponse:
"""
Use this method to edit live location messages. A location can be edited until its live_period expires or editing is explicitly disabled by a call to stopMessageLiveLocation. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned.
https://core.telegram.org/bots/api#editmessagelivelocation
"""
reply_markup: Optional[InlineKeyboardMarkupModel] = parse_obj_as(
Optional[InlineKeyboardMarkupModel],
obj=reply_markup,
)
from .....main import _get_bot
bot = await _get_bot(token)
try:
entity = await get_entity(bot, chat_id)
except BotMethodInvalidError:
assert isinstance(chat_id, int) or (isinstance(chat_id, str) and len(chat_id) > 0 and chat_id[0] == '@')
entity = chat_id
except ValueError:
raise HTTPException(404, detail="chat not found?")
# end try
result = await bot.edit_message_live_location(
latitude=latitude,
longitude=longitude,
entity=entity,
message_id=message_id,
inline_message_id=inline_message_id,
horizontal_accuracy=horizontal_accuracy,
heading=heading,
proximity_alert_radius=proximity_alert_radius,
reply_markup=reply_markup,
)
data = await to_web_api(result, bot)
return r_success(data.to_array())
| 13,960
|
def process_rollout(rollout, gamma, lambda_=1.0):
"""
given a rollout, compute its returns and the advantage
"""
batch_si = np.asarray(rollout.states)
batch_a = np.asarray(rollout.actions)
rewards = np.asarray(rollout.rewards)
action_reward = np.concatenate((batch_a,rewards[:,np.newaxis]), axis=1)
vpred_t = np.asarray(rollout.values + [rollout.r])
rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])
batch_r = discount(rewards_plus_v, gamma)[:-1]
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
# this formula for the advantage comes "Generalized Advantage Estimation":
# https://arxiv.org/abs/1506.02438
batch_adv = discount(delta_t, gamma * lambda_)
features = rollout.features
batch_pc = np.asarray(rollout.pixel_changes)
return Batch(batch_si, batch_a, action_reward, batch_adv, batch_r, rollout.terminal, features, batch_pc)
| 13,961
|
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add sensors for passed config_entry in HA."""
coordinator: IotawattUpdater = hass.data[DOMAIN][config_entry.entry_id]
created = set()
@callback
def _create_entity(key: str) -> IotaWattSensor:
"""Create a sensor entity."""
created.add(key)
return IotaWattSensor(
coordinator=coordinator,
key=key,
mac_address=coordinator.data["sensors"][key].hub_mac_address,
name=coordinator.data["sensors"][key].getName(),
entity_description=ENTITY_DESCRIPTION_KEY_MAP.get(
coordinator.data["sensors"][key].getUnit(),
IotaWattSensorEntityDescription("base_sensor"),
),
)
async_add_entities(_create_entity(key) for key in coordinator.data["sensors"])
@callback
def new_data_received():
"""Check for new sensors."""
entities = [
_create_entity(key)
for key in coordinator.data["sensors"]
if key not in created
]
if entities:
async_add_entities(entities)
coordinator.async_add_listener(new_data_received)
| 13,962
|
def calculate_cost(cost, working_days_flag, month, nr_of_passes):
"""Calculate the monthly tolls cost"""
if working_days_flag:
passes = working_days(month) * nr_of_passes
else:
now = datetime.datetime.now()
passes = calendar.monthrange(now.year, month)[1] * nr_of_passes
total_cost = 0
for i in range(1, passes + 1):
if 1 <= i <= 5:
total_cost += cost
elif 6 <= i <= 10:
total_cost += cost - (cost * 15 / 100)
elif 11 <= i <= 20:
total_cost += cost - (cost * 30 / 100)
elif 21 <= i <= 30:
total_cost += cost - (cost * 40 / 100)
elif 31 <= i <= 40:
total_cost += cost - (cost * 50 / 100)
elif 41 <= i <= 60:
total_cost += cost - (cost * 60 / 100)
else:
total_cost += cost
return total_cost
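# Worked example (illustrative): with working_days_flag=False, month=1 and nr_of_passes=1,
# calendar.monthrange gives 31 passes for January, so for cost = 1.0 the total is
# 5*1.0 + 5*0.85 + 10*0.70 + 10*0.60 + 1*0.50 = 22.75.
total = calculate_cost(1.0, False, 1, 1)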
| 13,963
|
def e_dl() -> str:
"""Fetch size of archives to be downloaded for next system update."""
size = 'Calculating...'
with open(file=TMERGE_LOGFILE, mode='r', encoding='utf-8') as log_file:
for line in list(log_file)[::-1]:
reqex = search(r'(Size of downloads:.)([0-9,]*\s[KMG]iB)', line)
if reqex is not None:
size = reqex.group(2)
break
print(size)
return size
| 13,964
|
def get_model_fn():
"""Returns the model definition."""
def model_fn(features, labels, mode, params):
"""Returns the model function."""
feature = features['feature']
print(feature)
labels = labels['label']
one_hot_labels = model_utils.get_label(
labels,
params,
FLAGS.src_num_classes,
batch_size=FLAGS.train_batch_size)
def get_logits():
"""Return the logits."""
avg_pool = model.conv_model(feature, mode)
name = 'final_dense_dst'
with tf.variable_scope('target_CLS'):
logits = tf.layers.dense(
inputs=avg_pool, units=FLAGS.src_num_classes, name=name)
return logits
logits = get_logits()
logits = tf.cast(logits, tf.float32)
dst_loss = tf.losses.softmax_cross_entropy(
logits=logits,
onehot_labels=one_hot_labels,
)
dst_l2_loss = FLAGS.weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if 'batch_normalization' not in v.name and 'kernel' in v.name
])
loss = dst_loss + dst_l2_loss
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
cur_finetune_step = tf.train.get_global_step()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
finetune_learning_rate = lr_schedule()
optimizer = tf.train.AdamOptimizer(finetune_learning_rate)
train_op = tf.contrib.slim.learning.create_train_op(loss, optimizer)
with tf.variable_scope('finetune'):
train_op = optimizer.minimize(loss, cur_finetune_step)
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
eval_metrics = model_utils.metric_fn(labels, logits)
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.control_dependencies([train_op]):
tf.summary.scalar('classifier/finetune_lr', finetune_learning_rate)
else:
train_op = None
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metrics,
)
return model_fn
| 13,965
|
def build_node(idx, node_type):
""" Build node list
:idx: a value to id mapping dict
:node_type: a string describe the node type
:returns: a list of records of the nodes extracted from the mapping
"""
return rekey(idx, 'value', 'id:ID', {':LABEL': node_type})
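# `rekey` is not shown in this excerpt; a minimal sketch of what it is assumed to do
# (rename the mapping's key/value fields and attach fixed extra columns) might look like:
def rekey_sketch(idx, key_name, id_name, extra):
    return [{key_name: value, id_name: ident, **extra} for value, ident in idx.items()]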
| 13,966
|
def _getTypeFromExtension(path, mode='write'):
"""
Parameters
----------
path : str
path from with to pull the extension from - note that it may NOT be
ONLY the extension - ie, "obj" and ".obj", will not work, but
"foo.obj" will
mode : {'write', 'read'}
the type is basically a string name of a file translator, which can
have different ones registered for reading or writing; this specifies
whether you're looking for the read or write translator
"""
pass
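# The function above is only a documented stub. A purely hypothetical sketch of the
# behaviour its docstring describes, assuming a made-up registry of translators per mode:
_TRANSLATOR_REGISTRY = {'write': {'.obj': 'OBJexport'}, 'read': {'.obj': 'OBJimport'}}
def _get_type_from_extension_sketch(path, mode='write'):
    import os
    ext = os.path.splitext(path)[1].lower()
    return _TRANSLATOR_REGISTRY.get(mode, {}).get(ext)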
| 13,967
|
def residual_block(filters, repetitions, kernel_size=(3, 3), strides=(2, 2), is_first_layer=False):
"""Builds a residual block with repeating bottleneck blocks.
"""
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = strides
input = basic_block(filters=filters,kernel_size=kernel_size, init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f
| 13,968
|
def vpg_omega(X,Y,Gamma=1, sigma=1, polarIn=False):
"""
Vorticity distribution for 2D Gaussian vortex patch
"""
if polarIn:
r = X
else:
r = np.sqrt(X ** 2 + Y ** 2)
omega_z = Gamma/(np.pi*sigma) * (np.exp(- r**2/sigma**2))
return omega_z
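# Usage sketch: sample the patch vorticity on a small Cartesian grid; the value at
# the origin is the peak Gamma / (pi * sigma).
import numpy as np
xs = np.linspace(-2.0, 2.0, 5)
X_grid, Y_grid = np.meshgrid(xs, xs)
omega_grid = vpg_omega(X_grid, Y_grid, Gamma=1.0, sigma=1.0)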
| 13,969
|
async def _run_filter_tests(hass, tests, process_queue, mock_batch):
"""Run a series of filter tests on azure event hub."""
for test in tests:
hass.states.async_set(test.id, STATE_ON)
await hass.async_block_till_done()
await process_queue(None)
if test.should_pass:
mock_batch.add.assert_called_once()
mock_batch.add.reset_mock()
else:
mock_batch.add.assert_not_called()
| 13,970
|
def test_example_file_passing_using_fixture(fs_reload_example):
"""Test passes if using a fixture that reloads the module containing
EXAMPLE_FILE"""
fs_reload_example.create_file(example.EXAMPLE_FILE, contents='stuff here')
check_that_example_file_is_in_fake_fs()
| 13,971
|
def autoload():
"""
Attempts to load (import) notification handlers from modules defined in ``PYNOTIFY_AUTOLOAD_MODULES``
"""
modules = settings.AUTOLOAD_MODULES
if modules:
for module in modules:
try:
import_module(module)
except ImportError:
logger.exception('Failed to autoload notification handlers from module {}'.format(module))
| 13,972
|
def get_outgroup(tree: CassiopeiaTree, triplet: Tuple[str, str, str]) -> str:
"""Infers the outgroup of a triplet from a CassioepiaTree.
Finds the outgroup based on the depth of the latest-common-ancestors
of each pair of items. The pair with the deepest LCA is the
ingroup and the remaining leaf is the outgroup. We infer the depth
of the LCA from the number of shared ancestors.
Args:
tree: CassiopeiaTree
triplet: A tuple of three leaves constituting a triplet.
Returns:
The outgroup (i.e. the most distal leaf in the triplet.)
"""
i, j, k = triplet[0], triplet[1], triplet[2]
i_ancestors = tree.get_all_ancestors(i)
j_ancestors = tree.get_all_ancestors(j)
k_ancestors = tree.get_all_ancestors(k)
ij_common = len(set(i_ancestors) & set(j_ancestors))
ik_common = len(set(i_ancestors) & set(k_ancestors))
jk_common = len(set(j_ancestors) & set(k_ancestors))
out_group = "None"
if ij_common > jk_common and ij_common > ik_common:
out_group = k
elif ik_common > jk_common and ik_common > ij_common:
out_group = j
elif jk_common > ij_common and jk_common > ik_common:
out_group = i
return out_group
| 13,973
|
def delete_host(resource_root, host_id):
"""
Delete a host by id
@param resource_root: The root Resource object.
@param host_id: Host id
@return: The deleted ApiHost object
"""
return call(resource_root.delete, "%s/%s" % (HOSTS_PATH, host_id), ApiHost)
| 13,974
|
def get_preds(model: nn.Module, image: Union[np.ndarray, str], **kwargs) -> Tuple[List, List, List]:
"""
    Generate predictions for the given `image` using `model`.
"""
logger = _get_logger(name=__name__)
    # load in the image if a path string is given
if isinstance(image, str):
image = Image.open(image).convert("RGB")
# Convert PIL image to array
image = np.array(image)
# Convert Image to a tensor
tensor_image = transforms(image=image)["image"]
    # Generate predictions
model.eval()
pred = model.predict([tensor_image])
# Gather the bbox, scores & labels from the preds
pred_boxes = pred[0]["boxes"] # Bounding boxes
pred_class = pred[0]["labels"] # predicted class labels
pred_score = pred[0]["scores"] # predicted scores
# Process detections
boxes = list(pred_boxes.cpu().numpy())
clas = list(pred_class.cpu().numpy())
scores = list(pred_score.cpu().numpy())
return boxes, clas, scores
| 13,975
|
def get_csv_data(filepath, source='DoQ'):
"""
Yield a large csv row by row to avoid memory overload
"""
    if source == 'DoQ':
with open(filepath, "rt") as csvfile:
datareader = csv.reader(csvfile, delimiter='\t')
for row in datareader:
if row[2] == 'LENGTH' or row[2] == 'VOLUME': #filter only by length or volume measures
yield row
else:
with open(filepath, "rt") as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
for row in datareader: yield row
| 13,976
|
async def unblock_func(func_name: object,
func_args,
logger=None,
default_res=None,
is_new_loop=False,):
"""
异步函数非阻塞
:param func_name: def 函数对象名
:param func_args: 请求参数可迭代对象(必须遵循元素入参顺序!)
:param logger:
:param default_res: 默认返回结果
:param is_new_loop: 是否开启新loop, True容易造成OSError, too many file open错误
:return:
"""
    # TODO notice: a process/thread can only have one event loop
loop = get_event_loop() if not is_new_loop else new_event_loop()
try:
default_res = await loop.run_in_executor(None, func_name, *func_args)
except Exception as e:
        _print(msg='Encountered an error:', logger=logger, log_level=2, exception=e)
finally:
# loop.close()
try:
del loop
except:
pass
collect()
return default_res
| 13,977
|
def replace_if_has_wiki_link(line: str, folder_dict: Dict) -> Tuple[str, int]:
""" ^title
:return: (string with all wikis replaced, replacement count)
"""
embed_rule = re.compile(re_md_reference)
wiki_partial_rule = re.compile(re_md_wiki_partial)
wiki_rule = re.compile(re_md_wiki)
new_line = line
wiki_count = 0
# ![[xxxx.png]] -> 
while (match := re.search(embed_rule, new_line)):
new_line = handle_rewrite(match, new_line, folder_dict, "embed")
wiki_count += 1
# [[xxxx|yyy]] -> [[yyy]](...xxxx.md) todo: not implemented
while (match := re.search(wiki_partial_rule, new_line)):
new_line = handle_rewrite(match, new_line, folder_dict, "md_partial")
wiki_count += 1
# [[xxxx]] -> [xxx](...xxxx.md)
while (match := re.search(wiki_rule, new_line)):
new_line = handle_rewrite(match, new_line, folder_dict, "md")
        wiki_count += 1
# new_line=line if no changes made
return new_line, wiki_count
| 13,978
|
def init_w(w, n):
"""
:purpose:
Initialize a weight array consistent of 1s if none is given
This is called at the start of each function containing a w param
:params:
w : a weight vector, if one was given to the initial function, else None
NOTE: w MUST be an array of np.float64. so, even if you want a boolean w,
convert it to np.float64 (using w.astype(np.float64)) before passing it to
any function
n : the desired length of the vector of 1s (often set to len(u))
:returns:
w : an array of 1s with shape (n,) if w is None, else return w un-changed
"""
if w is None:
return np.ones(n)
else:
return w
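# Usage sketch: the weight vector defaults to ones when none is supplied, and is
# passed through unchanged otherwise.
import numpy as np
w_default = init_w(None, 5)                                   # array of five 1.0s
w_custom = init_w(np.array([0.5, 2.0], dtype=np.float64), 2)  # returned as-is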
| 13,979
|
def parallelise_edges(xy, edges, targets, i_nbrs, ij_e, fixed=None, kmax=100, lmin=None, lmax=None, callback=None):
"""Parallelise the edges of a mesh to given target vectors.
Parameters
----------
xy : list
The XY coordinates of the vertices of the edges.
edges : list
The edges as pairs of indices in ``xy``.
targets : list
A target vector for every edge.
i_nbrs : dict
A list of neighbours per vertex.
ij_e : dict
An edge index per vertex pair.
fixed : list, optional
The fixed nodes of the mesh.
Default is ``None``.
kmax : int, optional
Maximum number of iterations.
Default is ``100``.
lmin : list, optional
Minimum length per edge.
Default is ``None``.
lmax : list, optional
Maximum length per edge.
Default is ``None``.
callback : callable, optional
A user-defined callback function to be executed after every iteration.
Default is ``None``.
Returns
-------
None
Examples
--------
>>>
"""
if callback:
if not callable(callback):
raise Exception('The provided callback is not callable.')
fixed = fixed or []
fixed = set(fixed)
n = len(xy)
for k in range(kmax):
xy0 = [[x, y] for x, y in xy]
uv = [[xy[j][0] - xy[i][0], xy[j][1] - xy[i][1]] for i, j in edges]
lengths = [(dx**2 + dy**2)**0.5 for dx, dy in uv]
if lmin:
lengths[:] = [max(a, b) for a, b in zip(lengths, lmin)]
if lmax:
lengths[:] = [min(a, b) for a, b in zip(lengths, lmax)]
for j in range(n):
if j in fixed:
continue
nbrs = i_nbrs[j]
x, y = 0.0, 0.0
for i in nbrs:
ax, ay = xy0[i]
if (i, j) in ij_e:
e = ij_e[(i, j)]
l = lengths[e] # noqa: E741
tx, ty = targets[e]
x += ax + l * tx
y += ay + l * ty
else:
e = ij_e[(j, i)]
l = lengths[e] # noqa: E741
tx, ty = targets[e]
x += ax - l * tx
y += ay - l * ty
xy[j][0] = x / len(nbrs)
xy[j][1] = y / len(nbrs)
for (i, j) in ij_e:
e = ij_e[(i, j)]
if lengths[e] == 0.0:
c = midpoint_point_point_xy(xy[i], xy[j])
xy[i][:] = c[:][:2]
xy[j][:] = c[:][:2]
if callback:
callback(k, xy, edges)
| 13,980
|
def _ensure_min_resources(progs, cores, memory, min_memory):
"""Ensure setting match minimum resources required for used programs.
"""
for p in progs:
if p in min_memory:
if not memory or cores * memory < min_memory[p]:
memory = float(min_memory[p]) / cores
return cores, memory
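# Usage sketch with made-up numbers: a program needing 8 GB total on 4 cores bumps
# the per-core memory from 1.5 to 2.0.
cores_out, memory_out = _ensure_min_resources(["gatk"], cores=4, memory=1.5, min_memory={"gatk": 8})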
| 13,981
|
def ctypes_pointer(name):
"""Create a ctypes type representing a C pointer to a custom data type ``name``."""
return type("c_%s_p" % name, (ctypes.c_void_p,), {})
| 13,982
|
def generate_linear_data(n, betas, sigma):
"""Generate pandas df with x and y variables related by a linear equation.
Export data as csv.
:param n: Number of observations.
:param betas: beta parameters.
:param sigma: standard deviation
:return: None
"""
x = np.linspace(start=0.0, stop=1.0, num=n)
y = betas[0] + betas[1]*x + np.random.normal(loc=1, scale=sigma, size=n)
df = pd.DataFrame({'x': x, 'y': y})
df.to_csv('data/train_data.csv', index=False)
return None
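# Usage sketch (assumes a local `data/` directory exists for the CSV output and that
# numpy/pandas are imported at module level, as the function requires):
generate_linear_data(n=100, betas=[1.0, 2.5], sigma=0.3)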
| 13,983
|
def entry(
text,
*,
foreground: str = "",
background: str = "",
sgr: str = "",
jump_line: str = "\n> ",
) -> str:
"""
    This function wraps the built-in ``input``, adding the option of
    coloring the prompt and some different formatting.
Note: If you use Windows, the coloring option will not work.
>>> from snakypy.helpers import entry, FG
>>> entry("What's your name?", foreground=FG().QUESTION)
➜ What's your name?
> 'snakypy'
>>> entry("What's your name?", foreground=FG().BLUE)
➜ What's your name?
> 'snakypy'
>>> entry("What's your name?", foreground=FG().GREEN)
➜ What's your name?
> 'snakypy'
Args:
text (object): Argument must receive an object
foreground (str): This named argument should optionally receive \
an object of class "snakypy.helpers.ansi.FG" for the foreground \
color of the text. This object will be text with ansi code. \
(default: '')
background (str): This named argument should optionally receive \
an object of class "snakypy.helpers.ansi.BG" for the background \
color of the text. This object will be text with ansi code. \
(default: '')
sgr (str): This named argument should optionally receive \
an object of class "snakypy.helpers.ansi.SGR" for the effect \
of the text. This object will be text with ansi code. \
(default: '')
jump_line (str): Named argument that makes the action of skipping a line \
and adding a greater sign to represent an arrow. You change \
            that argument to your liking. (default: '\n> ') \
"""
# TODO: DEPRECATED
# check_fg_bg_sgr(FG, BG, SGR, foreground, background, sgr)
try:
return input(f"{NONE}{sgr}{foreground}{background}{text}{jump_line}{NONE}")
except KeyboardInterrupt:
print(f"\n{FG().WARNING} Aborted by user.{NONE}")
return "Aborted by user."
except TypeError:
print(f"\n{FG().ERROR} Input value not defined.{NONE}")
return "Input value not defined."
| 13,984
|
def vrotate_3D(vec: np.ndarray,
ref: np.ndarray) -> np.ndarray:
"""Rotates a vector in a 3D space.
Returns the rotation matrix for `vec` to match the orientation of a
reference vector `ref`.
https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311
Parameters
----------
vec
Vector to rotate, as a numpy 1D array
ref
Reference vector, as a numpy 1D array
Returns
-------
np.ndarray
(3,3) rotation matrix, as a numpy 2D array
"""
def norm(A):
return sqrt(np.dot(A, A))
# G = np.matrix([
# [np.dot(A, B), -norm(np.cross(A, B)), 0.0],
# [norm(np.cross(A, B)), np.dot(A, B), 0.0],
# [0.0, 0.0, 1.0]
# ])
# F = np.matrix([
# A,
# (B-np.dot(A, B)*A)/norm(B-np.dot(A, B)*A),
# np.cross(B, A)/norm(np.cross(B, A))
# ])
# return F.I*G*F
V = np.cross(vec, ref)
S = norm(V)
if abs(S) < 1.0e-6:
# Already collinear, nothing to do
return np.eye(3)
else:
C = np.dot(vec, ref)
Vx = np.matrix([[0.0, -V[2], V[1]],
[V[2], 0.0, -V[0]],
[-V[1], V[0], 0.0]])
return np.eye(3) + Vx + Vx**2*(1.0-C)/S**2
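# Usage sketch: the matrix that rotates the x axis onto the y axis; applying it to
# the original vector recovers the reference direction (note the result is an np.matrix).
import numpy as np
R = vrotate_3D(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
rotated = R @ np.array([1.0, 0.0, 0.0])  # approximately [[0. 1. 0.]]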
| 13,985
|
def inject_data(image,
key=None, net=None, metadata=None, admin_password=None,
files=None, partition=None, use_cow=False):
"""Injects a ssh key and optionally net data into a disk image.
it will mount the image as a fully partitioned disk and attempt to inject
into the specified partition number.
If partition is not specified it mounts the image as a single partition.
"""
img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
if img.mount():
try:
inject_data_into_fs(img.mount_dir,
key, net, metadata, admin_password, files)
finally:
img.umount()
else:
raise exception.NovaException(img.errors)
| 13,986
|
def asymptotic_decay(learning_rate, t, max_iter):
"""Decay function of the learning process.
Parameters
----------
learning_rate : float
current learning rate.
t : int
current iteration.
max_iter : int
maximum number of iterations for the training.
"""
return learning_rate / (1+t/(max_iter/2))
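# Usage sketch: with a base rate of 0.5 the rate halves by mid-training
# (t = max_iter / 2) and falls to a third of the base at the final iteration.
lr_mid = asymptotic_decay(0.5, t=500, max_iter=1000)   # 0.25
lr_end = asymptotic_decay(0.5, t=1000, max_iter=1000)  # ~0.167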
| 13,987
|
def get_cell_content(browser, author):
"""
get novel cells
return [cell, cell, cell]
"""
content = list()
cells = browser.find_all(class_='t t2')
for cell in cells:
if cell.find(class_='r_two').b.string != author:
continue
for cell_content in cell.find(class_=['tpc_content do_not_catch', 'tpc_content']).strings:
content.append(cell_content.strip())
return "\n".join(content)
| 13,988
|
def add_supported_cxxflags(self, cxxflags):
"""
Check which cxxflags are supported by compiler and add them to env.CXXFLAGS variable
"""
if len(cxxflags) == 0:
return
self.start_msg('Checking supported CXXFLAGS')
supportedFlags = []
for flags in cxxflags:
flags = Utils.to_list(flags)
if self.check_cxx(cxxflags=['-Werror'] + flags, mandatory=False):
supportedFlags += flags
self.end_msg(' '.join(supportedFlags))
self.env.prepend_value('CXXFLAGS', supportedFlags)
| 13,989
|
def iter_meta_refresh(file):
"""Iterate through meta refreshes from a file.
Args:
file: str, path-like, or file-like object
"""
try:
fh = open(file, 'rb')
except TypeError:
fh = file
except FileNotFoundError:
fh = None
if not fh:
return
try:
contexts = []
for event, elem in etree.iterparse(fh, html=True, events=('start', 'end')):
if event == 'start':
if elem.tag in META_REFRESH_CONTEXT_TAGS:
contexts.append(elem.tag)
continue
if (elem.tag == 'meta' and
elem.attrib.get('http-equiv', '').lower() == 'refresh'):
time, _, content = elem.attrib.get('content', '').partition(';')
try:
time = int(time)
except ValueError:
time = 0
match_url = META_REFRESH_REGEX_URL.search(content)
target = match_url.group(1) if match_url else None
context = contexts.copy() if contexts else None
yield MetaRefreshInfo(time=time, target=target, context=context)
elif event == 'end':
if contexts and elem.tag == contexts[-1]:
contexts.pop()
continue
# clean up to save memory
elem.clear()
while elem.getprevious() is not None:
try:
del elem.getparent()[0]
except TypeError:
# broken html may generate extra root elem
break
finally:
if fh != file:
fh.close()
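# Usage sketch (assumes the module's `etree` is lxml's, and that META_REFRESH_REGEX_URL
# extracts the url= part of the content attribute):
import io
_html = b'<html><head><meta http-equiv="refresh" content="5; url=target.html"></head></html>'
for _info in iter_meta_refresh(io.BytesIO(_html)):
    print(_info.time, _info.target)  # expected: 5 target.html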
| 13,990
|
def kmeans(data: cp.ndarray, K: int, centroids: cp.ndarray):
"""
Clusters points into k clusters using k_means clustering.
"""
print("Start K-means clustering.")
N, D = data.shape
new_centroids = cp.full((K, D), 0.0)
loop = 1
while loop < 200:
# assign each point to nearest cluster
distance = cp.full((K, N), 0.0)
for centroid_idx in range(K):
            # NEW: extract the underlying array when using static methods from cupy
distance[centroid_idx] = cp.linalg.norm(data - centroids[centroid_idx], axis=1)
assignment = cp.argmin(distance, axis=0)
for i in range(K):
condition = assignment == i
# build new clusters
cluster = data[condition]
# compute new centroids
if cluster.size != 0:
new_centroids[i] = cp.mean(cluster, axis=0)
        # stop when the distance between the current and previous centroids falls below a threshold
if check_stop_criteria(new_centroids, centroids, K):
            pass  # using 'pass' here instead of 'break' makes the loop run a fixed number of iterations
loop += 1
# NEW: keep the reference unchanged
# LHS: PArray / RHS: return a numpy.ndarray
# centroids = np.copy(new_centroids)
centroids[:] = np.copy(new_centroids)
print(f"K-means done with {loop} loops.")
if DEBUG:
for k in range(K):
print(f"Predicted Centroid {k}: {vector_str(centroids[k])}")
| 13,991
|
def _init_unique_pool(_ic_profile_dic, _cac_profile_dic, _ic_shape, _cac_shape):
"""initialize pool, function used to put data into shared memory"""
print(f"- Initialize core with illumination correction profiles for {list(_ic_profile_dic.keys())}")
init_dic['illumination'] = _ic_profile_dic
print(f"- Initialize core with chromatic correction profiles for {list(_cac_profile_dic.keys())}")
init_dic['chromatic'] = _cac_profile_dic
init_dic['ic_shape'] = _ic_shape
init_dic['cac_shape'] = _cac_shape
| 13,992
|
def do_host_describe(cs, args):
"""Describe a specific host."""
result = cs.hosts.get(args.host)
columns = ["HOST", "PROJECT", "cpu", "memory_mb", "disk_gb"]
utils.print_list(result, columns)
| 13,993
|
def triangle_area(a, h):
"""Given length of a side and high return area for a triangle.
>>> triangle_area(5, 3)
7.5
"""
#[SOLUTION]
return a * h / 2.0
| 13,994
|
def generate_exclude_file1_file2(HWEresults_file, batches_list, draw_script, all_output, perbatch_output, allbatches_output, FDR_index_remove_variants):
"""generate exclude file 1: From HWE calculations across the entire
collection, remove variants for which HWE fails even if the worst batch
removed (i.e. even if we remove the batch with the smallest p_value (below
the HWE threshold), then the variant fails HWE for the entire collection
without the particular batch
generate exclude file 2: Remove variants (from HWE calculations by batch)
failed HWE in >1 batches.
"""
# THIS INDEX MUST BE SET BY USER
# '4' corresponds to FDR at 1e-5, see list thresholds_FDR below
# FDR_index_remove_variants = 4
# -------------------------------------------------------- #
# -- read HWE P-values from results file to control FDR -- #
# -------------------------------------------------------- #
numof_batches = len(batches_list)
# HWE_Pval_vectors
HWE_Pval_vector_allbatches = [] # (HWE_p-value, variant_id)
HWE_Pval_vector_worstbatchremoved = [] # (HWE_p-value, variant_id)
# HWE_Pval_vector for each batch
# 2D array
HWE_Pval_vector_perbatch = []
for b in xrange(numof_batches):
HWE_Pval_vector_perbatch.append([])
try:
fh_r = file(HWEresults_file, "r")
except IOError, e:
print e
sys.exit(1)
line = fh_r.readline().rstrip('\n')
while line:
list = re.split("\s+", line)
if list[0] == "":
del list[0]
if list[-1] == "":
del list[-1]
numof_pvalues = (len(list[4:]) - 2) / 4
assert(numof_batches == numof_pvalues)
# ------------------------------- #
# -- look at entire collection -- #
# ------------------------------- #
HWE_entire_collection = float(list[4])
variant_id = list[1]
HWE_Pval_vector_allbatches.append((HWE_entire_collection, variant_id))
# --------------------- #
# -- look at batches -- #
# --------------------- #
# find the ("worst") batch with the lowest p-value in HWE_particularbatch
HWE_min_excludebatch = 1.0
HWE_min_batch = 1.0
batch_index = 0
for i in xrange(5, 5 + 2 * numof_batches, 2):
HWE_particularbatch = float(list[i])
HWE_entire_collection_exclude_particularbatch = float(list[i + 1])
if HWE_particularbatch < HWE_min_batch:
HWE_min_batch = HWE_particularbatch
HWE_min_excludebatch = HWE_entire_collection_exclude_particularbatch
HWE_Pval_vector_perbatch[batch_index].append((HWE_particularbatch, variant_id))
batch_index += 1
HWE_Pval_vector_worstbatchremoved.append((HWE_min_excludebatch, variant_id))
line = fh_r.readline().rstrip('\n')
fh_r.close()
# ------------------------------------------------------------------- #
# -- sort p-value vectors by first element of tuples, i.e. p-value -- #
# ------------------------------------------------------------------- #
HWE_Pval_vector_allbatches.sort(reverse=False)
HWE_Pval_vector_worstbatchremoved.sort(reverse=False)
assert(len(HWE_Pval_vector_allbatches) == len(HWE_Pval_vector_worstbatchremoved))
for b in xrange(numof_batches):
HWE_Pval_vector_perbatch[b].sort(reverse=False)
assert(len(HWE_Pval_vector_allbatches) == len(HWE_Pval_vector_perbatch[b]))
# ---------------------------------------------------------------- #
# -- count #variant failed at FDR at q=1e-1,1e-2,1e-3,...,1e-10 -- #
# ---------------------------------------------------------------- #
thresholds_FDR = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10]
counts_rejected_FDR_allbatches = [0 for i in range(10)] # set count to 0
counts_rejected_FDR_worstbatchremoved = [0 for i in range(10)] # set count to 0
assert(len(counts_rejected_FDR_allbatches) == len(thresholds_FDR))
# fill this vector with HWE_Pvalues at FDR thresholds
thresholds_Pvals_allbatches = [float(0) for i in range(10)]
thresholds_Pvals_worstbatchremoved = [float(0) for i in range(10)]
# 2D array, per batch
counts_rejected_FDR_perbatch = []
for b in xrange(numof_batches):
counts_rejected_FDR_perbatch.append([0 for i in range(10)]) # set count to 0
# ------------------------------------------------------------------------------- #
# -- calculate FDR for different FDR thresholds (Benjamini and Hochberg, 1995) -- #
# ------------------------------------------------------------------------------- #
# (a) for all batches and worstbatchremoved
n = len(HWE_Pval_vector_allbatches)
for j in xrange(len(thresholds_FDR)):
break_i_loop_part1 = False
break_i_loop_part2 = False
for i in xrange(1, n + 1, 1):
rank = i / float(n)
threshold = rank * thresholds_FDR[j]
if (not break_i_loop_part1) and (HWE_Pval_vector_allbatches[i - 1][0] > threshold):
thresholds_Pvals_allbatches[j] = HWE_Pval_vector_allbatches[i - 2][0]
counts_rejected_FDR_allbatches[j] = i - 1
break_i_loop_part1 = True
if (not break_i_loop_part2) and (HWE_Pval_vector_worstbatchremoved[i - 1][0] > threshold):
thresholds_Pvals_worstbatchremoved[j] = HWE_Pval_vector_worstbatchremoved[i - 2][0]
counts_rejected_FDR_worstbatchremoved[j] = i - 1
break_i_loop_part2 = True
if break_i_loop_part1 and break_i_loop_part2:
break
# (b) for each batch itself
for j in xrange(len(thresholds_FDR)):
for i in xrange(1, n + 1, 1):
for b in xrange(numof_batches):
rank = i / float(n)
threshold = rank * thresholds_FDR[j]
if HWE_Pval_vector_perbatch[b][i - 1][0] > threshold:
counts_rejected_FDR_perbatch[b][j] = i - 1
break
# ------------ #
# -- file 1 -- #
# ------------ #
# extract rejected variants for FDRs at threshold with index
# FDR_index_remove_variants
# THIS INDEX MUST BE SET BY USER at the beginning of this function
# '4' corresponds to FDR at 1e-5
# use variable FDR_index_remove_variants = 4 (per default)
try:
fh_worst_batch_removed_w = file(all_output, "w")
fh_allbatches_w = file(allbatches_output, "w")
except IOError, e:
print e
sys.exit(1)
sep = "\t"
header = "Variant" + sep + "P_HWE\n"
fh_worst_batch_removed_w.writelines(header)
fh_allbatches_w.writelines(header)
for i in xrange(counts_rejected_FDR_worstbatchremoved[FDR_index_remove_variants]):
fh_worst_batch_removed_w.writelines(HWE_Pval_vector_worstbatchremoved[i][1] + sep +
str(HWE_Pval_vector_worstbatchremoved[i][0]) + "\n")
for i in xrange(counts_rejected_FDR_allbatches[FDR_index_remove_variants]):
fh_allbatches_w.writelines(HWE_Pval_vector_allbatches[i][1] + sep +
str(HWE_Pval_vector_allbatches[i][0]) + "\n")
fh_worst_batch_removed_w.close()
fh_allbatches_w.close()
# ----------------------------------------------------------------------------------- #
# -- write #rejected variants for FDRs at different thresholds for plotting with R -- #
# -- HWE across entire collection and worstbatchremoved -- #
# ----------------------------------------------------------------------------------- #
try:
fh_FDR_w = file(all_output + ".FDRthresholds.SNPQCI.1.txt", "w")
except IOError, e:
print e
sys.exit(1)
# write header
fh_FDR_w.writelines("FDR\tFail_allbatches\tHWE_pval_allbatches\tFail_worstbatchremoved\tHWE_pval_worstbatchremoved\n")
for i in xrange(len(thresholds_FDR)):
fh_FDR_w.writelines("%s" % (str(thresholds_FDR[i])))
fh_FDR_w.writelines("\t%s" % (str(counts_rejected_FDR_allbatches[i])))
fh_FDR_w.writelines("\t%s" % (str(thresholds_Pvals_allbatches[i])))
fh_FDR_w.writelines("\t%s" % (str(counts_rejected_FDR_worstbatchremoved[i])))
fh_FDR_w.writelines("\t%s\n" % (str(thresholds_Pvals_worstbatchremoved[i])))
fh_FDR_w.close()
# ------------ #
# -- file 2 -- #
# ------------ #
# ---------------------------------------------------------------------------------------------------------- #
# -- for each batch: extract rejected variants for FDRs at threshold with index FDR_index_remove_variants -- #
# ---------------------------------------------------------------------------------------------------------- #
# THIS INDEX MUST BE SET BY USER at the beginning of this function
# '4' corresponds to FDR at 1e-5
# use again variable FDR_index_remove_variants = 4
try:
fh_failed2plusbatches_w = file(perbatch_output, "w")
except IOError, e:
print e
sys.exit(1)
variant_exclude_dict_failed1plusbatches = {}
variant_exclude_dict_failed2plusbatches = {}
for b in xrange(numof_batches):
for j in xrange(counts_rejected_FDR_perbatch[b][FDR_index_remove_variants]):
if not HWE_Pval_vector_perbatch[b][j][1] in variant_exclude_dict_failed1plusbatches:
variant_exclude_dict_failed1plusbatches[HWE_Pval_vector_perbatch[b][j][1]] = True
elif HWE_Pval_vector_perbatch[b][j][1] in variant_exclude_dict_failed1plusbatches:
variant_exclude_dict_failed2plusbatches[HWE_Pval_vector_perbatch[b][j][1]] = True
for variant in variant_exclude_dict_failed2plusbatches.keys():
fh_failed2plusbatches_w.writelines(variant + "\n")
fh_failed2plusbatches_w.close()
# ----------------------------------------------------------------------------------- #
# -- write #rejected variants for FDRs at different thresholds for plotting with R -- #
# -- HWE for each batch -- #
# ----------------------------------------------------------------------------------- #
try:
fh_FDR_w = file(perbatch_output + ".FDRthresholds.SNPQCI.2.txt", "w")
except IOError, e:
print e
sys.exit(1)
# write header
fh_FDR_w.writelines("FDR\tFail_1plusbatches\tFail_2plusbatches\n")
for i in xrange(len(thresholds_FDR)):
variant_exclude_dict_failed1plusbatches = {}
variant_exclude_dict_failed2plusbatches = {}
for b in xrange(numof_batches):
for j in xrange(counts_rejected_FDR_perbatch[b][i]):
if not HWE_Pval_vector_perbatch[b][j][1] in variant_exclude_dict_failed1plusbatches:
variant_exclude_dict_failed1plusbatches[HWE_Pval_vector_perbatch[b][j][1]] = True
elif HWE_Pval_vector_perbatch[b][j][1] in variant_exclude_dict_failed1plusbatches:
variant_exclude_dict_failed2plusbatches[HWE_Pval_vector_perbatch[b][j][1]] = True
fh_FDR_w.writelines("%s" % (str(thresholds_FDR[i])))
fh_FDR_w.writelines("\t%s" % (str(len(variant_exclude_dict_failed1plusbatches))))
fh_FDR_w.writelines("\t%s\n" % (str(len(variant_exclude_dict_failed2plusbatches))))
# do not write table of corresponding HWE_Pvalues thresholds for FDR
# thresholds, because every batch has a different corresponding HWE_p-value
fh_FDR_w.close()
# plot results applying FDR thresholds
os.system("R --slave --args %s %s %s < %s"
% (all_output + ".FDRthresholds.SNPQCI.1.txt", perbatch_output + ".FDRthresholds.SNPQCI.2.txt",
str(FDR_index_remove_variants + 1),
draw_script))
# TODO: include SNP_QCI_draw_FDR_CON_PS_AS_CD_UC_PSC.r as a special case
| 13,995
|
def create_mongo_handler(config):
"""
:param config: configuration dictionary
    :return: a MongoLogHandler if 'mongo_logger' is in the configuration, else None
"""
from nicos.core import ConfigurationError
if hasattr(config, 'mongo_logger'):
url = urllib.parse.urlparse(config.mongo_logger)
if not url.netloc:
raise ConfigurationError('mongo_logger: invalid url')
mongo_handler = MongoLogHandler()
mongo_handler.setLevel(logging.WARNING)
return mongo_handler
| 13,996
|
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
"""
if len(ctx_list) == 1:
return [d.as_in_context(ctx_list[0]) for d in data]
size = len(data)
num_slice = len(ctx_list)
step = size // num_slice
for i in range(num_slice):
for k in range(i*step, (i+1)*step):
            data[k] = data[k].as_in_context(ctx_list[i])
return data
| 13,997
|
def _get_scoped_outputs(comp, g, explicit_outs):
"""Return a list of output varnames scoped to the given name."""
cnamedot = comp.name + '.'
outputs = set()
if explicit_outs is None:
explicit_outs = ()
for u,v in g.list_connections():
if u.startswith(cnamedot):
outputs.add(u)
outputs.update([n for n in explicit_outs if n.startswith(cnamedot)])
if not outputs:
return None
return [n.split('.',1)[1] for n in outputs]
| 13,998
|
def init_ha_active(datanode, cluster):
"""
Do initial HA setup on the leader.
"""
local_hostname = hookenv.local_unit().replace('/', '-')
hadoop = get_hadoop_base()
hdfs = HDFS(hadoop)
hdfs.stop_namenode()
remove_state('namenode.started')
# initial cluster is us (active) plus a standby
set_cluster_nodes([local_hostname, cluster.nodes()[0]])
update_ha_config(datanode)
hdfs.init_sharededits()
hdfs.start_namenode()
leadership.leader_set({'ha-initialized': 'true'})
set_state('namenode.started')
| 13,999
|