content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def parse(data):
    """
    Parse the lines of a Santander bank-statement text file.

    The format of the bank statement is as follows:
        "From: <date> to <date>"
        "Account: <number>"
        "Date: <date>"
        "Description: <description>"
        "Amount: <amount>"
        "Balance: <amount>"
        <second_transaction_entry>
        <nth_transaction_entry>

    :param data: A list containing each line of the bank statement.
    :return: A pandas DataFrame indexed by transaction date.
    """
    dates, descs, amounts, balances = [], [], [], []
    # Drop the statement header, then strip whitespace and blank lines.
    lines = [line.strip() for line in data[4:]]
    lines = [line for line in lines if line]
    # Each transaction occupies four consecutive lines.
    for chunk in (lines[i:i + 4] for i in range(0, len(lines), 4)):
        for field in chunk:
            # Strip the field descriptor, then convert the value.
            if field.startswith("Date"):
                raw = field.replace("Date: ", "").strip()
                dates.append(datetime.datetime.strptime(raw, "%d/%m/%Y"))
            if field.startswith("Description"):
                descs.append(field.replace("Description: ", "").strip())
            if field.startswith("Amount"):
                raw = field.replace("Amount: ", "").replace(" GBP", "").strip()
                amounts.append(float(raw))
            if field.startswith("Balance"):
                raw = field.replace("Balance: ", "").replace(" GBP", "").strip()
                balances.append(float(raw))
    # Assemble the parsed columns into a DataFrame keyed by date.
    rows = list(zip(dates, balances, amounts, descs))
    columns = ["DATES", "BALANCE", "AMOUNT", "DESCRIPTION"]
    return pd.DataFrame(rows, columns=columns, index=dates)
def solve_duffing(tmax, dt_per_period, t_trans, x0, v0, gamma, delta, omega):
    """Solve the Duffing equation for parameters gamma, delta, omega.

    Find the numerical solution to the Duffing equation using a suitable
    time grid: tmax is the maximum time (s) to integrate to; t_trans is
    the initial time period of transient behaviour until the solution
    settles down (if it does) to some kind of periodic motion (these data
    points are dropped) and dt_per_period is the number of time samples
    (of duration dt) to include per period of the driving motion (frequency
    omega).

    Returns the time grid, t (after t_trans), position, x, and velocity,
    xdot, dt, and step, the number of array points per period of the driving
    motion.
    """
    # One driving period and the sample spacing derived from it.
    period = 2 * np.pi / omega
    dt = period / dt_per_period
    step = int(period / dt)
    t = np.arange(0, tmax, dt)
    # Integrate the ODE from the initial state [x, xdot].
    initial_state = [x0, v0]
    X = odeint(deriv, initial_state, t, args=(gamma, delta, omega))
    # Drop the transient portion of the trajectory.
    n_trans = int(t_trans / dt)
    return t[n_trans:], X[n_trans:], dt, step
def create_prog_assignment_registry():
    """Create the registry for course properties.

    Builds the FieldRegistry that drives the editor form for a programming
    assignment: assignment-level configuration, public and private
    test-case arrays, the allowed-languages array, and draft/published
    status.

    Returns:
        FieldRegistry: the fully populated registry.
    """
    reg = FieldRegistry(
        'Prog Assignment Entity', description='Prog Assignment',
        extra_schema_dict_values={
            'className': 'inputEx-Group new-form-layout'})
    # Course level settings.
    course_opts = reg.add_sub_registry('prog_assignment', 'Assignment Config')
    # Identifier fields: read-only in the editor (editable=False).
    course_opts.add_property(SchemaField(
        'key', 'ID', 'string', editable=False,
        extra_schema_dict_values={'className': 'inputEx-Field keyHolder'},
        description='Unique Id of the Assignment'))
    course_opts.add_property(SchemaField(
        'pa_id', 'PA_ID', 'string', editable=False,
        extra_schema_dict_values={'className': 'inputEx-Field keyHolder'},
        description='Unique id of the test cases in this assignment.'))
    # select_data is filled in elsewhere with the available units.
    course_opts.add_property(SchemaField('parent_unit', 'Parent Unit', 'string', select_data=[]))
    course_opts.add_property(
        SchemaField('type', 'Type', 'string', editable=False))
    course_opts.add_property(
        SchemaField('title', 'Title', 'string', optional=False))
    course_opts.add_property(
        SchemaField('weight', 'Weight', 'number', optional=False))
    course_opts.add_property(SchemaField(
        content_key('question'), 'Problem Statement', 'html', optional=False,
        description=('Problem Statement and description of program, visible'
                     ' to student.'),
        extra_schema_dict_values={
            'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
            'className': 'inputEx-Field content'}))
    course_opts.add_property(SchemaField(
        'html_check_answers', 'Allow "Compile & Run"', 'boolean',
        optional=True,
        extra_schema_dict_values={
            'className': 'inputEx-Field assessment-editor-check-answers'}))
    # Evaluator choices come from the registered program evaluators.
    course_opts.add_property(SchemaField(
        content_key('evaluator'), 'Program Evaluator', 'string', optional=True,
        select_data=[
            (eid, eid)
            for eid in evaluator.ProgramEvaluatorRegistory.list_ids()]))
    course_opts.add_property(SchemaField(
        content_key('ignore_presentation_errors'), 'Ignore Presentation Errors',
        'boolean', optional=True,
        extra_schema_dict_values={
            'className': 'inputEx-Field assessment-editor-check-answers'}))
    course_opts.add_property(
        SchemaField(workflow_key(courses.SUBMISSION_DUE_DATE_KEY),
                    'Submission Due Date', 'string', optional=True,
                    description=str(messages.DUE_DATE_FORMAT_DESCRIPTION)))
    course_opts.add_property(SchemaField(
        content_key('show_sample_solution'),
        'Show sample solution after deadline', 'boolean', optional=True,
        extra_schema_dict_values={
            'className': 'inputEx-Field assessment-editor-check-answers'}))
    # Schema shared by each public/private test-case entry.
    test_case_opts = FieldRegistry('', '')
    test_case_opts.add_property(SchemaField(
        'input', 'Input', 'text', optional=True,
        extra_schema_dict_values={}))
    test_case_opts.add_property(SchemaField(
        'output', 'Output', 'text', optional=True,
        extra_schema_dict_values={'className': 'inputEx-Field content'}))
    test_case_opts.add_property(SchemaField(
        'weight', 'Weight', 'number', optional=False,
        extra_schema_dict_values={'className': 'inputEx-Field content','value':1}))
    # Public test cases: visible to students.
    public_test_cases = FieldArray(
        content_key('public_testcase'), '', item_type=test_case_opts,
        extra_schema_dict_values={
            'sortable': False,
            'listAddLabel': 'Add Public Test Case',
            'listRemoveLabel': 'Delete'})
    public_tests_reg = course_opts.add_sub_registry(
        'public_testcase', title='Public Test Cases')
    public_tests_reg.add_property(public_test_cases)
    # Private test cases: used for grading only.
    private_test_cases = FieldArray(
        content_key('private_testcase'), '', item_type=test_case_opts,
        extra_schema_dict_values={
            'sortable': False,
            'listAddLabel': 'Add Private Test Case',
            'listRemoveLabel': 'Delete'})
    private_tests_reg = course_opts.add_sub_registry(
        'private_testcase', title='Private Test Cases')
    private_tests_reg.add_property(private_test_cases)
    # Per-language code fields (prefix/template/suffix/invisible code and
    # the sample solution).
    lang_reg = course_opts.add_sub_registry(
        'allowed_languages', title='Allowed Programming Languages')
    language_opts = FieldRegistry('', '')
    language_opts.add_property(
        SchemaField(
            'language', 'Programming Language', 'string',
            select_data=base.ProgAssignment.PROG_LANG_FILE_MAP.items()))
    language_opts.add_property(SchemaField(
        'prefixed_code', 'Prefixed Fixed Code', 'text', optional=True,
        description=('The uneditable code for the assignment. '
                     'This will be prepended at the start of user code'),
        extra_schema_dict_values={'className': 'inputEx-Field content'}))
    language_opts.add_property(SchemaField(
        'code_template', 'Template Code', 'text', optional=True,
        description=('The default code that is populated on opening ' +
                     'an assignment.'),
        extra_schema_dict_values={'className': 'inputEx-Field content'}))
    language_opts.add_property(SchemaField(
        'uneditable_code', 'Suffixed Fixed Code', 'text', optional=True,
        description=('The uneditable code for the assignment. '
                     'This will be appended at the end of user code'),
        extra_schema_dict_values={'className': 'inputEx-Field content'}))
    language_opts.add_property(SchemaField(
        'suffixed_invisible_code', 'Invisible Code', 'text', optional=True,
        description=('This code will not be visible to the student and will be'
                     ' appended at the very end.'),
        extra_schema_dict_values={'className': 'inputEx-Field content'}))
    language_opts.add_property(SchemaField(
        'sample_solution', 'Sample Solution', 'text',
        optional=True,
        extra_schema_dict_values={'className': 'inputEx-Field'}))
    language_opts.add_property(SchemaField(
        'filename', 'Sample Solution Filename', 'string',
        optional=True,
        extra_schema_dict_values={'className': 'inputEx-Field'}))
    # At least one language must be configured (minItems: 1).
    allowed_languages = FieldArray(
        content_key('allowed_languages'), '',
        item_type=language_opts,
        extra_schema_dict_values={
            'sortable': False,
            'listAddLabel': 'Add Language',
            'listRemoveLabel': 'Delete',
            'minItems': 1})
    lang_reg.add_property(allowed_languages)
    # Draft/published status toggle.
    course_opts.add_property(
        SchemaField('is_draft', 'Status', 'boolean',
                    select_data=[(True, DRAFT_TEXT), (False, PUBLISHED_TEXT)],
                    extra_schema_dict_values={
                        'className': 'split-from-main-group'}))
    return reg
def _read_one_cml(cml_g,
                  cml_id_list=None,
                  t_start=None,
                  t_stop=None,
                  column_names_to_read=None,
                  read_all_data=False):
    """
    Build a `Comlink` from one CML HDF group.

    Parameters
    ----------
    cml_g
        HDF group holding the metadata and `channel_*` subgroups of one CML.
    cml_id_list
        Optional whitelist of CML ids; groups not listed are skipped.
    t_start
        Optional start of the time range to read.
    t_stop
        Optional end of the time range to read.
    column_names_to_read
        Optional subset of channel columns to read.
    read_all_data
        If True, read all available channel data.

    Returns
    -------
    Comlink, or None when the CML id is filtered out by `cml_id_list`.
    """
    metadata = _read_cml_metadata(cml_g)
    # Skip this CML entirely when a filter list is given and it is absent.
    if cml_id_list is not None and metadata['cml_id'] not in cml_id_list:
        return None
    channels = [
        _read_cml_channel(
            cml_ch_g=sub_group,
            t_start=t_start,
            t_stop=t_stop,
            column_names_to_read=column_names_to_read,
            read_all_data=read_all_data)
        for name, sub_group in cml_g.items()
        if 'channel_' in name
    ]
    # TODO: Handle `auxiliary_N` and `product_N` cml_g-subgroups
    return Comlink(channels=channels, metadata=metadata)
def get_commands():
    """
    Return a dictionary of all az cli commands, keyed by the path to the
    command; each entry maps the action verbs for that command to the
    command object (from the cli core module).
    """
    # Use the Microsoft VSCode tooling module to load the az cli command table.
    tooling.initialize()
    command_table = tooling.load_command_table()
    command_dict = {}
    for full_name, command in command_table.items():
        # Split the "az ..." style name into segments and pythonize each one.
        segments = [pythonize_name(part) for part in full_name.split(" ")]
        # The final segment is the action verb; the rest form the path.
        verb = segments.pop()
        path = os.path.join(Constants.COMMAND_ROOT, *segments)
        # Register the command object under its path and verb.
        command_dict.setdefault(path, {})[verb] = command
    return command_dict
def test_getNum():
    """Test fcn that returns selected nums[i]"""
    expected = 321
    index = 2
    sender = Blockchain().address(0)  # btw, this account is no longer owner
    contract = Contract('test')
    contract.connect()
    # Store a value, then read it back through the getter.
    contract.run_trx(sender, 'storeNum', index, expected)
    assert contract.call_fcn('getNum', index) == expected
def calculate_keypoints(img, method, single_channel, graphics=False):
    """
    Detect keypoints and compute descriptors on a single-channel version of `img`.

    https://pysource.com/2018/03/21/feature-detection-sift-surf-obr-opencv-3-4-with-python-3-tutorial-25/

    :param img: input image (gray or single channel input is produced from it).
    :param method: one of 'sift', 'orb', 'fast', 'star'; any other value
        returns 0 (legacy fallback, kept for caller compatibility).
    :param single_channel: conversion to apply before detection; one of
        'gray', 'laplacian', 'color', 'HSV', 'hog', 'mixed'.
    :param graphics: if True, show the detected keypoints with matplotlib.
    :return: (keypoints, descriptors) tuple, or 0 for an unknown method.
    :raises ValueError: if `single_channel` is not a known conversion name.
    """
    if single_channel == 'gray':
        img_single_channel = single_channel_gray(img)
    elif single_channel == 'laplacian':
        img_single_channel = compute_laplac(img)
    elif single_channel == 'color':
        img_single_channel = clahe(img)
    elif single_channel == 'HSV':
        img_single_channel = HSV(img)
    elif single_channel == 'hog':
        img_single_channel = hog(img)
    elif single_channel == 'mixed':
        img_single_channel = mixed(img)
    else:
        # Previously an unknown name fell through and crashed later with
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(f"Unknown single_channel option: {single_channel!r}")
    print(img_single_channel.shape, type(img_single_channel), img_single_channel.dtype)
    if method == 'sift':
        # SIFT (Scale-Invariant Feature Transform)
        sift = cv2.SIFT_create(edgeThreshold=21, sigma=1.2)
        keypoints_sift, descriptors_sift = sift.detectAndCompute(img_single_channel, None)
        if graphics == True:
            img_sift = cv2.drawKeypoints(img_single_channel, keypoints_sift, None, color=(0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            plt.figure(), plt.imshow(img_sift), plt.title("SIFT"), plt.show()
        return keypoints_sift, descriptors_sift
    elif method == 'orb':
        # ORB
        orb = cv2.ORB_create(nfeatures=3000)
        keypoints_orb, descriptors_orb = orb.detectAndCompute(img_single_channel, None)
        if graphics == True:
            img_orb = cv2.drawKeypoints(img_single_channel, keypoints_orb, None, color=(0, 255, 0), flags=0)
            plt.figure(), plt.imshow(img_orb), plt.title("ORB"), plt.show()
        return keypoints_orb, descriptors_orb
    elif method == 'fast':
        # FAST corner detector + BRIEF descriptor extractor
        fast = cv2.FastFeatureDetector_create()
        brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
        keypoints_fast = fast.detect(img_single_channel, None)
        keypoints_brief, descriptors_brief = brief.compute(img_single_channel, keypoints_fast)
        print(len(keypoints_fast), len(keypoints_brief))
        if graphics == True:
            img_fast = cv2.drawKeypoints(img_single_channel, keypoints_fast, None, color=(255, 0, 0))
            img_brief = cv2.drawKeypoints(img_single_channel, keypoints_brief, None, color=(255, 0, 0))
            plt.figure(), plt.imshow(img_fast), plt.title("Detected FAST keypoints"), plt.show()
            plt.figure(), plt.imshow(img_brief), plt.title("Detected BRIEF keypoints"), plt.show()
        return keypoints_brief, descriptors_brief
    elif method == 'star':
        # STAR detector (features only) + BRIEF extractor (descriptors only)
        star = cv2.xfeatures2d.StarDetector_create()
        brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
        keypoints_star = star.detect(img_single_channel, None)
        keypoints_brief, descriptors_brief = brief.compute(img_single_channel, keypoints_star)
        print(len(keypoints_star), len(keypoints_brief))
        if graphics == True:
            img_star = cv2.drawKeypoints(img_single_channel, keypoints_star, None, color=(255, 0, 0))
            img_brief = cv2.drawKeypoints(img_single_channel, keypoints_brief, None, color=(255, 0, 0))
            plt.figure(), plt.imshow(img_star), plt.title("Detected STAR keypoints"), plt.show()
            plt.figure(), plt.imshow(img_brief), plt.title("Detected BRIEF keypoints"), plt.show()
        return keypoints_brief, descriptors_brief
    # Unknown method: legacy fallback value kept for backward compatibility.
    return 0
def plot_pos_neg(
        train_data: pd.DataFrame,
        train_target: pd.DataFrame,
        col1: str = 'v5',
        col2: str = 'v6'
) -> None:
    """
    Make hexbin plot for training transaction data

    :param train_data: pd.DataFrame, features dataframe
    :param train_target: pd.DataFrame, target dataframe
    :param col1: str, name of first column for hexbin plot
    :param col2: str, name of second column for hexbin plot
    :return: None
    """
    # Split rows by target class (1 = positive, 0 = negative).
    pos_df = pd.DataFrame(train_data[train_target.values == 1], columns=train_data.columns)
    neg_df = pd.DataFrame(train_data[train_target.values == 0], columns=train_data.columns)
    # Pass x/y as keyword arguments: positional data arguments to
    # sns.jointplot were deprecated in seaborn 0.11 and removed in 0.12.
    sns.jointplot(x=pos_df[col1], y=pos_df[col2], kind='hex', xlim=(-5, 5), ylim=(-5, 5))
    plt.suptitle('Positive distribution')
    sns.jointplot(x=neg_df[col1], y=neg_df[col2], kind='hex', xlim=(-5, 5), ylim=(-5, 5))
    _ = plt.suptitle('Negative distribution')
    return None
def open_w_lock (file_name, mode = "r", bufsize = -1) :
    """Context manager that opens `file_name` after successfully locking it.

    :param file_name: path of the file to lock and then open.
    :param mode: mode string passed through to `open`.
    :param bufsize: buffering policy passed through to `open`.

    Yields the open file object; both the file and the lock are released
    when the managed block exits.
    """
    # NOTE(review): this is a generator function; it presumably carries a
    # @contextmanager-style decorator at the definition site — confirm.
    with lock_file (file_name) :
        with open (file_name, mode, bufsize) as file :
            yield file
def raffle_form(request, prize_id):
    """Supply the raffle form.

    Renders the raffle-winner receipt for `prize_id`, using the
    'raffle-winner-receipt' NoticeTemplate when it exists and falling back
    to the plain-text 'view_prizes/form.txt' template otherwise.
    """
    # `request` is required by the URL dispatcher but unused before the
    # fallback branch below.
    _ = request
    prize = get_object_or_404(RafflePrize, pk=prize_id)
    challenge = challenge_mgr.get_challenge()
    try:
        template = NoticeTemplate.objects.get(notice_type='raffle-winner-receipt')
    except NoticeTemplate.DoesNotExist:
        # No custom template defined: render the generic plain-text form.
        return render_to_response('view_prizes/form.txt', {
            'raffle': True,
            'prize': prize,
            'round': prize.round,
            'competition_name': challenge.name,
        }, context_instance=RequestContext(request), mimetype='text/plain')
    message = template.render({
        'raffle': True,
        'prize': prize,
        'round': prize.round,
        'competition_name': challenge.name,
    })
    # NOTE(review): `mimetype` was removed from HttpResponse in Django 1.7
    # and overlaps with `content_type` here — confirm the targeted Django
    # version before touching this.
    return HttpResponse(message, content_type="text", mimetype='text/html')
def is_rldh_label(label):
    """Tests a binary string against the definition of R-LDH label

    As defined by RFC5890_:

    Reserved LDH labels, known as "tagged domain names" in some
    other contexts, have the property that they contain "--" in the
    third and fourth characters but which otherwise conform to LDH
    label rules.

    Non-Reserved LDH labels are the set of valid LDH labels that do
    not have "--" in the third and fourth positions.

    Therefore you can test for a NR-LDH label simply by using the *not*
    operator."""
    # Must be a valid LDH label first; then check positions 3-4 for "--".
    if not is_ldh_label(label):
        return False
    return label[2:4] == b'--'
def offence_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for environments in which player's team has the ball """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # The active player controls the ball and it belongs to our team (0).
        return (obs["ball_owned_player"] == obs["active"]
                and obs["ball_owned_team"] == 0)
    def get_memory_patterns(obs, player_x, player_y):
        """ get list of memory patterns """
        return [
            far_from_goal_shot,
            far_from_goal_high_pass,
            bad_angle_high_pass,
            close_to_goalkeeper_shot,
            go_through_opponents,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def get_mem() -> int:
    """Return memory used by CombSpecSearcher - note this is actually the
    memory usage of the process that the instance of CombSpecSearcher was
    invoked."""
    # Resident set size (bytes) of the current process.
    current_process = psutil.Process(os.getpid())
    return int(current_process.memory_info().rss)
def load_parameters(directory_name):
    """
    Loads the .yml file parameters to a dictionary.

    :param directory_name: path of the YAML file, relative to the current
        working directory.
    :return: dict of parameters parsed from the file.
    """
    parameter_file_name = os.path.join(os.getcwd(), directory_name)
    # Use a context manager so the file is closed even if parsing raises
    # (the original open/close pair leaked the handle on a parse error).
    with open(parameter_file_name, 'r') as parameter_file:
        parameters = yaml.load(parameter_file, Loader=yaml.FullLoader)
    return parameters
def aggregated_lineplot_new(df_agg,countries,fill_between=('min','max'),save=False,fig=None,ax=None,clrs='default'):
    """
    Creates an aggregates lineplot for multiple countries

    Arguments:
        *df_agg* (DataFrame) : contains the aggregated results, either relative (df_rel) or absolute (df_abs)
        *countries* (list) : list of strings with names of countries to plot
        *fill_between* (tuple) : indicates which percentiles to feel between
        *save* (Boolean) : should the file be saved in the folder config['paths']['output_images']
    Returns:
        fig,ax
    Raises:
        ValueError: if df_agg has neither an 'AoI relative combinations'
            nor an 'AoI combinations' column.
    """
    #assert fill_between in cols.
    if 'AoI relative combinations' in df_agg.columns: #INDICATES THAT THESE ARE RELATIVE RESULTS
        grouper = 'AoI relative combinations'
        xlabel = "% of combinations of micro-floods (AoI's) of the maximum number of micro-floods per country"
        relative = True #need for plotting
    elif 'AoI combinations' in df_agg.columns: #ABSOLUTE RESULTS
        grouper = 'AoI combinations'
        xlabel = "Number of combinations of micro-floods (AoI's)"
        relative = False
    else:
        # Previously this fell through and crashed later with a NameError
        # on `grouper`; fail fast with a clear message instead.
        raise ValueError("df_agg must contain either an 'AoI relative combinations' "
                         "or an 'AoI combinations' column")
    if clrs == 'default':
        clrs = ['darkblue', 'red', 'green', 'purple', 'orange', 'skyblue']
    # Use identity comparison for None (PEP 8) instead of `== None`.
    if fig is None and ax is None: #if No axes and no figure is provided
        fig, ax = plt.subplots(figsize=(8, 6))
    lines = df_agg
    for cntry, cl in zip(countries, clrs):
        c = cntry.capitalize()
        # Mean line plus a shaded band between the requested percentiles.
        ax.plot(lines.loc[lines['country'] == c, grouper], lines.loc[lines['country'] == c, 'mean'],
                color=cl, label=c)
        ax.fill_between(lines.loc[lines['country'] == c, grouper], lines.loc[lines['country'] == c, fill_between[0]],
                        lines.loc[lines['country'] == c, fill_between[1]], alpha=0.3, edgecolor=cl, facecolor=cl, linewidth=0)
    ax.legend()
    ax.set_ylabel("% optimal routes disrupted")
    ax.set_xlabel(xlabel)
    #Todo: add function to link country names with official codes NUTS0
    if save: #TODO REPLACE ALL INSTANCES OF THIS PART OF CODE WITH A SPECIAL FUNCTION
        save_figs = load_config(config_file)['paths']['output_images'] / 'aggregate_line'
        if not save_figs.exists(): save_figs.mkdir()
        filename = "aggregateline_{}_{}.png".format('-'.join(countries),fill_between[0] + '-' + fill_between[1])
        if relative: filename = "aggregateline_{}_{}_relative.png".format(\
            '-'.join(countries),fill_between[0] + '-' + fill_between[1])
        fig.savefig(save_figs / filename)
    return fig,ax
def _stat_categories():
    """
    Returns a `collections.OrderedDict` of all statistical categories
    available for play-by-play data.
    """
    # Key each Category by its identifier (row[3]), preserving source order.
    return OrderedDict(
        (row[3], Category(row[3], row[0], Enums.category_scope[row[2]], row[1], row[4]))
        for row in nfldb.category.categories)
def rsafactor(d: int, e: int, N: int) -> List[int]:
    """
    This function returns the factors of N, where p*q=N
    Return: [p, q]

    We call N the RSA modulus, e the encryption exponent, and d the decryption exponent.
    The pair (N, e) is the public key. As its name suggests, it is public and is used to
    encrypt messages.
    The pair (N, d) is the secret key or private key and is known only to the recipient
    of encrypted messages.

    >>> rsafactor(3, 16971, 25777)
    [149, 173]
    >>> rsafactor(7331, 11, 27233)
    [113, 241]
    >>> rsafactor(4021, 13, 17711)
    [89, 199]
    """
    k = d * e - 1
    p = 0
    q = 0
    # Las Vegas algorithm: retry with random bases g until a non-trivial
    # factor is found; the result is always correct.
    while p == 0:
        g = random.randint(2, N - 1)
        t = k
        while True:
            if t % 2 == 0:
                t = t // 2
                # Three-argument pow() performs fast modular exponentiation;
                # (g ** t) % N would materialize an astronomically large
                # integer before reducing it.
                x = pow(g, t, N)
                y = math.gcd(x - 1, N)
                if x > 1 and y > 1:
                    p = y
                    q = N // y
                    break  # find the correct factors
            else:
                break  # t is not divisible by 2, break and choose another g
    return sorted([p, q])
def integrate_prob_current(psi, n0, n1, h):
    """
    Numerically integrate the probability current, which is
    Im{psi d/dx psi^*}, over the spatial interval [n0, n1) for every
    time sample in `psi`.
    """
    psi_diff = get_imag_grad(psi, h)
    curr = get_prob_current(psi, psi_diff)
    n_steps = psi.shape[0]
    res = np.zeros(n_steps)
    # Trapezoidal integration per time step, with a progress bar.
    with progressbar.ProgressBar(max_value=int(n_steps)) as bar:
        for step in range(0, n_steps):
            res[step] = np.trapz(curr[step, n0:n1], dx=h)
            bar.update(step)
    print("Finished calculating the integrated prob. current!")
    return res
def getFile(path):
    """
    Given a file path on the target, build PHP code that reads and echoes
    the contents of that file.

    :param path: path of the file to read on the target.
    :return: the PHP source, base64 encoded (str).
    """
    php_source = """
    @ini_set("display_errors","0");
    @set_time_limit(0);
    @set_magic_quotes_runtime(0);
    $path = '%s';
    $hanlder = fopen($path, 'rb');
    $res = fread($hanlder, filesize($path));
    fclose($hanlder);
    echo $res;
    """% path
    encoded = base64.b64encode(php_source.encode("UTF-8"))
    return encoded.decode("UTF-8")
def _load():
    """Load the previous state of the repository from CONFIG_PATH into CONFIG."""
    if not os.path.exists(CONFIG_PATH):
        print("[x] No config file available.")
        return
    try:
        with open(CONFIG_PATH) as file_handle:
            loaded = json.load(file_handle)
    except ValueError:
        # json.load raises ValueError (JSONDecodeError) on malformed input.
        print("[x] The config file is invalid.")
    else:
        CONFIG.update(loaded)
def rgc(tmpdir):
    """ Provide an RGC instance; avoid disk read/write and stay in memory. """
    entries = {
        CFG_GENOMES_KEY: dict(CONF_DATA),
        CFG_FOLDER_KEY: tmpdir.strpath,
        CFG_SERVER_KEY: "http://staging.refgenomes.databio.org/",
    }
    return RGC(entries=entries)
def test_riid_generator_length():
    """The RIID generator length should be a non-zero length string."""
    length = config.riid_generator_length
    assert isinstance(length, int)
    assert length > 0
def execlog(command):  # logs commands and control errors
    """
    Run `command` through os.system while logging it to `cmd_logfile`.

    If a command fails, the failure is reported via `message` and — except
    during the "* Unwrapping" stage — the global `skipping` flag is set so
    that subsequent commands are only appended to the log file after a
    'Skipping from here' note instead of being executed.

    :param command: the shell command to log and execute.
    :return: "unwfaild" when the "* Unwrapping" stage fails, else None.
    """
    global skipping
    try:
        log = open(cmd_logfile,'a')
    except IOError:
        # Fixed typo in the user-facing message ("fined" -> "find").
        sys.exit("Could not find "+cmd_logfile)
    else:
        log.write(command+"\n")
        log.close()
    if not skipping:
        # NOTE(review): os.system returns an exit *status*, not an errno, so
        # feeding it to os.strerror only works because status 0 happens to
        # map to 'Success'; non-zero statuses get arbitrary errno text.
        # Confirm before relying on the message content.
        cmd_strerror = os.strerror(os.system(command))
        if not cmd_strerror == 'Success':
            message(cmd_strerror)
            # Fixed typo in the user-facing message ("Faild" -> "Failed").
            message("Failed at "+stage)
            if not stage == "* Unwrapping":
                message("############## Skipping from here ##############")
                log = open(cmd_logfile,'a')
                log.write("############## Skipping from here ##############\n")
                log.close()
                skipping = 1
            else:
                # Callers compare against this exact sentinel; keep it unchanged.
                return "unwfaild"
def simulate_quantities_of_interest_superoperator(tlist, c_ops, noise_parameters_CZ, fluxlutman,
                                                  fluxbias_q1, amp,
                                                  sim_step,
                                                  verbose: bool=True):
    """
    Calculates the propagator and the quantities of interest from the propagator (either unitary or superoperator)

    Args:
        tlist (array): times in s, describes the x component of the
            trajectory to simulate (not actually used, we just use sim_step)
        sim_step(float): time between one point and another of amp
        c-ops (list of Qobj): time (in)dependent jump operators
        amp(array): amplitude in voltage describes the y-component of the trajectory to simulate
        fluxlutman,noise_parameters_CZ: instruments containing various parameters

    Returns
        phi_cond (float): conditional phase (deg)
        L1 (float): leakage
        L2 (float): seepage
        avgatefid_pc (float): average gate fidelity in full space, phase corrected
        avgatefid_compsubspace_pc (float): average gate fidelity only in the computational subspace, phase corrected
        avgatefid_compsubspace (float): average gate fidelity only in the computational subspace, not phase corrected,
            but taking into account the rotating frame of the two qutrits as qubits
        phase_q0 / q1 (float): single qubit phases in the rotating frame at the end of the pulse
    """
    H_0=calc_hamiltonian(0,fluxlutman,noise_parameters_CZ) # computed at 0 amplitude
    # NOTE: parameters of H_0 could be not exactly e.g. the bare frequencies
    # We change the basis from the standard basis to the basis of eigenvectors of H_0
    # The columns of S are the eigenvectors of H_0, appropriately ordered
    if noise_parameters_CZ.dressed_compsub():
        S = qtp.Qobj(matrix_change_of_variables(H_0),dims=[[3, 3], [3, 3]])
    else:
        S = qtp.tensor(qtp.qeye(3),qtp.qeye(3)) # line here to quickly switch off the use of S
    H_0_diag = S.dag()*H_0*S
    # Qubit frequencies extracted from the diagonalized Hamiltonian.
    #w_q0 = fluxlutman.q_freq_01()
    w_q0 = (H_0_diag[1,1]-H_0_diag[0,0]) / (2*np.pi)
    #w_q1 = fluxlutman.q_freq_10()
    w_q1 = (H_0_diag[3,3]-H_0_diag[0,0]) / (2*np.pi)
    # H_rotateaway = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1,
    #                                                  alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)
    w_q1_sweetspot = noise_parameters_CZ.w_q1_sweetspot()
    # Correction up to second order of the frequency due to flux noise, computed from w_q0(phi) = w_q0^sweetspot * sqrt(cos(pi * phi/phi_0))
    # NOTE(review): the trailing "- \" followed by a leading "-" on the next
    # line makes the second-order term effectively *added*; confirm the
    # intended sign of the second-order correction.
    w_q1_biased = w_q1 - np.pi/2 * (w_q1_sweetspot**2/w_q1) * np.sqrt(1 - (w_q1**4/w_q1_sweetspot**4)) * fluxbias_q1 - \
                          - np.pi**2/2 * w_q1_sweetspot * (1+(w_q1**4/w_q1_sweetspot**4)) / (w_q1/w_q1_sweetspot)**3 * fluxbias_q1**2
    # with sigma up to circa 1e-3 \mu\Phi_0 the second order is irrelevant
    correction_to_H = coupled_transmons_hamiltonian_new(w_q0=0, w_q1=np.real(w_q1_biased-w_q1), alpha_q0=0, alpha_q1=0, J=0)
    t0 = time.time()
    # Piecewise-constant propagation: accumulate the per-step exponential of
    # the (super)operator over the amplitude trajectory.
    exp_L_total=1
    for i in range(len(amp)):
        H=calc_hamiltonian(amp[i],fluxlutman,noise_parameters_CZ) + correction_to_H
        H=S.dag()*H*S
        if c_ops != []:
            c_ops_temp=[]
            for c in range(len(c_ops)):
                if isinstance(c_ops[c],list):
                    c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis
                else:
                    c_ops_temp.append(c_ops[c])
            liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()
        else:
            liouville_exp_t=(-1j*H*sim_step).expm()
        exp_L_total=liouville_exp_t*exp_L_total
    t1 = time.time()
    #print('\n alternative propagator',t1-t0)
    U_final = exp_L_total
    #U_final=rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_0_diag)
    phases = phases_from_superoperator(U_final) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond
    phi_cond = phases[-1]
    L1 = leakage_from_superoperator(U_final)
    population_02_state = calc_population_02_state(U_final)
    L2 = seepage_from_superoperator(U_final)
    avgatefid = pro_avfid_superoperator_phasecorrected(U_final,phases)
    avgatefid_compsubspace = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L1,phases) # leakage has to be taken into account, see Woods & Gambetta
    #print('avgatefid_compsubspace',avgatefid_compsubspace)
    #H_twoqubits = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1,
    #                                                alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)
    #U_final_new = rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_twoqubits) ### old method rotating away also the phase of the |2> state
    t = tlist[-1]+sim_step
    U_final_new = correct_reference(U=U_final,w_q1=w_q1,w_q0=w_q0,t=t)
    ### Script to check that we are correctly removing the single qubit phases in the rotating frame
    # cz_length = fluxlutman.cz_length()
    # U_check = (1j*H_twoqubits*cz_length).expm() * (-1j*H_0_diag*cz_length).expm()
    # phases_check = phases_from_superoperator(U_check)
    # print(phases_check)
    avgatefid_compsubspace_notphasecorrected = pro_avfid_superoperator_compsubspace(U_final_new,L1)
    # NOTE: a single qubit phase off by 30 degrees costs 5.5% fidelity
    ### Script to check that leakage and phi_cond are not affected by the phase correction, as it should be
    # L1_bis = leakage_from_superoperator(U_final_new)
    # phi_cond_bis = phases_from_superoperator(U_final_new)[-1]
    # print('leakage',L1-L1_bis)
    # print('phi_cond',phi_cond-phi_cond_bis)
    phases = phases_from_superoperator(U_final_new) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond
    phase_q0 = (phases[1]-phases[0]) % 360
    phase_q1 = (phases[2]-phases[0]) % 360
    # We now correct only for the phase of qubit left (q1), in the rotating frame
    avgatefid_compsubspace_pc_onlystaticqubit = pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U_final_new,L1,phases)
    return {'phi_cond': phi_cond, 'L1': L1, 'L2': L2, 'avgatefid_pc': avgatefid,
            'avgatefid_compsubspace_pc': avgatefid_compsubspace, 'phase_q0': phase_q0, 'phase_q1': phase_q1,
            'avgatefid_compsubspace': avgatefid_compsubspace_notphasecorrected,
            'avgatefid_compsubspace_pc_onlystaticqubit': avgatefid_compsubspace_pc_onlystaticqubit, 'population_02_state': population_02_state,
            'U_final_new': U_final_new}
def setup_logging(loglevel):
    """Setup basic logging

    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format='[%(asctime)s] %(levelname)s:%(name)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
def test_query_with_parquet(sdc_builder, sdc_executor, cluster, database):
    """Validate end-to-end case with stopping the pipeline and executing the map/reduce job after it read all the
    data from database. Addresses Hive drift synchronization solution in parquet data format. The pipeline looks like:

        jdbc_query_consumer >= pipeline_finisher_executor
        jdbc_query_consumer >> expression_evaluator >> field_remover >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
        hadoop_fs >= mapreduce
    """
    if 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    # Source table schema and fixture rows.
    table_name = get_random_string(string.ascii_lowercase, 20)  # lowercase for db compatibility (e.g. PostgreSQL)
    table = sqlalchemy.Table(table_name, sqlalchemy.MetaData(),
                             sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
                             sqlalchemy.Column('name', sqlalchemy.String(25)),
                             sqlalchemy.Column('dt', sqlalchemy.String(20)))
    rows_in_database = [{'id': 1, 'name': 'Ji Sun', 'dt': '2017-05-03'},
                        {'id': 2, 'name': 'Jarcec', 'dt': '2017-05-03'},
                        {'id': 3, 'name': 'Santhosh', 'dt': '2017-05-03'}]
    # Build the pipeline stages.
    pipeline_builder = sdc_builder.get_pipeline_builder()
    jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer')
    jdbc_query_consumer.set_attributes(incremental_mode=False, sql_query=f'SELECT * FROM {table_name};')
    # Copy the '/dt' field value into a 'dt' record header attribute, then
    # drop the field itself from the record body.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'dt',
                                                                      'headerAttributeExpression': "${record:value('/dt')}"}])
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/dt'])
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='PARQUET', database_expression=f'{database.database}',
                                 table_name="${record:attribute('jdbc.tables')}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    # max_records_in_file enables to close the file and generate the event
    hadoop_fs.set_attributes(avro_schema_location='HEADER', data_format='AVRO', directory_in_header=True,
                             max_records_in_file=1)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    # The MapReduce executor converts the closed Avro files to Parquet.
    mapreduce = pipeline_builder.add_stage('MapReduce', type='executor')
    mapreduce.set_attributes(job_type='AVRO_PARQUET',
                             output_directory="${file:parentPath(file:parentPath(record:value('/filepath')))}")
    wiretap = pipeline_builder.add_wiretap()
    pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')
    jdbc_query_consumer >= pipeline_finisher_executor
    jdbc_query_consumer >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    hadoop_fs >= mapreduce
    mapreduce >= wiretap.destination
    pipeline = pipeline_builder.build(title='Hive drift test').configure_for_environment(cluster, database)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    # Create and populate the source table, run the pipeline, then verify.
    try:
        logger.info('Creating table %s in %s database ...', table_name, database.type)
        table.create(database.engine)
        logger.info('Adding %s rows into %s database ...', len(rows_in_database), database.type)
        connection = database.engine.connect()
        connection.execute(table.insert(), rows_in_database)
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # assert events (MapReduce) generated
        assert len(wiretap.output_records) == len(rows_in_database)
        # make sure MapReduce job is done and is successful
        for event in wiretap.output_records:
            job_id = event.field['job-id'].value
            assert cluster.yarn.wait_for_job_to_end(job_id) == 'SUCCEEDED'
        # verify inserted data
        hive_cursor.execute(f'RELOAD {_get_qualified_table_name(None, table_name)}')
        hive_cursor.execute(f'SELECT * from {_get_qualified_table_name(None, table_name)}')
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        raw_values = [list(row.values()) for row in rows_in_database]
        assert sorted(hive_values) == sorted(raw_values)
    finally:
        logger.info('Dropping table %s in %s database...', table_name, database.type)
        table.drop(database.engine)
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute(f'DROP TABLE `{table_name}`')
def __polyline():
    """Return the contents of the bundled ``polyline.dxf`` sample file.

    Locates the DXF data shipped inside the ``cad`` package's
    ``data/dxf`` directory and reads it as text.

    :return: the DXF file contents as a single string.
    """
    dxf_path = resource_filename('cad', join('data', 'dxf', 'polyline.dxf'))
    with open(dxf_path, 'r') as dxf_file:
        return dxf_file.read()
def create_identity_split(all_chain_sequences, cutoff, split_size,
                          min_fam_in_split):
    """
    Create a split while retaining diversity specified by min_fam_in_split.

    Returns the selected split and the remaining dataset (with every structure
    similar to the split removed from it).

    :param all_chain_sequences: iterable of (structure_tuple, chain_sequences)
    :param cutoff: sequence-identity cutoff passed to ``seq.find_similar``
    :param split_size: target number of structures in the split
    :param min_fam_in_split: minimum number of families per split; caps how
        many members of one similarity family may enter the split
    :return: (selected_chain_sequences, remaining_chain_sequences) lists of
        (structure_tuple, chain_sequences) pairs
    """
    dataset_size = len(all_chain_sequences)
    tmp = {x: y for (x, y) in all_chain_sequences}
    assert len(tmp) == len(all_chain_sequences)
    all_chain_sequences = tmp
    # Get structure tuple.
    split, used = set(), set()
    to_use = set(all_chain_sequences.keys())
    while len(split) < split_size:
        # BUG FIX: random.sample() on a set was deprecated in Python 3.9 and
        # removed in 3.11; draw from a concrete sequence instead.
        rstuple = random.choice(tuple(to_use))
        rcs = all_chain_sequences[rstuple]
        found = seq.find_similar(rcs, 'blast_db', cutoff, dataset_size)
        # Map fasta names back to structure tuples.
        found = {seq.fasta_name_to_tuple(x)[0] for x in found}
        # ensure that at least min_fam_in_split families in each split
        max_fam_size = int(math.ceil(split_size / min_fam_in_split))
        # NOTE(review): list(found)[:max_fam_size] truncates in arbitrary set
        # order, so which family members enter the split is not deterministic
        # even for a fixed random seed — confirm this is intended.
        split = split.union(list(found)[:max_fam_size])
        to_use = to_use.difference(found)
        used = used.union(found)
    selected_chain_sequences = \
        [(s, cs) for s, cs in all_chain_sequences.items() if s in split]
    remaining_chain_sequences = \
        [(s, cs) for s, cs in all_chain_sequences.items() if s in to_use]
    return selected_chain_sequences, remaining_chain_sequences
def get_SNR(raw, fmin=1, fmax=55, seconds=3, freq=[8, 13]):
    """Compute the power spectrum and a 1/f-corrected SNR in one band.

    Parameters
    ----------
    raw : instance of Raw
        Raw instance containing traces for which to compute SNR.
    fmin : float
        Lower frequency bound used when fitting the spectral model.
    fmax : float
        Upper frequency bound used when fitting the spectral model.
    seconds : float
        PSD window length in seconds (converted to FFT points).
    freq : list | [8, 13]
        Frequency band in which the SNR is computed.

    Returns
    -------
    SNR : array, 1-D
        1/f-corrected SNR within ``freq`` for each channel.
    """
    n_channels = len(raw.ch_names)
    SNR = np.zeros((n_channels,))
    n_fft = int(seconds * raw.info["sfreq"])
    psd, freqs = mne.time_frequency.psd_welch(
        raw, fmin=fmin, fmax=fmax, n_fft=n_fft
    )
    fm = fooof.FOOOFGroup()
    fm.fit(freqs, psd)
    # The band mask does not depend on the channel, so compute it once.
    band = np.where((freqs > freq[0]) & (freqs < freq[1]))[0]
    for ch in range(n_channels):
        # Subtract the fitted aperiodic (1/f) component in dB space.
        corrected = 10 * np.log10(psd[ch]) - 10 * fm.get_fooof(ch)._ap_fit
        SNR[ch] = corrected[band].max()
    return SNR
def get_all_child_wmes(self):
    """ Returns a list of (attr, val) tuples representing all wmes rooted at this identifier
    val will either be an Identifier or a string, depending on its type """
    children = []
    for i in range(self.GetNumberChildren()):
        child = self.GetChild(i)
        if child.IsIdentifier():
            value = child.ConvertToIdentifier()
        else:
            value = child.GetValueAsString()
        children.append((child.GetAttribute(), value))
    return children
def scan_image_directory(path):
    """Scan a directory of FITS files and collect basic per-image stats.

    Writes a ``directory.csv`` file (readable by pandas) into the scanned
    directory and returns the same data as a DataFrame.

    Parameters
    ----------
    path : str, pathlib.Path
        Directory containing ``*.fits`` files.

    Returns
    -------
    pd.DataFrame
        One row per FITS file with name, date/time, filter, exposure and
        geometry columns taken from the FITS header.
    """
    input_dir = Path(path)
    directory_file = input_dir / "directory.csv"
    files = list(input_dir.glob("*.fits"))
    print(len(files), "images found.")
    bucket = []
    print("Scanning directory...")
    for fitspath in tqdm(files):
        # Only the header is needed; the data array is discarded.
        _, head = pf.getdata(fitspath, header=True)
        line = {}
        line["name"] = fitspath.name
        line["date"] = head["DATE_OBS"]
        line["time"] = head["TIME_OBS"]
        line["filter"] = head["GFLT"]
        line["exposure"] = head["ELAPTIME"]
        line["hasSlit"] = head["SLIT"] != "Mirror"
        # NOTE(review): exact float equality on the exposure time looks
        # fragile — confirm ELAPTIME is stored verbatim as 0.482500.
        line["isValid"] = head["ELAPTIME"] == 0.482500 and head["GFLT"] == "contK"
        line["naxis1"] = head["NAXIS1"]
        line["naxis2"] = head["NAXIS2"]
        bucket.append(line)
    df = pd.DataFrame(bucket)
    df.to_csv(directory_file, index=False)
    print("Metadata CSV generated at", directory_file)
    return df
def print_configure_help_info():
    """
    print configuration tips
    """
    # Adjacent string literals are concatenated; output is identical to
    # writing the four fragments separately.
    message = (
        'Please run the command first: duedge configure '
        '--access-key=<your access-key> '
        '--secret-key=<your secret-key> '
        'to initialize local user configuration'
    )
    sys.stdout.write(message)
    sys.stdout.write(os.linesep)
def test_function3():
    """tests the key guessing function"""
    basepath = os.path.join(os.getcwd(), "MusicXML_files")
    for fname in os.listdir(basepath):
        if not fname.endswith(".mxl"):
            continue
        extractor = MusicXMLExtractor(os.path.join(basepath, fname))
        extractor.read_xml_from_zip()
        root = extractor.parse_song_key()
        # Note four semitones below the root (-3 + -1).
        watched_note = increment_note(root, -3 + -1)
        note_chord_dict = extractor.parse_melody_with_harmony()
        count_matrix = np.array([note_chord_dict[key] for key in note_chord_dict])
        total_notes = count_matrix.sum()
        pitch_names = ['C', 'C#', 'D', 'D#', 'E', 'F',
                       'F#', 'G', 'G#', 'A', 'A#', 'B']
        col = pitch_names.index(watched_note)
        watched_count = count_matrix[:, col].sum()
        print(fname, root, watched_count, total_notes)
        print("%.2f" % (watched_count / float(total_notes) * 100))
def load_mat(filename):
    """
    Reads a OpenCV Mat from the given filename.

    :param filename: path of the file to read
    :return: whatever ``read_mat`` parses from the binary stream
    """
    # BUG FIX: the original `open(...)` handle was never closed; the context
    # manager guarantees closure even if read_mat raises.
    with open(filename, 'rb') as mat_file:
        return read_mat(mat_file)
def data_consist_notebook(table1, table2, key1, key2, schema1, schema2, fname, output_root=''):
    """
    Automatically generate ipynb for checking data consistency

    Parameters
    ----------
    table1: pandas DataFrame
        one of the two tables to compare
    table2: pandas DataFrame
        one of the two tables to compare
    key1: string
        key for table1
    key2: string
        key for table2
    schema1: pandas DataFrame
        data schema (contains column names and corresponding data types) for _table1
    schema2: pandas DataFrame
        data schema (contains column names and corresponding data types) for _table2
    fname: string
        the output file name
    output_root: string, default=''
        the root directory for the output file

    Raises
    ------
    ValueError
        if a key is missing from its table, a table is not unique at key
        level, or ``output_root`` does not exist
    """
    # check whether keys are valid
    if key1 not in table1.columns.values:
        raise ValueError('key1: does not exist in table1')
    if key2 not in table2.columns.values:
        raise ValueError('key2: does not exist in table2')
    # check whether two tables are unique in key level
    if table1[key1].nunique() != table1.shape[0]:
        raise ValueError('table1: should be unique in %s level' % (key1))
    if table2[key2].nunique() != table2.shape[0]:
        raise ValueError('table2: should be unique in %s level' % (key2))
    # check output_root
    if output_root != '':
        if not os.path.isdir(output_root):
            raise ValueError('output_root: root not exists')
    # generate output file path
    output_path = os.path.join(output_root, 'data_consist_notebook_%s.py' %(fname))
    # delete potential generated script and notebook
    if os.path.isfile(output_path):
        os.remove(output_path)
    if os.path.isfile(output_path.replace('.py', '.ipynb')):
        os.remove(output_path.replace('.py', '.ipynb'))
    schema, _ = _check_features(schema1, schema2)

    dir_path = os.path.dirname(os.path.realpath(__file__))

    def _read_template(name):
        # BUG FIX: the original used bare open(...).read() four times and
        # leaked the file handles; close each template promptly.
        with open(os.path.join(dir_path, 'templates', name)) as template_file:
            return template_file.read()

    main_line = _read_template('data_consist_main.txt')
    key_line = _read_template('data_consist_key.txt')
    str_line = _read_template('data_consist_str.txt')
    numeric_line = _read_template('data_consist_numeric.txt')
    date_line = _read_template('data_consist_date.txt')

    with open(output_path, "a") as outbook:
        outbook.write(main_line)
        schema_error = schema[schema['error'] != ''].reset_index(drop=True)
        if schema_error.shape[0] > 0:
            schema_error['error_column'] = schema_error['column_1']
            schema_error.loc[schema_error['error_column'].isnull(), 'error_column'] = schema_error['column_2']
            outbook.write('\n"""\n## error columns\n\n')
            for i in range(schema_error.shape[0]):
                get = schema_error.iloc[i]
                outbook.write('**%s:** %s<br>' % (get['error_column'], get['error']))
            outbook.write('"""\n\n')
        # only compare check columns in both table1 and table2, and follow the column order of table1
        check_cols = [col for col in table1.columns.values if col in schema[schema['error'] == '']['column_1'].values]
        for col in check_cols:
            # get the data type of the column
            col_type = schema[schema['column_1'] == col]['type_1'].values[0]
            outbook.write('\n"""\n## %s (type: %s)\n\n"""\n\n' %(col, col_type))
            outbook.write('col = "%s"\n' %(col))
            # for key and str, compare intersection
            if col_type == 'key':
                outbook.write(key_line)
            elif col_type == 'str':
                outbook.write(str_line)
            elif col_type == 'numeric':
                outbook.write(numeric_line)
            else:
                outbook.write(date_line)
        # NOTE: the redundant outbook.close() inside the with-block was removed;
        # the context manager already closes the file.

    # NOTE(review): shelling out with interpolated fname; fname should not
    # contain shell metacharacters — confirm callers pass sanitized names.
    os.system("python -m py2nb %s %s" %(output_path, output_path.replace('.py', '.ipynb')))
def preprocess_fmri(rawdata=None):
    """Placeholder for an fMRI preprocessing function.

    Args:
        rawdata (pyrsa.data.dataset.Dataset): the neural data

    Returns:
        preprocessed neural data in format of measurements,
        descriptors, obs_descriptors, channel_descriptors

    Example:
        .. code-block:: python

            measurements, descriptors, obs_descriptors,
            channel_descriptors = preprocess(rawdata)
    """
    message = "preprocess_fmri function not implemented!"
    raise NotImplementedError(message)
def connected_components(image, threshold, min_area, max_area, max_features, invert=False):
    """
    Detect features using connected-component labeling.

    Arguments:
        image (float array): The image data. \n
        threshold (float): The threshold value. \n
        min_area (number): Minimum region area to keep. \n
        max_area (number): Maximum region area to keep. \n
        max_features (int): Maximum number of features to return. \n
        invert (bool): If True, invert the thresholded mask.

    Returns:
        features (pandas DataFrame): A pandas DataFrame with the detected features. \n
        image_out (2D array): The thresholded (0/1) image.
    """
    threshold_image = (image > threshold).astype(int)  # threshold image
    if invert:
        threshold_image = 1 - threshold_image
    label_image = skimage.measure.label(threshold_image)
    # http://scikit-image.org/docs/dev/api/skimage.measure.html
    regions = skimage.measure.regionprops(label_image=label_image, intensity_image=image)
    rows = []
    for region in regions:
        # Area filter first
        if region.area < min_area or region.area > max_area:  # Do not add feature
            continue
        # BUG FIX: the original counter `j` was never incremented, so the
        # max_features cap was never enforced.
        if len(rows) >= max_features:  # Do not add feature
            break
        rows.append({'y': region.centroid[0],
                     'x': region.centroid[1],
                     'y_weighted': region.weighted_centroid[0],
                     'x_weighted': region.weighted_centroid[1],
                     'orientation': region.orientation,
                     'minor_axis_length': region.minor_axis_length,
                     'major_axis_length': region.major_axis_length,
                     'eccentricity': region.eccentricity,
                     'area': region.area,
                     'equivalent_diameter': region.equivalent_diameter,
                     'filled_area': region.filled_area,
                     'max_intensity': region.max_intensity,
                     'mean_intensity': region.mean_intensity})
    # Build the DataFrame once at the end: DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0, and repeated appends were quadratic.
    features = pd.DataFrame(rows)
    return features, threshold_image
def reduce_scan(row, params, **kwargs):
    """
    Reduce scan-mode grism data
    .. warning::
        This function is not yet implemented. It will raise an exception.
    Parameters
    ----------
    row : abscal.common.exposure_data_table.AbscalDataTable
        Single-row table of the exposure to be extracted.
    params : dict
        Dictionary of parameters to use for the reduction
    arg_list : namespace
        Namespace of command-line arguments.
    Returns
    -------
    row : abscal.common.exposure_data_table.AbscalDataTable
        Updated single-row table of the exposure
    """
    raise NotImplementedError("Scan mode is not yet available.")
    # NOTE(review): everything below this raise is unreachable scaffolding for
    # the future implementation. It also references names that are not defined
    # in this scope (`arg_list`, `input_table`), so it would fail as-is if the
    # raise were ever removed — confirm before enabling.
    default_values = get_defaults('abscal.common.args')
    # dict union (PEP 584) requires Python >= 3.9.
    base_defaults = default_values | get_defaults(kwargs.get('module_name', __name__))
    verbose = arg_list.verbose
    show_plots = arg_list.plots
    bkg_flat_order = arg_list.bkg_flat_order
    file = os.path.join(row["path"], row["filename"])
    with fits.open(file) as inf:
        image = inf['SCI'].data
        filter = row['filter']
        xsize, ysize = image.shape[1], image.shape[0]
        err = inf['ERR'].data
        time = inf['TIME'].data
        dq = inf['DQ'].data
    return input_table
def module(spec):
    """ Returns the module at :spec:
    @see Issue #2
    :param spec: to load.
    :type spec: str
    """
    # Make modules in the current working directory importable.
    working_dir = os.getcwd()
    if working_dir not in sys.path:
        sys.path.append(working_dir)
    return importlib.import_module(spec)
def transform_digits_to_string(labels: Tuple[str], coefficients,
                               offset: Fraction) -> str:
    """Form a string from digits.

    Arguments
    ---------
    labels: the tuple of labels (ex.: ('x', 'y', 'z') or ('a', 'b', 'c'))
    coefficients: the parameters in front of label (ex.: (1.0, 0.5, 0.0))
    offset: the number (ex.: 2/3)

    Output
    ------
    string

    Example
    -------
    >>> transform_digits_to_string(('x', 'y', 'z'), (1.0, 0.5, 0.0), 0.6666667)
    'x+1/2y+2/3'
    """
    parts = []
    for coefficient, label in zip(coefficients, labels):
        term = transform_fraction_with_label_to_string(coefficient, label)
        if term == "":
            continue
        # Prepend '+' unless the term is negative or it is the first one.
        if term.startswith("-") or not parts:
            parts.append(term)
        else:
            parts.append(f"+{term:}")
    offset_str = str(Fraction(offset).limit_denominator(10))
    if offset_str == "0":
        # A zero offset is only printed when nothing else was emitted.
        if not parts:
            parts.append(offset_str)
    # FIX: use boolean `or` instead of the original bitwise `|`.
    elif not parts or offset_str.startswith("-"):
        parts.append(offset_str)
    else:
        parts.append(f"+{offset_str:}")
    return "".join(parts)
def match_peaks_with_mz_info_in_spectra(spec_a, spec_b, ms2_ppm=None, ms2_da=None):
    """
    Match two spectra, find common peaks. If both ms2_ppm and ms2_da is defined, ms2_da will be used.
    :return: numpy array of shape (n, 4). Each row contains four elements:
        m/z from spec 1; intensity from spec 1; m/z from spec 2; intensity from spec 2.
    """
    # Cursor into each spectrum's peak list.
    # NOTE(review): the two-pointer merge below assumes both spectra are
    # sorted by ascending m/z (column 0) — confirm with callers.
    a = 0
    b = 0
    spec_merged = []
    # Running intensity-weighted m/z and summed intensity of the spec_b peaks
    # matched so far against the current spec_a peak.
    peak_b_mz = 0.
    peak_b_int = 0.
    while a < spec_a.shape[0] and b < spec_b.shape[0]:
        mass_delta_ppm = (spec_a[a, 0] - spec_b[b, 0]) / spec_a[a, 0] * 1e6
        if ms2_da is not None:
            # ms2_da takes precedence: convert the Da tolerance to ppm.
            ms2_ppm = ms2_da / spec_a[a, 0] * 1e6
        if mass_delta_ppm < -ms2_ppm:
            # Peak only existed in spec a.
            # Emit the spec_a peak together with any spec_b mass accumulated
            # for it, then reset the accumulator.
            spec_merged.append([spec_a[a, 0], spec_a[a, 1], peak_b_mz, peak_b_int])
            peak_b_mz = 0.
            peak_b_int = 0.
            a += 1
        elif mass_delta_ppm > ms2_ppm:
            # Peak only existed in spec b.
            spec_merged.append([0., 0., spec_b[b, 0], spec_b[b, 1]])
            b += 1
        else:
            # Peak existed in both spec.
            # Fold the spec_b peak into the intensity-weighted running mean.
            peak_b_mz = ((peak_b_mz * peak_b_int) + (spec_b[b, 0] * spec_b[b, 1])) / (peak_b_int + spec_b[b, 1])
            peak_b_int += spec_b[b, 1]
            b += 1
    # Flush a pending match for the last spec_a peak that was being matched
    # when one of the spectra ran out.
    if peak_b_int > 0.:
        spec_merged.append([spec_a[a, 0], spec_a[a, 1], peak_b_mz, peak_b_int])
        peak_b_mz = 0.
        peak_b_int = 0.
        a += 1
    # Copy leftover unmatched peaks from whichever spectrum still has some.
    if b < spec_b.shape[0]:
        spec_merged += [[0., 0., x[0], x[1]] for x in spec_b[b:]]
    if a < spec_a.shape[0]:
        spec_merged += [[x[0], x[1], 0., 0.] for x in spec_a[a:]]
    if spec_merged:
        spec_merged = np.array(spec_merged, dtype=np.float64)
    else:
        # No peaks at all: return a single all-zero row so callers always get
        # a 2-D (n, 4) array.
        spec_merged = np.array([[0., 0., 0., 0.]], dtype=np.float64)
    return spec_merged
def make_recsim_env(
    recsim_user_model_creator: Callable[[EnvContext], AbstractUserModel],
    recsim_document_sampler_creator: Callable[[EnvContext], AbstractDocumentSampler],
    reward_aggregator: Callable[[List[AbstractResponse]], float],
) -> Type[gym.Env]:
    """Creates a RLlib-ready gym.Env class given RecSim user and doc models.
    See https://github.com/google-research/recsim for more information on how to
    build the required components from scratch in python using RecSim.
    Args:
        recsim_user_model_creator: A callable taking an EnvContext and returning
            a RecSim AbstractUserModel instance to use.
        recsim_document_sampler_creator: A callable taking an EnvContext and
            returning a RecSim AbstractDocumentSampler
            to use. This will include a AbstractDocument as well.
        reward_aggregator: Callable taking a list of RecSim
            AbstractResponse instances and returning a float (aggregated
            reward).
    Returns:
        An RLlib-ready gym.Env class to use inside a Trainer.
    """
    # The three creator callables are captured by closure in the class below;
    # the class (not an instance) is returned so RLlib can instantiate it with
    # its own per-worker EnvContext.
    class _RecSimEnv(gym.Wrapper):
        def __init__(self, config: Optional[EnvContext] = None):
            # Override with default values, in case they are not set by the user.
            default_config = {
                "num_candidates": 10,
                "slate_size": 2,
                "resample_documents": True,
                "seed": 0,
                "convert_to_discrete_action_space": False,
                "wrap_for_bandits": False,
            }
            # A plain dict (or None) is promoted to an EnvContext so the code
            # also works outside an RLlib worker.
            if config is None or isinstance(config, dict):
                config = EnvContext(config or default_config, worker_index=0)
            config.set_defaults(default_config)
            # Create the RecSim user model instance.
            recsim_user_model = recsim_user_model_creator(config)
            # Create the RecSim document sampler instance.
            recsim_document_sampler = recsim_document_sampler_creator(config)
            # Create a raw RecSim environment (not yet a gym.Env!).
            raw_recsim_env = environment.SingleUserEnvironment(
                recsim_user_model,
                recsim_document_sampler,
                config["num_candidates"],
                config["slate_size"],
                resample_documents=config["resample_documents"],
            )
            # Convert raw RecSim env to a gym.Env.
            gym_env = recsim_gym.RecSimGymEnv(raw_recsim_env, reward_aggregator)
            # Fix observation space and - if necessary - convert to discrete
            # action space (from multi-discrete).
            env = recsim_gym_wrapper(
                gym_env,
                config["convert_to_discrete_action_space"],
                config["wrap_for_bandits"],
            )
            # Call the super (Wrapper constructor) passing it the created env.
            super().__init__(env=env)
    return _RecSimEnv
def test_dpp_auth_resp_retries(dev, apdev):
    """DPP Authentication Response retries"""
    check_dpp_capab(dev[0])
    check_dpp_capab(dev[1])
    # Configure the Responder (dev0) to retransmit the Authentication
    # Response up to 3 times with a short (100 ms) retry interval.
    dev[0].set("dpp_resp_max_tries", "3")
    dev[0].set("dpp_resp_retry_time", "100")
    logger.info("dev0 displays QR Code")
    addr = dev[0].own_addr().replace(':', '')
    res = dev[0].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id0 = int(res)
    uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
    logger.info("dev1 scans QR Code")
    res = dev[1].request("DPP_QR_CODE " + uri0)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id1 = int(res)
    logger.info("dev1 displays QR Code")
    addr = dev[1].own_addr().replace(':', '')
    res = dev[1].request("DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr)
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    id1b = int(res)
    uri1b = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1b)
    logger.info("dev1 initiates DPP Authentication")
    # Mutual authentication: the Responder will later need to scan the
    # Initiator's QR code before it can send its Response.
    if "OK" not in dev[0].request("DPP_LISTEN 2412 qr=mutual"):
        raise Exception("Failed to start listen operation")
    if "OK" not in dev[1].request("DPP_AUTH_INIT peer=%d own=%d" % (id1, id1b)):
        raise Exception("Failed to initiate DPP Authentication")
    ev = dev[1].wait_event(["DPP-RESPONSE-PENDING"], timeout=5)
    if ev is None:
        raise Exception("Pending response not reported")
    ev = dev[0].wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=5)
    if ev is None:
        raise Exception("QR Code scan for mutual authentication not requested")
    # Stop Initiator from listening to frames to force retransmission of the
    # DPP Authentication Response frame with Status=0
    dev[1].request("DPP_STOP_LISTEN")
    dev[1].dump_monitor()
    dev[0].dump_monitor()
    logger.info("dev0 scans QR Code")
    res = dev[0].request("DPP_QR_CODE " + uri1b)
    if "FAIL" in res:
        raise Exception("Failed to parse QR Code URI")
    id0b = int(res)
    # type=1 identifies the DPP Authentication Response frame.
    ev = dev[0].wait_event(["DPP-TX"], timeout=5)
    if ev is None or "type=1" not in ev:
        raise Exception("DPP Authentication Response not sent")
    ev = dev[0].wait_event(["DPP-TX-STATUS"], timeout=5)
    if ev is None:
        raise Exception("TX status for DPP Authentication Response not reported")
    # The Initiator is no longer listening, so the Response must go unACKed.
    if "result=no-ACK" not in ev:
        raise Exception("Unexpected TX status for Authentication Response: " + ev)
    # The retry (configured above) must be observed within the retry window.
    ev = dev[0].wait_event(["DPP-TX"], timeout=15)
    if ev is None or "type=1" not in ev:
        raise Exception("DPP Authentication Response retransmission not sent")
def set_default_subparser(self, name, args=None):
    """
    Insert a default subcommand when none was given on the command line.

    see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand

    :param name: name of the subparser to use as the default
    :param args: optional argument list to modify in place; when None,
        ``sys.argv`` is modified instead
    """
    subparser_found = False
    for arg in sys.argv[1:]:
        if arg in ['-h', '--help']:  # global help if no subparser
            break
    else:
        # for/else: runs only when no -h/--help was seen above.
        for x in self._subparsers._actions:
            if not isinstance(x, argparse._SubParsersAction):
                continue
            for sp_name in x._name_parser_map.keys():
                if sp_name in sys.argv[1:]:
                    subparser_found = True
        if not subparser_found:
            # insert default in first position, this implies no
            # global options without a sub_parsers specified
            if args is None:
                sys.argv.insert(1, name)
            else:
                args.insert(0, name)
def unflatten(dictionary, delim='.'):
    """Turn a delimiter-flattened dictionary back into a nested one.

    Each key is split on the first occurrence of ``delim``; deeper levels
    are reconstructed by the recursive call. Insertion order is preserved.

    Arguments
    ---------
    dictionary : dict
        The flattened dictionary to reconstruct.
    delim : str, default='.'
        The delimiter used to indicate nested keys.
    """
    grouped = defaultdict(dict)
    for full_key, value in dictionary.items():
        prefix, sep, remainder = full_key.partition(delim)
        if sep:
            grouped[prefix][remainder] = value
        else:
            grouped[prefix] = value
    return {key: unflatten(val, delim) if isinstance(val, dict) else val
            for key, val in grouped.items()}
def read_array(dtype, data):
    """Reads a formatted string and outputs an array.

    The format is as for standard python arrays, which is
    [array[0], array[1], ... , array[n]]. Note the use of comma separators,
    and the use of square brackets.

    Args:
        data: The string to be read in.
        dtype: The data type of the elements of the target array.

    Raises:
        ValueError: Raised if the input data is not of the correct format.

    Returns:
        An array of data type dtype.
    """
    elements = read_list(data)
    converted = [read_type(dtype, element) for element in elements]
    return np.array(converted, dtype)
def in_ipynb():
    """Return True when running inside an IPython notebook kernel.

    Taken from Adam Ginsburg's SO answer here:
    http://stackoverflow.com/a/24937408/4118756
    """
    try:
        # get_ipython is only defined inside an IPython session; a plain
        # interpreter raises NameError here.
        app_name = get_ipython().config['IPKernelApp']['parent_appname']
    except NameError:
        return False
    return app_name == 'ipython-notebook'
def load_bioschemas_jsonld_from_html(url, config):
    """
    Load Bioschemas JSON-LD from a webpage.

    :param url: URL of the page to scrape
    :param config: extractor/filter configuration
    :return: array of extracted jsonld, or None when extraction fails
        (failures are logged and swallowed, best-effort behaviour)
    """
    try:
        extractor = bioschemas.extractors.ExtractorFromHtml(config)
        jsonld_filter = bioschemas.filters.BioschemasFilter(config)
        jsonlds = extractor.extract_jsonld_from_url(url)
        jsonlds = jsonld_filter.filter(jsonlds)
        logger.info('Got %d jsonld sections', len(jsonlds))
        return jsonlds
    except Exception:
        # FIX: use the module logger consistently (the original mixed `logger`
        # and the root `logging` module) and drop the unused exception binding.
        # Keep the original swallow-and-log, return-None contract.
        logger.exception('Ignoring failure')
        return None
def pformat(dictionary, function):
    """Recursively apply *function* to every float in a nested structure.

    Dicts, lists, tuples, sets and other containers are rebuilt with the same
    type; strings and bytes are returned untouched; any other value is
    returned as-is.

    :param dictionary: the (possibly nested) structure to transform
    :param function: callable applied to each float encountered
    """
    # FIX: the collections.Container alias was removed in Python 3.10;
    # use the collections.abc ABC instead.
    from collections.abc import Container

    if isinstance(dictionary, dict):
        return type(dictionary)((key, pformat(value, function)) for key, value in dictionary.items())
    # Warning: bytes and str are Containers too, but we must not recurse into
    # their characters, so they are picked out here first.
    if isinstance(dictionary, (bytes, str)):
        return dictionary
    if isinstance(dictionary, Container):
        return type(dictionary)(pformat(value, function) for value in dictionary)
    if isinstance(dictionary, float):
        return function(dictionary)
    return dictionary
def _tonal_unmodulo(x):
    """Undo chromatic modulo wrapping for a tonal (diatonic, chromatic) pair.

    Adjusts the chromatic component so it lies within 6 semitones of the
    diatonic step's base pitch; a third element, if present, is passed
    through unchanged.

    >>> _tonal_unmodulo((0,10,0))
    (0, -2, 0)
    >>> _tonal_unmodulo((6,0,0))
    (6, 12, 0)
    >>> _tonal_unmodulo((2, 0))
    (2, 0)
    """
    d = x[0]
    c = x[1]
    base_c = MS[d].c
    # Example: Cb --- base=0 c=11 c-base=11 11 - 12 = -1
    if c - base_c > 6:
        c = c - C_LEN
    # Example: B# --- base=11 c=0 c-base=-11 c+C_LEN =12
    if c - base_c < -6:
        c = c + C_LEN
    # FIX: catch only the expected missing-third-element case instead of a
    # bare except, which also swallowed KeyboardInterrupt/SystemExit.
    try:
        return (d, c, x[2])
    except IndexError:
        return (d, c)
def get_ncopy(path, aboutlink = False):
    """Returns an ncopy attribute value (it is a requested count of
    replicas). It calls gfs_getxattr_cached.

    :param path: file path to query
    :param aboutlink: when True, query the symlink itself
    :return: (ncopy_or_None, cc) — the attribute as int, or None when the
        attribute is absent; ``cc`` is passed through from getxattr
    """
    (n, cc) = getxattr(path, GFARM_EA_NCOPY, aboutlink)
    # FIX: identity comparison with None instead of `!=`.
    if n is not None:
        return (int(n), cc)
    return (None, cc)
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, bottleneck_tensor):
    """Ensures all the training, testing, and validation bottlenecks are cached.

    Because the same image is likely to be read multiple times (when no
    distortions are applied during training), computing the bottleneck layer
    values once per image up front and re-reading the cached values during
    training is a large speed-up. This walks every known image, computes its
    bottleneck and stores it on disk.

    Args:
        sess: The current active TensorFlow Session.
        image_lists: Dictionary of training images for each label.
        image_dir: Root folder string of the subfolders containing the training
            images.
        bottleneck_dir: Folder string holding cached files of bottleneck values.
        jpeg_data_tensor: Input tensor for jpeg data from file.
        bottleneck_tensor: The penultimate output layer of the graph.

    Returns:
        Nothing.
    """
    created_count = 0
    ensure_dir_exists(bottleneck_dir)
    for label_name, label_lists in image_lists.items():
        for category in ['train', 'test1', 'test2', 'validation']:
            try:
                category_list = label_lists[category]
                for index, _ in enumerate(category_list):
                    get_or_create_bottleneck(sess, image_lists, label_name, index,
                                             image_dir, category, bottleneck_dir,
                                             jpeg_data_tensor, bottleneck_tensor)
                    created_count += 1
                    # Progress report every 100 files.
                    if created_count % 100 == 0:
                        print(str(created_count) + ' bottleneck files created.')
            except KeyError:
                print("No files, skipping")
                continue
def has_three_or_more_vowels(string):
    """Check if string has three or more vowels."""
    # Count lowercase vowel characters directly instead of summing per-vowel
    # str.count() calls; the total is identical.
    vowel_total = sum(1 for ch in string if ch in 'aeiou')
    return vowel_total >= 3
def write_to_xlsx(
    fund_infos: list[FundInfo],
    xlsx_filename: Path,
    # NOTE: the default logger is evaluated once at definition time and shared
    # across calls — presumably null_logger() is stateless; confirm.
    logger: Logger = Logger.null_logger(),
) -> None:
    """
    Structuralize a list of fund infos to an Excel document.
    Input: a list of fund infos, and an Excel filename.

    Each ``schema`` entry below drives one column: ``name`` is both the header
    text and the FundInfo attribute read per row; optional ``width`` sets the
    column width and ``format`` the cell format.
    """
    # TODO profile to see whether and how much setting constant_memory improves
    # performance.
    with xlsxwriter.Workbook(xlsx_filename) as workbook:
        logger.log("新建 Excel 文档......")
        worksheet = workbook.add_worksheet()
        schema = [
            {"name": "基金名称", "width": 22},
            {"name": "基金代码"},
            {"name": "上一天净值日期", "width": 14, "format": {"num_format": "yyyy-mm-dd"}},
            {"name": "上一天净值", "width": 10, "format": {"bg_color": "yellow"}},
            {"name": "净值日期", "width": 13, "format": {"num_format": "yyyy-mm-dd"}},
            {"name": "单位净值", "format": {"bg_color": "yellow"}},
            {"name": "日增长率", "format": {"num_format": "0.00%"}},
            {"name": "估算日期", "width": 17, "format": {"num_format": "yyyy-mm-dd hh:mm"}},
            {"name": "实时估值", "width": 11, "format": {"bg_color": "B4D6E4"}},
            {"name": "估算增长率", "width": 11, "format": {"num_format": "0.00%"}},
            {"name": "分红送配"},
            {"name": "近1周同类排名", "width": 13},
            {"name": "近1月同类排名", "width": 13},
            {"name": "近3月同类排名", "width": 13},
            {"name": "近6月同类排名", "width": 13},
            {"name": "今年来同类排名", "width": 13},
            {"name": "近1年同类排名", "width": 13},
            {"name": "近2年同类排名", "width": 13},
            {"name": "近3年同类排名", "width": 13},
        ]
        logger.log("调整列宽......")
        for col, field in enumerate(schema):
            # FIXME Despite the xlsxwriter doc saying that set_column(i, i, None) doesn't
            # change the column width, some simple tests show that it does. The source
            # code of xlsxwriter is too complex that I can't figure out where the
            # bug originates.
            worksheet.set_column(col, col, field.get("width"))
        header_format = workbook.add_format(
            dict(bold=True, align="center", valign="top", border=1)
        )
        logger.log("写入文档头......")
        for col, field in enumerate(schema):
            worksheet.write_string(0, col, field["name"], header_format)
        # One reusable format object per column.
        cell_formats = [workbook.add_format(field.get("format")) for field in schema]
        logger.log("写入文档体......")
        # Row 0 holds the header, so data rows start at 1.
        for row, fund_info in tenumerate(fund_infos, start=1, unit="行", desc="写入基金信息"):
            for col, field in enumerate(schema):
                # Judging from source code of xlsxwriter, add_format(None) is equivalent
                # to default format.
                worksheet.write(
                    row, col, getattr(fund_info, field["name"]), cell_formats[col]
                )
        logger.log("Flush 到硬盘......")
def lbfgs_inverse_hessian_factors(S, Z, alpha):
"""
Calculates factors for inverse hessian factored representation.
It implements algorithm of figure 7 in:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
"""
J = S.shape[1]
StZ = S.T @ Z
R = jnp.triu(StZ)
eta = jnp.diag(StZ)
beta = jnp.hstack([jnp.diag(alpha) @ Z, S])
minvR = -jnp.linalg.inv(R)
alphaZ = jnp.diag(jnp.sqrt(alpha)) @ Z
block_dd = minvR.T @ (alphaZ.T @ alphaZ + jnp.diag(eta)) @ minvR
gamma = jnp.block([[jnp.zeros((J, J)), minvR],
[minvR.T, block_dd]])
return beta, gamma | 5,330,254 |
def get_geojson_observations(properties: List[str] = None, **kwargs) -> Dict[str, Any]:
    """ Get all observation results combined into a GeoJSON ``FeatureCollection``.
    By default this includes some basic observation properties as GeoJSON ``Feature`` properties.
    The ``properties`` argument can be used to override these defaults.
    Example:
        >>> get_geojson_observations(observation_id=16227955, properties=["photo_url"])
        {"type": "FeatureCollection",
            "features": [{
                "type": "Feature",
                "geometry": {"type": "Point", "coordinates": [4.360086, 50.646894]},
                "properties": {
                    "photo_url": "https://static.inaturalist.org/photos/24355315/square.jpeg?1536150659"
                }
            }
            ]
        }
    Args:
        properties: Properties from observation results to include as GeoJSON properties
        kwargs: Arguments for :py:func:`.get_observations`
    Returns:
        A ``FeatureCollection`` containing observation results as ``Feature`` dicts.
    """
    # Restrict results to mappable observations (those with coordinates).
    kwargs["mappable"] = True
    # NOTE(review): the kwargs dict is passed positionally, not expanded with
    # **kwargs — confirm get_all_observations really accepts a params dict.
    observations = get_all_observations(kwargs)
    return as_geojson_feature_collection(
        (flatten_nested_params(obs) for obs in observations),
        properties=properties if properties is not None else DEFAULT_OBSERVATION_ATTRS,
    )
def createFilter(fc, Q, fs):
    """
    Returns digital BPF with given specs
    :param fc: BPF center frequency (Hz)
    :param Q: BPF Q (Hz/Hz)
    :param fs: sampling rate (Samp/sec)
    :returns: digital implementation of BPF as (b, a) coefficient arrays
    """
    # Analog prototype: H(s) = (wc/Q) s / (s^2 + (wc/Q) s + wc^2)
    omega_c = 2 * pi * fc
    bandwidth = omega_c / Q
    numerator = [bandwidth, 0]
    denominator = [1, bandwidth, omega_c ** 2]
    # Bilinear transform maps the analog design to the digital domain.
    return signal.bilinear(numerator, denominator, fs)
def delete_tc_policy_class(device, parent, classid, namespace=None):
    """Delete a TC policy class of a device.

    Thin wrapper that delegates directly to the privileged helper in
    ``priv_tc_lib``; no validation or translation happens here.

    :param device: (string) device name
    :param parent: (string) qdisc parent class ('root', 'ingress', '2:10')
    :param classid: (string) major:minor handler identifier ('10:20')
    :param namespace: (string) (optional) namespace name
    """
    priv_tc_lib.delete_tc_policy_class(device, parent, classid,
                                       namespace=namespace)
def create_uno_struct(cTypeName: str):
    """Create a UNO struct and return it.
    Similar to the function of the same name in OOo Basic.

    :param cTypeName: fully-qualified UNO type name to instantiate
    Returns:
        object: uno struct
    """
    reflection = get_core_reflection()
    # Look up the IDL class for the requested type name.
    idl_class = reflection.forName(cTypeName)
    # createObject returns (return-value, out-param struct); only the struct
    # is needed.
    _, struct = idl_class.createObject(None)
    return struct
def get_dir():
    """Return the location of resources for report

    Resolves the on-disk path of the ``naarad.resources`` package via
    pkg_resources (resource name ``None`` yields the package directory).

    :return: filesystem path string of the resources directory
    """
    return pkg_resources.resource_filename('naarad.resources',None)
def p_atl_suite(p) :
    """
    atl_suite : NEWLINE INDENT atl_line_stmts DEDENT
    """
    # NOTE: the docstring above is the PLY grammar production for this rule;
    # it is consumed by the parser generator and must not be edited as prose.
    # The suite's value is the enclosed statement list (p[3]); the surrounding
    # NEWLINE/INDENT/DEDENT tokens are discarded.
    p[0] = p[3]
async def multiwalk(ip, community, oids,
                    port=161, timeout=6, fetcher=multigetnext):
    # type: (str, str, List[str], int, int, Callable[[str, str, List[str], int, int], List[VarBind]]) -> Generator[VarBind, None, None]
    """
    Executes a sequence of SNMP GETNEXT requests and returns an async_generator
    over :py:class:`~puresnmp.pdu.VarBind` instances.
    This is the same as :py:func:`~.walk` except that it is capable of
    iterating over multiple OIDs at the same time.
    Example::
        >>> multiwalk('127.0.0.1', 'private',
        ...     ['1.3.6.1.2.1.1', '1.3.6.1.4.1.1'])
        <async_generator object multiwalk at 0x7fa2f775cf68>
    """
    LOG.debug('Walking on %d OIDs using %s', len(oids), fetcher.__name__)

    # Initial fetch: one GETNEXT round for all requested OIDs at once.
    varbinds = await fetcher(ip, community, oids, port, timeout)
    requested_oids = [OID(oid) for oid in oids]
    grouped_oids = group_varbinds(varbinds, requested_oids)
    # OIDs whose subtree was not exhausted by this round.
    unfinished_oids = get_unfinished_walk_oids(grouped_oids)
    LOG.debug('%d of %d OIDs need to be continued',
              len(unfinished_oids),
              len(oids))
    # Tracks OIDs already emitted so overlapping requested subtrees do not
    # yield duplicates.
    yielded = _set([])  # type: ignore
    for var in group_varbinds(varbinds, requested_oids).values():
        for varbind in var:
            # Only yield varbinds that fall inside one of the requested
            # subtrees ("in" here is OID containment, not list membership).
            containment = [varbind.oid in _ for _ in requested_oids]
            if not any(containment) or varbind.oid in yielded:  # type: ignore
                continue
            yielded.add(varbind.oid)  # type: ignore
            yield varbind
    # As long as we have unfinished OIDs, we need to continue the walk for
    # those.
    while unfinished_oids:
        # Resume each unfinished branch from the last OID it returned.
        next_fetches = [_[1].value.oid for _ in unfinished_oids]
        try:
            varbinds = await fetcher(ip, community,
                                     [unicode(_) for _ in next_fetches],
                                     port,
                                     timeout)
        except NoSuchOID:
            # Reached end of OID tree, finish iteration
            break
        grouped_oids = group_varbinds(varbinds,
                                      next_fetches,
                                      user_roots=requested_oids)
        unfinished_oids = get_unfinished_walk_oids(grouped_oids)
        LOG.debug('%d of %d OIDs need to be continued',
                  len(unfinished_oids),
                  len(oids))
        for var in group_varbinds(varbinds, next_fetches).values():
            for varbind in var:
                containment = [varbind.oid in _ for _ in requested_oids]
                if not any(containment) or varbind.oid in yielded:  # type: ignore
                    continue
                yielded.add(varbind.oid)  # type: ignore
                yield varbind
def checkout_commit(repo, commit_id):
    """
    Context manager that checks out a commit in the repo.

    Restores the previously checked-out ref (or commit, when HEAD was
    detached) when the ``with`` block exits, even on error.
    """
    # Remember where HEAD pointed so it can be restored afterwards.
    previous = repo.head.commit if repo.head.is_detached else repo.head.ref
    try:
        repo.git.checkout(commit_id)
        yield
    finally:
        repo.git.checkout(previous)
def SoftCrossEntropyLoss(input, target):
    """
    Calculate the CrossEntropyLoss with soft targets.

    :param input: prediction logits, shape (batch, num_classes)
    :param target: target probabilities, shape (batch, num_classes)
    :return: scalar tensor, mean soft cross-entropy over the batch
    """
    total_loss = torch.tensor(0.0)
    for i in range(input.size(1)):
        cls_idx = torch.full((input.size(0),), i, dtype=torch.long)
        # reduction='none' keeps the per-sample losses so they can be
        # weighted by the soft target probabilities (`reduce=False` is
        # deprecated and emits warnings on modern PyTorch).
        loss = F.cross_entropy(input, cls_idx, reduction='none')
        total_loss += target[:, i].dot(loss)
    return total_loss / input.shape[0]
def test_base_mult_list_is_empty_without_base_lists():
    """Verify baseMultList.is_empty reports True when it holds no baseLists."""
    result = base_mult_list.is_empty()
    assert result is True
def composite_layer(inputs, mask, hparams):
    """Composite layer: stack set-layers per hparams.composite_layer_type."""
    x = inputs
    if hparams.composite_layer_type == "ravanbakhsh":
        # Stack ravanbakhsh set layers on top of each other.
        for layer_idx in xrange(hparams.layers_per_layer):
            with tf.variable_scope(".%d" % layer_idx):
                x = common_layers.ravanbakhsh_set_layer(
                    hparams.hidden_size, x, mask=mask, dropout=0.0)
    elif hparams.composite_layer_type == "reembedding":
        # Transform elements n times, pool into a context, then use that
        # context in one final re-embedding layer.
        for layer_idx in xrange(hparams.layers_per_layer):
            with tf.variable_scope(".%d" % layer_idx):
                x = common_layers.linear_set_layer(
                    hparams.hidden_size, x, dropout=0.0)
        context = common_layers.global_pool_1d(x, mask=mask)
        x = common_layers.linear_set_layer(
            hparams.hidden_size, x, context=context, dropout=0.0)
    return x
def edit_expense(expense_id, budget_id, date_incurred, description, amount, payee_id):
    """
    Changes the details of the given expense.

    Returns True when the targeted expense row was actually updated.
    """
    query = sqlalchemy.text("""
        UPDATE budget_expenses
        SET
            budget_id = (:budget_id),
            date_incurred = (:date_incurred),
            description = (:description),
            cost = (:amount),
            payee_id = (:payee_id)
        WHERE
            expense_id = (:expense_id)
        """)
    params = dict(
        expense_id=expense_id,
        budget_id=budget_id,
        date_incurred=date_incurred,
        description=description,
        amount=amount,
        payee_id=payee_id,
    )
    result = flask.g.db.execute(query, **params)
    return result.rowcount != 0
def get_mnist_iterator(batch_size, input_shape, num_parts=1, part_index=0):
    """Returns training and validation iterators for MNIST dataset.
    """
    get_mnist_ubyte()
    # A 3-d input shape keeps the images 2-D; any other shape flattens them.
    flat = len(input_shape) != 3
    shared = dict(
        input_shape=input_shape,
        batch_size=batch_size,
        flat=flat,
        num_parts=num_parts,
        part_index=part_index)
    train_dataiter = mx.io.MNISTIter(
        image="data/train-images-idx3-ubyte",
        label="data/train-labels-idx1-ubyte",
        shuffle=True,
        **shared)
    val_dataiter = mx.io.MNISTIter(
        image="data/t10k-images-idx3-ubyte",
        label="data/t10k-labels-idx1-ubyte",
        **shared)
    return (train_dataiter, val_dataiter)
def dataset():
    """Get data frame for test purposes."""
    people = {'Name': ['alice', 'bob', 'claire'], 'Age': [26, 34, 19]}
    return pd.DataFrame(people, index=[0, 2, 1], columns=['Name', 'Age'])
def main(number_sites_along_xyz=10, steps=25000, external_field_sweep_start=1,
         external_field_sweep_end=11, temperature_sweep_start=1,
         temperature_sweep_end=11):
    """Run simulation over a sweep of temperature and external field values.

    Results are appended row by row into a pre-allocated DataFrame and
    then written to ``results/runs_magnetization_vs_temperature.csv``.

    Parameters
    ----------
    number_sites_along_xyz : int
        Lattice edge length passed to the Ising model.
    steps : int
        Monte-Carlo steps per simulation run.
    external_field_sweep_start : int
    external_field_sweep_end : int
    temperature_sweep_start : int
    temperature_sweep_end : int
        Sweep bounds; the simulation count implies half-open ranges
        ``[start, end)`` — TODO confirm in the sweep helper.
    """
    ising = Ising(number_sites_along_xyz=number_sites_along_xyz)
    column_names = [
        "temperature", "external_field", "mean_magnetization",
        "std_dev_magnetization"
    ]
    # Output goes under ./results relative to the current working directory.
    output_directory = Path(".").cwd() / "results"
    output_directory.mkdir(parents=True, exist_ok=True)
    output_filename = (
        output_directory / "runs_magnetization_vs_temperature.csv")
    # One row per (external_field, temperature) pair.
    number_simulations = ((
        external_field_sweep_end - external_field_sweep_start) *
        (temperature_sweep_end - temperature_sweep_start))
    simulation_results_database = pd.DataFrame(
        columns=column_names, index=np.arange(number_simulations))
    # The sweep helper fills `simulation_results_database` in place.
    _external_field_and_temperature_sweep(
        ising=ising, steps=steps, column_names=column_names,
        external_field_sweep_start=external_field_sweep_start,
        external_field_sweep_end=external_field_sweep_end,
        temperature_sweep_start=temperature_sweep_start,
        temperature_sweep_end=temperature_sweep_end,
        simulation_results_database=simulation_results_database)
    _save_database_to_disk(database=simulation_results_database,
                           output_filename=output_filename)
def get_consumer_key():
    """This is entirely questionable. See settings.py

    Fetch the Twitter consumer key from a remote text file; returns None
    when the key cannot be retrieved.
    """
    consumer_key = None
    try:
        loc = "%s/consumer_key.txt" % settings.TWITTER_CONSUMER_URL
        url = urllib2.urlopen(loc)
        consumer_key = url.read().rstrip()
    except (urllib2.HTTPError, IOError) as e:
        # Converted from the Python 2 `except ..., e:` / `print` statement
        # forms, which are SyntaxErrors under Python 3.  NOTE(review): the
        # `urllib2` module itself is Python 2 only — migrate to
        # urllib.request when touching this code path.
        print("Unable to obtain consumer_key from %s: %s" % (loc, e))
    return consumer_key
def write_validation3_set_to_file(file, outfile):
    """
    Write only the records belonging to the tag-3.0 label system to a file.

    A line is copied to `outfile` when its "manual_tag-3.0" field is
    non-empty and every comma-separated label is part of the tag-3.0
    taxonomy.
    """
    tag3 = ["社会", "体育", "娱乐", "财经", "时政", "科技", "时尚", "教育", "情感", "文化",
            "旅游", "美食", "宠物", "星座命理", "搞笑", "壁纸头像", "生活", "职场", "小说",
            "国际", "房产", "汽车", "军事", "游戏", "动漫", "育儿", "健康", "历史", "儿童",
            "知识", "其他"]
    with open(file, "r", encoding="utf-8") as f, open(outfile, "w", encoding="utf-8") as wf:
        for line in f:
            record = json.loads(line.strip())
            label = record["manual_tag-3.0"]
            if label == "":
                continue
            # Multi-label entries are kept only when every label is valid;
            # a single label behaves like a one-element split.
            if all(tag in tag3 for tag in label.split(",")):
                wf.write(line)
def disconnect() -> Tuple[str, int]:
    """Deletes the DroneServerThread with a given id.
    Iterates over all the drones in the shared list and deletes the one with a
    matching drone_id. If none are found returns an error.
    Request:
        drone_id (str): UUID of the drone.
    Response:
        Tuple[str, int]: Response status.
        200, "OK" - Drone disconnected created successfully.
        400, "Bad Request" - Incorrect drone_id.
    """
    # Validate the payload before touching any shared state.
    if 'uuid' not in request.json or request.json["uuid"] == "":
        return "Bad Request", 400
    drone_id = request.json["uuid"]
    drones_lock, drone_ts = common_variables.get_drone_ts()
    # `with` guarantees the lock is released even if an exception is raised
    # while mutating the shared lists; the original explicit acquire/release
    # pairs would leave the lock held forever on error.
    with drones_lock:
        for drone in drone_ts:
            if drone_id == drone.drone_id:
                ports_lock, ports_assigned = common_variables.get_ports_assigned()
                # NOTE(review): ports_lock is fetched but never acquired —
                # confirm whether ports_assigned needs its own locking here.
                ports_assigned.remove(drone.ports[0])
                ports_assigned.remove(drone.ports[1])
                ports_assigned.remove(drone.ports[2])
                drone_ts.remove(drone)
                return "OK", 200
    return "Bad Request", 400
def add_company(context: Context, company: Company):
    """Will add an Company to Scenario Data.

    :param context: behave `context` object
    :param company: an instance of Company Tuple
    """
    error = "Expected Company named tuple but got '{}' instead".format(type(company))
    assert isinstance(company, Company), error
    context.scenario_data.companies[company.alias] = company
    logging.debug(
        "Successfully added Company: %s - %s to Scenario Data as '%s'",
        company.title,
        company.number,
        company.alias,
    )
def get_graph_feature(x, k=20, idx=None, x_coord=None):
    """
    Build DGCNN-style edge features for each point from its k neighbours.

    Args:
        x: (B, d, N)
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    if idx is None:
        # Dynamic knn graph on the features, or a fixed graph built from
        # the supplied point coordinates.
        idx = knn(x, k=k) if x_coord is None else knn(x_coord, k=k)
    if k is None:
        k = idx.size(-1)
    num_dims = x.size(1)
    neighbours = gather(x, idx)
    centers = x.transpose(2, 1).contiguous()
    centers = centers.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    # Edge feature = (neighbour - center, center), shaped (B, 2d, N, K).
    feature = torch.cat((neighbours - centers, centers), dim=3)
    return feature.permute(0, 3, 1, 2).contiguous()
def get_markers(
    image_array: np.ndarray,
    evened_selem_size: int = 4,
    markers_contrast_times: float = 15,
    markers_sd: float = 0.25,
) -> np.ndarray:
    """Finds the highest and lowest grey scale values for image flooding."""
    # Smooth a contrast-boosted copy of the image before marker selection.
    footprint = smo.disk(evened_selem_size)
    boosted = inc_contrast(image_array, contrast_times=markers_contrast_times)
    evened = sfi.rank.mean_bilateral(boosted, footprint)
    # Markers are defined by the highest and lowest grey levels,
    # pulled inwards by a fraction of a standard deviation.
    high = np.max(evened)
    low = np.min(evened)
    std = np.std(evened)
    markers: np.ndarray = np.zeros_like(np.array(image_array))
    markers[evened < low + (markers_sd * std)] = 3
    markers[evened > high - (markers_sd * std)] = 2
    return markers
def pd_bigdata_read_csv(file, **pd_read_csv_params):
    """
    Read a (potentially huge) CSV in chunks and concatenate the result.

    Reading speed improves only marginally, but peak memory usage drops
    significantly compared to a plain ``pd.read_csv``.

    :param file: path or buffer accepted by ``pd.read_csv``
    :param pd_read_csv_params: forwarded to ``pd.read_csv``; ``chunksize``
        (default 1,000,000 rows) also controls the ``get_chunk`` size here.
    :return: the concatenated DataFrame.
    """
    # dict.get replaces the original bare `except:`, which would have
    # silently swallowed *any* error, not just a missing key.
    chunk_size = pd_read_csv_params.get('chunksize', 1000000)
    reader = pd.read_csv(file, **pd_read_csv_params, iterator=True)
    chunks = []
    while True:
        try:
            chunks.append(reader.get_chunk(chunk_size))
        except StopIteration:
            print('[Info]: Iteration is stopped.')
            break
    return pd.concat(chunks, ignore_index=True, axis=0)
def PCA(Y_name, input_dim):
    """
    Principal component analysis: maximum likelihood solution by SVD.
    Adapted from GPy.util.linalg

    Arguments
    ---------
    :param Y_name: path of a comma-delimited file holding the NxD data matrix
    :param input_dim: int, dimension of projection

    Returns
    -------
    :rval X: Nxinput_dim np.array of dimensionality-reduced data, scaled to
        unit standard deviation per column.  (The input_dimxD mapping W is
        computed internally but not returned.)
    """
    Y = genfromtxt(Y_name, delimiter=',')
    centered = Y - Y.mean(axis=0)
    U, S, Vt = numpy.linalg.svd(centered, full_matrices=False)
    X = U[:, :input_dim]
    W = numpy.dot(numpy.diag(S), Vt).T[:, :input_dim]
    # Normalise the projection to unit variance; W absorbs the scale.
    scale = X.std(axis=0)
    X /= scale
    W *= scale
    return X
def data_cache_path(page, page_id_field='slug'):
    """
    Get (and make) local data cache path for data

    :param page: object whose `page_id_field` attribute identifies it
    :param page_id_field: attribute name used as the cache key (default 'slug')
    :return: cache directory path, created if missing
    """
    path = os.path.join(CACHE_ROOT, '.cache', 'data', *os.path.split(getattr(page, page_id_field)))
    # os.makedirs(..., exist_ok=True) replaces the external `sh.mkdir -p`
    # call: same semantics, no shell subprocess dependency.
    os.makedirs(path, exist_ok=True)
    return path
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
# Performance is fast-pathed for common cases:
# `None`, `list`, `tuple` and `int`.
if value is None:
return [1] * (n + 2)
# Always convert `value` to a `list`.
if isinstance(value, list):
pass
elif isinstance(value, tuple):
value = list(value)
elif isinstance(value, int):
value = [value]
elif not isinstance(value, collections.abc.Sized):
value = [value]
else:
value = list(value) # Try casting to a list.
len_value = len(value)
# Fully specified, including batch and channel dims.
if len_value == n + 2:
return value
# Apply value to spatial dims only.
if len_value == 1:
value = value * n # Broadcast to spatial dimensions.
elif len_value != n:
raise ValueError('{} should be of length 1, {} or {} but was {}'.format(
name, n, n + 2, len_value))
# Add batch and channel dims (always 1).
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1] | 5,330,279 |
def make_linear(input_dim, output_dim, bias=True, std=0.02):
    """
    Build a Linear layer with normally-initialised weights.

    Parameters
    ----------
    input_dim: int
    output_dim: int
    bias: bool
    std: float
        Standard deviation of the N(0, std) weight initialisation.

    Returns
    -------
    torch.nn.modules.linear.Linear
    """
    layer = nn.Linear(input_dim, output_dim, bias)
    init.normal_(layer.weight, std=std)
    # Biases, when present, start at zero.
    if bias:
        init.zeros_(layer.bias)
    return layer
def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov):
    """
    Log likelihood of ``x`` under a centered matrix-variate normal whose
    column covariance is conditioned on another variable.

    For the partitioned matrix-normal with column blocks Sigma_j (for X),
    Sigma_k (for Y) and cross block Sigma_jk, the conditional is

        X | Y ~ MN(0, Sigma_i, Sigma_j - Sigma_jk Sigma_k^{-1} Sigma_kj).

    The conditional column solve/determinant is computed efficiently by
    unpacking the covariance classes and dispatching to
    `solve_det_conditional`.

    Parameters
    ---------------
    x: tf.Tensor
        Observation tensor
    row_cov: CovBase
        Row covariance (Sigma_i above).
    col_cov: CovBase
        Column covariance (Sigma_j above).
    cond: tf.Tensor
        Off-diagonal block of the partitioned covariance (Sigma_jk above).
    cond_cov: CovBase
        Covariance of conditioning variable (Sigma_k above).
    """
    n_rows = tf.cast(tf.shape(input=x)[0], "float64")
    n_cols = tf.cast(tf.shape(input=x)[1], "float64")
    row_solve = row_cov.solve(x)
    row_logdet = row_cov.logdet
    # Column side: solve and log-determinant of the conditional covariance.
    col_solve, col_logdet = solve_det_conditional(
        tf.transpose(a=x), col_cov, tf.transpose(a=cond), cond_cov
    )
    return _mnorm_logp_internal(
        n_cols, n_rows, row_logdet, col_logdet, row_solve, col_solve
    )
def get_map_folderpath(detectionID):
    """
    Make sure map directory exists and return folder location for maps to be
    saved to.

    The directory is created under the directory that contains this module.
    (The original created `map/<id>` relative to the *current working
    directory* but returned a path under the module directory, so when the
    two differed the returned folder was never created.)
    """
    homedir = os.path.dirname(os.path.abspath(__file__))
    map_dirpath = os.path.join(homedir, 'map', str(detectionID))
    # exist_ok covers both the `map` parent and the per-detection folder.
    os.makedirs(map_dirpath, exist_ok=True)
    return map_dirpath
def get_next_by_date(name, regexp):
    """Get the next page by page publishing date"""
    current = Page.get(Page.name == name)
    candidates = (Page.select(Page.name, Page.title)
                  .where(Page.pubtime > current.pubtime)
                  .order_by(Page.pubtime.asc())
                  .dicts())
    # Return the earliest later page whose name matches the pattern
    # (implicitly None when nothing matches).
    for candidate in ifilter(lambda row: regexp.match(row['name']), candidates):
        return candidate
def relative_performance(r_df, combinations, optimal_combinations, ref_method='indp', ref_jt='nan', ref_at='nan',
                         ref_vt='nan', cost_type='Total', deaggregate=False):
    """
    This function computes the relative performance, relative cost, and universal
    relative measure :cite:`Talebiyan2019c` based on results from JC and INDP.

    Parameters
    ----------
    r_df : dict
        Dictionary that contains complete results by JC and INDP collected by
        :func:`read_results`.
    combinations : dict
        All combinations of magnitude, sample, judgment type, resource allocation type
        involved in the JC (or any other decentralized results) collected by
        :func:`generate_combinations`.
    optimal_combinations : dict
        All combinations of magnitude, sample, judgment type, resource allocation type
        involved in the INDP (or any other optimal results) collected by
        :func:`generate_combinations`.
    ref_method : str, optional
        Reference method to compute the relative measure against. The default is 'indp'.
    ref_jt : str, optional
        Reference judgment type; only used when the reference method is JC.
        The default is 'nan'.
    ref_at : str, optional
        Reference resource allocation type; only used when the reference method
        is JC. The default is 'nan'.
    ref_vt : str, optional
        Reference valuation type; only used when the reference method is JC and
        the reference resource allocation type is auction. The default is 'nan'.
    cost_type : str, optional
        Cost type for which the relative measure is computed. The default is 'Total'.
    deaggregate : bool, optional
        Should the deaggregated results (for separate layers) be computed.
        The default is False.

    Returns
    -------
    lambda_df : pandas.DataFrame
        Data frame that contains the relative measures.
    """
    columns = ['Magnitude', 'cost_type', 'decision_type', 'judgment_type', 'auction_type',
               'valuation_type', 'no_resources', 'sample',
               'Area_TC', 'Area_P', 'lambda_tc', 'lambda_p', 'lambda_U', 'layer']
    T = len(r_df['t'].unique())
    # Rows are collected in plain lists and converted to DataFrames in bulk:
    # DataFrame.append was removed in pandas 2.0 (and was O(n) per call).
    ref_rows = []
    # Computing reference area for lambda
    print('\nRef area calculation\n', end='')
    for idx, x in enumerate(optimal_combinations):
        if x[4] == ref_method:
            rows = r_df[(r_df['Magnitude'] == x[0]) & (r_df['decision_type'] == ref_method) &
                        (r_df['sample'] == x[1]) & (r_df['auction_type'] == ref_at) &
                        (r_df['valuation_type'] == ref_vt) & (r_df['no_resources'] == x[3]) &
                        (r_df['judgment_type'] == ref_jt)]
            if not rows.empty:
                row_all = rows[(rows['layer'] == 'nan')]
                area_tc = trapz_int(y=list(row_all[(row_all['cost_type'] == cost_type)].cost[:T]),
                                    x=list(row_all[row_all['cost_type'] == cost_type].t[:T]))
                area_p = -trapz_int(y=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].cost[:T]),
                                    x=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].t[:T]))
                values = [x[0], cost_type, x[4], ref_jt, ref_at, ref_vt, x[3], x[1],
                          area_tc, area_p, 'nan', 'nan', 'nan', 'nan']
                ref_rows.append(dict(zip(columns, values)))
                if deaggregate:
                    for l in range(x[2]):
                        row_lyr = rows[(rows['layer'] == l + 1)]
                        area_tc = trapz_int(y=list(row_lyr[row_lyr['cost_type'] == cost_type].cost[:T]),
                                            x=list(row_lyr[row_lyr['cost_type'] == cost_type].t[:T]))
                        area_p = -trapz_int(y=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].cost[:T]),
                                            x=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].t[:T]))
                        values = [x[0], cost_type, x[4], ref_jt, ref_at, ref_vt, x[3], x[1],
                                  area_tc, area_p, 'nan', 'nan', 'nan', l + 1]
                        ref_rows.append(dict(zip(columns, values)))
        if idx % (len(optimal_combinations) / 10 + 1) == 0:
            update_progress(idx + 1, len(optimal_combinations))
    update_progress(len(optimal_combinations), len(optimal_combinations))
    # The second pass looks the reference areas up with boolean masks, so the
    # reference rows must live in a DataFrame before it starts.  Non-reference
    # rows computed below never match `cond` (their decision_type differs), so
    # accumulating them separately preserves the original semantics.
    lambda_df = pd.DataFrame(ref_rows, columns=columns)
    new_rows = []
    # Computing areas and lambdas
    print('\nLambda calculation\n', end='')
    for idx, x in enumerate(combinations + optimal_combinations):
        if x[4] != ref_method:
            # Check if reference area exists
            cond = ((lambda_df['Magnitude'] == x[0]) & (lambda_df['decision_type'] == ref_method) &
                    (lambda_df['auction_type'] == ref_at) & (lambda_df['valuation_type'] == ref_vt) &
                    (lambda_df['cost_type'] == cost_type) & (lambda_df['sample'] == x[1]) &
                    (lambda_df['no_resources'] == x[3]) & (lambda_df['judgment_type'] == ref_jt))
            if not cond.any():
                sys.exit('Error:Reference type is not here! for %s,%s, m %d, resource %d' \
                         % (x[4], x[5], x[0], x[3]))
            rows = r_df[(r_df['Magnitude'] == x[0]) & (r_df['decision_type'] == x[4]) &
                        (r_df['judgment_type'] == x[5]) & (r_df['auction_type'] == x[6]) &
                        (r_df['valuation_type'] == x[7]) & (r_df['sample'] == x[1]) &
                        (r_df['no_resources'] == x[3])]
            if not rows.empty:
                row_all = rows[(rows['layer'] == 'nan')]
                ref_area_tc = lambda_df.loc[cond & (lambda_df['layer'] == 'nan'), 'Area_TC']
                ref_area_P = lambda_df.loc[cond & (lambda_df['layer'] == 'nan'), 'Area_P']
                area_tc = trapz_int(y=list(row_all[row_all['cost_type'] == cost_type].cost[:T]),
                                    x=list(row_all[row_all['cost_type'] == cost_type].t[:T]))
                area_p = -trapz_int(y=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].cost[:T]),
                                    x=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].t[:T]))
                lambda_tc, lambda_p = compute_lambdas(float(ref_area_tc), float(ref_area_P), area_tc, area_p)
                values = [x[0], cost_type, x[4], x[5], x[6], x[7], x[3], x[1], area_tc,
                          area_p, lambda_tc, lambda_p, (lambda_tc + lambda_p) / 2, 'nan']
                new_rows.append(dict(zip(columns, values)))
                if deaggregate:
                    for l in range(x[2]):
                        row_lyr = rows[(rows['layer'] == l + 1)]
                        ref_area_tc = lambda_df.loc[cond & (lambda_df['layer'] == l + 1), 'Area_TC']
                        ref_area_P = lambda_df.loc[cond & (lambda_df['layer'] == l + 1), 'Area_P']
                        area_tc = trapz_int(y=list(row_lyr[row_lyr['cost_type'] == cost_type].cost[:T]),
                                            x=list(row_lyr[row_lyr['cost_type'] == cost_type].t[:T]))
                        area_p = -trapz_int(y=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].cost[:T]),
                                            x=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].t[:T]))
                        lambda_tc, lambda_p = compute_lambdas(float(ref_area_tc), float(ref_area_P),
                                                              area_tc, area_p)
                        values = [x[0], cost_type, x[4], x[5], x[6], x[7], x[3], x[1], area_tc,
                                  area_p, lambda_tc, lambda_p, (lambda_tc + lambda_p) / 2, l + 1]
                        new_rows.append(dict(zip(columns, values)))
            else:
                sys.exit('Error: No entry for %s %s %s m %d|resource %d, ...' \
                         % (x[4], x[5], x[6], x[0], x[3]))
        if idx % (len(combinations + optimal_combinations) / 10 + 1) == 0:
            update_progress(idx + 1, len(combinations + optimal_combinations))
    update_progress(idx + 1, len(combinations + optimal_combinations))
    lambda_df = pd.concat([lambda_df, pd.DataFrame(new_rows, columns=columns)],
                          ignore_index=True)
    return lambda_df
def generate_raw_mantissa_extraction(optree):
    """ generate an operation graph to extraction the significand field
        of floating-point node <optree> (may be scalar or vector).
        The implicit bit is not injected in this raw version """
    if optree.precision.is_vector_format():
        base_precision = optree.precision.get_scalar_format()
        # Map each vector float format to its integer counterpart.
        # (The unused `vector_size` local from the original was dropped.)
        int_precision = {
            v2float32: v2int32,
            v2float64: v2int64,
            v4float32: v4int32,
            v4float64: v4int64,
            v8float32: v8int32,
            v8float64: v8int64,
        }[optree.precision]
    else:
        int_precision = optree.precision.get_integer_format()
        base_precision = optree.precision
    # Extract bits [0, field_size - 1]: the significand without the
    # implicit leading one.
    return generate_field_extraction(
        optree,
        int_precision,
        0,
        base_precision.get_field_size() - 1,
    )
def read_temp_f(p):
    """
    read_temp_f
    Returns the temperature from the probe in degrees farenheit
    p = 1-Wire device file
    """
    lines = read_temp_raw(p)
    # Retry until the sensor reports a valid CRC ("YES" at the end of line 0).
    while lines[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        lines = read_temp_raw(p)
    equals_pos = lines[1].find('t=')
    if equals_pos != -1:
        temp_string = lines[1][equals_pos + 2:]
        # The original multiplied the *string* by a float (a TypeError).
        # The w1 sysfs `t=` value is an integer in millidegrees Celsius,
        # so convert to degrees C before the Fahrenheit conversion —
        # TODO confirm scaling against the actual read_temp_raw output.
        temp_c = float(temp_string) / 1000.0
        temp_f = temp_c * 9.0 / 5.0 + 32.0
        return temp_f
def count_parameters(model, trainable_only=True, is_dict=False):
    """
    Count number of parameters in a model or state dictionary
    :param model: an nn.Module, or a state dict when `is_dict` is True
    :param trainable_only: count only parameters with requires_grad set
    :param is_dict: treat `model` as a state dictionary
    :return: total parameter count
    """
    if is_dict:
        return sum(np.prod(list(model[key].size())) for key in model)
    params = model.parameters()
    if trainable_only:
        return sum(p.numel() for p in params if p.requires_grad)
    return sum(p.numel() for p in params)
def send_mail(text, to_addr, account):
    """ Send the email using msmtp. account is the account in .msmtprc
    """
    # check_call does not take `input` in Python 3.4, but check_output
    # does; the captured output is simply discarded.
    subprocess.check_output(
        ['msmtp', '-d', '-a', account, to_addr],
        input=text.encode()
    )
def setup_conf(conf=cfg.CONF):
    """Setup the cfg for the status check utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during checks.

    :param conf: oslo.config ConfigOpts instance to populate
                 (defaults to the global cfg.CONF).
    :return: the same conf object, with options registered and DB defaults set.
    """
    # Registration order mirrors the main service bootstrap: common options
    # first, then core options, then service options, then DB defaults.
    common_config.register_common_config_options()
    neutron_conf_base.register_core_common_config_opts(conf)
    neutron_conf_service.register_service_opts(
        neutron_conf_service.SERVICE_OPTS, cfg.CONF)
    # NOTE(review): service opts are registered on the *global* cfg.CONF,
    # not on `conf` — confirm this asymmetry is intentional.
    db_options.set_defaults(conf)
    return conf
def release_kind():
    """
    Determine which release to make based on the files in the
    changelog.
    """
    kinds = []
    for entry in pathlib.Path('changelog.d').iterdir():
        if 'breaking' in entry.name:
            kinds.append('major')
        elif 'change' in entry.name:
            kinds.append('minor')
        else:
            kinds.append('patch')
    # 'major' < 'minor' < 'patch' lexicographically, so min picks the
    # highest-priority bump present.
    return min(kinds)
def assert_array_equal(x: List[list], y: List[list]):
    """
    usage.scipy: 1
    """
    # Signature stub only — the body is intentionally `...`; this file
    # records observed API usage, not an implementation.
    ...
def edit_text_file(filepath: str, regex_search_string: str, replace_string: str):
    """
    This function is used to replace text inside a file.
    :param filepath: the path where the file is located.
    :param regex_search_string: string used in the regular expression to find what has to be replaced.
    :param replace_string: the string which will replace all matches found using regex_search_string.
    :return: None
    :raise RuntimeError: if regex_search_string doesn't find any match.
    """
    # open the file and read the content
    with open(filepath, "r") as f:
        text_file = f.read()
    # `re.finditer` never returns None, so the original `is None` check
    # could never fire; materialise the matches and test for emptiness so
    # the documented RuntimeError is actually raised.
    matches = list(re.finditer(regex_search_string, text_file))
    if not matches:
        raise RuntimeError("No match has been found using the given regex_search_string!")
    # replace all matches with replace_string
    for match in matches:
        text_file = text_file.replace(match.group(0), replace_string)
    # overwrite the file
    with open(filepath, "w") as f:
        f.write(text_file)
    return None
def test_parent_dataset_links(some_interdeps):
    """
    Test that we can set links and retrieve them when loading the dataset
    """
    links = generate_some_links(3)
    ds = DataSet()
    for link in links:
        link.head = ds.guid
    ds.set_interdependencies(some_interdeps[1])
    # Setting/overwriting the links must be allowed before the run starts.
    ds.parent_dataset_links = links[:2]
    ds.parent_dataset_links = links
    ds.mark_started()
    expected_error = re.escape('Can not set parent dataset links on a dataset '
                               'that has been started.')
    with pytest.raises(RuntimeError, match=expected_error):
        ds.parent_dataset_links = links
    ds.add_results([{'ps1': 1, 'ps2': 2}])
    # Reloading by run_id must round-trip the links.
    reloaded = DataSet(run_id=ds.run_id)
    assert reloaded.parent_dataset_links == links
def find_sums(sheet):
    """
    Tallies the total assets and total liabilities for each person.

    RETURNS:
        Tuple of assets and liabilities.
    """
    # Positive final-column values are assets; zero or negative values
    # count toward liabilities.
    assets = sum(row[-1] for row in sheet if row[-1] > 0)
    liabilities = sum(row[-1] for row in sheet if row[-1] <= 0)
    return assets, liabilities
def read_links(title):
    """
    Reads the links from a file in directory link_data.
    Assumes the file exists, as well as the directory link_data
    Args:
        title: (Str) The title of the current wiki file to read
    Returns a list of all the links in the wiki article with the name title
    """
    with open(f"link_data/{title}", "r") as link_file:
        contents = link_file.read()
    # Drop the final empty element produced by the trailing newline.
    return contents.split("\n")[:-1]
def test_compile_model_from_params():
    """Tests that if build_fn returns an un-compiled model,
    the __init__ parameters will be used to compile it
    and that if build_fn returns a compiled model
    it is not re-compiled.
    """
    # Load data
    data = load_boston()
    X, y = data.data[:100], data.target[:100]
    losses = ("mean_squared_error", "mean_absolute_error")

    def build_fn(compile_with_loss=None):
        # Returns a compiled model only when compile_with_loss is given.
        model = Sequential()
        model.add(keras.layers.Dense(X.shape[1], input_shape=(X.shape[1],)))
        model.add(keras.layers.Activation("relu"))
        model.add(keras.layers.Dense(1))
        model.add(keras.layers.Activation("linear"))
        if compile_with_loss:
            model.compile(loss=compile_with_loss)
        return model

    for loss_name in losses:
        # compile_with_loss=None -> un-compiled model, so `loss` is used.
        estimator = KerasRegressor(
            model=build_fn,
            loss=loss_name,
            compile_with_loss=None,
        )
        estimator.fit(X, y)
        assert estimator.model_.loss.__name__ == loss_name
    for loss_name in losses:
        # A pre-compiled model keeps its own loss, overriding `loss`.
        estimator = KerasRegressor(
            model=build_fn,
            loss="binary_crossentropy",
            compile_with_loss=loss_name,
        )
        estimator.fit(X, y)
        assert estimator.model_.loss == loss_name
def writeFEvalsMaxSymbols(fevals, maxsymbols, isscientific=False):
    """Return the smallest string representation of a number.
    This method is only concerned with the maximum number of significant
    digits.
    Two alternatives:
    1) modified scientific notation (without the trailing + and zero in
    the exponent)
    2) float notation

    :param fevals: the number to format (must not be NaN; infinity maps
        to the LaTeX string ``$\\infty$``)
    :param maxsymbols: maximum number of symbols available for the output
    :param isscientific: force the scientific representation when True
    :returns: string representation of a number of function evaluations
    or ERT.
    """
    #Compared to writeFEvals2?
    #Printf:
    # %[flags][width][.precision][length]specifier
    assert not numpy.isnan(fevals)
    if numpy.isinf(fevals):
        return r'$\infty$'
    #repr1 is the alternative scientific notation
    #repr2 is the full notation but with a number of significant digits given
    #by the variable precision.
    # modified scientific notation:
    #smallest representation of the decimal part
    #drop + and starting zeros of the exponent part
    repr1 = (('%.' + str(maxsymbols) + 'e') % fevals)
    # NOTE(review): size1 is never used afterwards.
    size1 = len(repr1)
    tmp = repr1.split('e', 1)
    tmp2 = tmp[-1].lstrip('+-0')
    if float(tmp[-1]) < 0:
        tmp2 = '-' + tmp2
    tmp[-1] = tmp2
    # Budget left for the mantissa once the shortened exponent and the
    # 'e'/sign characters are accounted for.
    remainingsymbols = max(maxsymbols - len(tmp2) - 2, 0)
    tmp[0] = (('%.' + str(remainingsymbols) + 'f') % float(tmp[0]))
    repr1 = 'e'.join(tmp)
    #len(repr1) <= maxsymbols is not always the case but should be most usual
    tmp = '%.0f' % fevals
    remainingsymbols = max(maxsymbols - len(tmp), 0)
    repr2 = (('%.' + str(remainingsymbols) + 'f') % fevals)
    tmp = repr2.split('.', 1)
    if len(tmp) > 1:
        # Strip trailing zeros (and below, a dangling decimal point).
        tmp[-1] = tmp[-1].rstrip('0')
    repr2 = '.'.join(tmp)
    repr2 = repr2.rstrip('.')
    #set_trace()
    # Pick whichever representation costs fewer symbols (decimal points
    # are free), unless scientific notation is forced.
    if len(repr1)-repr1.count('.') < len(repr2)-repr2.count('.') or isscientific:
        return repr1
    #tmp1 = '%4.0f' % bestalgdata[-1]
    #tmp2 = ('%2.2g' % bestalgdata[-1]).split('e', 1)
    #if len(tmp2) > 1:
    #    tmp2[-1] = tmp2[-1].lstrip('+0')
    #    tmp2 = 'e'.join(tmp2)
    #    tmp = tmp1
    #    if len(tmp1) >= len(tmp2):
    #        tmp = tmp2
    #    curline.append(r'\multicolumn{2}{c|}{%s}' % tmp)
    return repr2
def closedcone(r=1, h=5, bp=[0,0,0], sampH=360, sampV=50, fcirc=20):
    """
    Returns parametrization of a closed cone with radius 'r' and height 'h at
    basepoint (bpx,bpy,bpz), where 'sampH' and 'sampV' specify the amount of
    samples used horizontally, i.e. for circles, and vertically, i.e.
    for height, and 'fcirc' specifies the amount
    of circles that fill the bottom of the cone with radius 'r',
    The base point is in the cones's center at the bottom.
    The default values are 1, 5, (0,0,0), 360 and 50 for the radius, center,
    and amount of horizontal and vertical samples, respectively.
    """
    # NOTE(review): the mutable default `bp=[0,0,0]` is shared between
    # calls; safe only as long as callers never mutate it.
    bpx, bpy, bpz = bp
    theta0 = np.linspace(0, 2*np.pi, sampH)
    z = np.linspace(bpz, bpz+h, sampV)
    theta, z = np.meshgrid(theta0, z)
    # Radius shrinks linearly from r at the base to 0 at the tip;
    # `r` is rebound here from the scalar argument to an array.
    r = np.linspace(r, 0, sampV)
    theta, r = np.meshgrid(theta0, r)
    # NOTE(review): the basepoint is *subtracted* here, which centres the
    # cone at (-bpx, -bpy) rather than (bpx, bpy) — confirm intended sign.
    x = r * np.cos(theta) - bpx
    y = r * np.sin(theta) - bpy
    # Append a filled disc so the bottom of the cone is closed.
    # NOTE(review): `r` is the 2-D meshgrid array at this point, not the
    # original scalar radius — verify filledcircle expects that.
    xcirc, ycirc, zcirc = filledcircle(r=r,c=[bpx,bpy,bpz], sampH=sampH,
                                       fcirc=fcirc)
    x = np.append(x,xcirc,0)
    y = np.append(y,ycirc,0)
    z = np.append(z,zcirc,0)
    return x, y, z
def E_lndetW_Wishart(nu, V):
    """
    mean of log determinant of precision matrix over Wishart <lndet(W)>
    input
      nu [float] : dof parameter of Wishart distribution
      V [ndarray, shape (D x D)] : base matrix of Wishart distribution
    :raise ValueError: when nu < len(V) + 1
    """
    if nu < len(V) + 1:
        # Python 3 raise syntax (the original used the removed
        # `raise ValueError, "..."` Python 2 form, a SyntaxError here).
        raise ValueError("dof parameter nu must larger than len(V)")
    D = len(V)
    # E[ln|W|] = D*ln(2) - ln|V| + sum_d digamma((nu + 1 - d) / 2)
    E = D*np.log(2.0) - np.log(det(V)) + \
        digamma(np.arange(nu+1-D, nu+1)*0.5).sum()
    return E
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.