| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def convolve_cbvs(sectors=np.arange(1,14,1)):
"""
Bins the co-trending basis vectors into FFI times;
Calls download_cbvs to get filenames
Input
-----
type(sectors) == list
"""
# Gets the cutout for a target in the CVZ
coord = SkyCoord('04:35:50.330 -64:01:37.33', unit=(u.hourangle, u.deg))
sector_table = Tesscut.get_sectors(coord)
for sector in sectors:
files = download_cbvs(int(sector))
manifest = Tesscut.download_cutouts(coord, 31, sector = sector)
cutout = fits.open(manifest['Local Path'][0])
time = cutout[1].data['TIME'] - cutout[1].data['TIMECORR']
for c in trange(len(files)):
cbv = fits.open(files[c])
camera = cbv[1].header['CAMERA']
ccd = cbv[1].header['CCD']
cbv_time = cbv[1].data['Time']
new_fn = './metadata/s{0:04d}/cbv_components_s{0:04d}_{1:04d}_{2:04d}.txt'.format(sector, camera, ccd)
convolved = np.zeros((len(time), 16))
for i in range(len(time)):
g = np.argmin( np.abs(time[i] - cbv_time) )
for j in range(16):
index = 'VECTOR_{0}'.format(j+1)
cads = np.arange(g-7, g+8, 1)
convolved[i,j] = np.mean(cbv[1].data[index][cads])
np.savetxt(new_fn, convolved)
| 18,100
|
def _format_param(name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError("expected {} values for {}, got {}".format(
len(optimizer.param_groups), name, len(param)))
return param
else:
return [param] * len(optimizer.param_groups)
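# A minimal usage sketch (hypothetical optimizer stand-in): a scalar value is
# broadcast to every param group, while a sequence must match the group count.
class _FakeOptimizer:
    param_groups = [{"lr": 0.1}, {"lr": 0.1}]

# _format_param("lr", _FakeOptimizer(), 0.01)        -> [0.01, 0.01]
# _format_param("lr", _FakeOptimizer(), [0.01, 0.1]) -> [0.01, 0.1]
# _format_param("lr", _FakeOptimizer(), [0.01])      -> ValueError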
| 18,101
|
def run_test(d):
"""
Run the gaussian test with dimension d
"""
######### Problem Specification
# Data generation parameters
prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean
prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix
# True model parameters
num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)
t_delta = num_range / 5
if d == 1:
t_sigma = np.ones(1)
else:
# Allow sigma to range from 0.1 to 1
t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1
######### Variable Initialization
# Initial model parameters - same across all methods
init_delta = prior_mu_z.copy()
init_log_sigma = 3 * np.ones(d)
# Initial HVAE variational parameters
init_T = 5.
init_eps = 0.005 * np.ones(d)
max_eps = params['max_eps'] * np.ones(d)
init_logit_eps = np.log(init_eps/(max_eps - init_eps))
init_log_T_0 = np.log(init_T - 1)
# Initial NF variational parameters
init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)
init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)
init_b = 0.1
# Initial VAE parameters
init_mu_z = prior_mu_z.copy()
init_log_sigma_z = np.ones(d)
######### Set up models
HVAE_model_1 = HVAE(
['delta', 'log_sigma', 'logit_eps', 'log_T_0'],
[init_delta, init_log_sigma, init_logit_eps, init_log_T_0],
'HVAE_1', d, params['HVAE_K_1'])
HVAE_model_2 = HVAE(
['delta', 'log_sigma', 'logit_eps', 'log_T_0'],
[init_delta, init_log_sigma, init_logit_eps, init_log_T_0],
'HVAE_2', d, params['HVAE_K_2'])
HVAE_model_notemp_1 = HVAE(
['delta', 'log_sigma', 'logit_eps'],
[init_delta, init_log_sigma, init_logit_eps],
'HVAE_notemp_1', d, params['HVAE_K_1'])
HVAE_model_notemp_2 = HVAE(
['delta', 'log_sigma', 'logit_eps'],
[init_delta, init_log_sigma, init_logit_eps],
'HVAE_notemp_2', d, params['HVAE_K_2'])
NF_model_1 = NF(
['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],
[init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],
'NF_1', d, params['NF_K_1'])
NF_model_2 = NF(
['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],
[init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],
'NF_2', d, params['NF_K_2'])
VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'],
[init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)
model_list = [HVAE_model_1, HVAE_model_2, HVAE_model_notemp_1,
HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]
######### Generate Training Data & Save - One for each test
train_data_list = []
for i in range(params['n_tests']):
z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)
x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2),
size=params['n_data'])
train_data_list.append(x)
# Folder should have already been created in the initializations
data_path = os.path.join('save', str(d), 'train_data.p')
pickle.dump(train_data_list, open(data_path, 'wb'))
######### Train models
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Store the final parameter values for all test runs in this dictionary
final_params = {}
for m in model_list:
final_values = []
for i in range(params['n_tests']):
(delta, sigma) = m.train(sess, train_data_list[i], i)
final_values.append((delta, sigma))
final_params[m.model_name] = final_values.copy()
######### Test models using difference between parameters
param_diffs = {}
for m in model_list:
diffs = []
for i in range(params['n_tests']):
delta = final_params[m.model_name][i][0]
sigma = final_params[m.model_name][i][1]
delta_diff = np.sum((delta - t_delta)**2)
sigma_diff = np.sum((sigma - t_sigma)**2)
diffs.append((delta_diff, sigma_diff))
param_diffs[m.model_name] = diffs.copy()
# Save parameter differences in a pickle file
diff_path = os.path.join('save', str(d), 'all_diffs.p')
pickle.dump(param_diffs, open(diff_path, 'wb'))
| 18,102
|
def coh_overflow_test():
"""
Test whether very very opaque layers will break the coherent program
"""
n_list = [ 1., 2+.1j, 1+3j, 4., 5.]
d_list = [inf, 50, 1e5, 50, inf]
lam = 200
alpha_d = imag(n_list[2]) * 4 * pi * d_list[2] / lam
print('Very opaque layer: Calculation should involve e^(-', alpha_d, ')!')
data = coh_tmm('s', n_list, d_list, 0, lam)
n_list2 = n_list[0:3]
d_list2 = d_list[0:3]
d_list2[-1] = inf
data2 = coh_tmm('s', n_list2, d_list2, 0, lam)
print('First entries of the following two lists should agree:')
print(data['vw_list'])
print(data2['vw_list'])
| 18,103
|
def ShowZallocs(cmd_args=None):
""" Prints all allocations in the zallocations table
"""
if unsigned(kern.globals.zallocations) == 0:
print "zallocations array not initialized!"
return
print '{0: <5s} {1: <18s} {2: <5s} {3: <15s}'.format('INDEX','ADDRESS','TRACE','SIZE')
current_index = 0
max_zallocation = unsigned(kern.globals.zleak_alloc_buckets)
allocation_count = 0
while current_index < max_zallocation:
current_zalloc = kern.globals.zallocations[current_index]
if int(current_zalloc.za_element) != 0:
print '{0: <5d} {1: <#018x} {2: <5d} {3: <15d}'.format(current_index, current_zalloc.za_element, current_zalloc.za_trace_index, unsigned(current_zalloc.za_size))
allocation_count += 1
current_index += 1
print 'Total Allocations: {0: <d}'.format(allocation_count)
| 18,104
|
def test_field_extension_post(app_client, load_test_data):
"""Test POST search with included and excluded fields (fields extension)"""
body = {
"fields": {
"exclude": ["datetime"],
"include": ["properties.pers:phi", "properties.gsd"],
}
}
resp = app_client.post("/search", json=body)
resp_json = resp.json()
assert not set(resp_json["features"][0]["properties"]) - {"gsd", "pers:phi"}
| 18,105
|
def write(text, into=None, session_id=None):
"""
:param text: The text to be written.
:type text: one of str, unicode
:param into: The element to write into.
:type into: one of str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement`, :py:class:`Alert`
Types the given text into the active window. If parameter 'into' is given,
writes the text into the text field or element identified by that parameter.
Common examples of 'write' are::
write("Hello World!")
write("user12345", into="Username:")
write("Michael", into=Alert("Please enter your name"))
"""
_get_api_impl(session_id).write_impl(text, into)
| 18,106
|
def as_binary_vector(labels, num_classes):
"""
Construct binary label vector given a list of label indices.
Args:
labels (list): The input label list.
num_classes (int): Number of classes of the label vector.
Returns:
labels (numpy array): the resulting binary vector.
"""
label_arr = np.zeros((num_classes,))
for lbl in set(labels):
label_arr[lbl] = 1.0
return label_arr
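# Quick example (illustrative values): label indices {1, 3} in a 5-class
# problem produce a multi-hot vector.
# as_binary_vector([1, 3, 3], 5) -> array([0., 1., 0., 1., 0.])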
| 18,107
|
def evaluation_lda(model, data, dictionary, corpus):
""" Compute coherence score and perplexity.
params:
model: lda model
data: list of lists (tokenized)
dictionary
corpus
returns: coherence score, perplexity score
"""
coherence_model_lda = CoherenceModel(model=model, texts=data, dictionary=dictionary, coherence='c_v')
coherence = coherence_model_lda.get_coherence()
perplexity = model.log_perplexity(corpus)
return coherence, perplexity
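# Usage sketch (assumes gensim; the variable names below are illustrative):
# lda = gensim.models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=10)
# coherence, perplexity = evaluation_lda(lda, tokenized_docs, dictionary, corpus)
# `coherence` is the c_v score (higher is better); `perplexity` is the value
# returned by model.log_perplexity(corpus), a per-word likelihood bound.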
| 18,108
|
def get_map_with_square(map_info, square):
"""
Build a string representation of the map with its top-left largest
obstacle-free square filled in.
"""
map_string = ""
x_indices = list(range(square["x"], square["x"] + square["size"]))
y_indices = list(range(square["y"], square["y"] + square["size"]))
M = map_info["matrix"]
for y in range(map_info["line_num"]):
if map_string:
map_string += '\n'
for x in range(map_info["line_len"]):
if M[y][x]:
map_string += map_info["obstacle_char"]
elif x in x_indices and y in y_indices:
map_string += map_info["full_char"]
else:
map_string += map_info["empty_char"]
return map_string
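# Worked sketch with a hypothetical map_info/square pair: obstacles are drawn
# with obstacle_char, the found square with full_char, everything else empty.
example_map_info = {
    "matrix": [[0, 0, 0, 1],
               [0, 0, 0, 0],
               [1, 0, 0, 0]],
    "line_num": 3, "line_len": 4,
    "obstacle_char": "o", "full_char": "x", "empty_char": ".",
}
example_square = {"x": 1, "y": 0, "size": 2}
# get_map_with_square(example_map_info, example_square) -> ".xxo\n.xx.\no..."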
| 18,109
|
def bgr_colormap():
"""
In cdict, the first column is the value to be plotted, interpolated between 0.0 and 1.0;
the second column specifies how interpolation should be done from below;
the third column specifies how interpolation should be done from above.
If the second column does not equal the third, there will be a break in the colors.
"""
darkness = 0.85 #0 is black, 1 is white
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, darkness, darkness),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, darkness, darkness),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0),
(0.5, darkness, darkness),
(1.0, 0.0, 0.0))
}
return LinearSegmentedColormap("bgr", cdict)
| 18,110
|
def test_construct_h6_tag(attributes):
"""Test for validating whether the h6 tag is constructed correctly or not.
"""
h6_ = H6(**attributes)
assert h6_.construct() == h6.render(attributes)
| 18,111
|
def autovalidation_from_docstring():
"""
Test validation using JsonSchema
The default payload is invalid, try it, then change the age to a
valid integer and try again
---
tags:
- officer
parameters:
- name: body
in: body
required: true
schema:
id: Officer
required:
- name
- age
properties:
name:
type: string
description: The officer's name.
default: "James T. Kirk"
age:
type: integer
description: The officer's age (should be integer)
default: "138"
tags:
type: array
description: optional list of tags
default: ["starfleet", "captain", "enterprise", "dead"]
items:
type: string
responses:
200:
description: A single officer item
schema:
$ref: '#/definitions/Officer'
"""
data = request.json
return jsonify(data)
| 18,112
|
def get_vlan_groups(url, headers):
"""
Get a list of dictionaries describing the existing VLAN groups
"""
vlan_groups = []
api_url = f"{url}/api/ipam/vlan-groups/"
response = requests.request("GET", api_url, headers=headers)
all_vlan_groups = response.json()["results"]
for vlan_group in all_vlan_groups:
vlan_group_info = dict()
vlan_group_info["name"] = vlan_group["name"]
vlan_group_info["state"] = "present"
if vlan_group["site"] is not None:
vlan_group_info["site"] = vlan_group["site"]["name"]
else:
vlan_group_info["site"] = None
vlan_groups.append(vlan_group_info)
return vlan_groups
| 18,113
|
def highest_price():
""" Finding the ten most expensive items per unit price in the northwind DB """
ten_highest_query = """
SELECT ProductName
FROM Product
ORDER BY UnitPrice DESC
LIMIT 10; """
ten_highest = cursor.execute(ten_highest_query).fetchall()
print(f' The ten most expensive items per unit price are: {ten_highest}')
| 18,114
|
def load_viewpoints(viewpoint_file_list):
"""load multiple viewpoints file from given lists
Args:
viewpoint_file_list: a list contains obj path
a wrapper for load_viewpoint function
Returns:
return a generator contains multiple generators
which contains obj pathes
"""
# Wrap a single path in a list so we don't iterate over its characters
if isinstance(viewpoint_file_list, str):
    viewpoint_file_list = [viewpoint_file_list]
try:
    vp_file_list = iter(viewpoint_file_list)
except TypeError:
    print("viewpoint_file_list is not an iterable object")
    return
for vp_file in vp_file_list:
yield load_viewpoint(vp_file)
| 18,115
|
def getLastReading(session: Session) -> Reading:
"""
Finds the last reading associated with the session
NB: Always returns a Reading, because every Session has at least 1 Reading
Args:
session (Session): A Session object representing the session record in the database
Returns:
Reading: The last Reading recorded for the session
"""
return Reading.objects.filter(session_id=session.pk).order_by("t").reverse()[:1].get()
| 18,116
|
def process_outlier(data, population_set):
"""
Parameters
----------
data
    List of records (dicts), each containing at least a "date" key.
population_set
    Mapping from age group to population size; when truthy, a "rate" per
    RATE_PER_POPULATION_FACTOR people is added to each value.
Returns
-------
list of dict
    One entry per date, each holding the date and the list of matching values.
"""
content = list()
for date in set(map(lambda x: x['date'], data)):
tmp_item = {
"date": date,
"value": list()
}
for value in filter(lambda d: d["date"] == date, data):
tmp_value = deepcopy(value)
del tmp_value["date"]
if population_set:
tmp_value["rate"] = round(
tmp_value["value"] / population_set[tmp_value["age"]] *
RATE_PER_POPULATION_FACTOR,
1
)
tmp_item["value"].append(tmp_value)
content.append(tmp_item)
return deepcopy(content)
| 18,117
|
def valid_http(http_success=HTTPOk, # type: Union[Type[HTTPSuccessful], Type[HTTPRedirection]]
http_kwargs=None, # type: Optional[ParamsType]
detail="", # type: Optional[Str]
content=None, # type: Optional[JSON]
content_type=CONTENT_TYPE_JSON, # type: Optional[Str]
): # type: (...) -> Union[HTTPSuccessful, HTTPRedirection]
"""
Returns successful HTTP with standardized information formatted with content type. (see :func:`raise_http` for HTTP
error calls)
:param http_success: any derived class from *valid* HTTP codes (<400) (default: `HTTPOk`)
:param http_kwargs: additional keyword arguments to pass to `http_success` when called
:param detail: additional message information (default: empty)
:param content: json formatted content to include
:param content_type: format in which to return the exception (one of `magpie.common.SUPPORTED_ACCEPT_TYPES`)
:returns: formatted successful response with additional details and HTTP code
"""
global RAISE_RECURSIVE_SAFEGUARD_COUNT # pylint: disable=W0603
content = dict() if content is None else content
detail = repr(detail) if not isinstance(detail, six.string_types) else detail
content_type = CONTENT_TYPE_JSON if content_type == CONTENT_TYPE_ANY else content_type
http_code, detail, content = validate_params(http_success, [HTTPSuccessful, HTTPRedirection],
detail, content, content_type)
json_body = format_content_json_str(http_code, detail, content, content_type)
resp = generate_response_http_format(http_success, http_kwargs, json_body, content_type=content_type)
RAISE_RECURSIVE_SAFEGUARD_COUNT = 0 # reset counter for future calls (don't accumulate for different requests)
return resp
| 18,118
|
async def unregister(lrrbot, conn, event, respond_to, channel):
"""
Command: !live unregister CHANNEL
Unregister CHANNEL as a fanstreamer channel.
"""
try:
await twitch.unfollow_channel(channel)
conn.privmsg(respond_to, "Channel '%s' removed from the fanstreamer list." % channel)
except urllib.error.HTTPError:
conn.privmsg(respond_to, "'%s' isn't a Twitch channel." % channel)
| 18,119
|
def button(update, context):
"""
Reply button when displayed options.
:param update: update object of chatbot
:param context: context of conversation
"""
query = update.callback_query
entity = query.data
entity_type = context.chat_data["entity_type"]
local_context = {
"intent": context.chat_data["intent"],
"entities": [{"value": entity, "type": entity_type}]
}
data = {
"user": context.user_data["user"],
"input": {
"user_input": None,
"context": local_context
}
}
logger.info("[BUTTON] >>>>> SentData %s", data)
resp = chat_with_system(data)
logger.info("[BUTTON] <<<<< ReceivedData %s", data)
if resp["answer"]["answer_type"] == "text":
context.chat_data["intent"] = None
context.chat_data["entities"] = []
query.edit_message_text(text=resp["answer"]["text"])
| 18,120
|
def operating_cf(cf_df):
"""Checks if the latest reported OCF (Cashflow) is positive.
Explanation of OCF: https://www.investopedia.com/terms/o/operatingcashflow.asp
cf_df = Cashflow Statement of the specified company
"""
cf = cf_df.iloc[cf_df.index.get_loc("Total Cash From Operating Activities"),0]
return cf > 0
| 18,121
|
def generate_performance_scores(query_dataset, target_variable, candidate_datasets, params):
"""Generates all the performance scores.
"""
performance_scores = list()
# params
algorithm = params['regression_algorithm']
cluster_execution = params['cluster']
hdfs_address = params['hdfs_address']
hdfs_user = params['hdfs_user']
inner_join = params['inner_join']
# HDFS Client
hdfs_client = None
if cluster_execution:
# time.sleep(np.random.randint(1, 120)) # avoid opening multiple sockets at the same time
hdfs_client = InsecureClient(hdfs_address, user=hdfs_user)
# reading query dataset
query_data_str = read_file(query_dataset, hdfs_client, cluster_execution)
query_data = pd.read_csv(StringIO(query_data_str))
query_data.set_index(
'key-for-ranking',
drop=True,
inplace=True
)
# build model on query data only
_, scores_before = get_performance_scores(
query_data,
target_variable,
algorithm,
False
)
for candidate_dataset in candidate_datasets:
# reading candidate dataset
candidate_data_str = read_file(candidate_dataset, hdfs_client, cluster_execution)
candidate_data = pd.read_csv(StringIO(candidate_data_str))
candidate_data.set_index(
'key-for-ranking',
drop=True,
inplace=True
)
# join dataset
join_ = query_data.join(
candidate_data,
how='left',
rsuffix='_r'
)
if inner_join:
join_.dropna(inplace=True)
# build model on joined data
# print('[INFO] Generating performance scores for query dataset %s and candidate dataset %s ...' % (query_dataset, candidate_dataset))
imputation_strategy, scores_after = get_performance_scores(
join_,
target_variable,
algorithm,
not(inner_join)
)
# print('[INFO] Performance scores for query dataset %s and candidate dataset %s done!' % (query_dataset, candidate_dataset))
performance_scores.append(
generate_output_performance_data(
query_dataset=query_dataset,
target=target_variable,
candidate_dataset=candidate_dataset,
scores_before=scores_before,
scores_after=scores_after,
imputation_strategy=imputation_strategy
)
)
return performance_scores
| 18,122
|
def correct_sparameters_twelve_term(sparameters_complex,twelve_term_correction,reciprocal=True):
"""Applies the twelve term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
The twelve term correction should be a list of
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr] where Edf, etc are complex numbers"""
if len(sparameters_complex) != len(twelve_term_correction):
raise TypeError("s parameter and twelve term correction must be the same length")
sparameter_out=[]
phase_last=0.
for index,row in enumerate(sparameters_complex):
frequency=row[0]
Sm=np.matrix(row[1:]).reshape((2,2))
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]=twelve_term_correction[index]
# frequency Edf Esf Erf Exf Elf Etf Edr Esr Err Exr Elr Etr.
# print [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]
# print Sm[0,0]
D =(1+(Sm[0,0]-Edf)*(Esf/Erf))*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf*Elr)/(Etf*Etr)
# print D
S11 =(Sm[0,0]-Edf)/(D*Erf)*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf)/(D*Etf*Etr)
S21 =((Sm[1,0]-Exr)/(D*Etf))*(1+(Sm[1,1]-Edr)*(Esr-Elf)/Err)
S12 = ((Sm[0,1]-Exf)/(D*Etr))*(1+(Sm[0,0]-Edf)*(Esf-Elr)/Erf)
S22 = (Sm[1,1]-Edr)/(D*Err)*(1+(Sm[0,0]-Edf)*(Esf/Erf))-(Sm[0,1]*Sm[1,0]*Elr)/(D*Etf*Etr)
# S12 and S21 are combined via a geometric mean whose root is chosen to keep the phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
# if the phase jumps by more than 90 degrees but less than 270, pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
if reciprocal:
sparameter_out.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
else:
sparameter_out.append([frequency,S11,S21,S12,S22])
phase_last=cmath.phase(mean_S12_S21)
return sparameter_out
| 18,123
|
def api_activity_logs(request):
"""Test utility."""
auth = get_auth(request)
obj = ActivityLogs(auth=auth)
check_apiobj(authobj=auth, apiobj=obj)
return obj
| 18,124
|
def RNAshapes_parser(lines=None,order=True):
"""
Returns a list containing tuples of (sequence,pairs object,energy) for
every sequence
[[Seq,Pairs,Ene],[Seq,Pairs,Ene],...]
Structures will be ordered by the structure energy by default; if ordering
isn't desired, set order to False.
"""
result = lineParser(lines)
if order:
result = order_structs(result)
return result
| 18,125
|
def get_case_strategy( # pylint: disable=too-many-locals
draw: Callable,
operation: APIOperation,
hooks: Optional[HookDispatcher] = None,
data_generation_method: DataGenerationMethod = DataGenerationMethod.default(),
path_parameters: Union[NotSet, Dict[str, Any]] = NOT_SET,
headers: Union[NotSet, Dict[str, Any]] = NOT_SET,
cookies: Union[NotSet, Dict[str, Any]] = NOT_SET,
query: Union[NotSet, Dict[str, Any]] = NOT_SET,
body: Any = NOT_SET,
) -> Any:
"""A strategy that creates `Case` instances.
Explicit `path_parameters`, `headers`, `cookies`, `query`, `body` arguments will be used in the resulting `Case`
object.
If such explicit parameters are composite (not `body`) and don't provide the whole set of parameters for that
location, then we generate what is missing and merge these two parts. Note that if parameters are optional, then
they may remain absent.
The primary purpose of this behavior is to prevent sending incomplete explicit examples by generating missing parts
as it works with `body`.
"""
to_strategy = DATA_GENERATION_METHOD_TO_STRATEGY_FACTORY[data_generation_method]
context = HookContext(operation)
with detect_invalid_schema(operation):
path_parameters_value = get_parameters_value(
path_parameters, "path", draw, operation, context, hooks, to_strategy
)
headers_value = get_parameters_value(headers, "header", draw, operation, context, hooks, to_strategy)
cookies_value = get_parameters_value(cookies, "cookie", draw, operation, context, hooks, to_strategy)
query_value = get_parameters_value(query, "query", draw, operation, context, hooks, to_strategy)
media_type = None
if body is NOT_SET:
if operation.body:
parameter = draw(st.sampled_from(operation.body.items))
strategy = _get_body_strategy(parameter, to_strategy, operation)
strategy = apply_hooks(operation, context, hooks, strategy, "body")
media_type = parameter.media_type
body = draw(strategy)
else:
media_types = operation.get_request_payload_content_types() or ["application/json"]
# Take the first available media type.
# POSSIBLE IMPROVEMENT:
# - Test examples for each available media type on Open API 2.0;
# - On Open API 3.0, media types are explicit, and each example has it.
# We can pass `OpenAPIBody.media_type` here from the examples handling code.
media_type = media_types[0]
if operation.schema.validate_schema and operation.method.upper() == "GET" and operation.body:
raise InvalidSchema("Body parameters are defined for GET request.")
return Case(
operation=operation,
media_type=media_type,
path_parameters=path_parameters_value,
headers=CaseInsensitiveDict(headers_value) if headers_value is not None else headers_value,
cookies=cookies_value,
query=query_value,
body=body,
data_generation_method=data_generation_method,
)
| 18,126
|
def addi(imm_val, rs1):
"""
Adds the sign extended 12 bit immediate to register rs1.
Arithmetic overflow is ignored and the result is the
low 32 bits.
-- ADDI rd, rs1, 0 is used to implement MV rd, rs1.
"""
reg[rs1] = (imm_val + int(reg[rs1])) & 0xFFFFFFFF  # keep only the low 32 bits, as documented
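# Worked sketch with a hypothetical register file `reg`: adding a negative
# immediate behaves like a subtraction, e.g. with reg = {5: 7},
# addi(-3, 5) leaves reg[5] == 4.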
| 18,127
|
def type_from_value(value, visitor=None, node=None):
"""Given a Value from resolving an annotation, return the type."""
ctx = _Context(visitor, node)
return _type_from_value(value, ctx)
| 18,128
|
def _accesslen(data) -> int:
"""This was inspired by the `default_collate` function.
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/
"""
if isinstance(data, (tuple, list)):
item = data[0]
if not isinstance(item, (float, int, str)):
return len(item)
return len(data)
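# Behaviour sketch: an (inputs, targets) pair reports the length of its first
# element, while a flat list of numbers reports its own length.
# _accesslen(([0.1, 0.2, 0.3], [1, 0, 1])) -> 3
# _accesslen([0.1, 0.2])                   -> 2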
| 18,129
|
def createSkill(request, volunteer_id):
"""
Method to create skills and interests
:param request:
:param volunteer_id:
:return:
"""
if request.method == 'POST':
volunteer = Volunteer_User_Add_Ons.objects.get(pk=volunteer_id)
skills = request.POST.getlist('skills')
interests = request.POST.getlist('interests')
# call to create the skills
createInputToken(request, skills, 'Skill', volunteer_id)
# call to create the interests
createInputToken(request, interests, 'Interest', volunteer_id)
return HttpResponse('ok')
| 18,130
|
def analyticJacobian(robot : object, dq = 0.001, symbolic = False):
"""Using Homogeneous Transformation Matrices, this function computes Analytic Jacobian Matrix of a serial robot given joints positions in radians. Serial robot's kinematic parameters have to be set before using this function
Args:
robot (Serial): serial robot (this won't work with other type of robots)
dq (float, optional): step size for numerical derivative. Defaults to 0.001.
symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
Returns:
J (np.array): Inertial Analytic Jacobian Matrix (numerical)
J (SymPy Matrix): Inertial Analytic Jacobian Matrix (symbolical)
"""
# Calculate forward kinematics: f(q)
fkHTM = forwardHTM(robot, symbolic)
# Convert result into an Axis - Angle vector: x(q)
x = axisAngle(fkHTM[-1], symbolic)
if symbolic:
# Calculate Analytic Jacobian Matrix by differentiating Axis - Angle vector with SymPy functions
return nsimplify(trigsimp(x.jacobian(robot.qSymbolic)).evalf(), tolerance = 1e-10)
else:
# Get number of joints (generalized coordinates)
n = robot.jointsPositions.shape[0]
# Initializes jacobian matrix with zeros
J = np.zeros((6, n))
# Auxiliary variable to keep the original joint positions
q = robot.jointsPositions.copy()
# Iterate through all columns (generalized coordinates)
for j in range(n):
# Set increment to current generalized coordinate: z[j] = q[j] + dq
robot.jointsPositions[j] += dq
# Calculate forward kinematics with step size: f(z) = f(q + dq)
f = forwardHTM(robot)
# Convert result into an Axis - Angle vector: X(q + dq)
X = axisAngle(f[-1])
# Calculate analytic jacobian matrix: [X(q + dq) - x(q)] / dq
J[: , j] = ((X - x) / dq).flatten()
# Eliminates step size by copying original values from auxiliar variable
robot.jointsPositions[:, :] = q
return J
| 18,131
|
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A(metaclass=AMeta):
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5
| 18,132
|
def get_subtask_spec_factory_classes():
"""Return dictionary with all factory classes defined in files in this directory.
This file is excluded from the search."""
this_file = os.path.split(__file__)[-1]
directory = os.path.dirname(__file__)
exclude = [this_file, "subtask_spec_factory.py"]
factory_files = [f for f in os.listdir(directory)
if f.endswith(".py") and f not in exclude]
factory_classes = {}
for f in factory_files:
path = os.path.join(directory, f)
relative_import_string = "." + inspect.getmodulename(path)
module = import_module(relative_import_string, package=__package__)
for name in dir(module):
obj = getattr(module, name)
if inspect.isclass(obj):
if issubclass(obj, SubtaskSpecFactory):
factory_classes[name] = obj
return factory_classes
| 18,133
|
def test_countMatches():
"""Unit test for countMatches function. Checks output is as
expected for a variety of extreme cases."""
# create test image
ground_truth = np.zeros((20, 20))
ground_truth[4:10, 4:10] = 1
inferred = np.zeros((20, 20))
inferred[4:10, 4:6] = 1
inferred[4:10, 6:10] = 2
assert Segmentors.countMatches(inferred, ground_truth) ==\
({0.0: {0.0: 364}, 1.0: {1.0: 12}, 2.0: {1.0: 24}}, 3, 2)
inferred = np.zeros((20, 20))
inferred[4:10, 3:6] = 1
inferred[4:10, 6:10] = 2
assert Segmentors.countMatches(inferred, ground_truth) ==\
({0.0: {0.0: 358}, 1.0: {0.0: 6, 1.0: 12}, 2.0: {1.0: 24}}, 3, 2)
inferred = np.zeros((20, 20))
inferred[4:10, 3:6] = 1
inferred[4:10, 6:10] = 2
inferred[3:5, 3:6] = 3
assert Segmentors.countMatches(inferred, ground_truth) ==\
({0.0: {0.0: 355}, 3.0: {0.0: 4, 1.0: 2}, 2.0: {1.0: 24}, 1.0: {0.0: 5, 1.0: 10}}, 4, 2)
inferred = np.zeros((20, 20))
assert Segmentors.countMatches(inferred, ground_truth) == ({0.0: {0.0: 364, 1.0: 36}}, 1, 2)
inferred = np.zeros((20, 20))
inferred[1:19, 1:19] = 1
assert Segmentors.countMatches(inferred, ground_truth) ==\
({0.0: {0.0: 76}, 1.0: {0.0: 288, 1.0: 36}}, 2, 2)
| 18,134
|
def check_keyup_events(event, ship):
"""Respond to key release."""
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
elif event.key == pygame.K_UP:
ship.moving_up = False
elif event.key == pygame.K_DOWN:
ship.moving_down = False
| 18,135
|
def triu_indices_from(arr, k=0):
"""
Returns the indices for the upper-triangle of `arr`.
Args:
arr (Union[Tensor, list, tuple]): 2-dimensional array.
k (int, optional): Diagonal offset, default is 0.
Returns:
triu_indices_from, tuple of 2 tensor, shape(N)
Indices for the upper-triangle of `arr`.
Raises:
TypeError: If `arr` cannot be converted to tensor, or `k` is not a number.
ValueError: If `arr` cannot be converted to a 2-dimensional tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> tensor = np.ones((3,3))
>>> print(np.triu_indices_from(tensor))
(Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
"""
arr = asarray(arr)
if arr.ndim != 2:
_raise_value_error("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| 18,136
|
def is_debug():
"""Return true if xylem is set to debug console output."""
global _debug
return _debug
| 18,137
|
def import_metrics(jsondoc):
"""Update metrics DB from `dict` data structure.
The input data structure is expected to be the one produced by SNMP
simulator's command responder `fulljson` reporting module.
.. code-block:: python
{
'format': 'jsondoc',
'version': 1,
'host': '{hostname}',
'watch_dir': {dir},
'started': '{timestamp}',
'first_update': '{timestamp}',
'last_update': '{timestamp}',
'executables': [
{
'executable': '{path}',
'runtime': '{seconds}',
'memory': {MB},
'cpu': {ms},
'files': 0,
'exits': 0,
'changes': 0,
'endpoints': {
'udpv4': [
'127.0.0.1:161'
],
'udpv6': [
'::1:161'
]
},
'console': [
{
'timestamp': 0,
'text': '{text}'
}
]
}
]
}
"""
old_times = int(time.time() - MAX_CONSOLE_PAGE_AGE)
timestamp = datetime.datetime.utcfromtimestamp(
jsondoc['started'])
supervisor_model = models.Supervisor(
hostname=jsondoc['host'],
watch_dir=jsondoc['watch_dir'],
started=timestamp
)
supervisor_model = db.session.merge(supervisor_model)
autoincrement(supervisor_model, models.Supervisor)
for executable in jsondoc['executables']:
process_model = models.Process(
path=executable['executable'],
supervisor_id=supervisor_model.id
)
process_model = db.session.merge(process_model)
autoincrement(process_model, models.Process)
process_model.runtime = process_model.runtime or 0
process_model.runtime += executable['runtime']
process_model.memory = executable['memory']
process_model.cpu = process_model.cpu or 0
process_model.cpu += executable['cpu']
process_model.files = executable['files']
process_model.exits = process_model.exits or 0
process_model.exits += executable['exits']
process_model.changes = process_model.changes or 0
process_model.changes += executable['changes']
process_model.update_interval = (
jsondoc['last_update'] - jsondoc['first_update'])
timestamp = datetime.datetime.utcfromtimestamp(
jsondoc['last_update'])
process_model.last_update = timestamp
query = (
models.Endpoint
.query
.filter_by(process_id=process_model.id))
existing_endpoints = set(
(x.protocol, x.address) for x in query.all())
reported_endpoints = set()
for protocol, addresses in executable['endpoints'].items():
for address in addresses:
reported_endpoints.add((protocol, address))
new_endpoints = reported_endpoints.difference(existing_endpoints)
for protocol, address in new_endpoints:
endpoint_model = models.Endpoint(
protocol=protocol,
address=address,
process_id=process_model.id
)
autoincrement(endpoint_model, models.Endpoint)
db.session.add(endpoint_model)
removed_endpoints = existing_endpoints.difference(reported_endpoints)
for protocol, address in removed_endpoints:
query = (
db.session
.query(models.Endpoint)
.filter_by(protocol=protocol)
.filter_by(address=address)
.filter_by(process_id=process_model.id))
query.delete()
query = (
db.session
.query(models.ConsolePage)
.filter(models.ConsolePage.process_id == process_model.id)
.filter(models.ConsolePage.timestamp < old_times)
)
query.delete()
for console_page in executable['console']:
timestamp = datetime.datetime.utcfromtimestamp(
console_page['timestamp'])
console_page_model = models.ConsolePage(
timestamp=timestamp,
text=console_page['text'],
process_id=process_model.id
)
autoincrement(console_page_model, models.ConsolePage)
db.session.add(console_page_model)
db.session.commit()
| 18,138
|
def func(var):
"""Function"""
return var + 1
| 18,139
|
def register_user(username, password):
"""
Hashes the given password and registers a new user in the database.
"""
hashed_password = bcrypt.hash(password)
app_user = AppUser(username=username.lower(), password=hashed_password)
db.session.add(app_user)
db.session.commit()
| 18,140
|
def flanking_regions_fasta_deletion(genome, dataframe, flanking_region_size):
"""
Makes batch processing possible, pulls down small region
of genome for which to design primers around.
This is based on the chromosome and position of input file.
Each Fasta record will contain:
>Sample_Gene_chr:posStart-posStop
Seq of flanking region upstream of SV + seq of flanking region downstream of SV
Args:
genome (list): genome list of tuples (header, seq).
dataframe (pandas object): dataframe with sample info.
flanking_region_size (int): length of sequence upstream and downstream of
input coordinate position to pull as sequence to design primers around.
"""
output = []
for headers, seqs in genome:
chrm = str(headers)
seq = str(seqs)
for gene, sample, chrom, start, stop in zip(dataframe.Gene, dataframe.Sample, dataframe.Chr,
dataframe.PosStart, dataframe.PosStop):
if str(chrom) == chrm:
header = str(str(sample)+"_"+str(gene)+"_"+\
str(chrom)+":"+str(start)+"-"+str(stop)+"__")
flank_seq = seq[int(start)-int(flanking_region_size):int(start)+1]\
+seq[int(stop):(int(stop)+int(flanking_region_size))]
output.append((header, flank_seq.upper()))
return output
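# Minimal sketch with toy inputs (hypothetical data, assumes pandas as pd):
# genome = [("chr1", "ACGTACGTACGTACGTACGT")]
# df = pd.DataFrame({"Gene": ["G1"], "Sample": ["S1"], "Chr": ["chr1"],
#                    "PosStart": [8], "PosStop": [12]})
# flanking_regions_fasta_deletion(genome, df, 4)
# -> [("S1_G1_chr1:8-12__", "ACGTAACGT")]
# i.e. 4 bases upstream plus the start base, then 4 bases downstream of the stop.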
| 18,141
|
def numpy_episodes(
train_dir, test_dir, shape, loader, preprocess_fn=None, scan_every=10,
num_chunks=None, **kwargs):
"""Read sequences stored as compressed Numpy files as a TensorFlow dataset.
Args:
train_dir: Directory containing NPZ files of the training dataset.
test_dir: Directory containing NPZ files of the testing dataset.
shape: Tuple of batch size and chunk length for the datasets.
loader: Strategy for reading episodes; one of 'scan', 'reload', or 'dummy'.
preprocess_fn: Optional function applied to the image tensors.
num_chunks: Optional fixed number of chunks per sequence.
**kwargs: Keyword arguments to forward to the read episodes implementation.
Returns:
Structured data from numpy episodes as Tensors.
"""
try:
dtypes, shapes = _read_spec(train_dir, **kwargs)
except ZeroDivisionError:
dtypes, shapes = _read_spec(test_dir, **kwargs)
loader = {
'scan': functools.partial(_read_episodes_scan, every=scan_every),
'reload': _read_episodes_reload,
'dummy': _read_episodes_dummy,
}[loader]
train = tf.data.Dataset.from_generator(
functools.partial(loader, train_dir, shape[0], **kwargs), dtypes, shapes)
test = tf.data.Dataset.from_generator(
functools.partial(loader, test_dir, shape[0], **kwargs), dtypes, shapes)
chunking = lambda x: tf.data.Dataset.from_tensor_slices(
# Returns dict of image, action, reward, length tensors with num_chunks in 0 dim.
chunk_sequence(x, shape[1], True, num_chunks))
def sequence_preprocess_fn(sequence):
if preprocess_fn:
with tf.device('/cpu:0'):
sequence['image'] = preprocess_fn(sequence['image'])
return sequence
# This transformation (flat_map):
# 1. Chunk each sequence,
# 2. From each sequence one can get variable number of chunks
# (first dim. of a tensor is chunks number, like with batches).
# Flatten to get the dataset of chunks.
train = train.flat_map(chunking)
train = train.shuffle(100 * shape[0])
train = train.batch(shape[0], drop_remainder=True)
train = train.map(sequence_preprocess_fn, 10).prefetch(20)
test = test.flat_map(chunking)
test = test.shuffle(100 * shape[0])
test = test.batch(shape[0], drop_remainder=True)
test = test.map(sequence_preprocess_fn, 10).prefetch(20)
return attr_dict.AttrDict(train=train, test=test)
| 18,142
|
def fft_convolve(ts, query):
"""
Computes the sliding dot product for query over the time series using
the quicker FFT convolution approach.
Parameters
----------
ts : array_like
The time series.
query : array_like
The query.
Returns
-------
array_like - The sliding dot product.
"""
n = len(ts)
m = len(query)
x = np.fft.fft(ts)
y = np.append(np.flipud(query), np.zeros([1, n - m]))
y = np.fft.fft(y)
z = np.fft.ifft(x * y)
return np.real(z[m - 1:n])
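# Sanity sketch: the FFT-based result should agree (up to floating-point error)
# with the naive sliding dot product of the query over the time series.
def _naive_sliding_dot(ts, query):
    n, m = len(ts), len(query)
    return np.array([np.dot(ts[i:i + m], query) for i in range(n - m + 1)])

# ts = np.random.rand(64); q = np.random.rand(8)
# np.allclose(fft_convolve(ts, q), _naive_sliding_dot(ts, q))  -> True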
| 18,143
|
def _add_merge_gvcfs_job(
b: hb.Batch,
gvcfs: List[hb.ResourceGroup],
output_gvcf_path: Optional[str],
sample_name: str,
) -> Job:
"""
Combine by-interval GVCFs into a single sample GVCF file
"""
job_name = f'Merge {len(gvcfs)} GVCFs, {sample_name}'
j = b.new_job(job_name)
j.image(PICARD_IMAGE)
j.cpu(2)
java_mem = 7
j.memory('standard') # ~ 4G/core ~ 7.5G
j.storage(f'{len(gvcfs) * 1.5 + 2}G')
j.declare_resource_group(
output_gvcf={
'g.vcf.gz': '{root}-' + sample_name + '.g.vcf.gz',
'g.vcf.gz.tbi': '{root}-' + sample_name + '.g.vcf.gz.tbi',
}
)
input_cmd = ' '.join(f'INPUT={g["g.vcf.gz"]}' for g in gvcfs)
j.command(
f"""set -e
(while true; do df -h; pwd; du -sh $(dirname {j.output_gvcf['g.vcf.gz']}); free -m; sleep 300; done) &
java -Xms{java_mem}g -jar /usr/picard/picard.jar \
MergeVcfs {input_cmd} OUTPUT={j.output_gvcf['g.vcf.gz']}
df -h; pwd; du -sh $(dirname {j.output_gvcf['g.vcf.gz']}); free -m
"""
)
if output_gvcf_path:
b.write_output(j.output_gvcf, output_gvcf_path.replace('.g.vcf.gz', ''))
return j
| 18,144
|
def register_permission(name, codename, ctypes=None):
"""Registers a permission to the framework. Returns the permission if the
registration was successful, otherwise False.
**Parameters:**
name
The unique name of the permission. This is displayed to the customer.
codename
The unique codename of the permission. This is used internally to
identify the permission.
ctypes
The content types for which the permission is active. This can be
used to display only reasonable permissions for an object. Each entry
must be a Django model class (converted internally to a ContentType).
"""
if ctypes is None:
ctypes = []
# Permission with same codename and/or name must not exist.
if Permission.objects.filter(Q(name=name) | Q(codename=codename)):
return False
p = Permission.objects.create(name=name, codename=codename)
ctypes = [ContentType.objects.get_for_model(ctype) for ctype in ctypes]
if ctypes:
p.content_types = ctypes
p.save()
return p
| 18,145
|
def calculate_outliers(tile_urls, num_outliers, cache, nprocs):
"""
Fetch tiles and calculate the outlier tiles per layer.
The number of outliers is per layer - the largest N.
Cache, if true, uses a local disk cache for the tiles. This can be very
useful if re-running percentile calculations.
Nprocs is the number of processes to use for both fetching and aggregation.
Even on a system with a single CPU, it can be worth setting this to a
larger number to make concurrent network requests for tiles.
"""
def factory_fn():
return LargestN(num_outliers, cache)
if nprocs > 1:
results = parallel(
tile_urls, FactoryFunctionHolder(factory_fn), nprocs)
else:
results = sequential(tile_urls, factory_fn)
return results
| 18,146
|
def load_trigger_dataset(
fname,
templatizer,
limit=None,
train=False,
preprocessor_key=None,
priming_dataset=None,
max_priming_examples=64,
):
"""
Loads a MLM classification dataset.
Parameters
==========
fname : str
The filename.
templatizer : Templatizer
Maps instances to cloze-style model inputs.
limit : int
(optional) Limit the amount of data loaded.
train : bool
Whether the data is used for training. Default: False.
preprocessor_key : str
Key used to lookup preprocessor for data.
"""
if preprocessor_key is None:
preprocessor = PREPROCESSORS[fname.split('.')[-1]]
else:
preprocessor = PREPROCESSORS[preprocessor_key]
instances = []
for x in preprocessor(fname, train=train):
try:
model_inputs, label_id = templatizer(x, train=train)
if priming_dataset is not None:
model_inputs, label_id = prime(
model_inputs,
label_id,
priming_dataset,
model_max_length=templatizer._tokenizer.model_max_length,
max_priming_examples=max_priming_examples,
)
except ValueError as e:
logger.warning('Encountered error "%s" when processing "%s". Skipping.', e, x)
continue
else:
instances.append((model_inputs, label_id))
if limit:
limit = min(len(instances), limit)
return random.sample(instances, limit)
return instances
| 18,147
|
def pmu2bids(physio_files, verbose=False):
"""
Function to read a list of Siemens PMU physio files and
save them as a BIDS physiological recording.
Parameters
----------
physio_files : list of str
list of paths to files with a Siemens PMU recording
verbose : bool
verbose flag
Returns
-------
physio : PhysioData
PhysioData with the contents of the file
"""
# In case we are handled just a single file, make it a one-element list:
if isinstance(physio_files, str):
physio_files = [physio_files]
# Init PhysioData object to hold physio signals:
physio = PhysioData()
# Read the files from the list, extract the relevant information and
# add a new PhysioSignal to the list:
for f in physio_files:
physio_type, MDHTime, sampling_rate, physio_signal = readpmu(f, verbose=verbose)
testSamplingRate(
sampling_rate = sampling_rate,
Nsamples = len(physio_signal),
logTimes=MDHTime
)
# specify label:
if 'PULS' in physio_type:
physio_label = 'cardiac'
elif 'RESP' in physio_type:
physio_label = 'respiratory'
elif "TRIGGER" in physio_type:
physio_label = 'trigger'
else:
physio_label = physio_type
physio.append_signal(
PhysioSignal(
label=physio_label,
units='',
samples_per_second=sampling_rate,
physiostarttime=MDHTime[0],
signal=physio_signal
)
)
return physio
| 18,148
|
def add_chain(length):
"""Adds a chain to the network so that"""
chained_works = []
chain = utils.generate_chain(length)
for i in range(len(chain)-1):
agent_id = get_random_agent().properties(ns.KEY_AGENT_ID).value().next()
work_id = g.create_work().properties(ns.KEY_WORK_ID).value().next()
g.agent(agent_id).owns_work(g.work(work_id)).next()
item1 = g.create_item(chain[i])
g.agent(agent_id).works(work_id).demands(item1).next()
item2 = g.create_item(chain[i+1])
g.agent(agent_id).works(work_id).offers(item2).next()
chained_works.append(work_id)
return chained_works
| 18,149
|
async def ping(ctx):
""" Pong """
await ctx.send("pong")
| 18,150
|
def re_fit(file_name, top_c, bot_c):
""" re-fits a prepared oocyte file (-t and -b flags for top and bot constraints)"""
from vartools.result import re_fit_data
if top_c == "True":
top_c = True
elif top_c == "False":
top_c = False
else:
sys.exit("Invalid option: " + top_c)
if bot_c == "True":
bot_c = True
elif bot_c == "False":
bot_c = False
else:
sys.exit("Invalid option: " + bot_c)
re_fit_data(file_name, top_c, bot_c)
return None
| 18,151
|
def convert_graph_to_db_format(input_graph: nx.Graph, with_weights=False, cast_to_directed=False):
"""Converts a given graph into a DB format, which consists of two or three lists
1. **Index list:** a list where the i-th position contains the index of the beginning of the list of adjacent nodes (in the second list).
2. **Node list:** for each node, we list (in order) all the nodes which are adjacent to it.
3. **Weight list:** if the weight parameter is True, includes the weights of the edges, corresponds to the nodes list
**Assumptions:**
The code has several preexisting assumptions:
a) The nodes are labeled with numbers
b) Those numbers are the sequence [0,...,num_of_nodes-1]
c) If there are weights, they are floats
d) If there are weights, they are initialized for all edges
e) If there are weights, the weight key is 'weight'
.. Note::
The code behaves differently for directed and undirected graphs.
For undirected graph, every edge is actually counted twice (p->q and q->p).
Example::
For the simple directed graph (0->1, 0->2,0->3,2->0,3->1,3->2):
`Indices: [0, 3, 3, 4, 6]`
`Neighbors: [1, 2, 3, 0, 1, 2]`
Note that index[1] is the same as index[2]. That is because node 1 has no neighbors, so its neighbor list has size 0, but we still need an index entry for it.
For the same graph when it is undirected:
`Indices: [0, 3, 5, 7, 10]`
`Neighbors: [1, 2, 3, 0, 3, 0, 3, 0, 1, 2]`
Note that the number of edges isn't doubled because in the directed version there is a bidirectional edge.
:param input_graph: the nx.Graph object to convert
:param with_weights: whether to create a weight list. Defaults to False.
:param cast_to_directed: whether to cast the graph into a directed format
:return: two or three lists: index,nodes, [weights]
"""
if cast_to_directed:
graph = input_graph.to_directed()
else:
graph = input_graph.copy()
if graph.is_directed():
# Color printing taken from https://www.geeksforgeeks.org/print-colors-python-terminal/
print("\033[93m {}\033[00m".format('Note that the graph is processed as a directed graph'))
indices = [0] # The first neighbor list always starts at index 0
neighbor_nodes = []
nodes = [node for node in graph.nodes()]
# print(nodes)
nodes.sort()
neighbors = [sorted([x for x in graph.neighbors(node)]) for node in nodes]
# Create the indices and neighbor nodes lists
for neighbor_list in neighbors:
neighbor_list.sort()
# print(neighbor_list)
neighbor_nodes.extend(neighbor_list)
indices.append(indices[-1] + len(neighbor_list))
if with_weights:
try:
weights = [0] * len(neighbor_nodes)
current_index = 0
for node in nodes:
for x in neighbors[node]:
w = graph[node][x]['weight']
weights[current_index] = w
current_index += 1
return indices, neighbor_nodes, weights
except KeyError:
# Print in red
print("\033[91m {}\033[00m".format('No weights defined, returning an empty list of weights'))
print()
return indices, neighbor_nodes, []
return indices, neighbor_nodes
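# Runnable sketch of the directed example from the docstring (assumes networkx
# is importable as nx, as elsewhere in this module):
# g = nx.DiGraph([(0, 1), (0, 2), (0, 3), (2, 0), (3, 1), (3, 2)])
# convert_graph_to_db_format(g) -> ([0, 3, 3, 4, 6], [1, 2, 3, 0, 1, 2])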
| 18,152
|
def read_pnts(pnt_bytes, object_layers):
"""Read the layer's points."""
print("\tReading Layer ("+object_layers[-1].name+") Points")
offset= 0
chunk_len= len(pnt_bytes)
while offset < chunk_len:
pnts= struct.unpack(">fff", pnt_bytes[offset:offset+12])
offset+= 12
# Re-order the points so that the mesh has the right pitch,
# the pivot already has the correct order.
pnts= [pnts[0] - object_layers[-1].pivot[0],\
pnts[2] - object_layers[-1].pivot[1],\
pnts[1] - object_layers[-1].pivot[2]]
object_layers[-1].pnts.append(pnts)
| 18,153
|
def auxiliary_subfields():
"""Factory associated with AuxSubfieldsPoroelasticity.
"""
return AuxSubfieldsPoroelasticity()
| 18,154
|
def cassandra_get_unit_data():
"""
Basic function to obtain units from the DB and return them as a dict
:return: dictionary of units
"""
kpi_dict = {}
cassandra_cluster = Cluster()
session = cassandra_cluster.connect('pb2')
query = session.prepare('SELECT * FROM kpi_units')
query_data = session.execute(query)
for row in query_data:
kpi_dict[row[1]] = [row[0], row[2], row[3], row[4]]
return kpi_dict
| 18,155
|
def read_cfg_float(cfgp, section, key, default):
"""
Read float from a config file
Args:
cfgp: Config parser
section: [section] of the config file
key: Key to be read
default: Value if couldn't be read
Returns: Resulting float
"""
if cfgp.has_option(section, key):
return cfgp.getfloat(section, key)
else:
return default
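# Usage sketch with an in-memory config (assumes the standard configparser):
# import configparser
# cfgp = configparser.ConfigParser()
# cfgp.read_string("[model]\nlr = 0.01\n")
# read_cfg_float(cfgp, "model", "lr", 0.1)       -> 0.01
# read_cfg_float(cfgp, "model", "momentum", 0.9) -> 0.9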
| 18,156
|
def get_repository_output(repository_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRepositoryResult]:
"""
The AWS::ECR::Repository resource specifies an Amazon Elastic Container Registry (Amazon ECR) repository, where users can push and pull Docker images. For more information, see https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html
:param str repository_name: The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app). If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the repository name. For more information, see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html.
"""
...
| 18,157
|
def random(website):
"""
Randomly fetch cookies.
:param website: the site to query, e.g. weibo
:return: randomly selected cookies
"""
g = get_conn()
cookies = getattr(g, website + '_cookies').random()
return cookies
| 18,158
|
def get_pid(referral_data):
""" Example getting PID using the same token used to query AD
NOTE! to get PID the referral information must exist in the BETA(UAT) instance of TOMS
"""
referral_uid = referral_data['referral_uid']
url = "https://api.beta.genomics.nhs.uk/reidentification/referral-pid/{referral_uid}".format(referral_uid=referral_uid)
auth_header = {'Authorization': 'Bearer {}'.format(jwt_token)}
pid = requests.get(url, headers=auth_header).json()
return pid
| 18,159
|
def test_create_search_space():
"""Generate a random neural network from the search_space definition.
"""
import random
random.seed(10)
from random import random
from tensorflow.keras.utils import plot_model
import tensorflow as tf
tf.random.set_seed(10)
search_space = create_search_space(num_layers=5)
ops = [random() for _ in range(search_space.num_nodes)]
search_space.set_ops(ops)
model = search_space.create_model()
model.summary()
print(f'This search_space needs {len(ops)} choices to generate a neural network.')
| 18,160
|
def open_events(
fname: Union[Path, str], leap_sec: float, get_frame_rate: bool = False
) -> Tuple[
List[float], List[float], List[float], List[datetime], Union[List[float], None]
]:
"""
Parameters
----------
fname : Path or str
filename of *_events.pos file
leap_sec : float
The current leap second used to convert GPS time to UTC time
get_frame_rate : bool [default=False]
Whether to return the frame rate of sequential trigger events
Returns
-------
lat : List[float]
Latitudes (decimal degrees) of trigger events recorded by Reach M2
lon : List[float]
Longitudes (decimal degrees) of trigger events recorded by Reach M2
height : List[float]
Ellipsoid heights of trigger events recorded by Reach M2
dt_ls : List[datetime]
datetime (UTC) of trigger events recorded by Reach M2
reach_frate : List[float] or None
if get_frame_rate is True:
reach_frate -> frame rate (seconds) of trigger events recorded
by Reach M2
if get_frame_rate is False:
reach_frate = None
"""
with open(fname, encoding="utf-8") as fid:
contents = fid.readlines()
lat, lon, height, dt_ls = [], [], [], []
reach_frate = [] if get_frame_rate else None
cnt = 0
for i in range(len(contents)):
if contents[i].startswith("%"):
continue
row = contents[i].strip().split()
dt = datetime_from_event_text(row[0], row[1], leap_sec)
if cnt > 0:
reach_frate.append((dt - prev_dt).total_seconds()) # noqa
lat.append(float(row[2]))
lon.append(float(row[3]))
height.append(float(row[4]))
dt_ls.append(dt)
prev_dt = dt # noqa
cnt += 1
return lat, lon, height, dt_ls, reach_frate
| 18,161
|
def intdags_permutations(draw, min_size:int=1, max_size:int=10):
""" Produce instances of a same DAG. Instances are not nesessarily
topologically sorted """
return draw(lists(permutations(draw(intdags())),
min_size=min_size,
max_size=max_size))
| 18,162
|
def rich_echo_via_pager(
text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],
theme: t.Optional[Theme] = None,
highlight=False,
markdown: bool = False,
**kwargs,
) -> None:
"""This function takes a text and shows it via an environment specific
pager on stdout.
Args:
text_or_generator: the text to page, or alternatively, a generator emitting the text to page.
theme: optional rich.theme.Theme object to use for formatting
markdown: if True, interpret message as Markdown
highlight: if True, use automatic rich.print highlighting
**kwargs: if "color" in kwargs, works the same as click.echo_via_pager(color=color)
otherwise any kwargs are passed to rich.Console.print()
"""
if inspect.isgeneratorfunction(text_or_generator):
text_or_generator = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()
elif isinstance(text_or_generator, str):
text_or_generator = [text_or_generator]
else:
try:
text_or_generator = iter(text_or_generator)
except TypeError:
text_or_generator = [text_or_generator]
console = _console.console or Console(theme=theme)
color = kwargs.pop("color", True)
with console.pager(styles=color):
for x in text_or_generator:
if isinstance(x, str) and markdown:
x = Markdown(x)
console.print(x, highlight=highlight, **kwargs)
| 18,163
|
def getConfiguredGraphClass(doer):
"""
In this class method, we must return a configured graph class
"""
# if options.bReified:
# DU_GRAPH = Graph_MultiSinglePageXml_Segmenter_Separator_DOM
if options.bSeparator:
DU_GRAPH = ConjugateSegmenterGraph_MultiSinglePageXml_Separator
else:
DU_GRAPH = ConjugateSegmenterGraph_MultiSinglePageXml
ntClass = My_ConjugateNodeType
if options.bBB2:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
, [] # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun = None
, bPreserveWidth=True
)
elif options.bBB31:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
, [] # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun = (None, lambda v: v * 0.066*3) # shrink to 60% of its size
, bPreserveWidth=True
)
else:
nt = ntClass("mi_clstr" #some short prefix because labels below are prefixed with it
, [] # in conjugate, we accept all labels, and None becomes "none"
, []
, False # unused
, BBoxDeltaFun =lambda v: max(v * 0.066, min(5, v/3)) #we reduce overlap in this way
)
nt.setLabelAttribute("id")
## HD added 23/01/2020: needed for output generation
DU_GRAPH.clusterType='paragraph'
nt.setXpathExpr(( ".//pc:TextLine"
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(nt)
return DU_GRAPH
| 18,164
|
def find_amped_polys_for_syntheticidle(qubit_filter, idleStr, model, singleQfiducials=None,
prepLbl=None, effectLbls=None, initJ=None, initJrank=None,
wrtParams=None, algorithm="greedy", require_all_amped=True,
idtPauliDicts=None, comm=None, verbosity=0):
"""
Find fiducial pairs which amplify the parameters of a synthetic idle gate.
This routine is primarily used internally within higher-level n-qubit
sequence selection routines.
Parameters
----------
qubit_filter : list
A list specifying which qubits fiducial pairs should be placed upon.
Typically this is a subset of all the qubits, as the synthetic idle
is composed of nontrivial gates acting on a localized set of qubits
and noise/errors are localized around these.
idleStr : Circuit
The operation sequence specifying the idle operation to consider. This may
just be a single idle gate, or it could be multiple non-idle gates
which together act as an idle.
model : Model
The model used to compute the polynomial expressions of probabilities
to first-order. Thus, this model should always have (simulation)
type "termorder".
singleQfiducials : list, optional
A list of gate-name tuples (e.g. `('Gx',)`) which specify a set of single-
qubit fiducials to use when trying to amplify gate parameters. Note that
no qubit "state-space" label is required here (i.e. *not* `(('Gx',1),)`);
the tuples just contain single-qubit gate *names*. If None, then
`[(), ('Gx',), ('Gy',)]` is used by default.
prepLbl : Label, optional
The state preparation label to use. If None, then the first (and
usually the only) state prep label of `model` is used, so it's
usually fine to leave this as None.
effectLbls : list, optional
The list of POVM effect labels to use, as a list of `Label` objects.
These are *simplified* POVM effect labels, so something like "Mdefault_0",
and if None the default is all the effect labels of the first POVM of
`model`, which is usually what you want.
initJ : numpy.ndarray, optional
An initial Jacobian giving the derivatives of some other polynomials
with respect to the same `wrtParams` that this function is called with.
This acts as a starting point, and essentially informs the fiducial-pair
selection algorithm that some parameters (or linear combos of them) are
*already* amplified (e.g. by some other germ that's already been
selected) and for which fiducial pairs are not needed.
initJrank : int, optional
The rank of `initJ`. The function could compute this from `initJ`
but in practice one usually has the rank of `initJ` lying around and
so this saves a call to `np.linalg.matrix_rank`.
wrtParams : slice, optional
The parameters to consider for amplification. (This function seeks
fiducial pairs that amplify these parameters.) If None, then pairs
which amplify all of `model`'s parameters are searched for.
algorithm : {"greedy","sequential"}
Which algorithm is used internally to find fiducial pairs. "greedy"
will give smaller sets of fiducial pairs (better) but takes longer.
Usually it's worth the wait and you should use the default ("greedy").
require_all_amped : bool, optional
        If True, an AssertionError is raised when fewer than all of the
        requested parameters (in `wrtParams`) are amplified by the final set of
fiducial pairs.
verbosity : int, optional
The level of detail printed to stdout. 0 means silent.
Returns
-------
J : numpy.ndarray
The final jacobian with rows equal to the number of chosen amplified
polynomials (note there is one row per fiducial pair *including* the
outcome - so there will be two different rows for two different
outcomes) and one column for each parameter specified by `wrtParams`.
Jrank : int
The rank of the jacobian `J`, equal to the number of amplified
parameters (at most the number requested).
fidpair_lists : list
The selected fiducial pairs, each in "gatename-fidpair-list" format.
Elements of `fidpair_lists` are themselves lists, all of length=#qubits.
Each element of these lists is a (prep1Qnames, meas1Qnames) 2-tuple
specifying the 1-qubit gates (by *name* only) on the corresponding qubit.
For example, the single fiducial pair prep=Gx:1Gy:2, meas=Gx:0Gy:0 in a
3-qubit system would have `fidpair_lists` equal to:
`[ [ [(),('Gx','Gy')], [('Gx',), () ], [('Gy',), () ] ] ]`
` < Q0 prep,meas >, < Q1 prep,meas >, < Q2 prep,meas >`
"""
#Note: "useful" fiducial pairs are identified by looking at the rank of a
# Jacobian matrix. Each row of this Jacobian is the derivative of the
# "amplified polynomial" - the L=1 polynomial for a fiducial pair (i.e.
# pr_poly(F1*(germ)*F2) ) minus the L=0 polynomial (i.e. pr_poly(F1*F2) ).
# When the model only gives probability polynomials to first order in
# the error rates this gives the L-dependent and hence amplified part
# of the polynomial expression for the probability of F1*(germ^L)*F2.
# This derivative of an amplified polynomial, taken with respect to
# all the parameters we care about (i.e. wrtParams) would ideally be
# kept as a polynomial and the "rank" of J would be the number of
# linearly independent polynomials within the rows of J (each poly
# would be a vector in the space of polynomials). We currently take
# a cheap/HACK way out and evaluate the derivative-polynomial at a
    # random dummy value, which should yield linearly independent vectors
    # in R^n whenever the polynomials are linearly independent - then
# we can use the usual scipy/numpy routines for computing a matrix
# rank, etc.
# Assert that model uses termorder, as doing L1-L0 to extract the "amplified" part
# relies on only expanding to *first* order.
assert(model._sim_type == "termorder" and model._sim_args['max_order'] == 1), \
'`model` must use "termorder:1" simulation type!'
printer = _VerbosityPrinter.build_printer(verbosity, comm)
if prepLbl is None:
prepLbl = model._shlp.get_default_prep_lbl()
if effectLbls is None:
povmLbl = model._shlp.get_default_povm_lbl(sslbls=None)
effectLbls = [_Lbl("%s_%s" % (povmLbl, l))
for l in model._shlp.get_effect_labels_for_povm(povmLbl)]
if singleQfiducials is None:
# TODO: assert model has Gx and Gy gates?
singleQfiducials = [(), ('Gx',), ('Gy',)] # ('Gx','Gx')
#dummy = 0.05*_np.ones(model.num_params(),'d') # for evaluating derivs...
#dummy = 0.05*_np.arange(1,model.num_params()+1) # for evaluating derivs...
#dummy = 0.05*_np.random.random(model.num_params())
dummy = 5.0 * _np.random.random(model.num_params()) + 0.5 * _np.ones(model.num_params(), 'd')
# expect terms to be either coeff*x or coeff*x^2 - (b/c of latter case don't eval at zero)
#amped_polys = []
selected_gatename_fidpair_lists = []
if wrtParams is None: wrtParams = slice(0, model.num_params())
Np = _slct.length(wrtParams)
if initJ is None:
J = _np.empty((0, Np), 'complex'); Jrank = 0
else:
J = initJ; Jrank = initJrank
if algorithm == "greedy":
Jrows = _np.empty((len(effectLbls), Np), 'complex')
#Outer iteration
while Jrank < Np:
if algorithm == "sequential":
printer.log("Sequential find_amped_polys_for_syntheticidle started. Target rank=%d" % Np)
assert(comm is None), "No MPI support for algorithm='sequential' case!"
elif algorithm == "greedy":
maxRankInc = 0
bestJrows = None
printer.log("Greedy find_amped_polys_for_syntheticidle started. Target rank=%d" % Np)
else: raise ValueError("Invalid `algorithm` argument: %s" % algorithm)
# loop over all possible (remaining) fiducial pairs
nQubits = len(qubit_filter)
loc_Indices, _, _ = _mpit.distribute_indices(
list(range(len(singleQfiducials)**nQubits)), comm, False)
loc_itr = 0; nLocIters = len(loc_Indices)
#print("DB: Rank %d indices = " % comm.Get_rank(), loc_Indices)
with printer.progress_logging(2):
for itr, prep in enumerate(_itertools.product(*([singleQfiducials] * nQubits))):
# There's probably a cleaner way to do this,
if loc_itr < len(loc_Indices) and itr == loc_Indices[loc_itr]:
loc_itr += 1 # but this limits us to this processor's local indices
else:
continue
#print("DB: Rank %d: running itr=%d" % (comm.Get_rank(), itr))
printer.show_progress(loc_itr, nLocIters, prefix='--- Finding amped-polys for idle: ')
prepFid = _objs.Circuit(())
for i, el in enumerate(prep):
prepFid = prepFid + _onqubit(el, qubit_filter[i])
for meas in _itertools.product(*([singleQfiducials] * nQubits)):
if idtPauliDicts is not None:
# For idle tomography compatibility, only consider fiducial pairs with either
# all-the-same or all-different prep & measure basis (basis is determined
# by the *last* letter in the value, e.g. ignore '-' sign in '-X').
prepDict, measDict = idtPauliDicts
rev_prepDict = {v[-1]: k for k, v in prepDict.items()} # could do this once above,
rev_measDict = {v[-1]: k for k, v in measDict.items()} # but this isn't the bottleneck.
cmp = [(rev_prepDict[prep[kk]] == rev_measDict[meas[kk]]) for kk in range(nQubits)]
                        # skip unless the bases are either all the same or all different
if not (all(cmp) or not any(cmp)): continue
measFid = _objs.Circuit(())
for i, el in enumerate(meas):
measFid = measFid + _onqubit(el, qubit_filter[i])
gatename_fidpair_list = [(prep[i], meas[i]) for i in range(nQubits)]
if gatename_fidpair_list in selected_gatename_fidpair_lists:
continue # we've already chosen this pair in a previous iteration
gstr_L0 = prepFid + measFid # should be a Circuit
gstr_L1 = prepFid + idleStr + measFid # should be a Circuit
ps = model._fwdsim().prs_as_polys(prepLbl, effectLbls, gstr_L1)
qs = model._fwdsim().prs_as_polys(prepLbl, effectLbls, gstr_L0)
if algorithm == "sequential":
added = False
for elbl, p, q in zip(effectLbls, ps, qs):
amped = p + -1 * q # the amplified poly
Jrow = _np.array([[amped.deriv(iParam).evaluate(dummy)
for iParam in _slct.as_array(wrtParams)]])
if _np.linalg.norm(Jrow) < 1e-8: continue # row of zeros can fool matrix_rank
Jtest = _np.concatenate((J, Jrow), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
if testRank > Jrank:
printer.log("fidpair: %s,%s (%s) increases rank => %d" %
(str(prep), str(meas), str(elbl), testRank), 4)
J = Jtest
Jrank = testRank
if not added:
selected_gatename_fidpair_lists.append(gatename_fidpair_list)
added = True # only add fidpair once per elabel loop!
if Jrank == Np: break # this is the largest rank J can take!
elif algorithm == "greedy":
#test adding all effect labels - get the overall increase in rank due to this fidpair
for k, (elbl, p, q) in enumerate(zip(effectLbls, ps, qs)):
amped = p + -1 * q # the amplified poly
Jrows[k, :] = _np.array([[amped.deriv(iParam).evaluate(dummy)
for iParam in _slct.as_array(wrtParams)]])
Jtest = _np.concatenate((J, Jrows), axis=0)
testRank = _np.linalg.matrix_rank(Jtest, tol=RANK_TOL)
rankInc = testRank - Jrank
if rankInc > maxRankInc:
maxRankInc = rankInc
bestJrows = Jrows.copy()
bestFidpair = gatename_fidpair_list
if testRank == Np: break # this is the largest rank we can get!
if algorithm == "greedy":
# get the best of the bestJrows, bestFidpair, and maxRankInc
if comm is not None:
maxRankIncs_per_rank = comm.allgather(maxRankInc)
iWinningRank = maxRankIncs_per_rank.index(max(maxRankIncs_per_rank))
maxRankInc = maxRankIncs_per_rank[iWinningRank]
if comm.Get_rank() == iWinningRank:
comm.bcast(bestJrows, root=iWinningRank)
comm.bcast(bestFidpair, root=iWinningRank)
else:
bestJrows = comm.bcast(None, root=iWinningRank)
bestFidpair = comm.bcast(None, root=iWinningRank)
if require_all_amped:
assert(maxRankInc > 0), "No fiducial pair increased the Jacobian rank!"
Jrank += maxRankInc
J = _np.concatenate((J, bestJrows), axis=0)
selected_gatename_fidpair_lists.append(bestFidpair)
printer.log("%d fidpairs => rank %d (Np=%d)" %
(len(selected_gatename_fidpair_lists), Jrank, Np))
#DEBUG
#print("DB: J = ")
#_gt.print_mx(J)
#print("DB: svals of J for synthetic idle: ", _np.linalg.svd(J, compute_uv=False))
return J, Jrank, selected_gatename_fidpair_lists
| 18,165
|
def _seed(x, deg=5, seeds=None):
"""Seed the greedy algorithm with (deg+1) evenly spaced indices"""
if seeds is None:
f = lambda m, n: [ii*n//m + n//(2*m) for ii in range(m)]
indices = np.sort(np.hstack([[0, len(x)-1], f(deg-1, len(x))]))
else:
indices = seeds
errors = []
return indices, errors
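# Usage sketch (not part of the original module): for 20 samples and deg=5 the
# seed is deg+1 = 6 roughly evenly spaced indices and an empty error list.
import numpy as np
idx, errs = _seed(np.linspace(0, 1, 20), deg=5)
print(idx)   # [ 0  2  7 12 17 19]
print(errs)  # []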
| 18,166
|
def check_radius_against_distance(cube, radius):
"""Check required distance isn't greater than the size of the domain.
Args:
cube (iris.cube.Cube):
The cube to check.
radius (float):
The radius, which cannot be more than half of the
size of the domain.
"""
axes = []
for axis in ["x", "y"]:
coord = cube.coord(axis=axis).copy()
coord.convert_units("metres")
axes.append((max(coord.points) - min(coord.points)))
max_allowed = np.sqrt(axes[0] ** 2 + axes[1] ** 2) * 0.5
if radius > max_allowed:
raise ValueError(
f"Distance of {radius}m exceeds max domain " f"distance of {max_allowed}m"
)
| 18,167
|
def get_ref(cube):
"""Gets the 8 reflection symmetries of a nd numpy array"""
L = []
L.append(cube[:,:,:])
L.append(cube[:,:,::-1])
L.append(cube[:,::-1,:])
L.append(cube[::-1,:,:])
L.append(cube[:,::-1,::-1])
L.append(cube[::-1,:,::-1])
L.append(cube[::-1,::-1,:])
L.append(cube[::-1,::-1,::-1])
return L
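# Usage sketch (not part of the original module): the 8 reflections of a small
# 3D array; the first entry is the array itself, the last flips every axis.
import numpy as np
cube = np.arange(8).reshape(2, 2, 2)
refs = get_ref(cube)
print(len(refs))                                         # 8
print(np.array_equal(refs[0], cube))                     # True
print(np.array_equal(refs[7], cube[::-1, ::-1, ::-1]))   # True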
| 18,168
|
def get_relation_functionality(
mapped_triples: Collection[Tuple[int, int, int]],
add_labels: bool = True,
label_to_id: Optional[Mapping[str, int]] = None,
) -> pd.DataFrame:
"""Calculate relation functionalities.
:param mapped_triples:
The ID-based triples.
:return:
A dataframe with columns ( functionality | inverse_functionality )
"""
df = pd.DataFrame(data=mapped_triples, columns=["h", "r", "t"])
df = df.groupby(by="r").agg(dict(
h=["nunique", COUNT_COLUMN_NAME],
t="nunique",
))
df[FUNCTIONALITY_COLUMN_NAME] = df[("h", "nunique")] / df[("h", COUNT_COLUMN_NAME)]
df[INVERSE_FUNCTIONALITY_COLUMN_NAME] = df[("t", "nunique")] / df[("h", COUNT_COLUMN_NAME)]
df = df[[FUNCTIONALITY_COLUMN_NAME, INVERSE_FUNCTIONALITY_COLUMN_NAME]]
df.columns = df.columns.droplevel(1)
df.index.name = RELATION_ID_COLUMN_NAME
df = df.reset_index()
return add_relation_labels(df, add_labels=add_labels, label_to_id=label_to_id)
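# Standalone sketch of the statistics computed above, using plain pandas instead
# of the module's column-name constants and label helpers (assumed equivalents):
# functionality = #distinct heads / #triples, inverse = #distinct tails / #triples.
import pandas as pd
triples = [(0, 0, 1), (0, 0, 2), (1, 0, 2), (3, 1, 4)]
stats = (pd.DataFrame(triples, columns=["h", "r", "t"])
         .groupby("r")
         .agg(heads=("h", "nunique"), tails=("t", "nunique"), total=("h", "size")))
stats["functionality"] = stats["heads"] / stats["total"]
stats["inverse_functionality"] = stats["tails"] / stats["total"]
print(stats[["functionality", "inverse_functionality"]])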
| 18,169
|
def df_to_vega_lite(df, path=None):
"""
Export a pandas.DataFrame to a vega-lite data JSON.
Params
------
df : pandas.DataFrame
dataframe to convert to JSON
path : None or str
if None, return the JSON str. Else write JSON to the file specified by
path.
"""
chart = altair.Chart(data=df)
data = chart.to_dict()['data']['values']
if path is None:
return json.dumps(data, **json_dump_kwargs)
with open(path, 'w') as write_file:
json.dump(data, write_file, **json_dump_kwargs)
| 18,170
|
def _is_json_mimetype(mimetype):
"""Returns 'True' if a given mimetype implies JSON data."""
return any(
[
mimetype == "application/json",
mimetype.startswith("application/") and mimetype.endswith("+json"),
]
)
| 18,171
|
def show_fields(*fields):
    """Output the {field label -> field value} dictionary. Does the alignment
    and formats certain specific types of values."""
    fields = [field for field in fields if field]
    target_len = max(len(name) for name, value in fields) + 2
    for name, value in fields:
        line = name + ':' + " " * (target_len - len(name))
        if isinstance(value, bool):
            line += color_text("Yes", 'green') if value else color_text("No", 'red')
        else:
            line += str(value)
        print(line)
| 18,172
|
def make_request(action, data, token):
"""Make request based on passed arguments and timestamp."""
return {
'action': action,
'time': datetime.now().timestamp(),
'data': data,
'token': token
}
| 18,173
|
def get_stats_historical_prices(timestamp, horizon):
"""
We assume here that the price is a random variable following a normal
distribution. We compute the mean and covariance of the price distribution.
"""
hist_prices_df = pd.read_csv(HISTORICAL_PRICES_CSV)
hist_prices_df["timestamp"] = pd.to_datetime(hist_prices_df["timestamp"])
hist_prices_df = hist_prices_df.set_index("timestamp")
start = pd.Timestamp(year=2018,
month=6,
day=2,
hour=timestamp.hour,
minute=timestamp.minute)
end = pd.Timestamp(year=2018,
month=10,
day=25,
hour=timestamp.hour,
minute=timestamp.minute)
hist_prices_df = hist_prices_df[
(hist_prices_df.index >= start) &
(hist_prices_df.index < end)
]
hist_prices_df['hour'] = hist_prices_df.index.hour
hist_prices_df['minute'] = hist_prices_df.index.minute
num_features = horizon
num_samples = min(hist_prices_df.groupby(
[hist_prices_df.index.hour, hist_prices_df.index.minute]
).count()['clearing_price'].values)
new = hist_prices_df.groupby(
[hist_prices_df.index.hour, hist_prices_df.index.minute]
).mean()
new = new.set_index(pd.Index(range(48)))
i = new[
(new.hour == timestamp.hour) & (new.minute == timestamp.minute)
]['clearing_price'].index.values[0]
a = new[new.index >= i]['clearing_price']
b = new[new.index < i]['clearing_price']
mean_X = np.concatenate((a, b))
X = np.copy(hist_prices_df['clearing_price'].values)
X = np.reshape(X, (num_samples, num_features))
cov = GaussianMixture(covariance_type='tied').fit(
normalize(X)).covariances_
return mean_X, cov
| 18,174
|
def _unflattify(values, shape):
"""
Unflattifies parameter values.
:param values: The flattened array of values that are to be unflattified
:type values: torch.Tensor
:param shape: The shape of the parameter prior
:type shape: torch.Size
:rtype: torch.Tensor
"""
if len(shape) < 1 or values.shape[1:] == shape:
return values
return values.reshape(values.shape[0], *shape)
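# Usage sketch (assumes PyTorch is installed): flattened samples of a 2x2
# parameter get their prior shape back; scalar-shaped parameters pass through.
import torch
flat = torch.randn(5, 4)                                  # 5 samples, flattened 2x2
print(_unflattify(flat, torch.Size([2, 2])).shape)        # torch.Size([5, 2, 2])
print(_unflattify(torch.randn(5), torch.Size([])).shape)  # torch.Size([5])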
| 18,175
|
def sc_iter_fasta_brute(file_name, inmem=False):
""" Iter over fasta file."""
header = None
seq = []
with open(file_name) as fh:
if inmem:
data = fh.readlines()
else:
data = fh
for line in data:
if line.startswith(">"):
if seq:
sequence = "".join(seq)
yield (header, sequence)
header = line
seq = []
continue
seq.append(line)
if seq or header:
sequence = "".join(seq)
yield (header, sequence)
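# Usage sketch (not part of the original module): iterate a small FASTA file
# written to a temporary path; headers and sequences keep their newlines.
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".fa", delete=False) as tmp:
    tmp.write(">seq1\nACGT\nTTGA\n>seq2\nGGCC\n")
for header, sequence in sc_iter_fasta_brute(tmp.name):
    print(header.strip(), sequence.replace("\n", ""))
os.remove(tmp.name)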
| 18,176
|
def theme_cmd(data, buffer, args):
"""Callback for /theme command."""
if args == '':
weechat.command('', '/help ' + SCRIPT_COMMAND)
return weechat.WEECHAT_RC_OK
argv = args.strip().split(' ', 1)
if len(argv) == 0:
return weechat.WEECHAT_RC_OK
if argv[0] in ('install',):
weechat.prnt('',
'{0}: action "{1}" not developed'
''.format(SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# check arguments
if len(argv) < 2:
if argv[0] in ('install', 'installfile', 'save', 'export'):
weechat.prnt('',
'{0}: too few arguments for action "{1}"'
''.format(SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# execute asked action
if argv[0] == 'list':
theme_list(argv[1] if len(argv) >= 2 else '')
elif argv[0] == 'info':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.info('Info about theme "{0}":'.format(filename))
else:
theme.info('Info about current theme:')
elif argv[0] == 'show':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.show('Content of theme "{0}":'.format(filename))
else:
theme.show('Content of current theme:')
elif argv[0] == 'installfile':
theme = Theme()
theme.save(theme_config_get_undo())
theme = Theme(argv[1])
if theme.isok():
theme.install()
elif argv[0] == 'update':
theme_update()
elif argv[0] == 'undo':
theme = Theme(theme_config_get_undo())
if theme.isok():
theme.install()
elif argv[0] == 'save':
theme = Theme()
theme.save(argv[1])
elif argv[0] == 'backup':
theme = Theme()
theme.save(theme_config_get_backup())
elif argv[0] == 'restore':
theme = Theme(theme_config_get_backup())
if theme.isok():
theme.install()
elif argv[0] == 'export':
htheme = HtmlTheme()
whitebg = False
htmlfile = argv[1]
argv2 = args.strip().split(' ', 2)
if len(argv2) >= 3 and argv2[1] == 'white':
whitebg = True
htmlfile = argv2[2]
htheme.save_html(htmlfile, whitebg)
return weechat.WEECHAT_RC_OK
| 18,177
|
def get_unique_chemical_names(reagents):
"""Get the unique chemical species names in a list of reagents.
The concentrations of these species define the vector space in which we sample possible experiments
:param reagents: a list of perovskitereagent objects
    :return: a list of the unique chemical names in all of the reagents
"""
chemical_species = set()
if isinstance(reagents, dict):
reagents = [v for v in reagents.values()]
for reagent in reagents:
chemical_species.update(reagent.chemicals)
return sorted(list(chemical_species))
| 18,178
|
def get_sorted_keys(dict_to_sort):
"""Gets the keys from a dict and sorts them in ascending order.
Assumes keys are of the form Ni, where N is a letter and i is an integer.
Args:
dict_to_sort (dict): dict whose keys need sorting
Returns:
list: list of sorted keys from dict_to_sort
"""
sorted_keys = list(dict_to_sort.keys())
sorted_keys.sort(key=lambda x: int(x[1:]))
return sorted_keys
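# Usage sketch (not part of the original module): keys of the form <letter><int>
# are ordered by their numeric part, not lexicographically.
d = {"B10": 1, "B2": 2, "B1": 3}
print(get_sorted_keys(d))  # ['B1', 'B2', 'B10']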
| 18,179
|
def model_3d(psrs, psd='powerlaw', noisedict=None, components=30,
gamma_common=None, upper_limit=False, bayesephem=False,
wideband=False):
"""
Reads in list of enterprise Pulsar instance and returns a PTA
instantiated with model 3D from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. GWB with HD correlations modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover', 'spectrum']
2. Monopole signal modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover', 'spectrum']
3. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# red noise
s = red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='hd', name='gw')
# monopole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='monopole', name='monopole')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# timing model
s += gp_signals.TimingModel()
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not wideband:
s2 = s + white_noise_block(vary=False, inc_ecorr=True)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=False, inc_ecorr=False)
models.append(s3(p))
# set up PTA
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
    else:
        pta.set_default_params(noisedict)
return pta
| 18,180
|
def max_votes(x):
"""
Return the maximum occurrence of predicted class.
Notes
-----
If number of class 0 prediction is equal to number of class 1 predictions, NO_VOTE will be returned.
E.g.
Num_preds_0 = 25,
Num_preds_1 = 25,
Num_preds_NO_VOTE = 0,
returned vote : "NO_VOTE".
"""
if x['Num_preds_0'] > x['Num_preds_1'] and x['Num_preds_0'] > x['Num_preds_NO_VOTE']:
return 0
elif x['Num_preds_1'] > x['Num_preds_0'] and x['Num_preds_1'] > x['Num_preds_NO_VOTE']:
return 1
else:
return 'NO_VOTE'
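# Usage sketch (not part of the original module): x can be any mapping with the
# three count fields, e.g. a DataFrame row; ties fall through to "NO_VOTE".
print(max_votes({"Num_preds_0": 25, "Num_preds_1": 30, "Num_preds_NO_VOTE": 0}))  # 1
print(max_votes({"Num_preds_0": 25, "Num_preds_1": 25, "Num_preds_NO_VOTE": 0}))  # NO_VOTE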
| 18,181
|
def misclassification_error(y_true: np.ndarray, y_pred: np.ndarray, normalize: bool = True) -> float:
"""
Calculate misclassification loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
normalize: bool, default = True
Normalize by number of samples or not
Returns
-------
Misclassification of given predictions
"""
n = y_true.shape[-1]
counter = np.ones_like(y_true)
error = counter[y_true!=y_pred].sum(axis=-1)
return error / n if normalize else error
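# Usage sketch (not part of the original module): two of five labels disagree.
import numpy as np
y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 1, 1, 0, 0])
print(misclassification_error(y_true, y_pred))                   # 0.4
print(misclassification_error(y_true, y_pred, normalize=False))  # 2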
| 18,182
|
def dunif(x, minimum=0,maximum=1):
"""
    Calculates the probability density of the uniform distribution at x
"""
from scipy.stats import uniform
result=uniform.pdf(x=x,loc=minimum,scale=maximum-minimum)
return result
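# Usage sketch (not part of the original module): the density of U(0, 4) is
# 0.25 inside the support and 0 outside it.
print(dunif(2, minimum=0, maximum=4))  # 0.25
print(dunif(5, minimum=0, maximum=4))  # 0.0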
| 18,183
|
def fatal(msg):
""" Print an error message and die """
global globalErrorHandler
globalErrorHandler._fatal(msg)
| 18,184
|
def _generate_upsert_sql(mon_loc):
"""
Generate SQL to insert/update.
"""
mon_loc_db = [(k, _manipulate_values(v, k in TIME_COLUMNS)) for k, v in mon_loc.items()]
all_columns = ','.join(col for (col, _) in mon_loc_db)
all_values = ','.join(value for (_, value) in mon_loc_db)
update_query = ','.join(f"{k}={v}" for (k, v) in mon_loc_db if k not in ['AGENCY_CD', 'SITE_NO'])
statement = (
f"MERGE INTO GW_DATA_PORTAL.WELL_REGISTRY_STG a "
f"USING (SELECT '{mon_loc['AGENCY_CD']}' AGENCY_CD, '{mon_loc['SITE_NO']}' "
f"SITE_NO FROM DUAL) b ON (a.AGENCY_CD = b.AGENCY_CD AND a.SITE_NO = b.SITE_NO) "
f"WHEN MATCHED THEN UPDATE SET {update_query} WHEN NOT MATCHED THEN INSERT ({all_columns}) VALUES ({all_values})"
)
return statement
| 18,185
|
def filtered_qs(func):
"""
#TODO: zrobić, obsługę funkcji z argumentami
:param func:
:return:
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
ret_qs = func(self)
return ret_qs.filter(*args, **kwargs)
return wrapped
| 18,186
|
def dict2obj(d):
"""Given a dictionary, return an object with the keys mapped to attributes
and the values mapped to attribute values. This is recursive, so nested
dictionaries are nested objects."""
top = type('dict2obj', (object,), d)
seqs = tuple, list, set, frozenset
for k, v in d.items():
if isinstance(v, dict):
setattr(
top,
k, dict2obj(v)
)
elif isinstance(v, seqs):
setattr(
top,
k, type(v)(dict2obj(sj) if isinstance(sj, dict) else sj for sj in v)
)
else:
setattr(top, k, v)
return top
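# Usage sketch (not part of the original module): nested dicts become nested
# attribute access, and sequences keep their type with dict elements converted.
cfg = dict2obj({"db": {"host": "localhost", "port": 5432}, "tags": ["a", "b"]})
print(cfg.db.host, cfg.db.port)  # localhost 5432
print(cfg.tags)                  # ['a', 'b']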
| 18,187
|
async def paste(pstl):
""" For .paste command, allows using
dogbin functionality with the command. """
dogbin_final_url = ""
match = pstl.pattern_match.group(1).strip()
reply_id = pstl.reply_to_msg_id
if not match and not reply_id:
await pstl.edit("There's nothing to paste.")
return
if match:
message = match
elif reply_id:
message = (await pstl.get_reply_message()).message
# Dogbin
await pstl.edit("`Pasting text . . .`")
resp = post(DOGBIN_URL + "documents", data=message.encode('utf-8'))
if resp.status_code == 200:
response = resp.json()
key = response['key']
dogbin_final_url = DOGBIN_URL + key
if response['isUrl']:
reply_text = ("`Pasted successfully!`\n\n"
f"`Shortened URL:` {dogbin_final_url}\n\n"
"Original(non-shortened) URLs`\n"
f"`Dogbin URL`: {DOGBIN_URL}v/{key}\n")
else:
reply_text = ("`Pasted successfully!`\n\n"
f"`Dogbin URL`: {dogbin_final_url}")
else:
reply_text = ("`Failed to reach Dogbin`")
await pstl.edit(reply_text)
if BOTLOG:
await pstl.client.send_message(
BOTLOG_CHATID,
"Paste query `" + message + "` was executed successfully",
)
| 18,188
|
def customized_algorithm_plot(experiment_name='finite_simple_sanity', data_path=_DEFAULT_DATA_PATH):
"""Simple plot of average instantaneous regret by agent, per timestep.
Args:
experiment_name: string = name of experiment config.
data_path: string = where to look for the files.
Returns:
p: ggplot plot
"""
df = load_data(experiment_name, data_path)
plt_df = (df.groupby(['t', 'agent'])
.agg({'instant_regret': np.mean})
.reset_index())
plt_df['agent_new_name'] = plt_df.agent.apply(rename_agent)
custom_labels = ['Laplace TS','Langevin TS','TS','bootstrap TS']
custom_colors = ["#E41A1C","#377EB8","#4DAF4A","#984EA3"]
p = (gg.ggplot(plt_df)
+ gg.aes('t', 'instant_regret', colour='agent_new_name')
+ gg.geom_line(size=1.25, alpha=0.75)
+ gg.xlab('time period (t)')
+ gg.ylab('per-period regret')
+ gg.scale_color_manual(name='agent', labels = custom_labels,values=custom_colors))
return p
| 18,189
|
def main(target_dir=None):
"""
Read gyp files and create Android.mk for the Android framework's
external/skia.
@param target_dir Directory in which to place 'Android.mk'. If None, the file
will be placed in skia's root directory.
"""
# Create a temporary folder to hold gyp and gypd files. Create it in SKIA_DIR
# so that it is a sibling of gyp/, so the relationships between gyp files and
# other files (e.g. platform_tools/android/gyp/dependencies.gypi, referenced
# by android_deps.gyp as a relative path) is unchanged.
# Use mkdtemp to find an unused folder name, but then delete it so copytree
# can be called with a non-existent directory.
tmp_folder = tempfile.mkdtemp(dir=SKIA_DIR)
os.rmdir(tmp_folder)
shutil.copytree(os.path.join(SKIA_DIR, GYP_FOLDER), tmp_folder)
try:
main_gyp_file = 'android_framework_lib.gyp'
print 'Creating Android.mk',
# Generate a separate VarsDict for each architecture type. For each
# archtype:
# 1. call android_framework_gyp.main() to generate gypd files
# 2. call parse_gypd to read those gypd files into the VarsDict
# 3. delete the gypd files
#
# Once we have the VarsDict for each architecture type, we combine them all
# into a single Android.mk file, which can build targets of any
# architecture type.
    # The default uses a non-existent archtype, to find all the general
# variable definitions.
default_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'other',
False)
arm_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm', False)
arm_neon_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm',
True)
x86_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86', False)
mips_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips', False)
arm64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm64',
False)
# Compute the intersection of all targets. All the files in the intersection
# should be part of the makefile always. Each dict will now contain trimmed
# lists containing only variable definitions specific to that configuration.
var_dict_list = [default_var_dict, arm_var_dict, arm_neon_var_dict,
x86_var_dict, mips_var_dict, arm64_var_dict]
common = vars_dict_lib.intersect(var_dict_list)
# Create SkUserConfig
user_config = os.path.join(SKIA_DIR, 'include', 'config', 'SkUserConfig.h')
if target_dir:
dst_dir = target_dir
else:
dst_dir = os.path.join(SKIA_DIR, 'include', 'core')
generate_user_config.generate_user_config(
original_sk_user_config=user_config, target_dir=dst_dir,
ordered_set=common.DEFINES)
# Now that the defines have been written to SkUserConfig, they are not
# needed in Android.mk.
common.DEFINES.reset()
# Further trim arm_neon_var_dict with arm_var_dict. After this call,
# arm_var_dict (which will now be the intersection) includes all definitions
# used by both arm and arm + neon, and arm_neon_var_dict will only contain
# those specific to arm + neon.
arm_var_dict = vars_dict_lib.intersect([arm_var_dict, arm_neon_var_dict])
# Now create a list of VarsDictData holding everything but common.
deviations_from_common = []
deviations_from_common.append(makefile_writer.VarsDictData(
arm_var_dict, 'arm'))
deviations_from_common.append(makefile_writer.VarsDictData(
arm_neon_var_dict, 'arm', 'ARCH_ARM_HAVE_NEON'))
deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
'x86'))
# Currently, x86_64 is identical to x86
deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
'x86_64'))
deviations_from_common.append(makefile_writer.VarsDictData(mips_var_dict,
'mips'))
deviations_from_common.append(makefile_writer.VarsDictData(arm64_var_dict,
'arm64'))
makefile_writer.write_android_mk(target_dir=target_dir,
common=common, deviations_from_common=deviations_from_common)
finally:
shutil.rmtree(tmp_folder)
| 18,190
|
def write_var(db, blob_id, body):
"""
"""
size = len(body)
with open(make_path(blob_id), "wb") as f:
f.write(body)
cur = db.cursor()
cur.execute("UPDATE objs SET size=?, status=? WHERE id=?", (size, STATUS_COMPLETE, blob_id))
db.commit()
| 18,191
|
def _get_log_time_scale(units):
"""Retrieves the ``log10()`` of the scale factor for a given time unit.
Args:
units (str): String specifying the units
(one of ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
Returns:
The ``log10()`` of the scale factor for the time unit.
"""
scale = {"fs": -15, "ps": -12, "ns": -9, "us": -6, "ms": -3, "sec": 0}
units_lwr = units.lower()
if units_lwr not in scale:
raise ValueError(f"Invalid unit ({units}) provided")
else:
return scale[units_lwr]
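# Usage sketch (not part of the original module): the lookup is case-insensitive
# and unknown units raise ValueError.
print(_get_log_time_scale("ns"))   # -9
print(_get_log_time_scale("SEC"))  # 0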
| 18,192
|
def match_info_multithreading():
"""
    Match info using multiple threads, processing users in batches of 100.
:return:
"""
start = 0
num = 100
total = vj['user'].count()
thread_pool = []
while start < total:
th = threading.Thread(target=match_info, args=(start, num))
thread_pool.append(th)
start += num
for th in thread_pool:
th.start()
for th in thread_pool:
th.join()
| 18,193
|
def print_status(status):
""" Helper function printing your status """
print("This is your status:")
print("\n---\n")
print("\n".join(l.strip() for l in status))
| 18,194
|
def resolvermatch(request):
"""Add the name of the currently resolved pattern to the RequestContext"""
match = resolve(request.path)
if match:
return {'resolved': match}
else:
return {}
| 18,195
|
def create_vlan(host, port, user, password, interface, int_id, vlan, ip, mask, template, config):
"""Function to create a subinterface on CSR1000V."""
intfc = re.compile(r'^(\D+)(\d+)$')
m = intfc.match(interface + int_id)
if m is None:
print("Invalid interface name. Valid example: ", BASE)
sys.exit()
# create the XML configuration issued via NETCONF
create_xml_config(template, config, interface, int_id, vlan, ip, mask)
# open the NETCONF session
with manager.connect(host=host, port=port, username=user, password=password,
hostkey_verify=False, device_params={'name': 'default'},
allow_agent=False, look_for_keys=False) as m:
with open(config) as f:
try:
# issue the edit-config operation with the XML config
rpc_reply = m.edit_config(target='running', config=f.read())
except Exception as e:
print("Encountered the following RPC error!")
print(e)
sys.exit()
# validate the RPC Reply returns "ok"
if rpc_reply.ok is not True:
print("Encountered a problem when configuring the device!")
sys.exit()
| 18,196
|
def selection_sort(arr: list) -> list:
"""
Main sorting function. Using "find_smallest" function as part
of the algorythm.
:param arr: list to sort
:return: sorted list
"""
new_arr = []
    for _ in range(len(arr)):
smallest = find_smallest(arr)
new_arr.append(arr.pop(smallest))
return new_arr
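# Usage sketch: selection_sort relies on a find_smallest helper that is not shown
# above; a minimal assumed version is defined here purely for illustration.
def find_smallest(arr):
    """Return the index of the smallest element."""
    return min(range(len(arr)), key=arr.__getitem__)

print(selection_sort([5, 3, 6, 2, 10]))  # [2, 3, 5, 6, 10]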
| 18,197
|
def get_primary_monitor():
"""
Returns the primary monitor.
Wrapper for:
GLFWmonitor* glfwGetPrimaryMonitor(void);
"""
return _glfw.glfwGetPrimaryMonitor()
| 18,198
|
def query_people_and_institutions(rc, names):
"""Get the people and institutions names."""
people, institutions = [], []
for person_name in names:
person_found = fuzzy_retrieval(all_docs_from_collection(
rc.client, "people"),
["name", "aka", "_id"],
person_name, case_sensitive=False)
if not person_found:
person_found = fuzzy_retrieval(all_docs_from_collection(
rc.client, "contacts"),
["name", "aka", "_id"], person_name, case_sensitive=False)
if not person_found:
print(
"WARNING: {} not found in contacts or people. Check aka".format(
person_name))
else:
people.append(person_found['name'])
inst = fuzzy_retrieval(all_docs_from_collection(
rc.client, "institutions"),
["name", "aka", "_id"],
person_found["institution"], case_sensitive=False)
if inst:
institutions.append(inst["name"])
else:
institutions.append(person_found.get("institution", "missing"))
print("WARNING: {} missing from institutions".format(
person_found["institution"]))
else:
people.append(person_found['name'])
pinst = get_recent_org(person_found)
inst = fuzzy_retrieval(all_docs_from_collection(
rc.client, "institutions"), ["name", "aka", "_id"],
pinst, case_sensitive=False)
if inst:
institutions.append(inst["name"])
else:
institutions.append(pinst)
print(
"WARNING: {} missing from institutions".format(
pinst))
return people, institutions
| 18,199
|