| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def check_disabled(func):
"""
Decorator to wrap up checking if the Backdrop
connection is set to disabled or not
"""
@wraps(func)
def _check(*args, **kwargs):
if _DISABLED:
return
else:
return func(*args, **kwargs)
return _check
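A minimal usage sketch of the decorator above; the module-level `_DISABLED` flag and the `functools.wraps` import are assumptions standing in for the real Backdrop connection settings:

from functools import wraps

_DISABLED = False  # assumed module-level flag toggled by the Backdrop connection settings

def check_disabled(func):
    @wraps(func)
    def _check(*args, **kwargs):
        if _DISABLED:
            return
        return func(*args, **kwargs)
    return _check

@check_disabled
def record_hit(path):
    return "recorded {}".format(path)

print(record_hit("/home"))  # "recorded /home"
_DISABLED = True
print(record_hit("/home"))  # None: the wrapped call is skipped while disabled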
| 16,900
|
def debug(message): #pragma: no cover
""" Utility debug function to ease logging. """
_leverage_logger.debug(message)
| 16,901
|
def bbox_from_points(points):
"""Construct a numeric list representing a bounding box from polygon coordinates in page representation."""
xys = [[int(p) for p in pair.split(',')] for pair in points.split(' ')]
return bbox_from_polygon(xys)
| 16,902
|
def return_true():
"""Return True
Simple function used to check liveness of workers.
"""
return True
| 16,903
|
def create_list(input_list):
"""Construct the list of items to turn into a table. File and string inputs supported"""
if os.path.isfile(input_list):
with open(input_list, 'r', encoding='UTF-8') as ifile:
return [line.rstrip() for line in ifile]
return input_list.split(',')
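A short, self-contained sketch of the two input paths supported above (the function body is repeated so the snippet runs on its own); the temporary file stands in for a real item list:

import os
import tempfile

def create_list(input_list):
    """Construct the list of items to turn into a table. File and string inputs supported"""
    if os.path.isfile(input_list):
        with open(input_list, 'r', encoding='UTF-8') as ifile:
            return [line.rstrip() for line in ifile]
    return input_list.split(',')

print(create_list("alpha,beta,gamma"))  # ['alpha', 'beta', 'gamma']

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("one\ntwo\nthree\n")
print(create_list(tmp.name))  # ['one', 'two', 'three']
os.remove(tmp.name)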
| 16,904
|
def get_active_test_suite():
"""
    Returns the test suite that was last run
>>> get_active_test_suite()
"Hello"
"""
return TEST_RUNNER_STATE.test_suite
| 16,905
|
def go_test(name, library = None, **kwargs):
"""This macro wraps the go_test rule provided by the Bazel Go rules
to silence a deprecation warning for use of the "library" attribute.
It is otherwise equivalent in function to a go_test.
"""
# For internal tests (defined in the same package), we need to embed
# the library under test, but this is not needed for external tests.
embed = [library] if library else []
_go_test(
name = name,
embed = embed,
**kwargs
)
| 16,906
|
def header_from_stream(stream, _magic=None) -> (dict, list, int):
"""
Parse SAM formatted header from stream.
Dict of header values returned is structured as such: {Header tag:[ {Attribute tag: value}, ]}.
Header tags can occur more than once and so each list item represents a different tag line.
:param stream: Stream containing header data.
:param _magic: Data consumed from stream while peeking. Will be prepended to read data.
:return: Tuple containing (Dict of header values, list of Reference objects, placeholder to keep return value consistent with header_from_buffer()).
"""
header = defaultdict(list)
    # slice rather than index so the comparison is bytes-to-bytes in Python 3
    while stream.peek(1)[0:1] == b'@':
line = stream.readline()
tag = line[1:2]
if tag == b'CO':
header[tag].append(line[4:])
else:
header[tag].append({m[0]: m[1] for m in header_re.findall(line)})
return header, [bam.Reference(ref[b'SN'], int(ref[b'LN'])) for ref in header.pop(b'SQ')] if b'SQ' in header else [], 0
| 16,907
|
def walk_binary_file_or_stdin(filepath, buffer_size = 32768):
"""
Yield 'buffer_size' bytes from filepath until EOF, or from
standard input when 'filepath' is '-'.
"""
if filepath == '-':
return walk_binary_stdin(buffer_size)
else:
return walk_binary_file(filepath, buffer_size)
| 16,908
|
def register_device() -> device_pb2.DeviceResponse:
"""
Now that the client credentials are set, the device can be registered. The device is
registered by instantiating an OauthService object and using the register() method.
The OauthService requires a Config object and an ISecureCredentialStore object to be constructed.
Once the register method is called, a DeviceResponse object will be returned.
NOTE: This function will check if the device id set in config.json has already been registered.
If it has, then the DeviceResponse for the existing registered device will be returned.
Otherwise, the registration will proceed and the DeviceResponse for the new registration will
be returned.
Returns:
A DeviceResponse object indicating whether or not the device registration was successful
"""
oauth_service: OauthService = helpers.get_oauth_service()
if check_device_is_registered():
print(
f"Registration already exists for device_id = {helpers.environment_config['device_id']}"
)
device_response: device_pb2.DeviceResponse = oauth_service.get_who_am_i()
else:
print(f"Registering device_id = {helpers.environment_config['device_id']}")
device_response: device_pb2.DeviceResponse = oauth_service.register(
device_id=helpers.environment_config["device_id"],
device_name=helpers.environment_config["device_name"],
credential=helpers.environment_config["tenant_secret"],
)
save_environment_config()
return device_response
| 16,909
|
def _sto_to_graph(agent: af.SubTaskOption) -> subgraph.Node:
"""Convert a `SubTaskOption` to a `Graph`."""
node_label = '{},{},{}'.format(agent.name or 'SubTask Option',
agent.subtask.name or 'SubTask',
agent.agent.name or 'Policy')
return subgraph.Node(label=node_label, type='sub_task_option')
| 16,910
|
def _hdf5_write_data(filename, data, tablename=None, mode='w', append=False,
header={}, units={}, comments={}, aliases={}, **kwargs):
""" Write table into HDF format
Parameters
----------
filename : file path, or tables.File instance
        File to write to. If already opened, it must be writable (mode='w' or 'a')
data: recarray
data to write to the new file
tablename: str
path of the node including table's name
mode: str
in ('w', 'a') mode to open the file
    append: bool
        if set, append data to an existing table instead of overwriting
header: dict
table header
units: dict
dictionary of units
    aliases: dict
        dictionary of column name aliases
comments: dict
comments/description of keywords
.. note::
other keywords are forwarded to :func:`tables.open_file`
"""
if hasattr(filename, 'read'):
raise Exception("HDF backend does not implement stream")
if append is True:
mode = 'a'
silent = kwargs.pop('silent', False)
if isinstance(filename, tables.File):
if (filename.mode != mode) & (mode != 'r'):
raise tables.FileModeError('The file is already opened in a different mode')
hd5 = filename
else:
hd5 = tables.open_file(filename, mode=mode)
# check table name and path
tablename = tablename or header.get('NAME', None)
if tablename in ('', None, 'Noname', 'None'):
tablename = '/data'
w = tablename.split('/')
where = '/'.join(w[:-1])
name = w[-1]
if where in ('', None):
where = '/'
if where[0] != '/':
where = '/' + where
if append:
try:
t = hd5.get_node(where + name)
t.append(data.astype(t.description._v_dtype))
t.flush()
except tables.NoSuchNodeError:
if not silent:
print(("Warning: Table {0} does not exists. \n A new table will be created").format(where + name))
append = False
if not append:
# t = hd5.createTable(where, name, data, **kwargs)
t = hd5.create_table(where, name, data, **kwargs)
# update header
for k, v in header.items():
if (k == 'FILTERS') & (float(t.attrs['VERSION']) >= 2.0):
t.attrs[k.lower()] = v
else:
t.attrs[k] = v
if 'TITLE' not in header:
t.attrs['TITLE'] = name
# add column descriptions and units
for e, colname in enumerate(data.dtype.names):
_u = units.get(colname, None)
_d = comments.get(colname, None)
        if _u is not None:
            t.attrs['FIELD_{0:d}_UNIT'.format(e)] = _u
        if _d is not None:
            t.attrs['FIELD_{0:d}_DESC'.format(e)] = _d
# add aliases
for i, (k, v) in enumerate(aliases.items()):
t.attrs['ALIAS{0:d}'.format(i)] = '{0:s}={1:s}'.format(k, v)
t.flush()
if not isinstance(filename, tables.File):
hd5.flush()
hd5.close()
| 16,911
|
def filter_input(self, forced=False, context=None):
"""
Passes each hunk (file or code) to the 'input' methods
of the compressor filters.
"""
content = []
for hunk in self.hunks(forced, context=context):
content.append(hunk)
return content
| 16,912
|
def create_venn3(df, comparison_triple):
"""
Create a 3 circle venn diagram
Parameters
----------
df: DataFrame
        df contains all option ratings for each feature
    comparison_triple: list
        Three strings. Determines which options to compare.
"""
list_of_dicts = df[comparison_triple].T.to_dict('records')
list_of_strings = []
for key, value in list_of_dicts[0].items():
list_of_strings.append(str(key)+':'+str(value))
set_A = set(list_of_strings)
try:
list_of_strings = []
for key, value in list_of_dicts[1].items():
list_of_strings.append(str(key)+':'+str(value))
set_B = set(list_of_strings)
except:
set_B = set_A
try:
list_of_strings = []
for key, value in list_of_dicts[2].items():
list_of_strings.append(str(key)+':'+str(value))
set_C = set(list_of_strings)
except:
set_C = set_A
list_of_sets = [set_A, set_B, set_C]
# Careful! set_A is associated with 100, set_B is associated with 010, set_C is associated with 001
strings_100 = list(set_A.difference(set_B).difference(set_C))
strings_010 = list(set_B.difference(set_A).difference(set_C))
strings_001 = list(set_C.difference(set_A).difference(set_B))
strings_110 = list(set_A.intersection(set_B).difference(set_C))
strings_101 = list(set_A.intersection(set_C).difference(set_B))
strings_011 = list(set_B.intersection(set_C).difference(set_A))
strings_111 = list(set_A.intersection(set_B).intersection(set_C))
plt.figure(figsize=(10,10))
# Again, careful! the ordering is backwards in the same way as above
v=venn3(subsets = (len(strings_100),
len(strings_010),
len(strings_110),
len(strings_001),
len(strings_101),
len(strings_011),
len(strings_111)),
set_labels = comparison_triple)
v.get_label_by_id('001').set_text('\n'.join(strings_001))
v.get_label_by_id('010').set_text('\n'.join(strings_010))
v.get_label_by_id('100').set_text('\n'.join(strings_100))
try:
v.get_label_by_id('011').set_text('\n'.join(strings_011))
except:
pass #v.get_label_by_id('011').set_text('no overlap')
try:
v.get_label_by_id('101').set_text('\n'.join(strings_101))
except:
pass #v.get_label_by_id('101').set_text('no overlap')
try:
v.get_label_by_id('110').set_text('\n'.join(strings_110))
except:
pass #v.get_label_by_id('110').set_text('no overlap')
try:
v.get_label_by_id('111').set_text('\n'.join(strings_111))
except:
pass #v.get_label_by_id('111').set_text('no overlap')
plt.title('Venn Diagram')
plt.show()
return
| 16,913
|
def delete(request, user):
""" Deletes a poll """
poll_id = request.POST.get('poll_id')
try:
poll = Poll.objects.get(pk=poll_id)
except:
return JsonResponse({'error': 'Invalid poll_id'}, status=404)
if poll.user.id != user.id:
return JsonResponse({'error': 'You cannot delete this poll'}, status=403)
poll.delete()
return JsonResponse({'message': 'Poll was deleted'})
| 16,914
|
def get_nodes(collection: str, node_link: Optional[str] = None):
"""Get the Node based on its ID or kind"""
# pylint: disable=too-many-locals,too-many-return-statements,too-many-branches
user_id = to_object_id(g.user._id)
can_view_others_operations = g.user.check_role(IAMPolicies.CAN_VIEW_OTHERS_OPERATIONS)
can_view_others_workflows = g.user.check_role(IAMPolicies.CAN_VIEW_OTHERS_WORKFLOWS)
can_view_operations = g.user.check_role(IAMPolicies.CAN_VIEW_OPERATIONS)
can_view_workflows = g.user.check_role(IAMPolicies.CAN_VIEW_WORKFLOWS)
can_create_operations = g.user.check_role(IAMPolicies.CAN_CREATE_OPERATIONS)
can_create_workflows = g.user.check_role(IAMPolicies.CAN_CREATE_WORKFLOWS)
if node_link in executor_manager.kind_to_executor_class and collection == Collections.TEMPLATES:
# if node_link is a base node
# i.e. /templates/basic-bash
kind = node_link
if kind in workflow_manager.kind_to_workflow_dict and (not can_view_workflows or not can_create_workflows):
return make_permission_denied()
if kind in operation_manager.kind_to_operation_dict and (not can_view_operations or not can_create_operations):
return make_permission_denied()
node: Node = executor_manager.kind_to_executor_class[kind].get_default_node(
is_workflow=kind in workflow_manager.kind_to_workflow_dict
)
if isinstance(node, tuple):
data = node[0].to_dict()
tour_steps = node[1]
else:
data = node.to_dict()
tour_steps = []
data['kind'] = kind
return make_success_response({
'node': data,
'tour_steps': tour_steps,
'plugins_dict': PLUGINS_DICT,
})
else:
# when node_link is an id of the object
try:
node_id = to_object_id(node_link)
except bson.objectid.InvalidId: # type: ignore
return make_fail_response('Invalid ID'), 404
node_dict = node_collection_managers[collection].get_db_node(node_id, user_id)
logger.debug(node_dict)
if node_dict:
is_owner = node_dict['author'] == user_id
kind = node_dict['kind']
if kind in workflow_manager.kind_to_workflow_dict and not can_view_workflows:
return make_permission_denied()
if kind in operation_manager.kind_to_operation_dict and not can_view_operations:
return make_permission_denied()
if kind in workflow_manager.kind_to_workflow_dict and not can_view_others_workflows and not is_owner:
return make_permission_denied()
if kind in operation_manager.kind_to_operation_dict and not can_view_others_operations and not is_owner:
return make_permission_denied()
return make_success_response({
'node': node_dict,
'plugins_dict': PLUGINS_DICT,
})
else:
return make_fail_response(f"Node `{node_link}` was not found"), 404
| 16,915
|
def get_loader(path):
"""Gets the configuration loader for path according to file extension.
Parameters:
path: the path of a configuration file, including the filename
extension.
Returns the loader associated with path's extension within LOADERS.
Throws an UnknownConfigurationException if no such loader exists.
"""
for ext, loader in LOADERS:
fullext = '.' + ext
if path[-len(fullext):] == fullext:
return loader
    raise exception.UnknownConfigurationException(
        "No configuration loader found for path '%s'" % path)
| 16,916
|
def mat_stretch(mat, target):
"""
Changes times of `mat` in-place so that it has the same average BPM and
initial time as target.
Returns `mat` changed in-place.
"""
in_times = mat[:, 1:3]
out_times = target[:, 1:3]
# normalize in [0, 1]
in_times -= in_times.min()
in_times /= in_times.max()
# restretch
new_start = out_times.min()
in_times *= (out_times.max() - new_start)
in_times += new_start
return mat
| 16,917
|
def parse_range(cpu_range):
"""Create cpu range object"""
if '-' in cpu_range:
[x, y] = cpu_range.split('-') # pylint: disable=invalid-name
cpus = range(int(x), int(y)+1)
if int(x) >= int(y):
raise ValueError("incorrect cpu range: " + cpu_range)
else:
cpus = [int(cpu_range)]
return cpus
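A brief usage sketch of the range parser above (the function body is repeated so the example is self-contained); note that a reversed range such as "5-2" raises the ValueError:

def parse_range(cpu_range):
    """Create cpu range object"""
    if '-' in cpu_range:
        [x, y] = cpu_range.split('-')
        cpus = range(int(x), int(y) + 1)
        if int(x) >= int(y):
            raise ValueError("incorrect cpu range: " + cpu_range)
    else:
        cpus = [int(cpu_range)]
    return cpus

print(list(parse_range("2-5")))  # [2, 3, 4, 5]
print(parse_range("7"))          # [7]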
| 16,918
|
def causal_parents(node, graph):
"""
Returns the nodes (string names) that are causal parents of the node (have the edge type "causes_or_promotes"), else returns empty list.
Parameters
node - name of the node (string)
graph - networkx graph object
"""
node_causal_parents = []
if list(graph.predecessors(node)):
possibleCausalParents = graph.predecessors(node)
for possibleCausalParent in possibleCausalParents:
if graph[possibleCausalParent][node]["type"] == "causes_or_promotes":
node_causal_parents.append(possibleCausalParent)
return node_causal_parents
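A small illustrative call, assuming `causal_parents` from the row above is already in scope and that `networkx` is installed; the node names and edge types here are made up for the example:

import networkx as nx

graph = nx.DiGraph()
graph.add_edge("deforestation", "co2 increase", type="causes_or_promotes")
graph.add_edge("reforestation", "co2 increase", type="is_inhibited_by")

print(causal_parents("co2 increase", graph))   # ['deforestation']
print(causal_parents("deforestation", graph))  # [] (node has no predecessors)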
| 16,919
|
def create_app(settings_override=None):
"""
Create a test application.
:param settings_override: Override settings
:type settings_override: dict
:return: Flask app
"""
app = Flask(__name__)
params = {
'DEBUG': True,
'WEBPACK_MANIFEST_PATH': './build/manifest.json'
}
app.config.update(params)
if settings_override:
app.config.update(settings_override)
webpack.init_app(app)
return app
| 16,920
|
async def user_me(current_user=Depends(get_current_active_user)):
"""
Get own user
"""
return current_user
| 16,921
|
def _check_3dt_version():
"""
The function checks if cluster has diagnostics capability.
:raises: DCOSException if cluster does not have diagnostics capability
"""
cosmos = packagemanager.PackageManager(get_cosmos_url())
if not cosmos.has_capability('SUPPORT_CLUSTER_REPORT'):
raise DCOSException(
'DC/OS backend does not support diagnostics capabilities in this '
'version. Must be DC/OS >= 1.8')
| 16,922
|
def get_password_hash(password: str, salt: Optional[str] = None) -> Tuple[str, str]:
"""Get user password hash."""
salt = salt or crypt.mksalt(crypt.METHOD_SHA256)
return salt, crypt.crypt(password, salt)
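A small self-contained sketch of the salt-then-hash flow above; note that the standard-library `crypt` module is Unix-only (and deprecated in recent Python releases), so this is illustrative rather than a recommendation:

import crypt
from typing import Optional, Tuple

def get_password_hash(password: str, salt: Optional[str] = None) -> Tuple[str, str]:
    """Get user password hash."""
    salt = salt or crypt.mksalt(crypt.METHOD_SHA256)
    return salt, crypt.crypt(password, salt)

salt, hashed = get_password_hash("s3cret")
# verifying a password means re-hashing it with the stored salt and comparing
assert get_password_hash("s3cret", salt) == (salt, hashed)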
| 16,923
|
def find_iamrole_changes(accounts):
""" Runs watchers/iamrole"""
sm_find_iamrole_changes(accounts)
| 16,924
|
def start_at(gra, key):
""" start a v-matrix at a specific atom
Returns the started vmatrix, along with keys to atoms whose neighbors are
missing from it
"""
symb_dct = atom_symbols(gra)
ngb_keys_dct = atoms_sorted_neighbor_atom_keys(
gra, symbs_first=('X', 'C',), symbs_last=('H',), ords_last=(0.1,))
ngb_keys = ngb_keys_dct[key]
if not ngb_keys:
zma_keys = []
elif len(ngb_keys) == 1:
# Need special handling for atoms with only one neighbor
if symb_dct[key] in ('H', 'X'):
key2 = ngb_keys[0]
zma_keys = (key2,) + ngb_keys_dct[key2]
else:
key2 = ngb_keys[0]
ngb_keys = tuple(k for k in ngb_keys_dct[key2] if k != key)
zma_keys = (key, key2) + ngb_keys
else:
zma_keys = (key,) + ngb_keys_dct[key]
vma = ()
for row, key_ in enumerate(zma_keys):
idx1 = idx2 = idx3 = None
if row > 0:
key1 = next(k for k in ngb_keys_dct[key_] if k in zma_keys[:row])
idx1 = zma_keys.index(key1)
if row > 1:
key2 = next(k for k in ngb_keys_dct[key1] if k in zma_keys[:row]
and k != key_)
idx2 = zma_keys.index(key2)
if row > 2:
key3 = next(k for k in zma_keys[:row]
if k not in (key_, key1, key2))
idx3 = zma_keys.index(key3)
sym = symb_dct[key_]
key_row = [idx1, idx2, idx3]
vma = automol.vmat.add_atom(vma, sym, key_row)
return vma, zma_keys
| 16,925
|
def test_scenariooutline_set_rule_on_all_examples(mocker):
"""A ScenarioOutline should forward a set Rule to all its Examples"""
# given
rule_mock = mocker.MagicMock(name="Rule")
scenario = ScenarioOutline(
1, "Scenario Outline", "My ScenarioOutline", [], None, None, [], []
)
first_example = mocker.MagicMock(name="First Example")
second_example = mocker.MagicMock(name="Second Example")
scenario.examples = [first_example, second_example]
# when
scenario.set_rule(rule_mock)
# then
assert scenario.rule is rule_mock
first_example.set_rule.assert_called_once_with(rule_mock)
second_example.set_rule.assert_called_once_with(rule_mock)
| 16,926
|
def nested_lookup(key, document):
"""
nested document lookup,
works on dicts and lists
:param key: string of key to lookup
:param document: dict or list to lookup
:return: yields item
"""
if isinstance(document, list):
for d in document:
for result in nested_lookup(key, d):
yield result
if isinstance(document, dict):
for k, v in document.items():
if k == key:
yield v
elif isinstance(v, dict):
for result in nested_lookup(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in nested_lookup(key, d):
yield result
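A quick usage sketch, assuming the `nested_lookup` generator above is in scope; it yields every value stored under the key at any depth, across nested dicts and lists:

document = {
    "id": 1,
    "items": [
        {"price": 10},
        {"price": 20, "meta": {"price": 99}},
    ],
}
print(list(nested_lookup("price", document)))  # [10, 20, 99]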
| 16,927
|
def test_kubernetes_resource_get_status_value_incomplete():
"""
This test verifies that KubernetesResource.get_status_value() returns None for an undefined resource.
"""
assert KubernetesResource(dict()).get_status_value("replicas") is None
| 16,928
|
def associate_kitti(detections,
trackers,
det_cates,
iou_threshold,
velocities,
previous_obs,
vdc_weight):
"""
@param detections:
"""
if (len(trackers) == 0):
return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)
"""
Cost from the velocity direction consistency
"""
Y, X = velocity_direction_batch(detections, previous_obs)
inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1]
inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1)
inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1)
diff_angle_cos = inertia_X * X + inertia_Y * Y
diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1)
diff_angle = np.arccos(diff_angle_cos)
diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi
valid_mask = np.ones(previous_obs.shape[0])
valid_mask[np.where(previous_obs[:, 4] < 0)] = 0
valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1)
scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1)
angle_diff_cost = (valid_mask * diff_angle) * vdc_weight
angle_diff_cost = angle_diff_cost.T
angle_diff_cost = angle_diff_cost * scores
"""
Cost from IoU
"""
iou_matrix = iou_batch(detections, trackers)
"""
    With multiple categories, generate the cost for category mismatch
"""
num_dets = detections.shape[0]
num_trk = trackers.shape[0]
cate_matrix = np.zeros((num_dets, num_trk))
for i in range(num_dets):
for j in range(num_trk):
if det_cates[i] != trackers[j, 4]:
cate_matrix[i][j] = -1e6
cost_matrix = - iou_matrix - angle_diff_cost - cate_matrix
if min(iou_matrix.shape) > 0:
a = (iou_matrix > iou_threshold).astype(np.int32)
if a.sum(1).max() == 1 and a.sum(0).max() == 1:
matched_indices = np.stack(np.where(a), axis=1)
else:
matched_indices = linear_assignment(cost_matrix)
else:
matched_indices = np.empty(shape=(0, 2))
unmatched_detections = []
for d, det in enumerate(detections):
if (d not in matched_indices[:, 0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t, trk in enumerate(trackers):
if (t not in matched_indices[:, 1]):
unmatched_trackers.append(t)
# filter out matched with low IOU
matches = []
for m in matched_indices:
if (iou_matrix[m[0], m[1]] < iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1, 2))
if (len(matches) == 0):
matches = np.empty((0, 2), dtype=int)
else:
matches = np.concatenate(matches, axis=0)
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
| 16,929
|
def read_app12(imagefile, seg_offset, meta_dict):
"""Read metadata from an APP12 segment and store in dictionary.
1st parameter = file handle for jpeg file, opened as 'rb' read binary
2nd parameter = the offset of the APP12 segment to be processed
3rd parameter = dictionary being created by readmeta(); found Exif metadata
will be added to this dictionary
"""
imagefile.seek(seg_offset, 0) # go to this APP12 segment
verify_marker(imagefile, 'APP12')
meta_dict['APP12|///ToDo'] = \
('offset = {0}'.format(seg_offset), '', '', 1)
| 16,930
|
def seat_guest(self, speech, guest, timeout):
"""
Start the view 'seatGuest'
:param speech: the text that will be use by the Local Manager for tablet and vocal
:type speech: dict
:param guest: name of the guest to seat
:type guest: string
:param timeout: maximum time to wait for a reaction from the local manager
:type timeout: float
"""
goal = RequestToLocalManagerGoal(action="seatGuest", payload=json.dumps({
'id': str(uuid.uuid4()),
'timestamp': time.time(),
'args': {
'speech': speech,
'guest': guest
}
}))
return self._send_goal_and_wait(goal, timeout)
| 16,931
|
def retreive_retries_and_sqs_handler(task_id):
"""This function retrieve the number of retries and the SQS handler associated to an expired task
Args:
task_id(str): the id of the expired task
Returns:
rtype: dict
Raises:
ClientError: if DynamoDB query failed
"""
try:
response = table.query(
KeyConditionExpression=Key('task_id').eq(task_id)
)
        # Check that exactly one item was returned
return response.get('Items')[0].get('retries'), response.get('Items')[0].get('sqs_handler_id')
except ClientError as e:
errlog.log("Cannot retreive retries and handler for task {} : {}".format(task_id, e))
raise e
| 16,932
|
def configure_base_bos(assembly):
""" Base configure method for a balance of station cost assembly for coupling models to get a full wind plant balance of station cost estimate. It adds a default balance of station cost aggregator component. """
assembly.add('bos', BaseBOSCostAggregator())
assembly.driver.workflow.add('bos')
assembly.connect('bos.bos_costs','bos_costs')
| 16,933
|
def test_authorized_rpc_call2(volttron_instance_encrypt):
"""Tests an agent with two capability calling a method that
requires those same two capabilites
"""
agent1, agent2 = build_two_test_agents(volttron_instance_encrypt)
# Add another required capability
agent1.vip.rpc.allow(agent1.foo, 'can_call_foo2')
volttron_instance_encrypt.add_capabilities(agent2.publickey,
['can_call_foo', 'can_call_foo2'])
gevent.sleep(.1)
result = agent2.vip.rpc.call(agent1.core.identity, 'foo', 42).get(timeout=2)
assert result == 42
| 16,934
|
def raise_error(config,msg,parser=None):
"""Raise error with specified message, either as parser error (when option passed in via command line),
or ValueError (when option passed in via config file).
Arguments:
----------
config : str
Path to config file.
msg : str
Error message to display.
parser : class ``argparse.ArgumentParser``, optional
If this is input, parser error will be raised."""
if parser is None:
raise ValueError("Bad input found in '{0}' -- {1}".format(config,msg))
else:
parser.error(msg)
| 16,935
|
def update_app_installs():
"""
Update app install counts for all published apps.
We break these into chunks so we can bulk index them. Each chunk will
process the apps in it and reindex them in bulk. After all the chunks are
processed we find records that haven't been updated and purge/reindex those
so we nullify their values.
"""
chunk_size = 100
ids = list(Webapp.objects.filter(status=mkt.STATUS_PUBLIC,
disabled_by_user=False)
.values_list('id', flat=True))
for chunk in chunked(ids, chunk_size):
count = 0
times = []
reindex_ids = []
for app in Webapp.objects.filter(id__in=chunk).no_transforms():
reindex = False
count += 1
now = datetime.now()
t_start = time.time()
scores = _get_installs(app.id)
# Update global installs, then per-region installs below.
value = scores.get('all')
if value > 0:
reindex = True
installs, created = app.installs.get_or_create(
region=0, defaults={'value': value})
if not created:
installs.update(value=value, modified=now)
else:
# The value is <= 0 so we can just ignore it.
app.installs.filter(region=0).delete()
for region in mkt.regions.REGIONS_DICT.values():
value = scores.get(region.slug)
if value > 0:
reindex = True
installs, created = app.installs.get_or_create(
region=region.id, defaults={'value': value})
if not created:
installs.update(value=value, modified=now)
else:
# The value is <= 0 so we can just ignore it.
app.installs.filter(region=region.id).delete()
if reindex:
reindex_ids.append(app.id)
times.append(time.time() - t_start)
# Now reindex the apps that actually have a popularity value.
if reindex_ids:
WebappIndexer.run_indexing(reindex_ids)
log.info('Installs calculated for %s apps. Avg time overall: '
'%0.2fs' % (count, sum(times) / count))
# Purge any records that were not updated.
#
# Note: We force update `modified` even if no data changes so any records
# with older modified times can be purged.
now = datetime.now()
midnight = datetime(year=now.year, month=now.month, day=now.day)
qs = Installs.objects.filter(modified__lte=midnight)
# First get the IDs so we know what to reindex.
purged_ids = qs.values_list('addon', flat=True).distinct()
# Then delete them.
qs.delete()
for ids in chunked(purged_ids, chunk_size):
WebappIndexer.run_indexing(ids)
| 16,936
|
def decompose(original_weights: torch.Tensor, mask, threshould: float) -> torch.Tensor:
"""
Calculate the scaling matrix. Use before pruning the current layer.
[Inputs]
original_weights: (N[i], N[i+1])
important_weights: (N[i], P[i+1])
[Outputs]
scaling_matrix: (P[i+1], N[i+1])
"""
important_weights = convert_to_important_weights(original_weights, mask)
msglogger.info("important_weights", important_weights.size())
scaling_matrix = torch.zeros(important_weights.size()[-1], original_weights.size()[-1])
msglogger.info("scaling_matrix", scaling_matrix.size())
msglogger.info("original_weights", original_weights.size())
for i, weight in enumerate(original_weights.transpose(0, -1)):
if weight in important_weights.transpose(0, -1):
scaling_matrix[important_weights.transpose(0, -1) == weight][i] = 1
else:
most_similar_neuron, similarity, scale = most_similar(weight, important_weights)
most_similar_neuron_index_in_important_weights = important_weights == most_similar_neuron
if similarity >= threshould:
scaling_matrix[most_similar_neuron_index_in_important_weights][i] = scale
return scaling_matrix
| 16,937
|
def calc_Kullback_Leibler_distance(dfi, dfj):
"""
Calculates the Kullback-Leibler distance of the two matrices.
As defined in Aerts et al. (2003). Also called Mutual Information.
Sort will be ascending.
Epsilon is used here to avoid conditional code for checking that neither P nor Q is equal to 0.
"""
epsilon = 0.00001
P = dfi + epsilon
Q = dfj + epsilon
divergence = np.sum(P * np.log2(P / Q))
return divergence
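A small worked call, assuming the function above is in scope; with base-2 logs the result is in bits, and the epsilon only matters when a bin is exactly zero:

import numpy as np

P = np.array([0.5, 0.3, 0.2])
Q = np.array([0.4, 0.4, 0.2])
print(calc_Kullback_Leibler_distance(P, Q))  # ~0.036 bits
print(calc_Kullback_Leibler_distance(P, P))  # 0.0 (identical distributions)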
| 16,938
|
def test_python_java_classes():
""" Run Python tests against JPY test classes """
sub_env = {'PYTHONPATH': _build_dir()}
log.info('Executing Python unit tests (against JPY test classes)...')
return jpyutil._execute_python_scripts(python_java_jpy_tests, env=sub_env)
| 16,939
|
def trim_datasets_using_par(data, par_indexes):
"""
Removes all the data points needing more fitting parameters than available.
"""
parameters_to_fit = set(par_indexes.keys())
trimmed_data = list()
for data_point in data:
if data_point.get_fitting_parameter_names() <= parameters_to_fit:
trimmed_data.append(data_point)
return trimmed_data
| 16,940
|
def menu_heading(username, submenu=None):
"""Heading for all menus
:param username: a string containing the username of the authenticated user
:param submenu: a string with the name of submenu
"""
click.clear()
click.secho("===== MAL CLI Application =====", fg="white", bg="blue")
click.echo("Logged in as: " + click.style("{}\n".format(username), fg="white", bg="cyan"))
if submenu is not None:
click.echo("--- Update your {} list ---".format(submenu))
| 16,941
|
def _parse_field(field: str) -> Field:
"""
Parse the given string representation of a CSV import field.
:param field: string or string-like field input
:return: a new Field
"""
name, _type = str(field).split(':')
if '(' in _type and _type.endswith(')'):
        # strip the trailing ')' and split on '(' to separate the type from its id space
        _type, id_space = _type[:-1].split('(')
return Field(name or _type, FieldType.from_str(_type), id_space)
return Field(name or _type, FieldType.from_str(_type))
| 16,942
|
def removeNoise(
audio_clip,
noise_thresh,
mean_freq_noise,
std_freq_noise,
noise_stft_db,
n_grad_freq=2,
n_grad_time=4,
n_fft=2048,
win_length=2048,
hop_length=512,
n_std_thresh=1.5,
prop_decrease=1.0,
verbose=False,
visual=False,
):
"""Remove noise from audio based upon a clip containing only noise
Args:
        audio_clip (array): waveform to be denoised.
        noise_thresh (array): per-frequency dB threshold above which a bin is considered signal.
        mean_freq_noise (array): mean noise dB at each frequency (used for plotting).
        std_freq_noise (array): standard deviation of the noise dB at each frequency (used for plotting).
        noise_stft_db (array): STFT of the noise clip in dB (used for plotting).
        n_grad_freq (int): how many frequency channels to smooth over with the mask.
        n_grad_time (int): how many time channels to smooth over with the mask.
        n_fft (int): length of the FFT window used for the STFT.
        win_length (int): Each frame of audio is windowed by `window()`. The window will be of length `win_length` and then padded with zeros to match `n_fft`.
        hop_length (int): number of audio samples between adjacent STFT columns.
        n_std_thresh (int): how many standard deviations louder than the mean dB of the noise (at each frequency level) to be considered signal
prop_decrease (float): To what extent should you decrease noise (1 = all, 0 = none)
visual (bool): Whether to plot the steps of the algorithm
Returns:
array: The recovered signal with noise subtracted
"""
if verbose:
start = time.time()
# STFT over noise
if verbose:
print("STFT on noise:", td(seconds=time.time() - start))
start = time.time()
# STFT over signal
if verbose:
start = time.time()
sig_stft = _stft(audio_clip, n_fft, hop_length, win_length)
sig_stft_db = _amp_to_db(np.abs(sig_stft))
if verbose:
print("STFT on signal:", td(seconds=time.time() - start))
start = time.time()
# Calculate value to mask dB to
mask_gain_dB = np.min(_amp_to_db(np.abs(sig_stft)))
print(noise_thresh, mask_gain_dB)
# Create a smoothing filter for the mask in time and frequency
smoothing_filter = np.outer(
np.concatenate(
[
np.linspace(0, 1, n_grad_freq + 1, endpoint=False),
np.linspace(1, 0, n_grad_freq + 2),
]
)[1:-1],
np.concatenate(
[
np.linspace(0, 1, n_grad_time + 1, endpoint=False),
np.linspace(1, 0, n_grad_time + 2),
]
)[1:-1],
)
smoothing_filter = smoothing_filter / np.sum(smoothing_filter)
# calculate the threshold for each frequency/time bin
db_thresh = np.repeat(
np.reshape(noise_thresh, [1, len(mean_freq_noise)]),
np.shape(sig_stft_db)[1],
axis=0,
).T
# mask if the signal is above the threshold
sig_mask = sig_stft_db < db_thresh
if verbose:
print("Masking:", td(seconds=time.time() - start))
start = time.time()
# convolve the mask with a smoothing filter
sig_mask = scipy.signal.fftconvolve(sig_mask, smoothing_filter, mode="same")
sig_mask = sig_mask * prop_decrease
if verbose:
print("Mask convolution:", td(seconds=time.time() - start))
start = time.time()
# mask the signal
sig_stft_db_masked = (
sig_stft_db * (1 - sig_mask)
+ np.ones(np.shape(mask_gain_dB)) * mask_gain_dB * sig_mask
) # mask real
sig_imag_masked = np.imag(sig_stft) * (1 - sig_mask)
sig_stft_amp = (_db_to_amp(sig_stft_db_masked) * np.sign(sig_stft)) + (
1j * sig_imag_masked
)
if verbose:
print("Mask application:", td(seconds=time.time() - start))
start = time.time()
# recover the signal
recovered_signal = _istft(sig_stft_amp, hop_length, win_length)
recovered_spec = _amp_to_db(
np.abs(_stft(recovered_signal, n_fft, hop_length, win_length))
)
if verbose:
print("Signal recovery:", td(seconds=time.time() - start))
if visual:
plot_spectrogram(noise_stft_db, title="Noise")
if visual:
plot_statistics_and_filter(
mean_freq_noise, std_freq_noise, noise_thresh, smoothing_filter
)
if visual:
plot_spectrogram(sig_stft_db, title="Signal")
if visual:
plot_spectrogram(sig_mask, title="Mask applied")
if visual:
plot_spectrogram(sig_stft_db_masked, title="Masked signal")
if visual:
plot_spectrogram(recovered_spec, title="Recovered spectrogram")
return recovered_signal
| 16,943
|
def validate_wra_params(func):
"""Water Risk atlas parameters validation"""
@wraps(func)
def wrapper(*args, **kwargs):
validation_schema = {
'wscheme': {
'required': True
},
'geostore': {
'type': 'string',
'required': True
},
'analysis_type': {
'type': 'string',
'required': True,
'default': None
},
'month': {
'required': False,
'default': None,
'nullable': True
},
'year': {
'type': 'string',
'required': False,
'default': None,
'nullable': True
},
'change_type': {
'type': 'string',
'required': False,
'default': None,
'nullable': True
},
'indicator': {
'type': 'string',
'required': True,
'default': None,
'nullable': True
},
'scenario': {
'type': 'string',
'required': False,
'default': None,
'nullable': True
},
'locations': {
'type': 'string',
                'required': False,
'default': None,
'nullable': True
},
'input_address': {
'type': 'string',
'required': False,
'default': None,
'nullable': True
},
'match_address': {
'type': 'string',
'required': False,
'default': None,
'nullable': True
},
'ids': {
'type': 'string',
'required': False,
'nullable': True,
'default': None
}
}
jsonRequestContent = request.json or {}
rArgs = {**request.args, **jsonRequestContent}
kwargs.update(rArgs)
logging.debug(f'[MIDDLEWARE - ws scheme]: {kwargs}')
logging.debug(f"[VALIDATOR - wra_weights]: {kwargs}")
validator = Validator(validation_schema, allow_unknown=True)
if not validator.validate(kwargs):
return error(status=400, detail=validator.errors)
kwargs['sanitized_params'] = validator.normalized(kwargs)
return func(*args, **kwargs)
return wrapper
| 16,944
|
def handle_request(r):
"""Handle the Simulator request given by the r dictionary
"""
print ("handle_request executed .. ")
print (r)
# Parse request ..
config = SimArgs()
config.machine = r[u'machine']
config.overlay = [r[u'topology']] # List of topologies - just one
config.group = r[u'cores']
overlay = r[u'topology'].split('-')
overlay_name = overlay[0]
overlay_args = overlay[1:]
if overlay_name == 'hybrid':
overlay_name = 'cluster'
config.hybrid = True;
config.hybrid_cluster = overlay_args[0];
config.overlay = [u'cluster']
if overlay_args == 'mm' :
config.multimessage = True
elif overlay_args == 'rev' :
config.reverserecv = True
c = config
from simulator import simulate
(last_nodes, leaf_nodes, root) = simulate(config)
# Generate response to be sent back to client
import config
assert len(config.models)==1 # Exactly one model has been generated
res = {}
res['root'] = root
res['model'] = config.models[0]
res['last_node'] = last_nodes[0]
res['leaf_nodes'] = leaf_nodes[0]
res['git-version'] = helpers.git_version().decode('ascii')
print(res)
logging.info(('Responding with >>>'))
logging.info((json.dumps(res)))
logging.info(('<<<'))
write_statistics(c.machine)
return json.dumps(res)
| 16,945
|
def number_empty_block(n):
"""Number of empty block"""
L = L4 if n == 4 else L8
i = 0
for x in range(n):
for y in range(n):
if L[x][y] == 0:
i = i + 1
return i
| 16,946
|
def test(seconds):
"""
demo
:param seconds:
:return:
"""
time.sleep(seconds)
| 16,947
|
def compute_referendum_result_by_regions(referendum_and_areas):
"""Return a table with the absolute count for each region.
    The returned DataFrame should be indexed by `code_reg` and have columns:
['name_reg', 'Registered', 'Abstentions', 'Null', 'Choice A', 'Choice B']
"""
ans = referendum_and_areas.groupby(
['code_reg', 'name_reg']).sum().reset_index().set_index('code_reg')
ans = ans.drop(columns="Town code")
return ans
| 16,948
|
def fixed_rho_total_legacy(data, rho_p, rho_s, beads_2_M):
"""
*LEGACY*: only returns polycation/cation concentrations. Use updated version
(`fixed_rho_total()`), which returns a dictionary of all concentrations.
Computes the polycation concentration in the
supernatant (I) and coacervate (II) phases for
different Bjerrum lengths.
Parameters
----------
data : dictionary of Pandas dataframes
Contains dataframes of data from liquid state
theory calculations indexed by Bjerrum length.
Dataframes have densities in beads/sigma^3.
rho_p : float
Average density of polymer (cation + anion) in
both phases [mol/L]
rho_s : float
Average density of salt (just cation since 1 cation
and 1 anion come from one KBr molecule) in both
phases [mol/L]
beads_2_M : float
Multiplicative conversion to get from beads/sigma^3
to moles of monomer/L.
    Returns
    -------
    rho_PCI_list : N-element list
        List of densities of polycation in phase I (supernatant) [mol/L]
    rho_PCII_list : N-element list
        List of densities of polycation in phase II (coacervate) [mol/L]
    rho_CI_list : N-element list
        List of densities of cation in phase I (supernatant) [mol/L]
    rho_CII_list : N-element list
        List of densities of cation in phase II (coacervate) [mol/L]
    lB_arr : (Nx1) numpy array
        Array of Bjerrum lengths non-dimensionalized by sigma (defined in the
        "data" dictionary).
    alpha_list : N-element list
        List of volume fractions of phase I [nondim].
    """
# initializes outputs
lB_valid_list = []
rho_PCI_list = []
rho_PCII_list = []
rho_CI_list = []
rho_CII_list = []
alpha_list = []
# computes coexistence at each Bjerrum length and stores results if physical
for lB in data.keys():
df = data[lB]
df_s = compute_rho_s(df, rho_p, beads_2_M)
# ensures that the total salt concentration is within the possible two-phase range
if rho_s <= np.max(df_s['rhoS'])*beads_2_M and \
rho_s >= np.min(df_s['rhoS'])*beads_2_M:
# finds the index of the dataframe that has the closest salt concentration to the given value
diff_rho_s = np.abs(df_s['rhoS']*beads_2_M - rho_s)
i_same_salt = np.argmin(diff_rho_s)
alpha = df_s['alpha'].iloc[i_same_salt]
# recomputes the volume fraction of supernatant more precisely using
# interpolation
alpha = np.interp(rho_s, df_s['rhoS']*beads_2_M,
df_s['alpha'].to_numpy(dtype='float64'))
if alpha == 1:
print('rho_s = {0:.64f}'.format(rho_s/beads_2_M))
print('rho_p = {0:.64f}'.format(rho_p/beads_2_M))
print('rhoPCI = {0:.64f}'.format(df['rhoPCI'].loc[i_same_salt]))
print('rhoPCII = {0:.64f}'.format(df['rhoPCII'].loc[i_same_salt]))
print('rhoCI = {0:.64f}'.format(df['rhoCI'].loc[i_same_salt]))
print('rhoCII = {0:.64f}'.format(df['rhoCII'].loc[i_same_salt]))
print(df.loc[i_same_salt])
# ensures that the ratio of volume I to total volume is physical
# (i.e., in the range [0,1])
if alpha > 1 or alpha < 0:
continue
lB_valid_list += [lB]
rho_PCI_list += [df_s['rhoPCI'].iloc[i_same_salt]*beads_2_M]
rho_PCII_list += [df_s['rhoPCII'].iloc[i_same_salt]*beads_2_M]
rho_CI_list += [df_s['rhoCI'].iloc[i_same_salt]*beads_2_M]
rho_CII_list += [df_s['rhoCII'].iloc[i_same_salt]*beads_2_M]
alpha_list += [alpha]
lB_arr = np.array(lB_valid_list)
return rho_PCI_list, rho_PCII_list, rho_CI_list, rho_CII_list, lB_arr, alpha_list
| 16,949
|
def is_valid_file(parser, filename):
"""Check if file exists, and return the filename"""
if not os.path.exists(filename):
parser.error("The file %s does not exist!" % filename)
else:
return filename
| 16,950
|
def _check_modality(study: xml.etree.ElementTree.Element, expected_modality: str):
"""Check that the modality of the given study is the expected one."""
series = _parse_series(study)
modality = _check_xml_and_get_text(series[1], "modality")
if modality != expected_modality:
raise ValueError(
f"Unexpected modality {modality}, expected {expected_modality}."
)
| 16,951
|
def portfolio():
"""Function to render the portfolio page."""
form = PortfolioCreateForm()
if form.validate_on_submit():
try:
portfolio = Portfolio(name=form.data['name'], user_id=session['user_id'])
db.session.add(portfolio)
db.session.commit()
except (DBAPIError, IntegrityError):
flash('Something went terribly wrong.')
return render_template('stocks/stocks.html', form=form)
return redirect(url_for('.search_form'))
companies = Company.query.filter_by(user_id=session['user_id']).all()
return render_template('./stocks/stocks.html', companies=companies, form=form), 200
| 16,952
|
def svn_client_get_simple_provider(*args):
"""svn_client_get_simple_provider(svn_auth_provider_object_t provider, apr_pool_t pool)"""
return apply(_client.svn_client_get_simple_provider, args)
| 16,953
|
def svn_client_invoke_get_commit_log(*args):
"""
svn_client_invoke_get_commit_log(svn_client_get_commit_log_t _obj, char log_msg, char tmp_file,
apr_array_header_t commit_items,
void baton, apr_pool_t pool) -> svn_error_t
"""
return apply(_client.svn_client_invoke_get_commit_log, args)
| 16,954
|
def enable_ph(module, blade):
"""Enable Phone Hone"""
changed = True
if not module.check_mode:
ph_settings = Support(phonehome_enabled=True)
try:
blade.support.update_support(support=ph_settings)
except Exception:
module.fail_json(msg='Enabling Phone Home failed')
module.exit_json(changed=changed)
| 16,955
|
def find_valid_nodes(node_ids, tree_1, tree_2):
"""
Recursive function for finding a subtree in the second tree
    with the same output type as a random subtree in the first tree
Args:
node_ids: List of node ids to search
tree_1: Node containing full tree
tree_2: Node containing full tree
Returns:
Random subtree of the first tree AND a valid node id of the second tree
The output_type of the subtree will match the output_type
of the valid node of the second tree
"""
# Randomly choose a node in the first tree
node_id = random.choice(node_ids)
# Get output_type of the random node in first tree
output_type = tree_1.get_id_outputs()[node_id]
# Find nodes with the same output_type in the second tree
valid_node_ids = []
for n in tree_2.get_id_outputs():
if tree_2.get_id_outputs()[n] == output_type:
valid_node_ids.append(n)
if len(valid_node_ids) == 0:
# Rerun function without invalid output_type
return find_valid_nodes([i for i in node_ids if tree_1.get_id_outputs()[i] != output_type], tree_1, tree_2)
# Take off root id
node_id = node_id[1:]
# Get subtree object from tree_1
subtree_1 = find_subtree(tree_1, node_id)
# Randomly choose a node in the second
valid_node_id = random.choice(valid_node_ids)
# Take off root id
valid_node_id = valid_node_id[1:]
# Get subtree object from tree_2
subtree_2 = find_subtree(tree_2, valid_node_id)
return subtree_1, valid_node_id, subtree_2, node_id
| 16,956
|
def pytorch_local_average(n, local_lookup, local_tensors):
"""Average the neighborhood tensors.
Parameters
----------
n : {int}
Size of tensor
local_lookup : {dict: int->float}
A dictionary from rank of neighborhood to the weight between two processes
local_tensors : {dict: int->tensor}
A dictionary from rank to tensors to be aggregated.
Returns
-------
tensor
An averaged tensor
"""
averaged = torch.DoubleTensor(np.zeros(n))
for node_id, node_weight in local_lookup.items():
averaged += node_weight * local_tensors[node_id]
return averaged
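A minimal self-contained example of the neighborhood averaging above (the function body is repeated so the snippet runs on its own); two equally weighted neighbors simply yield the element-wise mean:

import numpy as np
import torch

def pytorch_local_average(n, local_lookup, local_tensors):
    averaged = torch.DoubleTensor(np.zeros(n))
    for node_id, node_weight in local_lookup.items():
        averaged += node_weight * local_tensors[node_id]
    return averaged

weights = {0: 0.5, 1: 0.5}
tensors = {
    0: torch.DoubleTensor([1.0, 2.0, 3.0]),
    1: torch.DoubleTensor([3.0, 2.0, 1.0]),
}
print(pytorch_local_average(3, weights, tensors))  # tensor([2., 2., 2.], dtype=torch.float64)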
| 16,957
|
def get_rr_Ux(N, Fmat, psd, x):
"""
Given a rank-reduced decomposition of the Cholesky factor L, calculate L^{T}x
where x is some vector. This way, we don't have to built L, which saves
memory and computational time.
@param N: Vector with the elements of the diagonal matrix N
@param Fmat: (n x m) matrix consisting of the reduced rank basis
@param psd: PSD of the rank-reduced approximation
@param x: Vector we want to process as Lx
@return Ux
"""
n = N.shape[0]
m = Fmat.shape[1]
r = np.zeros(n)
t = np.zeros(m)
Z, B, D = get_rr_cholesky_rep(N, Fmat, psd)
BD = (B.T * np.sqrt(D)).T
for ii in range(n-1, -1, -1):
r[ii] = x[ii]*np.sqrt(D[ii]) + np.dot(BD[ii,:].T, t)
t += x[ii] * Z[ii,:]
return r
| 16,958
|
def get_poll_options(message: str) -> list:
"""
Turns string into a list of poll options
:param message:
:return:
"""
parts = message.split(CREATE_POLL_EVENT_PATTERN)
if len(parts) > 1:
votes = parts[-1].split(",")
if len(votes) == 1 and votes[0] == ' ':
return []
else:
return votes
return []
| 16,959
|
def test_cli_run_basic_ae(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.autoencoders.basic_ae.basic_ae_module import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
| 16,960
|
def get_avg_percent_bonds(bond_list, num_opts, adj_lists, num_trials, break_co_bonds=False):
"""
Given adj_list for a set of options, with repeats for each option, find the avg and std dev of percent of each
bond type
:param bond_list: list of strings representing each bond type
:param num_opts: number of options specified (should be length of adj_lists)
:param adj_lists: list of lists of adjs: outer is for each option, inner is for each repeat
:param num_trials: number of repeats (should be length of inner adj_lists list)
    :param break_co_bonds: Boolean, whether to determine oligomers and remaining bonds after removing C-O
        bonds to simulate RCF
:return: avg_bonds, std_bonds: list of floats, list of floats: for each option tested, the average and std dev
of bond distributions (percentages)
"""
analysis = []
for i in range(num_opts):
cur_adjs = adj_lists[i]
analysis.append([analyze_adj_matrix(cur_adjs[j], break_co_bonds=break_co_bonds) for j in range(num_trials)])
bond_percents = {}
avg_bonds = {}
std_bonds = {}
for bond_type in bond_list:
bond_percents[bond_type] = [[analysis[j][i][BONDS][bond_type]/sum(analysis[j][i][BONDS].values())
for i in range(num_trials)] for j in range(num_opts)]
avg_bonds[bond_type] = [np.mean(bond_pcts) for bond_pcts in bond_percents[bond_type]]
std_bonds[bond_type] = [np.sqrt(np.var(bond_pcts)) for bond_pcts in bond_percents[bond_type]]
return avg_bonds, std_bonds
| 16,961
|
def current_fig_image():
"""Takes current figure of matplotlib and returns it as a PIL image.
Also clears the current plot"""
plt.axis('off')
fig = plt.gcf()
buff = StringIO.StringIO()
fig.savefig(buff)
buff.seek(0)
img = Image.open(buff).convert('RGB')
plt.clf()
return img
| 16,962
|
def test_oneof_nested_oneof_messages_are_serialized_with_defaults():
"""
Nested messages with oneofs should also be handled
"""
message = Test(
wrapped_nested_message_value=NestedMessage(
id=0, wrapped_message_value=Message(value=0)
)
)
assert (
betterproto.which_one_of(message, "value_type")
== betterproto.which_one_of(Test().from_json(message.to_json()), "value_type")
== (
"wrapped_nested_message_value",
NestedMessage(id=0, wrapped_message_value=Message(value=0)),
)
)
| 16,963
|
def get_bgp_peer(
api_client, endpoint_id, bgp_peer_id, verbose=False, **kwargs
): # noqa: E501
"""Get eBGP peer # noqa: E501
Get eBGP peer details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = api.get_bgp_peer(client, endpoint_id, bgp_peer_id, async_req=True)
:param int endpoint_id: ID for IPsec endpoint (required)
:param int bgp_peer_id: ID for BGP peer (required)
:param async_req bool: execute request asynchronously
:param bool verbose: True for verbose output
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
request_params = ["verbose"] # noqa: E501
collection_formats = {}
query_params = []
for param in [p for p in request_params if local_var_params.get(p) is not None]:
query_params.append((param, local_var_params[param])) # noqa: E501
path_params = {"endpoint_id": endpoint_id, "bgp_peer_id": bgp_peer_id}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/ipsec/endpoints/{endpoint_id}/ebgp_peers/{bgp_peer_id}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
| 16,964
|
def rewrite_elife_funding_awards(json_content, doi):
""" rewrite elife funding awards """
# remove a funding award
if doi == "10.7554/eLife.00801":
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-2":
del json_content[i]
# add funding award recipient
if doi == "10.7554/eLife.04250":
recipients_for_04250 = [
{
"type": "person",
"name": {"preferred": "Eric Jonas", "index": "Jonas, Eric"},
}
]
for i, award in enumerate(json_content):
if "id" in award and award["id"] in ["par-2", "par-3", "par-4"]:
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_04250
# add funding award recipient
if doi == "10.7554/eLife.06412":
recipients_for_06412 = [
{
"type": "person",
"name": {"preferred": "Adam J Granger", "index": "Granger, Adam J"},
}
]
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-1":
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_06412
return json_content
| 16,965
|
def remove_subnet_from_router(router_id, subnet_id):
"""Remove a subnet from the router.
Args:
router_id (str): The router ID.
subnet_id (str): The subnet ID.
"""
return neutron().remove_interface_router(router_id, {
'subnet_id': subnet_id
})
| 16,966
|
def direct_to_template(request, template):
"""Generic template direction view."""
return render_to_response(template, {}, request)
| 16,967
|
def validate_metadata(template, size):
"""Validates metadata.
Catch any errors or invalid input that would cause a template with incorrect
information being created and propagated down to instances.
Args:
template: Creation parameters.
size: Size parameters to use.
Returns:
Nothing. The function returns normally if everything is correct. Raises an
exception otherwise.
Raises:
OrchestrateTemplateCreationError: If any of the metadata is invalid.
"""
print('Validating metadata for template {name} size {size_name}'.format(
name=template.name, size_name=size.name))
# (b/148229648) Does gpu_type exist?
if size.gpu_type:
response = compute.acceleratorTypes().list(
project=template.project,
zone=template.zone,
).execute()
gpu_types = [gpu['name'] for gpu in response['items']]
if size.gpu_type not in gpu_types:
message = (
'{gpu_type} is not a valid GPU type or is not available in project'
' {project} zone {zone}. Available options are: {gpu_types}'
).format(
project=template.image_project,
zone=template.zone,
gpu_type=size.gpu_type,
gpu_types=', '.join(gpu_types),
)
raise OrchestrateTemplateCreationError(message)
| 16,968
|
def now():
"""Return the current time as date object."""
return datetime.now()
| 16,969
|
def is_valid_node_name(name):
"""
Determine if a name is valid for a node.
A node name:
- Cannot be empty
- Cannot start with a number
- Cannot match any blacklisted pattern
:param str name: The name to check.
:return: True if the name is valid. False otherwise.
:rtype: bool
"""
return name and name not in BLACKLISTED_NODE_NAMES
| 16,970
|
def trajCalc(setup):
""" Creates trajectory between point A and the ground (B) based off of the initial position and the angle of travel
Arguments:
setup: [Object] ini file parameters
Returns:
A [list] lat/lon/elev of the tail of the trajectory
B [list] lat/lon/elev of the head of the trajectory
"""
B = np.array([0, 0, 0])
# convert angles to radians
ze = np.radians(setup.zangle)
az = np.radians(setup.azim)
# Create trajectory vector
traj = np.array([np.sin(az)*np.sin(ze), np.cos(az)*np.sin(ze), -np.cos(ze)])
    # backwards propagate the trajectory until it reaches 100000 m up
n = 85920/traj[2]
# B is the intersection between the trajectory vector and the ground
A = n*traj
# Convert back to geo coordinates
B = np.array(loc2Geo(setup.lat_centre, setup.lon_centre, 0, B))
A = np.array(loc2Geo(setup.lat_centre, setup.lon_centre, 0, A))
# print("Created Trajectory between A and B:")
# print(" A = {:10.4f}N {:10.4f}E {:10.2f}m".format(A[0], A[1], A[2]))
# print(" B = {:10.4f}N {:10.4f}E {:10.2f}m".format(B[0], B[1], B[2]))
A[2] /= 1000
B[2] /= 1000
setup.lat_i = A[0]
setup.lon_i = A[1]
setup.elev_i = A[2]
return A, B
| 16,971
|
async def health() -> Dict[str, str]:
"""Health check function
:return: Health check dict
    :rtype: Dict[str, str]
"""
health_response = schemas.Health(name=settings.PROJECT_NAME,
api_version=__version__)
return health_response.dict()
| 16,972
|
def projection_v3(v, w):
"""Return the signed length of the projection of vector v on vector w.
For the full vector result, use projection_as_vec_v3().
Since the resulting vector is along the 1st vector, you can get the
full vector result by scaling the 1st vector to the length of the result of
this function.
"""
return dot_v3(v, w) / w.length()
| 16,973
|
def custom_djsettings(settings):
"""Custom django settings to avoid warnings in stdout"""
settings.TEMPLATE_DEBUG = False
settings.DEBUG = False
return settings
| 16,974
|
def test_create_pull_requests_no_labels(mock_inquirer_prompt: MockerFixture) -> None:
"""It returns pull request."""
mock_inquirer_prompt.return_value = {
"title": "my title",
"body": "my body",
"labels": "",
"confirmation": True,
"issues_title_query": "issue title",
"inherit_labels": True,
"head": "main",
"base": "branch",
"draft": False,
"correct": True,
}
result = p.InquirerPrompter.create_pull_requests([REPO])
expected_pr = pr.PullRequest(
"my title",
"my body",
set(),
True,
"issue title",
True,
"main",
"branch",
False,
)
assert result == expected_pr
| 16,975
|
def bleu(pred_seq, label_seq, k):
"""计算BLEU"""
pred_tokens, label_tokens = pred_seq.split(' '), label_seq.split(' ')
len_pred, len_label = len(pred_tokens), len(label_tokens)
score = math.exp(min(0, 1 - len_label / len_pred))
for n in range(1, k + 1):
num_matches, label_subs = 0, collections.defaultdict(int)
for i in range(len_label - n + 1):
label_subs[''.join(label_tokens[i: i + n])] += 1
for i in range(len_pred - n + 1):
if label_subs[''.join(pred_tokens[i: i + n])] > 0:
num_matches += 1
label_subs[''.join(pred_tokens[i: i + n])] -= 1
score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
return score
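A quick sanity check, assuming the `bleu` function above is in scope along with its `math` and `collections` imports; an identical prediction and label give a perfect score, while a short prediction is penalized:

print(bleu("the cat sat on the mat", "the cat sat on the mat", k=2))  # 1.0
print(bleu("the cat", "the cat sat on the mat", k=2))  # < 1.0 due to the brevity penalty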
| 16,976
|
def generate_wetbulb_temps(year, directory):
"""Generate puma level hourly time series of wetbulb temperatures for all pumas within a state
:param int year: year of desired dark fractions
:param str directory: path to local root directory for weather data
:export: (*csv*) -- statewide hourly wetbulb temperatures for every puma
"""
    # Create folder to store temps_wetbulb output if it doesn't yet exist
os.makedirs(os.path.join(directory, "pumas", "temps_wetbulb"), exist_ok=True)
for state in const.state_list:
temps = pd.read_csv(
f"https://besciences.blob.core.windows.net/datasets/bldg_el/pumas/temps/temps_pumas_{state}_{year}.csv"
)
dwpts = pd.read_csv(
f"https://besciences.blob.core.windows.net/datasets/bldg_el/pumas/dewpoints/dewpts_pumas_{state}_{year}.csv"
)
press = pd.read_csv(
f"https://besciences.blob.core.windows.net/datasets/bldg_el/pumas/press/press_pumas_{state}_{year}.csv"
)
temps_wetbulb = temps.apply(lambda x: t_to_twb(x, dwpts[x.name], press[x.name]))
temps_wetbulb.to_csv(
os.path.join(
directory,
"pumas",
"temps_wetbulb",
f"temps_wetbulb_pumas_{state}_{year}.csv",
),
index=False,
)
| 16,977
|
def report_by_name(http_request, agent_name):
"""
A version of report that can look up an agent by its name. This will
generally be slower but it also doesn't expose how the data is stored and
might be easier in some cases.
"""
agent = get_list_or_404(Agent, name=agent_name)[0]
return report(http_request, agent.id)
| 16,978
|
def create_application(global_config=None, **local_conf):
"""
Create a configured instance of the WSGI application.
"""
sites, types = load_config(local_conf.get("config"))
return ImageProxy(sites, types)
| 16,979
|
def dimensionState(moons,dimension):
"""returns the state for the given dimension"""
result = list()
for moon in moons:
result.append((moon.position[dimension],moon.velocity[dimension]))
return result
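
# Example (assumption: moons expose `position` and `velocity` sequences indexed by
# dimension; a stand-in class is used here).
class Moon:
    def __init__(self, position, velocity):
        self.position = position
        self.velocity = velocity

moons = [Moon([1, 2, 3], [0, 0, 0]), Moon([4, 5, 6], [-1, 0, 1])]
print(dimensionState(moons, 0))   # [(1, 0), (4, -1)]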
| 16,980
|
def test_get_local_coordinate_system_no_time_dep(
system_name, reference_name, exp_orientation, exp_coordinates
):
"""Test the ``get_cs`` function without time dependencies.
Have a look into the tests setup section to see which coordinate systems are
defined in the CSM.
Parameters
----------
system_name : str
Name of the system that should be returned
reference_name : str
Name of the reference system
exp_orientation : List or numpy.ndarray
The expected orientation of the returned system
exp_coordinates
The expected coordinates of the returned system
"""
# setup
csm = tf.CoordinateSystemManager(root_coordinate_system_name="root")
csm.create_cs("lcs_1", "root", r_mat_z(0.5), [1, 2, 3])
csm.create_cs("lcs_2", "root", r_mat_y(0.5), [3, -3, 1])
csm.create_cs("lcs_3", "lcs_2", r_mat_x(0.5), [1, -1, 3])
check_coordinate_system(
csm.get_cs(system_name, reference_name),
exp_orientation,
exp_coordinates,
True,
)
| 16,981
|
def login():
"""Log in current user."""
user = get_user()
if user.system_wide_role != 'No Access':
flask_login.login_user(user)
return flask.redirect(common.get_next_url(
flask.request, default_url=flask.url_for('dashboard')))
flask.flash(u'You do not have access. Please contact your administrator.',
'alert alert-info')
return flask.redirect('/')
| 16,982
|
def test_load_module_recursive_v2_module_depends_on_v1(
local_module_package_index: str, snippetcompiler, preload_v1_module: bool
) -> None:
"""
A V2 module cannot depend on a V1 module. This test case ensure that the load_module_recursive() method
raises an error when a dependency of a V2 module is only available as a V1 module.
Dependency graph: v2_depends_on_v1 (V2) ---> mod1 (V1)
"""
project = snippetcompiler.setup_for_snippet(
snippet="import v2_depends_on_v1",
python_package_sources=[local_module_package_index],
python_requires=[Requirement.parse("inmanta-module-v2-depends-on-v1")],
install_project=False,
)
if preload_v1_module:
project.get_module("mod1", allow_v1=True)
assert ("mod1" in project.modules) == preload_v1_module
with pytest.raises(ModuleLoadingException, match="Failed to load module mod1"):
project.load_module_recursive(install=True)
| 16,983
|
def init_module():
"""
Initialize user's module handler.
:return: wrapper handler.
"""
original_module, module_path, handler_name = import_original_module()
try:
handler = original_module
for name in module_path.split('.')[1:] + [handler_name]:
handler = getattr(handler, name)
return handler
except AttributeError:
raise AttributeError(
'No handler {} in module {}'.format(handler_name, module_path)
)
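
# Sketch of the attribute walk above (assumption: `import_original_module()` returns
# something like (module, "pkg.sub", "handler"); plain namespace objects stand in here).
import types

module = types.SimpleNamespace(sub=types.SimpleNamespace(handler=lambda event, ctx: "ok"))
module_path, handler_name = "pkg.sub", "handler"
handler = module
for name in module_path.split('.')[1:] + [handler_name]:
    handler = getattr(handler, name)
print(handler(None, None))   # "ok"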
| 16,984
|
def daterange(start_date = None, end_date = None):
"""
Loops over date range
"""
if not start_date:
start_date = datetime.datetime(day=1, month=1,year=1950)
if not end_date:
end_date = datetime.datetime.now()
cursor_date = start_date
while cursor_date < end_date:
yield cursor_date
cursor_date += datetime.timedelta(days=1)
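
# Usage example for the generator above (assumes `import datetime` at module level):
# it yields one datetime per day over [start_date, end_date).
start = datetime.datetime(2020, 1, 1)
end = datetime.datetime(2020, 1, 4)
print([d.date().isoformat() for d in daterange(start, end)])
# ['2020-01-01', '2020-01-02', '2020-01-03']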
| 16,985
|
def process(lines: List[str]) -> str:
""" Preprocess a Fortran source file.
Args:
        lines: The lines of the input Fortran source file.
    Returns:
        The preprocessed Fortran source, joined into a single string.
"""
# remove lines that are entirely comments and partial-line comments
lines = [
rm_trailing_comment(line)
for line in lines
if not line_is_comment(line)
]
# merge continuation lines
chg = True
while chg:
chg = False
i = 0
while i < len(lines):
line = lines[i]
llstr = line.lstrip()
if len(llstr) > 0 and llstr[0] == "&": # continuation character
prevline = lines[i - 1]
line = llstr[1:].lstrip()
prevline = prevline.rstrip() + line
lines[i - 1] = prevline
lines.pop(i)
chg = True
i += 1
return "".join(lines)
| 16,986
|
def range_atk_params():
"""Range calculations with attack parameters."""
dsc = DamageStatCalc()
attacker = POKEMON_DATA["spinda"]
defender = POKEMON_DATA["spinda"]
move = generate_move(MOVE_DATA["tackle"])
params = {}
params["atk"] = {}
params["atk"]["max_evs"] = True
params["atk"]["positive_nature"] = True
params["def"] = {}
params["hp"] = {}
dmg_range = dsc.calculate_range(move, attacker, defender, params)
assert dmg_range[0] == 25
assert dmg_range[1] == 30
attacker = POKEMON_DATA["exploud"]
dmg_range = dsc.calculate_range(move, attacker, defender, params)
assert dmg_range[0] == 32
assert dmg_range[1] == 39
params["atk"]["positive_nature"] = False
defender = POKEMON_DATA["floatzel"]
dmg_range = dsc.calculate_range(move, attacker, defender, params)
assert dmg_range[0] == 26
assert dmg_range[1] == 32
| 16,987
|
def _summary(function):
"""
Derive summary information from a function's docstring or name. The summary is the first
    sentence of the docstring, ending in a period, or if no docstring is present, the
function's name capitalized.
"""
if not function.__doc__:
return f"{function.__name__.capitalize()}."
result = []
for word in function.__doc__.split():
result.append(word)
if word.endswith("."):
break
return " ".join(result)
| 16,988
|
def findrun(base,dim,boxsize):
""" find all files associated with run given base directory
and the resolution size and box length """
if not os.path.isdir(base):
        print(base, 'is not a valid directory')
sys.exit(1)
    # retrieve all files that match tag and box size
#note this will include the initialisation boxes, which
#are independent of redshift
searchstr='_'+str(dim)+'_'+str(boxsize)+'Mpc'
filenames=os.listdir(base)
box_files=[]
for filename in filenames:
if filename.find(searchstr)>=0:
box_files.append(os.path.join(base,filename))
return box_files
| 16,989
|
def sf_rhino_inputs(hull_lis, ten_lines_dic):
"""
create hull_dic to save as rhino input
    hull_dic = {hull_ind: [seg1[[pt1], [pt2]], seg2[[], []], ...], ...}
saves lines_dic
"""
hull_dic={ind: [] for ind in range(len(hull_lis))}
for ind, hull in enumerate(hull_lis):
for simplex in hull.simplices:
hull_dic[ind].append(hull.points[simplex].tolist())
with open(os.path.join(BASEDIR, 'hull_dic.p'), 'wb') as fp:
pickle.dump(hull_dic, fp, protocol=2)
with open(os.path.join(BASEDIR, 'ten_lines_dic.p'), 'wb') as fp:
pickle.dump(ten_lines_dic, fp, protocol=2)
| 16,990
|
def demand_mass_balance_c(host_odemand, class_odemand, avail, host_recapture):
"""Solve Demand Mass Balance equation for class-level
Parameters
----------
host_odemand: int
        Observed host demand
class_odemand: int
Observed class demand
avail: dict
Availability of demand open during period considered
host_recapture: float
Estimated host level recapture
Returns
-------
tuple
Estimated demand, spill and recapture
"""
    # if observed demand of a class is 0, demand mass balance can't
    # estimate demand and spill alone without additional information
demand = spill = recapture = 0
if class_odemand:
recapture = host_recapture * class_odemand / host_odemand
# availability of demand closed during period considered
k = 1 - avail
A = np.array([[1, -1], [-k, 1]])
B = np.array([class_odemand - recapture, 0])
demand, spill = solve(A, B)
return demand, spill, recapture
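
# Worked example of the linear system above (assumes `solve` is numpy.linalg.solve).
# With host_odemand=100, class_odemand=20, host_recapture=5 and avail=0.8:
#   recapture = 5 * 20 / 100 = 1
#   k = 1 - 0.8 = 0.2, so  demand - spill = 19  and  spill = 0.2 * demand
#   => 0.8 * demand = 19  => demand = 23.75, spill = 4.75
import numpy as np
from numpy.linalg import solve

demand, spill = solve(np.array([[1, -1], [-0.2, 1]]), np.array([19.0, 0.0]))
print(round(demand, 2), round(spill, 2))   # 23.75 4.75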
| 16,991
|
def test_fetch_methods(mocker: MockerFixture) -> None:
"""
Test ``fetchone``, ``fetchmany``, ``fetchall``.
"""
requests = mocker.patch("datajunction.sql.dbapi.cursor.requests")
requests.post().headers.get.return_value = "application/json"
requests.post().json.return_value = {
"database_id": 1,
"catalog": None,
"schema_": None,
"id": "3d33ceae-3484-45b6-807f-7c7cea3f6577",
"submitted_query": "SELECT 1",
"executed_query": "SELECT 1",
"scheduled": "2022-04-08T18:24:06.395989",
"started": "2022-04-08T18:24:06.396026",
"finished": "2022-04-08T18:24:06.396882",
"state": "FINISHED",
"progress": 1.0,
"results": [
{
"sql": "SELECT COUNT(*) AS A FROM B GROUP BY B.group",
"columns": [{"name": "A", "type": "INT"}],
"rows": [[1], [2], [3]],
"row_count": 3,
},
],
"next": None,
"previous": None,
"errors": [],
}
url = URL("http://localhost:8000/")
cursor = Cursor(url)
cursor.execute("SELECT A FROM metrics GROUP BY B.group")
assert cursor.fetchone() == (1,)
assert cursor.fetchone() == (2,)
assert cursor.fetchone() == (3,)
assert cursor.fetchone() is None
cursor.execute("SELECT A FROM metrics GROUP BY B.group")
assert cursor.fetchmany(2) == [(1,), (2,)]
assert cursor.fetchmany(2) == [(3,)]
cursor.execute("SELECT A FROM metrics GROUP BY B.group")
assert cursor.fetchall() == [(1,), (2,), (3,)]
| 16,992
|
def test_isConnected(web3):
"""
Web3.isConnected() returns True when connected to a node.
"""
assert web3.isConnected() is True
| 16,993
|
def record_reaction(reaction):
""" reaction is -1 for cold , 0 for ok, 1 for hot """
ts = datetime.now().isoformat()
th = temphumids.get_current_temphumid()
ot = outside_weather.get_recent_temp()
with open(out_file, 'a') as f:
w = csv.writer(f)
w.writerow([ts, th['temperature'], th['humidity'], ot, reaction])
| 16,994
|
def __iadd__(*args, **kwargs): # real signature unknown
""" a = iadd(a, b) -- Same as a += b. """
pass
| 16,995
|
def serve_buffer(
data: bytes,
offered_filename: str = None,
content_type: str = None,
as_attachment: bool = True,
as_inline: bool = False,
default_content_type: Optional[str] = MimeType.FORCE_DOWNLOAD) \
-> HttpResponse:
"""
Serve up binary data from a buffer.
Options as for :func:`serve_file`.
"""
response = HttpResponse(data)
add_http_headers_for_attachment(
response,
offered_filename=offered_filename,
content_type=content_type,
as_attachment=as_attachment,
as_inline=as_inline,
content_length=len(data),
default_content_type=default_content_type)
return response
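
# Usage sketch (assumption: called from a Django view; the payload and filename
# below are hypothetical placeholders).
def download_report(request) -> HttpResponse:
    pdf_bytes = b"%PDF-1.4 ..."   # placeholder binary content
    return serve_buffer(
        pdf_bytes,
        offered_filename="report.pdf",
        content_type="application/pdf",
        as_attachment=True,
    )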
| 16,996
|
def get_single_image_results(gt_boxes, pred_boxes, iou_thr):
"""Calculates number of true_pos, false_pos, false_neg from single batch of boxes.
Args:
gt_boxes (list of list of floats): list of locations of ground truth
objects as [xmin, ymin, xmax, ymax]
        pred_boxes (list of list of floats): list of locations of predicted
            objects as [xmin, ymin, xmax, ymax]
iou_thr (float): value of IoU to consider as threshold for a
true prediction.
Returns:
dict: true positives (int), false positives (int), false negatives (int)
"""
all_pred_indices = range(len(pred_boxes))
all_gt_indices = range(len(gt_boxes))
if len(all_pred_indices) == 0:
tp = 0
fp = 0
fn = len(gt_boxes)
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
if len(all_gt_indices) == 0:
tp = 0
fp = len(pred_boxes)
fn = 0
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
gt_idx_thr = []
pred_idx_thr = []
ious = []
for ipb, pred_box in enumerate(pred_boxes):
for igb, gt_box in enumerate(gt_boxes):
iou = calc_iou_individual(pred_box, gt_box)
if iou > iou_thr:
gt_idx_thr.append(igb)
pred_idx_thr.append(ipb)
ious.append(iou)
args_desc = np.argsort(ious)[::-1]
if len(args_desc) == 0:
# No matches
tp = 0
fp = len(pred_boxes)
fn = len(gt_boxes)
else:
gt_match_idx = []
pred_match_idx = []
for idx in args_desc:
gt_idx = gt_idx_thr[idx]
pr_idx = pred_idx_thr[idx]
# If the boxes are unmatched, add them to matches
if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
gt_match_idx.append(gt_idx)
pred_match_idx.append(pr_idx)
tp = len(gt_match_idx)
fp = len(pred_boxes) - len(pred_match_idx)
fn = len(gt_boxes) - len(gt_match_idx)
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
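
# Example run (assumptions: `calc_iou_individual` is a standard IoU on
# [xmin, ymin, xmax, ymax] boxes, and `pred_boxes` is passed as a plain list of
# boxes, which is how the code above indexes it).
gt = [[0, 0, 10, 10]]
preds = [[1, 1, 9, 9], [20, 20, 30, 30]]
# IoU([1, 1, 9, 9], [0, 0, 10, 10]) = 64 / 100 = 0.64 > 0.5 -> one match
print(get_single_image_results(gt, preds, iou_thr=0.5))
# {'true_pos': 1, 'false_pos': 1, 'false_neg': 0}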
| 16,997
|
def partition(arr, left, right):
"""[summary]
The point of a pivot value is to select a value,
find out where it belongs in the array while moving everything lower than that value
to the left, and everything higher to the right.
Args:
arr ([array]): [Unorderd array]
left ([int]): [Left index of the array]
right ([int]): [Right index of the array]
Returns:
[int]: [the value of the lowest element]
"""
pivot = arr[right]
low = left - 1
for current in range(left, right):
if arr[current] <= pivot:
low += 1
swap(arr, current, low)
swap(arr, right, low + 1)
return low + 1
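
# Example of one partition step (assumption: `swap` exchanges two array slots;
# a stand-in is defined here). The pivot 3 ends up at index 1 with smaller values
# on its left and larger values on its right.
def swap(arr, i, j):    # stand-in helper
    arr[i], arr[j] = arr[j], arr[i]

data = [7, 2, 9, 4, 3]
pivot_index = partition(data, 0, len(data) - 1)
print(pivot_index, data)   # 1 [2, 3, 9, 4, 7]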
| 16,998
|
def graphatbottleneck(g,m,shallfp=True):
"""handles the bottleneck transformations for a pure graph ae, return g, compressed, new input, shallfp=True=>convert vector in matrix (with gfromparam), can use redense to add a couple dense layers around the bottleneck (defined by m.redense*)"""
comp=ggoparam(gs=g.s.gs,param=g.s.param)([g.X])
if m.shallredense:
for e in m.redenseladder:
comp=Dense(e,activation=m.redenseactivation,kernel_initializer=m.redenseinit)(comp)
inn2=Input(m.redenseladder[-1])
use=inn2
for i in range(len(m.redenseladder)-1,-1,-1):
use=Dense(m.redenseladder[i],activation=m.redenseactivation,kernel_initializer=m.redenseinit)(use)
use=Dense(g.s.gs*g.s.param,activation=m.redenseactivation,kernel_initializer=m.redenseinit)(use)
else:
inn2=Input(g.s.gs*g.s.param)
use=inn2
if shallfp:
taef1=gfromparam(gs=g.s.gs,param=g.s.param)([use])
else:
taef1=inn2
g.X=taef1
g.A=None
return g,comp,inn2
| 16,999
|