content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_get_train_test_val(capsys):
    """Tests imbDRL.data.get_train_test_val."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 0, 0, 0])

    # val_frac must lie strictly inside the (0, 1) interval.
    with pytest.raises(ValueError) as excinfo:
        data.get_train_test_val(X, y, X, y, 0.2, [0], [1, 2], val_frac=0.0)
    assert "not in interval" in str(excinfo.value)

    with pytest.raises(ValueError) as excinfo:
        data.get_train_test_val(X, y, X, y, 0.2, [0], [1, 2], val_frac=1)
    assert "not in interval" in str(excinfo.value)

    # print_stats must be a boolean.
    with pytest.raises(TypeError) as excinfo:
        data.get_train_test_val(X, y, X, y, 0.2, [0], [1, 2], print_stats=1234)
    assert "must be of type" in str(excinfo.value)

    # Check the shapes of all six returned splits.
    splits = data.get_train_test_val(X, y, X, y, [1], [0], imb_ratio=0.25, print_stats=False)
    X_train, y_train, X_test, y_test, X_val, y_val = splits
    assert X_train.shape == (2, 2)
    assert X_test.shape == (3, 2)
    assert X_val.shape == (1, 2)
    assert y_train.shape == (2, )
    assert y_test.shape == (3, )
    assert y_val.shape == (1, )

    # With print_stats=True the imbalance summary is written to stdout.
    data.get_train_test_val(X, y, X, y, [1], [0], imb_ratio=0.25, print_stats=True)
    captured = capsys.readouterr()
    assert captured.out == ("Imbalance ratio `p`:\n"
                            "\ttrain: n=0, p=0.000000\n"
                            "\ttest: n=0, p=0.000000\n"
                            "\tvalidation: n=0, p=0.000000\n")

    # With print_stats=False nothing is printed.
    data.get_train_test_val(X, y, X, y, [1], [0], imb_ratio=0.25, print_stats=False)
    captured = capsys.readouterr()
    assert captured.out == ""
def readExampleInfos(fpath, examples):
    """ Add the example infos for the file fpath to the
    global dictionary examples.

    :param fpath: path of the XML documentation file to parse.
    :param examples: dict mapping example name -> ExampleInfo; mutated in place.
    """
    # Create doctree
    t = SConsDoc.SConsDocTree()
    t.parseXmlFile(fpath)
    # Parse scons_examples
    for e in stf.findAll(t.root, "scons_example", SConsDoc.dbxid,
                         t.xpath_context, t.nsmap):
        n = ''
        if stf.hasAttribute(e, 'name'):
            n = stf.getAttribute(e, 'name')
        # Only register a new ExampleInfo for a named, not-yet-seen example.
        if n and n not in examples:
            i = ExampleInfo()
            i.name = n
            examples[n] = i
        # Parse file and directory entries
        # NOTE(review): files/folders are appended via examples[n] even when
        # `n` is empty or already existed — assumes every <scons_example>
        # carries a unique name; an unnamed one would raise KeyError. Confirm.
        for f in stf.findAll(e, "file", SConsDoc.dbxid,
                             t.xpath_context, t.nsmap):
            fi = ExampleFile()
            if stf.hasAttribute(f, 'name'):
                fi.name = stf.getAttribute(f, 'name')
            if stf.hasAttribute(f, 'chmod'):
                fi.chmod = stf.getAttribute(f, 'chmod')
            fi.content = stf.getText(f)
            examples[n].files.append(fi)
        for d in stf.findAll(e, "directory", SConsDoc.dbxid,
                             t.xpath_context, t.nsmap):
            di = ExampleFolder()
            if stf.hasAttribute(d, 'name'):
                di.name = stf.getAttribute(d, 'name')
            if stf.hasAttribute(d, 'chmod'):
                di.chmod = stf.getAttribute(d, 'chmod')
            examples[n].folders.append(di)
    # Parse scons_example_files (file references attached to an example
    # declared elsewhere; entries without an 'example' attribute are skipped).
    for f in stf.findAll(t.root, "scons_example_file", SConsDoc.dbxid,
                         t.xpath_context, t.nsmap):
        if stf.hasAttribute(f, 'example'):
            e = stf.getAttribute(f, 'example')
        else:
            continue
        fi = ExampleFile(FT_FILEREF)
        if stf.hasAttribute(f, 'name'):
            fi.name = stf.getAttribute(f, 'name')
        if stf.hasAttribute(f, 'chmod'):
            fi.chmod = stf.getAttribute(f, 'chmod')
        fi.content = stf.getText(f)
        examples[e].files.append(fi)
    # Parse scons_output (expected command outputs for an example; entries
    # without an 'example' attribute are skipped).
    for o in stf.findAll(t.root, "scons_output", SConsDoc.dbxid,
                         t.xpath_context, t.nsmap):
        if stf.hasAttribute(o, 'example'):
            n = stf.getAttribute(o, 'example')
        else:
            continue
        eout = ExampleOutput()
        if stf.hasAttribute(o, 'name'):
            eout.name = stf.getAttribute(o, 'name')
        if stf.hasAttribute(o, 'tools'):
            eout.tools = stf.getAttribute(o, 'tools')
        if stf.hasAttribute(o, 'os'):
            eout.os = stf.getAttribute(o, 'os')
        if stf.hasAttribute(o, 'suffix'):
            eout.suffix = stf.getAttribute(o, 'suffix')
        for c in stf.findAll(o, "scons_output_command", SConsDoc.dbxid,
                             t.xpath_context, t.nsmap):
            oc = ExampleCommand()
            if stf.hasAttribute(c, 'edit'):
                oc.edit = stf.getAttribute(c, 'edit')
            if stf.hasAttribute(c, 'environment'):
                oc.environment = stf.getAttribute(c, 'environment')
            if stf.hasAttribute(c, 'output'):
                oc.output = stf.getAttribute(c, 'output')
            # The command itself comes from the 'cmd' attribute when present,
            # otherwise from the element's text content.
            if stf.hasAttribute(c, 'cmd'):
                oc.cmd = stf.getAttribute(c, 'cmd')
            else:
                oc.cmd = stf.getText(c)
            eout.commands.append(oc)
        examples[n].outputs.append(eout)
def keep_row(row):
    """
    :param row: a list for the row in the data
    :return: True if we should keep row; False if we should discard row
    """
    # Keep the row if either actor's country code is one we care about.
    actor1 = row[_INDICES["Actor1CountryCode"]]
    actor2 = row[_INDICES["Actor2CountryCode"]]
    return actor1 in _COUNTRIES_OF_INTEREST or actor2 in _COUNTRIES_OF_INTEREST
def test_estimator_results():
    """
    creating some planes pointing in different directions (two
    north-south, two east-west) and that have a slight position errors (+-
    0.1 m in one of the four cardinal directions """
    horizon_frame = AltAz()
    # Two planes at az=45 deg with telescopes offset along +y / -y
    # (presumably north/south in this frame — confirm coordinate convention).
    p1 = SkyCoord(alt=43 * u.deg, az=45 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=47 * u.deg, az=45 * u.deg, frame=horizon_frame)
    circle1 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, 1, 0] * u.m)
    # Two planes at az=90 deg with telescopes offset along +x / -x.
    p1 = SkyCoord(alt=44 * u.deg, az=90 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=46 * u.deg, az=90 * u.deg, frame=horizon_frame)
    circle2 = HillasPlane(p1=p1, p2=p2, telescope_position=[1, 0, 0] * u.m)
    p1 = SkyCoord(alt=44.5 * u.deg, az=45 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=46.5 * u.deg, az=45 * u.deg, frame=horizon_frame)
    circle3 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, -1, 0] * u.m)
    p1 = SkyCoord(alt=43.5 * u.deg, az=90 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=45.5 * u.deg, az=90 * u.deg, frame=horizon_frame)
    circle4 = HillasPlane(p1=p1, p2=p2, telescope_position=[-1, 0, 0] * u.m)
    # creating the fit class and setting the the great circle member
    fit = HillasReconstructor()
    fit.hillas_planes = {1: circle1, 2: circle2, 3: circle3, 4: circle4}
    # performing the direction fit with the minimisation algorithm
    # and a seed that is perpendicular to the up direction
    # NOTE(review): only prints the result — no assertion is made here.
    dir_fit_minimise, _ = fit.estimate_direction()
    print("direction fit test minimise:", dir_fit_minimise)
    print()
def _grad_shapelets(X, y, n_classes, weights, shapelets, lengths, alpha,
                    penalty, C, fit_intercept, intercept_scaling,
                    sample_weight):
    """Compute the gradient of the loss with regards to the shapelets.

    Assumes X is (n_samples, n_timestamps); `weights` is 1-D for binary
    classification and 2-D for multiclass (softmax) — TODO confirm against
    callers. `penalty` and `C` are accepted but unused here (presumably the
    regularization applies only to the weight gradient).
    """
    n_samples, n_timestamps = X.shape
    # Derive distances between shapelets and time series
    distances = _derive_all_squared_distances(
        X, n_samples, n_timestamps, shapelets, lengths, alpha)
    distances = np.asarray(distances).T
    # Add intercept: prepend a constant column; weight_idx marks where the
    # shapelet-related weights start inside `weights`.
    if fit_intercept:
        distances = np.c_[np.ones(n_samples) * intercept_scaling, distances]
        weight_idx = 1
    else:
        weight_idx = 0
    # Derive probabilities and cross-entropy loss; clipping avoids log(0)
    # and keeps the gradient finite.
    if weights.ndim == 1:
        proba = _expit(distances @ weights)
        proba = np.clip(proba, 1e-8, 1 - 1e-8)
    else:
        proba = _softmax(distances @ weights, n_samples, n_classes)
        proba = np.clip(proba, 1e-8, 1 - 1e-8)
    # Reshape some arrays: binary case needs a trailing axis to broadcast
    # like the multiclass (n_samples, n_classes) array.
    if weights.ndim == 1:
        proba_minus_y = (proba - y)[:, None]
    else:
        proba_minus_y = proba - y
    # Compute the gradients, one array per shapelet, then flatten into a
    # single 1-D gradient vector.
    gradients = _compute_shapelet_grad(
        X, n_samples, n_timestamps, weights, shapelets, lengths,
        alpha, proba_minus_y, weight_idx, sample_weight
    )
    gradients = np.concatenate(gradients)
    return gradients
def get_tags_list(server_address: str, image_name: str) -> List[str]:
    """
    Returns list of tags connected with an image with a given name
    :param server_address: address of a server with docker registry
    :param image_name: name of an image
    :return: list of tags connected with a given image
    In case of any problems during getting list of tags - it throws an error
    """
    url = f"http://{server_address}/v2/{image_name}/tags/list"
    try:
        # A timeout prevents the call from hanging forever when the
        # registry is unreachable or unresponsive.
        result = requests.get(url, timeout=30)
    except requests.exceptions.RequestException as exc:
        err_message = Texts.TAGS_GET_ERROR_MSG
        logger.exception(err_message)
        raise RuntimeError(err_message) from exc
    if result.status_code != HTTPStatus.OK:
        err_message = Texts.TAGS_GET_ERROR_MSG
        # logger.error (not .exception): there is no active exception here,
        # so .exception would log a useless "NoneType: None" traceback.
        logger.error(err_message)
        raise RuntimeError(err_message)
    return result.json().get("tags")
def build_graph(graph_attrs, meta_data, nodes, edges):
    """ Build the Graph with specific nodes and edges.
    :param graph_attrs: dictionary with graph attributes
    :param meta_data: metadata attached to the graph as ``graph.meta_data``
    :param nodes: list of nodes where each node is tuple (node_name, type, attrs)
      nodes=[
          ('input', 'Parameter',  {}),
          ('weights', 'Const', {}),
          ('conv',  'Convolution', {}),
          ('output', 'Result', {})
      ]
    :param edges: list of edges where each edge is tuple (node_out, node_in, attrs)
      edges=[
          ('input', 'conv', {'out': 0, 'in': 0}),
          ('weights', 'conv', {'out': 0, 'in': 1}),
          ('conv', 'output', {'out': 0, 'in': 0})
      ]
    :return: generated graph.
    """
    graph = Graph()
    graph.graph = graph_attrs
    graph.meta_data = meta_data
    # Create all nodes first so edges can connect them by name.
    for node in nodes:
        create_node(graph, node[0], node[1], node[2])
    for edge in edges:
        # Missing 'out'/'in' port attributes default to port 0.
        out_port = edge[2].get('out', 0)
        in_port = edge[2].get('in', 0)
        connect_nodes_by_name(graph, edge[0], out_port, edge[1], in_port)
    graph.clean_up()
    return graph
def test_run(s3_stubber, caplog, reset_unmatched):
    """
    Test that the command updates the specified records (ignoring ones with errors).
    If `reset_unmatched` is False, the existing records not in the CSV are kept untouched,
    otherwise they are set to None.
    """
    caplog.set_level('ERROR')
    new_one_list_tier = random_obj_for_model(OneListTier)
    # Companies that already have a tier/owner (pick any tier except the new one
    # so "changed to new tier" is always a real change).
    one_list_companies = CompanyFactory.create_batch(
        8,
        one_list_tier=factory.LazyFunction(
            lambda: random_obj_for_queryset(
                OneListTier.objects.exclude(pk=new_one_list_tier.pk),
            ),
        ),
        one_list_account_owner=factory.SubFactory(AdviserFactory),
    )
    # Companies starting with no tier/owner at all.
    non_one_list_companies = CompanyFactory.create_batch(
        3,
        one_list_tier=None,
        one_list_account_owner=None,
    )
    # Snapshot current field values so assert_changed/assert_did_not_change
    # can compare against them after the command runs.
    for company in chain(one_list_companies, non_one_list_companies):
        save_prev_fields(company, 'one_list_tier_id', 'one_list_account_owner_id')
    advisers = AdviserFactory.create_batch(4)
    bucket = 'test_bucket'
    object_key = 'test_key'
    # CSV rows cover: unknown company, no-op row, owner-only change, tier-only
    # change, explicit nulls, and rows with unknown tier/adviser IDs (errors).
    csv_content = f"""id,one_list_tier_id,one_list_account_owner_id
00000000-0000-0000-0000-000000000000,test,test
{one_list_companies[0].pk},{one_list_companies[0].one_list_tier_id},{one_list_companies[0].one_list_account_owner_id}
{one_list_companies[1].pk},{one_list_companies[1].one_list_tier_id},{advisers[0].pk}
{one_list_companies[2].pk},{new_one_list_tier.pk},{one_list_companies[2].one_list_account_owner_id}
{one_list_companies[3].pk},null,null
{one_list_companies[4].pk},00000000-0000-0000-0000-000000000000,{advisers[1].pk}
{one_list_companies[5].pk},{new_one_list_tier.pk},00000000-0000-0000-0000-000000000000
{non_one_list_companies[0].pk},{new_one_list_tier.pk},{advisers[2].pk}
{non_one_list_companies[1].pk},00000000-0000-0000-0000-000000000000,{advisers[3].pk}
{non_one_list_companies[2].pk},{new_one_list_tier.pk},00000000-0000-0000-0000-000000000000
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={
            'Bucket': bucket,
            'Key': object_key,
        },
    )
    call_command('update_one_list_fields', bucket, object_key, reset_unmatched=reset_unmatched)
    for company in chain(one_list_companies, non_one_list_companies):
        company.refresh_from_db()
    # assert exceptions: one log record per bad row, in CSV order
    assert len(caplog.records) == 5
    assert 'Company matching query does not exist' in caplog.records[0].exc_text
    assert 'OneListTier matching query does not exist' in caplog.records[1].exc_text
    assert 'Advisor matching query does not exist' in caplog.records[2].exc_text
    assert 'OneListTier matching query does not exist' in caplog.records[3].exc_text
    assert 'Advisor matching query does not exist' in caplog.records[4].exc_text
    # one_list_companies[0]: nothing changed
    assert_did_not_change(one_list_companies[0], 'one_list_tier_id', 'one_list_account_owner_id')
    # one_list_companies[1]: only one_list_account_owner_id changed
    assert_changed(one_list_companies[1], 'one_list_account_owner_id')
    assert_did_not_change(one_list_companies[1], 'one_list_tier_id')
    assert one_list_companies[1].one_list_account_owner == advisers[0]
    # one_list_companies[2]: only one_list_tier_id changed
    assert_did_not_change(one_list_companies[2], 'one_list_account_owner_id')
    assert_changed(one_list_companies[2], 'one_list_tier_id')
    assert one_list_companies[2].one_list_tier == new_one_list_tier
    # one_list_companies[3]: all changed ('null' in the CSV clears both fields)
    assert_changed(one_list_companies[3], 'one_list_tier_id', 'one_list_account_owner_id')
    assert one_list_companies[3].one_list_tier_id is None
    assert one_list_companies[3].one_list_account_owner_id is None
    # one_list_companies[4]: nothing changed (unknown tier ID -> row skipped)
    assert_did_not_change(one_list_companies[4], 'one_list_tier_id', 'one_list_account_owner_id')
    # one_list_companies[5]: nothing changed (unknown adviser ID -> row skipped)
    assert_did_not_change(one_list_companies[5], 'one_list_tier_id', 'one_list_account_owner_id')
    # non_one_list_companies[0]: all changed
    assert_changed(non_one_list_companies[0], 'one_list_tier_id', 'one_list_account_owner_id')
    assert non_one_list_companies[0].one_list_account_owner == advisers[2]
    assert non_one_list_companies[0].one_list_tier == new_one_list_tier
    # non_one_list_companies[1]: nothing changed
    assert_did_not_change(
        non_one_list_companies[1], 'one_list_tier_id', 'one_list_account_owner_id',
    )
    # non_one_list_companies[2]: nothing changed
    assert_did_not_change(
        non_one_list_companies[2], 'one_list_tier_id', 'one_list_account_owner_id',
    )
    # one_list_companies[6] / [7]: if reset_unmatched == False => nothing changed else all changed
    if reset_unmatched:
        assert_changed(one_list_companies[6], 'one_list_tier_id', 'one_list_account_owner_id')
        assert_changed(one_list_companies[7], 'one_list_tier_id', 'one_list_account_owner_id')
        assert one_list_companies[6].one_list_tier is None
        assert one_list_companies[6].one_list_account_owner is None
        assert one_list_companies[7].one_list_tier is None
        assert one_list_companies[7].one_list_account_owner is None
    else:
        assert_did_not_change(
            one_list_companies[6], 'one_list_tier_id', 'one_list_account_owner_id',
        )
        assert_did_not_change(
            one_list_companies[7], 'one_list_tier_id', 'one_list_account_owner_id',
        )
def test_get_status_api_down(mocker, connector):
    """
    It should fail if the third-party api is down.
    """
    # Patch the module-level requests object and make GET raise.
    requests_mock = mocker.patch(f'{import_path}.requests')
    requests_mock.get.side_effect = HttpError
    assert connector.get_status().status is False
def cached_value(func: Callable[[], Any], path) -> Any:
    """
    Tries to load data from the pickle file. If the file doesn't exist, the func() method is run and its results
    are saved into the file. Then the result is returned.

    :param func: zero-argument callable producing the value to cache
    :param path: filesystem path of the pickle cache file
    :return: the cached (or freshly computed) value
    :raises CachedValueException: if func() raises it; logged and re-raised
    """
    if exists(path):
        # Cache hit: load the previously pickled result.
        with open(path, 'rb') as file:
            result = pickle.load(file)
    else:
        try:
            result = func()
            with open(path, 'wb') as file:
                # Protocol 3 keeps the cache readable by any Python 3.
                pickle.dump(result, file, protocol=3)
        except CachedValueException:
            logger = qf_logger.getChild(__name__)
            logger.error('Error while processing {}'.format(func))
            # BUG FIX: the original fell through to `return result` with
            # `result` unbound, raising UnboundLocalError; re-raise instead.
            raise
    return result
def copy_images(root_path):
    """Copy images.
    Will be done from images directory of subdirectories to images directory
    in the src directory

    :param root_path: destination source tree whose 'images' directory
        collects the images found in subdirectories.
    """
    import shlex  # local import: only needed here for safe shell quoting

    subdir_list = []
    # walk through the src directory to find subdirectories named 'images'
    # and copy contents to the 'images' directory in the duplicate src
    # directory
    for root, dirs, files in os.walk(root_path):
        if 'images' in dirs:
            subdir_list.append(root)
    for each in subdir_list:
        # Skip the destination itself so we don't copy images onto themselves.
        if each != root_path:
            # BUG FIX: quote both paths so directories containing spaces or
            # shell metacharacters cannot break (or inject into) the command.
            run_shell_cmd("cp -R {} {}".format(
                shlex.quote(each + "/images"),
                shlex.quote(root_path + "/images/")))
def expensehistory():
    """Show history of expenses or let the user update existing expense"""
    # User reached route via GET
    if request.method == "GET":
        # Get all of the users expense history ordered by submission time
        history = tendie_expenses.getHistory(session["user_id"])
        # Get the users spend categories
        categories = tendie_categories.getSpendCategories(session["user_id"])
        # Get the users payers (for modal)
        payers = tendie_account.getPayers(session["user_id"])
        return render_template("expensehistory.html", history=history, categories=categories, payers=payers, isDeleteAlert=False)
    # User reached route via POST
    else:
        # Initialize users action
        userHasSelected_deleteExpense = False
        # Determine what action was selected by the user (button/form trick from: https://stackoverflow.com/questions/26217779/how-to-get-the-name-of-a-submitted-form-in-flask)
        if "btnDeleteConfirm" in request.form:
            userHasSelected_deleteExpense = True
        elif "btnSave" in request.form:
            userHasSelected_deleteExpense = False
        else:
            # Neither known button was pressed — unexpected form submission.
            return apology("Doh! Spend Categories is drunk. Try again!")
        # Get the existing expense record ID from the DB and build a data structure to store old expense details
        oldExpense = tendie_expenses.getExpense(
            request.form, session["user_id"])
        # Make sure an existing record was found otherwise render an error message
        if oldExpense["id"] == None:
            return apology("The expense record you're trying to update doesn't exist")
        # Delete the existing expense record
        if userHasSelected_deleteExpense == True:
            # Delete the old record from the DB
            deleted = tendie_expenses.deleteExpense(
                oldExpense, session["user_id"])
            if not deleted:
                return apology("The expense was unable to be deleted")
            # Get the users expense history, spend categories, payers, and then render the history page w/ delete alert
            history = tendie_expenses.getHistory(session["user_id"])
            categories = tendie_categories.getSpendCategories(
                session["user_id"])
            payers = tendie_account.getPayers(session["user_id"])
            return render_template("expensehistory.html", history=history, categories=categories, payers=payers, isDeleteAlert=True)
        # Update the existing expense record
        else:
            # Update the old record with new details from the form
            expensed = tendie_expenses.updateExpense(
                oldExpense, request.form, session["user_id"])
            if not expensed:
                return apology("The expense was unable to be updated")
            # Redirect to results page and render a summary of the updated expense
            return render_template("expensed.html", results=expensed)
def resize(a, new_shape):
    """resize(a,new_shape) returns a new array with the specified shape.
    The original array's total size can be any size.

    NOTE(review): this uses the legacy Numeric API (``a.typecode()``,
    ``multiply.reduce``) — it predates modern NumPy (which uses ``dtype``).
    Confirm which array package the surrounding file imports.
    """
    # Flatten so the data can be tiled regardless of the input shape.
    a = ravel(a)
    # Empty input: nothing to repeat, so return zeros of the requested shape.
    if not len(a): return zeros(new_shape, a.typecode())
    total_size = multiply.reduce(new_shape)
    n_copies = int(total_size / len(a))
    extra = total_size % len(a)
    # When the new size is not a multiple of len(a), tile one extra copy
    # and trim the surplus off the end.
    if extra != 0:
        n_copies = n_copies+1
        extra = len(a)-extra
    a = concatenate( (a,)*n_copies)
    if extra > 0:
        a = a[:-extra]
    return reshape(a, new_shape)
def rotate180(image_np):
    """Rotates the given image by 180 degrees."""
    if image_np is None:
        return None
    # Two successive 90-degree rotations are the same as flipping
    # both the vertical and horizontal axes.
    return np.rot90(image_np, 2)
def proxy_view(request, url, domain=None, secure=False, requests_args=None, template_name="proxy/debug.html"):
    """
    Forward as close to an exact copy of the request as possible along to the
    given url. Respond with as close to an exact copy of the resulting
    response as possible.
    If there are any additional arguments you wish to send to requests, put
    them in the requests_args dictionary.

    NOTE(review): uses ``iteritems()`` and deletes dict keys while iterating
    ``keys()`` — this is Python 2 code; it would need changes for Python 3.
    The ``domain`` parameter is accepted but unused here — confirm callers.
    """
    # Copy so the caller's dict is never mutated.
    requests_args = (requests_args or {}).copy()
    headers = get_headers(request.META)
    params = request.GET.copy()
    proxy_domain = settings.PROXY_DOMAIN
    protocol = 'http'
    if secure:
        protocol = 'https'
    # Rebuild the upstream URL, stripping a leading slash to avoid '//'.
    url = '%s://%s/%s' % (protocol, proxy_domain, url[1:] if url.startswith('/') else url)
    # Fill in defaults for anything the caller didn't specify explicitly.
    if 'headers' not in requests_args:
        requests_args['headers'] = {}
    if 'data' not in requests_args:
        requests_args['data'] = request.body
    if 'params' not in requests_args:
        requests_args['params'] = QueryDict('', mutable=True)
    if 'cookies' not in requests_args and getattr(settings, 'PROXY_SET_COOKIES', False):
        # Strip the raw Cookie header; cookies are forwarded explicitly below.
        headers = dict([ (kk, vv) for kk, vv in headers.items() if kk.lower() != 'cookie' ])
        requests_args['cookies'] = get_cookies(request, proxy_domain)
    # Overwrite any headers and params from the incoming request with explicitly
    # specified values for the requests library.
    headers.update(requests_args['headers'])
    params.update(requests_args['params'])
    # If there's a content-length header from Django, it's probably in all-caps
    # and requests might not notice it, so just remove it.
    for key in headers.keys():
        if key.lower() == 'content-length':
            del headers[key]
    requests_args['headers'] = headers
    requests_args['params'] = params
    # In DEBUG, keep redirects visible so the debug template can show them.
    if settings.DEBUG and request.method != 'HEAD':
        requests_args['allow_redirects'] = False
    response = requests.request(request.method, url, **requests_args)
    if getattr(settings, 'PROXY_SET_COOKIES', False):
        set_cookies(request, proxy_domain, response.cookies)
    content_type = response.headers['content-type']
    content = response.content
    show_debug = False
    # Rewrite proxied HTML/JS so embedded links point back through the proxy.
    if 'html' in content_type.lower():
        content = rewrite_response(content, proxy_domain, secure=secure)
        show_debug = settings.DEBUG
    elif 'javascript' in content_type.lower():
        content = rewrite_script(content, proxy_domain, secure=secure)
    if show_debug:
        # Render the debug template instead of returning the raw response.
        ctx = {
            'url': url,
            'requests_args': requests_args,
            'response': content,
            'headers': response.headers,
            'status': response.status_code,
        }
        if int(response.status_code) in (301, 302):
            redirection = response.headers['location']
            ctx['redirection'] = proxy_reverse(redirection, secure)
        proxy_response = render(request, template_name, ctx)
    else:
        proxy_response = HttpResponse(
            content,
            status=response.status_code)
    excluded_headers = set([
        # Hop-by-hop headers
        # ------------------
        # Certain response headers should NOT be just tunneled through. These
        # are they. For more info, see:
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1
        'connection', 'keep-alive', 'proxy-authenticate',
        'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
        'upgrade',
        # Although content-encoding is not listed among the hop-by-hop headers,
        # it can cause trouble as well. Just let the server set the value as
        # it should be.
        'content-encoding',
        # Since the remote server may or may not have sent the content in the
        # same encoding as Django will, let Django worry about what the length
        # should be.
        'content-length',
    ])
    for key, value in response.headers.iteritems():
        if key.lower() in excluded_headers:
            continue
        proxy_response[key] = value
    return proxy_response
def poll_for_staleness(id_or_elem, wait=10, frequency=1):
    """Use WebDriverWait with expected_conditions.staleness_of to wait for an
    element to be no longer attached to the DOM.
    :argument id_or_elem: The identifier of the element, or its element object.
    :argument wait: The amount of seconds to wait before throwing a
    TimeoutException.
    :argument frequency: The amount of seconds between each poll.
    :return: False if the element is still attached to the DOM, true otherwise.
    """
    element = _get_elem(id_or_elem)
    logger.debug('Waiting for element to be removed from DOM: {}'
                 .format(element))
    waiter = WebDriverWait(_test.browser, wait, poll_frequency=frequency)
    try:
        return waiter.until(EC.staleness_of(element))
    except TimeoutException:
        _raise("Element was not removed from the DOM.")
def configure_parser(parser):
    """Configure parser for this action """
    # Register the shared worktree options (e.g. --worktree path).
    qisys.parsers.worktree_parser(parser)
    # Register the shared project-selection options.
    qisys.parsers.project_parser(parser)
def test_extraplate():
    """
    Test basic stuff.
    """
    # NaNs at both ends should be filled with the nearest interior value (9).
    raw = np.array([np.nan, np.nan, 9, 9, 9, 9, 9, 2, 3, 9, 2, 1, 1, 3, 4, 9, 9, 9, np.nan, np.nan])
    expected = np.array([9, 9, 9, 9, 9, 9, 9, 2, 3, 9, 2, 1, 1, 3, 4, 9, 9, 9, 9, 9])
    filled = extrapolate(raw)
    assert len(raw) == len(filled)
    assert np.allclose(filled, expected)
def check_y(y, allow_empty=False, allow_constant=True):
    """Validate input data.
    Parameters
    ----------
    y : pd.Series
    allow_empty : bool, optional (default=False)
        If False, empty `y` raises an error.
    allow_constant : bool, optional (default=True)
        If False, constant `y` raises an error.
    Returns
    -------
    y : pd.Series
    Raises
    ------
    ValueError, TypeError
        If y is an invalid input
    """
    # Check if pandas Series or DataFrame (single isinstance with a tuple of
    # types; error message now mentions both accepted types).
    if not isinstance(y, (pd.Series, pd.DataFrame)):
        raise TypeError(
            f"`y` must be a pandas Series or DataFrame, but found type: {type(y)}"
        )
    if not allow_constant:
        if np.all(y == y.iloc[0]):
            raise ValueError("All values of `y` are the same.")
    # check time index
    check_time_index(y.index, allow_empty=allow_empty)
    return y
def show_disabled_locators():
    """ Print in console the locators that are currently disabled,
    i.e. available locators minus the enabled ones.
    :return: None
    """
    enabled = set(_get_enabled_locators_list())
    available = set(config.DEFAULT_LOCATORS_PREFERENCE)
    # Set difference yields everything available but not enabled.
    _print_locators_list("Disabled locators:", available - enabled)
def preprocess_features(features):
    """Row-normalize feature matrix and convert to tuple representation"""
    row_sums = np.array(features.sum(1), dtype=float)
    inv_sums = np.power(row_sums, -1).flatten()
    # Rows summing to zero produce inf; treat those rows as all-zero instead.
    inv_sums[np.isinf(inv_sums)] = 0.
    normalized = sp.diags(inv_sums).dot(features)
    return sparse_to_tuple(normalized)
def CheckReachability(urls, http_client=None):
    """Check whether the hosts of given urls are reachable.
    Args:
      urls: iterable(str), The list of urls to check connection to.
      http_client: httplib2.Http, an object used by gcloud to make http and https
        connections. Defaults to an non-authenticated Http object from the
        googlecloudsdk.core.credentials.http module.
    Returns:
      list(Failure): Reasons for why any urls were unreachable. The list will be
        empty if all urls are reachable.

    NOTE(review): uses the ``httplib`` module, so this is Python 2 code.
    """
    if not http_client:
        http_client = http.Http(auth=False)
    failures = []
    for url in urls:
        try:
            # HEAD avoids downloading the response body; we only need status.
            response, _ = http_client.request(url, method='HEAD')
        # TODO(user): Investigate other possible exceptions that might be thrown.
        except (httplib.HTTPException, socket.error, ssl.SSLError,
                httplib2.HttpLib2Error) as err:
            message = 'Cannot reach {0} ({1})'.format(url, type(err).__name__)
            failures.append(Failure(message=message, exception=err))
        else:
            # The request succeeded at the transport level, but any non-200
            # status still counts as unreachable.
            if response.status != httplib.OK:
                message = 'Cannot reach {0} ([{1}] {2})'.format(url, response.status,
                                                                response.reason)
                failures.append(Failure(message=message, response=response))
    return failures
def dataset_prediction_results(dataset, event, model_factory_fn=pohmm_factory,
                               min_history=90, max_history=None, out_name=None):
    """
    Obtain predictions for each model.
    Create stratified folds
    Train on 1-n_folds. Use the last fold to make predictions for each event

    :param dataset: name/path understood by load_data
    :param event: event column used for preprocessing and conditioning
    :param model_factory_fn: callable building a model from a history DataFrame
    :param min_history: smallest number of past events required before predicting
    :param max_history: optional cap on history length (None = use all)
    :param out_name: basename for the saved results file
    """
    print('Running:', out_name, flush=True)
    # Load and preprocess the dataset
    df = load_data(dataset)
    # from .data import reduce_dataset
    # df = reduce_dataset(df, num_users=5, min_samples=1, max_samples=1)
    df = preprocess_data(df, event, ['tau'])
    # fold, ref user, query user, query session, into future, event, ground truth, prediction
    baseline_col = 'baseline_tau'
    prediction_col = 'prediction_tau'
    # Progress is tracked per unique index (user/session) group.
    work_done = 0
    work = len(df.index.unique())
    progress = ProgressBar(work)
    progress.animate(work_done)

    def _predictions(df):
        # Walk forward through the group's history: train on df[:i],
        # predict event i, and compare with a mean-tau baseline.
        if max_history is None:
            upper = len(df) - 1
        else:
            upper = min(max_history, len(df) - 1)
        results = []
        for i in range(min_history, upper + 1):
            hmm = model_factory_fn(df[:i])
            pred = hmm.predict_df(df[:i], next_pstate=df.iloc[i]['event'])[0]
            # pred = hmm.predict_df(df[:i])[0]
            # Baseline: mean of all taus observed so far.
            baseline_pred = df['tau'].values[:i].mean(axis=0)
            results.append([i, df.iloc[i]['event'], df.iloc[i]['tau'], pred, baseline_pred])
        # `nonlocal` updates the shared progress counter across groups.
        nonlocal work_done
        work_done += 1
        progress.animate(work_done)
        results = pd.DataFrame(results, columns=['event_idx', 'event', 'tau', prediction_col, baseline_col])
        return results

    pred = df.groupby(level=[0, 1]).apply(_predictions)
    pred['SMAPE_tau'] = SMAPE(pred['tau'], pred[prediction_col])
    pred['SMAPE_baseline_tau'] = SMAPE(pred['tau'], pred[baseline_col])
    # Drop the extra index level added by the groupby-apply.
    pred = pred.reset_index(level=df.index.nlevels, drop=True)
    save_results(pred, out_name + '_predictions')
    return
def num(value):
    """Parse number as float or int."""
    as_float = float(value)
    try:
        as_int = int(value)
    except ValueError:
        # Not parseable as an int (e.g. "2.5", "1e3"): keep the float.
        return as_float
    # Prefer the int only when it represents the exact same value.
    if as_int == as_float:
        return as_int
    return as_float
def __ensure_testcase_module(path: Text) -> None:
    """ ensure pytest files are in python module, generate __init__.py on demand

    :param path: path of a generated pytest file; its directory is turned
        into a package by creating ``__init__.py`` next to it if missing.
    """
    # BUG FIX: return annotation was ``NoReturn``, which means "never returns
    # normally"; this function returns None, so the correct annotation is None.
    init_file = os.path.join(os.path.dirname(path), "__init__.py")
    if os.path.isfile(init_file):
        # Package marker already exists; nothing to do.
        return
    with open(init_file, "w", encoding="utf-8") as f:
        f.write("# NOTICE: Generated By HttpRunner. DO NOT EDIT!\n")
def __format_event_start_date_and_time(t):
    """Formats datetime into e.g. Tue Jul 30 at 5:00 PM"""
    # NOTE(review): %-d and %-I (no zero padding) are glibc extensions;
    # they are not supported by Windows strftime — confirm target platform.
    strftime_format = "%a %b %-d at %-I:%M %p"
    return t.strftime(strftime_format)
def collect_properties(service_instance, view_ref, obj_type, path_set=None,
                       include_mors=False):
    """
    Collect properties for managed objects from a view ref
    Check the vSphere API documentation for example on retrieving
    object properties:
        - http://goo.gl/erbFDz
    Args:
        si          (ServiceInstance): ServiceInstance connection
        view_ref (vim.view.*): Starting point of inventory navigation
        obj_type      (vim.*): Type of managed object
        path_set         (list): List of properties to retrieve
        include_mors     (bool): If True include the managed objects
                                 refs in the result
    Returns:
        A list of properties for the managed objects
    """
    collector = service_instance.content.propertyCollector
    # Create object specification to define the starting point of
    # inventory navigation
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
    obj_spec.obj = view_ref
    obj_spec.skip = True
    # Create a traversal specification to identify the path for collection
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
    traversal_spec.name = 'traverseEntities'
    traversal_spec.path = 'view'
    traversal_spec.skip = False
    traversal_spec.type = view_ref.__class__
    obj_spec.selectSet = [traversal_spec]
    # Identify the properties to the retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec()
    property_spec.type = obj_type
    # With no explicit path_set, fall back to fetching ALL properties
    # (significantly slower on large inventories).
    if not path_set:
        property_spec.all = True
    property_spec.pathSet = path_set
    # Add the object and property specification to the
    # property filter specification
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.objectSet = [obj_spec]
    filter_spec.propSet = [property_spec]
    # Retrieve properties
    props = collector.RetrieveContents([filter_spec])
    # Flatten each result into a plain dict of property name -> value.
    data = []
    for obj in props:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        if include_mors:
            # Keep a handle to the managed object reference itself.
            properties['obj'] = obj.obj
        data.append(properties)
    return data
def to_all_gpus(
        cpu_index: faiss.Index,
        co: Optional['faiss.GpuMultipleClonerOptions'] = None
) -> faiss.Index:
    """
    Clone a CPU FAISS index onto all available GPUs.

    :param cpu_index: the CPU-resident index to replicate
    :param co: optional cloner options forwarded to faiss
    :return: the GPU-resident index
    :raises AssertionError: if no GPUs are detected
    """
    n_gpus = faiss.get_num_gpus()
    assert n_gpus != 0, 'Attempting to move index to GPU without any GPUs'
    gpu_index = faiss.index_cpu_to_all_gpus(cpu_index, co=co)
    return gpu_index
def parse_template_mapping(
        template_mapping: List[str]
) -> MutableMapping[str, str]:
    """Parses a string template map from <key>=<value> strings."""
    # Split each entry on the first '=' only, so values may themselves
    # contain '='. Later duplicates of a key overwrite earlier ones.
    return {key: value
            for key, value in (entry.split("=", 1) for entry in template_mapping)}
def _AllDirsToRoot(dir):
    """Return all parent paths of a directory."""
    current = os.path.abspath(dir)
    while True:
        yield current
        parent = os.path.dirname(current)
        # Stop once dirname no longer shrinks the path (filesystem root).
        if parent in ("", current):
            return
        current = parent
def isGZ(fn):
    """
    Tests whether a file is gz-compressed.
    :param fn: a filename
    :type fn: str
    :returns: True if fn is gz-compressed otherwise False
    """
    assert os.path.exists(fn)
    # A gzip stream always starts with the two magic bytes 0x1f 0x8b.
    with open(fn, 'rb') as handle:
        magic = handle.read(2)
    return magic == b'\x1f\x8b'
def select_dim_over_nm(max_n, max_m, d, coef_nd, coef_md, coef_nm, coef_n, coef_m, rest, max_mem):
    """Finds the optimal values for `n` and `m` to fit in available memory.
    This function should be called for problems where the GPU needs to hold
    two blocks of data (one of size m, one of size n) and one kernel block
    (of size n x m).
    Parameters
    -----------
    max_n : int
        The maximum value for n (the first dimension of the problem)
    max_m : int
        The maximum value for m (the second dimension of the problem)
    d : int
        The dimensionality of the data
    coef_nd : float
        How many n*d blocks need to be held in memory
    coef_md : float
        How many m*d blocks need to be held in memory
    coef_nm : float
        How many m*n blocks need to be held in memory
    coef_n : float
        How many n-dimensional vectors need to be held in memory
    coef_m : float
        How many m-dimensional vectors need to be held in memory
    rest : float
        additional bytes to be kept in memory
    max_mem : float
        The amount of available memory in bytes. This is the main problem constraint
    Returns
    -------
    out_n : int
        The dimension n to use in order to fit in available memory
    out_m : int
        The dimension m to use in order to fit in available memory
    Notes
    ------
    The equation gives a hyperbola. We intersect the hyperbola
    with a line from the origin, with the slope given by the ratio
    of max_m and max_n. We then solve a quadratic equation to find
    the intersection point.
    """
    # Keep n and m in the same ratio as their maxima: m = fac * n.
    fac = max_m / max_n
    if coef_nm == 0 and (coef_nd == 0 and coef_md == 0 and coef_n == 0 and coef_m == 0):
        # Memory use does not depend on n or m at all: take the maximum.
        v_n = max_n
    elif coef_nm == 0:
        # No n*m term: the constraint is linear in n after substituting m.
        v_n = solve_lin(b=d * (coef_nd + fac * coef_md) + coef_n + coef_m * fac,
                        c=rest - max_mem)
    else:
        # Full case: quadratic in n after substituting m = fac * n.
        v_n = solve_quad(a=fac * coef_nm,
                         b=d * (fac * coef_md + coef_nd) + fac * coef_m + coef_n,
                         c=rest - max_mem)
    v_m = fac * v_n
    # Never exceed the requested maxima.
    out_n = int(min(v_n, max_n))
    out_m = int(min(v_m, max_m))
    if out_n <= 0 or out_m <= 0:
        raise MemoryError("Available memory %.2fMB is not enough." % (max_mem / 2**20))
    return out_n, out_m
def _file_extension(filename):
"""Return file extension without the dot"""
# openbabel expects the extension without the dot, but os.path.splitext
# returns the extension with it
dotext = os.path.splitext(filename)[1]
return dotext[1:] | 28,632 |
def atomic_tmp_file(final_path):
    """Return a temporary file name for use with atomic_install.

    The name lives in the same directory as *final_path* and carries the
    same extension.  If the final path is in /dev (/dev/null, /dev/stdout),
    it is returned unchanged and atomic_tmp_install will do nothing.
    """
    directory = os.path.dirname(os.path.normpath(final_path))  # can be empty
    if directory == '/dev':
        return final_path
    basename = os.path.basename(final_path)
    extension = os.path.splitext(final_path)[1]
    # Embed a UUID so concurrent writers never collide on the tmp name.
    tmp_basename = "{}.{}.tmp{}".format(basename, uuid.uuid4(), extension)
    return os.path.join(directory, tmp_basename)
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):
    """
    Vertical magnetic field of a harmonic magnetic dipole over a halfspace:
    4.56 in Ward and Hohmann

    :param r: radial offset(s) from the dipole (m); absolute value is taken
    :param freq: frequency or array of frequencies (Hz)
    :param sigma: halfspace conductivity (S/m)
    :param bool secondary: if True, subtract the free-space (primary) field
    :param mu: magnetic permeability, defaults to mu_0
    :return: complex H_z; returned as a column vector when the result is 1-D

    .. plot::
        import matplotlib.pyplot as plt
        from SimPEG import EM
        freq = np.logspace(-1, 6, 61)
        test = EM.Analytics.FDEM.hzAnalyticDipoleF(100, freq, 0.001, secondary=False)
        plt.loglog(freq, abs(test.real))
        plt.loglog(freq, abs(test.imag))
        plt.title('Response at $r$=100m')
        plt.xlabel('Frequency')
        plt.ylabel('Response')
        plt.legend(('real','imag'))
        plt.show()
    """
    r = np.abs(r)
    # Complex wavenumber k^2 = -i*omega*mu*sigma (quasi-static approximation)
    k = np.sqrt(-1j*2.*np.pi*freq*mu*sigma)
    # Unit dipole moment
    m = 1
    front = m / (2. * np.pi * (k**2) * (r**5) )
    back = 9 - ( 9 + 9j * k * r - 4 * (k**2) * (r**2) - 1j * (k**3) * (r**3)) * np.exp(-1j*k*r)
    hz = front*back
    if secondary:
        # Free-space (primary) field of a vertical dipole at offset r
        hp =-1/(4*np.pi*r**3)
        hz = hz-hp
    if hz.ndim == 1:
        # Return a column vector for consistency with the rest of SimPEG
        hz = Utils.mkvc(hz,2)
    return hz
def dockerCall(job,
               tool,
               parameters=None,
               workDir=None,
               dockerParameters=None,
               outfile=None,
               defer=None):
    """
    Run a Docker container for the given tool, blocking until it finishes.

    Throws CalledProcessorError if the Docker invocation returns a non-zero exit code
    This function blocks until the subprocess call to Docker returns
    :param toil.Job.job job: The Job instance for the calling function.
    :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).
    :param list[str] parameters: Command line arguments to be passed to the tool.
           If list of lists: list[list[str]], then treat as successive commands chained with pipe.
    :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data
    :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,
            `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.
             These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.
    :param file outfile: Pipe output of Docker call to file handle
    :param int defer: What action should be taken on the container upon job completion?
           FORGO (0) will leave the container untouched.
           STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).
           RM (2) will stop the container and then forcefully remove it from the system
           using `docker rm -f`. This is the default behavior if defer is set to None.
    """
    # Delegate to the shared _docker helper; checkOutput=False means stdout is
    # not captured and returned (it may still be redirected via `outfile`).
    _docker(job, tool=tool, parameters=parameters, workDir=workDir, dockerParameters=dockerParameters,
            outfile=outfile, checkOutput=False, defer=defer)
def test_generate_signature():
    """Test we can get correct signature."""
    # Known-answer test: HMAC of 'test_msg' with key 'test_key' must be stable.
    data = signature.generate_signature('test_key', 'test_msg')
    assert data == b'TC9wro8movj4HGMphrpEdES3oBdPsq+y+2tAt6kM/Tw='
def test_parse_date():
    """
    Check that the date is parsed correctly, and that the output types
    are string.
    """
    year, month, day = standardcitations.parse_date("1982-06-23")
    assert year == "1982"
    assert day == "23"
    # Note: the month is expected *without* zero padding ("6", not "06").
    assert month == "6"
def _compute_net_budget(recarray, zonenamedict):
    """
    Compute net (inflow minus outflow) zone budgets from budget records.

    Parameters
    ----------
    recarray : np.recarray
        Zone budget records.  Inflow records are named "FROM_*" or "*_IN",
        outflow records "TO_*" or "*_OUT".
    zonenamedict : dict
        Maps zone numbers to the zone field names present in ``recarray``.

    Returns
    -------
    np.recarray
        One record per flow term with the net (in - out) value for each
        zone field; the directional affix (FROM_/TO_ prefix or _IN/_OUT
        suffix) is stripped from the record names.
    """
    recnames = _get_record_names(recarray)
    innames = [
        n for n in recnames if n.startswith("FROM_") or n.endswith("_IN")
    ]
    outnames = [
        n for n in recnames if n.startswith("TO_") or n.endswith("_OUT")
    ]
    select_fields = ["totim", "time_step", "stress_period", "name"] + list(
        zonenamedict.values()
    )
    if "totim" not in recarray.dtype.names:
        # Older budget output does not carry total simulation time.
        select_fields.pop(0)
    # np.in1d is deprecated since NumPy 1.25; np.isin is the replacement.
    select_records_in = np.isin(recarray["name"], innames)
    select_records_out = np.isin(recarray["name"], outnames)
    in_budget = recarray[select_fields][select_records_in]
    out_budget = recarray[select_fields][select_records_out]
    # NOTE(review): assumes in/out records are paired one-to-one and appear
    # in the same order, as produced upstream -- confirm against the caller.
    net_budget = in_budget.copy()
    for f in [n for n in zonenamedict.values() if n in select_fields]:
        net_budget[f] = np.array([r for r in in_budget[f]]) - np.array(
            [r for r in out_budget[f]]
        )
    # Strip the directional affix from each record name.
    newnames = []
    for n in net_budget["name"]:
        if n.endswith("_IN") or n.endswith("_OUT"):
            newnames.append("_".join(n.split("_")[:-1]))
        else:
            newnames.append("_".join(n.split("_")[1:]))
    net_budget["name"] = newnames
    return net_budget
def check_thrown_events_histogram(thrown_events_hist1, thrown_events_hist2):
    """
    Check that two ThrownEventsHistogram class are compatible with each other

    Parameters
    ----------
    thrown_events_hist1: `lstchain.io.lstcontainers.ThrownEventsHistogram`
    thrown_events_hist2: `lstchain.io.lstcontainers.ThrownEventsHistogram`

    Raises
    ------
    ValueError
        If the two histograms have different keys, or if any bin edge of
        "bins_energy" / "bins_core_dist" differs between the two.
    """
    keys1 = set(thrown_events_hist1.keys())
    keys2 = set(thrown_events_hist2.keys())
    if keys1 != keys2:
        different = keys1.symmetric_difference(keys2)
        raise ValueError(f'Histogram keys do not match, differing keys: {different}')
    # It does not matter that the number of simulated showers is the same
    keys = ["bins_energy", "bins_core_dist"]
    for k in keys:
        # BUG FIX: histograms are incompatible as soon as a *single* bin edge
        # differs, so use .any(); the previous .all() only raised when every
        # edge differed, silently accepting partially mismatched binnings.
        if (thrown_events_hist1[k] != thrown_events_hist2[k]).any():
            raise ValueError(f'Key {k} does not match for histograms')
def CheckAllNonHistoricalVariables(model_part):
    """Check the consistency of all non-historical variables."""
    # Apply the same consistency check to every container of the model part.
    for container in (model_part.Nodes, model_part.Elements, model_part.Conditions):
        for variable in GetHistoricalVariableList(model_part, container):
            debug_utilities.ChecknonHistoricalVariable(model_part, container, variable)
def bg_white(msg):
    """ return msg with a white background """
    # Delegates to the module-level style helper using the ANSI code for a
    # white background from the colour lookup table.
    return __apply_style(__background_colors['white'],msg)
def read_image_from_s3(bucket_name, key):
    """S3 to PIL Image"""
    # Resolve bucket/key to an S3 object and stream its body into PIL.
    s3_object = boto3.resource('s3').Bucket(bucket_name).Object(key)
    body = s3_object.get()['Body']
    return Image.open(body)
def count_indra_apis(graph: BELGraph) -> typing.Counter[str]:
    """Count the APIs reported by INDRA."""
    api_counter = Counter()
    for _, _, data in graph.edges(data=True):
        # Only edges annotated by INDRA contribute.
        if ANNOTATIONS not in data or 'INDRA_API' not in data[ANNOTATIONS]:
            continue
        for api in data[ANNOTATIONS]['INDRA_API']:
            # Skip empty entries, non-strings, and the literal 'nan'.
            if api and isinstance(api, str) and api != 'nan':
                api_counter[api] += 1
    return api_counter
def integrate_sed(wavelength, flambda, wlmin=None, wlmax=None):
    """
    Calculate the flux in an SED by direct integration.

    A direct trapezoidal rule integration is carried out on the flambda
    values and the associated wavelength values.

    Parameters
    ----------
    wavelength: A numpy float array of wavelength values, normally in
                microns
    flambda:    A numpy float array of flux density values, normally
                F_lambda in W/m^2/micron
    wlmin:      An optional float value for the minimum wavelength of
                the calculation, or None to have no lower limit aside
                from the data range
    wlmax:      An optional float value for the maximum wavelength of
                the calculation, or None to have no upper limit aside
                from the data range

    Returns
    -------
    flux:       The float value, the estimated total flux, nominally in
                W/m^2 if the input units are microns and W/m^2/micron; if
                the wavelength range is bad or the two arrays do not match
                in length a value of zero is returned
    """
    if len(wavelength) != len(flambda):
        return 0.
    if wlmin is None:
        xmin = 0.9 * numpy.min(wavelength)
    else:
        xmin = wlmin
    if wlmax is None:
        xmax = 1.1 * numpy.max(wavelength)
    else:
        xmax = wlmax
    if (xmin >= xmax) or (len(wavelength) < 2):
        return 0.
    # Sort so that numpy.interp and the trapezoidal rule see monotonically
    # increasing abscissae.
    inds = numpy.argsort(wavelength)
    newwavelength = numpy.copy(wavelength[inds])
    newflambda = numpy.copy(flambda[inds])
    if (xmin > numpy.min(wavelength)) or (xmax < numpy.max(wavelength)):
        # BUG FIX 1: interpolate against the *sorted* arrays; numpy.interp
        # silently returns wrong values for unsorted x.
        fl1 = numpy.interp(xmin, newwavelength, newflambda)
        fl2 = numpy.interp(xmax, newwavelength, newflambda)
        # BUG FIX 2: compute the out-of-range masks once and assign the
        # endpoint fluxes *before* clamping the wavelengths.  The previous
        # code clamped first, so the masks were empty and fl1/fl2 were
        # never applied.
        below = newwavelength < xmin
        above = newwavelength > xmax
        newflambda[below] = fl1
        newflambda[above] = fl2
        newwavelength[below] = xmin
        newwavelength[above] = xmax
    # numpy.trapz was renamed numpy.trapezoid in NumPy 2.0.
    if hasattr(numpy, "trapezoid"):
        flux = numpy.trapezoid(newflambda, newwavelength)
    else:
        flux = numpy.trapz(newflambda, newwavelength)
    return flux
def unpack_unknowns(segment):
    """Unpacks the unknowns set in the mission to be available for the mission.

    Assumptions:
    N/A

    Source:
    N/A

    Inputs:
    segment.state.unknowns.throttle            [Unitless]
    segment.state.unknowns.body_angle          [Radians]
    segment.state.unknowns.flight_path_angle   [Radians]
    segment.state.unknowns.velocity            [meters/second]
    segment.altitude_start                     [meters]
    segment.altitude_end                       [meters]
    segment.air_speed_start                    [meters/second]
    segment.air_speed_end                      [meters/second]

    Outputs:
    segment.state.conditions.propulsion.throttle            [Unitless]
    segment.state.conditions.frames.body.inertial_rotations [Radians]
    conditions.frames.inertial.velocity_vector              [meters/second]

    Properties Used:
    N/A
    """
    # unpack unknowns and givens
    throttle = segment.state.unknowns.throttle
    theta    = segment.state.unknowns.body_angle
    gamma    = segment.state.unknowns.flight_path_angle
    vel      = segment.state.unknowns.velocity
    vel0     = segment.air_speed_start
    velf     = segment.air_speed_end
    # Overide the speeds: prepend the fixed start speed and, if a final speed
    # is prescribed, append it; otherwise the last interior unknown is free.
    if segment.air_speed_end is None:
        v_mag = np.concatenate([[[vel0]],vel])
    elif segment.air_speed_end is not None:
        v_mag = np.concatenate([[[vel0]],vel,[[velf]]])
    # Nudge exactly-zero unknowns to a tiny value, presumably to avoid
    # singularities in the downstream solver -- TODO confirm.
    # NOTE(review): this mutates the unknown arrays in place; v_mag above was
    # built with np.concatenate (a copy), so it is NOT affected by the clamp.
    if np.all(gamma == 0.):
        gamma[gamma==0.] = 1.e-16
    if np.all(vel == 0.):
        vel[vel==0.] = 1.e-16
    # process velocity vector: resolve speed along the flight path angle
    # (z is positive down in the inertial frame, hence the minus sign)
    v_x   =  v_mag * np.cos(gamma)
    v_z   = -v_mag * np.sin(gamma)
    # apply unknowns and pack conditions
    segment.state.conditions.propulsion.throttle[:,0]            = throttle[:,0]
    segment.state.conditions.frames.body.inertial_rotations[:,1] = theta[:,0]
    segment.state.conditions.frames.inertial.velocity_vector[:,0] = v_x[:,0]
    segment.state.conditions.frames.inertial.velocity_vector[:,2] = v_z[:,0]
def computeMSSIM(groundTruth, recovered):
    """
    Mean Structural SImilarity Measure (MSSIM) between a recovered image
    and its ground-truth reference.

    Args:
        :param groundTruth: ground truth reference image.
            numpy.ndarray (Height x Width x Spectral_Dimension)
        :param recovered: image under evaluation.
            numpy.ndarray (Height x Width x Spectral_Dimension)

    Returns:
        MSSIM between `recovered` and `groundTruth`
    """
    assert groundTruth.shape == recovered.shape, \
        "Size not match for groundtruth and recovered spectral images"
    # Work in float64 and clamp both images into the valid [0, 1] range.
    reference = np.clip(groundTruth.astype("float64"), 0, 1)
    estimate = np.clip(recovered.astype("float64"), 0, 1)
    # to get per-pixel SSIM values instead of the mean, pass full=True
    return compare_ssim(reference, estimate, multichannel=True)
def get_integrations_from_cache(ctx, python, bucket, branch, integrations_dir, target_dir, integrations, awscli="aws"):
    """
    Get cached integration wheels for given integrations.

    python: Python version to retrieve integrations for
    bucket: S3 bucket to retrieve integration wheels from
    branch: namespace in the bucket to get the integration wheels from
    integrations_dir: directory with Git repository of integrations
    target_dir: local directory to put integration wheels to
    integrations: comma-separated names of the integrations to try to retrieve from cache
    awscli: AWS CLI executable to call
    """
    # Map integration name -> hash of the last Git commit that touched its
    # directory; the hash is part of the cached wheel's path in the bucket.
    integrations_hashes = {}
    for integration in integrations.strip().split(","):
        integration_path = os.path.join(integrations_dir, integration)
        if not os.path.exists(integration_path):
            raise Exit(f"Integration {integration} given, but doesn't exist in {integrations_dir}", code=2)
        last_commit = ctx.run(
            LAST_DIRECTORY_COMMIT_PATTERN.format(integrations_dir=integrations_dir, integration=integration),
            hide="both",
            echo=False,
        )
        integrations_hashes[integration] = last_commit.stdout.strip()
    print(f"Trying to retrieve {len(integrations_hashes)} integration wheels from cache")
    # On windows, maximum length of a command line call is 8191 characters, therefore
    # we do multiple syncs that fit within that limit (we use 8100 as a nice round number
    # and just to make sure we don't do any of-by-one errors that would break this).
    # WINDOWS NOTES: on Windows, the awscli is usually in program files, so we have to wrap the
    # executable in quotes; also we have to not put the * in quotes, as there's no
    # expansion on it, unlike on Linux
    exclude_wildcard = "*" if platform.system().lower() == "windows" else "'*'"
    sync_command_prefix = (
        f"\"{awscli}\" s3 sync s3://{bucket} {target_dir} --no-sign-request --exclude {exclude_wildcard}"
    )
    # Each entry is [list of command fragments, running character count] so
    # we can append --include flags until the Windows length limit is hit.
    sync_commands = [[[sync_command_prefix], len(sync_command_prefix)]]
    for integration, hash in integrations_hashes.items():
        include_arg = " --include " + CACHED_WHEEL_FULL_PATH_PATTERN.format(
            hash=hash,
            integration=integration,
            python_version=python,
            branch=branch,
        )
        # Start a fresh sync command when adding this --include would
        # exceed the 8100-character budget.
        if len(include_arg) + sync_commands[-1][1] > 8100:
            sync_commands.append([[sync_command_prefix], len(sync_command_prefix)])
        sync_commands[-1][0].append(include_arg)
        sync_commands[-1][1] += len(include_arg)
    for sync_command in sync_commands:
        ctx.run("".join(sync_command[0]))
    found = []
    # move all wheel files directly to the target_dir, so they're easy to find/work with in Omnibus
    for integration in sorted(integrations_hashes):
        hash = integrations_hashes[integration]
        original_path_glob = os.path.join(
            target_dir,
            CACHED_WHEEL_FULL_PATH_PATTERN.format(
                hash=hash,
                integration=integration,
                python_version=python,
                branch=branch,
            ),
        )
        files_matched = glob.glob(original_path_glob)
        if len(files_matched) == 0:
            # Cache miss: this integration will have to be built from source.
            continue
        elif len(files_matched) > 1:
            raise Exit(
                f"More than 1 wheel for integration {integration} matched by {original_path_glob}: {files_matched}"
            )
        wheel_path = files_matched[0]
        print(f"Found cached wheel for integration {integration}")
        shutil.move(wheel_path, target_dir)
        found.append(f"datadog_{integration}")
    print(f"Found {len(found)} cached integration wheels")
    # Record which wheels came from cache so later build steps can skip them.
    with open(os.path.join(target_dir, "found.txt"), "w") as f:
        f.write('\n'.join(found))
def full_n_graphs(n_reps=3, full=False):
    """
    Test varying the number of input graphs.
    """
    varied_param = 'n_input_graphs'
    params = {
        varied_param: [2, 3, 4, 5, 6, 7],
        'duplicates': 5,
        'density_multiplier': 1.5,
        'p_keep_edge': 0.7,
        'g': 0.5,
        'f': 2,
    }
    # gap cost mirrors the f parameter
    params['gap_cost'] = params['f']
    params['n_entities'] = 50
    # full experiments use larger input graphs
    params['n_input_graph_nodes'] = 50 if full else 30
    params['max_iters'] = 500
    if full:
        title_prefix = 'full'
        use_cv = False
    else:
        title_prefix = 'partial'
        use_cv = True
    experiment_template(n_reps, params, varied_param, cv=use_cv,
                        title=title_prefix + '_n_graphs')
def test_cache_memoize_ttl(cache, timer):
    """Test that cache.memoize() can set a TTL."""
    ttl1 = 5
    ttl2 = ttl1 + 1
    @cache.memoize(ttl=ttl1)
    def func1(a):
        return a
    @cache.memoize(ttl=ttl2)
    def func2(a):
        return a
    # Populate one memoized entry per function.
    func1(1)
    func2(1)
    assert len(cache) == 2
    key1, key2 = tuple(cache.keys())
    # Just before the first TTL: both entries must still be present.
    timer.time = ttl1 - 1
    assert cache.has(key1)
    assert cache.has(key2)
    # At ttl1 the first entry expires, the second survives.
    timer.time = ttl1
    assert not cache.has(key1)
    assert cache.has(key2)
    # At ttl2 the second entry expires too.
    timer.time = ttl2
    assert not cache.has(key2)
def build_param_structure(params, target, value, index=None):
    """
    This method provides a basic reverse JMESPath implementation that
    lets you go from a JMESPath-like string to a possibly deeply nested
    object. The ``params`` are mutated in-place, so subsequent calls
    can modify the same element by its index.

    :param params: dict mutated in place to hold the built structure
    :param target: JMESPath-like key string, e.g. ``foo.bar[0].baz``
    :param value: value to store at the targeted location
    :param index: list index carried over from the caller; ``foo[]``
        appends, ``foo[n]`` uses the explicit index n

    >>> build_param_structure(params, 'test[0]', 1)
    >>> print(params)
    {'test': [1]}

    >>> build_param_structure(params, 'foo.bar[0].baz', 'hello world')
    >>> print(params)
    {'test': [1], 'foo': {'bar': [{'baz': 'hello world'}]}}
    """
    pos = params
    parts = target.split('.')
    # First, split into parts like 'foo', 'bar[0]', 'baz' and process
    # each piece. It can either be a list or a dict, depending on if
    # an index like `[0]` is present. We detect this via a regular
    # expression, and keep track of where we are in params via the
    # pos variable, walking down to the last item. Once there, we
    # set the value.
    for i, part in enumerate(parts):
        # Is it indexing an array?
        result = INDEX_RE.search(part)
        if result:
            if result.group(1):
                if result.group(1) == '*':
                    # Wildcard index: strip '[*]' and treat as append below.
                    part = part[:-3]
                else:
                    # We have an explicit index
                    index = int(result.group(1))
                    part = part[:-len(str(index) + '[]')]
            else:
                # Index will be set after we know the proper part
                # name and that it's a list instance.
                index = None
                part = part[:-2]
            if part not in pos or not isinstance(pos[part], list):
                pos[part] = []
            # This means we should append, e.g. 'foo[]'
            if index is None:
                index = len(pos[part])
            # Grow the list with dict placeholders up to the requested index.
            while len(pos[part]) <= index:
                # Assume it's a dict until we set the final value below
                pos[part].append({})
            # Last item? Set the value, otherwise set the new position
            if i == len(parts) - 1:
                pos[part][index] = value
            else:
                # The new pos is the *item* in the array, not the array!
                pos = pos[part][index]
        else:
            if part not in pos:
                pos[part] = {}
            # Last item? Set the value, otherwise set the new position
            if i == len(parts) - 1:
                pos[part] = value
            else:
                pos = pos[part]
def queues(request):
    """
    We get here from /queues

    Renders the queue list page with the queues reported by the
    JobTracker attached to the request.
    """
    return render("queues.html", request, { "queuelist" : request.jt.queues()})
def shorten_str(string, length=30, end=10):
    """Shorten a string to the given length.

    Strings longer than *length* are abbreviated by keeping the first
    ``length - end`` and the last ``end`` characters, joined by "...".
    ``None`` yields an empty string.
    """
    if string is None:
        return ""
    if len(string) <= length:
        return string
    head = string[:length - end]
    tail = string[-end:]
    return f"{head}...{tail}"
def A2cell(A):
    """Compute unit cell constants from A

    :param A: [G11,G22,G33,2*G12,2*G13,2*G23] G - reciprocal metric tensor
    :return: a,b,c,alpha, beta, gamma (degrees) - lattice parameters
    """
    # A2Gmat returns the reciprocal (G) and real (g) metric tensors; the
    # real-space tensor g yields the direct lattice parameters.
    G,g = A2Gmat(A)
    return Gmat2cell(g)
def main():
    """Demonstration main function that you can run just to see what it does with a few phrases."""
    logging.basicConfig(level=logging.INFO)
    # Build the classifier with its default configuration.
    rrc_classifier = RRCLanguageClassifier.default_instance()
    texts = ["This is English", "Esto es español"]
    for text in texts:
        # get_winner returns the best-scoring language for the text.
        winner = rrc_classifier.get_winner(text)
        print(f'{winner}: {text}')
def update_user_post(
    slug: str,
    post: schemas.PostCreate,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(get_current_active_user),
) -> Any:
    """
    Update a user Post if its owner

    Responds 404 when no post exists for the slug, 403 when the
    authenticated user is not the post's author; otherwise persists the
    update and returns the updated post.
    """
    post_data = get_post(db, slug)
    if post_data is None:
        raise HTTPException(status_code=404, detail="Don't find post")
    elif post_data.author_id != current_user.id:
        # Only the author may modify their own post.
        raise HTTPException(status_code=403, detail="Don't have permission")
    req_post = update_post(db=db, slug=slug, post=post)
    return req_post
def loadNetParameter(caffemodel):
    """ Return a NetParameter protocol buffer loaded from the caffemodel.

    :param caffemodel: path to a binary .caffemodel file
    :return: the parsed NetParameter, or None when the file cannot be read
        or parsed (best-effort semantics of the original are preserved)
    """
    from backend.caffe.path_loader import PathLoader
    proto = PathLoader().importProto()
    net = proto.NetParameter()
    try:
        with open(caffemodel, 'rb') as f:
            net.ParseFromString(f.read())
    except Exception:
        # Previously a bare `except: pass` fell off the end and returned an
        # implicit None; keep the best-effort behaviour but make it explicit
        # and stop swallowing KeyboardInterrupt/SystemExit.
        return None
    return net
def extract_keywords(header, *args):
    """
    For a given header, find all of the keys and return an unnested dict.

    ``header`` may be a file path / file-like object (handled by
    ``pvl.load``) or a raw PVL string (handled by ``pvl.loads``).  Each
    requested key maps to its found value, or None when lookup fails.
    """
    # Try file/path loading first, fall back to parsing a raw string.
    try:
        header = pvl.load(header)
    except:
        header = pvl.loads(header)
    res = {}
    # Iterate through all of the requested keys
    for a in args:
        try:
            # NOTE(review): find_in_dict is called with only the key -- it
            # looks like the parsed `header` should be passed too (e.g.
            # find_in_dict(header, a)); verify against its signature.
            res[a] = find_in_dict(a)
        except:
            # Any lookup failure degrades to None for that key.
            res[a] = None
    return res
def plot_parcel_stats_profile(fnames, figure="save", fmt="png", **kwargs):
    """
    Plot parcel statistics

    For each parcel-stats NetCDF file, plots the time profile of the mean
    of the selected dataset with a +/- one-standard-deviation band.

    fnames: list of parcel diagnostic NetCDF files
    figure: "save" to write a file, "return" to return the pyplot module,
            anything else shows the figure interactively
    fmt:    image format used when figure == "save"
    kwargs: labels, dset, no_xlabel, begin, end (see below)
    """
    n = len(fnames)
    labels = kwargs.pop("labels", n * [None])
    dset = kwargs.pop("dset", "aspect-ratio")
    no_xlabel = kwargs.pop("no_xlabel", False)
    # Optional step range [begin:end] used to slice the time series.
    beg = kwargs.pop("begin", None)
    end = kwargs.pop("end", None)
    # The dataset names inside the files use a space, not a hyphen.
    if dset == "aspect-ratio":
        dset = "aspect ratio"
    colors = plt.cm.tab10(np.arange(n).astype(int))
    if len(labels) < n:
        raise ValueError("Not enough labels provided.")
    ncreader = nc_reader()
    lmax = 0
    for i, fname in enumerate(fnames):
        ncreader.open(fname)
        if not ncreader.is_parcel_stats_file:
            raise IOError("Not a parcel diagnostic output file.")
        nsteps = ncreader.get_num_steps()
        data_mean = np.zeros(nsteps)
        data_std = np.zeros(nsteps)
        t = np.zeros(nsteps)
        # Collect time, mean and standard deviation per output step.
        for step in range(nsteps):
            t[step] = ncreader.get_dataset(step, "t")
            data_mean[step] = ncreader.get_dataset(step, "avg " + dset)
            data_std[step] = ncreader.get_dataset(step, "std " + dset)
        if dset == "aspect ratio":
            # Track the largest lambda_max across files for the guide line.
            lmax = max(lmax, ncreader.get_global_attribute("lambda_max"))
        ncreader.close()
        plt.plot(t[beg:end], data_mean[beg:end], label=labels[i], color=colors[i])
        # Shade the +/- one-standard-deviation band around the mean.
        plt.fill_between(
            t[beg:end],
            data_mean[beg:end] - data_std[beg:end],
            data_mean[beg:end] + data_std[beg:end],
            alpha=0.5,
            color=colors[i],
        )
    if not no_xlabel:
        plt.xlabel(get_label("time", units["time"]))
    plt.grid(linestyle="dashed", zorder=-1)
    if dset == "aspect-ratio":
        plt.ylabel(r"aspect ratio $\lambda$")
        plt.text(t[10], 0.95 * lmax, r"$\lambda\le\lambda_{max} = " + str(lmax) + "$")
        plt.axhline(lmax, linestyle="dashed", color="black")
    elif dset == "volume":
        plt.ylabel(r"parcel volume / $V_{g}$")
        # plt.axhline(1.0, linestyle='dashed', color='black',
        #            label=r'cell volume $V_{g}$')
    else:
        plt.ylabel(r"parcel " + dset)
    if not labels[0] is None:
        plt.legend(
            loc=legend_dict["loc"],
            ncol=legend_dict["ncol"],
            bbox_to_anchor=legend_dict["bbox_to_anchor"],
        )
    plt.tight_layout()
    if figure == "return":
        return plt
    elif figure == "save":
        # Prefix the output name with the first file's basename unless
        # several files are plotted together.
        prefix = os.path.splitext(fnames[0])[0] + "_"
        if n > 1:
            prefix = ""
        dset = dset.replace(" ", "_")
        plt.savefig(prefix + "parcel_" + dset + "_profile." + fmt, bbox_inches="tight")
    else:
        plt.show()
    plt.close()
def dot(p, q):
    """
    Compute dot product between two 3D vectors

    p: array
        Cartesian coordinates for one of the vectors
    q: array
        Cartesian coordinates for one of the vectors
    """
    # Sum the componentwise products of the first three coordinates.
    px, py, pz = p[0], p[1], p[2]
    qx, qy, qz = q[0], q[1], q[2]
    return px * qx + py * qy + pz * qz
def cl_do_all(command, workdirs):
    """Run the same command line argument from each working directory.

    :param command: shell command string to execute
    :param workdirs: iterable of directories to run the command in
    """
    import subprocess  # local import keeps the file's import block untouched
    for workdir in workdirs:
        # subprocess.run with cwd= replaces the old `os.system("( cd %s ; %s)")`
        # string interpolation, which broke on directories containing spaces
        # or shell metacharacters.  check=False keeps the original
        # ignore-the-exit-status behaviour.
        subprocess.run(command, shell=True, check=False, cwd=workdir)
def color_code_MGS_image(T_id,data,img_path,output_path):
    """Function to visualize the MGS of a given image by color coding labelled images

    Parameters:
        T_id: Tissue id/Name of the image
        data: grouped dataframe that contains the centroid and MGS of nuclei
        img_path: path to the images
        output_path: directory the color-coded TIFF is written to
    """
    img = imread(img_path)
    score=data.get_group(T_id)
    # Derive the integer nucleus label from the trailing part of "nucid".
    # NOTE(review): this assigns onto a groupby slice (chained assignment);
    # pandas may warn and, depending on version, not propagate -- confirm.
    score['label']=pd.to_numeric(score['nucid'].str.split('_').str[-1])
    # Start from an all-zero image of the same shape as the label image.
    t=np.multiply(0,np.array(img==1))
    # Accumulate (MGS + 1) into each nucleus mask; +1 presumably so that an
    # MGS of 0 is distinguishable from background -- TODO confirm.
    for i in tqdm(range(len(score['label']))):
        t=t+np.multiply(np.array(score['MGS'])[i]+1,np.array(img==np.array(score['label'])[i]))
    im = Image.fromarray(t)
    im.save(output_path+"/"+T_id+ ".tif")
def test_command_line_interface():
    """Test the CLI."""
    runner = CliRunner()
    # Invoking with no arguments must succeed and mention the entry point.
    result = runner.invoke(cli.main)
    assert result.exit_code == 0
    assert "lazypredict.cli.main" in result.output
    # --help must succeed and show click's standard help epilogue.
    help_result = runner.invoke(cli.main, ["--help"])
    assert help_result.exit_code == 0
    assert "--help  Show this message and exit." in help_result.output
def create(protocol, host, port, objname):
    """
    Sets up an environment through which the program can be re-invoked by
    itself. In this new environment, the output will be the username and
    password stored in the keyring for the specified server.
    Refer to `credentials.keyring` for what the arguments mean.

    :param protocol: protocol of the remote (may be None -> empty string)
    :param host: host name of the remote (may be None -> empty string)
    :param port: port number of the remote (may be None -> "0")
    :param objname: name of the keyring object (may be None -> empty string)
    :return: a plain dict of environment variables for the child process
    """
    # Set up an environment in which 'git fetch' will load username and
    # password from this script, not prompt the user in terminal.
    # BUG FIX: use os.environ.copy() instead of deepcopy(os.environ).
    # deepcopy returns another os._Environ whose item assignment calls
    # putenv() and therefore leaks these variables into the *current*
    # process environment; .copy() returns an independent plain dict.
    env = os.environ.copy()
    env['GIT_ASKPASS'] = sys.argv[0]  # Use the entry point of the script.
    env['SR_ASKPASS'] = '1'
    env['SR_ASKPASS_PROTOCOL'] = protocol if protocol else ''
    env['SR_ASKPASS_SERVER'] = host if host else ''
    env['SR_ASKPASS_PORT'] = str(port) if port else '0'
    env['SR_ASKPASS_OBJECT'] = objname if objname else ''
    # State file consumed by the askpass re-invocation; it is seeded with
    # 'U'.  NOTE(review): 'U' presumably means "username requested next" --
    # confirm against the askpass handler.
    handle, filepath = tempfile.mkstemp()
    os.write(handle, 'U'.encode('ascii'))
    os.close(handle)
    env['SR_ASKPASS_TEMP'] = filepath
    return env
def install_completion(ctx, attr, value):  # pragma: no cover
    """Install completion for the current shell.

    Designed as an eager click option callback: does nothing during
    resilient parsing (e.g. completion itself) or when the flag is absent,
    otherwise installs shell completion and exits.
    """
    import click_completion.core
    if not value or ctx.resilient_parsing:
        return value
    shell, path = click_completion.core.install()
    click.secho("{0} completion installed in {1}".format(shell, path), fg="green")
    # Installing completion is a terminal action; stop further processing.
    ctx.exit()
def _extractSetsSingleUser(df, time_window):
    """Get activity set and trip set for each individual.

    Slides a `time_window`-week window over the user's records; per window
    (= time step) it extracts the activity-set locations and the trips
    that relate to those locations.

    Parameters
    ----------
    df : pd.DataFrame
        Records of a single user with at least the columns "userid",
        "startt", "endt", "type" ("points"/"trips") and "locid".
    time_window : int
        Sliding-window length in weeks.

    Returns
    -------
    pd.DataFrame
        Concatenation of the activity-set rows (type == "points") and the
        trip-set rows (type == "trips") over all time steps.
    """
    # total weeks and start week
    weeks = (df["endt"].max() - df["startt"].min()).days // 7
    start_date = df["startt"].min().date()
    aSet = pd.DataFrame([], columns=["userid", "locid", "dur_s", "class", "timeStep"])
    tSet = pd.DataFrame([], columns=["userid", "tripid", "length_m", "dur_s", "nloc", "class", "timeStep"])
    # construct the sliding week gdf, i is the timestep
    for i in range(0, weeks - time_window + 1):
        # start and end time
        curr_start = datetime.datetime.combine(start_date + datetime.timedelta(weeks=i), datetime.time())
        curr_end = datetime.datetime.combine(curr_start + datetime.timedelta(weeks=time_window), datetime.time())
        ## determine activity set locations
        # get the current time step points gdf
        curr_stps = df.loc[(df["startt"] >= curr_start) & (df["endt"] < curr_end) & (df["type"] == "points")]
        # extract the activity set (location)
        curr_ASet = curr_stps.groupby("locid", as_index=False).apply(_getActLocs, time_window=time_window).dropna()
        # if no location, jump to next time step
        if curr_ASet.empty:
            continue
        # result is the locations with stayed duration class
        curr_ASet["timeStep"] = i
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # drop-in replacement.
        aSet = pd.concat([aSet, curr_ASet])
        ## determine activity set trips
        # select activity set location
        curr_ASet = curr_ASet.loc[curr_ASet["class"] > 0]
        # get the current time step trips gdf
        curr_t = df.loc[(df["startt"] >= curr_start) & (df["endt"] < curr_end) & (df["type"] == "trips")]
        curr_tSet = _getCurrTrips(curr_t, curr_stps, curr_ASet)
        # result is the trips that end at activity set locations
        curr_tSet["timeStep"] = i
        tSet = pd.concat([tSet, curr_tSet])
    # clean up: reset_index returns a *new* frame, so assign the result
    # (the original discarded it, leaving duplicate indices in the output)
    aSet = aSet.reset_index(drop=True)
    tSet = tSet.reset_index(drop=True)
    aSet["type"] = "points"
    tSet["type"] = "trips"
    aSet["userid"] = df["userid"].unique()[0]
    tSet["userid"] = df["userid"].unique()[0]
    return pd.concat([aSet, tSet])
def _ed25519():
    """Edwards curve Ed25519.

    Link: https://en.wikipedia.org/wiki/EdDSA#Ed25519
    """
    # Field prime and (prime) subgroup order of Ed25519.
    q = 2 ** 255 - 19
    order = 2 ** 252 + 27742317777372353535851937790883648493
    gf = GF(q)
    ed = CurveParams(name="ED25519", order=order, gf=gf, is_cyclic = True)
    # Twisted Edwards coefficients: a = -1, d = -121665/121666 (mod q).
    ed.set_constants(a=gf(-1), d=gf(-121665) / gf(121666))
    ed.set_equation(set_edwards_eq(a=ed.a, c=ed.c, d=ed.d))
    # Standard base point; its y-coordinate is 4/5 (mod q).
    ed.set_base_pt(
        (
            gf(
                15112221349535400772501151409588531511454012693041857206046113283949847762202
            ),
            gf(4) / gf(5),
        )
    )
    return ed
def from_raw_bytes(raw_bytes):
    """Take raw bytes and turn it into a DmailRequest"""
    # Decode the UTF-8 payload, parse it as JSON, then build the request.
    text = raw_bytes.decode(encoding='UTF-8')
    parsed = json.loads(text)
    return from_json(parsed)
def show_evaluation_cli(
    configuration: Configuration,
    number: int,
    verbosity: str,
):
    """Show past evaluation.

    Looks up the (1-based) evaluation *number* in the cache, prints the
    per-source and per-command summary, and exits with status 1 when the
    evaluation cannot be found.
    """
    try:
        # The cache is 0-indexed while the CLI argument is 1-indexed.
        evaluation = configuration.cache.get_evaluation(number - 1)
    except CacheError:
        click.echo(
            failure_style(f"Could not find evaluation with given index {number}")
        )
        sys.exit(1)
    click.echo(total_evaluation_string(evaluation))
    for source, source_evaluation in evaluation.items():
        click.echo(
            f"{source_style(str(source))} ("
            f"{source_evaluation.source_execution_duration:.2f} seconds):"
        )
        for command_evaluation in source_evaluation.commands_evaluations:
            click.echo(
                f"\t{name_style(command_evaluation.command.name)} - "
                f"{evaluation_status(command_evaluation)} "
                f"({command_evaluation.execution_duration:.2f} seconds)"
            )
            # Command arguments are only shown in verbose mode.
            if is_verbose(verbosity):
                click.echo(
                    f"\t\tArguments: {' '.join(command_evaluation.command.args)}"
                )
def BRepApprox_TheMultiLineToolOfApprox_FirstPoint(*args):
    """
    Auto-generated SWIG wrapper: return the index of the first point of
    the given multi-line.

    :param ML:
    :type ML: BRepApprox_TheMultiLineOfApprox &
    :rtype: int
    """
    return _BRepApprox.BRepApprox_TheMultiLineToolOfApprox_FirstPoint(*args)
def push_message(token, user, message, **kwargs):
    """
    Send message to selected user/group/device.

    :param str token: application token
    :param str user: user or group id to send the message to
    :param str message: your message
    :param str title: your message's title, otherwise your app's name is used
    :param str device: your user's device name to send the message directly to that device
    :param list device: your user's devices names to send the message directly to that device
    :param str url: a supplementary URL to show with your message
    :param str url_title: a title for your supplementary URL, otherwise just the URL is shown
    :param int priority: message priority (Use the Priority class to select)
    :param int retry: how often (in seconds) the Pushover servers will retry the notification to the user (required
        only with priority level of Emergency)
    :param int expire: how many seconds your notification will continue to be retried (required only with priority
        level of Emergency)
    :param datetime timestamp: a datetime object repr the timestamp of your message's date and time to display to the user
    :param str sound: the name of the sound to override the user's default sound choice (Use the Sounds consts to
        select)
    :param bool html: Enable rendering message on user device using HTML
    :raises TypeError: when priority is Emergency but retry/expire is missing
    :raises ValueError: when retry/expire fall outside [_MIN_RETRY, _MAX_EXPIRE]
    :return: the API response from `send`
    """
    data_out = {
        'token': token,
        'user': user,  # can be a user or group key
        'message': message
    }
    # Support for non-required parameters of PushOver
    if 'title' in kwargs:
        data_out['title'] = kwargs['title']
    if 'device' in kwargs:
        device = kwargs['device']
        if isinstance(device, list):
            # Multiple devices are sent as a comma-separated string.
            data_out['device'] = ','.join(device)
        else:
            data_out['device'] = device
        # BUG FIX: removed the unconditional `data_out['device'] =
        # kwargs['device']` that followed, which clobbered the joined
        # string with the raw list.
    if 'url' in kwargs:
        data_out['url'] = kwargs['url']
    if 'url_title' in kwargs:
        data_out['url_title'] = kwargs['url_title']
    if 'priority' in kwargs:
        data_out['priority'] = kwargs['priority']
        # Emergency prioritized messages require 'retry' and 'expire' to be defined
        if data_out['priority'] == PRIORITIES.EMERGENCY:
            if 'retry' not in kwargs:
                raise TypeError('Missing `retry` argument required for message priority of Emergency')
            else:
                retry_val = kwargs['retry']
                # 'retry' val must be a minimum of _MIN_RETRY and max of _MAX_EXPIRE
                if not (_MIN_RETRY <= retry_val <= _MAX_EXPIRE):
                    raise ValueError('`retry` argument must be at a minimum of {} and a maximum of {}'.format(
                        _MIN_RETRY, _MAX_EXPIRE
                    ))
                data_out['retry'] = retry_val
            if 'expire' not in kwargs:
                # (typo "arguemnt" fixed)
                raise TypeError('Missing `expire` argument required for message priority of Emergency')
            else:
                expire_val = kwargs['expire']
                # 'expire' val must be a minimum of _MIN_RETRY and max of _MAX_EXPIRE
                if not (_MIN_RETRY <= expire_val <= _MAX_EXPIRE):
                    raise ValueError('`expire` argument must be at a minimum of {} and a maximum of {}'.format(
                        _MIN_RETRY, _MAX_EXPIRE
                    ))
                data_out['expire'] = expire_val
            # Optionally a callback url may be supplied for the Emergency Message
            if 'callback' in kwargs:
                data_out['callback'] = kwargs['callback']
    if 'timestamp' in kwargs:
        # The API expects a Unix timestamp, not a datetime object.
        data_out['timestamp'] = int(time.mktime(kwargs['timestamp'].timetuple()))
    if 'sound' in kwargs:
        data_out['sound'] = kwargs['sound']
    if 'html' in kwargs:
        # The API expects 0/1 rather than a boolean.
        data_out['html'] = int(kwargs['html'])
    return send(_push_url, data_out=data_out)
def _rrv_div_ ( s , o ) :
    """Division of RooRealVar and ``number''

    >>> var = ...
    >>> num = ...
    >>> res = var / num
    """
    # Normalise the divisor: a non-constant RooRealVar becomes a
    # value-with-error, anything else exposing getVal() a plain value.
    if isinstance ( o , _RRV_ ) and not o.isConstant() : o = o.ve ()
    elif hasattr ( o , 'getVal' ) : o = o.getVal ()
    #
    # The dividend keeps its error only when it is not constant.
    v = s.getVal() if s.isConstant() else s.ve()
    #
    return v / o
def getWindowsAt(x: int, y: int, app: AppKit.NSApplication = None, allWindows=None):
    """
    Get the list of Window objects whose windows contain the point ``(x, y)`` on screen

    :param x: X screen coordinate of the window(s)
    :param y: Y screen coordinate of the window(s)
    :param app: (optional) NSApp() object. If passed, returns the list of windows at (x, y) position of given app
    :param allWindows: (optional) list of window objects (required to improve performance in Apple Script version)
    :return: list of Window objects
    """
    # Fall back to enumerating all windows when no pre-fetched list is given.
    candidates = allWindows if allWindows else getAllWindows(app)
    hits = []
    for win in candidates:
        # Read the geometry once per window before the hit test.
        geometry = win.box
        if pointInRect(x, y, geometry.left, geometry.top, geometry.width, geometry.height):
            hits.append(win)
    return hits
def find_closest_vertices(surface_coords, point_coords):
    """Return the vertices on a surface mesh closest to some given coordinates.
    The distance metric used is Euclidian distance.
    Parameters
    ----------
    surface_coords : numpy array
        Array of coordinates on a surface mesh
    point_coords : numpy array
        Array of coordinates to map to vertices
    Returns
    -------
    closest_vertices : numpy array
        Array of mesh vertex ids
    """
    # Promote a single point to a (1, n) array so cdist always gets 2-D input.
    queries = np.atleast_2d(point_coords)
    # Pairwise distances: rows are mesh vertices, columns are query points.
    distances = cdist(surface_coords, queries)
    # For each query point (column), pick the row index of the nearest vertex.
    return distances.argmin(axis=0)
def blob_utils_get_loss_gradients(model, loss_blobs):
    """Generate a gradient of 1 for each loss specified in 'loss_blobs'"""
    # Map each loss blob name to the name of its constant-1 gradient blob.
    return {
        str(blob): str(model.net.ConstantFill(blob, [blob + '_grad'], value=1.0))
        for blob in loss_blobs
    }
def _list_indexing(X, key, key_dtype):
""" Index a Python list """
if np.isscalar(key) or isinstance(key, slice):
# key is a slice or a scalar
return X[key]
if key_dtype == 'bool':
# key is a boolean array-like
return list(compress(X, key))
# key is a integer array-like of key
return [X[idx] for idx in key] | 28,675 |
def extract_hubs_from_motifs(list_of_motifs: list,
                             genes_to_remove: list,
                             check_conflict: bool = True,
                             debug: bool = False,
                             gene_ids_file: str = None,
                             top_pc: float = 1.):
    """
    Rank genes by how often they appear in motifs and extract the top hubs.

    Parameters
    ==========
    list_of_motifs: list of motif names; each element is split on "_" into
        gene IDs.
        NOTE(review): the original description (items being
        ['Motif_name', weight] pairs) does not match the code, which calls
        m.split("_") on each element directly -- confirm the input format.
    genes_to_remove: if there are genes to be removed due to ID conflicts or discrepancies
    check_conflict: if True, check the overlap between the hubs and the list provided
    debug: if printing debug options
    gene_ids_file: file with gene_IDs that might generate conflict
        (required in practice: it is opened unconditionally)
    top_pc: to % of genes to consider
    Returns
    =======
    full_genes: all genes with their connectivity, sorted descending
    hubs: the top genes (hubs) by connectivity
    """
    # Count how many times each gene participates in any motif.
    genes_p = collections.Counter(list(itertools.chain(*[m.split("_") for m in list_of_motifs])))
    # this file corrects the list for discrepancies in the list for gene IDs
    path_tofile = gene_ids_file
    fin = open(path_tofile, mode='r')
    genelist = fin.readlines()
    genelist = [x.strip() for x in genelist]
    fin.close()
    if debug:
        print("Find top {}% of hubs".format(top_pc))
    # Work on a copy so the original counter survives for the debug report.
    temp_genes = copy.deepcopy(genes_p)
    for g in genes_p:
        if g in genes_to_remove and g not in genelist:
            if debug:
                print("Removing gene for conflict: {}".format(g))
            temp_genes.pop(g)
    if debug:
        print("Hubs before: {} and after: {}".format(len(genes_p), len(temp_genes)))
    # [gene, count] pairs sorted by decreasing connectivity.
    list_of_genes = sorted(list(map(list, list(temp_genes.items()))), key=itemgetter(1), reverse=True)
    if check_conflict:
        # NOTE(review): top_pc is turned into (100 - top_pc) here and then
        # used both as a slice length on the next line and as a percentile
        # at the end of the function -- confirm this double use is intended.
        top_pc = 100-top_pc
        top_pc_genes = list(np.array(list_of_genes).T[0][:top_pc])
        # Hubs not in the provided gene list, and listed genes not in the hubs.
        notpert_hubs = list(set(top_pc_genes) - set(genelist))
        nothubs_pert = list(set(genelist) - set(top_pc_genes))
        # Swap connectivity values pairwise between the two groups.
        for idx, gene in enumerate(notpert_hubs):
            temp1 = [gene, temp_genes[gene]]
            temp_genes.pop(gene)
            temp2 = [nothubs_pert[idx], temp_genes[nothubs_pert[idx]]]
            temp_genes.pop(nothubs_pert[idx])
            temp_genes[temp2[0]] = temp1[1]
            temp_genes[temp1[0]] = temp2[1]
        temp_genes = collections.OrderedDict(sorted(temp_genes.items(), key=itemgetter(1), reverse=True))
    full_genes = sorted(list(map(list, list(temp_genes.items()))), key=itemgetter(1), reverse=True)
    # Connectivity values only; float16 keeps the array compact.
    connectivity = np.array(full_genes).T[1].astype(np.float16)
    # Hubs: genes whose connectivity exceeds the top_pc-th percentile.
    hubs = list(np.array(list(temp_genes.keys()))[np.where(connectivity > np.percentile(connectivity, top_pc))[0]])
    return full_genes, hubs
def credit_rating():
    """
    credit_rating http api

    Reads a JSON payload describing a user (role, sex, location, carbon
    credit, footprint names), min-max normalises the features, forwards them
    to a remote federated-inference service and maps the returned probability
    to a good/bad credit rating.

    Returns:
        JSON string with keys ``rescode``, ``credit-rating`` and
        ``description``.
    """
    return_dict = {'rescode': '200', 'credit-rating': '1', 'description': 'Good credit'}
    # NOTE(review): request.get_data() returns b'' (not None) for an empty
    # body under most WSGI servers, so this guard is likely never taken.
    if request.get_data() is None:
        return_dict['rescode'] = '5004'
        return json.dumps(return_dict, ensure_ascii=False)
    # Categorical feature encodings used to build the numeric feature vector.
    role_dict = {'farmer': 1, 'consumer': 2}
    sex_dict = {'male': 1, 'female': 2}
    location_dict = {'Cantwell city, Alaska, USA': 1, 'Queens, New York, NY, USA': 2}
    description_dict = {'0': 'Bad credit', '1': 'Good credit'}
    get_data = request.get_data()
    get_data = json.loads(get_data)
    role_name = get_data.get('rolename')
    sex = get_data.get('sex')
    user_name = get_data.get('username')
    location = get_data.get('location')
    carbon_credit = get_data.get('carbon_credit')
    footprint_names = get_data.get('footprint_name')
    carbon_credit = int(carbon_credit)
    # Count footprint events by their name prefix.
    footprint_count_dict = {'Buy': 0, 'Fertilize': 0, 'Seed': 0}
    for ftn in footprint_names:
        if ftn.startswith('Buy'):
            footprint_count_dict['Buy'] = footprint_count_dict['Buy'] + 1
        elif ftn.startswith('Fertilize'):
            footprint_count_dict['Fertilize'] = footprint_count_dict['Fertilize'] + 1
        elif ftn.startswith('Seed'):
            footprint_count_dict['Seed'] = footprint_count_dict['Seed'] + 1
    # Raw feature vector before normalisation.
    x_predict_json = {
        'x0': sex_dict.get(sex),
        'x1': role_dict.get(role_name),
        'x2': location_dict.get(location),
        'x3': carbon_credit,
        'x4': footprint_count_dict['Seed'],
        'x5': footprint_count_dict['Buy'],
        'x6': footprint_count_dict['Fertilize']
    }
    # Hard-coded min/max bounds for each feature, used for min-max scaling.
    value_dict = {'max_x0': 2, 'min_x0': 1, 'max_x1': 2, 'min_x1': 1, 'max_x2': 2, 'min_x2': 1, 'max_x3': 99,
                  'min_x3': 0, 'max_x4': 30, 'min_x4': 0, 'max_x5': 30, 'min_x5': 0, 'max_x6': 30, 'min_x6': 0}
    for i in range(7):
        x_predict_json['x' + str(i)] = normalization(x_predict_json['x' + str(i)], value_dict['max_x' + str(i)],
                                                     value_dict['min_x' + str(i)])
    # Request body in the format expected by the federated inference service.
    body_json = {
        "head": {
            "serviceId": "cfc"
        },
        "body": {
            'featureData': x_predict_json,
            'sendToRemoteFeatureData': {
                'device_id': user_name
            }
        }
    }
    # guest node ip
    # NOTE(review): 'IP' is a placeholder hostname and must be configured
    # before deployment -- consider reading it from configuration.
    response = requests.post(
        'http://IP:8059/federation/v1/inference',
        data=json.dumps(body_json))
    response_data = json.loads(response.text).get('data')
    prob = response_data.get('prob')
    # Probability above 0.4 is classified as good credit.
    flag = "0"
    if float(prob) > 0.4:
        flag = "1"
    return_dict['credit-rating'] = flag
    return_dict['description'] = description_dict[flag]
    return json.dumps(return_dict, ensure_ascii=False)
def updateGlobalInventory(D_SKUs: pd.DataFrame, inventoryColumn: str):
    """
    Update the global inventory of the warehouse
    Args:
        D_SKUs (pd.DataFrame): Input SKUs dataframe. Must expose columns
            'VOLUME', 'INVENTORY_DAYS' and ``inventoryColumn``.
        inventoryColumn (str): column name with the inventory.
    Returns:
        D_inventory (pd.DataFrame): Output DataFrame with warehouse-level
        inventory volume and normalised inventory, indexed by day.
    """
    D_inventory = pd.DataFrame([], columns=['WH_INVENTORY_VOLUME', 'WH_INVENTORY_NORMALISED'])
    givenVolumes = 0  # count the number of SKUs with a given volume
    for i in range(0, len(D_SKUs)):
        # i=33159
        volume = D_SKUs.iloc[i]['VOLUME']
        list_days = D_SKUs.iloc[i]['INVENTORY_DAYS']
        # go on only if an inventory has been saved
        if isinstance(list_days, list):
            list_inventory = np.array(D_SKUs.iloc[i][inventoryColumn])
            list_inventory = np.nan_to_num(list_inventory)  # convert nan to 0
            list_inventory_volume = list_inventory * volume
            # Min-max normalise this SKU's inventory curve.
            # NOTE(review): a constant curve gives max == min and divides by
            # zero, producing NaN/inf that fillna(0) below silently zeroes --
            # confirm this is acceptable.
            list_inventory_normalised = (list_inventory - min(list_inventory)) / (max(list_inventory) - min(list_inventory))
            D_temp = pd.DataFrame(list_inventory_normalised, index=list_days, columns=['SKU_INVENTORY_NORMALISED'])
            # Align the SKU curve with the running warehouse total by day.
            D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
            D_inventory = D_inventory.fillna(0)
            D_inventory['WH_INVENTORY_NORMALISED'] = D_inventory['WH_INVENTORY_NORMALISED'] + D_inventory['SKU_INVENTORY_NORMALISED']
            D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_NORMALISED'])
            if str(volume) != 'nan':  # if volume is not nan
                # Same accumulation, weighted by the SKU's unit volume.
                D_temp = pd.DataFrame(list_inventory_volume, index=list_days, columns=['SKU_INVENTORY_VOLUME'])
                D_inventory = pd.concat([D_temp, D_inventory], axis=1, sort=False)
                D_inventory = D_inventory.fillna(0)
                D_inventory['WH_INVENTORY_VOLUME'] = D_inventory['WH_INVENTORY_VOLUME'] + D_inventory['SKU_INVENTORY_VOLUME']
                D_inventory = D_inventory.drop(columns=['SKU_INVENTORY_VOLUME'])
                givenVolumes = givenVolumes + 1
    return D_inventory
def generate_code():
    """
    Fill the mako templates and write the results to the generated files.

    All contents of the destination files are overwritten by this operation.
    The work lists are printed before generation so template problems are
    easy to spot.

    :return:
    """
    # (1) build the to-do list for the first pair of templates
    to_override, to_override_with_exception = define_what_needs_to_be_written()
    # echo the work lists outside of the templates as a sanity check
    for entry in to_override:
        print(entry)
    for entry in to_override_with_exception:
        print(entry)
    # render both mini_lambda templates
    generate_from_template('mini_lambda_template.mako', 'generated.py',
                           dict(to_override=to_override, to_override_with_exception=to_override_with_exception))
    generate_from_template('mini_lambda_template_2.mako', 'generated2.py',
                           dict(to_override=to_override, to_override_with_exception=to_override_with_exception))
    # (2) build the to-do list for the goodies template
    import_lines, to_create = define_goodies()
    for entry in import_lines:
        print(entry)
    for entry in to_create:
        print(entry)
    generate_from_template('goodies_template.mako', 'goodies_generated.py',
                           dict(import_lines=import_lines, to_create=to_create))
def _query_checks(start, end, owner_id=''):
    """Get the number of rules checks from `start` to `end` in 1-day windows.

    Args:
        start: Window start, a `datetime.datetime`.
        end: Window end, a `datetime.datetime`.
        owner_id: Optional owner to scope the query to.

    Returns:
        The parsed per-day checks series.

    Raises:
        TypeError: If `start` or `end` is not a `datetime.datetime`.
    """
    # Validate explicitly instead of `assert`, which is stripped under -O.
    if not (isinstance(start, datetime.datetime) and
            isinstance(end, datetime.datetime)):
        raise TypeError('`start` and `end` must be datetime.datetime instances')
    series = []
    while start < end:
        stop = start + datetime.timedelta(days=1)
        # One InfluxDB query per 1-day window.
        results = _query_influxdb(
            _get_checks_or_datapoints_query('checks',
                                            start, stop, owner_id), owner_id
        )
        series.append(('%sZ' % start.isoformat(), results))
        start += datetime.timedelta(days=1)
    return _parse_checks_or_datapoints_series(series, 'checks', owner_id)
def _FilterManufacturedEvents(results):
    """Return a list of results where first question is 'MANUFACTURED'.
    Manufactured events are either Recording events that correspond to
    an instrumented event in the browser, or Showed notification events
    that correspond to when the user was invited to take a survey.
    Args:
      results: Results parsed from JSON. Assumed to already be filtered by date.
    Returns:
      (1) List of results that are manufactured events.
      (2) Integer index into the results list indicating which list
          element's questions can be considered canonical and complete.
    """
    manufactured = []
    for result in results:
        # The first response's question marks a manufactured event.
        if result['responses'][0]['question'] == 'MANUFACTURED':
            manufactured.append(result)
    return manufactured, _GetCanonicalIndex(manufactured)
def test_naive_fusion_2d_with_overlaps() -> None:
    """Test naive fusion with really overlapping points in 2d that need to be marked."""
    n_rays = 20
    s = 8
    shape = (s, s)
    dists = np.zeros(shape + (n_rays,))
    probs = np.zeros(shape)
    # object 1
    z0 = s // 4
    dist = 3
    dists[z0, z0, :] = dist
    probs[z0, z0] = 0.9
    # object 2
    z1 = s // 4 * 3 - 1
    dists[z1, z1, :] = dist
    probs[z1, z1] = 0.9
    lbl = nf.naive_fusion(
        dists,
        probs,
        grid=(1, 1),
        show_overlaps=True,
    )
    # Expected labels: two polygons, with the overlap region marked as -1.
    new_dists = np.full((2, n_rays), dist)
    new_points: npt.NDArray[np.double] = np.array([[z0, z0], [z1, z1]])
    label = polygons_to_label(new_dists, new_points, shape)
    # set overlapping labels to correct ids
    label[3:5, 3:5] = -1
    label[2, 5] = -1
    label[5, 2] = -1
    # Debug prints removed; assert_array_equal shows both arrays on failure.
    np.testing.assert_array_equal(lbl, label)
def setup(client):
    """
    Register the Ping cog on the given bot/client instance.
    """
    cog = Ping(client)
    client.add_cog(cog)
    print("\tLoaded Ping cog!")
def get_bgp(host, username, password) -> None:
    """Fetch the device's BGP configuration over NETCONF and search it."""
    connection = create_netconf_connection(host, username, password)
    reply = connection.get(get_policies)
    # Parse the NETCONF XML reply into nested dicts.
    parsed = xmltodict.parse(reply.xml)["rpc-reply"]["data"]
    # Drill down to router/bgp, tolerating its absence.
    bgp_config = parsed["native"].get("router", {}).get("bgp", {})
    search_config(bgp_config)
def pca_loadings_bar(axi, coefficients, xvars, plot_type='bar'):
    """Plot the loadings for a single component in a bar plot.
    Parameters
    ----------
    axi : object like :class:`matplotlib.axes.Axes`
        The plot we will add the loadings to.
    coefficients : object like :class:`numpy.ndarray`
        The coefficients we are to show.
    xvars : list of strings
        Labels for the original variables.
    plot_type : string, optional
        Selects the type of plot we are making.
    """
    positions = range(len(coefficients))
    # Choose bar heights and the matching y-axis label from the plot type.
    if plot_type == 'bar-square':
        heights, ylabel = coefficients**2, 'Squared coefficients'
    elif plot_type == 'bar-absolute':
        heights, ylabel = np.abs(coefficients), 'Absolute value of coefficients'
    else:
        heights, ylabel = coefficients, 'Coefficient'
    axi.set_ylabel(ylabel)
    # Dotted zero line makes sign changes easy to read.
    axi.axhline(y=0, ls=':', color='#262626')
    axi.bar(positions, heights)
    axi.set_xticks(positions)
    axi.set_xticklabels(xvars, rotation='vertical')
    axi.set_xlabel('Variables')
def Max(data):
    """Return the maximum value of a time series."""
    peak = data.max()
    return peak
def write_qif_orders(app_ui, orders, qif_file):
    """Write orders to a .qif file, one transaction per order id."""
    args = app_ui.args
    for order_id in sorted(orders):
        order = orders[order_id]
        # Transaction header: date, payee, account, total.
        print("D{}".format(order.order_date), file=qif_file)
        print("POpenSky order {}".format(order_id), file=qif_file)
        print("L{}".format(args.acct_opensky), file=qif_file)
        print("T{:.2f}".format(order.total_payment), file=qif_file)
        # GNUCash displays splits in the opposite order from the import
        # file, so they are emitted reversed relative to the display order.
        splits = (
            (order.opensky_commission, args.acct_commission, None),
            (order.cc_processing, args.acct_cc_processing, None),
            (order.restocking_fee, args.acct_restocking, None),
            (order.sales_tax, args.acct_sales_tax, None),
            (order.opensky_credits, args.acct_credits, None),
            (order.shipping, args.acct_shipping, None),
            (order.item_price, args.acct_sales, list_to_string("SKU", order.skus)),
        )
        for amount, account, memo in splits:
            if memo is None:
                write_split(qif_file, amount, account)
            else:
                write_split(qif_file, amount, account, memo)
        # End-of-transaction marker.
        print("^", file=qif_file)
def mcf_and(modal_context, fml, clausal_form_dict, id_mc, distributive):
    """ Takes AND fml, and applies transformation rules.

    ``fml`` is expected as ``[op, left_operand, right_operand]``; results are
    accumulated in ``clausal_form_dict`` and the module-level ``max_mc_id``
    counter is advanced to keep modal-context ids unique.
    """
    global max_mc_id
    left_fml = fml[1]
    right_fml = fml[2]
    # Keep the global id counter at least as large as the caller-supplied id.
    max_mc_id = max(id_mc, max_mc_id)
    # if modal context distributes over the 'and' operator
    # NOTE(review): the comment above contradicts the condition below, which
    # takes this branch when `distributive` is False -- confirm the intent.
    if not distributive:
        # Process each conjunct independently under a copy of the context.
        left_mc = get_mc(left_fml, copy.deepcopy(modal_context))
        max_mc_id += 1
        to_mcf(left_mc[0], left_mc[1], clausal_form_dict, max_mc_id)
        right_mc = get_mc(right_fml, copy.deepcopy(modal_context))
        # NOTE(review): += 3 is asymmetric with the += 1 above -- confirm
        # whether the extra id gap is intentional.
        max_mc_id += 3
        to_mcf(right_mc[0], right_mc[1], clausal_form_dict, max_mc_id)
    else:
        # Introduce a fresh proposition p and rewrite the formula as
        # (~p | fml), processing p under the current modal context first.
        p_atom = create_atom()
        to_mcf(modal_context, p_atom, clausal_form_dict, id_mc)
        updated_fml = [u.op('|'), [u.op('~'), p_atom], fml]
        max_mc_id += 1
        to_mcf(modal_context, updated_fml, clausal_form_dict, max_mc_id, distributive=False)
def clean_words(words, remove_stopwords=False, language='portuguese'):
    """Stems and removes stopwords from a set of word-level tokens using the RSLPStemmer.
    Args:
        words (list): Tokens to be stemmed.
        remove_stopwords (bool): Whether stopwords should be removed or not.
        language (str): Identifier of stopwords' language.
    Returns:
        List of stemmed tokens.
    """
    stemmer = RSLPStemmer()
    if not remove_stopwords:
        # No filtering requested: just stem every token.
        return [stemmer.stem(token) for token in words]
    # Set membership keeps the stopword check O(1) per token.
    excluded = set(stopwords.words(language))
    return [stemmer.stem(token) for token in words if token.lower() not in excluded]
def prompt_input(message: str):
    """Repeatedly prompt the user for integers until the stop word is entered.

    Valid integers are appended to the module-level ``nums`` list; entering
    the module-level ``stop_word`` prints the max and min collected so far
    and ends the loop.  Any other input prints an error and re-prompts.
    """
    while True:
        try:
            entry = input(message+':\n')
            if entry == stop_word:
                # max()/min() raise ValueError on an empty list, which is
                # handled below exactly like a malformed number.
                print(f'Max: {max(nums)} Min: {min(nums)}')
                break
            nums.append(int(entry))
        except ValueError:
            print('Invalid Argument supplied')
def load_model(model_dir, model_file=None):
    """Loads the model.
    The model object is pickled in `model_dir` to make the model configuration
    optional for future runs.
    Args:
        model_dir: The model directory.
        model_file: An optional model configuration.
    Returns:
        A `opennmt.models.Model` object.
    Raises:
        RuntimeError: If no configuration is given and no serialized model
            description exists in `model_dir`.
    """
    serial_model_file = os.path.join(model_dir, "model_description.pkl")
    if model_file:
        if tf.train.latest_checkpoint(model_dir) is not None:
            tf.logging.warn(
                "You provided a model configuration but a checkpoint already exists. "
                "The model configuration must define the same model as the one used for "
                "the initial training. However, you can change non structural values like "
                "dropout.")
        # Build the model from the configuration module and cache it so that
        # future runs can omit the configuration.
        model_config = load_model_module(model_file)
        model = model_config.model()
        with open(serial_model_file, "wb") as serial_model:
            pickle.dump(model, serial_model)
    elif not os.path.isfile(serial_model_file):
        raise RuntimeError("A model configuration is required.")
    else:
        tf.logging.info("Loading serialized model description from %s", serial_model_file)
        # SECURITY NOTE: pickle.load executes arbitrary code from the file;
        # only load model descriptions from trusted model directories.
        with open(serial_model_file, "rb") as serial_model:
            model = pickle.load(serial_model)
    return model
def generate(args):
    """
    Generate snapshot of sources of kernel functions.
    This involves:
      - find source code with functions definitions
      - compile the source codes into LLVM IR
      - copy LLVM and C source files into snapshot directory
      - create YAML with list mapping functions to their LLVM sources

    Args:
        args: Parsed CLI arguments; uses `kernel_dir`, `output_dir` and
            `functions_list`.
    """
    source = KernelSource(args.kernel_dir, True)
    args.output_dir = os.path.abspath(args.output_dir)
    fun_list = FunctionList(args.output_dir)
    # Cleanup or create the output directory
    if os.path.isdir(args.output_dir):
        shutil.rmtree(args.output_dir)
    os.mkdir(args.output_dir)
    # Build sources for functions from the list into LLVM IR
    with open(args.functions_list, "r") as fun_list_file:
        for line in fun_list_file.readlines():
            fun = line.strip()
            # Skip blanks and lines that cannot be valid C identifiers
            # (e.g. comments in the functions list).
            if not fun or not (fun[0].isalpha() or fun[0] == "_"):
                continue
            sys.stdout.write("{}: ".format(fun))
            try:
                llvm_mod = source.get_module_for_symbol(fun)
                print(os.path.relpath(llvm_mod.llvm, args.kernel_dir))
                fun_list.add(fun, llvm_mod)
            except SourceNotFoundException:
                # Best-effort: report and continue with the next function.
                print("source not found")
    # Copy LLVM files to the snapshot
    source.copy_source_files(fun_list.modules(), args.output_dir)
    source.copy_cscope_files(args.output_dir)
    # Create YAML with functions list
    with open(os.path.join(args.output_dir, "functions.yaml"),
              "w") as fun_list_yaml:
        fun_list_yaml.write(fun_list.to_yaml())
    source.finalize()
def parse_healing_and_target(line):
    """Extract the healing amount and its recipient from a combat-log line.

    The recipient is taken as the words between position 3 and the first
    'for' token (with a leading 'the ' stripped); the amount is the token
    immediately after 'for'.  Returns ``[amount, target]``.
    """
    tokens = line.split()
    for_pos = tokens.index('for')
    recipient = ' '.join(tokens[3:for_pos]).replace('the ', '')
    healed = int(tokens[for_pos + 1])
    return [healed, recipient]
def trim_os_hidden_files(abs_path):
    """Trim OS-generated hidden files and folders (e.g. ``.DS_Store``) under ``abs_path``.

    NOTE(review): not implemented yet -- the body is a placeholder and the
    function currently does nothing and returns None.
    """
    pass
def _decode_and_center_crop(
    image_bytes: tf.Tensor,
    jpeg_shape: Optional[tf.Tensor] = None,
    image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
  """Crops to center of image with padding then scales.

  Args:
    image_bytes: Encoded image tensor passed through to `crop`.
    jpeg_shape: Optional precomputed image shape ``(h, w, ...)``; derived
      via `get_shape` when omitted.
    image_size: Target ``(height, width)`` used to shape the crop window.

  Returns:
    The cropped image tensor returned by `crop`.
  """
  if jpeg_shape is None:
    jpeg_shape = get_shape(image_bytes)
  image_height = jpeg_shape[0]
  image_width = jpeg_shape[1]
  # Pad the image with at least 32px on the short edge and take a
  # crop that maintains aspect ratio.
  scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32),
                     tf.cast(image_width, tf.float32) / (image_size[1] + 32))
  padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32)
  padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32)
  # Center the crop window; the +1 rounds the offset up on odd remainders.
  offset_height = ((image_height - padded_center_crop_height) + 1) // 2
  offset_width = ((image_width - padded_center_crop_width) + 1) // 2
  crop_window = [offset_height, offset_width,
                 padded_center_crop_height, padded_center_crop_width]
  image = crop(image_bytes, crop_window)
  return image
def print_stat(test_list, stat_dict):
    """Print an MRE/SDR/SCR summary line for each named test set.

    Expects ``stat_dict[name]`` to hold tensors under
    ``['MRE']['average']``, ``['SDR'][20]['average']`` and ``['SCR']``.
    """
    line_fmt = '| {}: MRE: {:.4f} SDR: {:.4f} SCR: {:.4f}'
    for name in test_list:
        stats = stat_dict[name]
        print(line_fmt.format(
            name,
            stats['MRE']['average'].cpu().numpy(),
            stats['SDR'][20]['average'].cpu().numpy(),
            stats['SCR'].cpu().mean(),
        ))
def inq_affine(inp, n_outmaps, base_axis=1, num_bits=4,
               inq_iterations=(), selection_algorithm='random',
               seed=-1, w_init=None, i_init=None, b_init=None,
               fix_parameters=False, rng=None, with_bias=True):
    """Incremental Network Quantization Affine Layer
    During training, the weights are sequentially quantized to power-of-two
    values, which allows the training of a multiplierless network.
    Using `inq_iterations`, one can specify after how many forward passes
    half of the learnable weights are fixed and quantized to powers-of-two.
    After reaching the last value in `inq_iterations`, all weights are fixed.
    For more details, please refer to the reference.
    Reference:
    Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
    Towards lossless CNNs with low-precision weights.
    <https://arxiv.org/abs/1702.03044>
    Args:
        inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
        n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
        inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
        selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
        seed (int): Random seed for INQ algorithm
        w_init (~nnabla.initializer.BaseInitializer): Initializer for the weight.
        i_init (~nnabla.initializer.BaseInitializer): Initializer for the indicators (0 ... learnable, 1 ... fixed).
        b_init (~nnabla.initializer.BaseInitializer): Initializer for the bias.
        fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.
    Returns:
        :class:`~nnabla.Variable`
    """
    # Normalise n_outmaps to a list so both int and tuple inputs work.
    if not hasattr(n_outmaps, '__iter__'):
        n_outmaps = [n_outmaps]
    n_outmaps = list(n_outmaps)
    n_outmap = int(np.prod(n_outmaps))
    # Default weight init: Glorot uniform based on the flattened fan-in/out.
    if w_init is None:
        fan_in = np.prod(inp.shape[base_axis:])
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
    # Default indicator init: all zeros, i.e. every weight starts learnable.
    if i_init is None:
        fan_in = np.prod(inp.shape[base_axis:])
        i_init = ConstantInitializer()
    if b_init is None:
        b_init = ConstantInitializer()
    w = get_parameter_or_create(
        "W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
        w_init, not fix_parameters)
    # Indicators are never trained directly; INQ updates them internally.
    i = get_parameter_or_create(
        "I", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
        i_init, False)
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", n_outmaps, b_init, not fix_parameters)
    return F.inq_affine(inp, w, i, b, base_axis, num_bits, inq_iterations, selection_algorithm, seed)
def is_greater_equal(min_value):
    """Build a validator checking that an attribute value is >= ``min_value``.

    The returned callable handles both lists and single-element attributes:
    for a list, the smallest element is compared against ``min_value``.
    """
    def compare(self, attribute, value):
        # Normalise scalars to a one-element list for a uniform min() check.
        values = value if type(value) is list else [value]
        if np.min(values) < min_value:
            _logger.error(
                f"{attribute.name} cannot be smaller than {min_value}!",
                _logger.ExceptionTypes.ValueError,
            )
    return compare
def save_lyrics(list_: List[Text], location: Text) -> None:
    """Write each element of *list_* to *location* as a text file, one per line.

    Args:
        list_: Lines of text to persist.
        location: Destination file path; the file is created or truncated.

    Returns:
        None.
    """
    # "w" (not "w+") suffices: the file is only written, never read back.
    # An explicit encoding avoids platform-dependent default encodings.
    with open(location, "w", encoding="utf-8") as f:
        for element in list_:
            f.write(element)
            f.write("\n")
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.