content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def mock_source_api_url_fixture():
    """
    Supplies a predetermined endpoint for G.h HTTP requests.

    Because the retrieval library is imported locally, this fixture can't
    be set to autouse.

    Yields the patched ``common_lib`` module, whose ``get_source_api_url``
    always returns ``_SOURCE_API_URL`` while the fixture is active.
    """
    import common_lib  # pylint: disable=import-error
    # Patch lives only for the duration of the yield; it is undone on exit.
    with patch('common_lib.get_source_api_url') as mock:
        mock.return_value = _SOURCE_API_URL
        yield common_lib
def default_handler(request):
    """The default handler gets invoked if no handler is set for a request.

    Echoes the content of the request's "Text" slot back to the caller.
    """
    # NOTE(review): assumes the slot map always contains a "Text" key --
    # confirm against the skill's interaction model.
    return alexa.create_response(message=request.get_slot_map()["Text"])
def ajax_available_variants_list(request):
    """Return variants filtered by request GET parameters.

    Response format is that of a Select2 JS widget:
    ``{"results": [{"id": ..., "text": ...}, ...]}``.
    """
    # Only variants of published skills are offered.
    available_skills = Skill.objects.published().prefetch_related(
        'category',
        'skill_type__skill_attributes')
    queryset = SkillVariant.objects.filter(
        skill__in=available_skills).prefetch_related(
        'skill__category',
        'skill__skill_type__skill_attributes')
    # Optional free-text filter from the widget's "q" parameter: matches
    # SKU, variant name, or the parent skill's name.
    search_query = request.GET.get('q', '')
    if search_query:
        queryset = queryset.filter(
            Q(sku__icontains=search_query) |
            Q(name__icontains=search_query) |
            Q(skill__name__icontains=search_query))
    variants = [
        {'id': variant.id, 'text': variant.get_ajax_label(request.discounts)}
        for variant in queryset]
    return JsonResponse({'results': variants})
def jaccard2_coef(y_true, y_pred, smooth=SMOOTH):
    """Jaccard squared index coefficient.

    Computes ``(|A.B| + s) / (|A|^2 + |B|^2 - |A.B| + s)`` over the
    flattened inputs -- a smooth, differentiable Jaccard variant usable as
    a loss/metric on probabilistic masks.

    :param y_true: true label tensor
        (NOTE(review): earlier docs said "int"; the element-wise products
        below suggest float tensors are expected -- confirm with callers)
    :param y_pred: predicted label tensor
    :param smooth: smoothing parameter that avoids division by zero,
        defaults to SMOOTH
    :type smooth: float, optional
    :return: Jaccard coefficient (scalar tensor)
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # "Squared" union: sum of squared magnitudes minus the intersection.
    union = K.sum(y_true_f * y_true_f) + K.sum(y_pred_f * y_pred_f) - intersection
    return (intersection + smooth) / (union + smooth)
def optimize_instance(xl_Instance, action):
    """Toggle Excel COM settings that speed up bulk operations.

    :param xl_Instance: an Excel Application COM object.
    :param action: ``'start'`` disables alerts/screen updating before a
        batch job; ``'stop'`` restores them and resets cursor/status bar.
        Any other value is silently ignored.
    """
    if action == 'start':
        xl_Instance.Visible = True
        xl_Instance.DisplayAlerts = False
        xl_Instance.ScreenUpdating = False
        # xl_Instance.EnableEvents = False # todo: check in reference code if this statement cause negative behavior in the script before uncomment it
    elif action == 'stop':
        xl_Instance.DisplayAlerts = True
        xl_Instance.ScreenUpdating = True
        # xl_Instance.EnableEvents = True
        xl_Instance.Application.Cursor = cst.xlDefault
        xl_Instance.Application.StatusBar = ''
def marginal_density_from_linear_conditional_relationship(
        mean1, cov1, cov2g1, Amat, bvec):
    """
    Compute the Gaussian marginal density P(x2).

    Given p(x1) = N(m1, C1) and the linear-Gaussian conditional
    p(x2|x1) = N(A*x1 + b, C_2|1), the marginal is

        P(x2) = N(A*m1 + b,  C_2|1 + A*C1*A.T)

    Parameters
    ----------
    mean1 : np.ndarray (nvars1)
        The mean (m1) of the Gaussian distribution of x1
    cov1 : np.ndarray (nvars1,nvars1)
        The covariance (C1) of the Gaussian distribution of x1
    cov2g1 : np.ndarray (nvars2,nvars2)
        The covariance (C_2|1) of the Gaussian distribution of P(x2|x1)
    Amat : np.ndarray (nvars2,nvars1)
        The matrix (A) of the conditional distribution P(x2|x1)
    bvec : np.ndarray (nvars2)
        The vector (b) of the conditional distribution P(x2|x1)

    Returns
    -------
    mean2 : np.ndarray (nvars2)
        The mean (m2) of P(x2)
    cov2 : np.ndarray (nvars2,nvars2)
        The covariance (C2) of P(x2)
    """
    marginal_mean = Amat @ mean1 + bvec
    marginal_cov = cov2g1 + Amat @ cov1 @ Amat.T
    return marginal_mean, marginal_cov
def rowcount_fetcher(cursor):
    """Fetch the row count reported by a DB-API *cursor*."""
    count = cursor.rowcount
    return count
def test_nested_codes():
    """Test function_calls() on global functions in nested code objects (bodies
    of other functions)."""
    # The following compile() creates 3 code objects:
    # - A global code.
    # - The contents of foo().
    # - And the body of the comprehension loop.
    code = compile_("""
def foo():
    bar()
    return [fizz(3) for i in range(10)]
""")
    # There are no function calls in the global code.
    assert function_calls(code) == []
    # Get the body of foo() -- the only CodeType constant of the module.
    foo_code, = (i for i in code.co_consts if isinstance(i, CodeType))
    # foo() contains bar() and the iterable of the comprehension loop.
    assert function_calls(foo_code) == [('bar', []), ('range', [10])]
    # Get the body of the comprehension loop.
    list_code, = (i for i in foo_code.co_consts if isinstance(i, CodeType))
    # This contains fizz(3).
    assert function_calls(list_code) == [('fizz', [3])]
    # The recursive variant reports all three code objects at once.
    assert recursive_function_calls(code) == {
        code: [],
        foo_code: [('bar', []), ('range', [10])],
        list_code: [('fizz', [3])],
    }
def get_asdf_library_info():
    """
    Get information about pyasdf to include in the asdf_library entry
    in the Tree.

    Returns a ``Software`` record identifying this library (name, running
    version, homepage and author).
    """
    return Software({
        'name': 'pyasdf',
        'version': version.version,
        'homepage': 'http://github.com/spacetelescope/pyasdf',
        'author': 'Space Telescope Science Institute'
    })
def view_hello_heartbeat(request):
    """Hello to TA2 with no logging. Used for testing.

    Calls the TA2 service and relays its JSON reply; any failure is
    wrapped in the standard JSON error envelope.
    """
    # Let's call the TA2!
    resp_info = ta2_hello()
    if not resp_info.success:
        return JsonResponse(get_json_error(resp_info.err_msg))
    json_str = resp_info.result_obj
    # Convert JSON str to python dict - err catch here
    # - let it blow up for now--should always return JSON
    json_format_info = json_loads(json_str)
    if not json_format_info.success:
        return JsonResponse(get_json_error(json_format_info.err_msg))
    json_info = get_json_success('success!',
                                 data=json_format_info.result_obj)
    return JsonResponse(json_info)
def with_environment(server_contexts_fn):
    """A decorator for running tests in an environment.

    ``server_contexts_fn`` is invoked at call time and must return an
    iterable of context managers; each one is entered before the wrapped
    test method runs and exited afterwards (in reverse order).
    """
    def decorator_environment(fn):
        @functools.wraps(fn)
        def wrapper_environment(self):
            with contextlib.ExitStack() as active_contexts:
                for ctx in server_contexts_fn():
                    active_contexts.enter_context(ctx)
                fn(self)
        return wrapper_environment
    return decorator_environment
def get_number_of_unpacking_targets_in_for_loops(node: ast.For) -> int:
    """Get the number of unpacking targets in a `for` loop.

    Delegates to :func:`get_number_of_unpacking_targets` on the loop's
    ``target`` node (e.g. ``for a, b in ...`` has two targets).
    """
    return get_number_of_unpacking_targets(node.target)
def test3():
    """
    Demonstrate the functionality of mask_split_curve(crv, mask).
    """
    ## create input curve
    crv = curve()
    ## input curve coordinates
    s = np.linspace(-5, 3, 1001)
    crv.r = s**2
    crv.tr = np.array([s, s**2])
    crv.uv = np.array([s, s])
    crv.uvdl = np.array([s**3, s**2])
    ## define mask (True where the curve should be cut out)
    mask = np.logical_or(np.sin(5.*crv.uv[0]**2) > .5, np.abs(crv.uv[1]) > 3.5)
    ## run
    out = mask_split_curve(crv, mask)
    # plot each coordinate set
    # NOTE(review): 'UV' is not assigned above -- presumably added by
    # mask_split_curve or the curve() constructor; verify, otherwise
    # crv.__dict__['UV'] raises KeyError.
    for s in ['tr', 'uv', 'uvdl', 'UV']:
        x = crv.__dict__[s]
        ## plot unmasked
        plt.plot(x[0], x[1], 'b-', lw=15, label='unmasked')
        ## plot naive numpy mask application
        if len(x[0]) > 0:
            plt.plot(x[0][mask], x[1][mask], 'y-', lw=10, label='naive numpy mask')
        ## plot valid subcurves
        label = 'split mask with mask_split_curve'
        for i in range(len(out)):
            x = out[i].__dict__[s]
            plt.plot(x[0], x[1], 'r-', lw=5, label=label)
            label = None  # only label the first subcurve in the legend
        plt.title(s)
        plt.legend(loc='lower left', fontsize=10)
        plt.show()
def mapr(proc, *iterables):
    """Like map, but from the right.

    For multiple inputs with different lengths, ``mapr`` syncs the **left**
    ends. See ``rmap`` for the variant that syncs the **right** ends.
    """
    # Map first, then reverse the mapped sequence; ``rev`` must therefore
    # be able to consume the (one-shot) map iterator.
    yield from rev(map(proc, *iterables))
def dmp_degree(f, u):
    """Return the leading degree of `f` in `x_0` in `K[X]`.

    The zero polynomial is assigned degree ``-1`` by convention.
    """
    return -1 if dmp_zero_p(f, u) else len(f) - 1
def shuffle(x: typing.List[typing.Any],
            random: typing.Optional[typing.Callable[[], float]] = None):
    """No-op stand-in for :func:`random.shuffle`.

    Mirrors the signature of the real function but deliberately leaves
    *x* untouched, so shuffling can be disabled in tests.
    """
def fetch_remote_content(url: str) -> Response:
    """
    Executes a GET request to an URL.

    No timeout or retry is configured; network errors propagate as
    ``requests`` exceptions.
    """
    response = requests.get(url)  # automatically generates a Session object.
    return response
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 decay_steps=[],  # NOTE(review): mutable default -- safe only if never mutated; confirm
                 lr_decay_dict_file="",
                 lr_decay_ratio=0.1):
    """
    optimization implementation

    Builds an Adam optimizer (PaddlePaddle fluid) with an optional warmup
    LR schedule, optional per-parameter LR overrides read from
    ``lr_decay_dict_file`` (tab-separated ``name<TAB>rate`` lines), global
    gradient-norm clipping at 1.0, and decoupled weight decay applied
    after the optimizer step. Returns the scheduled learning-rate var.
    """
    if warmup_steps > 0:
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler \
                .noam_decay(1 / (warmup_steps * (learning_rate ** 2)),
                            warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        elif scheduler == 'manual_warmup_decay':
            scheduled_lr = manual_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps, decay_steps, lr_decay_ratio)
        else:
            raise ValueError("Unkown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay' or 'manual_warmup_decay'")
    else:
        # No warmup: constant LR held in a persistable global variable.
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)
    # Optional per-parameter LR multipliers loaded from file.
    lr_decay_dict = {}
    if lr_decay_dict_file != "":
        with open(lr_decay_dict_file) as f:
            for line in f:
                param, decay_rate = line.strip().split('\t')
                lr_decay_dict[param] = float(decay_rate)
    for param in fluid.default_main_program().block(0).all_parameters():
        if param.name in lr_decay_dict:
            print (param.name, lr_decay_dict[param.name])
            param.optimize_attr['learning_rate'] = lr_decay_dict[param.name]
    optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    # Bind the schedule to the default program (internal API).
    optimizer._learning_rate_map[fluid.default_main_program(
    )] = scheduled_lr
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))

    def exclude_from_weight_decay(name):
        """
        Parameters not use weight decay

        LayerNorm weights and all bias parameters are excluded.
        """
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False

    # Snapshot pre-update parameter values (stop_gradient so the copies
    # do not participate in backprop); used for decoupled weight decay.
    param_list = dict()
    for param in train_program.global_block().all_parameters():
        param_list[param.name] = param * 1.0
        param_list[param.name].stop_gradient = True
    _, param_grads = optimizer.minimize(loss)
    if weight_decay > 0:
        for param, grad in param_grads:
            if exclude_from_weight_decay(param.name):
                continue
            with param.block.program._optimized_guard(
                    [param, grad]), fluid.framework.name_scope("weight_decay"):
                # w <- w - wd * lr * lr_multiplier * w_pre_update
                updated_param = param - param_list[
                    param.name] * weight_decay * scheduled_lr * param.optimize_attr['learning_rate']
                fluid.layers.assign(output=param, input=updated_param)
    return scheduled_lr
def build_full_record_to(pathToFullRecordFile):
    """structure of full record:
    {commitID: {'build-time': time, files: {filename: {record}, filename: {record}}}}

    NOTE(review): despite the name and parameter, this currently never
    reads or writes ``pathToFullRecordFile`` -- the load/save logic below
    was disabled because of excessive memory use, so the record is always
    rebuilt from scratch.
    """
    full_record = {}
    # this leads to being Killed by OS due to tremendous memory consumtion...
    #if os.path.isfile(pathToFullRecordFile):
    #    with open(pathToFullRecordFile, 'r') as fullRecordFile:
    #        print "loading full record from " + pathToFullRecordFile
    #        full_record = eval(fullRecordFile.read())
    #        print "read full record from " + pathToFullRecordFile
    #else:
    full_record = build_full_record()
    #    f = open(pathToFullRecordFile, 'w')
    #    try:
    #        f.write(repr(full_record) + "\n")
    #    except MemoryError as me:
    #        print me
    #        raise
    #    finally:
    #        print time.ctime()
    #        f.close()
    #    print "built full record, wrote to " + pathToFullRecordFile
    return full_record
def error(msg):
    """Print error message.

    Prefixes *msg* with a colored ERROR tag and the caller's file,
    function and line number (taken from the call stack).

    :param msg: a message
    :type msg: str
    """
    # inspect.stack()[1] is the immediate caller's frame record:
    # [1]=filename, [2]=line number, [3]=function name.
    clasz = inspect.stack()[1][1]
    line = inspect.stack()[1][2]
    func = inspect.stack()[1][3]
    # Python 2 print statement -- this module predates Python 3.
    print '[%s%s%s] Class: %s in %s() on line %s\n\tMessage: %s' \
        % (_Color.ERROR, 'ERROR', _Color.NORMAL, clasz, func, line, msg)
def test_basefile(signal):
    """
    Test the BaseFile class.

    Verifies the stored path and that every abstract operation raises
    NotImplementedError on the base class.
    """
    bf = BaseFile("data/B1855+09.L-wide.PUPPI.11y.x.sum.sm")
    assert(bf.path == "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm")
    with pytest.raises(NotImplementedError):
        bf.save(signal)
    with pytest.raises(NotImplementedError):
        bf.load()
    with pytest.raises(NotImplementedError):
        bf.append()
    with pytest.raises(NotImplementedError):
        bf.to_txt()
    with pytest.raises(NotImplementedError):
        bf.to_psrfits()
    # path is writable after construction
    bf.path = "./"
    assert(bf.path == "./")
def test_named_hive_partition_sensor_async_execute_complete():
    """Asserts that logging occurs as expected"""
    task = NamedHivePartitionSensorAsync(
        task_id="task-id",
        partition_names=TEST_PARTITION,
        metastore_conn_id=TEST_METASTORE_CONN_ID,
    )
    # execute_complete is the deferral callback; on a success event it
    # should log the event message verbatim.
    with mock.patch.object(task.log, "info") as mock_log_info:
        task.execute_complete(
            context=None, event={"status": "success", "message": "Named hive partition found"}
        )
    mock_log_info.assert_called_with("Named hive partition found")
def xsthrow_format(formula):
    """Rewrite *formula* to follow the xstool_throw convention for toy vars.

    Each known per-event branch indexed with ``[0]`` is re-indexed with
    ``[xstool_throw]``; the rest of the formula is left untouched.
    """
    substitutions = (
        ('accum_level[0]', 'accum_level[xstool_throw]'),
        ('selmu_mom[0]', 'selmu_mom[xstool_throw]'),
        ('selmu_theta[0]', 'selmu_theta[xstool_throw]'),
    )
    for old, new in substitutions:
        formula = formula.replace(old, new)
    return formula
def scale_intensity(data, out_min=0, out_max=255):
    """Scale intensity of data in a range defined by [out_min, out_max],
    based on the 2nd and 98th percentiles.

    Clipping to the percentile window makes the rescaling robust to
    outlier voxels/pixels.
    """
    p2, p98 = np.percentile(data, (2, 98))
    return rescale_intensity(data, in_range=(p2, p98), out_range=(out_min, out_max))
def raises_regex_op(exc_cls, regex, *args):
    """
    Rewrite an ``assertRaisesRegex`` call into a Starlark-style
    ``asserts.assert_fails`` expression, e.g.:

    self.assertRaisesRegex(
        ValueError, "invalid literal for.*XYZ'$", int, "XYZ"
    )
    asserts.assert_fails(lambda: int("XYZ"),
                         ".*?ValueError.*izznvalid literal for.*XYZ'$")

    ``args[0]`` is the callable node; the remaining args become its
    arguments (with trailing whitespace stripped).
    """
    # print(args)
    # asserts.assert_fails(, f".*?{exc_cls.value.value}")
    invokable = _codegen.code_for_node(
        cst.Call(
            func=args[0].value,
            args=[
                a.with_changes(
                    whitespace_after_arg=cst.SimpleWhitespace(value="")
                )
                for a in args[1:]
            ],
        )
    )
    # Prepend a lazy match on the exception class name to the user regex.
    regex = f'".*?{exc_cls.value.value}.*{regex.value.evaluated_value}"'
    return cst.parse_expression(
        f"asserts.assert_fails(lambda: {invokable}, {regex})"
    )
def read_json(filepath, encoding='utf-8'):
    """
    Reads a JSON document, decodes the file content, and returns a list or
    dictionary if provided with a valid filepath.

    Parameters:
        filepath (string): path to file
        encoding (string): optional name of encoding used to decode the file. The default is 'utf-8'.

    Returns:
        dict/list: dict or list representations of the decoded JSON document
    """
    # Local import keeps this snippet self-contained.
    import json
    # The with-block guarantees the file handle is closed even on a
    # decode error; json.load raises JSONDecodeError for invalid input.
    with open(filepath, 'r', encoding=encoding) as file_obj:
        return json.load(file_obj)
def getParInfo(sourceOp, pattern='*', names=None,
               includeCustom=True, includeNonCustom=True):
    """
    Returns parInfo dict for sourceOp. Filtered in the following order:
    pattern is a pattern match string
    names can be a list of names to include, default None includes all
    includeCustom to include custom parameters
    includeNonCustom to include non-custom parameters
    parInfo is {<parName>:(par.val, par.expr, par.mode string, par.bindExpr,
    par.default)...}
    """
    parInfo = {}
    for p in sourceOp.pars(pattern):
        # A parameter is kept when its name passes the optional whitelist
        # AND its custom/non-custom kind is enabled.
        if (names is None or p.name in names) and \
                ((p.isCustom and includeCustom) or \
                 (not p.isCustom and includeNonCustom)):
            # Empty string stands in for "no expression".
            parInfo[p.name] = [p.val, p.expr if p.expr else '', p.mode.name,
                               p.bindExpr, p.default]
    return parInfo
def generate_crontab(config):
    """Generate a crontab entry for running the backup job.

    Falls back to ``config.default_crontab_schedule`` when no schedule is
    configured or the configured one fails validation.
    """
    command = config.cron_command.strip()
    schedule = config.cron_schedule
    if not schedule:
        schedule = config.default_crontab_schedule
    else:
        schedule = strip_quotes(schedule.strip())
        if not validate_schedule(schedule):
            schedule = config.default_crontab_schedule
    return f'{schedule} {command}\n'
def bbox2wktpolygon(bbox):
    """
    Return OGC WKT Polygon of a simple bbox list.

    ``bbox`` is a ``[minx, miny, maxx, maxy]`` sequence whose entries are
    coerced to float. If the bbox is too short or contains non-numeric
    values, a degenerate zero POLYGON is returned instead of raising.
    """
    try:
        minx = float(bbox[0])
        miny = float(bbox[1])
        maxx = float(bbox[2])
        maxy = float(bbox[3])
    except (IndexError, KeyError, TypeError, ValueError):
        # Narrowed from a bare ``except`` so that KeyboardInterrupt,
        # SystemExit etc. are no longer swallowed -- only shape/coercion
        # problems fall back to the zero polygon.
        LOGGER.debug("Invalid bbox, setting it to a zero POLYGON")
        minx = 0
        miny = 0
        maxx = 0
        maxy = 0
    # Counter-clockwise ring, closed back on the first vertex.
    return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
        % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
async def test_fire_fingerprint_event(hass, entry):
    """Test the fingerprint event is fired."""
    await init_integration(hass, entry)
    events = async_capture_events(hass, "lcn_fingerprint")
    # Simulate a fingerprint status frame coming in from the bus.
    inp = ModStatusAccessControl(
        LcnAddr(0, 7, False),
        periphery=AccessControlPeriphery.FINGERPRINT,
        code="aabbcc",
    )
    lcn_connection = MockPchkConnectionManager.return_value
    await lcn_connection.async_process_input(inp)
    await hass.async_block_till_done()
    # Exactly one HA event should have been fired, carrying the code.
    assert len(events) == 1
    assert events[0].event_type == "lcn_fingerprint"
    assert events[0].data["code"] == "aabbcc"
def test_returns_a_list_of_links():
    """ test__link_collector_returns_a_list_of_links """
    collector = srmdumps._LinkCollector()
    # Feed a minimal HTML document; the collector should record each
    # anchor's href in document order.
    collector.feed('''
        <html>
            <body>
                <a href='x'></a>
                <a href='y'></a>
            </body>
        </html>
    ''')
    assert collector.links == ['x', 'y']
def test_auxiliary_cql_expressions(config):
    """Testing for incorrect CQL filter expression

    Each query uses a deliberately malformed CQL expression; the provider
    is expected to return no features for any of them.
    """
    # NOTE(review): the blanket ``except Exception`` below also catches
    # AssertionError, so a failing ``assert`` is merely logged and the
    # test still passes. Either the provider is expected to raise (then
    # the asserts are unreachable) or the except should be removed /
    # narrowed -- confirm intent before changing behavior.
    p = PostgreSQLProvider(config)
    try:
        results = p.query(cql_expression="waterway>'stream'")
        assert results.get('features', None) is None
        results = p.query(cql_expression="name@'Al%'")
        assert results.get('features', None) is None
        results = p.query(cql_expression='JOINS(geometry,POINT(105 52))')
        assert results.get('features', None) is None
        results = p.query(cql_expression='INTERSECTS(shape,POINT(105 52))')
        assert results.get('features', None) is None
        results = p.query(
            cql_expression='datetime FOLLOWING 2001-10-30T14:24:55Z'
        )
        assert results.get('features', None) is None
        results = p.query(cql_expression='name LIKE 2')
        assert results.get('features', None) is None
        results = p.query(cql_expression='id BETWEEN 2 AND "A"')
        assert results.get('features', None) is None
        results = p.query(cql_expression='id IS NULLS')
        assert results.get('features', None) is None
        results = p.query(cql_expression='id IN ["A","B"]')
        assert results.get('features', None) is None
    except Exception as err:
        LOGGER.error(err)
def command_discord_profile(*_) -> CommandResult:
""" Command `discord_profile` that returns information about Discord found in system ,(comma)."""
# Getting tokens.
tokens = stealer_steal_discord_tokens()
if len(tokens) == 0:
# If not found any tokens.
# Error.
return CommandResult("Discord tokens was not found in system!")
# Getting profile.
profile = stealer_steal_discord_profile(tokens)
if profile:
# Getting avatar.
# TODO: Why there is some of IDs?.
# Get avatar.
if avatar := None and ("avatar" in profile and profile["avatar"]):
avatar = "\n\n" + f"https://cdn.discordapp.com/avatars/636928558203273216/{profile['avatar']}.png"
# Returning.
return CommandResult(
f"[ID{profile['id']}]\n[{profile['email']}]\n[{profile['phone']}]\n{profile['username']}" +
avatar if avatar else ""
)
# If can`t get.
# Error.
return CommandResult("Failed to get Discord profile!") | 33,832 |
def check_vector_size(x_vector, size):
    """Validate that ``x_vector`` has exactly ``size`` entries along axis 0.

    Parameters:
        x_vector: array-like with a ``shape`` attribute (e.g. ``np.ndarray``).
        size (int): expected length of the first axis.

    Raises:
        ValueError: if ``x_vector.shape[0]`` differs from ``size``.
        (The old docstring said "assertion", but a ValueError has always
        been what is raised.)
    """
    if x_vector.shape[0] != size:
        # Include expected/actual sizes so failures are diagnosable.
        raise ValueError(
            'Error: input vector size is not valid '
            '(expected %d, got %d)' % (size, x_vector.shape[0]))
def aml(path):
    """Remission Times for Acute Myelogenous Leukaemia

    The `aml` data frame has 23 rows and 3 columns.

    A clinical trial to evaluate the efficacy of maintenance chemotherapy
    for acute myelogenous leukaemia was conducted by Embury et al. (1977) at
    Stanford University. After reaching a stage of remission through
    treatment by chemotherapy, patients were randomized into two groups. The
    first group received maintenance chemotherapy and the second group did
    not. The aim of the study was to see if maintenance chemotherapy
    increased the length of the remission. The data here formed a
    preliminary analysis which was conducted in October 1974.

    This data frame contains the following columns:

    `time`
        The length of the complete remission (in weeks).

    `cens`
        An indicator of right censoring. 1 indicates that the patient had a
        relapse and so `time` is the length of the remission. 0 indicates
        that the patient had left the study or was still in remission in
        October 1974, that is the length of remission is right-censored.

    `group`
        The group into which the patient was randomized. Group 1 received
        maintenance chemotherapy, group 2 did not.

    The data were obtained from

    Miller, R.G. (1981) *Survival Analysis*. John Wiley.

    Args:
        path: str.
            Path to directory which either stores file or otherwise file will
            be downloaded and extracted there.
            Filename is `aml.csv`.

    Returns:
        Tuple of np.ndarray `x_train` with 23 rows and 3 columns and
        dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    path = os.path.expanduser(path)
    filename = 'aml.csv'
    # Download the CSV only if it is not already cached locally.
    if not os.path.exists(os.path.join(path, filename)):
        url = 'http://dustintran.com/data/r/boot/aml.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='aml.csv',
                                   resume=False)
    data = pd.read_csv(os.path.join(path, filename), index_col=0,
                       parse_dates=True)
    x_train = data.values
    metadata = {'columns': data.columns}
    return x_train, metadata
def to_dict(observation: Observation):
    """Convert an Observation object back to dict format.

    Serializes the attrs-based object, then strips the internal attribute
    prefixes via ``_unprefix_attrs`` so keys match the wire format.
    """
    return _unprefix_attrs(attr.asdict(observation))
def _consolidate_extrapolated(candidates):
    """Get the best possible derivative estimate, given an error estimate.

    Going through ``candidates`` select the best derivative estimate
    element-wise using the estimated candidates, where best is defined as
    minimizing the error estimate from the Richardson extrapolation.

    See https://tinyurl.com/ubn3nv5 for corresponding code in numdifftools and
    https://tinyurl.com/snle7mb for an explanation of how errors of Richardson
    extrapolated derivative estimates can be estimated.

    Args:
        candidates (dict): Dictionary containing different derivative estimates
            and their error estimates.

    Returns:
        consolidated (np.ndarray): Array of same shape as input derivative
            estimates, holding the element-wise minimal-error estimate.
        updated_candidates (tuple): Pair ``(candidate_der_dict,
            candidate_err_dict)`` of the per-method best derivatives and
            their corresponding errors.
    """
    # first find minimum over steps for each method
    candidate_der_dict = {}
    candidate_err_dict = {}
    for key in candidates.keys():
        _der = candidates[key]["derivative"]
        _err = candidates[key]["error"]
        derivative, error = _select_minimizer_along_axis(_der, _err)
        candidate_der_dict[key] = derivative
        candidate_err_dict[key] = error
    # second find minimum over methods
    candidate_der = np.stack(list(candidate_der_dict.values()))
    candidate_err = np.stack(list(candidate_err_dict.values()))
    consolidated, _ = _select_minimizer_along_axis(candidate_der, candidate_err)
    updated_candidates = (candidate_der_dict, candidate_err_dict)
    return consolidated, updated_candidates
def roget_graph():
    """Return the thesaurus graph from the roget.dat example in
    the Stanford Graph Base.

    Nodes are the numeric category ids; a directed edge head->tail is
    added for every cross-reference, with self loops skipped.
    """
    # Regex that picks the numeric id off the head word. Compiled once
    # (hoisted out of the loop) and written as a raw string so ``\d`` is
    # a regex escape, not a Python string escape.
    numfind = re.compile(r"^\d+")
    # open file roget_dat.txt.gz (or roget_dat.txt)
    fh = gzip.open('roget_dat.txt.gz', 'r')
    G = nx.DiGraph()
    for line in fh.readlines():
        line = line.decode()
        if line.startswith("*"):  # skip comments
            continue
        if line.startswith(" "):  # this is a continuation line, append
            line = oldline + line
        if line.endswith("\\\n"):  # continuation line, buffer, goto next
            oldline = line.strip("\\\n")
            continue
        (headname, tails) = line.split(":")
        # head
        head = numfind.findall(headname)[0]  # get the number
        G.add_node(head)
        for tail in tails.split():
            if head == tail:
                # Actually skip the self loop, as the message promises
                # (previously the edge was added anyway).
                print("skipping self loop", head, tail, file=sys.stderr)
                continue
            G.add_edge(head, tail)
    return G
def test_get_org(client, jwt, session):  # pylint:disable=unused-argument
    """Assert that an org can be retrieved via GET."""
    headers = factory_auth_header(jwt=jwt, claims=TEST_JWT_CLAIMS)
    # Create the user, then an org owned by that user.
    rv = client.post('/api/v1/users', headers=headers, content_type='application/json')
    rv = client.post('/api/v1/orgs', data=json.dumps(TEST_ORG_INFO),
                     headers=headers, content_type='application/json')
    dictionary = json.loads(rv.data)
    org_id = dictionary['id']
    # Fetch it back and verify status and identity round-trip.
    rv = client.get('/api/v1/orgs/{}'.format(org_id),
                    headers=headers, content_type='application/json')
    assert rv.status_code == http_status.HTTP_200_OK
    dictionary = json.loads(rv.data)
    assert dictionary['id'] == org_id
def build_wideresnet_hub(
        num_class: int,
        name='wide_resnet50_2',
        pretrained=True):
    """Build a Wide-ResNet classifier from torch.hub with a fresh head.

    Inputs are expected to be normalized with
    mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]
    (standard ImageNet statistics).

    Args:
        num_class (int): number of output classes for the new final layer.
        name (str, optional): hub model name. Defaults to 'wide_resnet50_2'.
        pretrained (bool, optional): load ImageNet weights. Defaults to True.

    Returns:
        The model with its ``fc`` layer replaced by a randomly initialized
        ``nn.Linear(num_ftrs, num_class)``.
    """
    model = torch.hub.load(
        'pytorch/vision:v0.6.0',
        name,
        pretrained=pretrained)
    # Swap the ImageNet head for one sized to this task.
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, num_class)
    return model
def get_phoenix_model_wavelengths(cache=True):
    """
    Return the wavelength grid that the PHOENIX models were computed on,
    transformed into wavelength units in air (not vacuum).

    :param cache: passed to ``download_file`` -- reuse a previously
        downloaded grid when True.
    """
    wavelength_url = ('ftp://phoenix.astro.physik.uni-goettingen.de/v2.0/'
                      'HiResFITS/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits')
    wavelength_path = download_file(wavelength_url, cache=cache, timeout=30)
    wavelengths_vacuum = fits.getdata(wavelength_path)
    # Wavelengths are provided at vacuum wavelengths. For ground-based
    # observations convert this to wavelengths in air, as described in
    # Husser 2013, Eqns. 8-10:
    sigma_2 = (10**4 / wavelengths_vacuum)**2
    f = (1.0 + 0.05792105/(238.0185 - sigma_2) + 0.00167917 /
         (57.362 - sigma_2))
    wavelengths_air = wavelengths_vacuum / f
    return wavelengths_air
def scroll_to(text_to_find, locator=None, anchor='1', scroll_length=None,
              timeout=120, **kwargs):  # pylint: disable=unused-argument
    """Scroll a dynamic web page or scrollbar.

    Parameters
    ----------
    text_to_find : str
        Text to find by scrolling a page or an element.
    locator : str
        Locator text for a scrollable element.
    anchor : str
        Anchor used for the locator.
    scroll_length : str
        Amount of pixels per one scroll.
    timeout : int
        How long to scroll in seconds, before timing out.

    Examples
    --------
    .. code-block:: robotframework

        ScrollTo    Cat

    In the above example, the web page is scrolled until the text "Cat" is found.

    .. code-block:: robotframework

        ScrollTo    Cat    scroll_length=2000

    In the above example, the web page is scrolled 2000 pixels per scroll, until the text "Cat" is
    found.

    .. code-block:: robotframework

        ScrollTo    Cat    List of cats

    In the above example, a scrollbar located by a text "List of cats", is scrolled until the text
    "Cat" is found.

    .. code-block:: robotframework

        ScrollTo    Cat    List of cats    scroll_length=2000

    In the above example, a scrollbar located by a text "List of cats", is scrolled 2000 pixels
    per scroll until the text "Cat" is found.
    """
    # Fast path: nothing to scroll if the text is already visible.
    visible = is_text(text_to_find)
    if visible:
        scroll_text(text_to_find)
        return
    slow_mode = util.par2bool(kwargs.get('slow_mode', False))
    if locator:  # If we are trying to scroll a specific element
        scroll_first_scrollable_parent_element(
            locator, anchor, text_to_find, scroll_length, slow_mode, timeout)
    else:  # if we are just scrolling the web page
        scroll_dynamic_web_page(text_to_find, scroll_length, slow_mode, timeout)
def get_skin_mtime(skin_name):
    """
    Return the last-modification time of the skin's ``skin.html``.

    :param skin_name: name of the skin whose HTML file is inspected.
    :return: modification timestamp (seconds since the epoch, as a float).
    """
    return os.path.getmtime(get_skin_html_path(skin_name))
def onstart_msg(update, context):
    """
    Start message. When '/start' command is recieved.

    Replies in the originating chat asking the user (in Russian) to send
    a product link from the Lenta catalog.
    """
    context.bot.send_message(
        update.message.chat_id, text=u'скинь ссылку на товар\n'
        'из каталога: https://lenta.com/catalog '
    )
def accuracy_boundingbox(data, annotation, method, instance):  ## NOT IMPLEMENTED
    """
    Calculate how far off each bounding box was.

    A predicted box counts as "found" when all four of its corners are
    within FOUND_THRESHOLD pixels of a ground-truth box's corners.

    Parameters
    ----------
    data: color_image, depth_image
    annotation: pascal voc annotation
    method: function(instance, *data)
    instance: instance of object

    Returns
    -------
    (int, int, int) boxes_found, boxes_missed, boxes_extra.
    """
    FOUND_THRESHOLD = 5  # pixels
    ##
    bounding_boxes = method(instance, *data)
    ##
    boxes_found, boxes_missed, boxes_extra = 0, 0, 0
    for value in annotation.findall('object'):
        annotation_bounding_box = value.find('bndbox')
        ax1, ay1, ax2, ay2 = [int(annotation_bounding_box.find(param).text) for param in ['xmin', 'ymin', 'xmax', 'ymax']]
        for bounding_box in bounding_boxes:
            # Collapse the predicted polygon's vertices into an
            # axis-aligned box (Z collected but unused).
            X, Y, Z = [], [], []
            for x, y in bounding_box.vertices:
                X.append(x)
                Y.append(y)
            X, Y = np.unique(X), np.unique(Y)
            bx1, by1, bx2, by2 = min(X), min(Y), max(X), max(Y)
            ## corner-wise proximity checks
            x1_close = bx1 - FOUND_THRESHOLD <= ax1 <= bx1 + FOUND_THRESHOLD
            y1_close = by1 - FOUND_THRESHOLD <= ay1 <= by1 + FOUND_THRESHOLD
            x2_close = bx2 - FOUND_THRESHOLD <= ax2 <= bx2 + FOUND_THRESHOLD
            y2_close = by2 - FOUND_THRESHOLD <= ay2 <= by2 + FOUND_THRESHOLD
            if all((x1_close, y1_close, x2_close, y2_close)):
                boxes_found += 1
    # NOTE(review): a predicted box matching several annotations (or vice
    # versa) can be counted more than once -- confirm that is acceptable.
    boxes_missed = len(annotation.findall('object')) - boxes_found
    boxes_extra = len(bounding_boxes) - boxes_found
    return boxes_found, boxes_missed, boxes_extra
def http_get(url, as_json=False):
    """Perform a GET request with exponential-backoff retries.

    Retries up to 5 times on 429/5xx responses before giving up.

    :param url: URL to fetch.
    :param as_json: when True, return the decoded JSON body instead of
        the raw bytes.
    :return: ``(content, status_code)`` tuple.
    """
    retry_strategy = Retry(
        total=5,
        status_forcelist=[
            429,  # Too Many Requests
            500,  # Internal Server Error
            502,  # Bad Gateway
            503,  # Service Unavailable
            504   # Gateway Timeout
        ],
        # NOTE(review): ``method_whitelist`` was renamed to
        # ``allowed_methods`` in urllib3 1.26 and removed in 2.0 --
        # confirm the pinned urllib3 version before upgrading.
        method_whitelist=["GET"],
        backoff_factor=2  # wait 1, 2, 4, 8, ... seconds between retries
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    http = requests.Session()
    http.mount("https://", adapter)
    http.mount("http://", adapter)
    response = http.get(url)
    content = response.content
    if as_json:
        content = response.json()
    return content, response.status_code
def test_kbd_gpios():
    """Test keyboard row & column GPIOs.

    Note, test only necessary on 50pin -> 50pin flex

    These must be tested differently than average GPIOs as the servo side
    logic, a 4to1 mux, is responsible for shorting colX to rowY where
    X == 1|2 and Y = 1|2|3. To test the flex traces I'll set the row to
    both high and low and examine that the corresponding column gets
    shorted correctly.

    (Python 2 module: uses ``xrange``.)

    Returns:
        errors: integer, number of errors encountered while testing
    """
    errors = 0
    # disable everything initially
    kbd_off_cmd = 'kbd_m1_a0:1 kbd_m1_a1:1 kbd_m2_a0:1 kbd_m2_a1:1 kbd_en:off'
    for col_idx in xrange(2):
        if not set_ctrls(kbd_off_cmd):
            logging.error('Disabling all keyboard rows/cols')
            errors += 1
            break
        mux_ctrl = KBD_MUX_COL_IDX[col_idx]
        kbd_col = 'kbd_col%d' % (col_idx + 1)
        for row_idx in xrange(3):
            kbd_row = 'kbd_row%d' % (row_idx + 1)
            # Program the mux address bits (a1 = row>>1, a0 = row&1),
            # enable the keyboard mux, and read back the column.
            cmd = '%s1:%d %s0:%d ' % (mux_ctrl, row_idx>>1, mux_ctrl,
                                      row_idx & 0x1)
            cmd += 'kbd_en:on %s' % (kbd_col)
            (retval, ctrls) = get_ctrls(cmd, timeout=30)
            if not retval:
                logging.error('ctrls = %s', ctrls)
                errors += 1
            # Drive the row to the opposite level, then back, verifying
            # the shorted column follows each time.
            for set_val in [GPIO_MAPS[ctrls[kbd_col]], ctrls[kbd_col]]:
                cmd = '%s:%s sleep:0.2 %s' % (kbd_row, set_val, kbd_col)
                (retval, ctrls) = get_ctrls(cmd)
                if not retval:
                    logging.error('ctrls = %s', ctrls)
                    errors += 1
                if ctrls[kbd_col] != set_val:
                    logging.error('After setting %s, %s != %s', kbd_row,
                                  kbd_col, set_val)
                    errors += 1
    return errors
def merge_json(name: str):
    """
    merge all devices json files into one file

    Collects every ``*.json`` under directory ``name`` (excluding
    ``recovery.json`` / ``fastboot.json`` files) into a single JSON array
    written to ``name/name.json``.
    """
    print("Creating JSON files")
    # NOTE(review): ``name`` doubles as directory and output filename, so
    # this only behaves sensibly for simple relative directory names.
    json_files = [x for x in sorted(glob(f'{name}/*.json')) if not x.endswith('recovery.json')
                  and not x.endswith('fastboot.json')]
    json_data = []
    for file in json_files:
        with open(file, "r") as json_file:
            json_data.append(json.load(json_file))
    # Write without extension first, then rename, so the output file is
    # never picked up by the glob above.
    with open(f'{name}/{name}', "w") as output:
        json.dump(json_data, output, indent=1)
    if path.exists(f'{name}/{name}'):
        rename(f'{name}/{name}', f'{name}/{name}.json')
def fields_for_model(model):
    """
    This function returns the fields for a schema that matches the provided
    nautilus model.

    Args:
        model (nautilus.model.BaseModel): The model to base the field list on

    Returns:
        (dict<field_name: str, graphqlType>): A mapping of field names to
            graphql types
    """
    # the attribute arguments (no filters); names are lower-cased to
    # match the schema convention.
    args = {field.name.lower(): convert_peewee_field(field) \
            for field in model.fields()}
    # use the field arguments, without the segments
    return args
def install(packages, installer=None, upgrade=False, use_sudo=False):
    """
    Install Python packages with distribute.

    (Python 2 / Fabric 1.x module: ``basestring``, ``sudo``/``run``.)

    :param packages: a package name or an iterable of names.
    :param installer: unused -- NOTE(review): accepted but ignored;
        confirm whether it should select an alternative install command.
    :param upgrade: pass ``-U`` to upgrade already-installed packages.
    :param use_sudo: run the command via sudo instead of the plain shell.
    """
    func = use_sudo and sudo or run
    if not isinstance(packages, basestring):
        packages = " ".join(packages)
    options = []
    if upgrade:
        options.append("-U")
    options = " ".join(options)
    # NOTE: easy_install is long deprecated in favour of pip.
    func('easy_install %(options)s %(packages)s' % locals())
def _landstat(landscape, updated_model, in_coords):
"""
Compute the statistic for transforming coordinates onto an existing
"landscape" of "mountains" representing source positions. Since the
landscape is an array and therefore pixellated, the precision is limited.
Parameters
----------
landscape: nD array
synthetic image representing locations of sources in reference plane
updated_model: Model
transformation (input -> reference) being investigated
in_coords: nD array
input coordinates
Returns
-------
float:
statistic representing quality of fit to be minimized
"""
def _element_if_in_bounds(arr, index):
try:
return arr[index]
except IndexError:
return 0
out_coords = updated_model(*in_coords)
if len(in_coords) == 1:
out_coords = (out_coords,)
out_coords2 = tuple((coords - 0.5).astype(int) for coords in out_coords)
result = sum(_element_if_in_bounds(landscape, coord[::-1]) for coord in zip(*out_coords2))
################################################################################
# This stuff replaces the above 3 lines if speed doesn't hold up
# sum = np.sum(landscape[i] for i in out_coords if i>=0 and i<len(landscape))
# elif len(in_coords) == 2:
# xt, yt = out_coords
# sum = np.sum(landscape[iy,ix] for ix,iy in zip((xt-0.5).astype(int),
# (yt-0.5).astype(int))
# if ix>=0 and iy>=0 and ix<landscape.shape[1]
# and iy<landscape.shape[0])
################################################################################
return -result | 33,850 |
def presence(label):
    """Higher-order function: build a scorer that returns 1.0 when *label*
    is present in both (or neither) of its two arguments, else 0.0."""
    def _agreement(x, y):
        return 1.0 * ((label in x) == (label in y))
    return _agreement
def get_config(config_file=None, section=None):
    """Gets the user defined config and validates it.

    Resolution order when config_file is None (outside pytest): local project
    conf in the cwd, then the system project conf, then the conf shipped in
    the gmprocess repository.  Under pytest the test conf is always used.

    Args:
        config_file:
            Path to config file to use. If None, uses defaults.
        section (str):
            Name of section in the config to extract (i.e., 'fetchers',
            'processing', 'pickers', etc.) If None, whole config is returned.
    Returns:
        dictionary:
            Configuration parameters.
    Raises:
        IndexError:
            If input section name is not found.
    """
    if config_file is None:
        # Try not to let tests interfere with actual system:
        if os.getenv("CALLED_FROM_PYTEST") is None:
            # Not called from pytest -- Is there a local project?
            local_proj = os.path.join(os.getcwd(), constants.PROJ_CONF_DIR)
            local_proj_conf = os.path.join(local_proj, "projects.conf")
            if os.path.isdir(local_proj) and os.path.isfile(local_proj_conf):
                # There's a local project
                config_file = __proj_to_conf_file(local_proj)
            else:
                # Is there a system project?
                sys_proj = constants.PROJECTS_PATH
                sys_proj_conf = os.path.join(sys_proj, "projects.conf")
                if os.path.isdir(sys_proj) and os.path.isfile(sys_proj_conf):
                    config_file = __proj_to_conf_file(sys_proj)
                else:
                    # Fall back on conf file in repository
                    data_dir = os.path.abspath(
                        pkg_resources.resource_filename("gmprocess", "data")
                    )
                    config_file = os.path.join(
                        data_dir, constants.CONFIG_FILE_PRODUCTION
                    )
        else:
            # When called by pytest
            data_dir = os.path.abspath(
                pkg_resources.resource_filename("gmprocess", "data")
            )
            config_file = os.path.join(data_dir, constants.CONFIG_FILE_TEST)
    if not os.path.isfile(config_file):
        fmt = "Missing config file: %s."
        raise OSError(fmt % config_file)
    else:
        # ruamel YAML round-trip loader; preserve_quotes keeps quoting style.
        with open(config_file, "r", encoding="utf-8") as f:
            yaml = YAML()
            yaml.preserve_quotes = True
            config = yaml.load(f)
            # Raises on schema violations before any section extraction.
            CONF_SCHEMA.validate(config)
    if section is not None:
        if section not in config:
            raise IndexError(f"Section {section} not found in config file.")
        else:
            config = config[section]
    return config | 33,852 |
def make_formula(formula_str, row, col, first_data_row=None):
    """
    Build an Excel formula string from the value of an HTML "data-excel"
    attribute.  Called while the spreadsheet is being created, so the cell
    knows its own (row, col) and the first data row.

    Supported forms:
        "SUM ROW A-C"             sum the current row from column A to C
        "SUM ROW A,C"             sum cells A and C in the current row
        "SUM COL"                 sum current col from first_data_row to row-1
        "FORMULA RAW <expr>"      use <expr> verbatim
        "FORMULA RELATIVE <expr>" resolve colmNNN/rowpNNN placeholders
                                  relative to the current cell

    :param formula_str: the "data-excel" attribute value describing the formula
    :param row: cell row
    :param col: cell column
    :param first_data_row: first data row, used by column formulas
    :return: the formula string (empty when the form is not recognised)
    """
    tokens = formula_str.split(' ')
    func = tokens[0]
    args = tokens[-1]
    if func == 'SUM':
        mode = tokens[1]
        if mode == 'ROW':
            if '-' in args:
                span = args.split('-')
                # Excel rows are 1-based, hence row + 1.
                return '=SUM({}{}:{}{})'.format(span[0], row + 1,
                                                span[1], row + 1)
            if ',' in args:
                cells = (letter.strip() + str(row + 1)
                         for letter in args.split(','))
                return '=SUM({})'.format('+'.join(cells))
        elif mode == 'COL':
            return '=SUM({}:{})'.format(
                xl_rowcol_to_cell(first_data_row, col),
                xl_rowcol_to_cell(row - 1, col))
    elif func == 'FORMULA':
        mode = tokens[1]
        body = ' '.join(tokens[2:])
        if mode == 'RAW':
            return '=' + body
        if mode == 'RELATIVE':
            return '=' + locate_cells(body, row, col)
    return ''
def plot_loss_curve(train_loss, val_loss, title="", save=False,
                    fname=""):
    """Plot the training and validation loss curves.

    Args:
        train_loss (list): training loss value per round
        val_loss (list): validation loss value per round
        title (str): plot title
        save (bool): save the figure to *fname* (PDF) instead of showing it
        fname (str): output filename used when *save* is True
    """
    assert len(train_loss) == len(val_loss)
    n_rounds = len(train_loss)
    plt.figure(1, figsize=(12, 4))
    plt.plot(range(n_rounds), train_loss, 'tab:blue',
             label='Training loss')
    # Shift the validation curve by 0.5 rounds: it is a running average.
    plt.plot([r + 0.5 for r in range(n_rounds)], val_loss, 'tab:orange',
             label='Validation loss')
    plt.ylabel("Loss", fontsize=14)
    plt.xlabel("Training rounds", fontsize=14)
    plt.legend(loc='upper right', fontsize='large')
    plt.title(title, fontsize=14)
    plt.xticks(fontsize=11)
    plt.yticks(fontsize=11)
    if save:
        plt.savefig(fname, format='pdf', bbox_inches='tight')
    else:
        plt.show()
def select(population, to_retain):
    """Select the fittest members to breed.

    The population is sorted by fitness; the weaker half are treated as
    females and the stronger half as males, and the top ``to_retain // 2``
    of each group are kept.

    Returns:
        (strong_males, strong_females) as two lists.
    """
    ranked = sorted(population)
    keep_per_sex = to_retain // 2
    half = len(ranked) // 2
    # Lower half -> females, upper half -> males (per the original scheme).
    females, males = ranked[:half], ranked[half:]
    return males[-keep_per_sex:], females[-keep_per_sex:]
def normal_function(sigma, width):
    """
    Default fitting function: sample a normal (Gaussian) profile with the
    given half-width *sigma* at integer offsets -width..width.

    Returns (lo, hi, values) where values is a numpy array of 2*width + 1
    samples and the peak (value 1.0) sits at index *width*.
    """
    log_two = log(2)
    sigma_sq = float(sigma) ** 2
    lo, hi = width, width + 1
    values = numpy.array(
        [exp(-offset * offset / sigma_sq * log_two)
         for offset in range(-lo, hi)]
    )
    return lo, hi, values
def style_get_url(layer, style_name, internal=True):
    """Build the QGIS Server request URL that fetches a style as XML.

    :param layer: Layer to inspect
    :type layer: Layer
    :param style_name: Style name as given by QGIS Server
    :type style_name: str
    :param internal: Flag to switch between public url and internal url.
        Public url will be served by Django Geonode (proxified).
    :type internal: bool
    :return: QGIS Server request url
    :rtype: str
    """
    try:
        qgis_layer = QGISServerLayer.objects.get(layer=layer)
    except QGISServerLayer.DoesNotExist:
        logger.debug(f'No QGIS Server Layer for existing layer {layer.name}')
        raise
    params = {
        'PROJECT': qgis_layer.qgis_project_path,
        'SERVICE': 'STYLEMANAGER',
        'REQUEST': 'GetStyle',
        'LAYER': layer.name,
        'NAME': style_name,
    }
    endpoint = qgis_server_endpoint(internal)
    return Request('GET', endpoint, params=params).prepare().url
def remove_duplicates(l):
    """
    Return a new list with duplicate values removed.

    For each value that occurs more than once, only its LAST occurrence is
    kept (matching the original algorithm); relative order is preserved.
    Works with any elements supporting ==, hashable or not.
    """
    return [item for position, item in enumerate(l)
            if item not in l[position + 1:]]
def in_boudoir(callback):
    """Decorator: make a command usable only inside a boudoir.

    When the decorated command is invoked outside a channel registered as a
    :class:`.bdd.Boudoir`, an error message is sent instead of running it.
    Only usable on a command defined inside a Cog.
    """
    @functools.wraps(callback)
    async def wrapper(self, ctx, *args, **kwargs):
        try:
            Boudoir.from_channel(ctx.channel)
        except ValueError:
            # Not a registered boudoir channel: refuse the command.
            await ctx.reply("Cette commande est invalide en dehors "
                            "d'un boudoir.")
            return None
        return await callback(self, ctx, *args, **kwargs)
    return wrapper
def parse_input():
    """Set up the required input arguments and parse them.

    Returns:
        argparse.Namespace with ``log_file``, ``n_epochs`` and ``out_dir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('log_file', help='CSV file of log data')
    # BUG FIX: the flag pairs were previously passed as a single string
    # ('-e, --n_epochs'), which argparse registers as one unusable option
    # name; short and long flags must be separate arguments.
    parser.add_argument('-e', '--n_epochs', dest='n_epochs',
                        help='number of training epochs', metavar='',
                        type=int, default=5)
    parser.add_argument('-o', '--out_dir', dest='out_dir', metavar='',
                        default=time.strftime("%Y%m%d_%H%M%S"),
                        help='directory where the model is stored')
    return parser.parse_args()
def xyz_to_pix(position, bounds, pixel_size):
    """Convert a 3D position into a (u, v) pixel location on the heightmap.

    u follows the y-axis (position[1]) and v the x-axis (position[0]),
    each offset by the workspace lower bound and scaled by pixel_size.
    """
    v_raw = (position[0] - bounds[0, 0]) / pixel_size
    u_raw = (position[1] - bounds[1, 0]) / pixel_size
    return (int(np.round(u_raw)), int(np.round(v_raw)))
def main():
    """
    Entry point: create a sandbox, download the checkmyreqs package into it,
    print the resulting directory, then tear the sandbox down.
    """
    box = create_sandbox()
    package_dir = download_package_to_sandbox(
        box,
        'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'
    )
    print(package_dir)
    destroy_sandbox(box)
def get_random_lb():
    """ Select a random load balancer from the load balancers file.

    Returns:
        A string "ip:port" for a randomly chosen load balancer.
    """
    with open(LOAD_BALANCERS_FILE) as lb_file:
        endpoints = [':'.join([line.strip(), str(PROXY_PORT)])
                     for line in lb_file]
    return random.choice(endpoints)
def enlarge(n):
    """
    Multiply a number by 100.

    Param: n (numeric) the number to enlarge
    Return: the enlarged number (numeric)
    """
    factor = 100
    return n * factor
def ResolveWikiLinks(html):
    """Convert [[WikiLinks]] (optionally [[target|label]]) in *html* into
    anchors pointing at the personal wiki, e.g.
    <a href="https://z3.ca/WikiLinks">WikiLinks</a>."""
    pattern = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')

    def make_anchor(match):
        # Newlines inside a link collapse to spaces; spaces become _ in the slug.
        name = match.group(1).replace('\n', ' ')
        slug = name.replace(' ', '_')
        return f'<a class="wiki" href="https://z3.ca/{slug}">{name}</a>'

    return pattern.sub(make_anchor, html)
def handle_new_favorite(query_dict):
    """Remove an existing (user, post) favorite if one exists.

    Does not handle multi-part data properly.
    Also, posts don't quite exist as they should.

    Returns True when a favorite was found and deleted (and the post record
    updated -- presumably a recount; confirm in update_post), False when a
    required POST parameter is missing or no favorite exists.
    """
    for required in POST_REQUIRED_PARAMS:
        if required not in query_dict:
            return False
    # not yet safe to use.
    post_id = str(string_from_interwebs(query_dict["post"][0])).strip()
    author_id = str(string_from_interwebs(query_dict["user"][0])).strip()
    with Connection('localhost', 27017) as connection:
        favorite = check_favorite(author_id, post_id, connection)
        if favorite is not None:
            # An existing favorite means this request is an "unfavorite".
            delete_favorite(favorite, connection)
            update_post(post_id, connection)
            return True
    return False | 33,866 |
def steam_ratings(html_text):
    """Collect both the 'overall' and 'recent' Steam app ratings from a
    store-page HTML document."""
    overall = steam_all_app_rating(html_text)
    recent = steam_recent_app_rating(html_text)
    return {"overall": overall, "recent": recent}
def separation_cos_angle(lon0, lat0, lon1, lat1):
    """Return the cosine of the angular separation between two direction
    vectors given as (lon, lat) pairs in radians (spherical law of cosines)."""
    sin_term = np.sin(lat1) * np.sin(lat0)
    cos_term = np.cos(lat1) * np.cos(lat0) * np.cos(lon1 - lon0)
    return sin_term + cos_term
def getArg(flag):
    """
    Return the command-line argument that follows *flag*, or "" when the
    flag is absent or is the last token on the command line.
    """
    try:
        return sys.argv[sys.argv.index(flag) + 1]
    except (ValueError, IndexError):
        # ValueError: flag not in argv; IndexError: flag has no following
        # argument.  (Previously a bare `except:` swallowed everything,
        # including KeyboardInterrupt/SystemExit.)
        return ""
def get_band_params(meta, fmt='presto'):
    """
    Return (fmin, fmax, nchans) from a metadata dictionary loaded from a
    file in the given format.  Only 'presto' metadata is supported;
    'sigproc' and unknown formats raise ValueError.
    """
    if fmt == 'sigproc':
        raise ValueError("Cannot parse observing band parameters from data in sigproc format")
    if fmt != 'presto':
        raise ValueError(f"Unknown format: {fmt}")
    bottom = meta['fbot']
    nchans = meta['nchan']
    # cbw may be negative, so the band top can be below the bottom.
    top = bottom + nchans * meta['cbw']
    return min(bottom, top), max(bottom, top), nchans
def logging(f):
    """Decorator that prints each call to *f* with its arguments, then the
    result (``=> value``) or the exception (``! cause``, re-raised)."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        rendered = [str(a) for a in args]
        rendered += [f'{key}={value}' for key, value in kwargs.items()]
        print(f'{f.__name__}({", ".join(rendered)})...')
        try:
            result = f(*args, **kwargs)
        except Exception as cause:
            print(f'! {cause}')
            raise
        print(f'=> {result}')
        return result
    return wrapper
def yandex_mean_encoder(columns=None, n_jobs=1, alpha=100, true_label=None):
    """
    Smoothed mean-encoding with a custom smoothing strength (alpha).
    http://learningsys.org/nips17/assets/papers/paper_11.pdf
    """
    encoder_builder = partial(build_yandex_mean_encoder, alpha=alpha)
    return TargetCategoryEncoder(encoder_builder, columns, n_jobs, true_label)
def cure_sample_part(
    X: np.ndarray,
    k: int,
    c: int = 3,
    alpha: float = 0.3,
    u_min: Optional[int] = None,
    f: float = 0.3,
    d: float = 0.02,
    p: Optional[int] = None,
    q: Optional[int] = None,
    n_rep_finalclust: Optional[int] = None,
    plotting: bool = True,
):
    """
    CURE algorithm variation for large datasets.
    Partition the sample space into p partitions, each of size len(X)/p, then partially cluster each
    partition until the final number of clusters in each partition reduces to n/(pq). Then run a second
    clustering pass on the n/q partial clusters for all the partitions.
    :param X: input data array.
    :param k: desired number of clusters.
    :param c: number of representatives for each cluster.
    :param alpha: parameter that regulates the shrinking of representative points toward the centroid.
    :param u_min: size of the smallest cluster u.
    :param f: percentage of cluster points (0 <= f <= 1) we would like to have in the sample.
    :param d: (0 <= d <= 1) the probability that the sample contains less than f*|u| points of cluster u is less than d.
    :param p: the number of partitions.
    :param q: the number >1 such that each partition reduces to n/(pq) clusters.
    :param n_rep_finalclust: number of representatives to use in the final assignment phase.
    :param plotting: if True, plots all intermediate steps.
    :return: (clusters, rep, CURE_df): the clusters dictionary, the dictionary
        of representatives, and the CURE matrix.
    """
    if ((p is None) and (q is not None)) or ((q is None) and (p is not None)):
        raise ValueError("p and q must be both specified if not None.")
    # choose the parameters suggested by the paper if the user doesnt provide input parameters
    if u_min is None:
        u_min = round(len(X) / k)
    if n_rep_finalclust is None:
        n_rep_finalclust = c
    _, df_nonan = build_initial_matrices(X)
    # this is done to ensure that the algorithm starts even when input params are bad
    # (relax f, then d, until the Chernoff-bound sample size fits the data)
    while True:
        print("new f: ", f)
        print("new d: ", d)
        n = math.ceil(chernoffBounds(u_min=u_min, f=f, N=len(X), k=k, d=d))
        if n <= len(df_nonan):
            b_sampled = df_nonan.sample(n, random_state=42)
            break
        else:
            if f >= 0.19:
                f = f - 0.1
            else:
                d = d * 2
    # Rows NOT drawn into the sample; they get assigned in the final pass.
    b_notsampled = df_nonan.loc[
        [str(i) for i in range(len(df_nonan)) if str(i) not in b_sampled.index], :
    ]
    # find the best p and q according to the paper
    if (p is None) and (q is None):
        def g(x):
            res = (x[1] - 1) / (x[0] * x[1]) + 1 / (x[1] ** 2)
            return res
        results = {}
        for i in range(2, 15):
            for j in range(2, 15):
                results[(i, j)] = g([i, j])
        p, q = max(results, key=results.get)
        print("p: ", p)
        print("q: ", q)
    if (n / (p * q)) < 2 * k:
        print("n/pq is less than 2k, results could be wrong.")
    if k * d >= 1:
        print("k*d is greater or equal to 1, results could be wrong.")
    # form the partitions
    lin_sp = np.linspace(0, n, p + 1, dtype="int")
    # lin_sp
    b_partitions = []
    for num_p in range(p):
        # try:
        b_partitions.append(b_sampled.iloc[lin_sp[num_p]: lin_sp[num_p + 1]])
        # except:
        #    b_partitions.append(b_sampled.iloc[lin_sp[num_p]:])
    k_prov = round(n / (p * q))
    # perform clustering on each partition separately
    partial_clust = []
    partial_rep = []
    partial_CURE_df = []
    for i in range(p):
        print("\n")
        print(i)
        clusters, rep, CURE_df = cure(
            b_partitions[i].values,
            k=k_prov,
            c=c,
            alpha=alpha,
            plotting=plotting,
            partial_index=b_partitions[i].index,
        )
        partial_clust.append(clusters)
        partial_rep.append(rep)
        partial_CURE_df.append(CURE_df)
    # merging all data into single components
    # clusters
    # NOTE(review): the loop variable d shadows the parameter d, which is
    # no longer needed past this point.
    clust_tot = {}
    for d in partial_clust:
        clust_tot.update(d)
    # representatives
    rep_tot = {}
    for d in partial_rep:
        rep_tot.update(d)
    # mat CURE_df
    # Pad partitions whose size differs from the modal size with NaN columns
    # so the per-partition frames can be concatenated.
    diz = {i: len(b_partitions[i]) for i in range(p)}
    num_freq = Counter(diz.values()).most_common(1)[0][0]
    bad_ind = [k for k, v in diz.items() if v != num_freq]
    for ind in bad_ind:
        partial_CURE_df[ind]["{0}x".format(diz[ind])] = [np.nan] * k_prov
        partial_CURE_df[ind]["{0}y".format(diz[ind])] = [np.nan] * k_prov
    CURE_df_tot = partial_CURE_df[0].append(partial_CURE_df[1])
    for i in range(1, len(partial_CURE_df) - 1):
        CURE_df_tot = CURE_df_tot.append(partial_CURE_df[i + 1])
    # mat Xdist
    X_dist_tot = dist_mat_gen_cure(rep_tot)
    # final_clustering
    prep_data = [clust_tot, rep_tot, CURE_df_tot, X_dist_tot]
    clusters, rep, CURE_df = cure(
        b_sampled.values,
        k=k,
        c=c,
        alpha=alpha,
        preprocessed_data=prep_data,
        partial_index=b_sampled.index,
        n_rep_finalclust=n_rep_finalclust,
        not_sampled=b_notsampled.values,
        plotting=plotting,
        not_sampled_ind=b_notsampled.index,
    )
    return clusters, rep, CURE_df | 33,873 |
def get_serializer(request):
    """Returns the serializer for the given API request.

    Resolution order: an explicit ``format`` query argument, then a browser
    sniff (webkit browsers always get the debug serializer), then content
    negotiation on the Accept header.

    :raises BadRequest: for an unknown explicit format, or when no
        acceptable mimetype could be negotiated.
    """
    format = request.args.get('format')
    if format is not None:
        rv = _serializer_map.get(format)
        if rv is None:
            raise BadRequest(_(u'Unknown format "%s"') % escape(format))
        return rv
    # webkit sends useless accept headers. They accept XML over
    # HTML or have no preference at all. We spotted them, so they
    # are obviously not regular API users, just ignore the accept
    # header and return the debug serializer.
    if request.user_agent.browser in ('chrome', 'safari'):
        return _serializer_map['debug']
    best_serializer = None
    best_mimetype = None
    best_quality = 0
    for mimetype, serializer in _serializer_for_mimetypes.iteritems():
        quality = request.accept_mimetypes[mimetype]
        if quality > best_quality:
            best_serializer = serializer
            best_mimetype = mimetype
            best_quality = quality
    if best_serializer is None:
        raise BadRequest(_(u'Could not detect format. You have to specify '
                           u'the format as query argument or in the accept '
                           u'HTTP header.'))
    # special case. If the best match is not html and the quality of
    # text/html is the same as the best match, we prefer HTML.
    # BUG FIX: the old code compared the *serializer* against 'text/html',
    # which could never match a mimetype; compare the winning mimetype.
    if best_mimetype != 'text/html' and \
       best_quality == request.accept_mimetypes['text/html']:
        return _serializer_map['debug']
    return _serializer_map[best_serializer]
def deserialize_result(r: bytes, *, deserializer: Optional[Deserializer] = None) -> JobResult:
    """Deserialize *r* into a JobResult object.

    :param r: bytes to deserialize.
    :param deserializer: Optional serializer to use for deserialization.
        If not set, pickle is used.
    :return: A JobResult object.
    :raises DeserializationError: If bytes cannot be converted to JobResult.
    """
    load = pickle.loads if deserializer is None else deserializer
    try:
        payload = load(r)
        return JobResult(
            job_try=payload['t'],
            function=payload['f'],
            args=payload['a'],
            kwargs=payload['k'],
            enqueue_time=ms_to_datetime(payload['et']),
            score=None,
            success=payload['s'],
            result=payload['r'],
            start_time=ms_to_datetime(payload['st']),
            finish_time=ms_to_datetime(payload['ft']),
        )
    except Exception as e:
        raise DeserializationError('unable to deserialize job result') from e
def test_explode():
    """
    Plenty of systems use dice that explode on their maximum value.
    """
    # P(total > 6) requires the first roll to be a 6: probability 1/6.
    assert (dice.d6.explode() > 6).to_dict()[True] == pytest.approx(1 / 6)
    # P(total > 12) requires two 6s in a row: 1/36.
    assert (dice.d6.explode() > 12).to_dict()[True] == pytest.approx(1 / 36)
    # Limit the number of times the die is re-rolled
    mini_explode = dice.d6.explode(rerolls=1)
    assert (mini_explode > 6).to_dict()[True] == pytest.approx(1 / 6)
    assert mini_explode.to_dict()[12] == pytest.approx(1 / 36)
    # With a single re-roll the total can never exceed 12.
    assert (mini_explode > 12).to_dict().get(True, 0) == 0
    # It doesn't have to be a single die
    multi_explode = (2@dice.d6).explode()
    # 2d6 explodes only on double sixes, so a total of exactly 12 is impossible.
    assert multi_explode.to_dict().get(12, 0) == 0
    assert (multi_explode > 12).to_dict()[True] == pytest.approx(1 / 36) | 33,876 |
def form_symb_dCdU():
    """Form a symbolic version of dCdU, shape [3, 3, 8*12]."""
    dCdU = form_nd_array("dCdU", [3, 3, 8 * 12])
    # Zero every component with third index >= 3; indices 0..2 keep their
    # symbolic entries (NOTE(review): confirm the lower bound of 3 is intended).
    for first in range(3):
        for second in range(3):
            for third in range(3, 8 * 12):
                dCdU[first, second, third] = 0
    return dCdU
def predict(X, y, clf, onehot_encoder, params):
    """
    Runs a forward pass for a SINGLE sample and returns the output prediction.
    Arguments:
        X (list[int]) : a list of integers with each integer an input class of step
        y (list[int]) : a list of integers with each integer an output class of step
        clf : model exposing forward() and compute_loss() (inputs moved to CUDA)
        onehot_encoder : encoder used to one-hot targets when params['scalar'] is False
        params (dict) : expects keys 'scalar' (bool) and 'output_dim' (int)
    Returns:
        y_pred (list[int]) : a list of integers with each integer the prediction of each step
    """
    scalar = params['scalar']
    output_dim = params['output_dim']
    X = torch.tensor(X).cuda()  # shape(seq_len,)
    X = X.unsqueeze(0)  # shape(batch_size=1, seq_len)
    if scalar is True:
        seq_len = [X.shape[1]]
    else:
        seq_len = [len(y)]  # 2d list
    if scalar is True:
        y = torch.tensor([[y]]).cuda().float().cuda()  # shape(1,)
    else:
        y = lists2onehottensors([y], output_dim, onehot_encoder)
        # change to 1-hot
    y_pred = clf.forward(X, seq_len)
    loss = clf.compute_loss(y_pred, y, seq_len)
    loss = loss.item()
    # NOTE(review): the loss is computed but never returned or logged --
    # confirm whether it should be part of the return value.
    if scalar is False:
        # convert softmax y_pred and y to 1d list
        y_pred = onehottensors2classlist(y_pred, seq_len)[0]
    return y_pred | 33,878 |
def parse_modules_and_elabs(raw_netlist, net_manager):
    """
    Parse a raw netlist into its IvlModule and IvlElab objects.

    Returns a tuple (modules, elabs):
        modules -- list of IvlModule objects
        elabs   -- list of IvlElab objects
    """
    sections = parse_netlist_to_sections(raw_netlist)
    modules = [
        parse_module_lines(lines, net_manager)
        for lines in group_lines(sections['SCOPES'])
    ]
    elabs = [
        parse_elab_bundle_lines(lines, net_manager)
        for lines in group_lines(sections['ELABORATED NODES'])
    ]
    return modules, elabs
def scrub_dt_dn(dt, dn):
    """Return lowercase, code-friendly doctype and name for doctypes that
    use lower-case files; other doctypes pass through unchanged."""
    if dt in lower_case_files_for:
        return scrub(dt), scrub(dn)
    return dt, dn
def selobj_add_read_obj(selobj, callback, *callback_args, **callback_kwargs):
    """
    Register a read selection object.  *callback* is a co-routine factory;
    the created co-routine is sent the selection object once it becomes
    readable.
    """
    # Item assignment mutates the module-level registry in place, so no
    # `global` declaration is required.
    _read_callbacks[selobj] = callback(*callback_args, **callback_kwargs)
def by_uri(uri):
    """A LicenseSelector-less means of picking a License from a URI.

    Consults (and populates) the module-level cache; returns None when no
    registered selector recognises the URI.
    """
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent on both Python 2 and 3.
    if uri in _BY_URI_CACHE:
        return _BY_URI_CACHE[uri]
    for key, selector in cc.license.selectors.SELECTORS.items():
        if selector.has_license(uri):
            license = selector.by_uri(uri)
            _BY_URI_CACHE[uri] = license
            return license
    return None
def assignmentStatement(node):
    """
    assignmentStatement = variable "=" expression ";".

    Consumes one assignment from the module-level token stream (via the
    global `token` and `consume()`) and appends the parsed subtree to
    *node*; call order against the parser state is significant.
    """
    identifierNode = Node(token)
    consume(IDENTIFIER)
    operatorNode = Node(token)
    consume("=")
    node.addNode(operatorNode)
    # The "=" node owns the identifier (lhs) and the expression (rhs).
    operatorNode.addNode(identifierNode)
    expression(operatorNode)
    consume(";") | 33,883 |
def read_S(nameIMGxml):
    """
    Extract the image's optical centre from the XML file generated by MM3D
    (usually "Orientation-Im[n°i].JPG.xml").

    Parameters
    ----------
    nameIMGxml : str
        path of the orientation XML file

    Returns
    -------
    numpy.ndarray: the centre of the IMG (size 1*3)
    """
    tree = etree.parse(nameIMGxml)
    for centre_node in tree.xpath("/ExportAPERO/OrientationConique/Externe/Centre"):
        # Text is three space-separated floats; the last node found wins.
        centre = np.array(centre_node.text.split(" "), float)
    return np.transpose(centre)
def AddBatchJob(client):
    """Add a new BatchJob to upload operations to.

    Args:
        client: an instantiated AdWordsClient used to retrieve the BatchJob.

    Returns:
        The new BatchJob created by the request.
    """
    batch_job_service = client.GetService('BatchJobService', version='v201601')
    # A single ADD with an empty operand creates a fresh BatchJob.
    add_operation = {'operand': {}, 'operator': 'ADD'}
    response = batch_job_service.mutate([add_operation])
    return response['value'][0]
def bellmanFord(obj, source):
    """Determination of minimum distance between vertices using the
    Bellman-Ford algorithm.

    :param obj: graph object exposing vertexList, adjList and weightList
        (weightList[v][i] is the weight of the edge to adjList[v][i])
    :param source: the source vertex
    :return: dict mapping each vertex to its minimum distance from source
    """
    validatePositiveWeight(obj)
    n = CountVertices(obj)
    minDist = {vertex: (0 if vertex == source else float("inf"))
               for vertex in obj.vertexList}
    # Relax every edge n-1 times.
    # BUG FIX: the old code looked weights up with adjList[...].index(nbr),
    # which is O(degree) per edge and returns the FIRST occurrence, so a
    # neighbour listed twice always got the first edge's weight.  zip()
    # pairs each neighbour with its own weight.
    for _ in range(n - 1):
        for vertex in obj.adjList:
            for nbrVertex, weight in zip(obj.adjList[vertex],
                                         obj.weightList[vertex]):
                if minDist[nbrVertex] > minDist[vertex] + weight:
                    minDist[nbrVertex] = minDist[vertex] + weight
    return minDist
def test_my_sum() -> None:
    """Check `my_sum` on two random integers against the builtin `sum`,
    which serves as the ground truth."""
    first = random.randrange(999)
    second = random.randrange(999)
    assert my_sum(first, second) == sum([first, second])
def auto_default_option(*param_decls, **attrs) -> Callable[[_C], _C]:
    """
    Attach an option to the command, with a default value determined from
    the decorated function's signature.

    All positional arguments are passed as parameter declarations to
    :class:`click.Option`; all keyword arguments are forwarded unchanged
    (except ``cls``).

    .. versionadded:: 0.7.0

    :param cls: the option class to instantiate.  Defaults to
        :class:`click.Option`.
    """
    def decorator(f: _C) -> _C:
        opts = attrs.copy()
        if "help" in opts:
            opts["help"] = inspect.cleandoc(opts["help"])
        option_cls = opts.pop("cls", click.Option)
        option = option_cls(param_decls, **opts)
        # Register the option first, then derive its default from the
        # callback signature.
        _param_memo(f, option)
        _get_default_from_callback_and_set(f, option)
        return f

    return decorator
def get_filename_from_url(url):
    """
    Convert URL to filename.

    Example:
        URL `http://www.example.com/foo.pdf` will be converted to `foo.pdf`.

    :type url: unicode
    :rtype: unicode
    """
    name, extension = os.path.splitext(os.path.basename(urlsplit(url).path))
    # BUG FIX: the format template previously hard-coded "(unknown)" instead
    # of the {filename} placeholder, so every URL mapped to "(unknown).<ext>"
    # despite `filename=` being passed.  The extension is lowercased.
    return "{filename}{extension_with_dot}".format(
        filename=name.strip(), extension_with_dot=extension.strip().lower())
def properties(debugger, command, exe_ctx, result, internal_dict):
    """
    Print an Objective-C class's property description inside lldb.

    Syntax:
        properties <className/classInstance>

    Examples:
        (lldb) properties UIViewController
        (lldb) properties [NSObject new]

        (lldb) expression -l objc -O -- [NSObject new]
        <NSObject: 0x60000372f760>
        (lldb) properties 0x60000372f760

    This command is implemented in HMClassInfoCommands.py
    """
    if len(command) == 0:
        HM.DPrint("Requires a argument, Please enter \"help properties\" for help.")
        return
    # Fast path: evaluate _propertyDescription directly on the expression.
    value = HM.evaluateExpressionValue(expression=f'(NSString *)[{command} performSelector:NSSelectorFromString(@"_propertyDescription")]', printErrors=False)
    if HM.successOfSBError(value.GetError()):
        HM.DPrint(value.GetObjectDescription())
        return
    # Slow path: resolve the class by name, trying known (Swift-style)
    # module prefixes, then query the resolved class.
    clsPrefixesValue = HM.getClassPrefixes()[1]
    command_script = f'''
    Class inputClass = objc_lookUpClass("{command}");
    if (inputClass == nil) {{ // Find prefixed class
        for (NSString *prefix in (NSMutableArray *){clsPrefixesValue.GetValue()}) {{
            NSString *clsName = [prefix stringByAppendingString:@".{command}"];
            inputClass = objc_lookUpClass((char *)[clsName UTF8String]);
            if (inputClass) {{
                break;
            }}
        }}
    }}
    NSMutableString *result = [[NSMutableString alloc] init];
    if (inputClass == nil) {{
        [result appendString:@"Unable to resolve {command} or find {command} class, maybe {command} is not a subclass of NSObject\\n"];
    }} else {{
        if ((BOOL)[(Class)inputClass respondsToSelector:(SEL)NSSelectorFromString(@"_propertyDescription")]) {{
            [result appendString:(NSString *)[inputClass performSelector:NSSelectorFromString(@"_propertyDescription")]];
        }} else {{
            [result appendString:@"{command} is not a subclass of NSObject"];
        }}
    }}
    result;
    '''
    result = HM.evaluateExpressionValue(command_script).GetObjectDescription()
    HM.DPrint(result) | 33,890 |
def test_path_functionality1():
    """testing path functionality in detail"""
    # Base path: five positions along the diagonal, with orientations given
    # as (non-normalised) quaternions.
    pos0 = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5.0]])
    rot0 = R.from_quat(
        [(1, 0, 0, 1), (2, 0, 0, 1), (4, 0, 0, 1), (5, 0, 0, 1), (10, 0, 0, 1.0)]
    )
    inpath = np.array([(0.1, 0.1, 0.1), (0.2, 0.2, 0.2), (0.3, 0.3, 0.3)])
    b1, b2, b3, b4, b5 = pos0
    c1, c2, c3 = inpath
    # Expected orientation quaternions after evall processing.
    q1, q2, q3, q4, q5 = np.array(
        [(1, 0, 0, 1), (1, 0, 0, 0.5), (1, 0, 0, 0.25), (1, 0, 0, 0.2), (1, 0, 0, 0.1)]
    )
    # No move(): path must come back unchanged.
    pos, ori = evall(BaseGeo(pos0, rot0))
    P = np.array([b1, b2, b3, b4, b5])
    Q = np.array([q1, q2, q3, q4, q5])
    assert np.allclose(pos, P)
    assert np.allclose(ori, Q)
    # start=0: displacements apply to path entries 0..2.
    pos, ori = evall(BaseGeo(pos0, rot0).move(inpath, start=0))
    P = np.array([b1 + c1, b2 + c2, b3 + c3, b4, b5])
    Q = np.array([q1, q2, q3, q4, q5])
    assert np.allclose(pos, P)
    assert np.allclose(ori, Q)
    # start=1: the displacement window shifts one entry later.
    pos, ori = evall(BaseGeo(pos0, rot0).move(inpath, start=1))
    P = np.array([b1, b2 + c1, b3 + c2, b4 + c3, b5])
    Q = np.array([q1, q2, q3, q4, q5])
    assert np.allclose(pos, P)
    assert np.allclose(ori, Q)
    # start=2: displacements apply to the last three path entries.
    pos, ori = evall(BaseGeo(pos0, rot0).move(inpath, start=2))
    P = np.array([b1, b2, b3 + c1, b4 + c2, b5 + c3])
    Q = np.array([q1, q2, q3, q4, q5])
    assert np.allclose(pos, P)
    assert np.allclose(ori, Q) | 33,891 |
def zCurve(seq):
    """Return the 3-dimensional Z curve corresponding to *seq*.

    zcurve[n] = zcurve[n-1] + zShift[n], i.e. the cumulative sum of the
    per-position shift vectors.

    :param seq: the input sequence (indexable by zShift)
    :return: int array of shape (len(seq), 3)
    """
    # BUG FIX: the old code unconditionally assigned zcurve[0], raising
    # IndexError for an empty sequence.
    if len(seq) == 0:
        return np.zeros((0, 3), dtype=int)
    shifts = np.array([zShift(seq, pos) for pos in range(len(seq))], dtype=int)
    # cumsum along axis 0 is exactly the zcurve recurrence, vectorized.
    return np.cumsum(shifts, axis=0)
def getgeo():
    """ Grabbing and returning the zones as a JSON list of locations.

    Optional ?zone_name=<name> query argument restricts the query to one
    zone; without it every zone's `loc` is returned.
    """
    data = request.args.get('zone_name', None)
    print data
    #Check if data is null - get all zones
    out = []
    if data:
        rec = mongo.db.zones.find({'zone_name':data})
    else:
        rec = mongo.db.zones.find()
    for r in rec:
        # Mongo ObjectIds are not JSON-serialisable; drop them before dumping.
        r.pop('_id')
        out.append(r['loc'])
    jsonOut = json.dumps(out)
    print jsonOut
    return Response(response=jsonOut,
                    status=200,
                    mimetype="application/json") | 33,893 |
def test_maxwell_filter_additional():
    """Test processing of Maxwell filtered data"""
    # TODO: Future tests integrate with mne/io/tests/test_proc_history
    # Load testing data (raw, SSS std origin, SSS non-standard origin)
    data_path = op.join(testing.data_path(download=False))
    file_name = 'test_move_anon'
    raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif')
    # catch_warnings: loading MaxShield data emits a warning we don't care about.
    with warnings.catch_warnings(record=True):  # maxshield
        raw = Raw(raw_fname, preload=False, proj=False,
                  allow_maxshield=True).crop(0., 1., False)
    raw_sss = maxwell.maxwell_filter(raw)
    # Test io on processed data: save then reload round-trip.
    tempdir = _TempDir()
    test_outname = op.join(tempdir, 'test_raw_sss.fif')
    raw_sss.save(test_outname)
    raw_sss_loaded = Raw(test_outname, preload=True, proj=False,
                         allow_maxshield=True)
    # Some numerical imprecision since save uses 'single' fmt
    assert_allclose(raw_sss_loaded._data[:, :], raw_sss._data[:, :],
                    rtol=1e-6, atol=1e-20)
    # Test covariance calculation XXX add this | 33,894 |
def update_meta(metatable, table):
    """
    After ingest/update, refresh the metatable registry entry so it
    reflects the current contents of ``table``.

    Recomputes the observation date range, the bounding box of all
    geometries, and the column-name/type map, then commits.

    :param metatable: MetaTable instance to update.
    :param table: Table instance to update from.
    :returns: None
    """
    metatable.update_date_added()

    # Observation span: min/max of the point_date column.
    date_range = postgres_session.query(
        func.min(table.c.point_date),
        func.max(table.c.point_date)
    ).first()
    metatable.obs_from, metatable.obs_to = date_range

    # Bounding box: envelope of the union of all geometries, in WGS84.
    envelope = func.ST_Envelope(func.ST_Union(table.c.geom))
    metatable.bbox = postgres_session.query(
        func.ST_SetSRID(envelope, 4326)
    ).first()[0]

    # Column registry, minus the bookkeeping columns.
    bookkeeping = ('geom', 'point_date', 'hash')
    metatable.column_names = {
        col.name: str(col.type)
        for col in metatable.column_info()
        if col.name not in bookkeeping
    }

    postgres_session.add(metatable)
    postgres_session.commit()
def photo_upload(request):
    """AJAX POST for uploading a photo for any given application.

    On success renders the view_photo fragment for the saved photo;
    every failure path returns a plain-text "Fail: ..." response.
    """
    plain = 'text/plain; charset=utf-8'
    # Guard: only AJAX POSTs are accepted.
    if not (request.is_ajax() and request.method == 'POST'):
        return HttpResponse('AJAX POST required', content_type=plain)
    form = PhotoForm(
        data=request.POST, files=request.FILES, use_required_attribute=False,
    )
    if not form.is_valid():
        return HttpResponse(
            'Fail: {0}'.format(form.errors), content_type=plain,
        )
    ct = request.POST.get('content_type')
    oid = request.POST.get('oid')
    if not (ct and oid):
        return HttpResponse(
            'Fail: No Content Type or Object ID Provided',
            content_type=plain,
        )
    # Resolve the target object generically via its content type.
    ct = ContentType.objects.get(pk=ct)
    mod = ct.model_class()
    try:
        instance = mod.objects.get(pk=oid)
        phile = form.save(commit=False)
        phile.content_object = instance
        phile.save()
        return render(
            request,
            'dashboard/view_photo.ajax.html',
            {'photo': phile, 'ct': ct, 'oid': oid},
        )
    except Exception as error:
        return HttpResponse(
            'Fail: {0}'.format(str(error)), content_type=plain,
        )
def table_str(bq_target):
    # type: (BigqueryTarget) -> str
    """Given a BigqueryTarget, return its dotted table reference string."""
    tbl = bq_target.table
    return "{0}.{1}.{2}".format(tbl.project_id, tbl.dataset_id, tbl.table_id)
def any_to_any_translate_back(content, from_='zh-CN', to_='en'):
    """
    Round-trip (back) translation, e.g. Chinese -> English -> Chinese.

    :param content: str, user input (up to 4891 characters)
    :param from_: str, original language code
    :param to_: str, intermediate target language code
    :return: str, the text translated to ``to_`` and back to ``from_``
    """
    intermediate = any_to_any_translate(content, from_=from_, to_=to_)
    return any_to_any_translate(intermediate, from_=to_, to_=from_)
def loop_helper(eq_dict, func, *args, **kwargs):
    """Apply ``func`` to every EquipmentSinglePhase in an equipment dict.

    The initialize_* functions return dictionaries whose top-level values
    are either EquipmentSinglePhase objects or per-phase dictionaries of
    them; this helper hides that asymmetry so callers don't have to
    re-write the same dispatch loop.

    :param eq_dict: Dictionary from one of this module's initialize_*
        functions (e.g. initialize_regulators).
    :param func: Callable invoked as ``func(eq, *args, **kwargs)`` for
        each EquipmentSinglePhase object found.
    :raises TypeError: if a top-level value is neither a dict nor an
        EquipmentSinglePhase.
    """
    for entry in eq_dict.values():
        if isinstance(entry, dict):
            # Per-phase dictionary: apply to each phase's equipment.
            for phase_eq in entry.values():
                func(phase_eq, *args, **kwargs)
            continue
        if not isinstance(entry, EquipmentSinglePhase):
            raise TypeError('Value was not a dict or EquipmentSinglePhase.')
        func(entry, *args, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.