| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def build_bundletoperfectsensor_pipeline(pan_img, ms_img):
"""
    This function builds a pipeline that performs P+XS pansharpening
:param pan_img: Path to the panchromatic image
:type pan_img: string
:param ms_img: Path to the multispectral image
:type ms_img: string
    :returns: the configured pansharpening application
:rtype: otb application
"""
pansharpening_app = otbApplication.Registry.CreateApplication(
"BundleToPerfectSensor"
)
pansharpening_app.SetParameterString("inp", pan_img)
pansharpening_app.SetParameterString("inxs", ms_img)
pansharpening_app.Execute()
return pansharpening_app
| 14,900
|
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker store',
'formatter_class': arg.RawDescriptionDefaultHelpFormatter,
# Description is shown when the command's help is queried directly
'description': """
Store the results from one or more 'codechecker-analyze' result files in a
database.""",
# Epilogue is shown after the arguments when the help is queried
# directly.
'epilog': """
Environment variables
------------------------------------------------
CC_PASS_FILE The location of the password file for auto login. By default
CodeChecker will use '~/.codechecker.passwords.json' file.
It can also be used to setup different credential files to
login to the same server with a different user.
CC_SESSION_FILE The location of the session file where valid sessions are
stored. This file will be automatically created by
CodeChecker. By default CodeChecker will use
'~/.codechecker.session.json'. This can be used if
restrictive permissions forbid CodeChecker from creating
files in the users home directory (e.g. in a CI
environment).
The results can be viewed by connecting to such a server in a Web browser or
via 'CodeChecker cmd'.""",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Save analysis results to a database."
}
| 14,901
|
def main():
"""
    sig: () -> NoneType
    Runs the program.
"""
update_input()
store_objects()
global waves
init_waves()
turtle.tracer(0, 0)
turtle.onkey(key_y, "y")
turtle.onkey(key_up, "Up")
turtle.onkey(key_down, "Down")
turtle.onkey(key_left, "Left")
turtle.onkey(key_right, "Right")
turtle.onkey(key_i, "i")
turtle.onkey(key_k, "k")
turtle.onkey(key_l, "l")
turtle.onkey(key_j, "j")
turtle.onkey(key_s, "s")
turtle.listen()
while len(waves) > 0:
turtle.clear()
draw_frame()
update_waves()
turtle.update()
time.sleep(0.1)
turtle.mainloop()
| 14,902
|
def test_census_income():
"""Test correct shape and content of census income data."""
data = datasets.census_income()
# correct number of rows and columns
assert data.shape == (299285, 396)
# correct proportion of target values
assert (
data["income_gt_50k"].value_counts().loc[[0, 1]] ==
[280717, 18568]).all()
    # training and test partitions are both present (the original data was pre-split)
assert (
data["dataset_partition"].value_counts().loc[
["training_set", "test_set"]]).all()
| 14,903
|
def castep_geom_count(dot_castep):
"""Count the number of geom cycles"""
count = 0
with open(dot_castep) as fhandle:
for line in fhandle:
if 'starting iteration' in line:
count += 1
return count
| 14,904
|
def nav_login(request, text="Login", button=False):
"""Navigation login button
Args:
request (Request): Request object submitted by template
text (str, optional): Text to be shown in button. Defaults to "Login".
button (bool, optional): Is this to be styled as a button or as a link. Defaults to False.
Returns:
SafeText: HTML form
"""
url = reverse("login")
return nav_next(request, url, text, button)
| 14,905
|
def test_mesh():
"""
mesh should return a meshed array with increasing/decreasing grid size.
"""
dis = mesh(n=100, a=1)
d = dis[1:]-dis[:-1]
assert len(dis) == 100
assert np.all(d[1:] > d[:-1])
dis = mesh(n=100, a=-1)
d = dis[1:]-dis[:-1]
assert np.all(d[1:] < d[:-1])
| 14,906
|
async def main() -> None:
"""Show example on controlling your WLED device."""
async with WLED("10.10.11.135") as led:
device = await led.update()
print(device.info.version)
if isinstance(device.state.preset, Preset):
print(f"Preset active! Name: {device.state.preset.name}")
if isinstance(device.state.playlist, Playlist):
print(f"Playlist active! Name: {device.state.playlist.name}")
# Turn strip on, full brightness
await led.master(on=True, brightness=255)
| 14,907
|
def crop_image(input_image, output_image, start_x, start_y, width, height):
"""Pass input name image, output name image, x coordinate to start croping, y coordinate to start croping, width to crop, height to crop """
input_img = Image.open(input_image)
box = (start_x, start_y, start_x + width, start_y + height)
cropped_img = input_img.crop(box)
baseheight = 128
hpercent = (baseheight / float(cropped_img.size[1]))
wsize = int((float(cropped_img.size[0]) * float(hpercent)))
resized_img = cropped_img.resize((wsize, baseheight), Image.ANTIALIAS)
resized_img.save(output_image +".png")
| 14,908
|
def parse_adapter(name: str, raw: dict) -> dict:
"""Parse a single adapter."""
parsed = {
"name": strip_right(obj=name, fix="_adapter"),
"name_raw": name,
"name_plugin": raw["unique_plugin_name"],
"node_name": raw["node_name"],
"node_id": raw["node_id"],
"status": raw["status"],
"features": raw["supported_features"],
}
generic_name = GENERIC_NAME
discovery_name = DISCOVERY_NAME
specific_name = get_specific_name(raw=raw)
config = raw["config"]
specific_schema = config.get(specific_name, {}).get("schema", {})
specific_schema = parse_schema(raw=specific_schema)
generic_schema = config[generic_name]["schema"]
generic_schema = parse_schema(raw=generic_schema)
discovery_schema = config[discovery_name]["schema"]
discovery_schema = parse_schema(raw=discovery_schema)
cnx_schema = parse_schema(raw=raw["schema"])
cnx_schema["connection_label"] = {
"name": "connection_label",
"title": "Connection Label",
"type": "string",
"required": False,
}
parsed["schemas"] = {
"cnx": cnx_schema,
"specific": specific_schema,
"generic": generic_schema,
"discovery": discovery_schema,
"generic_name": generic_name,
"specific_name": specific_name,
"discovery_name": discovery_name,
}
parsed["config"] = {
"specific": raw["config"].get(specific_name, {}).get("config", {}),
"generic": raw["config"].get(generic_name, {}).get("config", {}),
"discovery": raw["config"].get(discovery_name, {}).get("config", {}),
}
parsed["cnx"] = parse_cnx(raw=raw, parsed=parsed)
parsed["cnx_count_total"] = len(parsed["cnx"])
parsed["cnx_count_broken"] = len([x for x in parsed["cnx"] if not x["working"]])
parsed["cnx_count_working"] = len([x for x in parsed["cnx"] if x["working"]])
return parsed
| 14,909
|
def export_phones(ucm_axl):
"""
Export Phones
"""
try:
phone_list = ucm_axl.get_phones(
tagfilter={
"name": "",
"description": "",
"product": "",
"model": "",
"class": "",
"protocol": "",
"protocolSide": "",
"callingSearchSpaceName": "",
"devicePoolName": "",
"commonDeviceConfigName": "",
"commonPhoneConfigName": "",
"networkLocation": "",
"locationName": "",
"mediaResourceListName": "",
"networkHoldMohAudioSourceId": "",
"userHoldMohAudioSourceId": "",
"loadInformation": "",
"securityProfileName": "",
"sipProfileName": "",
"cgpnTransformationCssName": "",
"useDevicePoolCgpnTransformCss": "",
"numberOfButtons": "",
"phoneTemplateName": "",
"primaryPhoneName": "",
"loginUserId": "",
"defaultProfileName": "",
"enableExtensionMobility": "",
"currentProfileName": "",
"loginTime": "",
"loginDuration": "",
# "currentConfig": "",
"ownerUserName": "",
"subscribeCallingSearchSpaceName": "",
"rerouteCallingSearchSpaceName": "",
"allowCtiControlFlag": "",
"alwaysUsePrimeLine": "",
"alwaysUsePrimeLineForVoiceMessage": "",
}
)
all_phones = []
for phone in phone_list:
# print(phone)
phone_details = {
"name": phone.name,
"description": phone.description,
"product": phone.product,
"model": phone.model,
"protocol": phone.protocol,
"protocolSide": phone.protocolSide,
"callingSearchSpaceName": phone.callingSearchSpaceName._value_1,
"devicePoolName": phone.defaultProfileName._value_1,
"commonDeviceConfigName": phone.commonDeviceConfigName._value_1,
"commonPhoneConfigName": phone.commonPhoneConfigName._value_1,
"networkLocation": phone.networkLocation,
"locationName": phone.locationName._value_1,
"mediaResourceListName": phone.mediaResourceListName._value_1,
"networkHoldMohAudioSourceId": phone.networkHoldMohAudioSourceId,
"userHoldMohAudioSourceId": phone.userHoldMohAudioSourceId,
"loadInformation": phone.loadInformation,
"securityProfileName": phone.securityProfileName._value_1,
"sipProfileName": phone.sipProfileName._value_1,
"cgpnTransformationCssName": phone.cgpnTransformationCssName._value_1,
"useDevicePoolCgpnTransformCss": phone.useDevicePoolCgpnTransformCss,
"numberOfButtons": phone.numberOfButtons,
"phoneTemplateName": phone.phoneTemplateName._value_1,
"primaryPhoneName": phone.primaryPhoneName._value_1,
"loginUserId": phone.loginUserId,
"defaultProfileName": phone.defaultProfileName._value_1,
"enableExtensionMobility": phone.enableExtensionMobility,
"currentProfileName": phone.currentProfileName._value_1,
"loginTime": phone.loginTime,
"loginDuration": phone.loginDuration,
# "currentConfig": phone.currentConfig,
"ownerUserName": phone.ownerUserName._value_1,
"subscribeCallingSearchSpaceName": phone.subscribeCallingSearchSpaceName._value_1,
"rerouteCallingSearchSpaceName": phone.rerouteCallingSearchSpaceName._value_1,
"allowCtiControlFlag": phone.allowCtiControlFlag,
"alwaysUsePrimeLine": phone.alwaysUsePrimeLine,
"alwaysUsePrimeLineForVoiceMessage": phone.alwaysUsePrimeLineForVoiceMessage,
}
line_details = ucm_axl.get_phone(name=phone.name)
# print(line_details.lines.line)
try:
for line in line_details.lines.line:
# print(line)
phone_details[f"line_{line.index}_dirn"] = line.dirn.pattern
phone_details[f"line_{line.index}_routePartitionName"] = line.dirn.routePartitionName._value_1
phone_details[f"line_{line.index}_display"] = line.display
phone_details[f"line_{line.index}_e164Mask"] = line.e164Mask
except Exception as e:
print(e)
all_phones.append(phone_details)
print(
f"exporting: {phone.name}: {phone.model} - {phone.description}")
print("-" * 35)
print(f"number of phones: {len(all_phones)}")
return all_phones
except Exception as e:
print(e)
return []
| 14,910
|
def is_various_artists(name, mbid):
"""Check if given name or mbid represents 'Various Artists'."""
return name and VA_PAT.match(name) or mbid == VA_MBID
| 14,911
|
def validate_ruletype(t):
"""Validate *bounds rule types."""
if t not in ["typebounds"]:
raise exception.InvalidBoundsType("{0} is not a valid *bounds rule type.".format(t))
return t
| 14,912
|
def update_betweenness(G, path, pair, count, relevant):
"""
Given a shortest path in G, along with a count of paths
that length, to determine weight, updates the edge and
pair betweenness dicts with the path's new information.
"""
weight = 1./count
pos = 0
while pos < len(path) - 2:
if path[pos + 1] in relevant:
pair[path[pos + 1]][order_tuple((path[pos], path[pos + 2]))] += weight
pos += 1
| 14,913
|
def download_and_unpack_database(db: str, sha256: str) -> Path:
"""Download the given database, unpack it to the local filesystem, and
return the path.
"""
local_dir = cache_path(f"state_transition_dataset/{sha256}")
with _DB_DOWNLOAD_LOCK, InterProcessLock(
transient_cache_path(".state_transition_database_download.LOCK")
):
if not (local_dir / ".installed").is_file():
tar_data = io.BytesIO(download(db, sha256))
local_dir.mkdir(parents=True, exist_ok=True)
logger.info("Unpacking database to %s ...", local_dir)
with tarfile.open(fileobj=tar_data, mode="r:bz2") as arc:
arc.extractall(str(local_dir))
(local_dir / ".installed").touch()
unpacked = [f for f in local_dir.iterdir() if f.name != ".installed"]
        if len(unpacked) != 1:
            print(
                f"fatal: Archive {db} expected to contain one file, contains: {len(unpacked)}",
                file=sys.stderr,
            )
            sys.exit(1)
return unpacked[0]
| 14,914
|
def addMachine(args):
"""
Adds a Machine to a Plant with plantName.
"""
plantName = args[0]
machineName = args[1]
machineQuantity = args[2]
machineDelay = args[3]
machineCanUnhook = args[4]
plantFilename = plantFileExists(plantName)
plant = Plant.fromXmlFile(plantFilename)
plant.addMachine(Machine(name = machineName,
quantity = int(machineQuantity), minDelay = int(machineDelay),
canUnhook = strToBool(machineCanUnhook)))
plant.toXmlFile(plantFilename)
| 14,915
|
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
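
A quick illustration of the ordering, using hypothetical classes that are not part of the original source; the generator yields the most derived subclasses before their parents, and the root class itself is not yielded.

class A: pass
class B(A): pass
class C(B): pass

print([cls.__name__ for cls in walk_class_hierarchy(A)])  # -> ['C', 'B']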
| 14,916
|
def test_command_line_interface(training_set_dir_path, cli_output_path, reax_output_dir_path):
"""Test the CLI."""
training_path = str(training_set_dir_path)
population_path = str(cli_output_path)
config_path = str(os.path.join(os.path.abspath(os.path.join(__file__, "../../")),
"tests", "integration", "config", "cli_config.json"))
generation_number = 1
runner = CliRunner()
result = runner.invoke(main, '--generation_number {} --training_path "{}" --population_path "{}" --config_path "{}"'
.format(generation_number, training_path, population_path, config_path))
print(result.output)
assert result.exit_code == 0
assert "Generation Number: 1\n" in result.output
assert "Retrieving reference data from: {}".format(training_path) in result.output
assert "Outputting generational genetic algorithm data to: {}".format(population_path) in result.output
assert "Retrieving user configuration from: {}".format(config_path) in result.output
assert "{}: {}".format("SUCCESS", "Generation successfully written at {}".format(population_path)) in result.output
assert not result.exception
help_result = runner.invoke(main, ['--help'])
assert help_result.exit_code == 0
assert "Usage: cli [OPTIONS]" in help_result.output
assert "Command-line interface for genetic algorithm + neural network generational\n propagation" in help_result.output
assert "-g, --generation_number" in help_result.output
assert "-t, --training_path" in help_result.output
assert "-p, --population_path" in help_result.output
assert not help_result.exception
# Teardown
shutil.rmtree(os.path.join(population_path, "generation-1"))
population_path = str(reax_output_dir_path)
result = runner.invoke(main, '--generation_number {} --training_path "{}" --population_path "{}" --config_path "{}"'
.format(3, training_path, population_path, config_path))
assert result.exit_code == 0
assert "Generation Number: 3\n" in result.output
assert "Retrieving reference data from: {}".format(training_path) in result.output
assert "Outputting generational genetic algorithm data to: {}".format(population_path) in result.output
assert "Retrieving user configuration from: {}".format(config_path) in result.output
assert "{}: {}".format("SUCCESS", "Generation successfully written at {}".format(population_path)) in result.output
assert not result.exception
# Teardown
shutil.rmtree(os.path.join(population_path, "generation-3"))
result = runner.invoke(main, '')
print(result)
assert result.exception
| 14,917
|
def decode_section_flags(sflags: str) -> int:
"""Map readelf's representation of section flags to ELF flag values."""
d = {
'W': elftools.elf.constants.SH_FLAGS.SHF_WRITE,
'A': elftools.elf.constants.SH_FLAGS.SHF_ALLOC,
'X': elftools.elf.constants.SH_FLAGS.SHF_EXECINSTR,
'M': elftools.elf.constants.SH_FLAGS.SHF_MERGE,
'S': elftools.elf.constants.SH_FLAGS.SHF_STRINGS,
'I': elftools.elf.constants.SH_FLAGS.SHF_INFO_LINK,
'L': elftools.elf.constants.SH_FLAGS.SHF_LINK_ORDER,
'O': elftools.elf.constants.SH_FLAGS.SHF_OS_NONCONFORMING,
'G': elftools.elf.constants.SH_FLAGS.SHF_GROUP,
'T': elftools.elf.constants.SH_FLAGS.SHF_TLS,
'C': 0x800, # SHF_COMPRESSED
'E': elftools.elf.constants.SH_FLAGS.SHF_EXCLUDE,
'y': 0x20000000, # SHF_ARM_PURECODE
}
flags = 0
for k, v in d.items():
if k in sflags:
flags |= v
return flags
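
As a quick sanity check (a sketch that assumes pyelftools is installed; the values below are the standard ELF constants SHF_ALLOC = 0x2 and SHF_EXECINSTR = 0x4), a section that readelf reports with flags "AX" decodes as follows.

flags = decode_section_flags("AX")
assert flags == 0x2 | 0x4  # SHF_ALLOC | SHF_EXECINSTR == 0x6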
| 14,918
|
def nCr(n,r):
"""
Implements multiplicative formula:
https://en.wikipedia.org/wiki/Binomial_coefficient#Multiplicative_formula
"""
if r < 0 or r > n:
return 0
if r == 0 or r == n:
return 1
c = 1
for i in xrange(min(r, n - r)):
c = c * (n - i) // (i + 1)
return c
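
A worked example of the multiplicative recurrence (not part of the original snippet; note the code targets Python 2's xrange, which would be range on Python 3): computing nCr(5, 2) iterates min(r, n - r) = 2 times.

# i = 0: c = 1 * (5 - 0) // (0 + 1) = 5
# i = 1: c = 5 * (5 - 1) // (1 + 1) = 10
# so nCr(5, 2) == 10, matching 5! / (2! * 3!).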
| 14,919
|
def get_test_vesselfile():
"""
return the necessary paths for the testfile tests
Returns
-------
str
absolute file path to the test file
"""
testfile = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data', 'vessel_file.kfc')
return testfile
| 14,920
|
def _get_init_arguments(cls, *args, **kwargs):
"""Returns an OrderedDict of args passed to cls.__init__ given [kw]args."""
init_args = inspect.signature(cls.__init__)
bound_args = init_args.bind(None, *args, **kwargs)
bound_args.apply_defaults()
arg_dict = bound_args.arguments
del arg_dict['self']
return arg_dict
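
A small usage sketch with a hypothetical class, assuming the helper above is importable; apply_defaults() fills in any argument left at its default.

class Point:
    def __init__(self, x, y=0):
        self.x, self.y = x, y

_get_init_arguments(Point, 3)       # -> ordered mapping {'x': 3, 'y': 0}
_get_init_arguments(Point, 3, y=5)  # -> ordered mapping {'x': 3, 'y': 5}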
| 14,921
|
def MatchCapture(nfa: NFA, id: CaptureGroup) -> NFA:
"""Handles: (?<id>A)"""
captures = {(s, i): {id} for (s, i) in nfa.transitions if i != Move.EMPTY}
return NFA(nfa.start, nfa.end, nfa.transitions, merge_trans(nfa.captures, captures))
| 14,922
|
def save_private_file_share(request, token):
"""
Save private share file to someone's library.
"""
username = request.user.username
try:
pfs = PrivateFileDirShare.objects.get_priv_file_dir_share_by_token(token)
except PrivateFileDirShare.DoesNotExist:
raise Http404
from_user = pfs.from_user
to_user = pfs.to_user
repo_id = pfs.repo_id
path = pfs.path
src_path = os.path.dirname(path)
obj_name = os.path.basename(path.rstrip('/'))
if username == from_user or username == to_user:
dst_repo_id = request.POST.get('dst_repo')
dst_path = request.POST.get('dst_path')
new_obj_name = check_filename_with_rename(dst_repo_id, dst_path, obj_name)
wingufile_api.copy_file(repo_id, src_path, obj_name,
dst_repo_id, dst_path, new_obj_name, username)
messages.success(request, _(u'Successfully saved.'))
else:
messages.error(request, _("You don't have permission to save %s.") % obj_name)
next = request.META.get('HTTP_REFERER', None)
if not next:
next = SITE_ROOT
return HttpResponseRedirect(next)
| 14,923
|
def get_all_state_events(log):
""" Returns a list of tuples of event id, state_change_id, block_number and events"""
return [
(InternalEvent(res[0], res[1], res[2], log.serializer.deserialize(res[3])))
for res in get_db_state_changes(log.storage, 'state_events')
]
| 14,924
|
def iff(a: NNF, b: NNF) -> Or[And[NNF]]:
"""``a`` is true if and only if ``b`` is true."""
return (a & b) | (a.negate() & b.negate())
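
In propositional terms this encodes the biconditional as the disjunction of the two agreeing cases:

a \leftrightarrow b \;\equiv\; (a \land b) \lor (\lnot a \land \lnot b)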
| 14,925
|
def redact_access_token(e: Exception) -> Exception:
"""Remove access token from exception message."""
if not isinstance(e, FacebookError):
return e
e.args = (redact_access_token_from_str(str(e.args[0])),)
return e
| 14,926
|
def _get_ext_comm_subtype(type_high):
"""
Returns a ByteEnumField with the right sub-types dict for a given community.
http://www.iana.org/assignments/bgp-extended-communities/bgp-extended-communities.xhtml
"""
return _ext_comm_subtypes_classes.get(type_high, {})
| 14,927
|
def _filter_classes(classes, filters, names_only, iq):
"""
Filter a list of classes for the qualifiers defined by the
qualifier_filter parameter where this parameter is a list of tuples.
each tuple contains the qualifier name and a dictionary with qualifier
name as key and tuple containing the option_value(True or False) and
a list of booleans where each boolean represents one of the scope types
()
whether to display or not display if it exists.
This method only works for boolean qualifiers
Parameters:
classes (list of :class:`~pywbem.CIMClass`):
list of classes to be filtered
qualifier_filters (dict):
Dictionary defining the filtering to be performed. It contains an entry
for each qualifier filter that is defined. See _build_qualifier_filters
for a definition of this list.
names_only (:class:`py:bool`):
If True, return only the classnames. Otherwise returns the filtered
classes. This is because we must get the classes from the server to
perform the filtering
iq (:class:`py:bool`):
If not True, remove any qualifiers from the classes. This is because
we must get the classes from the server with qualifiers to
perform the filtering.
"""
def class_has_qualifier(cls, qname, scopes):
"""
Determine if the qualifier defined by qname exists in the elements
of the class where the elements are defined by the scopes parameter
for this filter.
Parameters:
cls (:class:`~pywbem.CIMClass`):
The class to be inspected for the qualifier defined by qname
qname (:term:`string`):
The qualifier for which we are searching
scopes (tuple of booleans):
A tuple containing a boolean value for each of the possible scopes
(class, property, method, parameter)
Returns:
        True if the qualifier with name qname is found in the elements where
the scope is True. Otherwise, False is returned
"""
# Test class scope
if scopes[0] and qname in cls.qualifiers:
return True
# if property scope, test properties
if scopes[1]:
for prop in cls.properties.values():
if qname in prop.qualifiers:
return True
# If method scope, test methods and if parameter scope, test parameters
        if scopes[2]:
            for method in cls.methods.values():
                if qname in method.qualifiers:
                    return True
                if scopes[3]:
                    params = method.parameters
                    for param in params.values():
                        if qname in param.qualifiers:
                            return True
        return False
# Test all classes in the input property for the defined filters.
filtered_classes = []
subclass_names = []
# Build list of subclass names that will be used later as a filter on the
# classes to be returned
if 'subclass_of' in filters:
try:
subclass_names = get_subclass_names(
classes,
classname=filters['subclass_of'].optionvalue,
deep_inheritance=True)
except ValueError:
raise click.ClickException(
'Classname {} for "subclass-of" not found in returned classes.'
.format(filters['subclass_of'].optionvalue))
# Build a list of leaf class names that will be used later as a filter on
# the classes to be returned.
if 'leaf_classes' in filters:
try:
if subclass_names:
clsx = [cls for cls in classes if cls.classname in
subclass_names]
leafclass_names = get_leafclass_names(clsx)
else:
leafclass_names = get_leafclass_names(classes)
except ValueError:
raise click.ClickException(
'Classname {} for "leaf_classes-of" not found in returned '
'classes.'.format(filters['leaf_classes'].optionvalue))
for cls in classes:
show_class_list = []
for filter_name, filter_ in filters.items():
if filter_name == 'qualifier':
option_value = filter_.optionvalue
if class_has_qualifier(cls, filter_.qualifiername,
filter_.scopes):
if filter_.qualifiername == 'version':
if filter_.qualifiername in cls.qualifiers:
cls_version = \
cls.qualifiers[filter_.qualifiername].value
val = parse_version_value(cls_version,
cls.classname)
option_value = bool(val >= filter_.optionvalue)
show_class_list.append(option_value)
else:
show_class_list.append(not option_value)
elif filter_name == 'schema':
show_class_list.append(
cls.classname.lower().startswith(filter_.optionvalue))
elif filter_name == 'subclass_of':
show_class_list.append(cls.classname in subclass_names)
elif filter_name == 'leaf_classes':
show_class_list.append(cls.classname in leafclass_names)
else:
assert False # Future for other test_types
# Show if all options are True for this class
show_this_class = all(show_class_list)
if show_this_class:
# If returning instances, honor the names_only option
if not names_only and not iq:
cls.qualifiers = []
for p in cls.properties.values():
p.qualifiers = []
for m in cls.methods.values():
m.qualifiers = []
for p in m.parameters.values():
p.qualifiers = []
filtered_classes.append(cls)
# If names_only parameter create list of classnames
if names_only:
filtered_classes = [cls.classname for cls in filtered_classes]
return filtered_classes
| 14,928
|
def template2():
"""load_cep_homo"""
script = """
## (Store,figure)
<< host = chemml
<< function = SavePlot
<< kwargs = {'normed':True}
<< output_directory = plots
<< filename = amwVSdensity
>> 0 fig
## (Visualize,artist)
<< host = chemml
<< function = decorator
<< title = AMW vs. Density
<< grid_color = g
<< xlabel = density (Kg/m3)
<< ylabel = atomic molecular weight
<< grid = True
<< size = 18
>> fig 0
>> 4 fig
## (Enter,python script)
<< host = chemml
<< function = PyScript
<< line01 = print (iv1.head())
>> 1 iv1
## (Enter,datasets)
<< host = chemml
<< function = load_organic_density
>> smiles 1
>> density 2
>> features 3
## (Visualize,plot)
<< host = chemml
<< function = scatter2D
<< y = 0
<< marker = o
<< x = 'AMW'
>> 2 dfy
>> 3 dfx
>> fig 4
"""
return script.strip().split('\n')
| 14,929
|
def get_target_and_encoder_gpu(train: GpuDataset) -> Tuple[Any, type]:
"""Get target encoder and target based on dataset.
Args:
train: Dataset.
Returns:
(Target values, Target encoder).
"""
target = train.target
if isinstance(target, cudf.Series):
target = target.values
target_name = train.target.name
if train.task.name == 'multiclass':
n_out = cp.max(target)+1
target = (target[:, cp.newaxis] == cp.arange(n_out)[cp.newaxis, :])
encoder = MultiClassTargetEncoder_gpu
else:
encoder = TargetEncoder_gpu
return target, encoder
| 14,930
|
def main(inputs_dir):
"""
    Entry point for the merge and split processing script.
    Parameters:
    ------------
    :param inputs_dir {string} - Path to the directory containing input data
    """
# Create output directory if it doesn't exist
if not os.path.exists('output/'):
os.mkdir('output')
# Iterate over geojson files in data directory, splitting merged datasets by three data providers
for filename in tqdm(os.listdir(inputs_dir)):
if filename.endswith(".geojson"):
print("Processing: {}".format(os.path.join(inputs_dir, filename)))
fname = filename[18:]
            geodf = gpd.read_file(os.path.join(inputs_dir, filename))
# Get all OSM Data and output it to a new file
osm_geodf = geodf[geodf['Data_prov'] == 'OSM']
if not osm_geodf.empty:
osm_geodf.to_file('output/osm_' + fname, driver='GeoJSON')
# Remove OSM records from geodf
geodf = geodf[geodf['Data_prov'] != 'OSM']
# Get All MS Data and output it to a new file
ms_geodf = geodf[geodf['Data_prov'] == 'Microsoft']
if not ms_geodf.empty:
ms_geodf.to_file('output/ms_' + fname, driver='GeoJSON')
# Remove Microsoft records from geodf
geodf = geodf[geodf['Data_prov'] != 'Microsoft']
# Output ODB Data
if not geodf.empty:
geodf.to_file('output/odb_' + fname, driver='GeoJSON')
# Delete old geo-dataframes
del geodf
del ms_geodf
del osm_geodf
| 14,931
|
def addSyntheticToTrainingData(train, synth):
"""
write out new shuffled training data file
:param train: locations of all existing training data files
:param synth: generated data
"""
# create list of generated data with adjusted paths
synth_paths = []
for img in synth:
synth_paths.append("data/obj/" + img[0].split('\\')[-1][:-4] + " 0.0050" + ".JPG")
new_train = shuffle(train + synth_paths, random_state=0)
# create NEW_train.txt file, containing the locations of the respective image files
with open("data" + "/" + "SYNTH_train.txt", "w") as f:
for file in new_train:
f.write(file + "\n")
| 14,932
|
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = os.environ.get(_locale.get_locale_dir_variable_name(domain))
def find(x):
return gettext.find(domain, localedir=localedir, languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
locale_identifiers = set(locale.windows_locale.values())
language_list.extend(
language for language in locale_identifiers if find(language)
)
language_list.extend(
alias for alias, _ in _BABEL_ALIASES.items() if find(alias)
)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
| 14,933
|
def once(f):
"""Cache result of a function first call"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
rv = getattr(f, 'rv', MISSING)
if rv is MISSING:
f.rv = f(*args, **kwargs)
return f.rv
return wrapper
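
A usage sketch with a hypothetical function, assuming once() and its MISSING sentinel are defined in the same module as above.

@once
def expensive_setup():
    print("computing...")
    return 42

expensive_setup()  # prints "computing..." and returns 42
expensive_setup()  # returns the cached 42 without running the body again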
| 14,934
|
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_config(args, cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.MODEL.BUA.EXTRACTOR.MODE = 1
default_setup(cfg, args)
cfg.MODEL.DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
cfg.freeze()
return cfg
| 14,935
|
def build_audit_stub(obj):
"""Returns a stub of audit model to which assessment is related to."""
audit_id = obj.audit_id
if audit_id is None:
return None
return {
'type': 'Audit',
'id': audit_id,
'context_id': obj.context_id,
'href': '/api/audits/%d' % audit_id,
'issue_tracker': obj.audit.issue_tracker,
}
| 14,936
|
def test_verbose_probability(probability_above_cube, interpreter):
"""Test verbose output for a simple probability diagnostic"""
expected_result = (
"This is a gridded probabilities file\n"
" Source: name, coordinates\n"
"It contains probabilities of air temperature greater than thresholds\n"
" Source: name, threshold coordinate (probabilities only)\n"
"It has undergone some significant post-processing\n"
" Source: title attribute\n"
"It contains data from UKV\n"
" Source: model ID attribute\n"
)
interpreter.run(probability_above_cube)
result = display_interpretation(interpreter, verbose=True)
assert result == expected_result
| 14,937
|
def calculate_widths(threshold_img, landmarks):
"""
Calcula a largura dos vasos sanguíneos nos pontos de potenciais
bifurcação. Esse cálculo é feito pegando a menor distância percorrida
a partir do ponto em cada uma das direções (8 direções são utilizadas).
A função retorna o que seria equivalente ao diametro do vasos em cada
ponto.
:param threshold_img: imagem (binária) usada para calculo da largura dos
vasos sanguíneos
:param landmarks: pontos onde calcular as larguras
:return: vetor com larguras de cada um dos pontos (diametro dos vasos)
"""
N, M = threshold_img.shape
widths = []
for x, y, mark_type in landmarks:
# down
i = x
j = y
vert_dist = 0
while(j < M and threshold_img[i, j] != 0):
vert_dist += 1
j += 1
# up
i = x
j = y
while(j >= 0 and threshold_img[i, j] != 0):
vert_dist += 1
j -= 1
# right
horiz_dist = 0
i = x
j = y
while(i < N and threshold_img[i, j] != 0):
horiz_dist += 1
i += 1
# left
i = x
j = y
while(i >= 0 and threshold_img[i, j] != 0):
horiz_dist += 1
i -= 1
# down right
i = x
j = y
s_diag_dist = 0
while(i < N and j < M and threshold_img[i, j] != 0):
i += 1
j += 1
s_diag_dist += 1
# up left
i = x
j = y
while(i >= 0 and j >= 0 and threshold_img[i, j] != 0):
i -= 1
j -= 1
s_diag_dist += 1
# down left
i = x
j = y
p_diag_dist = 0
while(i >= 0 and j < M and threshold_img[i, j] != 0):
i -= 1
j += 1
p_diag_dist += 1
# up right
i = x
j = y
while(i < N and j >= 0 and threshold_img[i, j] != 0):
i += 1
j -= 1
p_diag_dist += 1
min_width = np.min([vert_dist, horiz_dist, p_diag_dist, s_diag_dist])
widths.append([(x, y), np.ceil(min_width).astype(int), mark_type])
return widths
| 14,938
|
def smi_to_fp(smi: str, fingerprint: str,
radius: int = 2, length: int = 2048) -> Optional[np.ndarray]:
"""fingerprint functions must be wrapped in a static function
so that they may be pickled for parallel processing
Parameters
----------
smi : str
the SMILES string of the molecule to encode
    fingerprint : str
        the type of fingerprint to generate
radius : int
the radius of the fingerprint
length : int
the length of the fingerprint
Returns
-------
    Optional[np.ndarray]
        the feature representation of the molecule, or None if the SMILES
        string could not be parsed
"""
mol = Chem.MolFromSmiles(smi)
if mol is None:
return None
if fingerprint == 'morgan':
fp = rdmd.GetMorganFingerprintAsBitVect(
mol, radius=radius, nBits=length, useChirality=True)
elif fingerprint == 'pair':
fp = rdmd.GetHashedAtomPairFingerprintAsBitVect(
mol, minLength=1, maxLength=1+radius, nBits=length)
elif fingerprint == 'rdkit':
fp = rdmd.RDKFingerprint(
mol, minPath=1, maxPath=1+radius, fpSize=length)
elif fingerprint == 'maccs':
fp = rdmd.GetMACCSKeysFingerprint(mol)
else:
raise NotImplementedError(
f'Unrecognized fingerprint: "{fingerprint}"')
x = np.empty(len(fp))
DataStructs.ConvertToNumpyArray(fp, x)
return x
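
A usage sketch, assuming RDKit is installed and the module-level imports (Chem, rdmd, DataStructs, np) match the snippet; a Morgan fingerprint comes back as a length-2048 NumPy array and an unparseable SMILES yields None.

x = smi_to_fp("CCO", "morgan")  # ethanol
assert x is not None and x.shape == (2048,)
assert smi_to_fp("not-a-smiles", "morgan") is None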
| 14,939
|
def f(x, t):
"""function to learn."""
return tf.square(tf.cast(t, tf.float32) / FLAGS.tm) * (tf.math.sin(5 * x) + 1)
| 14,940
|
def render_cells(cells, width=80, col_spacing=2):
"""Given a list of short (~10 char) strings, display these aligned in
columns.
Example output::
Something like this can be
used to neatly arrange long
sequences of values in a
compact format.
Parameters
----------
cells : [(strlen, str), ...]
        Gives the cells to print as tuples giving the string's length in visible
characters and the string to display.
width : int
The width of the terminal.
col_spacing : int
Size of the gap to leave between columns.
"""
# Special case (since max below will fail)
if len(cells) == 0:
return ""
# Columns should be at least as large as the largest cell with padding
# between columns
col_width = max(strlen for strlen, s in cells) + col_spacing
lines = [""]
cur_length = 0
for strlen, s in cells:
# Once line is full, move to the next
if cur_length + strlen > width:
lines.append("")
cur_length = 0
# Add the current cell (with spacing)
lines[-1] += s + (" "*(col_width - strlen))
cur_length += col_width
return "\n".join(map(str.rstrip, lines))
| 14,941
|
def separate_types(data):
"""Separate out the points from the linestrings."""
if data['type'] != 'FeatureCollection':
raise TypeError('expected a FeatureCollection, not ' + data['type'])
points = []
linestrings = []
for thing in data['features']:
if thing['type'] != 'Feature':
raise TypeError('expected Feature, not ' + thing['type'])
geometry_type = thing['geometry']['type']
if geometry_type == 'Point':
points.append(thing)
elif geometry_type == 'LineString':
linestrings.append(thing)
else:
raise TypeError('expected Point or LineString, not ' + geometry_type)
return points, linestrings
| 14,942
|
def state(state_vec):
""" Qiskit wrapper of qobj
"""
return gen_operator.state(state_vec)
| 14,943
|
def get_index_path(bam_path: str):
"""
Obtain path to bam index
Returns:
path_to_index(str) : path to the index file, None if not available
"""
for p in [bam_path+'.bai', bam_path.replace('.bam','.bai')]:
if os.path.exists(p):
return p
return None
| 14,944
|
def get_games_for_platform(platform_id):
"""Return the list of all the games for a given platform"""
controller = GameController
return controller.get_list_by_platform(MySQLFactory.get(), platform_id)
| 14,945
|
def set_entity_variable_types(entity, node_type, db_info):
"""
Sets a featuretools Entity's variable types to match the variable types in db_info[node_type]
"""
for var_name, var_type in entity.variable_types.items():
if not var_type in [Index, Id]:
feat_info = db_info['node_types_and_features'][node_type].get(var_name)
if not feat_info:
print(f'make sure {node_type}.{var_name} is set in variable_types, or is an Id')
else:
right_type = feat_info['type']
new_type = None
if right_type == 'CATEGORICAL':
new_type = Categorical
elif right_type == 'SCALAR':
new_type = Numeric
elif right_type == 'DATETIME':
assert var_type == Datetime
elif right_type == 'TEXT':
assert var_type == Text
else:
raise ValueError
if new_type:
entity.convert_variable_type(var_name, new_type)
| 14,946
|
def validate_dataset(elem: object) -> Dataset:
"""Check that `elem` is a :class:`~pydicom.dataset.Dataset` instance."""
if not isinstance(elem, Dataset):
raise TypeError('Sequence contents must be Dataset instances.')
return elem
| 14,947
|
def build_array(name: str, variables: Dict[str, Dict[str, Any]],
data: np.ndarray):
"""Builds the array from the data and the variables"""
properties = variables[name]
attrs = copy.deepcopy(properties["attrs"])
# Reading the storage properties of the variable
encoding: Dict[str, Any] = dict(dtype=properties["dtype"])
# If the variable defines a fill value.
if "_FillValue" in attrs:
encoding["_FillValue"] = encode_fill_value(properties)
del attrs["_FillValue"]
# Some values read from the XML files must be decoded
# TODO(fbriol): The type of these attributes should be determined
# from their type, but at the moment this is not possible.
for item in ["add_offset", "scale_factor"]:
if item in attrs:
attrs[item] = float(attrs[item])
for item in ["valid_range", "valid_min", "valid_max"]:
if item in attrs:
attrs[item] = cast_to_dtype(attrs[item], properties)
if "flag_values" in attrs:
items = attrs["flag_values"].split()
attrs["flag_values"] = np.array(
[cast_to_dtype(item, properties) for item in items],
properties["dtype"]) if len(items) != 1 else cast_to_dtype(
float(attrs["flag_values"]), properties)
# if "scale_factor" in attrs and "add_offset" not in attrs:
# attrs["add_offset"] = 0.0
# if "add_offset" in attrs and "scale_factor" not in attrs:
# attrs["scale_factor"] = 1.0
return {
name: encoding
}, xr.DataArray(data=data,
dims=properties["shape"],
name=name,
attrs=attrs)
| 14,948
|
def conv3x3(in_channels, out_channels, stride=1):
"""3x3 convolution """
weight_shape = (out_channels, in_channels, 3, 3)
weight = Tensor(np.ones(weight_shape).astype(np.float32))
conv = Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=0, weight_init=weight, has_bias=False,
pad_mode="same")
conv.conv2d.shard(strategy_weight)
return conv
| 14,949
|
def _workflow_complete(workflow_stage_dict: dict):
"""Check if the workflow is complete.
This function checks if the entire workflow is complete.
This function is used by `execute_processing_block`.
Args:
workflow_stage_dict (dict): Workflow metadata dictionary.
Returns:
bool, True if the workflow is complete, otherwise False.
"""
# Check if all stages are complete, if so end the PBC by breaking
# out of the while loop
complete_stages = []
for _, stage_config in workflow_stage_dict.items():
complete_stages.append((stage_config['status'] == 'complete'))
if all(complete_stages):
LOG.info('PB workflow complete!')
return True
return False
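
A small sketch of the check with hypothetical stage metadata (assumes the module's LOG logger is configured).

stages = {
    "stage1": {"status": "complete"},
    "stage2": {"status": "running"},
}
_workflow_complete(stages)  # -> False, stage2 is still running
stages["stage2"]["status"] = "complete"
_workflow_complete(stages)  # -> True, and logs completion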
| 14,950
|
def close_session(*args, **kwargs):
"""
Flask SQLAlchemy will automatically create new sessions for you from
a scoped session factory, given that we are maintaining the same app
context, this ensures tasks have a fresh session (e.g. session errors
won't propagate across tasks)
"""
db.session.remove()
| 14,951
|
def set_cpus(instance):
"""Set the number of virtual CPUs for the virtual machine.
:param instance: nova.objects.instance.Instance
"""
host_info = get_host_info()
if instance.vcpus > host_info[constants.HOST_PROCESSOR_COUNT]:
raise nova_exception.ImageNUMATopologyCPUOutOfRange(
cpunum=instance.vcpus,
cpumax=host_info[constants.HOST_PROCESSOR_COUNT])
manage.VBoxManage.modify_vm(instance, constants.FIELD_CPUS,
instance.vcpus)
| 14,952
|
def create_output_folder(ProjectDir):
"""Create the output folders starting from the project directory.
Parameters
----------
ProjectDir : str
Name of the project directory.
    Returns
    -------
    tuple of str
        PicturePath, ResultsPath
"""
npath = os.path.normpath(ProjectDir)
# set pathname for the Output
OutputPath = os.path.join(npath, os.path.basename(npath))
# set pathname for the images
PicturePath = os.path.join(npath, os.path.basename(npath), "Pictures")
# set pathname for the files
ResultsPath = os.path.join(npath, os.path.basename(npath), "Results")
    # Add folders for outputs
if not os.path.exists(OutputPath):
os.mkdir(OutputPath)
if not os.path.exists(PicturePath):
os.mkdir(PicturePath)
if not os.path.exists(ResultsPath):
os.mkdir(ResultsPath)
return PicturePath, ResultsPath
| 14,953
|
def get_name(f, opera_format=True):
"""Load dataset and extract radar name from it"""
ds = xr.open_dataset(f)
if hasattr(ds, 'source'):
radar = ds.source
else:
filename = osp.splitext(osp.basename(f))[0]
radar = filename.split('_')[-1]
if opera_format:
if '/' in radar:
radar = (radar[:2]+radar[-3:]).lower()
else:
if radar.islower():
radar = radar[:2] + '/' + radar[-3:]
return radar
| 14,954
|
def range_join(numbers, to_str=False, sep=",", range_sep=":"):
"""
Takes a sequence of positive integer numbers given either as integer or string types, and
    returns a sequence of 1- and 2-tuples, denoting either single numbers or inclusive start and stop
values of possible ranges. When *to_str* is *True*, a string is returned in a format consistent
to :py:func:`range_expand` with ranges constructed by *range_sep* and merged with *sep*.
Example:
.. code-block:: python
range_join([1, 2, 3, 5])
# -> [(1, 3), (5,)]
range_join([1, 2, 3, 5, 7, 8, 9])
# -> [(1, 3), (5,), (7, 9)]
range_join([1, 2, 3, 5, 7, 8, 9], to_str=True)
# -> "1:3,5,7:9"
"""
if not numbers:
return "" if to_str else []
# check type, convert, make unique and sort
_numbers = []
for n in numbers:
if isinstance(n, six.string_types):
try:
n = int(n)
except ValueError:
raise ValueError("invalid number format '{}'".format(n))
if isinstance(n, six.integer_types):
_numbers.append(n)
else:
raise TypeError("cannot handle non-integer value '{}' in numbers to join".format(n))
numbers = sorted(set(_numbers))
# iterate through numbers, keep track of last starts and stops and fill a list of range tuples
ranges = []
start = stop = numbers[0]
for n in numbers[1:]:
if n == stop + 1:
stop += 1
else:
ranges.append((start,) if start == stop else (start, stop))
start = stop = n
ranges.append((start,) if start == stop else (start, stop))
# convert to string representation
if to_str:
ranges = sep.join(
(str(r[0]) if len(r) == 1 else "{1}{0}{2}".format(range_sep, *r))
for r in ranges
)
return ranges
| 14,955
|
def power_spectrum(x, fs, N=None):
"""
Power spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The power spectrum, or single-sided autospectrum, contains the squared RMS amplitudes of the signal.
A power spectrum is a spectrum with squared RMS values. The power spectrum is
calculated from the autospectrum of the signal.
.. warning:: Does not include scaling to reference value!
.. seealso:: :func:`auto_spectrum`
"""
N = N if N else x.shape[-1]
f, a = auto_spectrum(x, fs, N=N)
a = a[..., N//2:]
f = f[..., N//2:]
a *= 2.0
a[..., 0] /= 2.0 # DC component should not be doubled.
if not N%2: # if not uneven
a[..., -1] /= 2.0 # And neither should fs/2 be.
return f, a
| 14,956
|
def CreateInstanceTemplate(task, task_dir):
"""Create the Compute Engine instance template that will be used to create the
instances.
"""
backend_params = task.BackendParams()
instance_count = backend_params.get('instance_count', 0)
if instance_count <= 0:
clovis_logger.info('No template required.')
return True
bucket = backend_params.get('storage_bucket')
if not bucket:
clovis_logger.error('Missing bucket in backend_params.')
return False
return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,
task_dir)
| 14,957
|
def get_config_string(info_type, board_num, dev_num, config_item, max_config_len):
"""Returns configuration or device information as a null-terminated string.
Parameters
----------
info_type : InfoType
The configuration information for each board is grouped into different categories. This
parameter specifies which category you want. Always set this parameter to
InfoType.BOARDINFO.
board_num : int
The number associated with the board when it was installed with InstaCal or created
with :func:`.create_daq_device`.
dev_num : int
The purpose of the dev_num parameter depends on the value of the config_item parameter. It
can serve as a channel number, an index into the config_item, or it can be ignored.
Unless otherwise noted in the "config_item parameter values" section below, this value is
ignored.
config_item : BoardInfo
The type of information to read from the device. Set it to one of the constants listed in
the "config_item parameter values" section below.
max_config_len : int
The maximum number of bytes to be read from the device into config_val.
Returns
-------
string
The specified configuration item
.. table:: **config_item parameter values**
============ =============================================================================
config_item Description
============ =============================================================================
DEVMACADDR MAC address of an Ethernet device.
------------ -----------------------------------------------------------------------------
DEVSERIALNUM Factory serial number of a USB or Bluetooth device.
dev_num specifies either a base board (0) or an expansion board (1).
------------ -----------------------------------------------------------------------------
DEVUNIQUEID Unique identifier of a discoverable device, such as the serial number of a
USB device or MAC address of an Ethernet device.
------------ -----------------------------------------------------------------------------
DEVVERSION Firmware version and FPGA version installed on a device.
Use this setting in conjunction with one of these dev_num settings:
- MAIN (main firmware version)
- MEASUREMENT (measurement firmware version)
- MEASUREMENT_EXP (expansion board measurement firmware version)
- RADIO (radio firmware version)
- FPGA (FPGA version)
------------ -----------------------------------------------------------------------------
USERDEVID User-configured string identifier of up to maxConfigLen character/bytes from
an Ethernet, Bluetooth, or USB device.
============ =============================================================================
"""
config_val = create_string_buffer(max_config_len)
_check_err(_cbw.cbGetConfigString(
info_type, board_num, dev_num, config_item,
config_val, byref(c_int(max_config_len))))
return config_val.value.decode('utf-8')
| 14,958
|
def test_feature_constraintslist_qaenv(unleash_client):
"""
Feature.constraints.list should NOT be enabled in qa environment
"""
# Set up API
responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202)
responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200)
responses.add(responses.POST, URL + METRICS_URL, json={}, status=202)
# Tests
unleash_client.initialize_client()
unleash_client.unleash_static_context["environment"] = "qa"
assert not unleash_client.is_enabled("Feature.constraints.list", {})
| 14,959
|
def _get_kwargs(func, locals_dict, default=None):
"""
Convert a function's args to a kwargs dict containing entries that are not identically default.
Parameters
----------
func : function
The function whose args we want to convert to kwargs.
locals_dict : dict
The locals dict for the function.
default : object
Don't include arguments whose values are this object.
Returns
-------
dict
The non-default keyword args dict.
"""
return {n: locals_dict[n] for n in inspect.signature(func).parameters
if locals_dict[n] is not default}
| 14,960
|
def iso_to_date(iso_str: str):
"""Convert a date string with iso formating to a datetime date object"""
if not iso_str:
return None
return datetime.date(*map(int, iso_str.split('-')))
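
Two quick examples of the conversion, assuming the datetime import used by the snippet.

iso_to_date("2021-03-15")  # -> datetime.date(2021, 3, 15)
iso_to_date("")            # -> None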
| 14,961
|
def replace_X_with_U(fold: data.Fold, Theta: NP.Matrix):
""" Replace X with its rotated/reordered version U."""
column_headings = MultiIndex.from_product(((fold.meta['data']['X_heading'],), ("u{:d}".format(i) for i in range(fold.M))))
X = DataFrame(einsum('MK, NK -> NM', Theta, fold.X, optimize=True, dtype=float),
columns=column_headings, index=fold.X.index)
test_X = DataFrame(einsum('MK, NK -> NM', Theta, fold.test_X, optimize=True, dtype=float),
columns=column_headings, index=fold.test_X.index)
fold.data.df = concat((X, fold.data.df[[fold.meta['data']['Y_heading']]].copy(deep=True)), axis='columns')
fold.data.write()
fold.test.df = concat((test_X, fold.test.df[[fold.meta['data']['Y_heading']]].copy(deep=True)), axis='columns')
fold.test.write()
fold.meta_data_update()
| 14,962
|
def call_with_error(error_type):
"""Collects a bunch of errors and returns them all once.
Decorator that collects the errors in the decorated function so that the
user can see everything they need to fix at once. All errors are thrown
with the same error type.
    The decorated function must have an `error` keyword parameter. The `error` parameter
is then ignored if the end user passes in that argument.
Parameters
----------
error_type: type
The type of error to throw. For example, `ValueError`.
Returns
-------
Callable[Callable[[Any], Any], Callable[[Any], Any]]
Returns a decorator
Example
-------
>>> @call_with_error(ValueError)
>>> def func(a: int, b: int, error: Callable[[str], None]) -> int:
... if a < 0:
... error("a must be zero or greater")
... if b < 0:
... error("b must be zero or greater")
... return a + b
>>> func(-1, 0)
ValueError("a must be zero or greater")
>>> func(0, -1)
ValueError("b must be zero or greater")
>>> func(-1, -1)
ValueError("a must be zero or greater\nb must be zero or greater")
"""
def _call_with_error(f):
@curry
def error(log, msg):
log.append(msg)
@wraps(f)
def wrapped(*args, **kwargs):
log = []
result = f(*args, error=error(log), **kwargs)
if len(log) > 0:
raise error_type("\n".join(log))
return result
return wrapped
return _call_with_error
| 14,963
|
def _get_metadata_and_fingerprint(instance_name, project, zone):
"""Return the metadata values and fingerprint for the given instance."""
instance_info = _get_instance_info(instance_name, project, zone)
if not instance_info:
logs.log_error('Failed to fetch instance metadata')
return None, None
fingerprint = instance_info['metadata']['fingerprint']
metadata_items = instance_info['metadata']['items']
return metadata_items, fingerprint
| 14,964
|
def makersslist(xlist, nodes, d={}):
    """Recurse until txt is found."""
for i in nodes:
if i.nodeType == i.ELEMENT_NODE:
dd = d[i.nodeName] = {}
makersslist(xlist, i.childNodes, dd)
if dd: xlist.append(dd)
txt = gettext(i.childNodes)
if txt: d[i.nodeName] = txt
| 14,965
|
def solve_token_pair_and_fee_token_economic_viable(
token_pair, accounts, b_orders, s_orders, f_orders, fee,
xrate=None
):
"""Match orders between token pair and the fee token, taking into
account all side constraints, including economic viability.
If xrate is given, then it will be used instead of trying to find
optimal xrate.
Sets b_orders/s_orders/f_orders (integral) buy_amounts for the best execution.
Also returns the (integral) prices found.
"""
b_buy_token, s_buy_token = token_pair
orders, prices = TRIVIAL_SOLUTION
# Search for an economically viable solution.
while len(b_orders) > 0 or len(s_orders) > 0:
# Solve current problem.
orders, prices = solve_token_pair_and_fee_token(
token_pair, accounts, b_orders, s_orders, f_orders, fee, xrate
)
# If solution is economically viable, exit.
# Hopefully, in large majority of cases this will occur in the first iteration.
if is_economic_viable(orders, prices, fee, IntegerTraits) or is_trivial(orders):
break
# If solution cannot be made economically viable (assuming prices wouldn't change)
if len(compute_approx_economic_viable_subset(
orders, prices, fee, IntegerTraits
)) == 0:
orders, prices = TRIVIAL_SOLUTION
break
# Note: to increase performance, we could consider removing all orders that do not
# satisfy min_abs_fee_per_order here at once, instead of removing one at a time as
# it is currently. The advantage of removing one by one is that it will not remove
# more than needed (note that prices, and hence order fees, keep changing).
# Find and remove the order paying the least fee.
b_order_with_min_buy_amount = min(
[o for o in b_orders if o.buy_amount > 0],
key=lambda o: o.buy_amount
)
s_order_with_min_buy_amount = min(
[o for o in s_orders if o.buy_amount > 0],
key=lambda o: o.buy_amount
)
if b_order_with_min_buy_amount.buy_amount * prices[b_buy_token]\
< s_order_with_min_buy_amount.buy_amount * prices[s_buy_token]:
b_orders = [
o for o in b_orders if o.id != b_order_with_min_buy_amount.id
]
else:
s_orders = [
o for o in s_orders if o.id != s_order_with_min_buy_amount.id
]
# Make sure the solution is correct.
validate(accounts, orders, prices, fee)
return orders, prices
| 14,966
|
def two_categorical(df, x, y, plot_type="Cross tab"):
"""
['Cross tab', "Stacked bone_numeric_one_categorical"]
"""
if plot_type is None:
plot_type = 'Cross tab'
if plot_type == 'Stacked bar': # 20
df_cross = pd.crosstab(df[x], df[y])
data = []
for x in df_cross.columns:
data.append(go.Bar(name=str(x), x=df_cross.index, y=df_cross[x]))
fig = go.Figure(data)
fig.update_layout(barmode = 'stack')
#For you to take a look at the result use
if plot_type == "Cross tab": # 21
df_cross = pd.crosstab(df[x], df[y])
return df_cross
return fig
| 14,967
|
def yices_bvconst_one(n):
"""Set low-order bit to 1, all the other bits to 0.
Error report:
if n = 0
code = POS_INT_REQUIRED
badval = n
if n > YICES_MAX_BVSIZE
code = MAX_BVSIZE_EXCEEDED
badval = n.
"""
# let yices deal with int32_t excesses
if n > MAX_INT32_SIZE:
n = MAX_INT32_SIZE
return libyices.yices_bvconst_one(n)
| 14,968
|
def showItem(category_id):
"""Show all Items"""
category = session.query(Category).filter_by(id=category_id).one()
items = session.query(Item).filter_by(
category_id=category_id).all()
return render_template('item.html', items=items, category=category)
| 14,969
|
def get_standard_t_d(l, b, d):
"""
Use NE2001 to estimate scintillation time at 1 GHz and 1 km/s transverse velocity.
Parameters
----------
l : float
Galactic longitude
b : float
Galactic latitude
d : float
Distance in kpc
Returns
-------
t_d : float
Scintillation timescale in s
"""
return query_ne2001(l, b, d, field='SCINTIME')
| 14,970
|
def main(argv=sys.argv):
"""
Entry point
"""
global stats
if len(argv) !=2:
usage(argv)
ifile = argv[1]
try:
f = open(ifile)
except IOError as error:
print ("I/O error while opening file: %s" % error, file=sys.stderr)
return
incomes = {}
i = 0
print("#race, average income, total members")
for line in f:
try:
line = line.split('\n')[0]
income = int(line.split(',')[4])
race = int(line.split(',')[3])
if race not in incomes:
incomes[race] = []
incomes[race].append(income)
except Exception as error:
print("Exception:%s at line:%s" % (error, line), file=sys.stderr)
f.close()
for i in incomes:
print("%d,%.0f,%d" %
(i, sum(incomes[i]) / len(incomes[i]), len(incomes[i])))
| 14,971
|
def get_json_test_data(project, test_name):
"""Get data from json file.
If json data is not of type dict or list of dicts it is ignored.
"""
json_data = None
json_path = json_file_path(project, test_name)
if os.path.isfile(json_path):
try:
with open(json_path, encoding='utf-8') as f:
json_data = json.load(f)
except json.JSONDecodeError:
pass
if type(json_data) is dict:
return [json_data]
if type(json_data) is list:
if all(type(x) is dict for x in json_data):
return json_data
return []
| 14,972
|
def sendsensordata() -> None:
"""
Reads a data structure to configure DHT22 sensors on multiple pins
Example: { "terrarium-cold": 1, "terrarium-center": 2, "terrarium-hot": 3 }
"""
mqtt = MQTT()
for name, pin in config.sensors.items():
print('Measuring sensor', name)
sensor = dht.DHT22(machine.Pin(pin))
try:
sensor.measure()
except OSError as e:
print(e)
mqtt.publish('Notifications/%s/errors' % name, 'Sensor read error')
else:
mqtt.publish('Sensors/%s/Temperature_C' % name, str(sensor.temperature()))
mqtt.publish('Sensors/%s/Humidity_Pct' % name, str(sensor.humidity()))
time.sleep(NETWORK_DELAY)
mqtt.disconnect()
| 14,973
|
def scrape(start, end, out):
"""
Scrape a MLBAM Data
:param start: Start Day(YYYYMMDD)
:param end: End Day(YYYYMMDD)
:param out: Output directory(default:"../output/mlb")
"""
try:
logging.basicConfig(level=logging.DEBUG)
MlbAm.scrape(start, end, out)
except MlbAmBadParameter as e:
raise click.BadParameter(e)
| 14,974
|
def match(pattern: str, text: str) -> bool:
"""
匹配同样长度的字符串
"""
if pattern:
return True
elif pattern == "$" and text == "":
return True
elif pattern[1] == "?":
return _match_question(pattern, text)
elif pattern[1] == "*":
return _match_star(pattern, text)
else:
return match_one(pattern[0], text[0]) and match(pattern[1:], text[1:])
| 14,975
|
def config_func(tools, index, device_id, config_old: {}, config_new: {}):
"""
CANedge configuration update function
:param tools: A collection of tools used for device configuration
:param index: Consecutive device index (from 0)
:param device_id: Device ID
:param config_old: The current device configuration
:param config_new: Default new device configuration
:return: Update configuration
"""
# This is an example of how to upgrade existing access point and S3 credentials from plain to encrypted form. Note
# that below assumes that the existing configuration holds the information in unencrypted form.
# Devices already using encrypted credentials are skipped (no configuration returned)
# New configuration uses same structure. The old configuration can safely be copied to the new.
config_new = config_old
# Only update configurations unencrypted credentials
if config_new["connect"]["wifi"]["keyformat"] == 0 and config_new["connect"]["s3"]["server"]["keyformat"] == 0:
# Set the server kpub
config_new["general"]["security"] = {"kpub": tools.security.user_public_key_base64}
# Set the access point key format to 1 (encrypted)
config_new["connect"]["wifi"]["keyformat"] = 1
# Loop each accesspoint in list
for ap in config_new["connect"]["wifi"]["accesspoint"]:
# Encrypt the wifi password
unencrypted_wifi_pwd = ap["pwd"]
ap["pwd"] = tools.security.encrypt_encode(unencrypted_wifi_pwd)
# Encrypt the S3 secret key
unencrypted_s3_secretkey = config_new["connect"]["s3"]["server"]["secretkey"]
config_new["connect"]["s3"]["server"]["keyformat"] = 1
config_new["connect"]["s3"]["server"]["secretkey"] = tools.security.encrypt_encode(unencrypted_s3_secretkey)
return config_new
| 14,976
|
def load_figure(file_path: str) -> matplotlib.figure.Figure:
"""Fully loads the saved figure to be able to be modified.
It can be easily showed by:
fig_object.show()
Args:
file_path: String file path without file extension.
Returns:
Figure object.
Raises:
None.
"""
with open(file_path + '.pkl', 'rb') as handle:
fig_object = pk.load(handle)
return fig_object
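
# Round-trip sketch (illustrative): save a Figure with pickle the same way
# load_figure() expects, then restore it. The 'example_fig' path is made up,
# and 'pk' is assumed to be the pickle module imported under that alias, as in
# the function above.
import pickle as pk
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
with open('example_fig.pkl', 'wb') as handle:
    pk.dump(fig, handle)

restored = load_figure('example_fig')  # the '.pkl' extension is added internally
restored.show()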
| 14,977
|
def cluster_vectors(vectors, k=500, n_init=100, **kwargs):
    """Cluster the vectors with k-means and return the fitted KMeans model."""
kwargs.pop('n_clusters', None)
kwargs.pop('init', None)
kwargs.pop('n_init', None)
return KMeans(n_clusters=k, init='k-means++', n_init=n_init,
**kwargs).fit(vectors)
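
# Hypothetical usage: cluster 1000 random 32-dimensional vectors into 10
# groups (k=10 and n_init=5 are illustrative values, not the defaults above).
# Assumes `from sklearn.cluster import KMeans` at the top of this module.
import numpy as np

vectors = np.random.rand(1000, 32)
km = cluster_vectors(vectors, k=10, n_init=5)
print(km.cluster_centers_.shape)   # (10, 32)
print(km.labels_[:10])             # cluster index of the first 10 vectors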
| 14,978
|
def positional_encoding(d_model, length):
"""
:param d_model: dimension of the model
:param length: length of positions
:return: length*d_model position matrix
"""
if d_model % 2 != 0:
raise ValueError("Cannot use sin/cos positional encoding with "
"odd dim (got dim={:d})".format(d_model))
pe = torch.zeros(length, d_model)
position = torch.arange(0, length).unsqueeze(1)
div_term = torch.exp((torch.arange(0, d_model, 2, dtype=torch.float) * -(math.log(10000.0) / d_model)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
return pe
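
# Quick illustrative check: a 10 x 16 positional-encoding matrix. At position
# 0 the sine terms (even columns) are 0 and the cosine terms (odd columns)
# are 1.
pe = positional_encoding(d_model=16, length=10)
print(pe.shape)   # torch.Size([10, 16])
print(pe[0])      # alternating 0.0 (sin) and 1.0 (cos) entries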
| 14,979
|
def esx_connect(host, user, pwd, port, ssl):
    """Establish connection with host/vcenter."""
    # connect depending on SSL_VERIFY setting
    if ssl is False:
        si = SmartConnectNoSSL(host=host, user=user, pwd=pwd, port=port)
    else:
        si = SmartConnect(host=host, user=user, pwd=pwd, port=port)
    current_session = si.content.sessionManager.currentSession.key
    _LOGGER.debug("Logged in - session %s", current_session)
    return si
| 14,980
|
def get_cropped_face_img(image_path, margin=44, image_size=160, folders=None):
"""return cropped face img if face is detected,
otherwise remove the img
"""
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
pnet, rnet, onet = init_mtcnn()
img_file_dict = {}
cropped_face_img_dict = {}
if isinstance(image_path, list):
img_file_dict["img_list"] = image_path
img_list = []
for image in image_path:
img = imageio.imread(os.path.expanduser(image), pilmode="RGB",)
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, points = align.detect_face.detect_face(
img, minsize, pnet, rnet, onet, threshold, factor
)
if len(bounding_boxes) < 1:
img_file_dict["img_list"].remove(image)
print("can't detect face, remove ", image)
continue
# print(f"bound_boxes: {bounding_boxes}")
# print(f'points: {points}')
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1] : bb[3], bb[0] : bb[2], :]
aligned = np.array(
Image.fromarray(cropped).resize(
(image_size, image_size), Image.BILINEAR
)
).astype(np.double)
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
# Only add to dict when list is not empty
if img_list:
cropped_face_img_dict["img_list"] = np.stack(img_list)
return cropped_face_img_dict, img_file_dict
else:
if not folders:
for folder in os.listdir(image_path):
for _, _, files in os.walk(os.path.join(image_path, folder)):
img_file_dict[folder] = files
else:
for folder in folders:
for _, _, files in os.walk(os.path.join(image_path, folder)):
img_file_dict[folder] = files
for folder in img_file_dict:
img_list = []
for image in img_file_dict[folder]:
img = imageio.imread(
os.path.expanduser(os.path.join(image_path, folder, image)),
pilmode="RGB",
)
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, points = align.detect_face.detect_face(
img, minsize, pnet, rnet, onet, threshold, factor
)
if len(bounding_boxes) < 1:
img_file_dict[folder].remove(image)
print("can't detect face, remove ", image)
continue
# print(f"bound_boxes: {bounding_boxes}")
# print(f'points: {points}')
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1] : bb[3], bb[0] : bb[2], :]
aligned = np.array(
Image.fromarray(cropped).resize(
(image_size, image_size), Image.BILINEAR
)
).astype(np.double)
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
# Only add to dict when list is not empty
if img_list:
cropped_face_img_dict[folder] = np.stack(img_list)
return cropped_face_img_dict, img_file_dict
| 14,981
|
def invert_hilbert_QQ(n=40, system='sage'):
"""
Runs the benchmark for calculating the inverse of the hilbert
matrix over rationals of dimension n.
INPUT:
    - ``n`` - matrix dimension (default: ``40``)
- ``system`` - either 'sage' or 'magma' (default: 'sage')
EXAMPLES::
sage: import sage.matrix.benchmark as b
sage: ts = b.invert_hilbert_QQ(30)
sage: tm = b.invert_hilbert_QQ(30, system='magma') # optional - magma
"""
if system == 'sage':
A = hilbert_matrix(n)
t = cputime()
d = A**(-1)
return cputime(t)
elif system == 'magma':
code = """
h := HilbertMatrix(%s);
tinit := Cputime();
d := h^(-1);
s := Cputime(tinit);
delete h;
"""%n
if verbose: print(code)
magma.eval(code)
return float(magma.eval('s'))
| 14,982
|
def getModelListForEnumProperty(self, context):
"""Returns a list of (str, str, str) elements which contains the models
contained in the currently selected model category.
If there are no model categories (i.e. '-') return ('-', '-', '-').
    Args:
        context: Blender context; the currently selected model category is
            read from context.window_manager.category.
    Returns:
        list: Sorted enum items for the models in the selected category, or
            [('-', '-', '-')] if no category is selected.
"""
category = context.window_manager.category
if category == '-' or category == '':
return [('-',) * 3]
return sorted(model_previews[category].enum_items)
| 14,983
|
def moveFiles(subsystemDict, dirPrefix):
    """ For each subsystem, moves ROOT files that need to be moved from the directory that receives HLT histograms into the appropriate file structure for processing.
Creates run directory and subsystem directories as needed. Renames files to convention that is later used for extracting timestamps.
Args:
subsystemDict (dict): Dictionary of subsystems (keys) and lists of files that need to be moved (values) for each subsystem.
dirPrefix (str): Directory prefix used to get to all of the folders.
Returns:
        dict: Run numbers (as strings) mapped to the moved file names for each subsystem, plus the HLT mode for each run.
"""
runsDict = {}
# For each subsystem, loop over all files to move, and put them in subsystem directory
for key in subsystemDict.keys():
filesToMove = subsystemDict[key]
if len(filesToMove) == 0:
logger.info("No files to move in %s" % key)
for filename in filesToMove:
# Extract time stamp and run number
tempFilename = filename
splitFilename = tempFilename.replace(".root","").split("_")
#logger.debug("tempFilename: %s" % tempFilename)
#logger.debug("splitFilename: ", splitFilename)
if len(splitFilename) < 3:
continue
timeString = "_".join(splitFilename[3:])
#logger.debug("timeString: ", timeString)
# How to parse the timeString if desired
#timeStamp = time.strptime(timeString, "%Y_%m_%d_%H_%M_%S")
runString = splitFilename[1]
runNumber = int(runString)
hltMode = splitFilename[2]
# Determine the directory structure for each run
runDirectoryPath = "Run" + str(runNumber)
# Move replays of the data to a different directory
if hltMode == "E":
runDirectoryPath = os.path.join("ReplayData", runDirectoryPath)
# Create Run directory and subsystem directories as needed
if not os.path.exists(os.path.join(dirPrefix, runDirectoryPath)):
os.makedirs( os.path.join(dirPrefix, runDirectoryPath) )
if len(filesToMove) != 0 and not os.path.exists(os.path.join(dirPrefix, runDirectoryPath, key)):
os.makedirs(os.path.join(dirPrefix, runDirectoryPath, key))
newFilename = key + "hists." + timeString + ".root"
oldPath = os.path.join(dirPrefix, tempFilename)
newPath = os.path.join(dirPrefix, runDirectoryPath, key, newFilename)
logger.info("Moving %s to %s" % (oldPath, newPath))
            # DON'T IMPORT MOVE. BAD CONSEQUENCES!!
shutil.move(oldPath, newPath)
# Create dict for subsystem if it doesn't exist, and then create a list for the run if it doesn't exist
# See: https://stackoverflow.com/a/12906014
runsDict.setdefault(runString, {}).setdefault(key, []).append(newFilename)
# Save the HLT mode
# Must be the same for each file in the run
if "hltMode" not in runsDict[runString]:
runsDict[runString]["hltMode"] = hltMode
return runsDict
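
# Hypothetical walk-through, assuming file names of the form
# "<prefix>_<run>_<hltMode>_<YYYY>_<MM>_<DD>_<HH>_<MM>_<SS>.root" (inferred
# from the parsing above) and a module-level `logger`. Paths are throwaway.
import os
import tempfile

dirPrefix = tempfile.mkdtemp()
fname = "EMChists_123456_B_2015_3_14_2_3_4.root"
open(os.path.join(dirPrefix, fname), "w").close()

runs = moveFiles({"EMC": [fname]}, dirPrefix)
print(runs)
# {'123456': {'EMC': ['EMChists.2015_3_14_2_3_4.root'], 'hltMode': 'B'}}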
| 14,984
|
def h_atom():
"""H atom solved numerically"""
m = 1
eigval = -1/2
rs = np.linspace(0.000001, 5.0, num=2000)
sol = odeint(radial_h_potential,
y0=np.array([1, -0.001]),
t=rs,
args=(m, 0, eigval))
plt.plot(rs, sol[:, 0])
plt.plot(rs, np.exp(-rs))
plt.savefig('tmp.pdf')
return
| 14,985
|
def run_wcs(*args, **kwargs):
"""
Set up the environment and run the bundled wcs.exe (from the Talon distribution)
using the supplied command line arguments
"""
# Pull out keyword args that we are interested in
write_stdout_to_console = kwargs.get("write_stdout_to_console", False)
# Override the TELHOME environment variable so that we can use relative
# paths when specifying the location of the GSC directory and ip.cfg
environment = dict(TELHOME=paths.talon_wcs_path())
stdout_destination = PIPE
if write_stdout_to_console:
stdout_destination = None
# Make sure all passed-in arguments are strings
args = [str(x) for x in args]
args = [
WCS_EXE,
# wcs.exe will use the last-specified values for -i and -c, so
# we'll provide defaults below but they can be overridden by values
# coming in via the *args array
"-i", "ip.cfg", # Specify the path to ip.cfg (relative to TELHOME)
"-c", "gsc" # Specify the path to the GSC catalog (relative to TELHOME)
] + list(args) # Include additional args specified by the user
process = Popen(
args,
env=environment,
stdout=stdout_destination,
stderr=PIPE
)
(stdout, stderr) = process.communicate() # Obtain stdout and stderr output from the wcs tool
exit_code = process.wait() # Wait for process to complete and obtain the exit code
if not write_stdout_to_console:
logging.info(stdout.decode("utf-8"))
if exit_code != 0:
logging.info("Error finding WCS solution.\n" +
"Exit code: " + str(exit_code) + "\n" +
"Error output: " + stderr.decode("utf-8"))
return False
return True
| 14,986
|
def read_setup_cfg():
"""
    Read setup.cfg from the package directory (HERE) and return it as a
    ConfigParser instance.
"""
config_file = os.path.join(HERE, "setup.cfg")
cp = ConfigParser()
cp.read([config_file])
return cp
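
# Illustrative usage; assumes HERE points at a directory containing a
# setup.cfg with a [metadata] section (section/option names are examples).
cfg = read_setup_cfg()
print(cfg.sections())
print(cfg.get("metadata", "name", fallback="<unset>"))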
| 14,987
|
def store_exposure_fp(fp, exposure_type):
"""
    Preserve the original exposure file extension if it is in a pandas-supported
    compressed format
compression : {‘infer’, ‘gzip’, ‘bz2’, ‘zip’, ‘xz’, None}, default ‘infer’
For on-the-fly decompression of on-disk data. If ‘infer’ and
filepath_or_buffer is path-like, then detect compression from
the following extensions: ‘.gz’, ‘.bz2’, ‘.zip’, or ‘.xz’
(otherwise no decompression).
If using ‘zip’, the ZIP file must contain only one data file
to be read in. Set to None for no decompression.
New in version 0.18.1: support for ‘zip’ and ‘xz’ compression.
"""
compressed_ext = ('.gz', '.bz2', '.zip', '.xz')
filename = SOURCE_FILENAMES[exposure_type]
if fp.endswith(compressed_ext):
return '.'.join([filename, fp.rsplit('.')[-1]])
else:
return filename
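
# Hypothetical example; SOURCE_FILENAMES is defined elsewhere in the module
# and is assumed here to map 'location' -> 'location.csv'.
print(store_exposure_fp('accounts/location_2020.csv.gz', 'location'))
# -> 'location.csv.gz'   (compressed extension is preserved)
print(store_exposure_fp('accounts/location_2020.csv', 'location'))
# -> 'location.csv'      (canonical filename, no extra extension)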
| 14,988
|
def velocity_genes(data, vkey='velocity', min_r2=0.01, highly_variable=None, copy=False):
"""Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
min_r2: `float` (default: 0.01)
Minimum threshold for coefficient of determination
highly_variable: `bool` (default: `None`)
Whether to include highly variable genes only.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Updates `adata` attributes
velocity_genes: `.var`
genes to be used for further velocity analysis (velocity graph and embedding)
"""
adata = data.copy() if copy else data
if vkey + '_genes' not in adata.var.keys(): velocity(data, vkey)
adata.var[vkey + '_genes'] = np.array(adata.var[vkey + '_genes'], dtype=bool) & (adata.var[vkey + '_r2'] > min_r2)
if highly_variable and 'highly_variable' in adata.var.keys():
adata.var[vkey + '_genes'] &= adata.var['highly_variable']
logg.info('Number of obtained velocity_genes:', np.sum(adata.var[vkey + '_genes']))
return adata if copy else None
| 14,989
|
def test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all attributes are collected into:
# all_probs[image] = 40 x 2 array of attributes in
# (score1, score2)
all_probs = [[] for _ in xrange(num_images)]
output_dir = get_output_dir(imdb, net)
# timers
_t = {'im_attr': Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
for i in xrange(num_images):
# filter out any ground truth boxes
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
# The roidb may contain ground-truth rois (for example, if the roidb
# comes from the training or val split). We only want to evaluate
# detection on the *non*-ground-truth rois. We select those the rois
# that have the gt_classes field set to 0, which means there's no
# ground truth.
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
# resize to 178*218
# im_resized = cv2.resize(im, (178, 218))
_t['im_attr'].tic()
probs = im_attr(net, im, box_proposals)
_t['im_attr'].toc()
all_probs[i] = probs
print 'im_attr: {:d}/{:d} {:.3f}s' \
.format(i + 1, num_images, _t['im_attr'].average_time)
attr_file = os.path.join(output_dir, 'attributes.pkl')
with open(attr_file, 'wb') as f:
cPickle.dump(all_probs, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating attributes'
imdb.evaluate_attributes(all_probs, output_dir)
| 14,990
|
def start_cluster_server(ctx, num_gpus=1, rdma=False):
"""*DEPRECATED*. Use higher-level APIs like `tf.keras` or `tf.estimator`"""
raise Exception("DEPRECATED: Use higher-level APIs like `tf.keras` or `tf.estimator`")
| 14,991
|
def _init_servers(r_exec, s_execs, binary_dirs, out_dir, sync,
staged_src, singlesrv_cfg):
"""Initializes the receiver and senders.
Args:
r_exec: The receiver executor session.
s_execs: The sender executor sessions.
binary_dirs: Where to fetch binaries (e.g., tc, netperf, ...). This is a
list of directories to search in.
out_dir: Where to put the data.
sync: Whether to sync the python files on sender and receiver.
staged_src: Staged transperf source ready for transfer.
singlesrv_cfg: Single server mode config params.
Raises:
RuntimeError: When encountered a critial error.
"""
# Check if single server mode. If so, we do not use the root namespaces.
singlesrv_mode = singlesrv_cfg['enabled']
use_rootns = not singlesrv_mode
all_targets = [r_exec] + s_execs
cleanup_cmds = {}
for target in all_targets:
cleanup_cmds[target] = ['{sudo} pkill -f transperf']
tgt_exp_dir = transperf.path.get_exp_out_dir(
target.get_container_root_dir())
cleanup_cmds[target].append(
'rm -rf {exp_dir}'.format(exp_dir=tgt_exp_dir))
cleanup_cmds[target].append(
'mkdir -p {exp_dir}'.format(exp_dir=tgt_exp_dir))
if sync:
cleanup_cmds[target].append(
'mkdir -p ' +
transperf.path.get_transperf_home(
target.get_container_root_dir()))
to_sync = _stage_transperf_binaries(binary_dirs, sync,
cleanup_cmds, staged_src, all_targets)
LOG.info('Staged files list: %s', to_sync)
# Background procs are to improve initial launch time. We try to run as much
# as we can in parallel.
procs = []
for target in all_targets:
for cmd in cleanup_cmds[target]:
# When in single server mode, trying to run too many commands at
# the same time intermittently fails.
target.run(cmd, use_rootns=use_rootns)
if not singlesrv_mode:
LOG.debug('disabling containers on %s', target.addr())
# Create directory for configuration file.
config_dir = os.path.join(transperf.path.get_transperf_home(), '__config')
cfg_dir_make_cmd = 'rm -rf %(cfg)s && mkdir -p %(cfg)s && rm -rf %(cfg)s/*'
cfg_dir_make_cmd %= {'cfg': config_dir}
cfg_dir_make_cmd = '{sudo} %(cmd)s' % {'cmd': cfg_dir_make_cmd}
# We push it for the receiver node and orchestrator (if single server mode).
procs.append(r_exec.bg(cfg_dir_make_cmd, use_rootns=use_rootns))
if singlesrv_mode:
procs.append(r_exec.bg(cfg_dir_make_cmd, use_rootns=True)) # for orch
# Create directory for node interface configuration.
node_ifacecfg_dir = os.path.join(transperf.path.get_transperf_home(),
transperf.path.IFACE_CFG_DIR)
scp_node_iface_cmd = '{sudo} mkdir -p %s' % node_ifacecfg_dir
procs.append(r_exec.bg(scp_node_iface_cmd, use_rootns=use_rootns))
# NB: orch.py does not need this so no single server special case here.
# We also push ifacecfg to the sender nodes; prepare directories for them.
for s_exec in s_execs:
procs.append(s_exec.bg(scp_node_iface_cmd, use_rootns=use_rootns))
# Wait for directory creation/cleanup to complete.
for p in procs:
shell.wait(p)
procs = []
if sync:
for target in all_targets:
procs.append(target.push_bg(to_sync,
transperf.path.get_transperf_home(),
use_rootns=use_rootns))
# Push configs.
cfg_items = glob.glob(os.path.join(out_dir, '*.py'))
procs.append(r_exec.push_bg(cfg_items, config_dir, use_rootns=use_rootns))
if singlesrv_mode:
procs.append(r_exec.push_bg(cfg_items, config_dir, use_rootns=True))
# Also push the interface config files if any.
local_ifacecfg_dir = os.path.join(out_dir, transperf.path.IFACE_CFG_DIR)
iface_cfgs = glob.glob(os.path.join(local_ifacecfg_dir, '*.py'))
if iface_cfgs:
procs.append(r_exec.push_bg(iface_cfgs, node_ifacecfg_dir,
use_rootns=use_rootns))
# Push ifacecfg to senders too.
for s_exec in s_execs:
procs.append(s_exec.push_bg(iface_cfgs, node_ifacecfg_dir,
use_rootns=use_rootns))
# Install data files needed for tc distributions.
dist_files = glob.glob(os.path.join(out_dir, 'data', '*.dist'))
if dist_files:
# Special case here; tc_lib_dir might or might not be in a
# node-virtualized directory, and we need to be careful which.
use_rootns_dist_files = True # Default behaviour
tc_lib_dir = transperf.path.tc_lib_dir()
tc_lib_is_virt = False
for pfx in virtsetup.Constants.VIRTUALIZED_PATHS:
if os.path.commonprefix([pfx, tc_lib_dir]) == pfx:
tc_lib_is_virt = True
break
if tc_lib_is_virt and singlesrv_mode:
use_rootns_dist_files = False
procs.append(r_exec.push_bg(dist_files, transperf.path.tc_lib_dir(),
use_rootns=use_rootns_dist_files))
# Wait for transfers to complete.
for p in procs:
_, err, returncode = shell.wait(p)
if err and returncode != 0:
raise RuntimeError(err)
| 14,992
|
def update_graphics_labels_from_node_data(node, n_id_map, add_new_props):
"""Updates the graphics labels so they match the node-data"""
try:
gfx = select_child(node, n_id_map, 'nodegraphics').getchildren()[0].getchildren()
    except Exception:
return None
node_label = select_child(node, n_id_map, 'labelcount').text
node_props = select_child(node, n_id_map, 'node_prop_text').text
    # Nodes have either 0, 1, or 2 node labels. If 1, it's just the title and count.
    # If 2, the first one is the title and count, the second is properties and counts.
i = 0
for elem in gfx:
if elem.tag.endswith('NodeLabel'):
if i == 0:
elem.text = node_label
i += 1
# not all nodes have a props-label
elif i == 1 and node_props:
# Add all properties to the label text, even if new
elem.text = node_props
| 14,993
|
def _evaluate_worker(input_queue, output_queue, logging_queue, glm_mgr):
"""'Worker' function for evaluating individuals in parallel.
This method is designed to be used in a multi-threaded or
multi-processing environment.
:param input_queue: Multiprocessing.JoinableQueue instance. The
objects in this queue are expected to only be of type
ga.Individual. Note that we can't do an explicit type check
on this object, so we'll instead check for the task_done
attribute. You're asking for trouble if this is a simple
queue.Queue object (which is multi-threading safe, but not
multi-processing safe).
If None is received in the queue, the process will terminate.
:param output_queue: Multiprocessing.Queue instance. The input
Individuals will be placed into the output queue after they've
been evaluated.
:param logging_queue: Multiprocessing.Queue instance for which
dictionaries with logging information will be placed. See the
_logging_thread function for further reference.
:param glm_mgr: glm.GLMManager instance which will be passed along
to the ga.Individual's evaluate method. So, read the comment
there for more details on requirements.
IMPORTANT NOTE ON THE glm_mgr: The glm_mgr will be re-used for each
subsequent individual. At the time of writing (2019-07-16), this
is just fine, because none of the updates that happen care about
the previous value. HOWEVER, if you go and change this to use
multi-threading instead of multi-processing, you're going to
enter a special kind of hell. The work-around is to create a
deepcopy for each individual.
"""
# Ensure our input_queue is joinable.
try:
input_queue.task_done
except AttributeError:
raise TypeError('input_queue must be multiprocessing.JoinableQueue')
# Loop forever.
while True:
# Grab an individual from the queue. Wait forever.
ind = input_queue.get(block=True, timeout=None)
# Terminate if None is received.
if ind is None:
# Mark the task as done so joins won't hang later.
input_queue.task_done()
# We're done here. Deuces.
return
try:
t0 = time.time()
# So, we now have an individual. Evaluate.
ind.evaluate(glm_mgr=glm_mgr,
db_conn=db.connect_loop(timeout=10,
retry_interval=0.1))
t1 = time.time()
# Dump information into the logging queue.
logging_queue.put({'uid': ind.uid, 'fitness': ind.fitness,
'penalties': ind.penalties,
'time': t1 - t0})
except Exception as e:
# This is intentionally broad, and is here to ensure that
# the process attached to this method (or when this method
# is attached to a process?) doesn't crash and burn.
logging_queue.put({'error': e,
'uid': ind.uid})
finally:
try:
# Put the (possibly) fully evaluated individual in the
# output queue. Error handling in ind.evaluate will
# ensure a failed evaluation results in a fitness of
# infinity.
output_queue.put(ind)
finally:
# Mark this task as complete. Putting this in a finally
# block should avoid us getting in a stuck state where
# a failure in evaluation causes us to not mark a task
# as complete.
input_queue.task_done()
| 14,994
|
def deploy_binary_if_master(args):
"""if the active branch is 'master', deploy binaries for the primary suite to remote maven repository."""
master_branch = 'master'
active_branch = mx.VC.get_vc(SUITE.dir).active_branch(SUITE.dir)
if active_branch == master_branch:
if sys.platform == "darwin":
args.insert(0, "--platform-dependent")
return mx.command_function('deploy-binary')(args)
else:
mx.log('The active branch is "%s". Binaries are deployed only if the active branch is "%s".' % (
active_branch, master_branch))
return 0
| 14,995
|
def fast_spearman(x, y=None, destination=None):
    """calculate the spearman correlation matrix for the columns of x (with dimensions MxN), or optionally, the spearman correlation
    matrix between the columns of x and the columns of y (with dimensions OxP). If destination is provided, put the results there.
    In the language of statistics the columns are the variables and the rows are the observations.
    Args:
        x (numpy array-like) MxN in shape
        y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)
        destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
            memmap of a file)
    returns:
        (numpy array-like) array of the spearman correlation values
            for defaults (y=None), shape is NxN
            if y is provided, shape is NxP
"""
logger.debug("x.shape: {}".format(x.shape))
if hasattr(y, "shape"):
logger.debug("y.shape: {}".format(y.shape))
x_ranks = pandas.DataFrame(x).rank(method="average").values
logger.debug("some min and max ranks of x_ranks:\n{}\n{}".format(numpy.min(x_ranks[:10], axis=0), numpy.max(x_ranks[:10], axis=0)))
y_ranks = pandas.DataFrame(y).rank(method="average").values if y is not None else None
return fast_corr(x_ranks, y_ranks, destination)
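
# Illustrative sanity check (assumes the module's fast_corr dependency is
# importable): compare against scipy.stats.spearmanr on a small random matrix.
import numpy
import scipy.stats

x = numpy.random.rand(50, 4)
r_fast = fast_spearman(x)
r_scipy, _ = scipy.stats.spearmanr(x)
print(numpy.allclose(r_fast, r_scipy))   # expected: True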
| 14,996
|
def test_RootLogger_configure_processors(root_logger_default_params):
"""
    Tests that the default processors loaded are valid
    # C1: All processors returned are functions
    # C2: logging_renderer must be the last processor of the list
    # C3: All default trackers are present in the processors list
"""
# C1
processors = root_logger_default_params._configure_processors()
assert all(isinstance(_funct, Callable) for _funct in processors)
# C2
last_processor = processors[-1]
assert any(
extract_name(last_processor) == extract_name(_funct)
for _funct in [
StructlogUtils.graypy_structlog_processor,
structlog.processors.JSONRenderer(indent=1)
]
)
# C3
processors_names = [extract_name(processor) for processor in processors]
assert all(
extract_name(def_tracker) in processors_names
for def_tracker in DEFAULT_TRACKERS
)
| 14,997
|
def test_report_provider_when_vmware_is_installed():
"""Test report_provider."""
with patch.object(mech.vmrun.VMrun, 'installed', return_value=True) as mock_vmrun:
assert not mech.utils.report_provider('atari')
assert mech.utils.report_provider('vmware')
mock_vmrun.assert_called()
| 14,998
|
def ProcuraPalavra(dicionário, palavra):
    """
    Searches for the possible words that could replace
    the given word and returns them in a list
    """
    #First of all we uppercase the word
    #so the comparisons can be made
    palavra = palavra.upper()
    #First we handle the case where a first
    #letter has been selected, which would make
    #our search easier
    if palavra[0] != '*':
        #We start by finding the point in the dictionary
        #where our letter begins
        for i in range(len(dicionário)):
            if i % 100 == 0:
                print('Searching for the letter in the dictionary...')
            if dicionário[i][0] == palavra[0]:
                break
        #And also the point in the dictionary where
        #our letter ends
        for j in range(i, len(dicionário)):
            if j % 100 == 0:
                print('Searching for the letter in the dictionary...')
            if dicionário[j][0] != palavra[0]:
                break
        return SeparaPorTamanho(dicionário[i:j], palavra)
    else:
        return SeparaPorTamanho(dicionário, palavra)
| 14,999
|