def test_uncrustify_tool_plugin_found():
"""Test that the plugin manager can find the uncrustify plugin."""
manager = PluginManager()
# Get the path to statick_tool/__init__.py, get the directory part, and
# add 'plugins' to that to get the standard plugins dir
manager.setPluginPlaces([os.path.join(os.path.dirname(statick_tool.__file__),
'plugins')])
manager.setCategoriesFilter({
"Tool": ToolPlugin,
})
manager.collectPlugins()
# Verify that a plugin's get_name() function returns "uncrustify"
assert any(plugin_info.plugin_object.get_name() == 'uncrustify' for
plugin_info in manager.getPluginsOfCategory("Tool"))
# While we're at it, verify that a plugin is named Uncrustify Tool Plugin
assert any(plugin_info.name == 'Uncrustify Tool Plugin' for
plugin_info in manager.getPluginsOfCategory("Tool"))
| 20,400
|
def objective_func(x, cs_objects, cs_data):
"""
Define the objective function
:param x: 1D array containing the voltages to be set
    :param cs_objects: 2D array of per-channel control-system settings and allowed voltage limits
    :param cs_data: tuple containing all extra parameters needed by total_counts
:return: average count rate for 100 shots
"""
x = np.around(x,2)
try:
flag_range = 0
        for i in range(len(x)):
if (x[i] <= float(cs_objects[i,4])) or (x[i] >= float(cs_objects[i,5])):
flag_range = 1
raise ValueError
        for i in range(len(x)):
if flag_range == 0:
if int(cs_objects[i,2]) != -1:
cs.call_process2(cs_objects[i,0], cs_objects[i,1], "I:1,D:1", cs.pack_ch_val([int(cs_objects[i,2])], [x[i]]))
else:
cs.call_process2(cs_objects[i,0], cs_objects[i,1], "D:1", cs.pack_val([x[i]]))
else:
return
time.sleep(1)
flag = 0
value = total_counts(flag, *cs_data)
# value = scop.rosen(x)
return value
except ValueError:
        print("Value error: value went out of bound")
| 20,401
|
def getHPELTraceLogAttribute(nodename, servername, attributename):
""" This function returns an attribute of the HPEL Trace Log for the specified server.
Function parameters:
nodename - the name of the node on which the server to be configured resides.
servername - the name of the server whose HPEL Trace is to be configured.
attributename - the following attribute names can be specified:
- 'dataDirectory' - Specifies the name of the directory where the HPEL logs
will be stored.
- 'bufferingEnabled' - Specifies whether or not log record buffering should
be enabled. Valid values are 'true' and 'false'.
- 'fileSwitchEnabled' - Specifies whether or not a new log file should be
started each day. Valid values are 'true' and 'false'.
- 'fileSwitchTime' - If 'fileSwitchEnabled' is set to 'true', this field
specifies the time that new log file should be started.
A value from 0 - 23 should be specified. A value of 0
means 12 AM 1 means 1 AM, 2 means 2 AM, ..., 23 means
11 PM. If a value greater than 23 is entered, this
field will be set to 0 (12 AM).
- 'memoryBufferSize' - Specifies the size (in MB) of the memory trace buffer.
- 'outOfSpaceAction' - Specifies which action to take if the hard disk runs
out of space. Valid values are 'StopLogging',
'StopServer', and 'PurgeOld'.
- 'purgeBySizeEnabled' - Specifies whether or not to purge the logs based
on size. Valid values are 'true' and 'false'.
- 'purgeByTimeEnabled' - Specifies whether or not to purge the logs based
on time. Valid values are 'true' and 'false'.
- 'purgeMaxSize' - Specifies the maximum total size of the logs (in MB).
- 'purgeMinTime' - Specifies the minimum amount of time to keep the logs
(in hours).
- 'storageType' - Specifies whether the trace log should be written to a
directory or to memory. Valid values are 'DIRECTORY'
and 'MEMORYBUFFER'.
"""
m = "getHPELTraceLogAttribute:"
sop (m, "Entering function...")
sop (m, "Calling getNodeId() with nodename = %s." % (nodename))
nodeID = getNodeId(nodename)
sop (m, "Returned from getNodeID; returned nodeID = %s" % nodeID)
if nodeID == "":
        raise Exception("Could not find node name '%s'" % (nodename))
else:
sop (m, "Calling getServerId() with nodename = %s and servername = %s." % (nodename, servername))
serverID = getServerId(nodename, servername)
sop (m, "Returned from getServerID; returned serverID = %s" % serverID)
if serverID == None:
            raise Exception("Could not find server '%s' on node '%s'" % (servername, nodename))
else:
serviceName = "HighPerformanceExtensibleLogging"
sop (m, "Calling AdminConfig.list with serviceName = %s and serverID = %s." % (serviceName, serverID))
HPELID = AdminConfig.list(serviceName, serverID)
sop (m, "Returned from AdminConfig.list; HPELID = %s" % HPELID)
sop (m, "Calling AdminConfig.list to get the config ID of the HPEL Trace object.")
HPELTraceID = AdminConfig.list("HPELTrace", HPELID)
sop (m, "Returned from AdminConfig.list; HPELTraceID = %s" % HPELTraceID)
sop(m, "Calling AdminConfig.showAttribute to get the value of attribute = %s" % ( attributename ))
attributevalue = AdminConfig.showAttribute(HPELTraceID, attributename)
sop (m, "Returned from AdminConfig.showAttribute; attributevalue = %s" % ( attributevalue ))
sop (m, "Exiting function...")
return attributevalue
#endif
#endif
| 20,402
|
def dispersionTable(adata):
"""
    Extract the dispersion table computed by the fitted dispersion model.
    Parameters
    ----------
    adata
        an AnnData object whose ``.uns["ispFitInfo"]["blind"]`` slot holds the dispersion fit
    Returns
    -------
    pd.DataFrame
        gene ids with their mean expression and fitted/empirical dispersions
    """
    if adata.uns["ispFitInfo"]["blind"] is None:
        raise Exception("Error: no dispersion model found. Please call estimateDispersions() before calling this function")
disp_df = pd.DataFrame({"gene_id": adata.uns["ispFitInfo"]["blind"]["disp_table"]["gene_id"],
"mean_expression": adata.uns["ispFitInfo"]["blind"]["disp_table"]["mu"],
"dispersion_fit": adata.uns["ispFitInfo"]["blind"]["disp_table"]["blind"]["mu"],
"dispersion_empirical": adata.uns["ispFitInfo"]["blind"]["disp_table"]["disp"]})
return disp_df
| 20,403
|
def L10_indicator(row):
"""
Determine the Indicator of L10 as one of five indicators
"""
if row < 40:
return "Excellent"
elif row < 50:
return "Good"
elif row < 61:
return "Fair"
elif row <= 85:
return "Poor"
else:
return "Hazard"
| 20,404
|
def create_ip_record(
heartbeat_df: pd.DataFrame, az_net_df: pd.DataFrame = None
) -> IpAddress:
"""
Generate ip_entity record for provided IP value.
Parameters
----------
heartbeat_df : pd.DataFrame
A dataframe of heartbeat data for the host
az_net_df : pd.DataFrame
        Optional dataframe of Azure network data for the host
Returns
-------
    IpAddress
Details of the IP data collected
"""
ip_entity = IpAddress()
# Produce ip_entity record using available dataframes
ip_hb = heartbeat_df.iloc[0]
ip_entity.Address = ip_hb["ComputerIP"]
ip_entity.hostname = ip_hb["Computer"] # type: ignore
ip_entity.SourceComputerId = ip_hb["SourceComputerId"] # type: ignore
ip_entity.OSType = ip_hb["OSType"] # type: ignore
ip_entity.OSName = ip_hb["OSName"] # type: ignore
    ip_entity.OSVMajorVersion = ip_hb["OSMajorVersion"]  # type: ignore
ip_entity.OSVMinorVersion = ip_hb["OSMinorVersion"] # type: ignore
ip_entity.ComputerEnvironment = ip_hb["ComputerEnvironment"] # type: ignore
ip_entity.OmsSolutions = [ # type: ignore
sol.strip() for sol in ip_hb["Solutions"].split(",")
]
ip_entity.VMUUID = ip_hb["VMUUID"] # type: ignore
ip_entity.SubscriptionId = ip_hb["SubscriptionId"] # type: ignore
geoloc_entity = GeoLocation() # type: ignore
geoloc_entity.CountryName = ip_hb["RemoteIPCountry"] # type: ignore
geoloc_entity.Longitude = ip_hb["RemoteIPLongitude"] # type: ignore
geoloc_entity.Latitude = ip_hb["RemoteIPLatitude"] # type: ignore
ip_entity.Location = geoloc_entity # type: ignore
# If Azure network data present add this to host record
if az_net_df is not None and not az_net_df.empty:
if len(az_net_df) == 1:
priv_addr_str = az_net_df["PrivateIPAddresses"].loc[0]
ip_entity["private_ips"] = convert_to_ip_entities(priv_addr_str)
pub_addr_str = az_net_df["PublicIPAddresses"].loc[0]
ip_entity["public_ips"] = convert_to_ip_entities(pub_addr_str)
else:
if "private_ips" not in ip_entity:
ip_entity["private_ips"] = []
if "public_ips" not in ip_entity:
ip_entity["public_ips"] = []
return ip_entity
| 20,405
|
def batch_render(voxel_dir, dest_dir, voxel_processor=None):
"""Render a bunch of voxel tensors stored as npy files in the voxel_dir.
Args:
        voxel_dir: A directory containing voxel tensors stored in npy files.
        dest_dir: Directory where the rendered outputs are written.
        voxel_processor: Function that processes the voxels before rendering
"""
npy_files = glob.glob(os.path.join(voxel_dir, '*.npy'))
batch_render_voxels(npy_files, dest_dir, write_txt=True, voxel_processor=voxel_processor)
render_nrrd('logs/generation/results/nrrd_output/nrrd_filenames.txt', dest_dir, check=False)
| 20,406
|
def extract_version(version_file_name):
"""Extracts the version from a python file.
The statement setting the __version__ variable must not be indented. Comments after that
statement are allowed.
"""
regex = re.compile(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]\s*(#.*)?$")
with open(version_file_name, "r") as version_file:
lines = version_file.read().splitlines()
for line in reversed(lines):
version_match = regex.match(line)
if version_match:
return version_match.group(1)
else:
raise RuntimeError("Unable to find version string.")
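# Hedged usage sketch (not part of the original project): write a throwaway module
# containing an unindented __version__ assignment and read the version back out.
# The temporary file and the "1.2.3" string are illustrative only.
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as _handle:
    _handle.write('__version__ = "1.2.3"  # release\n')
    _tmp_name = _handle.name
try:
    assert extract_version(_tmp_name) == "1.2.3"
finally:
    os.remove(_tmp_name)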
| 20,407
|
def test_image_dictionary(test_link):
"""
    Test if the image_dictionary method returns a dictionary in the correct
    format.
"""
url = 'https://example.com'
filename = 'example.com'
result = {'url' : url, 'filename' : filename}
assert test_link.image_dictionary(url, filename) == result
| 20,408
|
def test_invalid_stdout():
"""invalid utf-8 byte in stdout."""
# https://en.wikipedia.org/wiki/UTF-8#Codepage_layout
# 0x92 continuation byte
if six.PY3:
cmd = [python, "-c", "import sys;sys.stdout.buffer.write(b'\\x92')"]
else:
cmd = [python, "-c", "import sys;sys.stdout.write(b'\\x92')"]
p = EasyProcess(cmd).call()
assert p.return_code == 0
assert p.stdout == ""
# 0xFF must never appear in a valid UTF-8 sequence
if six.PY3:
cmd = [python, "-c", "import sys;sys.stdout.buffer.write(b'\\xFF')"]
else:
cmd = [python, "-c", "import sys;sys.stdout.write(b'\\xFF')"]
p = EasyProcess(cmd).call()
assert p.return_code == 0
assert p.stdout == ""
| 20,409
|
def batteryRoutine():
    """ Creates a routine for the robot that checks the battery and
    sends the robot to the dock station for charging purposes
"""
# The "get Charged" routine
getChargedTask = Selector("getChargedTask")
# Add the check battery condition
checkBatteryTask = MonitorTask("checkBattery", "battery_level",
Float32, check_battery)
# Add the recharge task
chargeRobotTask = ServiceTask("chargeRobot", "battery_simulator/set_battery_level",
SetBatteryLevel, 100, result_cb=recharge_cb)
# Add the movement routine to the dock
coords = global_vars.black_board.getCoords('dock')
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = 'map'
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose = coords
moveToDockTask = SimpleActionTask("MoveToDock", "move_base", MoveBaseAction, goal,
reset_after=True, feedback_cb=update_robot_position, result_timeout=100)
checkLocation = checkLocationTask("dock")
NavigationTask = Selector("Nav", [checkLocation, moveToDockTask] )
# Build the recharge sequence using inline syntax
rechargeTask = Sequence("recharge", [NavigationTask, chargeRobotTask])
# Add the check battery and recharge tasks to the stay healthy selector
getChargedTask.add_child(checkBatteryTask)
getChargedTask.add_child(rechargeTask)
#Add the routine to the black board
global_vars.black_board.setRoutine(getChargedTask)
| 20,410
|
def get_object_combinations(objects: Collection[Object],
types: Sequence[Type]) -> Iterator[List[Object]]:
"""Get all combinations of objects satisfying the given types sequence."""
sorted_objects = sorted(objects)
choices = []
for vt in types:
this_choices = []
for obj in sorted_objects:
if obj.is_instance(vt):
this_choices.append(obj)
choices.append(this_choices)
for choice in itertools.product(*choices):
yield list(choice)
| 20,411
|
def powerlaw_loglike(data, theta):
"""Return the natural logarithm of the likelihood P(data | theta) for our
model of the ice flow.
data is expected to be a tuple of numpy arrays = (x, y, sigma)
theta is expected to be an array of parameters = (intercept, slope)
"""
x, y, sigma = data
n = len(x)
model = powerlaw_model(x, theta)
    lnlike = -0.5 * (n*np.log(2.*np.pi) + np.sum(2.*np.log(sigma) + (
        y-model)**2 / sigma**2))
return lnlike
| 20,412
|
def get_conv(dim=3):
"""Chooses an implementation for a convolution layer."""
if dim == 3:
return nn.Conv3d
elif dim == 2:
return nn.Conv2d
else:
raise ValueError('dim has to be 2 or 3')
| 20,413
|
def resolve_path(path, parent=None):
"""Resolves the absolute path of the specified file.
Args:
path (str): Path to resolve.
parent (str): The directory containing ``path`` if ``path`` is relative.
Returns:
The absolute path.
Raises:
IOError: if the path does not exist.
"""
apath = abspath(path)
if not os.path.exists(apath) and parent is not None:
apath = abspath(os.path.join(parent, path))
if not os.path.exists(apath):
raise IOError(errno.ENOENT, "%s does not exist" % apath, apath)
return apath
| 20,414
|
def notebook_metadata():
"""Attempts to query jupyter for the path and name of the notebook file"""
error_message = "Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable"
try:
import ipykernel
from notebook.notebookapp import list_running_servers
kernel_id = re.search('kernel-(.*).json', ipykernel.connect.get_connection_file()).group(1)
servers = list(list_running_servers()) # TODO: sometimes there are invalid JSON files and this blows up
except Exception:
logger.error(error_message)
return {}
for s in servers:
try:
if s['password']:
raise ValueError("Can't query password protected kernel")
res = requests.get(urljoin(s['url'], 'api/sessions'), params={'token': s.get('token', '')}).json()
except (requests.RequestException, ValueError):
logger.error(error_message)
return {}
for nn in res:
# TODO: wandb/client#400 found a case where res returned an array of strings...
if isinstance(nn, dict) and nn.get("kernel") and 'notebook' in nn:
if nn['kernel']['id'] == kernel_id:
return {"root": s['notebook_dir'], "path": nn['notebook']['path'], "name": nn['notebook']['name']}
return {}
| 20,415
|
async def create_mute_role(bot, ctx):
"""Create the mute role for a guild"""
perms = discord.Permissions(
send_messages=False, read_messages=True)
mute_role = await ctx.guild.create_role(
name='Muted', permissions=perms,
reason='Could not find a muted role in the process of muting or unmuting.')
await bot.config.update_one({"_id": ctx.guild.id},
{'$set': {"mute_role": mute_role.id}}, upsert=True)
for channel in ctx.guild.channels:
try:
await channel.set_permissions(mute_role, read_messages=True, send_messages=False)
except discord.Forbidden:
continue
except discord.HTTPException:
continue
return mute_role
| 20,416
|
def build_request_data(useralias,
req_node):
"""build_request_data
:param useralias: user alias for directory name
:param req_node: simulated request node
"""
if "file" not in req_node:
return None
use_uniques = req_node["unique_names"]
use_file = req_node["file"].format(
useralias)
use_data = json.loads(open(use_file, 'r').read())
if use_uniques:
if "title" in use_data:
use_data["title"] = "{}_{}".format(
use_data["title"],
str(uuid.uuid4()))
if "full_file" in use_data:
use_data["full_file"] = \
use_data["full_file"].format(
str(uuid.uuid4()))
if "clean_file" in use_data:
use_data["clean_file"] = \
use_data["clean_file"].format(
str(uuid.uuid4()))
if "csv_file" in use_data:
use_data["csv_file"] = \
use_data["csv_file"].format(
str(uuid.uuid4()))
if "meta_file" in use_data:
use_data["meta_file"] = \
use_data["meta_file"].format(
str(uuid.uuid4()))
if "meta_suffix" in use_data:
use_data["meta_suffix"] = \
use_data["meta_suffix"].format(
str(uuid.uuid4()))
return use_data
| 20,417
|
def main():
"""
Entry point
"""
parser = argparse.ArgumentParser(COMMAND)
parser.add_argument(
"dbt_dir",
type=str,
help="dbt root directory")
parser.add_argument(
"-b",
"--backup",
action="store_true",
help="When set, take a back up of existing schema.yml and docs.md")
args = parser.parse_args()
_run(args.dbt_dir, args.backup)
| 20,418
|
def _get_stmt_lists(self):
"""
Returns a tuple of the statement lists contained in this `ast.stmt`
node. This method should only be called by an `ast.stmt` node.
"""
if self.is_simple():
return ()
elif self.is_body():
return (self.body,)
elif self.is_body_orelse():
return (self.body, self.orelse)
elif self.is_body_finally():
return (self.body, self.finalbody)
else:
# Every statement has to be simple or complex.
assert(False)
| 20,419
|
def get_article(name):
"""a general function to get an article, returns None if doesn't exist
"""
article = None
if name is not None:
try:
article = Article.objects.get(name=name)
except Article.DoesNotExist:
pass
return article
| 20,420
|
def _CreateNginxConfigMapDir():
"""Returns a TemporaryDirectory containing files in the Nginx ConfigMap."""
if FLAGS.nginx_conf:
nginx_conf_filename = FLAGS.nginx_conf
else:
nginx_conf_filename = (
data.ResourcePath('container/kubernetes_nginx/http.conf'))
temp_dir = tempfile.TemporaryDirectory()
config_map_filename = os.path.join(temp_dir.name, 'default')
shutil.copyfile(nginx_conf_filename, config_map_filename)
return temp_dir
| 20,421
|
def abbreviateLab(lab):
"""Lab names are very long and sometimes differ by punctuation or typos. Abbreviate for easier comparison."""
labAbbrev = apostropheSRe.sub('', lab)
labAbbrev = firstLetterRe.sub(r'\1', labAbbrev, count=0)
labAbbrev = spacePunctRe.sub('', labAbbrev, count=0)
return labAbbrev
| 20,422
|
def backproject(depth, intrinsics, instance_mask):
""" Back-projection, use opencv camera coordinate frame.
"""
cam_fx = intrinsics[0, 0]
cam_fy = intrinsics[1, 1]
cam_cx = intrinsics[0, 2]
cam_cy = intrinsics[1, 2]
non_zero_mask = (depth > 0)
final_instance_mask = np.logical_and(instance_mask, non_zero_mask)
idxs = np.where(final_instance_mask)
z = depth[idxs[0], idxs[1]]
x = (idxs[1] - cam_cx) * z / cam_fx
y = (idxs[0] - cam_cy) * z / cam_fy
pts = np.stack((x, y, z), axis=1)
return pts, idxs
| 20,423
|
def if_any(
_data,
*args,
_names=None,
_context=None,
**kwargs,
):
"""Apply the same predicate function to a selection of columns and combine
the results True if any element is True.
See Also:
[`across()`](datar.dplyr.across.across)
"""
if not args:
args = (None, None)
elif len(args) == 1:
args = (args[0], None)
_cols, _fns, *args = args
_data = _context.meta.get("input_data", _data)
return IfAny(
_data,
_cols,
_fns,
_names,
args,
kwargs,
).evaluate(_context)
| 20,424
|
def get_inpgen_para_from_xml(inpxmlfile, inpgen_ready=True):
"""
This routine returns an python dictionary produced from the inp.xml
file, which can be used as a calc_parameters node by inpgen.
Be aware that inpgen does not take all information that is contained in an inp.xml file
    :param inpxmlfile: an xml etree of an inp.xml file
    :param inpgen_ready: Bool, return a dict which can be given to inpgen while setting atoms
    :return new_parameters: A dict, which will lead to the same inp.xml (in case other defaults,
                            which cannot be controlled by the inpgen input, were changed)
"""
# TODO: convert econfig
# TODO: parse kpoints, somehow count is bad (if symmetry changes), mesh is not known, path cannot be specified
    # Disclaimer: this routine needs some xpath expressions. These are hardcoded here,
    # therefore maintenance might be needed. If you want to circumvent this, you have
# to get all the paths from somewhere.
#######
# all hardcoded xpaths used and attributes names:
# input
film_xpath = '/fleurInput/atomGroups/atomGroup/filmPos/' # check for film pos
# atom, for each species\
species_xpath = '/fleurInput/atomSpecies/species'
atom_id_xpath = '' # is reconstruction possible at all now?
atom_z_xpath = '@atomicNumber'
atom_rmt_xpath = 'mtSphere/@radius'
atom_dx_xpath = 'mtSphere/@logIncrement'
atom_jri_xpath = 'mtSphere/@gridPoints'
atom_lmax_xpath = 'atomicCutoffs/@lmax'
atom_lnosph_xpath = 'atomicCutoffs/@lnonsphr'
#atom_ncst_xpath = '@coreStates'
atom_econfig_xpath = 'electronConfig' # converting todo
atom_bmu_xpath = '@magMom'
atom_lo_xpath = 'lo' # converting todo
atom_element_xpath = '@element'
atom_name_xpath = '@name'
# comp
jspins_xpath = 'calculationSetup/magnetism/@jspins'
frcor_xpath = 'calculationSetup/coreElectrons/@frcor'
ctail_xpath = 'calculationSetup/coreElectrons/@ctail'
kcrel_xpath = 'calculationSetup/coreElectrons/@kcrel'
gmax_xpath = 'calculationSetup/cutoffs/@Gmax'
gmaxxc_xpath = 'calculationSetup/cutoffs/@GmaxXC'
kmax_xpath = 'calculationSetup/cutoffs/@Kmax'
# exco
exco_xpath = 'xcFunctional/@name'
# film
# soc
l_soc_xpath = '//calculationSetup/soc/@l_soc'
theta_xpath = '//calculationSetup/soc/@theta'
phi_xpath = '//calculationSetup/soc/@phi'
# qss
# kpt
title_xpath = '/fleurInput/comment/text()' # text
########
new_parameters = {}
#print('parsing inp.xml without XMLSchema')
#tree = etree.parse(inpxmlfile)
tree = inpxmlfile
root = tree.getroot()
# Create the cards
    # &input # most things are not needed for AiiDA here, or we ignore them for now.
# film is set by the plugin depended on the structure
# symor per default = False? to avoid input which fleur can't take
# &comp
# attrib = get_xml_attribute(
comp_dict = {}
comp_dict = set_dict_or_not(comp_dict, 'jspins', convert_to_int(eval_xpath(root, jspins_xpath), suc_return=False))
comp_dict = set_dict_or_not(comp_dict, 'frcor', convert_from_fortran_bool(eval_xpath(root, frcor_xpath)))
comp_dict = set_dict_or_not(comp_dict, 'ctail', convert_from_fortran_bool(eval_xpath(root, ctail_xpath)))
comp_dict = set_dict_or_not(comp_dict, 'kcrel', eval_xpath(root, kcrel_xpath))
comp_dict = set_dict_or_not(comp_dict, 'gmax', convert_to_float(eval_xpath(root, gmax_xpath), suc_return=False))
comp_dict = set_dict_or_not(comp_dict, 'gmaxxc', convert_to_float(eval_xpath(root, gmaxxc_xpath), suc_return=False))
comp_dict = set_dict_or_not(comp_dict, 'kmax', convert_to_float(eval_xpath(root, kmax_xpath), suc_return=False))
new_parameters['comp'] = comp_dict
# &atoms
species_list = eval_xpath2(root, species_xpath)
for i, species in enumerate(species_list):
atom_dict = {}
atoms_name = 'atom{}'.format(i)
atom_z = convert_to_int(eval_xpath(species, atom_z_xpath), suc_return=False)
atom_rmt = convert_to_float(eval_xpath(species, atom_rmt_xpath), suc_return=False)
atom_dx = convert_to_float(eval_xpath(species, atom_dx_xpath), suc_return=False)
atom_jri = convert_to_int(eval_xpath(species, atom_jri_xpath), suc_return=False)
atom_lmax = convert_to_int(eval_xpath(species, atom_lmax_xpath), suc_return=False)
atom_lnosph = convert_to_int(eval_xpath(species, atom_lnosph_xpath), suc_return=False)
#atom_ncst = convert_to_int(eval_xpath(species, atom_ncst_xpath), suc_return=False)
atom_econfig = eval_xpath(species, atom_econfig_xpath)
atom_bmu = convert_to_float(eval_xpath(species, atom_bmu_xpath), suc_return=False)
atom_lo = eval_xpath(species, atom_lo_xpath)
atom_element = eval_xpath(species, atom_element_xpath)
atom_name_2 = eval_xpath(species, atom_name_xpath)
if not inpgen_ready:
atom_dict = set_dict_or_not(atom_dict, 'z', atom_z)
#atom_dict = set_dict_or_not(atom_dict, 'name', atom_name_2)
#atom_dict = set_dict_or_not(atom_dict, 'ncst', atom_ncst) (deprecated)
atom_dict = set_dict_or_not(atom_dict, 'rmt', atom_rmt)
atom_dict = set_dict_or_not(atom_dict, 'dx', atom_dx)
atom_dict = set_dict_or_not(atom_dict, 'jri', atom_jri)
atom_dict = set_dict_or_not(atom_dict, 'lmax', atom_lmax)
atom_dict = set_dict_or_not(atom_dict, 'lnonsph', atom_lnosph)
atom_dict = set_dict_or_not(atom_dict, 'econfig', atom_econfig)
atom_dict = set_dict_or_not(atom_dict, 'bmu', atom_bmu)
if atom_lo is not None:
atom_dict = set_dict_or_not(atom_dict, 'lo', convert_fleur_lo(atom_lo))
atom_dict = set_dict_or_not(atom_dict, 'element', '{}'.format(atom_element))
new_parameters[atoms_name] = atom_dict
# &soc
attrib = convert_from_fortran_bool(eval_xpath(root, l_soc_xpath))
theta = convert_to_float(eval_xpath(root, theta_xpath), suc_return=False)
phi = convert_to_float(eval_xpath(root, phi_xpath), suc_return=False)
if attrib:
new_parameters['soc'] = {'theta': theta, 'phi': phi}
# &kpt
#attrib = convert_from_fortran_bool(eval_xpath(root, l_soc_xpath))
#theta = eval_xpath(root, theta_xpath)
#phi = eval_xpath(root, phi_xpath)
# if kpt:
# new_parameters['kpt'] = {'theta' : theta, 'phi' : phi}
# # ['nkpt', 'kpts', 'div1', 'div2', 'div3', 'tkb', 'tria'],
# title
title = eval_xpath(root, title_xpath) # text
if title:
new_parameters['title'] = title.replace('\n', '').strip()
# &exco
#TODO, easy
exco_dict = {}
exco_dict = set_dict_or_not(exco_dict, 'xctyp', eval_xpath(root, exco_xpath))
# 'exco' : ['xctyp', 'relxc'],
new_parameters['exco'] = exco_dict
# &film
# TODO
# &qss
# TODO
# lattice, not supported?
return new_parameters
| 20,425
|
def confirm_revocation(cert):
"""Confirm revocation screen.
:param cert: certificate object
:type cert: :class:
:returns: True if user would like to revoke, False otherwise
:rtype: bool
"""
return util(interfaces.IDisplay).yesno(
"Are you sure you would like to revoke the following "
"certificate:{0}{cert}This action cannot be reversed!".format(
os.linesep, cert=cert.pretty_print()))
| 20,426
|
def main(config_path):
"""main entry point, load and validate config and call generate"""
with open(config_path) as handle:
config = json.load(handle)
http_config = config.get("http", {})
misc_config = config.get("misc", {})
data_config = config.get("data", {})
logger.debug("http config: {0}\nmisc config: {1}\ndata config: {2}".format(http_config, misc_config, data_config))
interval_ms = misc_config.get("interval_ms", 5000)
verbose = misc_config.get("verbose", False)
host = http_config.get("host", "127.0.0.1")
port = http_config.get("port", 8083)
token = http_config.get("token", "KkFSogMUNDcKd5M30KzW")
file_path = data_config.get("file_path")
# columns = data_config.get("columns").split(",")
# token needs to be added in generate function if ThingsBoard is to be included
generate(host, port, token, loaddata(file_path), interval_ms, verbose)
| 20,427
|
def Gaussian(y, model, yerr):
"""Returns the loglikelihood for a Gaussian distribution.
In this calculation, it is assumed that the parameters
are true, and the loglikelihood that the data is drawn from
the distribution established by the parameters is calculated
Parameters
----------
model : array_like
theoretical model data to be compared against
y : array_like
data points
yerr : standard deviations on individual data points,
assumed to be gaussian
Returns
-------
float
loglikelihood for the data."""
inv_sigma2 = 1.0/(yerr**2.0)
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
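# Hedged usage sketch (relies on the module's numpy import as np): the
# log-likelihood peaks when the model reproduces the data exactly.
y_demo = np.array([1.0, 2.0, 3.0])
yerr_demo = np.array([0.5, 0.5, 0.5])
assert Gaussian(y_demo, y_demo, yerr_demo) > Gaussian(y_demo, y_demo + 0.1, yerr_demo)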
| 20,428
|
def clean_us_demographics(us_demographics_spark, spark_session):
"""
Clean data from us_demographics
Args:
        us_demographics_spark (object): Pyspark dataframe object
spark_session (object): Pyspark session
Returns:
(object): Pyspark dataframe with cleaned data
"""
spark = spark_session
us_demographics_spark.createOrReplaceTempView('us_demographics')
dum = spark.sql("""
SELECT City, State, cast(`Median Age` as float) as Median_Age, cast(`Male Population` as int) as Male_Population,
cast(`Female Population` as int) as Female_Population, cast(`Total Population` as int) as Total_Population,
cast(`Number of Veterans` as int) as Number_of_Veterans, cast(`Foreign-born` as int) as Foregin_born,
cast(`Average Household Size` as float) as Average_Household_Size, `State Code` as State_Code,Race, cast(Count as int)
FROM us_demographics
""")
us_demographics_spark_cleaned = dum.dropDuplicates()
us_demographics_spark_cleaned = us_demographics_spark_cleaned.na.drop()
us_demographics_spark_race = us_demographics_spark_cleaned.groupBy(['City','State']).pivot("Race").agg(F.first("Count"))
us_demographics_spark_race = us_demographics_spark_race.select('City', 'State', F.col('American Indian and Alaska Native').alias('American_Indian_and_Alaska_Native'),
'Asian', F.col('Black or African-American').alias('Black_or_African_American'), F.col('Hispanic or Latino').alias('Hispanic_or_Latino'), 'White')
us_demographics_spark_cleaned = us_demographics_spark_cleaned.drop('Race', 'Count')
us_demographics_spark_cleaned = us_demographics_spark_cleaned.dropDuplicates()
us_demographics_spark_cleaned = us_demographics_spark_cleaned.join(us_demographics_spark_race, ['State', 'City'])
us_demographics_spark_cleaned = us_demographics_spark_cleaned.fillna(
{'American_Indian_and_Alaska_Native':0,
'Asian':0,
'Black_or_African_American':0,
'Hispanic_or_Latino':0,
'White':0})
us_demographics_spark_cleaned = us_demographics_spark_cleaned.orderBy(['City','State'])
return us_demographics_spark_cleaned
| 20,429
|
def parse_date(date=None):
"""
Parse a string in YYYY-MM-DD format into a datetime.date object.
Throws ValueError if input is invalid
:param date: string in YYYY-MM-DD format giving a date
:return: a datetime.date object corresponding to the date given
"""
if date is None:
raise ValueError
fields = date.split('-')
if len(fields) != 3:
raise ValueError
return datetime.date(year=int(fields[0]),
month=int(fields[1]),
day=int(fields[2]))
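# Hedged usage sketch (relies on the module's existing import of datetime):
# well-formed YYYY-MM-DD strings parse, anything else raises ValueError.
assert parse_date("2021-03-05") == datetime.date(2021, 3, 5)
try:
    parse_date("2021/03/05")  # wrong separator: does not split into three dash-separated fields
except ValueError:
    pass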
| 20,430
|
def torch_save(path, model):
"""Function to save torch model states
:param str path: file path to be saved
:param torch.nn.Module model: torch model
"""
if hasattr(model, 'module'):
torch.save(model.module.state_dict(), path)
else:
torch.save(model.state_dict(), path)
| 20,431
|
def status():
""" Incoming status handler: forwarded by ForwardServerProvider """
req = jsonex_loads(request.get_data())
status = g.provider._receive_status(req['status'])
return {'status': status}
| 20,432
|
def cli(env, identifier, uri, ibm_api_key):
"""Export an image to object storage.
The URI for an object storage object (.vhd/.iso file) of the format:
swift://<objectStorageAccount>@<cluster>/<container>/<objectPath>
or cos://<regionName>/<bucketName>/<objectPath> if using IBM Cloud
Object Storage
"""
image_mgr = SoftLayer.ImageManager(env.client)
image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
result = image_mgr.export_image_to_uri(image_id, uri, ibm_api_key)
if not result:
raise exceptions.CLIAbort("Failed to export Image")
| 20,433
|
def test_TimeSeriesEvent():
"""Test basic getters and setters."""
event = ktk.TimeSeriesEvent()
event.time = 1
event.name = 'one'
assert event.time == 1
assert event.name == 'one'
| 20,434
|
def get_feature_extractor_info():
"""Return tuple of pretrained feature extractor and its best-input image size for the extractor"""
return get_pretrained_feature_extractor(), K_MODEL_IMAGE_SIZE
| 20,435
|
def _alembic_connect(db_path: Path, enforce_foreign_keys=True) -> Iterator[Config]:
"""Context manager to return an instance of an Alembic configuration.
The profiles's database connection is added in the `attributes` property, through which it can then also be
retrieved, also in the `env.py` file, which is run when the database is migrated.
"""
with create_sqla_engine(db_path, enforce_foreign_keys=enforce_foreign_keys).connect() as connection:
config = _alembic_config()
config.attributes['connection'] = connection # pylint: disable=unsupported-assignment-operation
def _callback(step: MigrationInfo, **kwargs): # pylint: disable=unused-argument
"""Callback to be called after a migration step is executed."""
from_rev = step.down_revision_ids[0] if step.down_revision_ids else '<base>'
MIGRATE_LOGGER.report(f'- {from_rev} -> {step.up_revision_id}')
config.attributes['on_version_apply'] = _callback # pylint: disable=unsupported-assignment-operation
yield config
| 20,436
|
def compare_flowcorr( self, blocks=False ):
"""
Plots the flowcorr against the slopes data.
Creates two files: a 2d histogram and a point map
if blocks is set to true, then it breaks the result down by block and writes a unified HTML table
"""
if not ( hasattr( self, 'flowcorr') and hasattr( self, 'slopes' ) ):
self.annotate( 'Cannot compare slopes and flowcorr', 0 )
return
if blocks and isinstance( self, ( ECC_Analysis, Thumbnail ) ):
# Setup the limits for each block
lims = []
for x in np.arange( 0, self.chiptype.chipC, self.macroC ):
for y in np.arange( 0, self.chiptype.chipR, self.macroR ):
                lims.append( ( y, y+self.macroR, x, x+self.macroC) )
# Setup directory to save each file
dirname = 'slopes_flowcorr'
system.makedir( os.path.join( self.analysisdir, dirname ) )
else:
lims = [ ( None, None, None, None ) ]
dirname = ''
# Calculate the medians only once
flowcorr_norm = np.median( self.flowcorr )
slopes_norm = np.median( self.slopes )
for l in lims:
# Get the data range
xdata = self.flowcorr[ l[0]:l[1], l[2]:l[3] ].flatten() / flowcorr_norm
ydata = self.slopes[ l[0]:l[1], l[2]:l[3] ].flatten() / slopes_norm
# Calculate the blockname
if blocks:
blockname = 'block_X%i_Y%i_' % ( l[2], l[0] )
else:
blockname = ''
# Make the point plot
filename = '%s/%s/%s%s.png' % ( self.analysisdir, dirname, blockname, 'slopes_flowcorr_raw' )
f = plt.figure( facecolor='w' )
plt.plot( xdata, ydata, '.', markersize=1 )
plt.xlabel( 'flowcorr/median' )
plt.ylabel( 'slopes/median' )
plt.xlim( (0,3) )
plt.ylim( (0,3) )
f.savefig( filename )
plt.close( f )
filename = '%s/%s/%s%s.png' % ( self.analysisdir, dirname, blockname, 'slopes_flowcorr' )
bins = np.arange(0,2.01,0.05)
H, x, y = np.histogram2d( ydata, xdata, bins=bins )
f = plt.figure()
extent = [0, 2, 0, 2]
plt.imshow( H, origin='lower', aspect='equal', interpolation='nearest', extent=extent )
plt.xlabel( 'flowcorr/median' )
plt.ylabel( 'slopes/median' )
f.savefig( filename )
plt.close( f )
if blocks:
self._make_slopes_flowcorr_html()
| 20,437
|
def nx_find_connected_limited(graph, start_set, end_set, max_depth=3):
"""Return the neurons in end_set reachable from start_set with limited depth."""
reverse_graph = graph.reverse()
reachable = []
for e in end_set:
preorder_nodes = list(
(
networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes(
reverse_graph, source=e, depth_limit=max_depth
)
)
)
for s in start_set:
if s in preorder_nodes:
reachable.append(e)
break
return reachable
| 20,438
|
def hexbyte_2integer_normalizer(first_int_byte, second_int_byte):
    """Function to normalize two integer bytes to a single integer
    Transform two integer bytes (0 to 255 each) to their two-digit hex values and
    concatenate them into a single integer
    Parameters
    ----------
    first_int_byte, second_int_byte : int
        integer values to normalize (0 to 255)
    Returns
    -------
    integer: int
        Single normalized integer
    """
    first_hex = f'{first_int_byte:02x}'
    second_hex = f'{second_int_byte:02x}'
    hex_string = f'{first_hex}{second_hex}'
    integer = int(hex_string, 16)
    return integer
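# Hedged usage sketch: concatenating the two hex bytes is equivalent to shifting
# the first byte up by eight bits, i.e. (first << 8) | second.
assert hexbyte_2integer_normalizer(0x12, 0x34) == 0x1234
assert hexbyte_2integer_normalizer(0x00, 0xFF) == 0x00FF
assert hexbyte_2integer_normalizer(0xFF, 0x00) == 0xFF00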
| 20,439
|
def wrapAngle(angle):
""" Ensures angle is between -360 and 360
arguments:
angle - float angle that you want to be between -360 and 360
returns:
float - angle between -360 and 360
"""
printDebug("In wrapAngle, angle is " + str(angle), DEBUG_INFO)
if angle >= 0:
return angle % 360
else:
return angle % -360
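# Hedged usage sketch (assumes the module's printDebug/DEBUG_INFO helpers are in
# scope): positive angles wrap into [0, 360), negative ones into (-360, 0].
assert wrapAngle(370) == 10
assert wrapAngle(-370) == -10
assert wrapAngle(360) == 0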
| 20,440
|
def batch(iterable, batch_size):
"""Yields lists by batch"""
b = []
for i, t in enumerate(iterable):
b.append(t)
if (i + 1) % batch_size == 0:
yield b
b = []
if len(b) > 0:
yield b
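# Hedged usage sketch: batch() lazily groups any iterable into lists of
# batch_size items, yielding a shorter final list for the remainder.
assert list(batch(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
assert list(batch([], 3)) == []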
| 20,441
|
def _save_first_checkpoint(keras_model, custom_objects, config):
"""Save first checkpoint for the keras Estimator.
Args:
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
config: Estimator config.
Returns:
The path where keras model checkpoint is saved.
"""
# save checkpoint into subdirectory to allow warm start
keras_model_dir = os.path.join(config.model_dir, 'keras')
# Load weights and save to checkpoint if there is no checkpoint
latest_path = checkpoint_management.latest_checkpoint(keras_model_dir)
if not latest_path:
keras_weights = None
if _any_weight_initialized(keras_model):
keras_weights = keras_model.get_weights()
if not gfile.IsDirectory(keras_model_dir):
gfile.MakeDirs(keras_model_dir)
with ops.Graph().as_default():
random_seed.set_random_seed(config.tf_random_seed)
training_util.create_global_step()
model = _clone_and_build_model(model_fn_lib.ModeKeys.TRAIN, keras_model,
custom_objects)
# save to checkpoint
with session.Session(config=config.session_config) as sess:
if keras_weights:
model.set_weights(keras_weights)
# Make update ops and initialize all variables.
if not model.train_function:
# pylint: disable=protected-access
model._make_train_function()
K._initialize_variables(sess)
# pylint: enable=protected-access
saver = saver_lib.Saver()
latest_path = os.path.join(keras_model_dir, 'keras_model.ckpt')
saver.save(sess, latest_path)
return latest_path
| 20,442
|
def query_pubmed_mod_updates():
"""
:return:
"""
populate_alliance_pmids()
# query_pmc_mgi() # find pmc articles for mice and 9 journals, get pmid mappings and list of pmc without pmid
# download_pmc_without_pmid_mgi() # download pmc xml for pmc without pmid and find their article type
query_mods() # query pubmed for mod references
| 20,443
|
def build_reference_spectrum_list_from_config_file(config):
"""
Read reference spectrum file glob(s) from configuration file to create
and return a list of ReferenceSpectrum instances.
:param config: configparser instance
:return: list of ReferenceSpectrum instances
"""
log = logging.getLogger(name=__name__)
references = config.items("references")
log.debug(references)
reference_spectrum_list, _ = ReferenceSpectrum.read_all(
[
os.path.expanduser(reference_file_glob)
for reference_file_glob, _ in references
]
)
if len(reference_spectrum_list) == 0:
raise ConfigurationFileError(
'no reference spectrum files were found using globs "{}"'.format(references)
)
else:
return reference_spectrum_list
| 20,444
|
def make_window(signal, sample_spacing, which=None, alpha=4):
"""Generate a window function to be used in PSD analysis.
Parameters
----------
signal : `numpy.ndarray`
signal or phase data
sample_spacing : `float`
spacing of samples in the input data
which : `str,` {'welch', 'hann', None}, optional
which window to produce. If auto, attempts to guess the appropriate
window based on the input signal
alpha : `float`, optional
alpha value for welch window
Notes
-----
For 2D welch, see:
Power Spectral Density Specification and Analysis of Large Optical Surfaces
E. Sidick, JPL
Returns
-------
`numpy.ndarray`
window array
"""
s = signal.shape
if which is None:
# attempt to guess best window
ysamples = int(round(s[0] * 0.02, 0))
xsamples = int(round(s[1] * 0.02, 0))
corner1 = signal[:ysamples, :xsamples] == 0
corner2 = signal[-ysamples:, :xsamples] == 0
corner3 = signal[:ysamples, -xsamples:] == 0
corner4 = signal[-ysamples:, -xsamples:] == 0
if corner1.all() and corner2.all() and corner3.all() and corner4.all():
# four corners all "black" -- circular data, Welch window is best
# looks wrong but 2D welch takes x, y while indices are y, x
y, x = (e.arange(N) - (N / 2) for N in s)
which = window_2d_welch(x, y)
else:
# if not circular, square data; use Hanning window
y, x = (e.hanning(N) for N in s)
which = e.outer(y, x)
else:
if type(which) is str:
# known window type
wl = which.lower()
if wl == 'welch':
y, x = (e.arange(N) - (N / 2) for N in s)
which = window_2d_welch(x, y, alpha=alpha)
elif wl in ('hann', 'hanning'):
y, x = (e.hanning(N) for N in s)
which = e.outer(y, x)
else:
raise ValueError('unknown window type')
return which
| 20,445
|
def set_units(
df: pd.DataFrame, units: Dict[str, Union[pint.Unit, str]]
) -> pd.DataFrame:
"""Make dataframe unit-aware. If dataframe is already unit-aware, convert to specified
units. If not, assume values are in specified unit.
Parameters
----------
df : pd.DataFrame
units : Dict[str, Union[pint.Unit, str]]
key = column name, value = unit to set to that column
Returns
-------
pd.DataFrame
Same as input dataframe, but with specified units.
"""
df = df.copy() # don't change incoming dataframe
for name, unit in units.items():
df[name] = set_unit(df[name], unit)
return df
| 20,446
|
def makeTokensTable(medidasTokens__instance, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="tokensInline.tex",tag=None):
    """Table of token measures (TTM)"""
tms=medidasTokens__instance
mvars=("tokens",
"tokens_diff",
"knownw",
"knownw_diff",
"stopw",
"punct",
"contract")
tms_=[[tms[j][i] for j in range(4)] for i in mvars]
labelsh=("","g.","p.","i.","h.")
labels=(r"$tokens$",
r"$tokens_{\%}$",
r"$tokens \neq$",
r"$\frac{knownw}{tokens}$",
r"$\frac{knownw \neq}{knownw}$",
r"$\frac{stopw}{knownw}$",
r"$\frac{punct}{tokens}$",
r"$\frac{contrac}{tokens}$",
)
caption=r"""tokens in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary,
{{\bf h.}} for hubs)."""
data=tms_
ntoks=data[0]
ntoks_=perc_(ntoks)
data=n.array(data[1:])
data=n.vstack((ntoks,ntoks_,data*100))
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1],[1],[2,3,5,7,8])
| 20,447
|
def address_repr(buf, reverse: bool = True, delimit: str = "") -> str:
"""Convert a buffer into a hexlified string."""
order = range(len(buf) - 1, -1, -1) if reverse else range(len(buf))
return delimit.join(["%02X" % buf[byte] for byte in order])
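# Hedged usage sketch: by default the buffer is hexlified from the last byte to
# the first (reverse=True), and an optional delimiter joins the byte strings.
assert address_repr(b"\x12\x34\xAB") == "AB3412"
assert address_repr(b"\x12\x34\xAB", reverse=False, delimit=":") == "12:34:AB"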
| 20,448
|
def _highlight(line1, line2):
"""Returns the sections that should be bolded in the given lines.
Returns:
two tuples. Each tuple indicates the start and end of the section
of the line that should be bolded for line1 and line2 respectively.
"""
start1 = start2 = 0
match = re.search(r'\S', line1) # ignore leading whitespace
if match:
start1 = match.start()
match = re.search(r'\S', line2)
if match:
start2 = match.start()
length = min(len(line1), len(line2)) - 1
bold_start1 = start1
bold_start2 = start2
while (bold_start1 <= length and bold_start2 <= length and
line1[bold_start1] == line2[bold_start2]):
bold_start1 += 1
bold_start2 += 1
match = re.search(r'\s*$', line1) # ignore trailing whitespace
bold_end1 = match.start() - 1
match = re.search(r'\s*$', line2)
bold_end2 = match.start() - 1
while (bold_end1 >= bold_start1 and bold_end2 >= bold_start2 and
line1[bold_end1] == line2[bold_end2]):
bold_end1 -= 1
bold_end2 -= 1
if bold_start1 - start1 > 0 or len(line1) - 1 - bold_end1 > 0:
return (bold_start1 + 1, bold_end1 + 2), (bold_start2 + 1, bold_end2 + 2)
return None, None
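# Hedged usage sketch: the returned 1-based spans bracket the differing middle
# section of each line; lines that differ from the first non-whitespace character
# through the last character yield (None, None).
assert _highlight("foo bar baz", "foo qux baz") == ((5, 8), (5, 8))
assert _highlight("aaa", "bbb") == (None, None)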
| 20,449
|
def test_memtable_flush_writer_blocked():
"""verify flush writer blocked"""
analyzer = recs.Engine()
stage = recs.Stage(
name="MemtableFlushWriter",
pending=0,
active=0,
local_backpressure=0,
completed=0,
blocked=1,
all_time_blocked=0,
)
reason, rec = analyzer.analyze_stage(stage)
assert rec == "lower memtable_cleanup_threshold in cassandra.yaml"
assert reason == "memtable flush writers blocked greater than zero"
stage.blocked = 0
reason, rec = analyzer.analyze_stage(stage)
assert not rec
assert not reason
| 20,450
|
def get_comrec_build(pkg_dir, build_cmd=build_py):
""" Return extended build command class for recording commit
The extended command tries to run git to find the current commit, getting
the empty string if it fails. It then writes the commit hash into a file
in the `pkg_dir` path, named ``COMMIT_INFO.txt``.
In due course this information can be used by the package after it is
installed, to tell you what commit it was installed from if known.
To make use of this system, you need a package with a COMMIT_INFO.txt file -
e.g. ``myproject/COMMIT_INFO.txt`` - that might well look like this::
# This is an ini file that may contain information about the code state
[commit hash]
        # The line below may contain a valid hash if it has been substituted
        # during 'git archive'
        archive_subst_hash=$Format:%h$
# This line may be modified by the install process
install_hash=
The COMMIT_INFO file above is also designed to be used with git substitution
- so you probably also want a ``.gitattributes`` file in the root directory
of your working tree that contains something like this::
myproject/COMMIT_INFO.txt export-subst
That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git
    archive`` - useful in case someone makes such an archive - for example via
    the github 'download source' button.
Although all the above will work as is, you might consider having something
like a ``get_info()`` function in your package to display the commit
information at the terminal. See the ``pkg_info.py`` module in the nipy
package for an example.
"""
class MyBuildPy(build_cmd):
''' Subclass to write commit data into installation tree '''
def run(self):
build_cmd.run(self)
import subprocess
proc = subprocess.Popen('git rev-parse HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
repo_commit, _ = proc.communicate()
# Fix for python 3
repo_commit = str(repo_commit)
# We write the installation commit even if it's empty
cfg_parser = ConfigParser()
cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt'))
cfg_parser.set('commit hash', 'install_hash', repo_commit)
out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt')
cfg_parser.write(open(out_pth, 'wt'))
return MyBuildPy
| 20,451
|
def get_detected_objects_new(df, siglim=5, Terr_lim=3, Toffset=2000):
"""
Get a dataframe with only the detected objects.
:param df: A DataFrame such as one output by get_ccf_summary with N > 1
:param siglim: The minimum significance to count as detected
:param Terr_lim: The maximum number of standard deviations of (Measured - Actual) to allow for detected objects
:param Toffset: The absolute difference to allow between the true and measured temperature.
:return: A dataframe similar to df, but with fewer rows
"""
S = get_initial_uncertainty(df)
S['Tdiff'] = S.Tmeas - S.Tactual
mean, std = S.Tdiff.mean(), S.Tdiff.std()
detected = S.loc[(S.significance > siglim) & (S.Tdiff - mean < Terr_lim * std) & (abs(S.Tdiff) < Toffset)]
return pd.merge(detected[['Primary', 'Secondary']], df, on=['Primary', 'Secondary'], how='left')
| 20,452
|
def test_refresh_repositories(nexus_mock_client):
"""
Ensure the method retrieves latest repositories and sets the class
attribute.
"""
repositories = nexus_mock_client.repositories.raw_list()
x_repositories = nexus_mock_client._request.return_value._json
nexus_mock_client._request.assert_called_with('get', 'repositories')
assert repositories == x_repositories
| 20,453
|
def cmp(a, b):
"""
Python 3 does not have a cmp function, this will do the cmp.
:param a: first object to check
:param b: second object to check
    :return: -1, 0, or 1, following Python 2 cmp() semantics
"""
# convert to lower case for string comparison.
if a is None:
return -1
if type(a) is str and type(b) is str:
a = a.lower()
b = b.lower()
# if list has string element, convert string to lower case.
if type(a) is list and type(b) is list:
a = [x.lower() if type(x) is str else x for x in a]
b = [x.lower() if type(x) is str else x for x in b]
a.sort()
b.sort()
return (a > b) - (a < b)
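# Hedged usage sketch: mirrors Python 2 cmp() semantics, but case-insensitive for
# strings and for string elements inside lists (which are sorted before comparing).
assert cmp("Apple", "apple") == 0
assert cmp("apple", "banana") == -1
assert cmp([2, 1], [1, 2]) == 0
assert cmp(None, "anything") == -1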
| 20,454
|
def pe(cmd, shell=True):
"""
Print and execute command on system
"""
ret = []
for line in execute(cmd, shell=shell):
ret.append(line)
print(line, end="")
return ret
| 20,455
|
def create_photo(user_id, text: str, greencolor: bool): # color: tuple(R,G,B)
"""
:param user_id: int or str
:param text: str
:param greencolor: bool
        True = green (204, 255, 204)
        False = grey (240, 238, 237)
"""
color = (204, 255, 204)
if not greencolor:
color = (240, 238, 237)
fontname = os.path.join(dirs['font'], 'OpenSans-Regular.ttf')
fontsize = 14
font = ImageFont.truetype(fontname, fontsize)
preimg = Image.new('RGB', (2000, 1000), color)
text_draw = ImageDraw.Draw(preimg)
text_width, text_height = text_draw.multiline_textsize(text, font)
text_draw.multiline_text((10, 10), text, fill="black", font=font)
img = preimg.crop((0, 0, text_width + 20, text_height + 24))
path = os.path.join(dirs['images'], f'{user_id}.png')
img.save(path, "PNG")
return path
| 20,456
|
def mark_production_ready(config, incident, team, artifact, tag, url):
"""
Manually mark image as production ready.
"""
pierone_url = set_pierone_url(config, url)
registry = get_registry(pierone_url)
image = DockerImage(registry, team, artifact, tag)
if incident.startswith("INC-"):
# if it's a JIRA ticket, mark image as production ready in Pierone
api = PierOne(pierone_url)
api.mark_production_ready(image, incident)
else:
meta = DockerMeta()
meta.mark_production_ready(image, incident)
if team in ["ci", "automata", "torch"]:
click.echo("🧙 ", nl=False)
click.echo(
"Marked {} as `production_ready` due to incident {}.".format(
format_full_image_name(image), incident
)
)
| 20,457
|
def crop(image):
"""
    Method to crop out the unnecessary white parts of the image.
Inputs:
image (numpy array): Numpy array of the image label.
Outputs:
image (numpy array): Numpy array of the image label, cropped.
"""
image = ImageOps.invert(image)
imageBox = image.getbbox()
image = image.crop(imageBox)
return ImageOps.invert(image)
| 20,458
|
def test(dataset):
"""Test the solving algorithm against a dataset"""
reader = csv.reader(dataset)
total_problems = sum(1 for row in reader)
solved_problems = 0
dataset.seek(0)
print_progress(solved_problems, total_problems)
try:
for row in reader:
problem = row[0]
solution = row[1]
sudoku = Sudoku(problem)
sudoku = solve_sudoku(sudoku)
if str(sudoku) != solution:
raise SudokuSolvingError(
'Incorrect solution: {} != {}.'.format(
sudoku, solution))
solved_problems += 1
print_progress(solved_problems, total_problems)
print('\nDone!')
except SudokuSolvingError as error:
# We need two line breaks because of the progress bar
print('\n\n' + str(error))
| 20,459
|
def copy_param(code, target, source):
"""
Copy a parameter from source reg to preferred slot in the target reg.
For params in slot 0, this is just and add immediate.
For params in other slots, the source is rotated.
Note that other values in the source are copied, too.
"""
if source[SLOT] != 0:
code.add(spu.rotqbyi(target, source[REG], source[SLOT] * 4))
else:
code.add(spu.ai(target, source[REG], 0))
return
| 20,460
|
def all_pairs_normalized_distances(X):
"""
We can't really compute distances over incomplete data since
rows are missing different numbers of entries.
The next best thing is the mean squared difference between two vectors
(a normalized distance), which gets computed only over the columns that
two vectors have in common. If two vectors have no features in common
then their distance is infinity.
Parameters
----------
X : np.ndarray
Data matrix of shape (n_samples, n_features) with missing entries
marked using np.nan
Returns a (n_samples, n_samples) matrix of pairwise normalized distances.
"""
n_rows, n_cols = X.shape
    # matrix of mean squared difference between samples
D = np.ones((n_rows, n_rows), dtype="float32", order="C") * np.inf
# we can cheaply determine the number of columns that two rows share
# by taking the dot product between their finite masks
observed_elements = np.isfinite(X).astype(int)
n_shared_features_for_pairs_of_rows = np.dot(
observed_elements,
observed_elements.T)
no_overlapping_features_rows = n_shared_features_for_pairs_of_rows == 0
number_incomparable_rows = no_overlapping_features_rows.sum(axis=1)
row_overlaps_every_other_row = (number_incomparable_rows == 0)
row_overlaps_no_other_rows = number_incomparable_rows == n_rows
valid_rows_mask = ~row_overlaps_no_other_rows
valid_row_indices = np.where(valid_rows_mask)[0]
# preallocate all the arrays that we would otherwise create in the
# following loop and pass them as "out" parameters to NumPy ufuncs
diffs = np.zeros_like(X)
missing_differences = np.zeros_like(diffs, dtype=bool)
valid_rows = np.zeros(n_rows, dtype=bool)
ssd = np.zeros(n_rows, dtype=X.dtype)
for i in valid_row_indices:
x = X[i, :]
np.subtract(X, x.reshape((1, n_cols)), out=diffs)
np.isnan(diffs, out=missing_differences)
# zero out all NaN's
diffs[missing_differences] = 0
# square each difference
diffs **= 2
observed_counts_per_row = n_shared_features_for_pairs_of_rows[i]
if row_overlaps_every_other_row[i]:
# add up all the non-missing squared differences
diffs.sum(axis=1, out=D[i, :])
D[i, :] /= observed_counts_per_row
else:
np.logical_not(no_overlapping_features_rows[i], out=valid_rows)
# add up all the non-missing squared differences
diffs.sum(axis=1, out=ssd)
ssd[valid_rows] /= observed_counts_per_row[valid_rows]
D[i, valid_rows] = ssd[valid_rows]
return D
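# Hedged usage sketch (relies on the module's numpy import as np): distances are
# mean squared differences over the columns two rows both observe; rows with no
# shared observed columns stay at infinity.
X_demo = np.array([[1.0, 2.0, np.nan],
                   [1.0, 4.0, np.nan],
                   [np.nan, np.nan, 5.0]])
D_demo = all_pairs_normalized_distances(X_demo)
assert D_demo[0, 1] == 2.0      # ((1-1)**2 + (2-4)**2) / 2 shared columns
assert np.isinf(D_demo[0, 2])   # rows 0 and 2 observe disjoint columns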
| 20,461
|
def forward_pass(model, target_angle, mixed_data, conditioning_label, args):
"""
Runs the network on the mixed_data
with the candidate region given by voice
"""
target_pos = np.array([
FAR_FIELD_RADIUS * np.cos(target_angle),
FAR_FIELD_RADIUS * np.sin(target_angle)
])
data, _ = utils.shift_mixture(
torch.tensor(mixed_data).to(args.device), target_pos, args.mic_radius,
args.sr)
data = data.float().unsqueeze(0) # Batch size is 1
# Normalize input
data, means, stds = normalize_input(data)
# Run through the model
valid_length = model.valid_length(data.shape[-1])
delta = valid_length - data.shape[-1]
padded = F.pad(data, (delta // 2, delta - delta // 2))
output_signal = model(padded, conditioning_label)
output_signal = center_trim(output_signal, data)
output_signal = unnormalize_input(output_signal, means, stds)
output_voices = output_signal[:, 0] # batch x n_mics x n_samples
output_np = output_voices.detach().cpu().numpy()[0]
energy = librosa.feature.rms(output_np).mean()
return output_np, energy
| 20,462
|
async def get_event_by_code(code: str, db: AsyncSession) -> Event:
"""
Get an event by its code
"""
statement = select(Event).where(Event.code == code)
result = await db.execute(statement)
event: Optional[Event] = result.scalars().first()
if event is None:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail="invalid attendance code",
)
# Check that the code is still valid
with tracer.start_as_current_span("check-validity"):
now = datetime.now(tz=pytz.utc)
if not event.enabled or now < event.valid_from or now > event.valid_until:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="invalid code")
return event
| 20,463
|
def get(name):
"""Returns an OpDef for a given `name` or None if the lookup fails."""
with _sync_lock:
return _registered_ops.get(name)
| 20,464
|
def angle_detect_dnn(img, adjust=True):
"""
    Text orientation detection
"""
h, w = img.shape[:2]
ROTATE = [0, 90, 180, 270]
if adjust:
thesh = 0.05
xmin, ymin, xmax, ymax = int(thesh * w), int(thesh * h), w - int(thesh * w), h - int(thesh * h)
        img = img[ymin:ymax, xmin:xmax]  # crop away the image edges
inputBlob = cv2.dnn.blobFromImage(img,
scalefactor=1.0,
size=(224, 224),
swapRB=True,
mean=[103.939, 116.779, 123.68], crop=False);
angleNet.setInput(inputBlob)
pred = angleNet.forward()
index = np.argmax(pred, axis=1)[0]
return ROTATE[index]
| 20,465
|
def get_correct_line(df_decisions):
"""
The passed df has repeated lines for the same file (same chemin_source).
We take the most recent one.
:param df_decisions: Dataframe of decisions
:return: Dataframe without repeated lines (according to the chemin_source column)
"""
return df_decisions.sort_values('timestamp_modification').drop_duplicates('chemin_source', keep='last')
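# Hedged usage sketch (column values are illustrative): the most recent row per
# chemin_source wins after sorting by timestamp_modification.
import pandas as pd
_decisions_demo = pd.DataFrame({
    "chemin_source": ["a.xml", "a.xml", "b.xml"],
    "timestamp_modification": [1, 2, 1],
    "decision": ["old", "new", "only"],
})
assert sorted(get_correct_line(_decisions_demo)["decision"]) == ["new", "only"]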
| 20,466
|
def fix_1(lst1, lst2):
"""
Divide all of the elements in `lst1` by each element in `lst2`
and return the values in a list.
>>> fix_1([1, 2, 3], [0, 1])
[1.0, 2.0, 3.0]
>>> fix_1([], [])
[]
>>> fix_1([10, 20, 30], [0, 10, 10, 0])
[1.0, 2.0, 3.0, 1.0, 2.0, 3.0]
"""
out = []
for div in lst2:
for num in lst1:
try:
out.append(num / div) # add try-except block
except ZeroDivisionError:
pass
return out
| 20,467
|
def sleep_countdown(duration, print_step=2):
"""Sleep for certain duration and print remaining time in steps of print_step
Args:
duration (int): duration of timeout
print_step (int): steps to print countdown
Returns
None: Countdown in console
"""
for i in range(duration, 0, -print_step):
time.sleep(print_step)
sys.stdout.write(str(i - print_step) + " ")
sys.stdout.flush()
| 20,468
|
def user_wants_upload():
"""
Determines whether or not the user wants to upload the extension
:return: boolean
"""
choice = input("Do you want to upload your extension right now? :")
if "y" in choice or "Y" in choice:
return True
else:
return False
| 20,469
|
def has_genus_flag(df, genus_col="mhm_Genus", bit_col="mhm_HasGenus", inplace=False):
"""
Creates a bit flag: `mhm_HasGenus` where 1 denotes a recorded Genus and 0 denotes the contrary.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
genus_col : str, default="mhm_Genus"
The name of the column in the mosquito habitat mapper DataFrame that contains the genus records.
bit_col : str, default="mhm_HasGenus"
The name of the column which will store the generated HasGenus flag
inplace : bool, default=False
        Whether to perform the operation in place. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the HasGenus flag. If `inplace=True` it returns None.
"""
if not inplace:
df = df.copy()
df[bit_col] = (~pd.isna(df[genus_col].to_numpy())).astype(int)
if not inplace:
return df
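# Hedged usage sketch (relies on the module's pandas import as pd; genus names are
# illustrative): rows with a recorded genus get flag 1, missing values get 0.
_mhm_demo = pd.DataFrame({"mhm_Genus": ["Aedes", None, "Culex"]})
assert list(has_genus_flag(_mhm_demo)["mhm_HasGenus"]) == [1, 0, 1]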
| 20,470
|
def record_point_cloud(target, file_name):
"""Record a raw point cloud and print the number of received bytes.
If the record_to_file parameter is set with a path and a filename, the stream will be recorded to this file.
:param target: hostname or IP address of the device
:param file_name: file path to record to
"""
    device = blickfeld_scanner.scanner(target)  # Connect to the device
raw_stream = device.record_point_cloud_stream(file_name) # Create a raw point cloud stream object
while True:
raw_file = raw_stream.recv_bytes() # Receive a raw file bytes
print(f"Got {len(raw_file)} bytes")
raw_file = raw_stream.stop()
print(f"Got {len(raw_file)} bytes and stopped")
| 20,471
|
def interact_model(
model_name='124M',
seed=int(_seed()[:-2]),
nsamples=8,
batch_size=1,
length=None,
temperature=1,
top_k=50,
top_p=0.93,
models_dir='models',
):
"""
Interactively run the model
:model_name=124M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:nsamples=1 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
    :top_k=50 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 is a special
     setting meaning no restrictions. 40 generally is a good value.
    :top_p=0.93 : Float value for nucleus sampling; tokens are drawn from the
     smallest set whose cumulative probability exceeds top_p.
:models_dir : path to parent folder containing model subfolders
(i.e. contains the <model_name> folder)
"""
print(seed)
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = random.randint(9, 93)
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
output2 = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
output3 = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
saver.restore(sess, ckpt)
# print('seed: ' + str(seed))
#
k = 0
i = 0
count = 0
while True:
raw_text = input("> ")
while not raw_text:
raw_text = input("> ")
filename = str(seed) + str(calendar.timegm(time.gmtime())) + ".txt"
o = open("../TXT/TXZ/GEN/" + filename, 'w')
context_tokens = enc.encode(raw_text)
generated = 0
o.write("-----\n" + raw_text + "\n-----\n\n")
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
generated2 = 0
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
context_tokens2 = enc.encode(text)
o.write(raw_text + " " + text + "\n----")
print(raw_text + " " + text + "\n----")
for _ in range(nsamples // batch_size):
out2 = sess.run(output2, feed_dict={
context: [context_tokens2 for _ in range(batch_size)]
})[:, len(context_tokens2):]
for j in range(batch_size):
generated2 += 1
                            text2 = enc.decode(out2[j])
o.write(text + " " + text2 + "\n\n")
o.write("---\n\n")
o.flush()
o.write("-------\n\n\n\n")
o.flush()
raw_text = U2V(squished(raw_text))
raw_text_squished = raw_text
context_tokens = enc.encode(raw_text)
generated = 0
o.write("-----\n" + raw_text + "\n-----\n\n")
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
generated2 = 0
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
context_tokens2 = enc.encode(text)
o.write(raw_text + " " + text + "\n----")
print(raw_text + " " + text + "\n----")
for _ in range(nsamples // batch_size):
out2 = sess.run(output2, feed_dict={
context: [context_tokens2 for _ in range(batch_size)]
})[:, len(context_tokens2):]
for j in range(batch_size):
generated2 += 1
                            text2 = enc.decode(out2[j])
o.write(text + " " + text2 + "\n\n")
o.write("---\n\n")
o.flush()
o.write("-------\n\n\n\n")
o.flush()
twr = trxtwr(raw_text, 3)
twrstr = trxtwrstr(trxtwr(raw_text, 3))
raw_text = twrstr
context_tokens = enc.encode(raw_text)
generated = 0
o.write("-----\n" + raw_text + "\n-----\n\n")
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
generated2 = 0
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
context_tokens2 = enc.encode(text)
print(raw_text + " " + text + "\n----")
o.write(raw_text + " " + text + "\n----")
for _ in range(nsamples // batch_size):
out2 = sess.run(output2, feed_dict={
context: [context_tokens2 for _ in range(batch_size)]
})[:, len(context_tokens2):]
for j in range(batch_size):
generated2 += 1
                            text2 = enc.decode(out2[j])
o.write(text + " " + text2 + "\n\n")
o.write("---\n\n")
o.flush()
twr = trxtwr(raw_text_squished, 3)
twrstr = trxtwrstr(trxtwr(raw_text, 3))
mtwr = magicVVVDecTower(twr)
mtwrstr = trxtwrstr(mtwr)
newTwr = []
i = 0
for level in mtwr:
newLvl = level + twr[i]
i = i + 1
newTwr.append(newLvl)
raw_text = trxtwrstr(newTwr)
context_tokens = enc.encode(raw_text)
generated = 0
o.write("-----\n" + raw_text + "\n-----\n\n")
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
generated2 = 0
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
context_tokens2 = enc.encode(text)
print(raw_text + " " + text + "\n----")
o.write(raw_text + " " + text + "\n----")
for _ in range(nsamples // batch_size):
out2 = sess.run(output2, feed_dict={
context: [context_tokens2 for _ in range(batch_size)]
})[:, len(context_tokens2):]
for j in range(batch_size):
generated2 += 1
                            text2 = enc.decode(out2[j])
o.write(text + " " + text2 + "\n\n")
o.write("---\n\n")
o.flush()
o.write("-------\n\n\n\n")
o.flush()
o.close()
print("done")
| 20,472
|
def compass(
size: Tuple[float, float] = (4.0, 2.0),
layer: Layer = gf.LAYER.WG,
port_type: str = "electrical",
) -> Component:
"""Rectangular contact pad with centered ports on rectangle edges
(north, south, east, and west)
Args:
size: rectangle size
layer: tuple (int, int)
        port_type: port type assigned to the four edge ports
"""
c = gf.Component()
dx, dy = size
points = [
[-dx / 2.0, -dy / 2.0],
[-dx / 2.0, dy / 2],
[dx / 2, dy / 2],
[dx / 2, -dy / 2.0],
]
c.add_polygon(points, layer=layer)
c.add_port(
name="e1",
midpoint=[-dx / 2, 0],
width=dy,
orientation=180,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e2",
midpoint=[0, dy / 2],
width=dx,
orientation=90,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e3",
midpoint=[dx / 2, 0],
width=dy,
orientation=0,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e4",
midpoint=[0, -dy / 2],
width=dx,
orientation=-90,
layer=layer,
port_type=port_type,
)
c.auto_rename_ports()
return c
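# Hedged usage sketch (assumes gdsfactory is importable as gf, as in the function body;
# port naming can vary between gdsfactory versions).
pad = compass(size=(10.0, 5.0))
print(len(pad.ports))  # expected: 4 ports, one centred on each rectangle edge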
| 20,473
|
def get_service(vm, port):
"""Return the service for a given port."""
for service in vm.get('suppliedServices', []):
if service['portRange'] == port:
return service
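# Toy lookup sketch; the dict below only mimics the expected shape of the vm payload.
vm_example = {"suppliedServices": [{"portRange": "80", "name": "http"},
                                   {"portRange": "443", "name": "https"}]}
print(get_service(vm_example, "443"))  # expected: {'portRange': '443', 'name': 'https'}
print(get_service({}, "22"))           # expected: None (no suppliedServices key)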
| 20,474
|
def test_create_mutant_with_cache(binop_file, stdoutIO):
"""Change ast.Add to ast.Mult in a mutation including pycache changes."""
genome = Genome(source_file=binop_file)
# this target is the add_five() function, changing add to mult
target_idx = LocIndex(ast_class="BinOp", lineno=10, col_offset=11, op_type=ast.Add)
mutation_op = ast.Mult
mutant = genome.mutate(target_idx, mutation_op, write_cache=True)
# uses the redirection for stdout to capture the value from the final output of binop_file
with stdoutIO() as s:
exec(mutant.mutant_code)
assert int(s.getvalue()) == 25
tag = sys.implementation.cache_tag
expected_cfile = binop_file.parent / "__pycache__" / ".".join([binop_file.stem, tag, "pyc"])
assert mutant.src_file == binop_file
assert mutant.cfile == expected_cfile
assert mutant.src_idx == target_idx
| 20,475
|
def write_ef_tree_solution(ef, solution_directory_name,
scenario_tree_solution_writer=scenario_tree_solution_writer):
""" Write a tree solution directory, if available, to the solution_directory_name provided
Args:
ef : A Concrete Model of the Extensive Form (output of create_EF).
We assume it has already been solved.
        solution_directory_name : directory in which to write the solution
scenario_tree_solution_writer (optional) : custom scenario solution writer function
NOTE:
This utility is replicating WheelSpinner.write_tree_solution for EF
"""
if not haveMPI or (global_rank==0):
os.makedirs(solution_directory_name, exist_ok=True)
for scenario_name, scenario in ef_scenarios(ef):
scenario_tree_solution_writer(solution_directory_name,
scenario_name,
scenario,
bundling=False)
| 20,476
|
async def async_setup_entry(hass, config_entry):
"""Set up AirVisual as config entry."""
entry_updates = {}
if not config_entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = config_entry.data[CONF_API_KEY]
if not config_entry.options:
# If the config entry doesn't already have any options set, set defaults:
entry_updates["options"] = DEFAULT_OPTIONS
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
websession = aiohttp_client.async_get_clientsession(hass)
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = AirVisualData(
hass, Client(websession, api_key=config_entry.data[CONF_API_KEY]), config_entry
)
try:
await hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id].async_update()
except InvalidKeyError:
_LOGGER.error("Invalid API key provided")
raise ConfigEntryNotReady
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
async def refresh(event_time):
"""Refresh data from AirVisual."""
await hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id].async_update()
hass.data[DOMAIN][DATA_LISTENER][config_entry.entry_id] = async_track_time_interval(
hass, refresh, DEFAULT_SCAN_INTERVAL
)
config_entry.add_update_listener(async_update_options)
return True
| 20,477
|
def rotation_point_cloud(pc):
"""
    Randomly rotate the point clouds to augment the dataset.
    A single random angle is drawn and applied to the whole batch; the active
    rotation matrix below rotates about the x-axis (alternative axes are left commented out).
    :param pc: B x N x 3 array, original batch of point clouds
    :return: (B*N) x 3 array, rotated points (the batch dimension is flattened)
"""
# rotated_data = np.zeros(pc.shape, dtype=np.float32)
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
# rotation_matrix = np.array([[cosval, 0, sinval],
# [0, 1, 0],
# [-sinval, 0, cosval]])
rotation_matrix = np.array([[1, 0, 0],
[0, cosval, -sinval],
[0, sinval, cosval]])
# rotation_matrix = np.array([[cosval, -sinval, 0],
# [sinval, cosval, 0],
# [0, 0, 1]])
rotated_data = np.dot(pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
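# Quick sketch: rotate a random batch and note that the result is flattened to (B*N) x 3.
import numpy as np

pc_batch = np.random.rand(4, 128, 3)
rotated = rotation_point_cloud(pc_batch)
print(rotated.shape)  # expected: (512, 3)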
| 20,478
|
def alliance_system_oneday(mongohandle, alliance_id, system):
"""find by corp and system - one day"""
allkills = mongohandle.allkills
system = int(system)
timeframe = 24 * 60 * 60
gmtminus = time.mktime(time.gmtime()) - timeframe
cursor = allkills.find({"alliance_id": alliance_id,
"solar_system_id": system,
"unix_kill_time": {
"$gte": gmtminus}},
{"ship": 1,
"items": 1,
"_id": 0}).hint('alliancesystemtime')
(ships, items, ammos) = parsecursor.ships_and_items(cursor)
return (ships, items, ammos)
| 20,479
|
def execute(**kwargs):
"""
    Called from an Airflow python_operator.
:param kwargs:
:return:
"""
print('Record ID:{}'.format(kwargs['record_id']))
record_id = int(kwargs['record_id'])
analyzer = LaneAnalyzer()
analyzer.execute(imported_data_id=record_id)
| 20,480
|
def slit_select(ra, dec, length, width, center_ra=0, center_dec=0, angle=0):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:param length: length of slit
:param width: width of slit
:param center_ra: center of slit
:param center_dec: center of slit
:param angle: orientation angle of slit, angle=0 corresponds length in RA direction
:return: bool, True if photon/ray is within the slit, False otherwise
"""
ra_ = ra - center_ra
dec_ = dec - center_dec
x = np.cos(angle) * ra_ + np.sin(angle) * dec_
y = - np.sin(angle) * ra_ + np.cos(angle) * dec_
if abs(x) < length / 2. and abs(y) < width / 2.:
return True
else:
return False
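# Sketch of the slit test (assumes numpy is imported as np in this module, as the
# function body does): a 1.0 x 0.2 slit centred on the origin with no rotation.
print(slit_select(ra=0.3, dec=0.0, length=1.0, width=0.2))  # expected: True
print(slit_select(ra=0.3, dec=0.2, length=1.0, width=0.2))  # expected: False, |dec| >= width / 2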
| 20,481
|
def login():
""" Logs in user """
req = flask.request.get_json(force=True)
username = req.get('username', None)
password = req.get('password', None)
user = guard.authenticate(username, password)
ret = {'access_token': guard.encode_jwt_token(user)}
return ret, 200
| 20,482
|
def acor(value, bounds, nparams, nants=None, archive_size=None, maxit=1000,
diverse=0.5, evap=0.85, seed=None):
"""
Minimize the objective function using ACO-R.
ACO-R stands for Ant Colony Optimization for Continuous Domains (Socha and
Dorigo, 2008).
Parameters:
* value : function
Returns the value of the objective function at a given parameter vector
* bounds : list
The bounds of the search space. If only two values are given, will
interpret as the minimum and maximum, respectively, for all parameters.
Alternatively, you can given a minimum and maximum for each parameter,
e.g., for a problem with 3 parameters you could give
`bounds = [min1, max1, min2, max2, min3, max3]`.
* nparams : int
The number of parameters that the objective function takes.
* nants : int
The number of ants to use in the search. Defaults to the number of
parameters.
* archive_size : int
The number of solutions to keep in the solution archive. Defaults to
10 x nants
* maxit : int
The number of iterations to run.
* diverse : float
Scalar from 0 to 1, non-inclusive, that controls how much better
solutions are favored when constructing new ones.
* evap : float
The pheromone evaporation rate (evap > 0). Controls how spread out the
search is.
* seed : None or int
Seed for the random number generator.
Yields:
* i, estimate, stats:
* i : int
The current iteration number
* estimate : 1d-array
The current best estimated parameter vector
* stats : dict
Statistics about the optimization so far. Keys:
            * method : str
The name of the optimization algorithm
* iterations : int
The total number of iterations so far
* objective : list
Value of the objective function corresponding to the best
estimate per iteration.
"""
stats = dict(method="Ant Colony Optimization for Continuous Domains",
iterations=0,
objective=[])
numpy.random.seed(seed)
# Set the defaults for number of ants and archive size
if nants is None:
nants = nparams
if archive_size is None:
archive_size = 10 * nants
    # Check whether bounds are given per parameter or as one pair for all
bounds = numpy.array(bounds)
if bounds.size == 2:
low, high = bounds
archive = numpy.random.uniform(low, high, (archive_size, nparams))
else:
archive = numpy.empty((archive_size, nparams))
bounds = bounds.reshape((nparams, 2))
for i, bound in enumerate(bounds):
low, high = bound
archive[:, i] = numpy.random.uniform(low, high, archive_size)
    # Compute the initial pheromone trail based on the objective function value
    trail = numpy.fromiter((value(p) for p in archive), dtype=float)
# Sort the archive of initial random solutions
order = numpy.argsort(trail)
archive = [archive[i] for i in order]
trail = trail[order].tolist()
stats['objective'].append(trail[0])
# Compute the weights (probabilities) of the solutions in the archive
amp = 1. / (diverse * archive_size * numpy.sqrt(2 * numpy.pi))
variance = 2 * diverse ** 2 * archive_size ** 2
weights = amp * numpy.exp(-numpy.arange(archive_size) ** 2 / variance)
weights /= numpy.sum(weights)
for iteration in range(maxit):
for k in range(nants):
            # Sample the probabilities to produce new estimates
            ant = numpy.empty(nparams, dtype=float)
# 1. Choose a pdf from the archive
pdf = numpy.searchsorted(
numpy.cumsum(weights),
numpy.random.uniform())
for i in range(nparams):
# 2. Get the mean and stddev of the chosen pdf
mean = archive[pdf][i]
                std = (evap / (archive_size - 1)) * sum(
                    abs(p[i] - archive[pdf][i]) for p in archive)
# 3. Sample the pdf until the samples are in bounds
                for attempt in range(100):
ant[i] = numpy.random.normal(mean, std)
if bounds.size == 2:
low, high = bounds
else:
low, high = bounds[i]
if ant[i] >= low and ant[i] <= high:
break
pheromone = value(ant)
# Place the new estimate in the archive
place = numpy.searchsorted(trail, pheromone)
if place == archive_size:
continue
trail.insert(place, pheromone)
trail.pop()
archive.insert(place, ant)
archive.pop()
stats['objective'].append(trail[0])
stats['iterations'] += 1
yield iteration, archive[0], copy.deepcopy(stats)
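# Hedged driver sketch: minimise a two-parameter sphere function with the generator above.
def _sphere(p):
    return sum(x ** 2 for x in p)

best, best_val = None, None
for _it, estimate, stats in acor(_sphere, bounds=[-5, 5], nparams=2, maxit=50, seed=0):
    best, best_val = estimate, stats['objective'][-1]
print(best, best_val)  # the estimate should approach [0, 0]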
| 20,483
|
def devicePanel(q=1,e=1,ctl=1,cp="string",cs=1,dt="string",dtg="string",es=1,ex=1,init=1,iu=1,l="string",mrl=1,mbv=1,ni=1,p="string",pmp="script",rp="string",to=1,toc="string",tor=1,up=1,ut="string"):
"""
http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/devicePanel.html
-----------------------------------------
devicePanel is undoable, queryable, and editable.
This command is now obsolete. It is included only for the purpose of file
compatibility. It creates a blank panel.
-----------------------------------------
Return Value:
string name of panel
In query mode, return type is based on queried flag.
-----------------------------------------
Flags:
-----------------------------------------
ctl : control [boolean] ['query']
Returns the top level control for this panel. Usually used for getting a parent to attach popup menus. CAUTION: panels may not have controls at times. This flag can return "" if no control is present.
-----------------------------------------
cp : copy [string] ['edit']
Makes this panel a copy of the specified panel. Both panels must be of the same type.
-----------------------------------------
cs : createString [boolean] ['edit']
Command string used to create a panel
-----------------------------------------
dt : defineTemplate [string] []
Puts the command in a mode where any other flags and arguments are parsed and added to the command template specified in the argument. They will be used as default arguments in any subsequent invocations of the command when templateName is set as the current template.
-----------------------------------------
dtg : docTag [string] ['query', 'edit']
Attaches a tag to the Maya panel.
-----------------------------------------
es : editString [boolean] ['edit']
Command string used to edit a panel
-----------------------------------------
ex : exists [boolean] []
Returns whether the specified object exists or not. Other flags are ignored.
-----------------------------------------
init : init [boolean] ['edit']
Initializes the panel's default state. This is usually done automatically on file -new and file -open.
-----------------------------------------
iu : isUnique [boolean] ['query']
Returns true if only one instance of this panel type is allowed.
-----------------------------------------
l : label [string] ['query', 'edit']
Specifies the user readable label for the panel.
-----------------------------------------
mrl : menuBarRepeatLast [boolean] ['query', 'edit']
Controls whether clicking on the menu header with the middle mouse button would repeat the last selected menu item.
-----------------------------------------
mbv : menuBarVisible [boolean] ['query', 'edit']
Controls whether the menu bar for the panel is displayed.
-----------------------------------------
ni : needsInit [boolean] ['query', 'edit']
(Internal) On Edit will mark the panel as requiring initialization. Query will return whether the panel is marked for initialization. Used during file -new and file -open.
-----------------------------------------
p : parent [string] []
Specifies the parent layout for this panel.
-----------------------------------------
pmp : popupMenuProcedure [script] ['query', 'edit']
Specifies the procedure called for building the panel's popup menu(s). The default value is "buildPanelPopupMenu". The procedure should take one string argument which is the panel's name.
-----------------------------------------
rp : replacePanel [string] ['edit']
Will replace the specified panel with this panel. If the target panel is within the same layout it will perform a swap.
-----------------------------------------
to : tearOff [boolean] ['query', 'edit']
Will tear off this panel into a separate window with a paneLayout as the parent of the panel. When queried this flag will return if the panel has been torn off into its own window.
-----------------------------------------
toc : tearOffCopy [string] []
Will create this panel as a torn of copy of the specified source panel.
-----------------------------------------
tor : tearOffRestore [boolean] ['edit']
Restores panel if it is torn off and focus is given to it. If docked, becomes the active panel in the docked window. This should be the default flag that is added to all panels instead of -to/-tearOff flag which should only be used to tear off the panel.
-----------------------------------------
up : unParent [boolean] ['edit']
Specifies that the panel should be removed from its layout. This (obviously) cannot be used with query.
-----------------------------------------
ut : useTemplate [string]
Forces the command to use a command template other than the current one.
"""
| 20,484
|
def statuses_filter(auth, **params):
"""
Collect tweets from the twitter statuses_filter api.
"""
endpoint = "https://stream.twitter.com/1.1/statuses/filter.json"
if "follow" in params and isinstance(params["follow"], (list, tuple)):
params["follow"] = list_to_csv(params["follow"])
if "track" in params and isinstance(params["track"], (list, tuple)):
params["track"] = list_to_csv(params["track"])
params.setdefault("delimited", 0)
params.setdefault("stall_warnings", 1)
return stream_call(endpoint, auth, params, "post")
| 20,485
|
def adjust_hue(image, hue_factor):
"""Adjusts hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
Args:
image (PIL.Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL.Image: Hue adjusted image.
"""
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
dtype = image.dtype
image = image.astype(np.uint8)
hsv_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV_FULL)
h, s, v = cv2.split(hsv_img)
    alpha = hue_factor  # np.random.uniform(hue_factor, hue_factor) always equals hue_factor
h = h.astype(np.uint8)
# uint8 addition take cares of rotation across boundaries
with np.errstate(over="ignore"):
h += np.uint8(alpha * 255)
hsv_img = cv2.merge([h, s, v])
return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB_FULL).astype(dtype)
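# Illustrative call on a random RGB image (assumes OpenCV and NumPy, as used above).
import numpy as np

rgb = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
shifted = adjust_hue(rgb, hue_factor=0.25)
print(shifted.shape, shifted.dtype)  # expected: (32, 32, 3) uint8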
| 20,486
|
def lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
#print("Received event: " + json.dumps(event, indent=2))
body = json.loads(event['body'])
print(f"Body is: {body}")
url = body['url']
print(f"Getting image from URL: {url}")
response = requests.get(url)
print("Load image into memory")
img = PILImage.create(BytesIO(response.content))
print("Doing forward pass")
start = time.time()
pred,pred_idx,probs = learn.predict(img)
end = time.time()
inference_time = np.round((end - start) * 1000, 2)
print(f'class: {pred}, probability: {probs[pred_idx]:.04f}')
print(f'Inference time is: {str(inference_time)} ms')
return {
"statusCode": 200,
"body": json.dumps(
{
"class": pred,
"probability": "%.4f" % probs[pred_idx]
}
),
}
| 20,487
|
def compareDict(a, b):
"""
Compare two definitions removing the unique Ids from the entities
"""
ignore = ['Id']
_a = [hashDict(dict(x), ignore) for x in a]
_b = [hashDict(dict(y), ignore) for y in b]
_a.sort()
_b.sort()
return _a == _b
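# Toy comparison sketch; hashDict (not shown here) is assumed to hash a dict while
# skipping the ignored keys, so entries differing only in Id compare equal.
defs_a = [{"Id": 1, "name": "x"}, {"Id": 2, "name": "y"}]
defs_b = [{"Id": 9, "name": "y"}, {"Id": 8, "name": "x"}]
print(compareDict(defs_a, defs_b))  # expected: True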
| 20,488
|
def create_twitter_auth(cf_t):
"""Function to create a twitter object
Args: cf_t is configuration dictionary.
Returns: Twitter object.
"""
# When using twitter stream you must authorize.
# these tokens are necessary for user authentication
# create twitter API object
auth = OAuth(cf_t['access_token'], cf_t['access_token_secret'], cf_t['consumer_key'], cf_t['consumer_secret'])
try:
# create twitter API object
twitter = Twitter(auth = auth)
    except TwitterHTTPError:
        traceback.print_exc()
        time.sleep(cf_t['sleep_interval'])
        # retry once after waiting instead of returning an unbound name
        twitter = Twitter(auth = auth)
    return twitter
| 20,489
|
def main():
"""Entry point."""
debug = False
try:
argparser = ArgumentParser(description=modules[__name__].__doc__)
argparser.add_argument('device', nargs='?', default='ftdi:///?',
help='serial port device name')
argparser.add_argument('-S', '--no-smb', action='store_true',
default=False,
help='use regular I2C mode vs. SMBbus scan')
argparser.add_argument('-P', '--vidpid', action='append',
help='specify a custom VID:PID device ID, '
'may be repeated')
argparser.add_argument('-V', '--virtual', type=FileType('r'),
help='use a virtual device, specified as YaML')
argparser.add_argument('-v', '--verbose', action='count', default=0,
help='increase verbosity')
argparser.add_argument('-d', '--debug', action='store_true',
help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
if not args.device:
argparser.error('Serial device not specified')
loglevel = max(DEBUG, ERROR - (10 * args.verbose))
loglevel = min(ERROR, loglevel)
if debug:
formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
'%(message)s', '%H:%M:%S')
else:
formatter = Formatter('%(message)s')
FtdiLogger.log.addHandler(StreamHandler(stderr))
FtdiLogger.set_formatter(formatter)
FtdiLogger.set_level(loglevel)
if args.virtual:
#pylint: disable-msg=import-outside-toplevel
from pyftdi.usbtools import UsbTools
# Force PyUSB to use PyFtdi test framework for USB backends
UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
# Ensure the virtual backend can be found and is loaded
backend = UsbTools.find_backend()
loader = backend.create_loader()()
loader.load(args.virtual)
try:
add_custom_devices(Ftdi, args.vidpid)
except ValueError as exc:
argparser.error(str(exc))
I2cBusScanner.scan(args.device, not args.no_smb)
except (ImportError, IOError, NotImplementedError, ValueError) as exc:
print('\nError: %s' % exc, file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2)
| 20,490
|
def _project(doc, projection):
"""Return new doc with items filtered according to projection."""
def _include_key(key, projection):
for k, v in projection.items():
if key == k:
if v == 0:
return False
elif v == 1:
return True
else:
raise ValueError('Projection value must be 0 or 1.')
if projection and key != '_id':
return False
return True
return {k: v for k, v in doc.items() if _include_key(k, projection)}
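# Projection semantics sketched on a toy document (an empty projection keeps every field).
doc_example = {"_id": 1, "a": 10, "b": 20}
print(_project(doc_example, {"a": 1}))  # expected: {'_id': 1, 'a': 10}
print(_project(doc_example, {}))        # expected: {'_id': 1, 'a': 10, 'b': 20}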
| 20,491
|
def test_null_model(objective: str, expectation: float) -> None:
"""
It outputs the mean/modal training value for all training predictors.
Args:
objective: The objective of the model (classification or regression).
expectation: The expected prediction of the model.
"""
# Data
X_train = np.empty((5, 2))
y_train = np.array([2, 2, 3, 4, 4])
X_test = np.empty((2, 2))
# Model
mod = NullModel(objective=objective)
mod.fit(X_train, y_train)
y_pred = mod.apply(X_test)
# Testing
y_test = np.full(2, expectation)
assert np.allclose(y_pred, y_test)
| 20,492
|
def random_swap(words, n):
"""
Randomly swap two words in the sentence n times
Args:
        words (list): list of word tokens to operate on
        n (int): number of swap operations to perform
    Returns:
        list: a copy of `words` with n random pair swaps applied
"""
def swap_word(new_words):
random_idx_1 = random.randint(0, len(new_words) - 1)
random_idx_2 = random_idx_1
counter = 0
while random_idx_2 == random_idx_1:
random_idx_2 = random.randint(0, len(new_words) - 1)
counter += 1
if counter > 3:
return new_words
new_words[random_idx_1], new_words[random_idx_2] = (
new_words[random_idx_2],
new_words[random_idx_1],
)
return new_words
new_words = words.copy()
for _ in range(n):
new_words = swap_word(new_words)
return new_words
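# Quick sketch (assumes the random module is imported, as the helper above requires):
tokens = ["the", "quick", "brown", "fox"]
print(random_swap(tokens, n=1))  # two tokens swapped at random
print(tokens)                    # unchanged, the function works on a copy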
| 20,493
|
def get_dist_for_angles(dict_of_arrays, clusters, roll, pitch, yaw, metric='3d', kind='max'):
"""
Calculate a single distance metric for a combination of angles
"""
if (dict_of_arrays['yaw_corr'] == 0).all():
rot_by_boresight = apply_boresight_same(dict_of_arrays, roll, pitch, yaw)
else:
rot_by_boresight = apply_boresight_yaw_correct(dict_of_arrays, roll, pitch, yaw)
rot_to_real_world = rotate_to_real_world(rot_by_boresight)
real_wrld_coords = shift_to_real_world(rot_to_real_world)
if kind == 'mean':
distance = get_mean_3D_distance(real_wrld_coords, clusters, metric)
elif kind == 'median':
distance = get_median_3D_distance(real_wrld_coords, clusters, metric)
else:
distance = get_max_3D_distance(real_wrld_coords, clusters, metric)
return distance
| 20,494
|
def test_extra_yaml():
"""Test loading extra yaml file"""
load(settings, filename=YAML)
yaml = """
example:
helloexample: world
"""
settings.set("YAML", yaml)
settings.execute_loaders(env="EXAMPLE")
assert settings.HELLOEXAMPLE == "world"
| 20,495
|
async def stream():
"""Main streaming loop for PHD"""
while True:
if phd_client.is_connected and manager.active_connections:
response = await phd_client.get_responses()
if response is not None:
# Add to the websocket queue
# If it is the initial data, put in variable
if response.get('Event') == 'Version':
phd_client.initial_data = response
q.put_nowait(response)
await asyncio.sleep(STREAM_INTERVAL)
return None
| 20,496
|
def filter_camera_angle(places, angle=1.):
"""Filter pointclound by camera angle"""
bool_in = np.logical_and((places[:, 1] * angle < places[:, 0]),
(-places[:, 1] * angle < places[:, 0]))
return places[bool_in]
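# Sketch: with angle=1.0 only points satisfying |y| < x (in front of the camera) are kept.
import numpy as np

places_example = np.array([[5.0, 1.0, 0.0],    # kept
                           [1.0, 3.0, 0.0],    # dropped: y >= x
                           [-2.0, 0.5, 0.0]])  # dropped: behind the camera
print(filter_camera_angle(places_example))     # expected: [[5. 1. 0.]]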
| 20,497
|
def main():
"""Carry out model performance estimation.
"""
parser = ArgumentParser()
parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')
args = parser.parse_args()
assert exists(args.config)
task_monitor = get_monitor(args.config)
task_monitor.performance()
| 20,498
|
def load_nifti(path: str) \
-> tuple[np.ndarray, np.ndarray, nib.nifti1.Nifti1Header]:
"""
This function loads a nifti image using
the nibabel library.
"""
# Extract image
img = nib.load(path)
img_aff = img.affine
img_hdr = img.header
# Extract the actual data in a numpy array
data = img.get_fdata()
return data, img_aff, img_hdr
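# Hedged usage sketch; "scan.nii.gz" is a placeholder path to an existing NIfTI file.
data, affine, header = load_nifti("scan.nii.gz")
print(data.shape, affine.shape)  # volume shape and the (4, 4) affine matrix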
| 20,499
|