content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_raises_without_scheme() -> None:
    """Ensure that `chrome_cookies("domain.com")` raises.

    The domain must specify a scheme (http or https).
    """
    bare_domain = "n8henrie.com"  # deliberately scheme-less
    with pytest.raises(URLError):
        chrome_cookies(bare_domain)
def user_has_registered(userID):
    """Check whether a user id exists in the ``profile`` table of users.db.

    Args:
        userID: the user id to look up.

    Returns:
        bool: True if a matching row exists, False otherwise.
    """
    database = sqlite3.connect("users.db")
    try:
        cursor = database.cursor()
        # Parameterized query: the original interpolated userID directly
        # into the SQL string, which is an SQL-injection vector.
        cursor.execute("SELECT user_id FROM profile WHERE user_id = ?", (userID,))
        result = cursor.fetchone()
        return result is not None
    finally:
        # Always release the connection (the original leaked it).
        database.close()
def prepare_ocp(
    biorbd_model_path: str,
    final_time: float,
    n_shooting: int,
    marker_velocity_or_displacement: str,
    marker_in_first_coordinates_system: bool,
    control_type: ControlType,
    ode_solver: OdeSolver = None,
) -> OptimalControlProgram:
    """
    Prepare an ocp that targets some marker velocities, either by finite differences or by jacobian

    Parameters
    ----------
    biorbd_model_path: str
        The path to the bioMod file
    final_time: float
        The time of the final node
    n_shooting: int
        The number of shooting points
    marker_velocity_or_displacement: str
        which type of tracking: finite difference ('disp') or by jacobian ('velo')
    marker_in_first_coordinates_system: bool
        If the marker to track should be expressed in the global or local reference frame
    control_type: ControlType
        The type of controls
    ode_solver: OdeSolver
        The ode solver to use; defaults to OdeSolver.RK4() when None

    Returns
    -------
    The OptimalControlProgram ready to be solved

    Raises
    ------
    RuntimeError
        If marker_velocity_or_displacement is neither 'velo' nor 'disp'
    """
    # The original default was the call expression OdeSolver.RK4(), which is
    # evaluated once at import time and shared between calls (mutable-default
    # pitfall); the None sentinel keeps the same behavior per call.
    if ode_solver is None:
        ode_solver = OdeSolver.RK4()
    biorbd_model = biorbd.Model(biorbd_model_path)
    # Add objective functions
    if marker_in_first_coordinates_system:
        # Marker should follow this segment (0 velocity when compared to this one)
        coordinates_system_idx = 0
    else:
        # Marker should be static in global reference frame
        coordinates_system_idx = -1
    objective_functions = ObjectiveList()
    if marker_velocity_or_displacement == "disp":
        objective_functions.add(
            ObjectiveFcn.Lagrange.MINIMIZE_MARKERS_DISPLACEMENT,
            coordinates_system_idx=coordinates_system_idx,
            index=6,
            weight=1000,
        )
    elif marker_velocity_or_displacement == "velo":
        objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_MARKERS_VELOCITY, index=6, weight=1000)
    else:
        raise RuntimeError(
            f"Wrong choice of marker_velocity_or_displacement, actual value is "
            f"{marker_velocity_or_displacement}, should be 'velo' or 'disp'."
        )
    # Make sure the segments actually moves (in order to test the relative speed objective)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, index=6, weight=-1)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, index=7, weight=-1)
    # Dynamics
    dynamics = DynamicsList()
    dynamics.add(DynamicsFcn.TORQUE_DRIVEN)
    # Path constraint
    nq = biorbd_model.nbQ()
    x_bounds = BoundsList()
    x_bounds.add(bounds=QAndQDotBounds(biorbd_model))
    # Widen the velocity bounds (states nq..2*nq-1 are the qdot entries)
    for i in range(nq, 2 * nq):
        x_bounds[0].min[i, :] = -10
        x_bounds[0].max[i, :] = 10
    # Initial guess
    x_init = InitialGuessList()
    x_init.add([1.5, 1.5, 0.0, 0.0, 0.7, 0.7, 0.6, 0.6])
    # Define control path constraint
    tau_min, tau_max, tau_init = -100, 100, 0
    u_bounds = BoundsList()
    u_bounds.add([tau_min] * biorbd_model.nbGeneralizedTorque(), [tau_max] * biorbd_model.nbGeneralizedTorque())
    u_init = InitialGuessList()
    u_init.add([tau_init] * biorbd_model.nbGeneralizedTorque())
    return OptimalControlProgram(
        biorbd_model,
        dynamics,
        n_shooting,
        final_time,
        x_init,
        u_init,
        x_bounds,
        u_bounds,
        objective_functions,
        control_type=control_type,
        ode_solver=ode_solver,
    )
def add_dependencies(dependencies, script_path, encountered_internal_modules, prefix="", debug=False):
    """
    Recursively collect the modules imported by a python script.

    :param dependencies: mapping of external module name -> set of script paths
        that import it (presumably a defaultdict of sets -- confirm at call site)
    :param script_path: path of the python script to scan
    :param encountered_internal_modules: set of PTS module paths already visited,
        used to avoid re-scanning (and infinite recursion on circular imports)
    :param prefix: indentation prefix passed down to nested calls
    :param debug: when True, print the import statements that were found
    :return: None; results are accumulated in `dependencies`
    """
    # Skip files that are not python scripts
    if not script_path.endswith(".py"): raise ValueError("Not a valid script path")
    # Read the lines of the script file; the context manager guarantees the
    # file handle is closed (the original left it open).
    import_lines = []
    with open(script_path, 'r') as script_file:
        for line in script_file:
            # If the current line does not contain an 'import yyy' or 'from yyy import zzz' statement, skip it
            if not (line.startswith("import ") or (line.startswith("from ") and "import" in line)): continue
            import_lines.append(line)
    if debug:
        print("Import statements found in " + script_path + ":")
        for import_line in import_lines:
            print(" - " + import_line[:-1])
    # Loop over the lines where something is imported
    for line in import_lines:
        # Get the path to the modules that are being imported in the current line
        modules = get_modules(line, script_path, debug=debug)
        for module in modules:
            # Check if the imported module refers to a PTS module or an external package
            if module.startswith("/"):  # a PTS module
                if module in encountered_internal_modules: continue
                else:
                    encountered_internal_modules.add(module)
                    add_dependencies(dependencies, module, encountered_internal_modules, prefix=prefix+" ", debug=debug)
            else: dependencies[module].add(script_path)
def key_gen(**kwargs):
    """
    Key generator for linux. Determines key based on
    parameters supplied in kwargs.

    Keyword Parameters:
    @keyword geounit1: portable_id of a geounit
    @keyword geounit2: portable_id of a geounit
    @keyword region: region abbreviation

    Returns the cache key string, or None when no recognized keyword
    combination was supplied.
    """
    # A pair of geounits takes precedence over a region key.
    if 'geounit1' in kwargs and 'geounit2' in kwargs:
        return f"adj:geounit1:{kwargs['geounit1']}:geounit2:{kwargs['geounit2']}"
    if 'region' in kwargs:
        return f"adj:region:{kwargs['region']}"
    # No recognized parameters: make the implicit None explicit.
    return None
def find(bindings: List[Binding], name: str) -> Optional[Binding]:
    """
    Return the first Binding whose name matches `name`, ignoring case.

    :param bindings: the Bindings to search through
    :param name: the name of the Binding to look for
    :return: the matching Binding, or None when no name matches
    """
    wanted = name.lower()
    return next(
        (binding for binding in bindings if binding.get_name().lower() == wanted),
        None,
    )
def computeResult():
    """Allows download of asset data result file.
    ---
    tags:
      - services
    consumes:
      - application/json
    parameters:
      - name: consumerAddress
        in: query
        description: The consumer address.
        required: true
        type: string
      - name: jobId
        in: query
        description: jobId
        required: true
        type: string
      - name: index
        in: query
        description: Result index
        required: true
      - name: nonce
        in: query
        description: The UTC timestamp, used to prevent replay attacks
      - name: signature
        in: query
        description: Signature of (consumerAddress+jobId+index+nonce) to verify that the consumer has rights to download the result
    responses:
      200:
        description: Content of the result
      400:
        description: One or more of the required attributes are missing or invalid.
      404:
        description: Result not found
      503:
        description: Service Unavailable
    """
    data = get_request_data(request)
    logger.info(f"computeResult called. arguments = {data}")
    # Target endpoint on the compute operator service.
    url = get_compute_result_endpoint()
    consumer_address = data.get("consumerAddress")
    job_id = data.get("jobId")
    # Provider co-signs (consumerAddress, jobId) so the operator can verify
    # that this request actually comes from the provider.
    nonce, provider_signature = sign_for_compute(
        provider_wallet, consumer_address, job_id
    )
    web3 = get_web3()
    # Forward both the consumer's and the provider's signatures to the operator.
    params = {
        "index": data.get("index"),
        "owner": data.get("consumerAddress"),
        "jobId": job_id,
        "consumerSignature": data.get("signature"),
        "providerSignature": provider_signature,
        "nonce": nonce,
        "chainId": web3.chain_id,
    }
    # PreparedRequest is used only to url-encode the query parameters.
    req = PreparedRequest()
    req.prepare_url(url, params)
    result_url = req.url
    logger.debug(f"Done processing computeResult, url: {result_url}")
    # Record the consumer's nonce to prevent replay of this request.
    update_nonce(data.get("consumerAddress"), data.get("nonce"))
    # Stream the operator's response back to the caller; validate_url=False
    # because result_url points at the trusted operator service.
    response = build_download_response(
        request, requests_session, result_url, result_url, None, validate_url=False
    )
    logger.info(f"computeResult response = {response}")
    return response
async def test_upgraded_extended_version_async_analysis_module(manager):
    """Tests the ability of an analysis module to update extended version data."""
    #
    # in this case the first call to get_next_analysis_request fails
    # but the module.upgrade() is called
    # since the work task is not acquired it stays in the queue
    # until the event_loop comes back around with the correct extended version data
    #
    # step_1: first analysis executed; step_2: second (post-upgrade) analysis executed
    step_1 = asyncio.Event()
    step_2 = asyncio.Event()

    class CustomAnalysisModule(AnalysisModule):
        async def execute_analysis(self, root, observable, analysis):
            nonlocal step_1
            # Record the extended version the module sees at analysis time.
            analysis.set_details({"extended_version": self.type.extended_version})
            if not step_1.is_set():
                step_1.set()
                return
            step_2.set()

        async def upgrade(self):
            # Called when the registered type is newer than the module's;
            # brings the local type in line with the registry.
            self.type.extended_version = {"intel": "v2"}

    # Register with v1 intel, then submit the first root analysis.
    amt = AnalysisModuleType("test", "", extended_version={"intel": "v1"})
    await manager.system.register_analysis_module_type(amt)
    module = CustomAnalysisModule(type=amt)
    manager.add_module(module)
    root = manager.system.new_root()
    observable = root.add_observable("test", "test")
    await root.submit()
    # Second root is submitted only after the registry is bumped to v2.
    root_2 = manager.system.new_root()
    observable_2 = root_2.add_observable("test", "test")

    async def _update_intel():
        nonlocal step_1
        nonlocal root_2
        await step_1.wait()
        # update the extended version data for this module type
        updated_amt = AnalysisModuleType("test", "", extended_version={"intel": "v2"})
        await manager.system.register_analysis_module_type(updated_amt)
        await root_2.submit()

    async def _shutdown():
        nonlocal step_2
        nonlocal manager
        await step_2.wait()
        manager.stop()

    upgrade_task = asyncio.create_task(_update_intel())
    shutdown_task = asyncio.create_task(_shutdown())
    await manager.run()
    await upgrade_task
    await shutdown_task
    # The second analysis must have been produced with the upgraded (v2) data.
    root = await manager.system.get_root_analysis(root_2)
    observable = root.get_observable(observable)
    assert (await observable.get_analysis(amt).get_details())["extended_version"] == {"intel": "v2"}
async def init_menu_perms(request: Request) -> Dict:
    """
    Initialize menus and permissions.

    Thin route handler that delegates to the service layer and returns
    its result unchanged.
    """
    return await services.init_menu_perms(request)
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
    """
    Tokenization and pre-processing of text as expected by Bert

    Parameters
    ----------
    example_tokens : list of str
        Tokens of a single sentence, without special tokens.
    max_seq_length : int
        Total sequence length after adding [CLS]/[SEP] and zero-padding.
    tokenizer
        Tokenizer exposing `convert_tokens_to_ids` (e.g. a BERT tokenizer).

    Returns
    -------
    (input_ids, input_mask, segment_ids)
        Three lists of int, each exactly `max_seq_length` long; the mask is
        1 for real tokens and 0 for padding.
    """
    # Account for [CLS] and [SEP] with "- 2"
    if len(example_tokens) > max_seq_length - 2:
        example_tokens = example_tokens[:max_seq_length - 2]
    # The convention in BERT for single sequences is:
    # tokens:    [CLS] the dog is hairy . [SEP]
    # type_ids:    0    0   0   0    0  0   0
    # "type_ids"/segment_ids distinguish first vs second sequence; we only
    # ever have a single sequence, so they are all 0.
    tokens = ["[CLS]"] + list(example_tokens) + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)
    # Zero-pad up to the sequence length.
    padding = max_seq_length - len(input_ids)
    input_ids += [0] * padding
    input_mask += [0] * padding
    segment_ids += [0] * padding
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    return input_ids, input_mask, segment_ids
def executor_callback(future):
    """The callback function called when a job has completed.

    Persists the job result under OUTPUT_DIR/<yymmdd>/<ticket>/, updates the
    corresponding Queue row, writes an accounting log entry and removes the
    job's temporary working directory.
    """
    ticket, result, job_type, success, comment = future.result()
    if result is not None:
        # Results are grouped per day, then per ticket.
        rel_path = datetime.now().strftime("%y%m%d")
        rel_path = path.join(rel_path, ticket)
        output_path: str = path.join(getenv('OUTPUT_DIR'), rel_path)
        # NOTE(review): mkdir does not create intermediate directories and
        # raises if output_path already exists -- presumably the daily parent
        # dir exists and tickets are unique; confirm.
        mkdir(output_path)
        filepath = None
        if job_type is JobType.PROFILE:
            filepath = path.join(getenv('OUTPUT_DIR'), rel_path, "result.json")
            result.to_file(filepath)
        elif job_type is JobType.NORMALIZE:
            gdf, resource_type, file_name = result
            filepath = store_gdf(gdf, resource_type, file_name, output_path)
        elif job_type is JobType.SUMMARIZE:
            filepath = path.join(getenv('OUTPUT_DIR'), rel_path, "result.json")
            with open(filepath, 'w') as fp:
                json.dump(result, fp)
    else:
        filepath = None
    # Mark the queue entry as completed and record timing/outcome.
    with app.app_context():
        elem: Queue = Queue().query.filter_by(ticket=ticket).first()
        print(elem)
        time = elem.requested_time
        filesize = elem.filesize
        # requested_time is stored naive; treat it as UTC for the delta.
        execution_time = round((datetime.now(timezone.utc) - time.replace(tzinfo=timezone.utc)).total_seconds(), 3)
        elem.result = filepath
        elem.success = success
        elem.status = 1
        elem.execution_time = execution_time
        elem.comment = comment
        db.session.add(elem)
        db.session.commit()
    accountingLogger(ticket=ticket, success=success, execution_start=time, execution_time=execution_time,
                     comment=comment, filesize=filesize)
    # Clean up the job's temporary input directory.
    if job_type is JobType.PROFILE:
        delete_from_temp(path.join(PROFILE_TEMP_DIR, ticket))
    elif job_type is JobType.NORMALIZE:
        delete_from_temp(path.join(NORMALIZE_TEMP_DIR, ticket))
    elif job_type is JobType.SUMMARIZE:
        delete_from_temp(path.join(SUMMARIZE_TEMP_DIR, ticket))
    if success:
        mainLogger.info(f'Processing of ticket: {ticket} is completed successfully')
    else:
        mainLogger.info(f'Processing of ticket: {ticket} completed with errors')
def neighbors():
    """Show vnet neighbors information.

    Prints one table per vnet: the interfaces that belong to it and the
    neighbor (ip, mac) entries learned on those interfaces. Prints an empty
    table when no vnet interfaces are configured.
    """
    config_db = ConfigDBConnector()
    config_db.connect()
    header = ['<vnet_name>', 'neighbor', 'mac_address', 'interfaces']

    def _collect_vnet_intfs(intfs_table, vnet_intfs):
        # Group interface names by the vnet they are bound to.
        # (Shared by INTERFACE and VLAN_INTERFACE -- the original duplicated
        # this loop for each table.)
        for intf, attrs in intfs_table.items():
            if 'vnet_name' in attrs:
                vnet_intfs.setdefault(attrs['vnet_name'], []).append(intf)

    # Fetching data from config_db for interfaces
    vnet_intfs = {}
    _collect_vnet_intfs(config_db.get_table("INTERFACE"), vnet_intfs)
    _collect_vnet_intfs(config_db.get_table("VLAN_INTERFACE"), vnet_intfs)
    appl_db = SonicV2Connector()
    appl_db.connect(appl_db.APPL_DB)
    # Fetching data from appl_db for neighbors
    nbrs = appl_db.keys(appl_db.APPL_DB, "NEIGH_TABLE:*")
    nbrs_data = {}
    for nbr in nbrs if nbrs else []:
        # Key format: NEIGH_TABLE:<interface>:<ip> (ip may contain ':').
        tbl, intf, ip = nbr.split(":", 2)
        mac = appl_db.get(appl_db.APPL_DB, nbr, 'neigh')
        nbrs_data.setdefault(intf, []).append((ip, mac))
    table = []
    for vnet_name, intfs in vnet_intfs.items():
        intfs = natsorted(intfs)
        # Reuse the header row, substituting the vnet name for the first column.
        header[0] = vnet_name
        table = []
        for intf in intfs:
            for ip, mac in nbrs_data.get(intf, []):
                table.append(["", ip, mac, intf])
        click.echo(tabulate(table, header))
        click.echo()
    if not bool(vnet_intfs):
        click.echo(tabulate(table, header))
async def test_config_entry_authentication_failed(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_tailscale: MagicMock,
) -> None:
    """Test trigger reauthentication flow."""
    mock_config_entry.add_to_hass(hass)
    # Make the API client raise so that setup fails with an auth error.
    mock_tailscale.devices.side_effect = TailscaleAuthenticationError
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    assert mock_config_entry.state is ConfigEntryState.SETUP_ERROR
    # Exactly one reauth flow should have been started for this entry.
    flows = hass.config_entries.flow.async_progress()
    assert len(flows) == 1
    flow = flows[0]
    assert flow.get("step_id") == "reauth_confirm"
    assert flow.get("handler") == DOMAIN
    assert "context" in flow
    assert flow["context"].get("source") == SOURCE_REAUTH
    assert flow["context"].get("entry_id") == mock_config_entry.entry_id
def weight_diff(w1, w2):
    """Return the element-wise difference between two weight collections.

    Each input is a sequence of arrays; every array is flattened and the
    pieces joined into one 1-D vector before subtracting.
    """
    first = np.concatenate([np.ravel(part) for part in w1])
    second = np.concatenate([np.ravel(part) for part in w2])
    return first - second
def get_root_modules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.

    Per-path results are cached in XSH.modules_cache (except for the cwd
    entries "" and "."); the whole scan is abandoned, returning [], when it
    exceeds TIMEOUT_GIVEUP seconds.
    """
    rootmodules_cache = XSH.modules_cache
    # Built-in modules are always available regardless of sys.path.
    rootmodules = list(sys.builtin_module_names)
    start_time = time()
    for path in sys.path:
        try:
            modules = rootmodules_cache[path]
        except KeyError:
            # Cache miss: scan the directory (this is the slow part).
            modules = module_list(path)
            try:
                modules.remove("__init__")
            except ValueError:
                pass
            if path not in ("", "."):  # cwd modules should not be cached
                rootmodules_cache[path] = modules
            if time() - start_time > TIMEOUT_GIVEUP:
                print("\nwarning: Getting root modules is taking too long, we give up")
                return []
        rootmodules.extend(modules)
    # De-duplicate names found under multiple sys.path entries.
    rootmodules = list(set(rootmodules))
    return rootmodules
def copy_fillmem_to_rootfs(rootfs_path):
    """Build and copy the 'fillmem' program to the rootfs.

    Compiles host_tools/fillmem.c, mounts the rootfs image on a temporary
    mount point, copies the binary into /sbin, then cleans everything up.
    Requires privileges to mount.
    """
    # Argument lists with shell=False: rootfs_path can no longer be
    # interpreted by a shell (the original used shell=True format strings).
    subprocess.check_call(["gcc", "./host_tools/fillmem.c", "-o", "fillmem"])
    subprocess.check_call(["mkdir", "tmpfs"])
    subprocess.check_call(["mount", rootfs_path, "tmpfs"])
    subprocess.check_call(["cp", "fillmem", "tmpfs/sbin/fillmem"])
    subprocess.check_call(["rm", "fillmem"])
    subprocess.check_call(["umount", "tmpfs"])
    subprocess.check_call(["rmdir", "tmpfs"])
def advect_salinity(vs):
    """
    integrate salinity

    Delegates to advect_tracer with the salinity field and its tendency,
    both taken at the current time index vs.tau.
    """
    return advect_tracer(vs, vs.salt[..., vs.tau], vs.dsalt[..., vs.tau])
def lafferty_wyatt_point(lowedge, highedge, expo_slope):
    """calculates the l-w point for a bin where the true distribution is an
    exponential characterized by expo_slope.
    """
    import math
    # Mean value of exp(slope*x) over the bin, from the analytic integral.
    bin_width = highedge - lowedge
    mean_value = (math.exp(expo_slope * highedge) - math.exp(expo_slope * lowedge)) \
        / (expo_slope * bin_width)
    # Invert the exponential at that mean value to get the L-W abscissa.
    return math.log(mean_value) / expo_slope
def _event_loop() -> Iterator[asyncio.AbstractEventLoop]:
"""Yield an event loop.
This is necessary because pytest-asyncio needs an event loop with a with an equal or higher
pytest fixture scope as any of the async fixtures. And remember, pytest-asynio is what allows us
to have async pytest fixtures.
"""
loop = asyncio.get_event_loop()
yield loop
loop.close() | 5,325,418 |
def init_critical_cases_20():
    """
    PySD-generated constant.

    Real Name: init Critical Cases 20
    Original Eqn: 0
    Units: person
    Limits: (None, None)
    Type: constant
    """
    initial_critical_cases = 0  # the model starts with no critical cases
    return initial_critical_cases
def view_last_build_times():
    """Find build manifests and display to stdout.

    Walks SCRIPT_DIR for directories containing both a manifest.json and a
    box_info.json, then prints a JSON mapping of box_tag -> days since the
    last packer build (identified by last_run_uuid).
    """
    build_manifests = dict()
    for root, _dirs, files in os.walk(SCRIPT_DIR):
        if 'manifest.json' in files and 'box_info.json' in files:
            box_info = os.path.join(root, 'box_info.json')
            try:
                with open(box_info, 'r') as box_data:
                    box_tag = json.load(box_data).get('box_tag')
            except ValueError:
                # NOTE(review): on invalid JSON, box_tag keeps the value from a
                # previous iteration (or is unbound on the first one) and the
                # manifest below is still processed -- confirm this is intended.
                pass
            manifest = os.path.join(root, 'manifest.json')
            try:
                with open(manifest, 'r') as manifest_data:
                    data = json.load(manifest_data)
                    last_run_uuid = data.get('last_run_uuid')
                    builds = data.get('builds')
                    if builds is not None:
                        for build in builds:
                            # Only the build belonging to the most recent
                            # packer run counts.
                            if build['packer_run_uuid'] == last_run_uuid:
                                current_time_epoch = time.mktime(
                                    datetime.now().timetuple())
                                last_build_time_epoch = build['build_time']
                                build_manifests[box_tag] = dict(
                                    days_since_last_build=int(
                                        (current_time_epoch -
                                         last_build_time_epoch)/86400))
            except ValueError:
                # Unreadable manifest: skip this directory silently.
                pass
    print(json.dumps(build_manifests))
def preview_on():
    """Turn the preview on """
    # Python 3 print() calls -- the original used Python 2 print statements,
    # which are syntax errors under Python 3 (the rest of this codebase uses
    # Python 3 features such as f-strings).
    print()
    print(preview_on.__doc__)
    command_send('camera', 'PV', '02')
def build_doc(pic_dic):
    """
    Gets dict {'image-name':['image-path',text]} ==> doc obj

    Builds a Word document with a centered page-number footer and a starter
    page, then one page per picture: heading, 6x4-inch image, caption
    paragraph, page break.
    """
    doc = word_obj()
    # Add footer
    doc.sections[0].footer.paragraphs[0].alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
    add_page_number(doc.sections[0].footer.paragraphs[0].add_run())
    # Started Page
    starter_page(doc)
    for pic_name, pic_details in pic_dic.items():
        # Add head (level-6 heading with the image name)
        head = doc.add_heading(pic_name, 6)
        # add header styling
        heading(head, doc)
        # Add pic (pic_details[0] is the image path)
        doc.add_picture(pic_details[0], width=Inches(6), height=Inches(4))
        # Add paragraph (pic_details[1] is the caption text)
        p = doc.add_paragraph(pic_details[1])
        paragraph(p, doc)
        # One picture per page.
        doc.add_page_break()
    return doc
async def register(app: web.Application):
    """Register routes.

    Adds the POST /connections/{id}/start-introduction endpoint, handled
    by introduction_start.
    """
    app.add_routes(
        [web.post("/connections/{id}/start-introduction", introduction_start)]
    )
def save_object(obj, filename, tries=2, sleep_delay=3):
    """
    Serialize an object to disk, retrying on failure.

    Parameters
    ----------
    obj : object
        object, that shall be serialized and stored
    filename : str
        absolute path, name and extension of the created file
    tries : int
        number of attempts to store the file
    sleep_delay : int
        time, the method shall wait before the next attempt, when writing the file didn't work

    Returns
    -------
    None

    Raises
    ------
    RuntimeError
        when the file could not be written after `tries` attempts
    """
    filename = r"{}".format(filename)
    file_written = False
    for _ in range(tries):
        try:
            # Context manager guarantees the handle is closed even when
            # dump() raises (the original leaked it in that case).
            with open(filename, "wb") as output:
                dump(obj, output, HIGHEST_PROTOCOL)
            file_written = True
            break
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; wait before retrying.
            time.sleep(sleep_delay)
            continue
    if not file_written:
        logger = logging.getLogger(__name__)
        # Report which contained objects could not be serialized.
        logger.warning(detect.badobjects(obj))
        raise RuntimeError('Unable to write file %s.' % filename)
def dispatch_every_hour(one_time_password):
    """ This is the receiving point of start_every_hour's post request. It
    checks that the one time password is correct and then dispatches
    every_hour. """
    # Presumably raises on a wrong OTP, aborting the dispatch -- confirm
    # check_password's failure behavior.
    EveryHourOTP.check_password(one_time_password)
    # Run the hourly job in a separate process so this request returns at once.
    Process(target=every_hour).start()
    return "success"
def check_list(data):
    """Return `data` as a list: lists pass through, anything else is wrapped as [data].

    Uses isinstance instead of a `type(...) is list` identity check, so list
    subclasses are also passed through unchanged.
    """
    if isinstance(data, list):
        return data
    return [data]
def test_to_float(input_value: FixedPoint, correct_output: float) -> None:
"""
Test whether the __float__ function works correctly
:param input_value: fixed-point number
:param correct_output: correct float cast of the fixed-point number
"""
output = float(input_value)
assert output == correct_output | 5,325,427 |
def nfvi_reinitialize(config):
    """
    Re-initialize the NFVI package

    Only the compute plugin is re-initialized, and only when it is not
    disabled via the 'compute_plugin_disabled' config option.

    Returns True when initialization completed (or was skipped).
    """
    global _task_worker_pools
    init_complete = True
    # Config value is a string; DISABLED_LIST holds the accepted "disabled"
    # spellings, defaulting to enabled ('False') when the key is absent.
    compute_plugin_disabled = (config.get('compute_plugin_disabled',
                                          'False') in DISABLED_LIST)
    if not compute_plugin_disabled:
        init_complete = nfvi_compute_initialize(config,
                                                _task_worker_pools['compute'])
    return init_complete
def parse_freqs(lines, parameters):
    """Parse the basepair frequencies.

    Scans baseml output lines for base frequencies (several historical output
    formats), rate frequencies and branch-specific frequency parameters,
    adding whatever is found to `parameters`.

    :param lines: iterable of baseml output lines
    :param parameters: dict of already-parsed results to extend
    :return: the updated `parameters` dict
    """
    # Raw string: "\d" in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning/DeprecationWarning on modern Python).
    root_re = re.compile(r"Note: node (\d+) is root.")
    branch_freqs_found = False
    base_freqs_found = False
    for line in lines:
        # Find all floating point numbers in this line
        line_floats_res = line_floats_re.findall(line)
        line_floats = [float(val) for val in line_floats_res]
        # Find base frequencies from baseml 4.3
        # Example match:
        # "Base frequencies: 0.20090 0.16306 0.37027 0.26577"
        if "Base frequencies" in line and line_floats:
            base_frequencies = {}
            base_frequencies["T"] = line_floats[0]
            base_frequencies["C"] = line_floats[1]
            base_frequencies["A"] = line_floats[2]
            base_frequencies["G"] = line_floats[3]
            parameters["base frequencies"] = base_frequencies
        # Find base frequencies from baseml 4.1:
        # Example match:
        # "base frequency parameters
        # " 0.20317 0.16768 0.36813 0.26102"
        elif "base frequency parameters" in line:
            base_freqs_found = True
        # baseml 4.4 returns to having the base frequencies on the next line
        # but the heading changed
        elif "Base frequencies" in line and not line_floats:
            base_freqs_found = True
        elif base_freqs_found and line_floats:
            base_frequencies = {}
            base_frequencies["T"] = line_floats[0]
            base_frequencies["C"] = line_floats[1]
            base_frequencies["A"] = line_floats[2]
            base_frequencies["G"] = line_floats[3]
            parameters["base frequencies"] = base_frequencies
            base_freqs_found = False
        # Find frequencies
        # Example match:
        # "freq: 0.90121 0.96051 0.99831 1.03711 1.10287"
        elif "freq: " in line and line_floats:
            parameters["rate frequencies"] = line_floats
        # Find branch-specific frequency parameters
        # Example match (note: I think it's possible to have 4 more
        # values per line, enclosed in brackets, so I'll account for
        # this):
        # (frequency parameters for branches) [frequencies at nodes] (see Yang & Roberts 1995 fig 1)
        #
        # Node #1 ( 0.25824 0.24176 0.25824 0.24176 )
        # Node #2 ( 0.00000 0.50000 0.00000 0.50000 )
        elif "(frequency parameters for branches)" in line:
            parameters["nodes"] = {}
            branch_freqs_found = True
        elif branch_freqs_found:
            if line_floats:
                node_res = re.match(r"Node \#(\d+)", line)
                node_num = int(node_res.group(1))
                node = {"root": False}
                node["frequency parameters"] = line_floats[:4]
                if len(line_floats) > 4:
                    node["base frequencies"] = {"T": line_floats[4],
                                                "C": line_floats[5],
                                                "A": line_floats[6],
                                                "G": line_floats[7]}
                parameters["nodes"][node_num] = node
            else:
                root_res = root_re.match(line)
                if root_res is not None:
                    # The root note terminates the branch-frequency section.
                    root_node = int(root_res.group(1))
                    parameters["nodes"][root_node]["root"] = True
                    branch_freqs_found = False
    return parameters
def student2nation(id_num):
    """
    Takes student id, returns nation id of the student.

    Delegates to school2nation -- presumably student ids carry the same
    nation-identifying prefix as school ids; confirm against the id scheme.
    """
    return school2nation(id_num)
def get_projects_with_builds(only_public=True, only_active_versions=True):
    """Returns a queryset of Projects with active only public by default builds.

    :param only_public: restrict to builds of publicly visible versions
    :param only_active_versions: restrict to builds of active versions
    """
    builds = Build.objects.filter(
        success=True,
        state='finished',
        version__active=True
    )
    if only_public:
        builds = builds.filter(version__privacy_level='public',)
    if only_active_versions:
        # NOTE(review): the base queryset above already filters
        # version__active=True, so this flag currently has no effect --
        # confirm whether the base filter should be removed instead.
        builds = builds.filter(version__active=True)
    filtered_projects = builds.values_list(
        'project',
        flat=True
    )
    return Project.objects.filter(
        pk__in=filtered_projects
    )
def encode_base64(filename):
    """Encode an image file's bytes as a base64 string.

    Args
        filename: image file path.
    Returns:
        the base64-encoded file contents, decoded to str.
    """
    with open(filename, "rb") as image_file:
        raw = image_file.read()
    return base64.b64encode(raw).decode()
def transfer_from_taoyuanagrichannel_to_taoyuanagriwaterdemand():
    """
    PySD-generated model component.

    Real Name: Transfer From TaoYuanAgriChannel To TaoYuanAgriWaterDemand
    Original Eqn: (Transfer From ShiMenReservoir To HouChiWeir*Ratio AgriWater ShiMenReservoir To HouChiWeir In TaoYuanAgriChannel)*(1-Channel Transfer Loss Rate )
    Units: m3
    Limits: (None, None)
    Type: component
    Subs: None
    """
    # Water reaching agricultural demand = the channel's share of the
    # reservoir-to-weir transfer, reduced by channel transfer losses.
    return (
        transfer_from_shimenreservoir_to_houchiweir()
        * ratio_agriwater_shimenreservoir_to_houchiweir_in_taoyuanagrichannel()
    ) * (1 - channel_transfer_loss_rate())
async def test_update_attributes(update_requests_mock_fixture, hass):
    """Test the drycontact binary sensor attributes are updated."""
    await setup_platform(hass, BINARY_SENSOR_DOMAIN)
    state = hass.states.get("binary_sensor.main_door_drycontact")
    # Sensor should report open (on) with the fixture's metadata applied.
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "main door DryContact"
    assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_WINDOW
    # Date/time attributes come from the mocked update payload.
    assert state.attributes.get("date") == "2022-01-09"
    assert state.attributes.get("time") == "19:30:00"
def Doxyfile_emitter(target, source, env):
    """
    Modify the target and source lists to use the defaults if nothing
    else has been specified.

    Dependencies on external HTML documentation references are also
    appended to the source list.
    """
    # Prepend the Doxyfile template so SCons rebuilds when it changes.
    template = env.File(env['DOXYFILE_FILE'])
    source.insert(0, template)
    return target, source
def test_cli_token_file():
    """Test the token command."""
    runner = CliRunner()
    # Use the test key shipped in the auth/ directory next to this module.
    path = os.path.dirname(os.path.abspath(__file__))
    key_file = os.path.join(path, 'auth', 'test.key')
    result = runner.invoke(btutils.main, args=['-f', key_file, 'token'])
    # The command should succeed with the provided key file.
    assert result.exit_code == 0
def _recursive_make_immutable(o):
    """Recursively transform an object into an immutable form
    This is a cdev core specific transformation that is used to convert Dict and List and other native python
    types into frozendict, frozenset, etc. The purpose is that the later set of objects are immutable in python
    and therefor can be used to directly compare against each other and be used as __hash__ able objects in
    things like dicts and `networkx` DAGs.
    Note the special case of handling Cloud Output Dict. These are identified as a dict with the key `id` that has
    a value `cdev_cloud_output`.
    Args:
        o (Any): original object
    Returns:
        transformed_os
    """
    # Note this is designed to be specifically used within the loading of a resource state. Therefor,
    # we do not much error handling and let an error in the structure of the data be passed up all the
    # way to `load_resource_state`
    if isinstance(o, list):
        # NOTE(review): lists become frozensets, so element order and
        # duplicates are lost -- presumably acceptable for hashing and
        # comparison purposes; confirm.
        return frozenset([_recursive_make_immutable(x) for x in o])
    elif isinstance(o, dict):
        if "id" in o:
            if o.get("id") == "cdev_cloud_output":
                # Cloud Output Dict special case (see docstring).
                tmp = {k: _recursive_make_immutable(v) for k, v in o.items()}
                if not o.get("output_operations"):
                    return frozendict(tmp)
                # Output operations need their dedicated loader before freezing.
                correctly_loaded_output_operations = _load_cloud_output_operations(
                    o.get("output_operations")
                )
                tmp["output_operations"] = correctly_loaded_output_operations
                return frozendict(tmp)
        return frozendict({k: _recursive_make_immutable(v) for k, v in o.items()})
    # Scalars and any other type pass through unchanged.
    return o
def retry_condition(exception):
    """Return True if we should retry (HTTPError or AttributeError), False otherwise."""
    should_retry = isinstance(exception, (HTTPError, AttributeError))
    if should_retry:
        print(f'HTTP error occurred: {exception}')  # Python 3.6
    return should_retry
def mergesort(input_arr):
    """
    Sort the array via recursive merge sort.

    Time complexity: O(n log(n)); space complexity: O(n).

    Args:
        input_arr(array): Input array with numbers to be sorted
    Returns:
        sorted_arr(array) Sorted array with numbers in ascending order
    """
    size = len(input_arr)
    if size <= 1:
        # Base case: zero or one element is already sorted.
        return input_arr
    half = size // 2
    sorted_left = mergesort(input_arr[:half])
    sorted_right = mergesort(input_arr[half:])
    return _merge(sorted_left, sorted_right)
def _clean_annotated_text(text):
"""Cleans text from the format that it was presented to annotators in the
S.M.A.R.T data annotation tool. Splits the title from the abstract text
and strips any trailing whitespace.
Returns:
title (str): The project title
text (str): The project abstract
"""
text = text.split('=====')
title = text[1].strip()
abstract = text[-1].strip()
return title, abstract | 5,325,440 |
def build_test_costs():
    """
    Build test costs

    Runs XmissionCostCreator against the baseline H5 / ISO regions inputs,
    adding the transmission-barrier extra layer, without writing GeoTIFFs
    (tiff_dir=None).
    """
    extra_layers = {'layers':
                    {'transmission_barrier':
                     os.path.join(TESTDATADIR, 'xmission',
                                  'ri_trans_barriers.tif')}}
    XmissionCostCreator.run(BASELINE_H5, ISO_REGIONS_F, excl_h5=EXCL_H5,
                            slope_layer='ri_srtm_slope', nlcd_layer='ri_nlcd',
                            tiff_dir=None, default_mults=TEST_DEFAULT_MULTS,
                            extra_layers=extra_layers)
def get_or_create_event_loop():
    """
    Tries to get the current event loop. If not found creates a new one.

    Returns
    -------
    event_loop : ``EventThread``
    """
    try:
        event_loop = get_event_loop()
    except RuntimeError:
        # No current loop for this thread: create a fresh, non-daemon one.
        event_loop = create_event_loop(daemon=False)
    return event_loop
def tnaming_Displace(*args):
    """
    * Applies the Location to the shapes of the label and of its sub-labels.
    :param label:
    :type label: TDF_Label &
    :param aLocation:
    :type aLocation: TopLoc_Location &
    :param WithOld: default value is Standard_True
    :type WithOld: bool
    :rtype: void
    """
    # SWIG-generated wrapper: forwards directly to the native TNaming binding.
    return _TNaming.tnaming_Displace(*args)
def main():
    """ Set up the game and run the main game loop """
    pygame.init()  # Prepare the pygame module for use
    surfaceSize = 480  # Desired physical surface size, in pixels.
    clock = pygame.time.Clock()  # Force frame rate to be slower
    # Create surface of (width, height), and its window.
    mainSurface = pygame.display.set_mode((surfaceSize, surfaceSize))
    programState = "initialize"
    # Fix: define before the loop so the cleanup below cannot raise NameError
    # when the window is closed before the microbit was set up.
    microbit = None
    while True:
        ev = pygame.event.poll()  # Look for any event
        if ev.type == pygame.QUIT:  # Window close button clicked?
            break  # ... leave game loop
        if programState == "initialize":
            # Set up the initial rects for the 3 bar graphs (x, y, w, h).
            startingY = 240
            gyroXRectBase = [50, startingY, 25, 50]
            gyroYRectBase = [100, startingY, 25, 0]
            gyroZRectBase = [150, startingY, 25, 0]
            programState = "set up microbit"
        elif programState == "set up microbit":
            print('looking for microbit')
            microbit = findMicrobitComPort()
            if not microbit:
                print('microbit not found')
                continue
            print('opening and monitoring microbit port')
            microbit.open()
            programState = "display"
        elif programState == "display":
            # Grab the data from the microbit
            line = microbit.readline().decode('utf-8')
            if line:  # If it isn't a blank line
                # Last three fields are the gyro readings; anything before
                # them is treated as a label. (The original also assigned an
                # unused `var1, var2 = [1, 2]` here -- removed.)
                data = line.split()
                *label, gyroX, gyroY, gyroZ = data
                print(f' ({gyroX}, {gyroY}, {gyroZ})')
                normalizeGyroValue(gyroX, startingY, gyroXRectBase)
                normalizeGyroValue(gyroY, startingY, gyroYRectBase)
                normalizeGyroValue(gyroZ, startingY, gyroZRectBase)
            mainSurface.fill((0, 200, 255))
            # Draw the bars on the screen
            pygame.draw.rect(mainSurface, (255, 0, 0), gyroXRectBase)
            pygame.draw.rect(mainSurface, (0, 255, 0), gyroYRectBase)
            pygame.draw.rect(mainSurface, (0, 0, 255), gyroZRectBase)
            pygame.display.flip()  # Update the display
            clock.tick(60)  # Force frame rate to be slower
    # -----------------END of main while True loop!------------------------------
    if microbit is not None:
        microbit.close()  # Close the microbit serial connection
    pygame.quit()  # Once we leave the loop, close the window.
def temp_database(con, test_data_db: str) -> typing.Generator[str, None, None]:
    """Create a temporary database, yield its name, then drop it.

    Parameters
    ----------
    con : ibis.heavyai.HeavyDBClient
    test_data_db : str
        Database to switch back to during cleanup.

    Yields
    ------
    str
        Name of the freshly created temporary database.
    """
    db_name = _random_identifier('database')
    con.create_database(db_name)
    try:
        yield db_name
    finally:
        # Always restore the original database and drop the temporary one,
        # even if the consuming test raised.
        con.set_database(test_data_db)
        con.drop_database(db_name, force=True)
# | 5,325,445 |
def get_capital_np(markets,signals,size,commiRate,climit = 4, wlimit = 2, op=True):
    """Backtest the labels' profit and loss using numpy arrays.

    ``op`` selects whether an order is matched on the current tick's signal
    (True) or on the previous tick's signal, i.e. delayed by one tick (False).

    Parameters (NOTE(review): semantics inferred from usage - confirm):
        markets   : 2-D array of quotes; column 0 is the price longs are
                    opened / shorts are closed at, column 1 the price longs
                    are closed / shorts are opened at (presumably ask/bid).
        signals   : per-tick signal; >0 open long, <0 open short.
        size      : contract multiplier applied to price differences.
        commiRate : commission rate charged on both legs when closing.
        climit    : stop-loss distance in price units (default 4).
        wlimit    : take-profit distance in price units (default 2).

    Returns
    -------
    (pnls, actions) : realized PnL per tick, and trade action per tick
                      (+1 buy, -1 sell, 0 nothing).
    """
    postions = np.zeros(len(signals))  # position held after each tick (-1/0/+1)
    actions = np.zeros(len(signals))   # trade executed at each tick
    costs = np.zeros(len(signals))     # entry price of the open position
    pnls = np.zeros(len(signals))      # realized PnL at each tick
    lastsignal = 0
    lastpos = 0
    lastcost = 0
    num = 0
    for num in range(1,len(signals)):
        # Default: carry the previous tick's state forward unchanged.
        postions[num] = lastpos
        actions[num] = 0
        costs[num] = lastcost
        pnls[num] = 0
        # Take-profit / stop-loss
        if lastpos > 0 and \
            (markets[num,1]<=lastcost-climit or markets[num,1]>=lastcost+wlimit):
            # Close the long: realize PnL net of commission on both legs.
            postions[num] = 0
            actions[num] = -1
            costs[num] = 0
            fee = (markets[num,1]+lastcost)*size*commiRate
            pnls[num] = (markets[num,1]-lastcost)*size-fee
        elif lastpos < 0 and \
            (markets[num,0]>=lastcost+climit or markets[num,0]<=lastcost-wlimit):
            # Close the short.
            postions[num] = 0
            actions[num] = 1
            costs[num] = 0
            fee = (markets[num,0]+lastcost)*size*commiRate
            pnls[num] = (lastcost-markets[num,0])*size-fee
        # Open a position
        if op:
            # Match against the *current* tick's signal instead of the
            # previous one (which lastsignal still holds at this point).
            lastsignal = signals[num]
        if lastsignal > 0 and lastpos == 0:
            postions[num] = 1
            actions[num] = 1
            costs[num] = markets[num,0]
        elif lastsignal < 0 and lastpos == 0:
            postions[num] = -1
            actions[num] = -1
            costs[num] = markets[num,1]
        lastpos = postions[num]
        lastcost = costs[num]
        lastsignal = signals[num]
    return pnls,actions | 5,325,446 |
def slicename_to_hostname(vs_name):
    """Converts a vserver slice name into a canonical FQDN.
    Slice names use a pattern like: <some site>_<some name>.
    Example:
        If vs_name is 'mlab_utility' and the system hostname is
        'mlab4.nuq01.measurement-lab.org', then slicename_to_hostname will
        return 'utility.mlab.mlab4.nuq01.measurement-lab.org'.
    Args:
        vs_name: str, name of a vserver slice, e.g. mlab_utility.
    Returns:
        str, the canonical FQDN based on system hostname and slice name.
    """
    site, sep, rest = vs_name.partition('_')
    if not sep:
        # No underscore: use the slice name verbatim as the prefix.
        prefix = vs_name
    else:
        # 'site' is the PlanetLab site name; 'rest' is user-chosen.
        # Place the user-chosen part first, then the site name.
        prefix = '.'.join(rest.split('_') + [site])
    return '{0}.{1}'.format(prefix, _root_hostname)
# | 5,325,447 |
def unregister_server(zkclient, hostname):
    """Remove a server's presence node from Zookeeper, if it exists."""
    _LOGGER.info('Unregistering server %s', hostname)
    presence_path = find_server(zkclient, hostname)
    if not presence_path:
        return
    zkutils.ensure_deleted(zkclient, presence_path)
# | 5,325,448 |
def swim_for_a_day(life_counts: Dict[int, int]):
    """Advance the shoal one day and return the new timer histogram.

    All timers are decremented; fish whose timer would reach -1 spawn
    offspring at timer 8 and reset themselves to timer 6, merging with
    any fish that just moved down from 7 days.
    """
    aged = {}
    for days, population in life_counts.items():
        aged[days - 1] = population
    spawners = aged.pop(-1, None)
    if spawners is not None:
        aged[8] = spawners
        aged[6] = aged.get(6, 0) + spawners
    return aged
# | 5,325,449 |
def page_not_found(error):
    """Return the generic 404 message.

    The error object is accepted (and ignored) so this can be registered
    as a framework error handler.
    """
    message = "Unable to find Distill."
    return message
# | 5,325,450 |
def dict_to_obj(our_dict):
    """Rebuild a custom object from a dict carrying class metadata.

    If the dict has "__class__"/"__module__" keys, the named module is
    imported, the class looked up, and an instance constructed from the
    remaining keys (the metadata keys are popped, mutating *our_dict*).
    Plain dicts without metadata are returned unchanged.
    """
    if "__class__" not in our_dict:
        return our_dict
    # pop() strips the metadata so only constructor arguments remain.
    class_name = our_dict.pop("__class__")
    module_name = our_dict.pop("__module__")
    # __import__ is used because the module name is only known at runtime.
    module = __import__(module_name, globals(), locals(), [class_name])
    target_class = getattr(module, class_name)
    # Dictionary unpacking supplies the instance arguments.
    return target_class(**our_dict)
# | 5,325,451 |
def test_generate_model_circuit_seed():
    """A model circuit must be fully determined by its random seed."""
    seeded_one = quantum_volume.generate_model_circuit(
        3, 3, random_state=np.random.RandomState(1))
    seeded_same = quantum_volume.generate_model_circuit(
        3, 3, random_state=np.random.RandomState(1))
    seeded_other = quantum_volume.generate_model_circuit(
        3, 3, random_state=np.random.RandomState(2))
    # Same seed -> identical circuit; different seed -> different circuit.
    assert seeded_one == seeded_same
    assert seeded_same != seeded_other
# | 5,325,452 |
def soup(path):
    """
    Parse a news article file and print a summary report.

    Loads the file at *path* with NewsSoup and prints (in Chinese labels)
    the original and resolved paths, channel, title, date, author, body
    text and effective-content ratio, framed by separator rules.
    """
    print('-' * 75)
    nsoup = NewsSoup(path)
    print('原始路徑: {}'.format(path))         # original path
    print('最終路徑: {}'.format(nsoup.path))   # final (resolved) path
    print('頻道: {}'.format(nsoup.channel))    # channel
    print('標題: {}'.format(nsoup.title()))    # title
    ndt = nsoup.date()
    if ndt is not None:
        print('日期: {}'.format(ndt.strftime('%Y-%m-%d %H:%M:%S')))  # date
    else:
        print('日期: None')
    print('記者: {}'.format(nsoup.author()))   # reporter / author
    print('內文:')                             # body text follows
    print(nsoup.contents())
    # Effective-content rate, printed as a percentage.
    print('有效內容率: {:.2f}%'.format(nsoup.effective_text_rate() * 100))
    print('-' * 75) | 5,325,453 |
def display_fips( collection_of_fips, fig, **kwargs ):
    """
    Method that is very similar to :py:meth:`display_fips_geom <covid19_stats.engine.viz.display_fips_geom>`, except this *also* displays the FIPS code of each county. For example, for `Rhode Island`_, this is.
    .. _viz_display_fips_rhodeisland:
    .. figure:: /_static/viz/viz_display_fips_rhodeisland.png
       :width: 100%
       :align: left
       Demonstration of this method showing the counties in `Rhode Island`_. The FIPS code of each county is shown in red. One can extract the patches in this object to manually change the colors of these county polygons.
    Here are the arguments.
    :param collection_of_fips: can be a :py:class:`list`, :py:class:`set`, or other iterable of FIPS codes to visualize and label.
    :param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to draw this :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
    :rtype: :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`
    .. _`Rhode Island`: https://en.wikipedia.org/wiki/Rhode_Island
    """
    bdict = core.get_boundary_dict( collection_of_fips )
    # Bounding box enclosing every boundary polygon of every county.
    bbox = gis.calculate_total_bbox( chain.from_iterable( bdict.values( ) ) )
    ax = create_and_draw_fromfig( fig, bbox, **kwargs )
    # Semi-transparent matplotlib default blue for the county faces.
    fc = list( to_rgba( '#1f77b4' ) )
    fc[-1] = 0.25
    for fips in sorted( bdict ):
        for shape in bdict[ fips ]:
            # shape is an (N, 2) array of (lon, lat) vertices in PlateCarree.
            poly = Polygon(
                shape, closed = True,
                edgecolor = 'k', linewidth = 2.0, linestyle = 'dashed',
                facecolor = tuple( fc ), alpha = 1.0, transform = ccrs.PlateCarree( ) )
            ax.add_patch( poly )
            # Label each polygon with its FIPS code at its vertex centroid.
            lng_cent = shape[:,0].mean( )
            lat_cent = shape[:,1].mean( )
            ax.text(
                lng_cent, lat_cent, fips, fontsize = 10, fontweight = 'bold', color = 'red',
                transform = ccrs.PlateCarree( ) )
    return ax | 5,325,454 |
def write_html_header(file):
    """Write HTML header.

    Emits the static page prologue for the Things 3 KanbanView page:
    doctype, <head> (stylesheet + title), the logo banner, and the opening
    <article>/<div> layout wrappers.

    NOTE(review): the opening <article class='some-page-wrapper'> and
    <div class='row'> tags are presumably closed by a matching footer
    writer elsewhere - keep them balanced when editing this template.

    :param file: writable text file-like object receiving the markup.
    """
    message = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link rel="stylesheet" href="./resources/kanban.css">
<title>KanbanView for Things 3</title>
</head>
<body>
<header>
<a href="https://kanbanview.app"
title="visit product page" target="_blank">
<picture id="app">
<source class="logo" srcset="resources/logo-dark.png"
media="(prefers-color-scheme: dark)">
<img class="logo" src="resources/logo.png" alt="logo">
</picture>
</a>
</header>
<article class='some-page-wrapper'>
<div class='row'>
"""
    file.write(message) | 5,325,455 |
def _apply_constraints(password_hash, size, is_non_alphanumeric):
    """
    Fiddle with the password a bit after hashing it so that it will
    get through most website filters. We require one upper and lower
    case, one digit, and we look at the user's password to determine
    if there should be at least one alphanumeric or not.

    :param password_hash: str, the raw generated password material.
    :param size: int, desired output length budget.
    :param is_non_alphanumeric: bool, whether symbols are allowed at all.
    """
    # Reserve up to 4 trailing characters of the hash as an "entropy pool"
    # used to repair the password; the rest is kept verbatim as the prefix.
    starting_size = 0 if size < 4 else size - 4
    result = password_hash[:starting_size]
    # Infinite stream of code points drawn from the reserved tail, padded
    # with zeros once the tail is exhausted (zeros become NUL chars below).
    extras = itertools.chain((ord(ch) for ch in password_hash[starting_size:]),
                             itertools.repeat(0))
    extra_chars = (chr(ch) for ch in extras)
    def next_between(start, end):
        # Deterministically pick a character in [start, end] from the stream.
        interval = ord(end) - ord(start) + 1
        offset = next(extras) % interval
        return chr(ord(start) + offset)
    # Guarantee at least one char from each required class (upper, lower,
    # digit): consume a stream char if the class is already present,
    # otherwise inject a char of that class.
    chars_ranges = (("A", "Z"), ("a", "z"), ("0", "9"))
    for first, last in chars_ranges:
        any_of_chars = re.compile("[{}-{}]".format(first, last))
        if any_of_chars.search(result):
            result += next(extra_chars)
        else:
            result += next_between(first, last)
    # Honor the non-alphanumeric requirement: keep a stream char if a symbol
    # is already present and allowed, otherwise append a '+' placeholder.
    non_word = re.compile(r"\W")
    if non_word.search(result) and is_non_alphanumeric:
        result += next(extra_chars)
    else:
        result += "+"
    # If symbols are NOT allowed, replace every non-word char (including the
    # '+' above) with an uppercase letter, one at a time.
    while non_word.search(result) and not is_non_alphanumeric:
        result = non_word.sub(next_between("A", "Z"), result, 1)
    # Rotate the string by a stream-derived amount so the injected chars are
    # not always at the end.
    flip_place = next(extras) % len(result)
    result = result[flip_place:] + result[:flip_place]
    # The zero padding may have produced NUL characters at the boundaries.
    return result.strip("\x00") | 5,325,456 |
def SplitLineRecursive(linepts,i,j,THRESHOLD=5.0,ds_min=50.0):
    """
    Choose best point at which to split a line to minimize total reprojection error

    Given polyline vertices ``linepts`` (a 2 x N array, indexed as
    ``linepts[:, k]``) and endpoint indices ``i`` < ``j``, returns the index
    ``k`` at which the span [i, j] should be split.  If the straight chord
    i->j already fits every intermediate point within ``THRESHOLD``, the span
    is split at its arc-length midpoint only when it is longer than
    ``ds_min`` (otherwise ``j`` is returned, meaning "do not split").

    NOTE(review): despite the name, this function does not recurse - the
    caller is expected to drive the recursion with the returned index.
    ``max_errors1``/``max_errors2`` are filled but never used (see the
    commented-out ``max_err1``/``max_err2`` below).
    """
    # Worst deviation of the intermediate points from the chord i->j.
    max_err = np.max(ProjectionError(np.stack((linepts[:,i],linepts[:,j])).T, linepts[:,i:j]))
    if max_err < THRESHOLD:
        # Chord fits: only split long spans, at the arc-length midpoint.
        ds = np.cumsum(np.sqrt(np.sum(np.diff(linepts[:,i:j])**2,axis=0)))
        if ds[-1] > ds_min:
            k = i + np.argmin((ds - ds[-1]/2.)**2) + 1
            return k
        else:
            return j
    # Otherwise pick the split k that minimizes the summed per-point
    # reprojection error of the two resulting chords (i->k and k->j),
    # each normalized by its number of points.
    errors1 = np.zeros(j-(i+1))
    errors2 = np.zeros(j-(i+1))
    max_errors1 = np.zeros(j-(i+1))
    max_errors2 = np.zeros(j-(i+1))
    for k in range(i+1,j):
        l1 = np.stack((linepts[:,i],linepts[:,k])).T
        l2 = np.stack((linepts[:,k],linepts[:,j])).T
        errors1[k-i-1] = np.sum(ProjectionError(l1, linepts[:,i+1:k])) / (k-i)
        errors2[k-i-1] = np.sum(ProjectionError(l2, linepts[:,k+1:j])) / (j-k)
        max_errors1[k-i-1] = np.max(ProjectionError(l1, linepts[:,i:k]))
        max_errors2[k-i-1] = np.max(ProjectionError(l2, linepts[:,k:j]))
    k = i+1 + np.argmin(errors1 + errors2)
    # max_err1 = np.max(max_errors1)
    # max_err2 = np.max(max_errors2)
    return k | 5,325,457 |
def create_report() -> FlaskResponse:
    """Creates a new report.
    Note: This is the existing implementation, currently used for the v1 endpoint.
    Returns:
        FlaskResponse: details of the report just created or a list of errors with the corresponding HTTP status code.
    """
    logger.info("Creating a new report")
    try:
        report_name = create_report_job()
        return created(reports=get_reports_details(report_name))
    except Exception as exc:
        # Any failure is reported as a 500 with the exception type attached.
        msg = f"{ERROR_UNEXPECTED} ({type(exc).__name__})"
        logger.error(msg)
        logger.exception(exc)
        return internal_server_error(msg)
# | 5,325,458 |
def get_arguments():
    """Build the BiG-MAP.map argument parser and parse the command line.

    Returns a ``(parser, namespace)`` tuple; all per-flag help is suppressed
    because the full usage text below documents every option.
    """
    parser = argparse.ArgumentParser(description="",
                                     usage='''
______________________________________________________________________
BiG-MAP map: maps the reads to the predicted MGCs
______________________________________________________________________
Generic command: python3 BiG-MAP.map.py {-I1 [mate-1s] -I2 [mate-2s] | -U [samples]} -O [outdir] -F [family] [Options*]
Maps the metagenomic/metatranscriptomic reads to the fasta reference
file and outputs RPKM read counts in .csv and BIOM format. Use
BiG-MAP_process conda environment.
Data inputs: either paired or unpaired
    -I1 Provide the mate 1s of the paired metagenomic and/or
        metatranscriptomic samples here. These samples should be
        provided in fastq-format (.fastq, .fq, .fq.gz). Also, this
        can be a space separated list from the command line.
    -I2 Provide the mate 2s of the paired metagenomic and/or
        metatranscriptomic samples here. These samples should be
        provided in fastq-format (.fastq, .fq, .fq.gz). Also, this
        can be a space separated list from the command line.
    -U  Provide the unpaired metagenomic/metatranscriptomic samples
        here. These samples should be provided in fastq-format
        (.fastq, .fq, .fq.gz). Also, this can be a space separated
        list from the command line.
File inputs: either separated or pickled:
    -F  Directory with all the output files from the family module
    -P  Input files are in pickled format (named: BiG-MAP.[name].pickle).
        The format of the pickled file: fasta file, GCF json file, and
        optionally a bed file and/or BiG-SCAPE GCF dictionary.
Obligatory arguments:
    -O  Name of the output directory for where the output files are going
        to be written. Default = current folder (.)
Options:
    -b  Outputs the resulting read counts in biom format (v1.0) as
        well. This will be useful to analyze the results in
        BiG-MAP.analyse. Therefore, it is important to include
        the metadata here as well: this metagenomical data should
        be in the same format as the example metadata
    -f  Input files are in fasta format (.fna, .fa, .fasta): True/False.
        Default = False.
    -s  Bowtie2 setting:
        END-TO-END mode: very-fast, fast, sensitive, very-sensitive
        LOCAL mode: very-fast-local, fast-local, sensitive-local,
        very-sensitive-local. Default = fast
    -a  Ouput read average values across GCFs instead of summed counts:
        True/False. Default = False.
    -th Number of used threads in the bowtie2 mapping step. Default = 6
______________________________________________________________________
''')
    # Per-flag help is argparse.SUPPRESS: the usage banner above is the
    # single source of documentation for these options.
    parser.add_argument("-O", "--outdir", help=argparse.SUPPRESS, required=True)
    parser.add_argument("-I1","--fastq1", nargs='+',help=argparse.SUPPRESS, required=False)
    parser.add_argument("-I2","--fastq2",nargs='+',help=argparse.SUPPRESS, required = False)
    parser.add_argument("-U","--U_fastq",nargs='+',help=argparse.SUPPRESS, required = False)
    parser.add_argument("-F", "--family", help=argparse.SUPPRESS, required=False)
    parser.add_argument("-P", "--pickle_file", help=argparse.SUPPRESS, required=False)
    parser.add_argument( "-b", "--biom_output",
                         help=argparse.SUPPRESS, type=str, required = False)
    parser.add_argument( "-f", "--fasta", help=argparse.SUPPRESS,
                         type=str, required = False, default=False)
    parser.add_argument( "-a", "--average", help=argparse.SUPPRESS,
                         type=str, required = False, default=False)
    parser.add_argument( "-s", "--bowtie2_setting", help=argparse.SUPPRESS,
                         type=str, required = False, default="fast")
    parser.add_argument( "-th", "--threads", help=argparse.SUPPRESS,
                         type=int, required = False, default=6)
    return(parser, parser.parse_args()) | 5,325,459 |
def sample_exercise():
    """Create and persist a sample exercise fixture for tests."""
    defaults = {'name': 'exercise name', 'duration': 10, 'calories': 10}
    return ExerciseModel.objects.create(**defaults)
# | 5,325,460 |
def test_message_defaults():
    """
    Test that message defaults are applied when the corresponding arguments have been omitted.
    """
    mailer = DummyMailer(subject='default_subject',
                         sender='default_sender@example.org',
                         to='default_recipient@example.org',
                         cc='default_cc@example.org',
                         bcc='default_bcc@example.org',
                         charset='utf-16')
    msg = mailer.create_message(plain_body='Hello åäö')
    # Every header falls back to the mailer-level default.
    expected_headers = {'Subject': 'default_subject',
                        'From': 'default_sender@example.org',
                        'To': 'default_recipient@example.org',
                        'Cc': 'default_cc@example.org',
                        'Bcc': 'default_bcc@example.org'}
    for header, value in expected_headers.items():
        assert msg[header] == value
    assert msg.get_charsets() == ['utf-16']
# | 5,325,461 |
def _der_kinetic_prim_pair(upbf, vpbf):
    """Gradient of <v|T|u> w.r.t. the center of primitive ``vpbf``.

    Differentiating a Cartesian Gaussian with respect to its own center
    raises and lowers the angular-momentum power along that axis:
        d/dXa g(l) = sqrt(alpha*(2l+1)) g(l+1) - 2l*sqrt(alpha/(2l-1)) g(l-1)
    Returns the (x, y, z) contributions for this primitive pair.
    """
    alpha = vpbf.exp()
    l, m, n = vpbf.powers()
    origin = vpbf.origin()
    coefs = upbf.coef() * vpbf.coef()
    # One scratch primitive, re-pointed at each required power triplet.
    v = PGBF(alpha, origin, (l, m, n))

    def axis_term(raised, lowered, ang):
        # "Raised" term: power increased by one on this axis.
        v.reset_powers(*raised)
        v.normalize()
        term = sqrt(alpha * (2.0 * ang + 1.0)) * coefs * v.kinetic(upbf)
        if ang > 0:
            # "Lowered" term exists only when the power can be decreased.
            v.reset_powers(*lowered)
            v.normalize()
            term += -2 * ang * sqrt(alpha / (2.0 * ang - 1.0)) * coefs * v.kinetic(upbf)
        return term

    return (axis_term((l + 1, m, n), (l - 1, m, n), l),
            axis_term((l, m + 1, n), (l, m - 1, n), m),
            axis_term((l, m, n + 1), (l, m, n - 1), n))


def der_kinetic_integral(a, bfi, bfj):
    """
    Derivative of the kinetic-energy integral T_ij with respect to the
    position of atom ``a``.  The kinetic energy operator itself does not
    depend on atomic positions, so only the Gaussians are differentiated.
    Four cases arise:

    Case 1: neither basis function depends on atom a's position:
        dT_ij/dXa = 0
    Cases 2 and 3: exactly one basis function depends on atom a:
        dT_ij/dXa = integral{dr dg_i/dXa T g_j }   (or the g_j analogue)
    Case 4: both depend on atom a:
        dT_ij/dXa = integral{dr dg_i/dXa T g_j + g_i T dg_j/dXa }

    Returns the tuple (dT/dXa, dT/dYa, dT/dZa).
    """
    dTij_dXa, dTij_dYa, dTij_dZa = 0.0, 0.0, 0.0
    # A primitive pair contributes only when the differentiated function
    # (``centered``, identified by its atid) sits on atom a; iterating both
    # orderings covers cases 2, 3 and (by summation) 4.
    for centered, other in ((bfi, bfj), (bfj, bfi)):
        if centered.atid != a:
            continue
        for upbf in other.prims():
            for vpbf in centered.prims():
                dx, dy, dz = _der_kinetic_prim_pair(upbf, vpbf)
                dTij_dXa += dx
                dTij_dYa += dy
                dTij_dZa += dz
    return dTij_dXa, dTij_dYa, dTij_dZa
# | 5,325,462 |
def get_celeba():
    """Download (if needed) and preprocess the CelebA dataset.

    Returns (train, test) lists of (image, label) pairs: images are resized
    to 64x64 tensors, and labels are restricted to 18 of the 40 attributes
    (Appendix C.1).  The 'train' and 'valid' splits are merged for training.
    """
    preprocess = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor()])
    # Boolean mask selecting the 18 attributes described in Appendix C.1.
    mask = torch.tensor(
        [False, True, False, True, False, True, False, False, True, True,
         False, True, True, True, False, True, False, False, True, False,
         True, False, False, False, True, False, True, False, True, False,
         False, True, False, True, False, False, False, False, True, True])
    splits = {
        name: datasets.CelebA('./data', split=name, transform=preprocess,
                              download=True)
        for name in ('train', 'valid', 'test')
    }
    train = [(img, label[mask])
             for img, label in chain(splits['train'], splits['valid'])]
    test = [(img, label[mask]) for img, label in splits['test']]
    return train, test
# | 5,325,463 |
def waa_adjust_baseline(rsl, baseline, wet, waa_max, delta_t, tau):
    """Calculate baseline adjustment due to wet antenna attenuation.
    Parameters
    ----------
    rsl : iterable of float
        Time series of received signal level
    baseline : iterable of float
        Time series of baseline for rsl
    wet : iterable of int or iterable of float
        Time series with wet/dry classification information
    waa_max : float
        Maximum value of wet antenna attenuation
    delta_t : float
        Parameter for wet antenna attenuation model
    tau : float
        Parameter for wet antenna attenuation model
    Returns
    -------
    iterable of float
        Baseline with the wet antenna attenuation added
    """
    # Accept pandas Series transparently by working on the raw ndarray.
    # (isinstance instead of type() == so Series subclasses work too.)
    if isinstance(rsl, pd.Series):
        rsl = rsl.values
    if isinstance(baseline, pd.Series):
        baseline = baseline.values
    if isinstance(wet, pd.Series):
        wet = wet.values
    # The numba kernel requires float64 arrays.
    rsl = rsl.astype(np.float64)
    baseline = baseline.astype(np.float64)
    wet = wet.astype(np.float64)
    waa = _numba_waa_schleiss(rsl, baseline, waa_max, delta_t, tau, wet)
    return baseline + waa
# | 5,325,464 |
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """ Applies linear warmup of learning rate from 0 and decay to 0.

    Builds a PaddlePaddle (fluid) learning-rate schedule variable that ramps
    linearly from 0 to ``learning_rate`` over ``warmup_steps`` steps, then
    decays linearly (polynomial decay with power=1.0) down to 0 at
    ``num_train_steps``.  Returns the scheduled-LR global variable.
    """
    with fluid.default_main_program()._lr_schedule_guard():
        # Persistable scalar holding the current LR; updated in-graph below.
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")
        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter(
        )
        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                # Warmup phase: LR grows linearly with the step counter.
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                # Decay phase: linear (power=1.0) polynomial decay to 0.
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)
        return lr | 5,325,465 |
def _save_name(name):
    """Saves the dictionary as intervalTree files"""
    for letter, tree in name.items():
        directory = './pkl/name/' + letter + '.pkl'
        # Highest pickle protocol (-1) for the most compact output.
        with open(directory, 'wb') as f:
            pickle.dump(tree, f, -1)
        print("Saved to " + directory)
# | 5,325,466 |
def update_mailing_list_extended(mailing_list):
    """
    Exercise skeleton: filter unsubscribed users out of the mailing list.

    NOTE(review): this is an unfinished template, not working code:
    - it iterates ``mailing_list_copy``, which is not defined in this scope
      (presumably a module-level copy of *mailing_list* - confirm);
    - the ``if ()`` condition and its body are left for the reader to fill
      in, so the function is currently syntactically incomplete;
    - the ``mailing_list`` parameter is never used.
    """
    # Checks it the flag `opt-out` is present. You can use lower() to lowercase the flags and contemplate both
    # `opt-out` and `OPT-OUT` cases
    # Then, checks for the presence of the `unsubscribed` flag Finally,
    # checks if the email address contains `@gmail` provider
    for key, value in mailing_list_copy.items():
        # Your conditional logic to filter out the unsubscribed users
        if ():
            # Remove the key if one of the above conditions is satisfied
| 5,325,467 |
def poll(server):
    """
    Function for polling the LP servers and getting the results.
    Calls handle() for everything it receives; loops forever, sleeping
    5 minutes when no worker is configured and 1 minute after a failed
    long-poll request.
    """
    while True:
        try:
            # Figure out everything we need.
            url = known[server]
            username, password = get_single_worker(server)
            # If we have no user, wait 5 minutes and try again.
            if username is None:
                gevent.sleep(5 * 60)
                continue
            try:
                content, server_headers = bitHopper.Network.get_lp(
                    url, username, password, server)
            except Exception:
                # Network hiccup: log at debug level and retry in a minute.
                # (Was a bare `except:`, which also swallowed GreenletExit /
                # KeyboardInterrupt and made the greenlet unkillable.)
                logging.debug(traceback.format_exc())
                gevent.sleep(60)
                continue
            handle(content, server)
        except Exception:
            # Best-effort loop: log unexpected errors and keep polling.
            logging.error(traceback.format_exc())
# | 5,325,468 |
def space2():
    """Create a Space with two real dimensions."""
    space = Space()
    # Both dimensions are uniform reals on [0, 1].
    for dim_name in ("lr", "weight_decay"):
        space.register(Real(dim_name, "uniform", 0, 1))
    return space
# | 5,325,469 |
def calculate_thresh(twindow, pctile, skipna):
    """Calculate threshold for one cell grid at the time
    Parameters
    ----------
    twindow: xarray DataArray
        Stacked array timeseries with new 'z' dimension representing
        a window of width 2*w+1
    pctile: int
        Threshold percentile used to detect events
    skipna: bool
        If True percentile and mean function will use skipna=True.
        Using skipna option is much slower
    Returns
    -------
    thresh_climYear: xarray DataArray
        Climatological threshold
    """
    # Day-of-year climatological percentile across the stacked window axis.
    thresh_climYear = (twindow
                       .groupby('doy')
                       .quantile(pctile/100., dim='z', skipna=skipna))
    # calculate value for 29 Feb from mean of 28-29 feb and 1 Mar
    # (doy 60 is Feb 29 in the leap-year day-of-year convention used here).
    thresh_climYear = thresh_climYear.where(thresh_climYear.doy!=60,
                                            feb29(thresh_climYear))
    # Re-chunk so 'doy' is a single dask chunk for downstream operations.
    thresh_climYear = thresh_climYear.chunk({'doy': -1})
    return thresh_climYear | 5,325,470 |
def current_url(request, driver, login_action, login_page):
    """Use login button element depends from browser and get current page url"""
    browser_name = request.config.getoption("--browser")
    print(browser_name)
    # Each browser needs a different element interaction to log in.
    login_actions = {
        "firefox": login_page.login,
        "chrome": login_page.submit,
    }
    perform_login = login_actions.get(browser_name)
    if perform_login is not None:
        perform_login()
        driver.implicitly_wait(1)
    yield driver.current_url
# | 5,325,471 |
def iterparshios(year, israel=False):
    """Generate all the parshios in the year.
    Parameters
    ----------
    year : int
        The Hebrew year to get the parshios for.
    israel : bool, optional
        ``True`` if you want the parsha according to the Israel schedule
        (with only one day of Yom Tov). Defaults to ``False``
    Yields
    ------
    list of ints or ``None``
        A list of the numbers of the parshios for the next Shabbos in the given year.
        Yields ``None`` for a Shabbos that doesn't have its own parsha
        (i.e. it occurs on a yom tov).
    """
    schedule = _gentable(year, israel)
    # Yielding the values walks the table in its key (Shabbos) order.
    yield from schedule.values()
# | 5,325,472 |
def test():
    """Test code.

    Hardware smoke test for an ILI9341-style display driven over SPI
    (MicroPython, ESP32 pin numbers below): draws a colour wheel of lines,
    then concentric HSV circles.  Requires attached hardware.
    """
    # Baud rate of 14500000 seems about the max
    spi = SPI(2, baudrate=14500000, sck=Pin(18), mosi=Pin(23))
    display = Display(spi, dc=Pin(17), cs=Pin(5), rst=Pin(16))
    x, y = 0, 0
    angle = 0.0
    # Loop all angles from 0 to 2 * PI radians
    while angle < PI2:
        # Calculate x, y from a vector with known length and angle
        x = int(CENTER_X * sin(angle) + HALF_WIDTH)
        y = int(CENTER_Y * cos(angle) + HALF_HEIGHT)
        # Hue follows the angle around the wheel.
        color = color565(*hsv_to_rgb(angle / PI2, 1, 1))
        display.draw_line(x, y, CENTER_X, CENTER_Y, color)
        angle += ANGLE_STEP_SIZE
    sleep(5)
    # Shrinking filled circles sweep the hue from the rim inwards.
    for r in range(CENTER_X, 0, -1):
        color = color565(*hsv_to_rgb(r / HALF_WIDTH, 1, 1))
        display.fill_circle(CENTER_X, CENTER_Y, r, color)
    sleep(9)
    display.cleanup() | 5,325,473 |
def edit_car(item_id):
    """
    Edit item

    Flask view: shows the edit form (GET) and applies the update (POST).
    Only the owner of the car record may edit it; the POST path is guarded
    by the module-level ``csrf_token``.

    :param item_id: primary key of the car record to edit.
    :return mix: a redirect on success/permission failure, or a rendered
        template otherwise.
    """
    # get user
    user = get_user_by_id(session['uid'])
    # Get car
    car = get_item_by_id(item_id)
    # Check the user is the owner
    if int(session['uid']) != int(car.author):
        flash('You don\'t have permission to edit it.', 'error')
        return redirect('/profile', 302)
    # Get token (1-hour auth token rendered into the form)
    token = user.generate_auth_token(3600)
    if request.method == 'POST' and request.form['csrf_token'] == csrf_token:
        _car = dict()
        # cleaning data; clean() raises TypeError on missing/empty fields
        try:
            _car['description'] = clean(request.form['description'])
            _car['title'] = clean(request.form['title'])
            _car['model'] = clean(request.form['model'])
            _car['price'] = clean(request.form['price'])
            _car['brand'] = clean(request.form['brand'])
            _car['author'] = session['uid']
        except TypeError:
            flash('fields can\'t be empty', 'error')
            return render('catalog/new_car.html',
                          brands=brands, csrf=csrf_token)
        # update car, create success message and redirect user
        item = update_item(_car, item_id)
        flash('Record "%s" was successfully updated' % item.title, 'success')
        return redirect('/profile', 302)
    return render('catalog/edit_car.html',
                  brands=brands,
                  car=car.serialize,
                  token=token,
                  user=user.serialize,
                  csrf_token=csrf_token) | 5,325,474 |
def preprocess_until_max_index(nw_name, max_index, number_cores = 1, network_dynamics_file = None):
    """ computes the preprocessed travel time tables for all different travel time files
    creates the travel time tables "tt_matrix" and "dis_matrix" and stores them
    in the network-folder
    :param nw_name: name of the corresponding network file
    :param max_index: preprocesses the network between all nodes with indices from 0 to max_index (excluded)
    :param number_cores: number of travel_time files that are preprocessed in parallel
    :param network_dynamics_file: network dynamics file
    """
    nw_path = os.path.join(fleet_sim_path, "data", "networks", nw_name)
    # Nodes 0 .. max_index-1 are the "special" preprocessing targets.
    special_nodes = list(range(max_index))
    preprocess(nw_path, network_dynamics_file=network_dynamics_file,
               number_cores=number_cores, special_nodes=special_nodes)
# | 5,325,475 |
def replace_ensembl_ids(expression_df, gene_id_mapping):
    """
    Replaces ensembl gene ids with hgnc symbols
    Arguments
    ---------
    expression_df: df
        gene expression data matrix (sample x gene)
    gene_id_mapping: df
        Dataframe mapping ensembl ids (used in DE_stats_file) to hgnc symbols,
        used in Crow et. al.
    NOTE:
    -----
    This function is deprecated due to large memory usage: when `expression_df`
    is a large dataframe, manipulating it inside the momory becomes very slow
    (and sometimes even impossible) due to large memory consumption.
    The same functionality has been refactored into `get_renamed_columns()` and
    `map_recount2_data()` functions in this module.
    THIS FUNCTION IS KEPT AS A REFERENCE ONLY.
    """
    # Some columns are duplicates, for example:
    # (ENSG00000223773.7, ENSG00000223773) --> CD99P1
    # (ENSG00000124334.17, ENSG00000124334) --> IL9R
    # We keep the first occurence of duplicated ensembl ids
    updated_mapping = gene_id_mapping.loc[
        ~gene_id_mapping.index.duplicated(keep="first")
    ]
    # Same ensembl ids are mapped to different gene symbol twice (CCL3L1, CCL3L3)
    # ENSG00000187510.7 ENSG00000187510 C12orf74
    # ENSG00000187510.7 ENSG00000187510 PLEKHG7
    # Manually mapping them based on what is found on ensembl site
    manual_mapping = {
        "ENSG00000187510.7": "PLEKHG7",
        "ENSG00000230417.11": "LINC00595",
        "ENSG00000255374.3": "TAS2R45",
        "ENSG00000276085.1": "CCL3L1",
    }
    # Apply manual mappings to `updated_mapping`
    # (overwrites the hgnc_symbol cell in place for each fixed ensembl id).
    for ensembl_id, gene_symbol in manual_mapping.items():
        updated_mapping.loc[ensembl_id].hgnc_symbol = gene_symbol
    # Remove paralogs.
    # Some ensembl ids are paralogs (for example, "geneA" and "geneA_PAR_Y").
    # They map to the same hgnc symbol. Homologous sequences are paralogous
    # if they were separated by a gene duplication event: if a gene in an
    # organism is duplicated to occupy two different positions in the same
    # genome, then the two copies are paralogous.
    updated_expression_df = expression_df.iloc[
        :, ~expression_df.columns.str.contains("PAR_Y")
    ]
    # Replace ensembl ids with gene symbol
    updated_expression_df.columns = updated_expression_df.columns.map(
        updated_mapping["hgnc_symbol"]
    )
    # Remove columns whose mapped ensembl id is an empty string
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns != ""
    ]
    # Remove columns whose mapped ensembl id is `NaN`
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns.notnull()
    ]
    return updated_expression_df | 5,325,476 |
def MPC_SelectModulationGeneration(pattern_index: int,
                                   mode: ModulationShapeMode) -> None:
    """Selects the customizable modulation mode or the classical linear mode
    Parameters
    ----------
    pattern_index : int
        Pattern index
    mode : ModulationShapeMode
        Modulation mode
    """
    _check_limits(c_uint8, pattern_index, 'pattern_index')
    if not isinstance(mode, ModulationShapeMode):
        raise TypeError('mode must be an instance of '
                        'ModulationShapeMode IntEnum')
    # Both arguments cross the C boundary as uint8 values.
    ret = _MPuLib.MPC_SelectModulationGeneration(
        c_uint8(pattern_index),
        c_uint8(mode))
    CTS3Exception._check_error(ret)
# | 5,325,477 |
async def get_latency(ctx: Context) -> dict[str, str]:
    """
    Get the bot's websocket latency and database round-trip latency.

    Parameters
    ----------
    ctx : Context
        The invocation context; ``ctx.bot.db`` must expose the test database.

    Returns
    -------
    dict[str, str]
        ``{'bot': '<n>ms', 'database': '<n>ms'}``.
    """
    now = perf_counter()
    collection = ctx.bot.db['test']['TESTS']
    # One insert-or-read round trip against the database, purely for timing.
    if await collection.find_one({'_id': PAYLOAD['_id']}) is None:
        await collection.insert_one(PAYLOAD)
    else:
        await collection.find_one({'_id': PAYLOAD['_id']})
    bot_latency = f'{round(ctx.bot.latency * 1000)}ms'
    # BUG FIX: perf_counter() returns seconds; convert to milliseconds before
    # rounding (previously the elapsed *seconds* were rounded, so this always
    # reported "0ms").
    database_latency = f'{round((perf_counter() - now) * 1000)}ms'
    data: dict[str, str] = {
        'bot': bot_latency,
        'database': database_latency
    }
    return data
# | 5,325,478 |
def ssl_allowed(fn):
    """
    Decorator - marks a route as allowing ssl, but not requiring it. It can be served over http and https.
    NOTE: This must go BEFORE the route!
    """
    # Tag the handler in place; the routing layer inspects this attribute.
    setattr(fn, 'ssl_allowed', True)
    return fn
# | 5,325,479 |
def _get_visible_photos(browser, known_urls):
    """
    extracts all *currently visible* photo URLs from a Flickr photoset/album
    page, converts them into "embed code compatible" (i.e. sanctioned by
    Flickr) URLs and returns them.
    Parameters
    ----------
    browser : selenium webdriver
        a selenium webdriver instance, already navigated to the album page.
    known_urls : dict(str: dict(str: str))
        a dictionary mapping from embed code compatible image URLs to a
        dictionary holding some metadata ('image_page', 'title' and
        'orientation'). This dict is updated in place with any newly
        visible images (e.g. after scrolling down the page).
    Returns
    -------
    known_urls : dict(str: dict(str: str))
        the same (mutated) dictionary mapping from embed code compatible
        image URLs to a dictionary holding some metadata ('image_page',
        'title' and 'orientation')
    """
    # Each visible photo tile carries the CSS class 'awake'.
    image_elems = browser.find_elements_by_class_name('awake')
    for elem in image_elems:
        # The hotlink URL and the tile's pixel size live in the inline style.
        style_attrib = elem.get_attribute('style')
        match = re.match(STYLE_STRING_PATTERN, style_attrib, re.VERBOSE)
        width = int(match.group('width'))
        height = int(match.group('height'))
        orientation = get_orientation(width, height)
        url = match.group('url')
        # URL of the page that only shows one image
        try:
            image_page_elem = elem.find_element_by_class_name('overlay')
            image_page = image_page_elem.get_attribute('href')
        except NoSuchElementException as e:
            # No per-image overlay link: fall back to the album page itself.
            image_page = browser.current_url
        # title of the image (the tooltip reads "<title> by <author>")
        try:
            title_elem = elem.find_element_by_class_name('interaction-bar')
            title_str = title_elem.get_attribute('title')
            title = re.match('^(?P<title>.*) by.*$', title_str).group('title')
        except NoSuchElementException as e:
            title = ''
        try:
            embed_url = hotlink_url2embed_url(url)
            if not embed_url in known_urls:
                known_urls[embed_url] = {
                    'image_page': image_page,
                    'title': title,
                    'orientation': orientation}
        except AttributeError as e:
            # Re-raise with the offending URL for easier debugging.
            raise AttributeError("Warning: can't convert URL: {}".format(url))
    return known_urls | 5,325,480 |
def get_parser():
    """Build and return the command-line argument parser for the CLI."""
    cli_parser = argparse.ArgumentParser("cli")
    cli_parser.add_argument(
        'registryimage', help="registry/image:tag - tag is optional")
    # Username and password come last to make them optional later
    cli_parser.add_argument('username', help='username')
    cli_parser.add_argument('password', help='password')
    return cli_parser
def fold_with_enum_index(xtypes, x):
    """
    see MixedIntegerContext.fold_with_enum_index
    """
    x = np.atleast_2d(x)
    folded = np.zeros((x.shape[0], len(xtypes)))
    col = 0  # current column in the unfolded input
    for fold_col, xtyp in enumerate(xtypes):
        if xtyp in (FLOAT, INT):
            # Continuous/integer variables map one-to-one.
            folded[:, fold_col] = x[:, col]
            col += 1
        elif isinstance(xtyp, tuple) and xtyp[0] == ENUM:
            # A one-hot block of width xtyp[1] collapses to its argmax index.
            folded[:, fold_col] = np.argmax(x[:, col : col + xtyp[1]], axis=1)
            col += xtyp[1]
        else:
            _raise_value_error(xtyp)
    return folded
def unlock(arguments):
    """Unlock the database."""
    import redis
    redis_url = coil.utils.ask("Redis URL", "redis://localhost:6379/0")
    connection = redis.StrictRedis.from_url(redis_url)
    # Clearing the lock flag makes the site writable again.
    connection.set('site:lock', 0)
    print("Database unlocked.")
    return 0
def unzip_recursive(zipped_file, to_folder, set_remove=True):
    """
    Unpack ``zipped_file`` into ``to_folder``, then recursively unpack any
    zip archives found inside the extracted tree.

    Parameters
    ----------
    zipped_file : str
        path of the zip archive to unpack
    to_folder : str
        destination folder for the extracted contents
    set_remove : bool
        if True (default), delete each zip file after extraction.
        NOTE(review): with set_remove=False nested archives remain on
        disk, so do not re-run this over the same tree.

    Raises
    ------
    SystemExit
        if an archive cannot be read
    """
    logger.debug("Unzipping {} to {}".format(zipped_file, to_folder))
    with zipfile.ZipFile(zipped_file, 'r') as zfile:
        try:
            zfile.extractall(path=to_folder)
        except (zipfile.BadZipFile, IOError) as ziperror:
            logger.fatal("Tried unzipping {} but got stuck: {}".format(zipped_file, ziperror))
            # Exit with a non-zero status: this is a failure, not success.
            exit(1)
    # if set_remove is True, remove the original zip file after extraction
    if set_remove:
        os.remove(zipped_file)
    # walk through the destination folder looking for nested archives
    # (the original code returned unconditionally before this loop, which
    # made the recursive step documented above unreachable)
    for dir_name, subdir_list, file_list in os.walk(to_folder):
        for specific_file in file_list:
            # look for zip-file
            if specific_file.endswith('.zip'):
                new_file_path = os.path.join(dir_name, specific_file)
                logger.debug("Zip file: {}".format(new_file_path))
                # Extract the nested archive next to itself, then recurse;
                # propagate set_remove so the cleanup policy is consistent.
                unzip_recursive(new_file_path, os.path.dirname(new_file_path),
                                set_remove)
def filter_atom_tokens(entity: SerializableEntity) -> bool:
    """
    When locating tokens for equations, keep only atom tokens. Affix tokens
    (e.g., arrows and hats) are skipped because affixes will be colorized by
    wrapping them in colorization commands.
    """
    return cast(SerializableToken, entity).type_ == "atom"
def handle_subliminal_download(video, video_path, languages_to_retrieve):
    """Download the best available subtitles for a video via subliminal.

    Args:
        video : subliminal Video object to fetch subtitles for
        video_path: absolute path to the video file on disk
        languages_to_retrieve : iterable of language codes accepted by
            ``babelfish.Language`` (e.g. 'eng' for english, 'fra' for french)
    Returns:
        Two dicts keyed by alpha3 language code ('eng', 'fra', ...): the
        first maps to the path of the WebVTT (.vtt) subtitles, the second
        to the path of the SubRip (.srt) subtitles.
    """
    webvtt_subtitles_returned = {}
    srt_subtitles_returned = {}
    # Ask subliminal for the best-scoring subtitles in every requested language.
    best_subtitles = download_best_subtitles(
        [video], set(map(Language, languages_to_retrieve)))
    if best_subtitles[video]:
        for retrieved_subtitle in best_subtitles[video]:
            subtitles_are_saved = save_subtitles(
                video, [retrieved_subtitle], encoding='utf8')
            if subtitles_are_saved:
                srt_fullpath = subtitle.get_subtitle_path(
                    video_path, retrieved_subtitle.language)
                srt_subtitles_returned[
                    retrieved_subtitle.language.alpha3] = srt_fullpath
                # Strip NUL characters some providers embed, then rewrite
                # the .srt file in UTF-8.
                new_data = remove_nullcharacters(srt_fullpath)
                with io.open(srt_fullpath, 'w', encoding='utf-8') as f:
                    for line in new_data:
                        f.write(line)
                # The .vtt twin lives next to the .srt with swapped extension.
                webvtt_fullpath = os.path.splitext(srt_fullpath)[0]+'.vtt'
                if os.path.isfile(webvtt_fullpath):
                    # Add the subtitles path to subtitles_returned even if they are already downloaded/converted
                    webvtt_subtitles_returned[
                        retrieved_subtitle.language.alpha3] = webvtt_fullpath
                if os.path.isfile(srt_fullpath):
                    # Add the subtitles path to subtitles_returned after converting them in .vtt
                    convert_subtitles_to_webvtt(srt_fullpath, webvtt_fullpath)
                    webvtt_subtitles_returned[
                        retrieved_subtitle.language.alpha3] = webvtt_fullpath
    return webvtt_subtitles_returned, srt_subtitles_returned
def xmp_extract(fns, type_map):
    """Extract raw XMP metadata from files into a DataFrame.

    Args:
        fns: list of file names to read XMP data from.
        type_map: mapping of XMP field names to target data types, passed
            through to ``xmp_to_vec`` and ``convert_types``.

    Returns:
        pandas.DataFrame with one converted column per XMP field plus a
        'fn' column holding the source file name.
        NOTE(review): assigning df['fn'] = fns assumes row order matches
        ``fns``; extraction runs through ``imap_unordered_bar`` — confirm
        that helper preserves input order.
    """
    logger.info("Extracting raw XMP data.")
    func = partial(xmp_to_vec, type_map=type_map)
    # (removed a stray debug call `xmp_to_vec(fns[0], type_map=type_map)`
    # whose result was discarded)
    xmp_data = imap_unordered_bar(func, fns, n_proc=2)
    xmp_data = pd.DataFrame(xmp_data)
    # convert the raw values to the data types requested in type_map
    data_fields, data = convert_types(xmp_data, type_map)
    df = pd.DataFrame(data).transpose()
    df.columns = data_fields
    df['fn'] = fns
    return df
def fisbUnavailable(db):
    """Create string containing any FIS-B Unavailable messages.

    Args:
        db (object): Handle to database connection (MongoDB-style; must
            expose ``db.MSG.find``).

    Returns:
        str: Containing any FIS-B Unavailable information, one wrapped
        entry per message (prefixed by a newline), or '' when there are
        no messages or display is disabled via SHOW_UNAVAILABLE.
    """
    # Module-level switch suppresses this section entirely.
    if not SHOW_UNAVAILABLE:
        return ''
    # Collect wrapped entries and join once instead of quadratic +=.
    entries = []
    for r in db.MSG.find({'type': 'FIS_B_UNAVAILABLE'},{'contents': 1, 'centers': 1}):
        # Append the reporting centers in brackets, e.g. ' [ZKC,ZID]'.
        fisbEntry = r['contents'] + ' [' + ','.join(r['centers']) + ']'
        entries.append(textwrap.fill(fisbEntry, 78, subsequent_indent='  ') + '\n')
    fisbStr = ''.join(entries)
    if fisbStr != '':
        fisbStr = '\n' + fisbStr
    return fisbStr
def log_get():
    """ Parses JSON log file and sends it to the web server """
    log(bottle.request)
    entries = []
    # Best-effort read: a missing or corrupt file yields an empty log.
    try:
        if os.path.exists('log.json'):
            with open('log.json') as infile:
                entries = json.load(infile)
    except (ValueError, IOError) as e:
        error(e)
    # Prepare response header
    bottle.response.content_type = 'application/json'
    bottle.response.expires = 0
    bottle.response.set_header('Pragma', 'no-cache')
    bottle.response.set_header('Cache-Control',
                               'no-cache, no-store, must-revalidate')
    # Send log buffer
    return json.dumps(entries)
def rotate_file(filename, copy=False):
    """
    Rotate file like logrotate.

    If the given filename already exists, rename it to "filename".n, n=1...
    A filename with larger n is an older one.

    Args:
        filename: path of the file to rotate.
        copy: if True, copy the file to "filename".1 and keep the
            original; if False (default), rename it.

    Returns:
        The path "filename".1 of the newest rotated file, or None when
        ``filename`` does not exist.
    """
    # Nothing to do if the file does not exist.
    if not os.path.isfile(filename):
        return
    # make list [ [filename, number], ... ] of existing rotations
    old_list = []
    dot_files = glob.glob(filename + ".*")
    for f in dot_files:
        suffix = f.replace(filename + ".", "")
        try:
            i = int(suffix)
            if str(i) == suffix:  # ignore if suffix was such as 003...
                old_list.append([f, i])
        except ValueError:
            # py3 except syntax (was `except ValueError, e:`)
            continue
    # py3: cmp-style sort replaced with an equivalent key function
    old_list.sort(key=lambda entry: entry[1])
    # rotate files, oldest (largest n) first so renames never collide
    for f, i in reversed(old_list):
        os.rename(f, "%s.%d" % (f[:f.rfind(".")], i + 1))
    if copy:
        shutil.copyfile(filename, filename + ".1")
    else:
        os.rename(filename, filename + ".1")
    return filename + ".1"
def change_master(host, confirm=False):
    """
    Change to different master host.

    Arguments:
    - host (str): Hostname of the new master to change to.

    Optional arguments:
    - confirm (bool): Acknowledge the execution of this command. Default is 'False'.
    """
    if not confirm:
        raise salt.exceptions.CommandExecutionError(
            "This command will replace your current master host to '{:s}' - add parameter 'confirm=true' to continue anyway".format(host))

    result = {}
    # Drop the cached key of the old master so the minion trusts the new one.
    result["master_key_removed"] = __salt__["file.remove"]("/etc/salt/pki/minion/minion_master.pub")
    # Point the minion configuration at the new master host.
    result["config_changed"] = __salt__["file.replace"]("/etc/salt/minion", "^master:.*$", "master: {:s}".format(host))
    # Restart the minion so the new configuration takes effect.
    result["restart"] = restart()
    return result
def decode_text(s):
    """
    Decodes a PDFDocEncoding string to Unicode.
    Adds py3 compatability to pdfminer's version.

    Args:
        s: a ``bytes`` or ``str`` value. A UTF-16BE BOM prefix switches
            to UTF-16 decoding; otherwise each byte/character is mapped
            through the PDFDocEncoding table.

    Returns:
        The decoded unicode string.
    """
    if isinstance(s, bytes) and s.startswith(b'\xfe\xff'):
        # BOM present: the payload is UTF-16BE, not PDFDocEncoding.
        return six.text_type(s[2:], 'utf-16be', 'ignore')
    else:
        # bytes iterate as ints on py3, str as 1-char strings; normalize
        # both to code points before the table lookup.
        ords = (ord(c) if isinstance(c, str) else c for c in s)
        return ''.join(PDFDocEncoding[o] for o in ords)
def normalize_bound(sig, lb=0, ub=1):
    """
    Rescale a signal so that its values span the interval [lb, ub].

    Parameters
    ----------
    sig : ndarray
        Original signal to be normalized.
    lb : int, float, optional
        Lower bound of the target interval.
    ub : int, float, optional
        Upper bound of the target interval.

    Returns
    -------
    ndarray
        Normalized signal.
    """
    # Midpoint of the target interval and of the signal's own range.
    target_mid = ub - (ub - lb) / 2
    sig_min = np.min(sig)
    sig_max = np.max(sig)
    sig_mid = sig_max - (sig_max - sig_min) / 2
    # Scale factor mapping the signal's span onto the target span.
    scale = (ub - lb) / (sig_max - sig_min)
    return sig * scale - (sig_mid * scale) + target_mid
def test_create_publish_new_revision(
    client_with_login,
    location,
    minimal_record,
    identity_simple,
    headers,
):
    """Test draft creation of an existing record and publish it.

    Flow: publish a record, open a new draft of it, edit the draft, verify
    the published record is untouched, then publish the draft and verify
    the edit landed.
    """
    client = client_with_login
    recid = _create_and_publish(client, minimal_record, headers)
    # # FIXME: Allow ES to clean deleted documents.
    # # Flush is not the same. Default collection time is 1 minute.
    # time.sleep(70)
    # Create new draft of said record
    orig_title = minimal_record["metadata"]["title"]
    minimal_record["metadata"]["title"] = "Edited title"
    response = client.post(
        "/records/{}/draft".format(recid),
        headers=headers
    )
    assert response.status_code == 201
    # NOTE(review): draft revision 5 vs published revision 4 below looks
    # inconsistent; per the TODO it stems from extra bumps when setting
    # the .bucket/.bucket_id fields — confirm against the service layer.
    assert response.json['revision_id'] == 5
    _assert_single_item_response(response)
    # Update that new draft
    response = client.put(
        "/records/{}/draft".format(recid),
        data=json.dumps(minimal_record),
        headers=headers
    )
    assert response.status_code == 200
    # Check the actual record was not modified
    response = client.get(
        "/records/{}".format(recid), headers=headers)
    assert response.status_code == 200
    _assert_single_item_response(response)
    assert response.json['metadata']["title"] == orig_title
    # Publish it to check the increment in reversion
    response = client.post(
        "/records/{}/draft/actions/publish".format(recid), headers=headers)
    assert response.status_code == 202
    _assert_single_item_response(response)
    # TODO: Because of seting the `.bucket`/`.bucket_id` fields on the record
    # there are extra revision bumps.
    assert response.json['id'] == recid
    assert response.json['revision_id'] == 4
    assert response.json['metadata']["title"] == \
        minimal_record["metadata"]["title"]
    # Check it was actually edited
    response = client.get(
        "/records/{}".format(recid), headers=headers)
    assert response.json["metadata"]["title"] == \
        minimal_record["metadata"]["title"]
def get_chisqr3d(res3d):
    """Collect the ``chisqr`` attribute of every fit result into a 3d array.

    Args:
        res3d -- 3d numpy array of model.ModelResult; output of fit3d

    Return:
        attr3d -- numpy array (same shape as res3d) of chi-square statistics
    """
    # Infer the output dtype from one sample statistic.
    sample_type = type(res3d[0, 0, 0].chisqr)
    attr3d = np.zeros(res3d.shape, dtype=sample_type)
    # Copy the statistic element by element over every index triple.
    for idx in np.ndindex(*res3d.shape):
        attr3d[idx] = res3d[idx].chisqr
    return attr3d
def detect_Telephony_SMS_abuse(x) :
    """
    Detect structural use of android.telephony.SmsManager.sendTextMessage
    in an analyzed APK.

    NOTE(review): the result-formatting logic below is commented out, so
    this function currently only prints the structural matches and always
    returns an empty list. Python 2 only (uses xrange).

    @param x : a VMAnalysis instance
    @rtype : a list of formatted strings
    """
    formatted_str = []
    # Find every call site of SmsManager.sendTextMessage ("." = any descriptor).
    structural_analysis_results = x.tainted_packages.search_methods("Landroid/telephony/SmsManager","sendTextMessage", ".")
    #structural_analysis_results = x.tainted_packages.search_methods("Lcom/geinimi/c/i","a", ".")
    print (structural_analysis_results)
    """
    # ke added
    print (structural_analysis_results[0])
    print (show_Path(structural_analysis_results))
    print (structural_analysis_results[0].get_name())
    print (structural_analysis_results[0].get_class_name())
    print (structural_analysis_results[0].get_idx())
    print (structural_analysis_results[0].get_descriptor())
    #print (len(structural_analysis_results))
    raw_input()
    """
    # Run data-flow analysis on each call site to recover argument registers.
    for result in xrange(len(structural_analysis_results)) :
        registers = data_flow_analysis(structural_analysis_results, result, x)
        #print (registers)
        #print (result)
        #raw_input(" Y_______Y ")
        """
        if len(registers) > 3 :
            target_phone_number = get_register_value(1, registers)
            sms_message = get_register_value(3, registers)
            local_formatted_str = "This application sends an SMS message '%s' to the '%s' phone number" % (sms_message, target_phone_number)
            if not(local_formatted_str in formatted_str) :
                formatted_str.append(local_formatted_str)
        """
    return formatted_str
def depth_map_to_point_cloud(
    depth_map: torch.Tensor,
    valid_map: Optional[torch.Tensor],
    focal_x: float,
    focal_y: float,
    center_x: float,
    center_y: float,
    trunc_depth_min: Optional[float],
    trunc_depth_max: Optional[float],
    flip_h: bool = True,
    device: Optional[torch.device] = None,
    _validate_args: bool = True
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Generate point clouds from `depth_map`. The generated point clouds are
    in the camera space. X: camera right, Y: camera up, Z: forward (depth).
    The rank of `depth_map` must be at least 4D (b, c, h, w). If not, it is
    interpreted as (h, w), (c, h, w), (b, c, h, w) and (b, ..., h, w) for higher
    ranks.
    Note that the `valid_map` must be a image type as `depth_map`. (h, w), (c, h, w)
    or (b, c, h, w). Note that (b, h, w) is not allowed and will be interpreted as
    (c, h, w) without warnings.
    Args:
        depth_map (torch.Tensor): UNNORMALIZED depth map, which means the range of
            values is [min_depth, max_depth]. torch.float32. The rank must be at
            least 4D (b, c, h, w). If not, it is converted automatically.
        valid_map (torch.Tensor, optional): binary mask to indicate which pixels are
            valid or invalid. Invalid pixels are discard during scattering. torch.bool
        focal_x (float): focal length on x direction.
        focal_y (float): focal length on y direction.
        center_x (float): center coordinate of depth map.
        center_y (float): center coordinate of depth map.
        trunc_depth_min (float): depth below this value is truncated. None to disable.
        trunc_depth_max (float): depth above this value is truncated. None to disable.
        flip_h (bool, optional): whether to flip the horizontal axis. Note that in
            OpenCV format, the origin (0, 0) of an image is at the upper left corner,
            which should be flipped before converting to point cloud. Defaults
            to True.
        device (torch.device, optional): torch device. Defaults to None.
    Returns:
        torch.Tensor: point cloud in shape (..., 3)
        torch.Tensor: mask in shape (..., h, w) indicating the valid area.
    """
    # Validation is skippable (_validate_args=False) so internal callers
    # that already hold canonical 4D float32 tensors avoid redundant work.
    if _validate_args:
        # Convert to tensors and ensure they are on the same device
        depth_map = utils.validate_tensors(depth_map, same_device=device or True)
        # Ensure tensor shape at least 4D (b, ..., h, w)
        depth_map = utils.to_4D_image(depth_map) # (b, c, h, w)
        # Ensure dtypes
        depth_map = depth_map.to(dtype=torch.float32)
        if valid_map is not None:
            valid_map = utils.to_tensor(valid_map, device=depth_map.device)
            valid_map = utils.to_4D_image(valid_map) # (b, c, h, w)
            valid_map = valid_map.to(dtype=torch.bool)
    # All further work happens on depth_map's device regardless of the
    # `device` argument.
    device = depth_map.device
    # Pixel-grid coordinates matching depth_map's shape.
    x, y = utils.generate_image_coords(
        depth_map.shape,
        dtype = torch.float32,
        device = device
    ) # same shape as depth_map
    z = depth_map # (..., h, w)
    # Stack (x, y, depth) per pixel, then unproject to camera space using
    # the pinhole intrinsics; validation already happened above.
    points = torch.stack((x, y, z), dim=-1)
    point_cloud = image_to_camera_space(
        points = points,
        focal_x = focal_x,
        focal_y = focal_y,
        center_x = center_x,
        center_y = center_y,
        flip_h = flip_h,
        height = depth_map.shape[-2],
        _validate_args = False
    ) # (..., h, w, 3)
    # Start with everything valid, then AND in each constraint.
    valid_area = torch.ones_like(z, dtype=torch.bool) # (..., h, w)
    # Truncate invalid values
    if trunc_depth_max is not None:
        valid_area = torch.logical_and(z <= trunc_depth_max, valid_area)
    if trunc_depth_min is not None:
        valid_area = torch.logical_and(z >= trunc_depth_min, valid_area)
    if valid_map is not None:
        valid_area = torch.logical_and(valid_area, valid_map)
    return point_cloud, valid_area
async def admin_cmd_ping(message: discord.Message, args: str, isDM: bool):
    """admin command testing bot latency.

    :param discord.Message message: the discord message calling the command
    :param str args: ignored
    :param bool isDM: Whether or not the command is being called from a DM channel
    """
    # Measure the round trip of sending a placeholder reply.
    started = time.perf_counter()
    placeholder = await message.reply(mention_author=False, content="Ping...")
    elapsed_ms = (time.perf_counter() - started) * 1000
    await placeholder.edit(content='Pong! {:.2f}ms'.format(elapsed_ms))
def symbol_definitions(goto, wkdir, srcdir=None):
    """Symbol definitions appearing in symbol table.

    Source file path names in symbol table are absolute or relative to
    wkdir. If srcdir is given, return only symbols defined in files
    under srcdir.
    """
    wkdir = srcloct.abspath(wkdir)
    srcdir = srcloct.abspath(srcdir)

    definitions = {}
    for entry in parse_symbol_table(symbol_table(goto), wkdir):
        name, path, line = entry['symbol'], entry['file'], entry['line']
        # Skip entries that are missing any field we need.
        if name is None or path is None or line is None:
            logging.info("Skipping symbol table entry: %s: %s, %s",
                         name, path, line)
            continue
        # Restrict to files under srcdir when one was given.
        if srcdir and not path.startswith(srcdir):
            logging.info("Skipping symbol table entry: %s: %s, %s",
                         name, path, line)
            continue
        srcloc = srcloct.make_srcloc(path, None, line, wkdir, srcdir)
        # Keep the first definition seen; warn about conflicting redefinitions.
        if name in definitions and srcloc != definitions[name]:
            logging.warning("Skipping redefinition of symbol name: %s", name)
            logging.warning("  Old symbol %s: file %s, line %s",
                            name, definitions[name]["file"], definitions[name]["line"])
            logging.warning("  New symbol %s: file %s, line %s",
                            name, srcloc["file"], srcloc["line"])
            continue
        definitions[name] = srcloc
    return definitions
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.