content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def st_oid(draw, max_value=2**512, max_size=50):
    """
    Hypothesis strategy that returns valid OBJECT IDENTIFIERs as tuples

    :param draw: hypothesis draw callable (supplied by @st.composite)
    :param max_value: maximum value of any single sub-identifier
    :param max_size: maximum length of the generated OID
    """
    arc_one = draw(st.integers(min_value=0, max_value=2))
    # Per X.660, the second arc is capped at 39 unless the first arc is 2.
    second_cap = 39 if arc_one < 2 else max_value
    arc_two = draw(st.integers(min_value=0, max_value=second_cap))
    tail = draw(st.lists(st.integers(min_value=0, max_value=max_value),
                         max_size=max_size))
    return (arc_one, arc_two, *tail)
def predict(model, image_pth=None, dataset=None):
    """
    Predict the digit class of a single image with a trained model.

    Args:
        model : trained classifier, called as ``model(tensor)`` on a batched
            28x28 grayscale tensor
        image_pth (str): path of the image >>> ".../image.png"
        dataset: if truthy, predict on the first MNIST training image instead.
            NOTE(review): the original ignored the passed-in value and always
            downloaded MNIST here; that behavior is preserved.

    Returns:
        predicted class (numpy scalar)

    Raises:
        ValueError: if neither ``image_pth`` nor ``dataset`` is given.
    """
    transform = transforms.Compose([
        transforms.Resize([28, 28]),
        transforms.Grayscale(),
        transforms.ToTensor()
    ])
    image = None
    if dataset:
        mnist = torchvision.datasets.MNIST(root="./",
                                           download=True,
                                           train=True,
                                           transform=transforms.ToTensor())
        image, _label = next(iter(mnist))
    if image_pth:
        image = transform(Image.open(image_pth))
    if image is None:
        raise ValueError("Either image_pth or dataset must be provided")
    # `scores` keeps the model output distinct from this function's own name
    # (the original shadowed `predict` with a local variable).
    scores = model(image.unsqueeze(0))
    predicted = np.argmax(scores.detach())
    # Only annotate/display when a file path exists; the original called
    # cv.imread(None) and crashed when running from the dataset branch.
    if image_pth:
        image_cv = cv.imread(image_pth)
        font = cv.FONT_HERSHEY_SIMPLEX
        img_label = f"predicted label: {predicted}"
        cv.putText(image_cv, img_label, [10, 20],
                   font, 0.7, (0, 255, 0), 1, cv.LINE_AA)
        plt.imshow(image_cv)
        plt.show()
    return predicted
def to_centers(sids):
    """ Converts a (collection of) sid(s) into a (collection of) trixel center longitude, latitude pairs.

    Parameters
    ----------
    sids: int or collection of ints
        sids to covert to vertices

    Returns
    --------
    Centers: (list of) tuple(s)
        List of centers. A center is a pair of longitude/latitude.

    Examples
    ----------
    >>> import starepandas
    >>> sids = [4611263805962321926, 4611404543450677254]
    >>> starepandas.to_centers(sids)
    array([[19.50219018, 23.29074702],
           [18.65957821, 25.34384175]])
    """
    # Vertices first, then collapse each trixel's vertices to its center.
    return vertices2centers(to_vertices(sids))
def get_atlas_by_id(atlas_id: str, request: Request):
    """
    Get more information for a specific atlas with links to further objects.
    """
    # The id may arrive URL-encoded; undo the %2F escaping before comparing.
    wanted_id = atlas_id.replace('%2F', '/')
    for atlas in siibra.atlases:
        if atlas.id == wanted_id:
            return __atlas_to_result_object(atlas, request)
    raise HTTPException(
        status_code=404,
        detail='atlas with id: {} not found'.format(atlas_id))
def gen_header_types(out, schema):
    """
    Generates the types in the header file.
    """
    # The declaration list below is hard-coded to match exactly the output of
    # the legacy C# tool, including its ordering and its duplicated
    # UpdateStructureDataDetails entry. It could later be shortened by
    # iterating only on the top-level types (generation is recursive).
    # Each entry is (is_pair, name); "pair" entries emit request/response twins.
    declarations = [
        (False, 'IdType'),
        (False, 'Node'),
        (False, 'InstanceNode'),
        (False, 'TypeNode'),
        (False, 'ObjectNode'),
        (False, 'ObjectTypeNode'),
        (False, 'VariableNode'),
        (False, 'VariableTypeNode'),
        (False, 'ReferenceTypeNode'),
        (False, 'MethodNode'),
        (False, 'ViewNode'),
        (False, 'DataTypeNode'),
        (False, 'Argument'),
        (False, 'EnumValueType'),
        (False, 'EnumField'),
        (False, 'OptionSet'),
        (False, 'TimeZoneDataType'),
        (False, 'ApplicationDescription'),
        (False, 'RequestHeader'),
        (False, 'ServiceFault'),
        (True, 'FindServers'),
        (False, 'ServerOnNetwork'),
        (True, 'FindServersOnNetwork'),
        (False, 'EndpointDescription'),
        (True, 'GetEndpoints'),
        (False, 'RegisteredServer'),
        (True, 'RegisterServer'),
        (False, 'MdnsDiscoveryConfiguration'),
        (True, 'RegisterServer2'),
        (False, 'SecurityTokenRequestType'),
        (False, 'ChannelSecurityToken'),
        (True, 'OpenSecureChannel'),
        (True, 'CloseSecureChannel'),
        (False, 'SignedSoftwareCertificate'),
        (False, 'SignatureData'),
        (True, 'CreateSession'),
        (False, 'UserIdentityToken'),
        (False, 'AnonymousIdentityToken'),
        (False, 'UserNameIdentityToken'),
        (False, 'X509IdentityToken'),
        (False, 'KerberosIdentityToken'),
        (False, 'IssuedIdentityToken'),
        (True, 'ActivateSession'),
        (True, 'CloseSession'),
        (True, 'Cancel'),
        (False, 'NodeAttributesMask'),
        (False, 'NodeAttributes'),
        (False, 'ObjectAttributes'),
        (False, 'VariableAttributes'),
        (False, 'MethodAttributes'),
        (False, 'ObjectTypeAttributes'),
        (False, 'VariableTypeAttributes'),
        (False, 'ReferenceTypeAttributes'),
        (False, 'DataTypeAttributes'),
        (False, 'ViewAttributes'),
        (False, 'AddNodesItem'),
        (False, 'AddNodesResult'),
        (True, 'AddNodes'),
        (False, 'AddReferencesItem'),
        (True, 'AddReferences'),
        (False, 'DeleteNodesItem'),
        (True, 'DeleteNodes'),
        (False, 'DeleteReferencesItem'),
        (True, 'DeleteReferences'),
        (False, 'AttributeWriteMask'),
        (False, 'BrowseDirection'),
        (False, 'ViewDescription'),
        (False, 'BrowseDescription'),
        (False, 'BrowseResultMask'),
        (False, 'BrowseResult'),
        (True, 'Browse'),
        (True, 'BrowseNext'),
        (False, 'BrowsePath'),
        (False, 'BrowsePathResult'),
        (True, 'TranslateBrowsePathsToNodeIds'),
        (True, 'RegisterNodes'),
        (True, 'UnregisterNodes'),
        (False, 'EndpointConfiguration'),
        (False, 'ComplianceLevel'),
        (False, 'SupportedProfile'),
        (False, 'SoftwareCertificate'),
        (False, 'NodeTypeDescription'),
        (False, 'FilterOperator'),
        (False, 'QueryDataSet'),
        (False, 'NodeReference'),
        (False, 'ContentFilter'),
        (False, 'ElementOperand'),
        (False, 'LiteralOperand'),
        (False, 'AttributeOperand'),
        (False, 'SimpleAttributeOperand'),
        (False, 'ContentFilterElementResult'),
        (False, 'ContentFilterResult'),
        (False, 'ParsingResult'),
        (True, 'QueryFirst'),
        (True, 'QueryNext'),
        (False, 'TimestampsToReturn'),
        (False, 'ReadValueId'),
        (True, 'Read'),
        (False, 'HistoryReadValueId'),
        (False, 'HistoryReadResult'),
        (False, 'ReadEventDetails'),
        (False, 'ReadRawModifiedDetails'),
        (False, 'ReadProcessedDetails'),
        (False, 'ReadAtTimeDetails'),
        (False, 'HistoryData'),
        (False, 'HistoryModifiedData'),
        (False, 'HistoryEvent'),
        (True, 'HistoryRead'),
        (False, 'WriteValue'),
        (True, 'Write'),
        (False, 'HistoryUpdateDetails'),
        (False, 'PerformUpdateType'),
        (False, 'UpdateDataDetails'),
        (False, 'UpdateStructureDataDetails'),
        (False, 'UpdateStructureDataDetails'),
        (False, 'UpdateEventDetails'),
        (False, 'DeleteRawModifiedDetails'),
        (False, 'DeleteAtTimeDetails'),
        (False, 'DeleteEventDetails'),
        (False, 'HistoryUpdateResult'),
        (True, 'HistoryUpdate'),
        (False, 'CallMethodRequest'),
        (False, 'CallMethodResult'),
        (True, 'Call'),
        (False, 'MonitoringMode'),
        (False, 'DataChangeTrigger'),
        (False, 'DeadbandType'),
        (False, 'DataChangeFilter'),
        (False, 'AggregateFilter'),
        (False, 'EventFilterResult'),
        (False, 'AggregateFilterResult'),
        (False, 'MonitoredItemCreateRequest'),
        (False, 'MonitoredItemCreateResult'),
        (True, 'CreateMonitoredItems'),
        (False, 'MonitoredItemModifyRequest'),
        (False, 'MonitoredItemModifyResult'),
        (True, 'ModifyMonitoredItems'),
        (True, 'SetMonitoringMode'),
        (True, 'SetTriggering'),
        (True, 'DeleteMonitoredItems'),
        (True, 'CreateSubscription'),
        (True, 'ModifySubscription'),
        (True, 'SetPublishingMode'),
        (False, 'NotificationMessage'),
        (False, 'DataChangeNotification'),
        (False, 'EventNotificationList'),
        (False, 'StatusChangeNotification'),
        (False, 'SubscriptionAcknowledgement'),
        (True, 'Publish'),
        (True, 'Republish'),
        (False, 'TransferResult'),
        (True, 'TransferSubscriptions'),
        (True, 'DeleteSubscriptions'),
        (False, 'EnumeratedTestType'),
        (False, 'BuildInfo'),
        (False, 'RedundancySupport'),
        (False, 'RedundantServerDataType'),
        (False, 'NetworkGroupDataType'),
        (False, 'SamplingIntervalDiagnosticsDataType'),
        (False, 'ServerDiagnosticsSummaryDataType'),
        (False, 'ServerStatusDataType'),
        (False, 'SessionDiagnosticsDataType'),
        (False, 'SessionSecurityDiagnosticsDataType'),
        (False, 'StatusResult'),
        (False, 'SubscriptionDiagnosticsDataType'),
        (False, 'ModelChangeStructureVerbMask'),
        (False, 'ModelChangeStructureDataType'),
        (False, 'SemanticChangeStructureDataType'),
        (False, 'Range'),
        (False, 'EUInformation'),
        (False, 'AxisScaleEnumeration'),
        (False, 'ComplexNumberType'),
        (False, 'DoubleComplexNumberType'),
        (False, 'AxisInformation'),
        (False, 'XVType'),
        (False, 'ProgramDiagnosticDataType'),
        (False, 'Annotation'),
        (False, 'ExceptionDeviationFormat'),
    ]
    for is_pair, name in declarations:
        if is_pair:
            schema.gen_header_pair(out, name)
        else:
            schema.gen_header_type(out, name)
def load_etod(event):
    """Called at startup or when the Reload Ephemeris Time of Day rule is
    triggered, deletes and recreates the Ephemeris Time of Day rule. Should be
    called at startup and when the metadata is added to or removed from Items.
    """
    # Drop the old rule first; bail out if the deletion failed.
    if not delete_rule(ephem_tod, log):
        log.error("Failed to delete rule!")
        return None
    description = ("Creates the timers that "
                   "drive the {} state"
                   "machine".format(ETOD_ITEM))
    # Rebuild the rule triggers from the latest metadata configs.
    etod_items = load_rule_with_metadata(NAMESPACE, check_config, "changed",
                                         "Ephemeris Time of Day", ephem_tod,
                                         log, description=description,
                                         tags=["openhab-rules-tools", "etod"])
    if etod_items:
        # Cancel timers for Items that no longer carry the metadata.
        for stale in [name for name in timers.timers
                      if name not in etod_items]:
            timers.cancel(stale)
    # Generate the timers now.
    ephem_tod(None)
def extract_title(html):
    """Return the article title from the article HTML"""
    # Candidate xpaths paired with confidence scores; higher scores are
    # preferred when several xpaths match.
    xpaths = [
        ('//header[@class="entry-header"]/h1[@class="entry-title"]//text()', 4),  # noqa : E501
        ('//meta[@property="og:title"]/@content', 4),
        ('//h1[@class="entry-title"]//text()', 3),
        ('//h1[@itemprop="headline"]//text()', 3),
        ('//h2[@itemprop="headline"]//text()', 2),
        ('//meta[contains(@itemprop, "headline")]/@content', 2),
        ('//body/title//text()', 1),
        ('//div[@class="postarea"]/h2/a//text()', 1),
        ('//h1[@class="post__title"]//text()', 1),
        ('//h1[@class="title"]//text()', 1),
        ('//head/title//text()', 1),
        ('//header/h1//text()', 1),
        ('//meta[@name="dcterms.title"]/@content', 1),
        ('//meta[@name="fb_title"]/@content', 1),
        ('//meta[@name="sailthru.title"]/@content', 1),
        ('//meta[@name="title"]/@content', 1),
    ]
    candidates = extract_element(html, xpaths,
                                 process_dict_fn=combine_similar_titles)
    if not candidates:
        return None
    # Keep the candidate with the highest accumulated score.
    return max(candidates, key=lambda title: candidates[title].get('score'))
def re_send_mail(request, user_id):
    """
    re-send the email verification email
    """
    user = User.objects.get(pk=user_id)
    # Discard any verification code issued earlier for this user.
    try:
        EmailVerify.objects.filter(user=user).get().delete()
    except EmailVerify.DoesNotExist:
        pass
    # Issue a fresh activation code and mail it out.
    email_verify = EmailVerify(user=user, user_activation=True)
    email_verify.generate_code()
    email_verify.save()
    send_mail_account_confirmation(user, email_verify.code,
                                   request.shop.name_shop(),
                                   request.get_host())
    return HttpResponseRedirect(reverse('welcome'))
def get_snap_filenames(base, snap_prefix, num):
    """Return a list of paths (without file extensions) to snapshot files
    corresponding to the snapshot indicated by num.

    A snapshot is either a single ``<prefix>_<num>.hdf5`` file directly under
    ``base`` or a ``snapdir_<num>`` directory of ``<prefix>_<num>.<i>.hdf5``
    chunks; chunk paths are returned sorted by chunk index ``i``.

    Raises RuntimeError when neither layout exists.
    """
    padded = str(num).zfill(3)
    single = "%s/%s_%s" % (base, snap_prefix, padded)
    if os.path.exists("%s.hdf5" % single):
        return [single]
    multi = "%s/snapdir_%s/%s_%s" % (base, padded, snap_prefix, padded)
    if os.path.exists("%s.0.hdf5" % multi):
        def chunk_index(path):
            # "<prefix>_<num>.<i>.hdf5" -> integer i between the first dot
            # and the ".hdf5" suffix.
            stem = os.path.basename(path)
            return int(stem[stem.index('.') + 1:stem.index('.hdf5')])
        ordered = sorted(glob.glob("%s.*.hdf5" % multi), key=chunk_index)
        return [os.path.splitext(name)[0] for name in ordered]
    raise RuntimeError("Cannot find snapshot number %s in %s." % (num, base))
def apply_playbook(playbook_path, hosts_inv=None, host_user=None,
                   ssh_priv_key_file_path=None, password=None, variables=None,
                   proxy_setting=None, inventory_file=None, become_user=None):
    """
    Executes an Ansible playbook to the given host
    :param playbook_path: the (relative) path to the Ansible playbook
    :param hosts_inv: a list of hostnames/ip addresses to which to apply the
                      Ansible playbook (not required when PB is configured for
                      localhost)
    :param host_user: A user for the host instances (must be a password-less
                      sudo user if playbook has "sudo: yes") (not required when
                      PB is configured for localhost)
    :param ssh_priv_key_file_path: the file location of the ssh key. Required
                                   if password is None (not required when PB is
                                   configured for localhost)
    :param password: the SSH connection/become password. Required if
                     ssh_priv_key_file_path is None (not required when PB is
                     configured for localhost)
    :param variables: a dictionary containing any substitution variables needed
                      by the Jinja 2 templates
    :param proxy_setting: instance of os_credentials.ProxySettings class
    :param inventory_file: an inventory file that will supersede the hosts_inv
    :param become_user: the username on this host that the playbook must run
                        as. When used, the become_method will be sudo and
                        become will be 'yes'
    :raises AnsibleException when the return code from the Ansible library is
            not 0
    :return: the return code from the Ansible library only when 0.
             Implementation now raises an exception otherwise
    """
    if not os.path.isfile(playbook_path):
        raise AnsibleException(
            'Requested playbook not found - ' + playbook_path)
    else:
        logger.info('Applying playbook [%s] with variables - %s',
                    playbook_path, variables)
    pk_file_path = None
    if ssh_priv_key_file_path:
        pk_file_path = os.path.expanduser(ssh_priv_key_file_path)
        # Only validate the key file when no password fallback exists.
        if not password:
            if not os.path.isfile(pk_file_path):
                raise AnsibleException(
                    'Requested private SSH key not found - ' + pk_file_path)
    passwords = None
    if password:
        passwords = {'conn_pass': password, 'become_pass': password}
    # Disable host key checking so first-time connections do not prompt.
    import ansible.constants
    ansible.constants.HOST_KEY_CHECKING = False
    loader = DataLoader()
    if inventory_file:
        # An explicit inventory file wins over the hosts_inv list.
        inventory = InventoryManager(loader=loader, sources=inventory_file)
        connection = 'ssh'
    elif hosts_inv:
        inventory = InventoryManager(loader=loader)
        for host in hosts_inv:
            inventory.add_host(host=host, group='ungrouped')
        connection = 'ssh'
    else:
        # No hosts given: run against localhost with a local connection.
        # NOTE(review): this re-creates the DataLoader built above; harmless
        # but redundant.
        loader = DataLoader()
        inventory = InventoryManager(loader=loader)
        connection = 'local'
    ssh_extra_args = None
    if proxy_setting and proxy_setting.ssh_proxy_cmd:
        ssh_extra_args = '-o ProxyCommand=\'%s\'' % proxy_setting.ssh_proxy_cmd
    become = None
    become_method = None
    if become_user:
        become = 'yes'
        become_method = 'sudo'
    # Ansible >= 2.8 reads CLI-style options from this global immutable dict
    # instead of an options object passed to PlaybookExecutor.
    context.CLIARGS = ImmutableDict(tags={},listtags=False, listtasks=False, listhosts=False, syntax=False,
                           connection=connection, module_path=None, forks=100, remote_user=host_user,
                           private_key_file=pk_file_path, ssh_common_args=None, ssh_extra_args=ssh_extra_args,
                           become=become, become_method=become_method, become_user=become_user, verbosity=11111,
                           check=False, timeout=30, diff=None, start_at_task=None, extra_vars=[variables])
    variable_manager = VariableManager(loader=loader, inventory=inventory)
    logger.debug('Setting up Ansible Playbook Executor for playbook - ' +
                 playbook_path)
    executor = PlaybookExecutor(
        playbooks=[playbook_path],
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords)
    logger.debug('Executing Ansible Playbook - ' + playbook_path)
    ret_val = executor.run()
    if ret_val != 0:
        raise AnsibleException(
            'Error applying playbook [{}] with value [{}] using the connection'
            ' type of [{}]'.format(
                playbook_path, ret_val, connection))
    return ret_val
def test_list_projects(mocker):
    """Test projects being outputed to the shell.

    Mocks the API client's login, project listing and pipeline-capability
    calls, invokes the CLI command, and compares its output against a
    manually rendered line.
    """
    runner = CliRunner()
    mocked_login = mocker.patch.object(APIClient, "login", return_value=None)
    mocked_get_projects = mocker.patch.object(
        APIClient, "list_projects", return_value=Projects(**MOCKED_PROJECTS)
    )
    mocked_get_pipeline_capabilities = mocker.patch.object(
        APIClient,
        "get_pipeline_capabilities",
        return_value=PipelineCapabilities(**MOCKED_PIPELINE_CAPABILITY),
    )
    res = runner.invoke(
        list_projects, ["--email", "foo@bar.com", "--password", "123"]
    )
    assert res.exit_code == 0
    mocked_login.assert_called_once()
    mocked_get_projects.assert_called_once()
    mocked_get_pipeline_capabilities.assert_called_once()
    project = Project(**MOCKED_PROJECTS["results"][0])
    pipeline = PipelineCapabilities(**MOCKED_PIPELINE_CAPABILITY)
    output_line = io.BytesIO()
    # Capture click.echo output through a redirected stdout. Restore stdout in
    # a finally block: the original left sys.stdout pointing at the BytesIO
    # forever, which silently swallowed output of every later test.
    original_stdout = sys.stdout
    sys.stdout = output_line
    try:
        echo(
            "\t".join(
                [
                    str(project.created),
                    str(project.id),
                    project.name.replace("\t", " "),
                    pipeline.name,
                ]
            )
        )
    finally:
        sys.stdout = original_stdout
    assert output_line.getvalue() == res.output.encode()
def add_map_widget(
    width: int,
    height: int,
    center: tuple[float, float],
    zoom_level: int,
    tile_server: TileServer,
) -> int | str:
    """Add map widget

    Args:
        width (int): Widget width
        height (int): Widget height
        center (tuple[float, float]): Center point coordinates:
            latitude, longitude
        zoom_level (int): Tile map zoom level
        tile_server (TileServer): Tile supplier, from
            dearpygui_map.tile_source

    Returns:
        int | str: result of ``MapWidget.insert_widget()`` — presumably the
        Dear PyGui item id/tag; verify against MapWidget.
    """
    widget = MapWidget(width=width, height=height, center=center,
                       zoom_level=zoom_level, tile_server=tile_server)
    return widget.insert_widget()
def _index_within_range(query: List[int], source: List[int]) -> bool:
"""Check if query is within range of source index.
:param query: List of query int
:param source: List of soure int
"""
dim_num = len(query)
for i in range(dim_num):
if query[i] > source[i]:
raise IndexError(f"index {query[i]} is out of bound for axis {i} with size {source[i]}")
return True | 36,312 |
def path_sender():
    """
    Sends computed path to drones
    """
    # TODO: replace this stub with the real path computation/dispatch
    payload = {"result": 2}
    return json.dumps(payload)
def test_transactions() -> List[TransactionObject]:
    """
    Load some example transactions.

    Returns three uncleared transactions on the same asset and date that
    differ only in amount, category, id and label number.
    """
    # (amount, category_id, transaction_id, label number) for each example;
    # the three original dict literals were identical except for these values.
    specs = [
        (1.0, 229134, 55907882, 1),
        (2.0, 229146, 55907976, 2),
        (3.0, 229140, 55907977, 3),
    ]
    transactions = []
    for amount, category_id, transaction_id, num in specs:
        transactions.append(TransactionObject(
            amount=amount,
            asset_id=23043,
            category_id=category_id,
            currency="usd",
            date="2021-09-19",
            external_id=None,
            fees=None,
            group_id=None,
            id=transaction_id,
            is_group=False,
            notes="Test Transaction {}".format(num),
            original_name="Test {}".format(num),
            parent_id=None,
            payee="Test {}".format(num),
            plaid_account_id=None,
            price=None,
            quantity=None,
            status="uncleared",
            subtype=None,
            tags=None,
            type=None,
        ))
    return transactions
def modified(mctx, x):
    """``modified()``
    File that is modified according to status.
    """
    # i18n: "modified" is a keyword
    getargs(x, 0, 0, _("modified takes no arguments"))
    # status()[0] is the set of modified files; filter the current subset.
    modified_files = mctx.status()[0]
    return [name for name in mctx.subset if name in modified_files]
def parse_declarations(lang, state, code_only=False, keep_tokens=True):
    """
    Return the comments or code of state.line.
    Unlike parse_line, this function assumes the parser is *not*
    in the context of a multi-line comment.
    Args:
        lang (Language):
            Syntax description for the language being parsed.
        state (State):
            Parser state.
        code_only (bool, default: False):
            If False, each non-comment character is replaced with a space.
            If True, each comment character is replaced with a space.
        keep_tokens (bool, default: True):
            If False, comment tokens are filtered out.
            If True, comment tokens are preserved.
    Returns:
        (string, State)
    """
    # Consume, in order: leading code, then a line comment, then a multi-line
    # comment opening on this line. Each call advances state.line.
    code, state = parse_code(lang, state)
    comment, state = parse_line_comment(lang, state, keep_tokens)
    comment2, state = parse_multiline_comment(lang, state, keep_tokens)
    if comment or comment2:
        line = state.line
        if not state.multi_end_stack:
            # Continue looking for declarations.
            line, state = parse_declarations(lang, state, code_only, keep_tokens)
        # Blank out whichever side (code or comments) the caller did not ask
        # for; clear_line preserves column positions with spaces.
        if code_only:
            line = code + clear_line(comment) + clear_line(comment2) + line
        else:
            line = clear_line(code) + comment + comment2 + line
        return line, state
    else:
        # No comment found: the whole line was code.
        state.line = ''
        if code_only:
            return code, state
        else:
            return clear_line(code), state
def rads_to_degs(rad):
    """Convert an angle from radians to degrees.

    :param rad: angle in radians
    :return: angle in degrees
    """
    # math.degrees is the stdlib equivalent of rad * 180.0 / pi.
    return math.degrees(rad)
def properties_entity(entity_a, properties_resource):
    """
    An entity from properties_resource.

    Clears the existing translations of ``entity_a`` and rewrites its string
    to contain a printf-style placeholder before yielding it.
    """
    # Drop all translations so the fixture starts from a clean slate.
    entity_a.translation_set.all().delete()
    entity_a.string = "something %s"
    entity_a.save()
    yield entity_a
def test_post():
    """Test the post with a basic program.

    Builds a small Motoman program (frame/tool setup, joint and linear moves,
    I/O messages and pauses), prints the generated program lines, and shows
    the generation log, if any, in a message box. Interactive: blocks on
    input() before returning.
    """
    robot = RobotPost('Motomantest', 'Motoman robot', 6)
    robot.ProgStart("Program")
    robot.RunMessage("Program generated by RoboDK", True)
    # Reference frame and tool poses (XYZ + Euler angles).
    robot.setFrame(Pose([807.766544, -963.699898, 41.478944, 0, 0, 0]), None, 0)
    robot.setTool(Pose([62.5, -108.253175, 100, -60, 90, 0]), None, 0)
    # First approach: joint move, then linear moves down to the work point.
    robot.MoveJ(Pose([200, 200, 500, 180, 0, 180]), [-46.18419, -6.77518, -20.54925, 71.38674, 49.58727, -302.54752] )
    robot.MoveL(Pose([200, 250, 348.734575, 180, 0, -150]), [-41.62707, -8.89064, -30.01809, 60.62329, 49.66749, -258.98418] )
    robot.MoveL(Pose([200, 200, 262.132034, 180, 0, -150]), [-43.73892, -3.91728, -35.77935, 58.57566, 54.11615, -253.81122] )
    robot.RunMessage("Setting air valve 1 on")
    robot.RunCode("TCP_On", True)
    robot.Pause(1000)
    robot.MoveL(Pose([200, 250, 348.734575, 180, 0, -150]), [-41.62707, -8.89064, -30.01809, 60.62329, 49.66749, -258.98418] )
    robot.MoveL(Pose([250, 300, 278.023897, 180, 0, -150]), [-37.52588, -6.32628, -34.59693, 53.52525, 49.24426, -251.44677] )
    robot.MoveL(Pose([250, 250, 191.421356, 180, 0, -150]), [-39.75778, -1.04537, -40.37883, 52.09118, 54.15317, -246.94403] )
    robot.RunMessage("Setting air valve off")
    robot.RunCode("TCP_Off", True)
    robot.Pause(1000)
    robot.MoveL(Pose([250, 300, 278.023897, 180, 0, -150]), [-37.52588, -6.32628, -34.59693, 53.52525, 49.24426, -251.44677] )
    robot.MoveL(Pose([250, 200, 278.023897, 180, 0, -150]), [-41.85389, -1.95619, -34.89154, 57.43912, 52.34162, -253.73403] )
    robot.MoveL(Pose([250, 150, 191.421356, 180, 0, -150]), [-43.82111, 3.29703, -40.29493, 56.02402, 56.61169, -249.23532] )
    robot.ProgFinish("Program")
    # robot.ProgSave(".","Program",True)
    # Dump the most recently generated program to stdout for inspection.
    robot.PROG = robot.PROG_LIST.pop()
    for line in robot.PROG:
        print(line)
    if len(robot.LOG) > 0:
        mbox('Program generation LOG:\n\n' + robot.LOG)
    input("Press Enter to close...")
def get_employer_jobpost(profile):
    """Return the profile owner's job posts, ordered, or None when the
    profile does not belong to an employer."""
    if not profile.is_employer:
        return None
    return JobPost.objects.filter(user=profile.user).order_by(
        'title', 'employment_option', 'is_active')
def _paginate_issues_with_cursor(page_url,
                                 request,
                                 query,
                                 cursor,
                                 limit,
                                 template,
                                 extra_nav_parameters=None,
                                 extra_template_params=None):
    """Display paginated list of issues using a cursor instead of offset.

    Args:
        page_url: Base URL of the issue page being paginated, typically built
            with 'reverse' from a view name and arguments.
        request: Request containing offset and limit parameters.
        query: Query over issues.
        cursor: cursor object passed to the web form and back again.
        limit: Maximum number of issues to return.
        template: Name of the template that renders the issue page.
        extra_nav_parameters: Optional dict of extra parameters appended to
            navigation links.
        extra_template_params: Optional dict of extra parameters passed to
            page rendering.

    Returns:
        Response for sending back to the browser.
    """
    issues, next_cursor, has_more = query.fetch_page(limit, start_cursor=cursor)
    nav_parameters = dict(extra_nav_parameters) if extra_nav_parameters else {}
    # Propagate the follow-up cursor so the "next" link resumes correctly.
    nav_parameters['cursor'] = next_cursor.urlsafe() if next_cursor else ''
    params = {
        'limit': limit,
        'cursor': nav_parameters['cursor'],
        'nexttext': 'Next',
    }
    if has_more:
        params['next'] = _url(page_url, **nav_parameters)
    if extra_template_params:
        params.update(extra_template_params)
    return _inner_paginate(request, issues, template, params)
def raise_blame_caller(level_up, ex):
    """Raises an exception, changing the stack trace to point to the caller.

    Args:
        level_up: number of additional frames above the immediate caller
            to blame.
        ex: the exception instance to re-raise.
    """
    # +2 skips this helper's own frame plus the caller's frame.
    new_st = get_stack_trace_of_caller(level_up + 2)
    # Python 2 three-expression raise: re-raise `ex` with the substituted
    # traceback (py3 equivalent would be `raise ex.with_traceback(new_st)`).
    raise type(ex), ex, new_st
def test_delay():
    # Doctest-style test (the body is only the docstring below): verifies
    # that Model.load() with a subset of fields lazily resolves the remaining
    # column values (birth, year) from the database.
    """
    >>> db = get_connection('sqlite://')
    >>> db.metadata.drop_all()
    >>> class Test(Model):
    ...     username = Field(unicode)
    ...     year = Field(int, default=0)
    ...     birth = Field(datetime.date)
    >>> c = Test(username='limodou', birth='2011-03-04', year=2012)
    >>> c.save()
    True
    >>> a = dict(username='limodou', id=1)
    >>> b = Test.load(a)
    >>> b.birth
    datetime.date(2011, 3, 4)
    >>> b.year
    2012
    """
def overlapping(startAttribute, # X
                endAttribute, # Y
                startValue, # A
                endValue, # B
                ):
    """
    Return an L{axiom.iaxiom.IComparison} (an object that can be passed as the
    'comparison' argument to Store.query/.sum/.count) which will constrain a
    query against 2 attributes for ranges which overlap with the given
    arguments.
    For a database with Items of class O which represent values in this
    configuration::
             X                   Y
            (a)                 (b)
             |-------------------|
                   (c)    (d)
                    |--------|         (e)      (f)
                                        |--------|
        (g) (h)
         |---|                            (i)    (j)
                                           |------|
     (k)                                             (l)
      |-------------------------------------|
        (a)                            (l)
         |-----------------------------|
                   (c)            (b)
                    |------------------------|
     (c)            (a)
      |----|
                                       (b)       (l)
                                        |---------|
    The query::
        myStore.query(
            O,
            overlapping(O.X, O.Y,
                        a, b))
    Will return a generator of Items of class O which represent segments a-b,
    c-d, e-f, k-l, a-l, c-b, c-a and b-l, but NOT segments g-h or i-j.
    (NOTE: If you want to pass attributes of different classes for
    startAttribute and endAttribute, read the implementation of this method to
    discover the additional join clauses required. This may be eliminated some
    day so for now, consider this method undefined over multiple classes.)
    In the database where this query is run, for an item N, all values of
    N.startAttribute must be less than N.endAttribute.
    startValue must be less than or equal to endValue.
    """
    assert startValue <= endValue
    # A segment [X, Y] overlaps [A, B] iff X lies in [A, B], Y lies in
    # [A, B], or [X, Y] entirely contains [A, B].
    return OR(
        AND(startAttribute >= startValue,
            startAttribute <= endValue),
        AND(endAttribute >= startValue,
            endAttribute <= endValue),
        AND(startAttribute <= startValue,
            endAttribute >= endValue)
        )
def _aligned_series(*many_series):
    """
    Return a new list of series containing the data in the input series, but
    with their indices aligned. NaNs will be filled in for missing values.

    Parameters
    ----------
    many_series : list[pd.Series]

    Returns
    -------
    aligned_series : list[pd.Series]
        A new list of series containing the data in the input series, but
        with their indices aligned. NaNs will be filled in for missing values.
    """
    # Concatenating along columns performs the index alignment; then each
    # column is pulled back out as its own Series.
    combined = pd.concat(many_series, axis=1)
    return [column for _label, column in iteritems(combined)]
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):  # pylint: disable=w0613
    """Set up the Car Wash sensor."""
    # Print startup message
    _LOGGER.info('Version %s', VERSION)
    _LOGGER.info('If you have ANY issues with this,'
                 ' please report them here: %s', ISSUE_URL)
    sensor = CarWashBinarySensor(hass,
                                 config.get(CONF_NAME),
                                 config.get(CONF_WEATHER),
                                 config.get(CONF_DAYS))
    async_add_entities([sensor])
def structConfMat(confmat, index=0, multiple=False):
"""
Creates a pandas dataframe from the confusion matrix. It distinguishes
between binary and multi-class classification.
Parameters
----------
confmat : numpy.ndarray
Array with n rows, each of one being a flattened confusion matrix.
index : INT, optional
Integer for index of the dataframe. The default is 0.
multiple : BOOL, optional
If True, returns metrics per CV fold. If False, returns mean and std
of the metric over all folds (in complex format).
Returns
-------
performance : pd.DataFrame
Dataframe with all classification performance metrics.
Use "{0.real:.3} [{0.imag:.2}]".format to display float_format in latex
Example for latex tables:
print(structConfMat(confmat,multiple=False)
.to_latex(float_format="{0.real:.3} [{0.imag:.2}]".format))
Note: for coonverting multiple performance to average/std use
(performance.mean() + 1j*performance.std()).to_frame().T
"""
intdim = int(np.sqrt(confmat.shape[1]))
conf_n = confmat.reshape((len(confmat), intdim, intdim))
corrects = conf_n.transpose(2,1,0).reshape((-1,len(conf_n)))[::(intdim+1)]
corrects = corrects.sum(axis=0)
n_folds = conf_n.sum(axis=1).sum(axis=1)
cr = corrects/n_folds
aux_n = conf_n[:,0][:,0]/conf_n[:,0].sum(axis=1)
for ix in range(intdim-1):
aux_n = np.c_[aux_n, conf_n[:,ix+1][:,ix+1]/conf_n[:,ix+1].sum(axis=1)]
b_acc = np.nanmean(aux_n, axis=1)
performance = pd.DataFrame({'CorrectRate': cr, 'ErrorRate': 1-cr,
'balAcc': b_acc},
index=index+np.arange(confmat.shape[0]))
for ix in range(aux_n.shape[1]):
auxperf = pd.DataFrame({f'Class_{ix}': aux_n[:,ix]},
index=index+np.arange(confmat.shape[0]))
performance = pd.concat((performance, auxperf),axis=1)
if intdim==2:
columns = performance.columns.tolist()
columns[columns.index('Class_0')]='Sensitivity'
columns[columns.index('Class_1')]='Specificity'
performance.columns = columns
prec = aux_n[:,1]/(aux_n[:,1]+1-aux_n[:,0])
f1 = 2*prec*aux_n[:,1]/(prec+aux_n[:,1])
performance['Precision'] = prec
performance['F1'] = f1
if multiple==False:
performance = (performance.mean(skipna=True)
+ 1j*performance.std(skipna=True)).to_frame().T
return performance | 36,327 |
async def test_user(opp, spider):
    """Test the user-initiated config flow from form to entry creation.

    Verifies that the flow first shows the "user" form, and that submitting
    valid credentials creates a config entry and triggers component setup.
    """
    await setup.async_setup_component(opp, "persistent_notification", {})
    # Start the flow as if the user added the integration from the UI.
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Patch both setup entry points so submitting the form does not hit the
    # real Spider API.
    with patch(
        "openpeerpower.components.spider.async_setup", return_value=True
    ) as mock_setup, patch(
        "openpeerpower.components.spider.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input=SPIDER_USER_DATA
        )
        await opp.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == DOMAIN
    assert result["data"][CONF_USERNAME] == USERNAME
    assert result["data"][CONF_PASSWORD] == PASSWORD
    # This integration does not set a unique ID on its entries.
    assert not result["result"].unique_id
    # Each setup entry point should have been invoked exactly once.
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
def get_img(ds):
    """Load one of the bundled standard image files as a Niimg.

    Parameters
    ----------
    ds : str
        Name of the image to load.
        Volume masks: "MNI152_T1_2mm_brain", "MNI152_T1_2mm_brain_mask",
        "MNI152_T1_2mm_brain_mask_dil", "MNI152_T1_1mm_brain",
        "MNI152_T1_1mm_brain_mask", "MNI152_T1_1mm_brain_mask_dil".
        Surface masks: "fs5_mask", "fs5_mask_lh", "fs5_mask_rh".

    Returns
    -------
    Niimg-like object
    """
    assert ds in datasets.keys(), "Unknown image specified"
    from . import data

    # Resolve the packaged data file to a concrete filesystem path, then load.
    with pkg_resources.path(data, datasets[ds]) as datafile:
        return nib.load(str(datafile))
def print_environ_usage():
    """Dump a list of environment variables used by CGI as HTML."""
    # NOTE: the previous version contained garbled mixed-language text
    # ("jako", "dalejed w") in this user-visible HTML; restored to English.
    print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well.  Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
def ajax_log(message):
    """Append a timestamped debug message to the ajax log file.

    Parameters
    ----------
    message : str
        Text to record; it is prefixed with the current timestamp.
    """
    message = "%s : %s" % (datetime.datetime.today(), message)
    log_path = os.path.join(settings.MEDIA_ROOT, "ajax_log.txt")
    # Use a context manager so the file handle is closed deterministically
    # (the old code relied on garbage collection to close it).
    with open(log_path, 'a') as log_file:
        log_file.write("%s\n" % message)
    if settings.DEBUG and settings.IS_LOCAL:
        # was a Python-2 print statement; the rest of this codebase is py3
        print(message)
    #if Preferences.objects.filter(default_account=True)[0].send_to_admin == True:
    #    notification_send()
def load_fish(return_X_y: bool = False, as_frame: bool = False):
    """
    Load a subset of the Fish market dataset.

    The full dataset is available [here](https://www.kaggle.com/aungpyaeap/fish-market).

    Arguments:
        return_X_y: return a tuple of (`X`, `y`) for convenience
        as_frame: return all the data as a pandas dataframe

    Usage:

    ```python
    from hulearn.datasets import load_fish

    df = load_fish(as_frame=True)
    X, y = load_fish(return_X_y=True)
    ```
    """
    filepath = resource_filename("hulearn", os.path.join("data", "fish.zip"))
    df = pd.read_csv(filepath)
    if as_frame:
        return df
    feature_cols = ["Species", "Length1", "Length2", "Length3", "Height", "Width"]
    X = df[feature_cols].values
    y = df["Weight"].values
    if return_X_y:
        return X, y
    return {"data": X, "target": y}
def check_for_missing_kmers(is_fastq: bool,
                            subtype_result: str,
                            scheme: str,
                            df: pd.DataFrame,
                            exp: int,
                            obs: int,
                            p: SubtypingParams) -> Tuple[Optional[str], Optional[str]]:
    """Check if there are too many missing kmers

    Also check if the mean kmer coverage depth is above the low coverage threshold.

    Args:
        is_fastq: Is input sample reads?
        subtype_result: Single subtype designation
        scheme: Scheme name
        df: Subtyping results dataframe
        exp: Expected number of kmers that should be found
        obs: Actual observed number of kmers found
        p: Subtyping parameters

    Returns:
        Tuple of QC status and any QC messages; both are ``None`` when the
        proportion of missing kmers is within the allowed threshold.
    """
    status = None
    messages = None
    # proportion of missing kmers
    p_missing = (exp - obs) / exp # type: float
    if p_missing > p.max_perc_missing_kmers:
        status = QC.FAIL
        if is_fastq:
            # For reads input, distinguish "not enough sequencing depth" from
            # "wrong scheme/serovar" using the mean frequency of kmers that
            # passed the frequency check.
            kmers_with_hits = df[df['is_kmer_freq_okay']] # type: pd.DataFrame
            depth = kmers_with_hits['freq'].mean()
            if depth < p.low_coverage_depth_freq:
                coverage_msg = f'Low coverage depth ({depth:.1f} < {float(p.low_coverage_depth_freq):.1f} expected); ' \
                               f'you may need more WGS data.'
            else:
                coverage_msg = f'Okay coverage depth ({depth:.1f} >= {float(p.low_coverage_depth_freq):.1f} expected), ' \
                               f'but this may be the wrong serovar or species for scheme "{scheme}"'
            messages = f'{p_missing:.2%} missing kmers; more than {p.max_perc_missing_kmers:.2%} missing ' \
                       f'kmers threshold. {coverage_msg}'
        else:
            # Assembly input: no depth information available, report only the
            # missing-kmer proportion for the identified subtype.
            messages = f'{p_missing:.2%} missing kmers for subtype "{subtype_result}"; more than ' \
                       f'{p.max_perc_missing_kmers:.2%} missing kmer threshold'
    return status, messages
def splat_feat_nd(init_grid, feat, coords):
    """
    Splat per-point features onto an N-D grid using multilinear
    (bilinear/trilinear/...) interpolation weights.

    Args:
        init_grid: B X nF X W X H X D X ..
        feat: B X nF X nPt
        coords: B X nDims X nPt in [-1, 1]
    Returns:
        grid: B X nF X W X H X D X ..
    """
    wts_dim = []
    pos_dim = []
    grid_dims = init_grid.shape[2:]
    B = init_grid.shape[0]
    F = init_grid.shape[1]
    n_dims = len(grid_dims)
    # Work on a flattened spatial view so scatter_add_ can use linear indices.
    grid_flat = init_grid.view(B, F, -1)
    # For each spatial dimension, compute the two neighboring integer grid
    # positions of every point and their linear interpolation weights.
    for d in range(n_dims):
        # map normalized [-1, 1] coords into grid units [0, grid_dims[d]]
        pos = coords[:, [d], :] * grid_dims[d] / 2 + grid_dims[d] / 2
        pos_d = []
        wts_d = []
        for ix in [0, 1]:
            pos_ix = torch.floor(pos) + ix
            # NOTE(review): positions equal to exactly 0 are masked out here
            # (`> 0`, not `>= 0`) — confirm this boundary handling is intended.
            safe_ix = (pos_ix > 0) & (pos_ix < grid_dims[d])
            safe_ix = safe_ix.type(pos.dtype)
            wts_ix = 1 - torch.abs(pos - pos_ix)
            # zero both weight and position for out-of-bounds corners
            wts_ix = wts_ix * safe_ix
            pos_ix = pos_ix * safe_ix
            pos_d.append(pos_ix)
            wts_d.append(wts_ix)
        pos_dim.append(pos_d)
        wts_dim.append(wts_d)
    l_ix = [[0, 1] for d in range(n_dims)]
    # Accumulate each point's feature into every corner of its enclosing cell
    # (2**n_dims corners), weighted by the product of per-dimension weights.
    for ix_d in itertools.product(*l_ix):
        wts = torch.ones_like(wts_dim[0][0])
        index = torch.zeros_like(wts_dim[0][0])
        for d in range(n_dims):
            # fold the n-D corner position into a linear index (row-major)
            index = index * grid_dims[d] + pos_dim[d][ix_d[d]]
            wts = wts * wts_dim[d][ix_d[d]]
        index = index.long()
        grid_flat.scatter_add_(2, index.expand(-1, F, -1), feat * wts)
    # NOTE(review): rounding discards the fractional interpolation weights in
    # the accumulated grid — verify this quantization is deliberate.
    grid_flat = torch.round(grid_flat)
    return grid_flat.view(init_grid.shape)
def q_nstep_td_error_ngu(
        data: namedtuple,
        gamma: Any,  # float,
        nstep: int = 1,
        cum_reward: bool = False,
        value_gamma: Optional[torch.Tensor] = None,
        criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
) -> torch.Tensor:
    """
    Overview:
        Multistep (1-step or n-step) TD error for Q-learning based algorithms.
    Arguments:
        - data (:obj:`q_nstep_td_data`): input data tuple/namedtuple with fields \
            ``(q, next_n_q, action, next_n_action, reward, done, weight)``
        - gamma (:obj:`float`): discount factor
        - nstep (:obj:`int`): number of steps, default 1
        - cum_reward (:obj:`bool`): whether ``reward`` is already the cumulative \
            n-step reward computed at collection time
        - value_gamma (:obj:`torch.Tensor`): per-sample discount for the target \
            Q-value (overrides ``gamma ** nstep`` when given)
        - criterion (:obj:`torch.nn.modules`): elementwise loss criterion
    Returns:
        - loss (:obj:`torch.Tensor`): weighted mean TD error, 0-dim tensor
        - td_error_per_sample (:obj:`torch.Tensor`): per-sample TD error, 1-dim
    Shapes:
        - q / next_n_q: :math:`(B, N)` with ``N`` the action dimension
        - action / next_n_action: :math:`(B, )`
        - reward: :math:`(T, B)` raw n-step rewards, or :math:`(B, )` cumulative
        - done: :math:`(B, )`
        - td_error_per_sample: :math:`(B, )`
    """
    q, next_n_q, action, next_n_action, reward, done, weight = data
    assert len(action.shape) == 1, action.shape
    if weight is None:
        weight = torch.ones_like(action)
    # Gather Q(s, a) and the bootstrap value Q'(s', a') per batch element.
    idx = torch.arange(action.shape[0])
    q_sa = q[idx, action]
    bootstrap = next_n_q[idx, next_n_action]
    if cum_reward:
        # reward already holds the cumulative n-step return
        discount = (gamma ** nstep) if value_gamma is None else value_gamma
        target = reward + discount * bootstrap * (1 - done)
    else:
        target = nstep_return_ngu(nstep_return_data(reward, bootstrap, done), gamma, nstep, value_gamma)
    td_error_per_sample = criterion(q_sa, target.detach())
    return (td_error_per_sample * weight).mean(), td_error_per_sample
def unzip(filename):
    """Yield ``(file_object, metadata)`` pairs for every member of a zip archive.

    The metadata dict currently contains only the member's ``name``. The
    archive stays open while the generator is being consumed.
    """
    with zipfile.ZipFile(filename, 'r') as archive:
        for member in archive.infolist():
            meta = {'name': member.filename}
            yield archive.open(member, 'r'), meta
def list_datasets(*, direc=None, kind=None, hsh=None, seed=None, redshift=None):
    """Yield all datasets which match a given set of filters.

    Can be used to determine parameters of all cached datasets, in conjunction with :func:`readbox`.

    Parameters
    ----------
    direc : str, optional
        The directory in which to search for the boxes. By default, this is the centrally-managed
        directory, given by the ``config.yml`` in ``.21cmfast``.
    kind: str, optional
        Filter by this kind (one of {"InitialConditions", "PerturbedField", "IonizedBox",
        "TsBox", "BrightnessTemp"}
    hsh: str, optional
        Filter by this hsh.
    seed: str, optional
        Filter by this seed.
    redshift: float, optional
        Filter by this redshift; it is rendered to four decimal places when
        matching against the filename.

    Yields
    ------
    fname: str
        The filename of the dataset (without directory).
    parts: tuple of strings
        The (kind, hsh, seed) of the data set.
    """
    direc = path.expanduser(direc or config["direc"])
    # Build a regex: any filter left as None is replaced by a capturing pattern.
    # NOTE(review): literal "." and "_" in the template are unescaped regex
    # metacharacters ("." matches any character) — confirm this looseness is
    # acceptable for the filenames involved.
    fname = "{}{}_{}_r{}.h5".format(
        kind or r"(?P<kind>[a-zA-Z]+)",
        f"_z{redshift:.4f}" if redshift is not None else "(.*)",
        hsh or r"(?P<hash>\w{32})",
        seed or r"(?P<seed>\d+)",
    )
    for fl in os.listdir(direc):
        if re.match(fname, fl):
            yield fl
def thermal_error_estimation(experimental_data):
    """
    Identify anomalies in the thermographic data and locate the frames at
    which the reflection phase starts and ends.

    Args:
        experimental_data (3d array): thermographic data; the first two axes
            are spatial and the last axis is time (frames).

    Returns:
        tuple of int: ``(reflection_index_st, reflection_index_end)`` — the
        frame indices of the largest positive and largest negative step
        change in the mean temperature profile.  (Previously computed but
        never returned, despite the docstring's stated purpose.)
    """
    # calculating the mean temperature of each thermogram (spatial average)
    avg_temp = np.mean(experimental_data, axis=(0, 1))
    # frame-to-frame step difference, used to spot abrupt changes
    delta_temp = np.diff(avg_temp)
    # largest positive jump = reflection start; largest drop = reflection end
    reflection_index_st = np.argmax(delta_temp)
    reflection_index_end = np.argmin(delta_temp)
    # plotting the data to check for anomalies
    plt.figure(figsize=(10, 6))
    plt.plot(delta_temp, linewidth=2, linestyle='--', label='delta temperature profile')
    plt.xlabel('Time (frames)')
    plt.ylabel('step difference temperature (K)')
    plt.scatter(reflection_index_st, delta_temp[reflection_index_st], linewidth=3, color='tab:red',
                label='Reflection start point', marker='o')
    plt.scatter(reflection_index_end, delta_temp[reflection_index_end], linewidth=3, color='tab:green',
                label='Radiation start point', marker='o')
    plt.text(reflection_index_st, delta_temp[reflection_index_st] - 2, reflection_index_st)
    plt.text(reflection_index_end, delta_temp[reflection_index_end] + 2, reflection_index_end)
    plt.legend()
    plt.grid()
    plt.show()
    return reflection_index_st, reflection_index_end
def app_with_mail(app):
    """App with email test templates."""
    app.register_blueprint(
        Blueprint("invenio_app_ils_tests", __name__, template_folder="templates")
    )
    # Extend the app's jinja loader so templates can also be resolved from the
    # local tests/templates folder, faking the `invenio-theme` base templates.
    extra_templates_dir = os.path.join(os.path.dirname(__file__), "templates")
    app.jinja_loader = jinja2.ChoiceLoader(
        [
            app.jinja_loader,
            jinja2.FileSystemLoader(extra_templates_dir),
        ]
    )
    yield app
def part1():
    """Solution to day 3, part 1."""
    grid = load_input()
    # slope of (3 right, 1 down) starting from the top-left corner
    tobbogan = Tobbogan(grid, (3, 1), (0, 0))
    n_trees = tobbogan.unhappy_encounters()
    print(f"{n_trees} trees were encountered.")
def generate_activation_code(email: str) -> str:
    """
    Takes email address and combines it with a timestamp before encrypting everything with the ACTIVATION_LINK_SECRET

    No database storage required for this action

    :param email: email
    :type email: unicode
    :return: activation_code
    :rtype: str
    """
    # canonicalize the address so equivalent spellings yield comparable codes
    email = str(email).lower().strip()
    time_stamp = str(int(time.time()))
    # normally encrypt emails, so they are not stored in plaintext with a random nonce
    secret_key = hashlib.sha256(settings.ACTIVATION_LINK_SECRET.encode()).hexdigest()
    crypto_box = nacl.secret.SecretBox(secret_key, encoder=nacl.encoding.HexEncoder)
    # payload is "<unix_ts>#<email>" so expiry can be checked on validation
    validation_secret = crypto_box.encrypt((time_stamp + '#' + email).encode("utf-8"),
                                           nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE))
    # hex-encode the ciphertext so the code is safe to embed in a URL
    return nacl.encoding.HexEncoder.encode(validation_secret).decode()
def fmin_sgd(*args, **kwargs):
    """
    See FMinSGD for documentation. This function creates that object, exhausts
    the iterator, and then returns the final self.current_args values.

    Keyword Args:
        print_interval (int): number of steps consumed per progress printout;
            defaults to sys.maxsize (effectively: run to exhaustion silently).
    """
    # sys.maxint and the print statement were Python-2-only; fixed for py3.
    print_interval = kwargs.pop('print_interval', sys.maxsize)
    obj = FMinSGD(*args, **kwargs)
    while True:
        t = time.time()
        vals = obj.nextN(print_interval)
        if not vals:
            break
        print('Value', np.mean(vals), 'time', (time.time() - t))
    return obj.current_args
async def test_thermostat_power_state(hass, hk_driver, events):
    """Test if accessory and HA are updated accordingly.

    Drives state changes from the Home Assistant side (heat -> off) and then
    from the HomeKit side (target state 1 = heat, 2 = cool), checking the
    accessory characteristics, the resulting service calls and the emitted
    events at each step.
    """
    entity_id = "climate.test"
    # SUPPORT_ON_OFF = True
    hass.states.async_set(
        entity_id,
        HVAC_MODE_HEAT,
        {
            ATTR_SUPPORTED_FEATURES: 4096,
            ATTR_TEMPERATURE: 23.0,
            ATTR_CURRENT_TEMPERATURE: 18.0,
            ATTR_HVAC_ACTION: CURRENT_HVAC_HEAT,
            ATTR_HVAC_MODES: [
                HVAC_MODE_HEAT_COOL,
                HVAC_MODE_COOL,
                HVAC_MODE_AUTO,
                HVAC_MODE_HEAT,
                HVAC_MODE_OFF,
            ],
        },
    )
    await hass.async_block_till_done()
    acc = Thermostat(hass, hk_driver, "Climate", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    # While heating, both current and target heat/cool should report 1 (HEAT).
    assert acc.char_current_heat_cool.value == 1
    assert acc.char_target_heat_cool.value == 1
    # Turn the climate entity off from the HA side.
    hass.states.async_set(
        entity_id,
        HVAC_MODE_OFF,
        {
            ATTR_TEMPERATURE: 23.0,
            ATTR_CURRENT_TEMPERATURE: 18.0,
            ATTR_HVAC_ACTION: CURRENT_HVAC_IDLE,
            ATTR_HVAC_MODES: [
                HVAC_MODE_HEAT_COOL,
                HVAC_MODE_COOL,
                HVAC_MODE_AUTO,
                HVAC_MODE_HEAT,
                HVAC_MODE_OFF,
            ],
        },
    )
    await hass.async_block_till_done()
    assert acc.char_current_heat_cool.value == 0
    assert acc.char_target_heat_cool.value == 0
    # A duplicate OFF state update must not change the characteristics.
    hass.states.async_set(
        entity_id,
        HVAC_MODE_OFF,
        {
            ATTR_TEMPERATURE: 23.0,
            ATTR_CURRENT_TEMPERATURE: 18.0,
            ATTR_HVAC_ACTION: CURRENT_HVAC_IDLE,
            ATTR_HVAC_MODES: [
                HVAC_MODE_HEAT_COOL,
                HVAC_MODE_COOL,
                HVAC_MODE_AUTO,
                HVAC_MODE_HEAT,
                HVAC_MODE_OFF,
            ],
        },
    )
    await hass.async_block_till_done()
    assert acc.char_current_heat_cool.value == 0
    assert acc.char_target_heat_cool.value == 0
    # Set from HomeKit
    call_set_hvac_mode = async_mock_service(hass, DOMAIN_CLIMATE, "set_hvac_mode")
    char_target_heat_cool_iid = acc.char_target_heat_cool.to_HAP()[HAP_REPR_IID]
    # Target state 1 from HomeKit should call set_hvac_mode with HEAT.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_target_heat_cool_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert call_set_hvac_mode
    assert call_set_hvac_mode[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_hvac_mode[0].data[ATTR_HVAC_MODE] == HVAC_MODE_HEAT
    assert acc.char_target_heat_cool.value == 1
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == "TargetHeatingCoolingState to 1"
    # Target state 2 from HomeKit should call set_hvac_mode with COOL.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_target_heat_cool_iid,
                    HAP_REPR_VALUE: 2,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert call_set_hvac_mode
    assert call_set_hvac_mode[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_hvac_mode[1].data[ATTR_HVAC_MODE] == HVAC_MODE_COOL
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] == "TargetHeatingCoolingState to 2"
    assert acc.char_target_heat_cool.value == 2
def normalize_timestamp(date_time):
    """
    TODO: get rid of this function and all references to it / uses of it.

    Attempt to convert a string timestamp in to a TruSTAR compatible format for submission.
    Will return current time with UTC time zone if None

    :param date_time: int that is seconds or milliseconds since epoch, or string/datetime object containing date, time,
        and (ideally) timezone.
        Examples of supported timestamp formats: 1487890914, 1487890914000, "2017-02-23T23:01:54", "2017-02-23T23:01:54+0000"
    :return: If input is an int, will return milliseconds since epoch. Otherwise, will return a normalized isoformat
        timestamp.

    NOTE(review): a future-dated int raises internally, is caught by the broad
    except below, and falls through to the isoformat-string path — so callers
    get mixed return types (int millis vs. str) for int inputs; confirm this
    is intended.
    """
    # if timestamp is null, just return the same null.
    if not date_time:
        return date_time
    datetime_dt = datetime.now()
    # get current time in seconds-since-epoch
    current_time = int(time.time()) * 1000
    try:
        # identify type of timestamp and convert to datetime object
        if isinstance(date_time, int):
            # if timestamp has less than 10 digits, it is in seconds
            if date_time < 10000000000:
                date_time *= 1000
            # if timestamp is incorrectly forward dated, set to current time
            if date_time > current_time:
                raise ValueError("The given time %s is in the future." % date_time)
            return date_time
        if isinstance(date_time, str):
            datetime_dt = dateutil.parser.parse(date_time)
        elif isinstance(date_time, datetime):
            datetime_dt = date_time
    # if timestamp is none of the formats above, error message is printed and timestamp is set to current time by
    # default
    except Exception as e:
        logger.warning(e)
        logger.warning("Using current time as replacement.")
        datetime_dt = datetime.now()
    # if timestamp is timezone naive, add timezone
    if not datetime_dt.tzinfo:
        # add system timezone and convert to UTC
        datetime_dt = get_localzone().localize(datetime_dt).astimezone(pytz.utc)
    # converts datetime to iso8601
    return datetime_dt.isoformat()
def round_rect(surface, rect, color, rad=20, border=0, inside=(0,0,0,0)):
    """
    Draw a rect with rounded corners onto ``surface``.

    ``rad`` controls the corner curvature in pixels. If ``border`` is nonzero
    the rect is drawn as an outline of that width filled with ``inside``;
    otherwise it is filled solid. Both ``color`` and ``inside`` support alpha.
    """
    rect = pg.Rect(rect)
    local_rect = rect.copy()
    local_rect.topleft = 0, 0
    # Render on an off-screen alpha surface, then blit the result in place.
    canvas = pg.Surface(rect.size).convert_alpha()
    canvas.fill((0, 0, 0, 0))
    _render_region(canvas, local_rect, color, rad)
    if border:
        # shrink by the border width and paint the interior color
        local_rect.inflate_ip(-2 * border, -2 * border)
        _render_region(canvas, local_rect, inside, rad)
    surface.blit(canvas, rect)
def strings_from_apk(app_file, app_dir, elf_strings):
    """Extract the strings from an app.

    Collects Android string resources from the APK plus strings extracted
    from bundled native (.so) libraries, then mines them for URLs, emails
    and potential secrets.

    Args:
        app_file: APK filename, relative to ``app_dir``.
        app_dir: directory containing the APK.
        elf_strings: list of ``{so_name: [strings]}`` dicts from native libs.

    Returns:
        dict with keys ``strings``, ``urls_list``, ``url_nf``, ``emails_nf``
        and ``secrets``; empty dict on any extraction failure.
    """
    try:
        logger.info('Extracting Strings from APK')
        dat = []
        secrets = []
        urls = []
        urls_nf = []
        emails_nf = []
        apk_file = os.path.join(app_dir, app_file)
        and_a = apk.APK(apk_file)
        rsrc = and_a.get_android_resources()
        if rsrc:
            # Walk every string resource of the app's own package.
            pkg = rsrc.get_packages_names()[0]
            rsrc.get_strings_resources()
            for i in rsrc.values[pkg].keys():
                res_string = rsrc.values[pkg][i].get('string')
                if res_string:
                    for duo in res_string:
                        # render as a `"name" : "value"` pair for reporting
                        cap_str = '"' + duo[0] + '" : "' + duo[1] + '"'
                        if is_secret(duo[0] + '"'):
                            secrets.append(cap_str)
                        dat.append(cap_str)
            data_string = ''.join(dat)
            urls, urls_nf, emails_nf = url_n_email_extract(
                data_string, 'Android String Resource')
        if elf_strings:
            for solib in elf_strings:
                for so, str_list in solib.items():
                    # add to strings from jar
                    dat.extend(str_list)
                    # extract url, email
                    so_str = ' '.join(str_list)
                    su, suf, sem = url_n_email_extract(
                        so_str, so)
                    urls.extend(su)
                    urls_nf.extend(suf)
                    emails_nf.extend(sem)
        # deduplicate collected strings (order is not preserved)
        strings_dat = list(set(dat))
        return {
            'strings': strings_dat,
            'urls_list': urls,
            'url_nf': urls_nf,
            'emails_nf': emails_nf,
            'secrets': secrets,
        }
    except Exception:
        # best-effort extraction: log the failure and return an empty result
        logger.exception('Extracting Strings from APK')
        return {}
def scan(this, accumulator, seed=None):
    """Applies an accumulator function over an observable sequence and
    returns each intermediate result. The optional seed value is used as
    the initial accumulator value.

    For aggregation behavior with no intermediate results, see OutputThing.aggregate.

    1 - scanned = source.scan(lambda acc, x: acc + x)
    2 - scanned = source.scan(lambda acc, x: acc + x, 0)

    Keyword arguments:
    accumulator -- An accumulator function to be invoked on each element.
    seed -- [Optional] The initial accumulator value.

    Returns an observable sequence containing the accumulated values.
    """
    has_seed = seed is not None
    # Closure state via ``nonlocal`` replaces the py2-era single-element-list
    # workaround that the original used.
    has_accumulation = False
    accumulation = None

    def calculate(x):
        nonlocal has_accumulation, accumulation
        if has_accumulation:
            accumulation = accumulator(accumulation, x)
        else:
            # first element: fold in the seed if one was supplied
            accumulation = accumulator(seed, x) if has_seed else x
        has_accumulation = True
        return accumulation

    return this.map(calculate)
def get_efficient_pin_order_scramble():
    """ Gets an efficient pin order scramble for a Rubik's Clock. """
    # Thin wrapper: delegates entirely to the shared utility scrambler bridge.
    return _UTIL_SCRAMBLER.call("util_scramble.getClockEfficientPinOrderScramble")
def fixed_padding(inputs, kernel_size):
    """Pad the input along the spatial dimensions independently of input size.

    This function is copied/modified from original repo:
    https://github.com/tensorflow/tpu/blob/acb331c8878ce5a4124d4d7687df5fe0fadcd43b/models/official/resnet/resnet_model.py#L357

    Args:
        inputs: `Tensor` of size `[batch, channels, height, width]` or
            `[batch, height, width, channels]` depending on `data_format`.
        kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`
            operations. Should be a positive integer.

    Returns:
        A padded `Tensor` of the same `data_format` with size either intact
        (if `kernel_size == 1`) or padded (if `kernel_size > 1`).
    """
    total_pad = kernel_size - 1
    start_pad = total_pad // 2
    end_pad = total_pad - start_pad
    # ZeroPadding2D avoids inserting a TFOpLambda layer into the graph.
    pad_layer = tf.keras.layers.ZeroPadding2D(
        padding=((start_pad, end_pad), (start_pad, end_pad))
    )
    return pad_layer(inputs)
def _is_si_object(storage_instance):
"""
Helper method for determining if a storage instance is object.
Args:
storage_instance:
Returns: (Bool) True if object, False if not.
"""
si_type = storage_instance.get("service_configuration", None)
if si_type is None:
# object not supported on storage instance
return False
elif si_type == "object":
return True
else:
return False | 36,350 |
def wgs84_to_bd09(lng, lat):
    """WGS84 -> BD09, converting via the intermediate GCJ02 datum."""
    gcj_lng, gcj_lat = wgs84_to_gcj02(lng, lat)
    return gcj02_to_bd09(gcj_lng, gcj_lat)
def spin_images(pc1, pc2, opt = spin_image_options()):
    """Compute spin image descriptors for a point cloud

    Parameters
    ----------
    pc1 : pcloud
        The function computes a spin image descriptor for each point in
        the point cloud pc1
    pc2 : pcloud
        The points in the point cloud pc2 are used for casting votes
        when constructing the spin image descriptors. pc2 can simply be
        the same as pc1. However, typically it will be a larger set of
        points than pc1, so that the descriptors can be computed with
        enough detail. In any case, pc1 and pc2 should be sampled from
        the same shape
    opt : geomproc.spin_image_options, optional
        Object with the configuration for computing the spin image
        descriptors

    Returns
    -------
    desc : array_like
        Spin image descriptors, represented as an array of shape (n,
        radial_bins*height_bins), where 'n' is the number of points in
        pc1, and radial_bins*height_bins is the total number of bins in
        one descriptor according to the given configuration object.
        desc[i, :] represents the descriptor of point 'i' in pc1

    See Also
    --------
    geomproc.spin_image_options

    Notes
    -----
    The implementation is based on the paper of Johnson and Hebert,
    "Using Spin Images for Efficient Object Recognition in Cluttered 3D
    Scenes", IEEE PAMI 21(5), 1999.
    To compute one spin image descriptor, the method places a cylinder
    at a point according to the position of the point and orientation of
    the normal of the point. It then divides the cylinder radially and
    along its normal to create a number of bins, and counts how many
    points fall inside each bin. Finally, if desired, each bin is
    normalized by the total number of points in all the bins, to make
    the descriptor more robust to point clouds with different numbers of
    samples.
    """
    # Initialize descriptor
    desc = np.zeros((pc1.point.shape[0], opt.radial_bins*opt.height_bins))
    # Set up KDTree with all points from pc2
    tree = KDTree(pc2.point.tolist())
    # Build descriptor for each point in pc1
    for i in range(pc1.point.shape[0]):
        # Get point and its normal
        point = pc1.point[i, :]
        normal = pc1.normal[i, :]
        # Get all the points in the range of the descriptor (neighbors)
        # (assumes dist_query returns the coordinates of points within
        # opt.radius — project KDTree, confirm)
        neighbors = tree.dist_query(pc1.point[i, :], opt.radius)
        # Iterate through each neighbor
        for j in range(len(neighbors)):
            # Get neighbor
            neigh = np.array(neighbors[j])
            #### Compute radial and height distances for this neighbor
            # Form a vector from the reference point to the neighbor
            vec = neigh - point
            # Project the vector on the normal of the reference point
            # to get the distance of the neighbor along the normal
            # Also, normalize the distance by the height of the
            # descriptor
            height_dist = np.dot(normal, vec) / opt.height
            # Project the vector on the plane perpendicular to the
            # normal to get the distance of the neighbor along the
            # radial direction
            # Also, normalize the distance by the radius of the
            # descriptor
            radial_dist = np.linalg.norm(vec - height_dist*normal) / opt.radius
            # Check if point is inside the range of the descriptor and
            # can be considered in the descriptor construction
            # Since we normalized the distances by radius and height, we
            # can simply compare to 1.0
            if (radial_dist < 1.0) and (abs(height_dist) < 1.0):
                # Normalize the height_dist to a value between 0 and 1
                height_dist = (height_dist + 1.0)/2.0
                # Find bin index for radial and height distances
                radial_index = math.floor(radial_dist*opt.radial_bins)
                height_index = math.floor(height_dist*opt.height_bins)
                # Convert two bin indices into one index and cast a vote
                # in the corresponding bin
                desc[i, radial_index + height_index*opt.radial_bins] += 1
    # If normalizing, divide each bin by the total number of votes in
    # all the bins
    # NOTE(review): desc.sum() sums over ALL descriptors jointly rather than
    # per descriptor (axis=1, keepdims=True); it also divides by zero when no
    # votes were cast at all. Confirm whether per-descriptor normalization
    # was intended.
    if opt.normalize:
        desc /= desc.sum()
    return desc
def clone_file_info(input, output):
    """clone_file_info(FileConstHandle input, FileHandle output)

    Thin SWIG wrapper — presumably copies the file-info metadata from
    *input* to *output*; behavior is delegated entirely to the native
    ``_RMF`` extension module.
    """
    return _RMF.clone_file_info(input, output)
def queries_to_retract_from_dataset(client,
                                    project_id,
                                    dataset_id,
                                    person_id_query,
                                    retraction_type=None):
    """
    Get list of queries to remove all records in all tables associated with supplied ids

    :param client: bigquery client
    :param project_id: identifies associated project
    :param dataset_id: identifies associated dataset
    :param person_id_query: query to select person_ids to retract
    :param retraction_type: string indicating whether all data needs to be removed, including RDR,
        or if RDR data needs to be kept intact. Can take the values 'rdr_and_ehr' or 'only_ehr'
    :return: list of dict with keys query, dataset, table
    """
    LOGGER.info(f'Checking existing tables for {project_id}.{dataset_id}')
    # Only generate queries for tables that actually exist in the dataset.
    existing_tables = [
        table.table_id
        for table in client.list_tables(f'{project_id}.{dataset_id}')
    ]
    queries = {TABLES: []}
    tables_to_retract = set(list(TABLES_FOR_RETRACTION))
    # Ignore RDR rows using id constant factor if retraction type is 'only_ehr'
    id_const = 2 * ID_CONSTANT_FACTOR
    # Unioned datasets (or full rdr+ehr retraction) also retract the non-EHR
    # tables and drop the id-constant guard so RDR rows are included.
    if ru.is_unioned_dataset(
            dataset_id) or retraction_type == RETRACTION_RDR_EHR:
        tables_to_retract |= set(NON_EHR_TABLES)
        id_const = 0
    for table in tables_to_retract:
        id_const_condition = JINJA_ENV.from_string(ID_CONST_CONDITION).render(
            table_id=get_table_id(table), id_constant=id_const)
        if table in existing_tables:
            if table in [common.DEATH, common.PERSON]:
                # DEATH/PERSON have no per-row id column to filter on, so the
                # rendered query omits the id-constant condition.
                q_dataset = JINJA_ENV.from_string(
                    RETRACT_DATA_TABLE_QUERY).render(
                        project=project_id,
                        dataset=dataset_id,
                        table=table,
                        person_id_query=person_id_query)
                queries[TABLES].append(q_dataset)
            else:
                q_dataset = JINJA_ENV.from_string(
                    RETRACT_DATA_TABLE_QUERY).render(
                        project=project_id,
                        dataset=dataset_id,
                        table=table,
                        table_id=get_table_id(table),
                        person_id_query=person_id_query,
                        id_const_condition=id_const_condition)
                queries[TABLES].append(q_dataset)
    # FACT_RELATIONSHIP needs its own query since person references appear in
    # domain/id pairs rather than a person_id column.
    table = common.FACT_RELATIONSHIP
    if table in existing_tables:
        q_fact_relationship = JINJA_ENV.from_string(
            RETRACT_DATA_FACT_RELATIONSHIP).render(
                project=project_id,
                dataset=dataset_id,
                table=table,
                PERSON_DOMAIN=PERSON_DOMAIN,
                person_id_query=person_id_query)
        queries[TABLES].append(q_fact_relationship)
    return queries[TABLES]
def calc_moments(imcube, rmscube, mask=None):
    """
    Calculate moments of a masked cube and their errors

    Parameters
    ----------
    imcube : SpectralCube
        The image cube for which to calculate the moments and their errors.
    rmscube : SpectralCube
        A cube representing the noise estimate at each location in the image
        cube. Should have the same units as the image cube.
    mask : `~numpy.ndarray`
        A binary mask array (0s and 1s) to be applied before measuring the flux
        and uncertainty. This should NOT be a SpectralCube.

    Returns
    -------
    altmom : `~numpy.ndarray`
        A stack of the three moment maps. These are generally redundant since
        they were previously calculated by SpectralCube.
    errmom : `~numpy.ndarray`
        A stack of the three uncertainty maps.
    """
    # Apply the mask (if given) to both the image and the noise cube.
    if mask is not None:
        immask = imcube.with_mask(mask > 0)
        errmask = rmscube.with_mask(mask > 0)
    else:
        immask = imcube
        errmask = rmscube
    tbarry = immask.unitless_filled_data[:]
    nsearry = errmask.unitless_filled_data[:]
    # Broadcast the spectral axis to the full cube shape for weighted sums.
    vels = immask.spectral_axis.to(u.km/u.s)
    vel3d = np.expand_dims(vels, axis=(1, 2))
    velarry = np.broadcast_to(vel3d, immask.shape)
    # Moment 0: integrated intensity; error adds in quadrature.
    mom0 = np.nansum( tbarry, axis=0 )
    mom0_var = np.nansum( nsearry**2, axis=0 )
    mom0_err = np.sqrt(mom0_var)
    # Moment 1: intensity-weighted mean velocity, with propagated error.
    mom1 = np.nansum( tbarry * velarry, axis=0) / mom0
    mom1_var = np.nansum( ((velarry - mom1)/mom0 * nsearry)**2, axis=0 )
    mom1_err = np.sqrt(mom1_var)
    # Moment 2: intensity-weighted velocity variance; stdev = sqrt(mom2).
    # NOTE(review): the mom2_var expression nests two axis=0 nansums inside an
    # outer axis=0 nansum — verify the error-propagation formula and the axis
    # handling against the intended derivation.
    mom2 = np.nansum( tbarry * (velarry-mom1)**2, axis=0) / mom0
    mom2_var = np.nansum( ((mom0 * (velarry-mom1)**2 - np.nansum(tbarry*(velarry
                - mom1)**2, axis=0)) / mom0**2 * nsearry)**2 + (2*np.nansum(
                tbarry*(velarry-mom1), axis=0)/mom0 * mom1_err)**2, axis=0 )
    stdev = np.sqrt(mom2)
    sderr = np.sqrt(mom2_var)/(2*stdev)
    # Replace infinities (from division by zero moment-0) with NaN.
    for x in [mom1, stdev, mom1_err, sderr]:
        x[x == np.inf] = np.nan
        x[x == -np.inf] = np.nan
    altmom = np.stack([mom0, mom1, stdev], axis=0)
    errmom = np.stack([mom0_err, mom1_err, sderr], axis=0)
    return altmom, errmom
def value(*, source: str, current_location: types.Location) -> types.TSourceMapEntries:
    """
    Calculate the source map of any JSON value.

    Dispatches on the first non-whitespace character: arrays and objects are
    handled by their dedicated helpers, everything else is a primitive.

    Args:
        source: The JSON document.
        current_location: The current location in the source.

    Returns:
        A list of JSON pointers and source map entries.
    """
    advance.to_next_non_whitespace(source=source, current_location=current_location)
    check.not_end(source=source, current_location=current_location)
    first_char = source[current_location.position]
    if first_char == constants.BEGIN_ARRAY:
        return array(source=source, current_location=current_location)
    if first_char == constants.BEGIN_OBJECT:
        return object_(source=source, current_location=current_location)
    return primitive(source=source, current_location=current_location)
def greplines(lines, regexpr_list, reflags=0):
    """
    Search ``lines`` for one or more regex patterns and return the matching
    lines together with their line indices.

    TODO: move to util_str, rework to be core of grepfile
    """
    import numpy as np

    # Normalize the pattern(s) and flag(s) to parallel lists.
    patterns = regexpr_list if isinstance(regexpr_list, (list, tuple)) else [regexpr_list]
    if isinstance(reflags, (list, tuple)):
        flag_list = reflags
    else:
        flag_list = [reflags] * len(patterns)
    compiled = [re.compile(pat, flags=fl) for pat, fl in zip(patterns, flag_list)]
    # Cumulative character offsets of each line ending, used to map a match
    # position in the joined text back to a line index.
    line_ends = np.cumsum(list(map(len, lines)))
    text = ''.join(lines)
    matched_lines = []
    matched_lxs = []
    for regex in compiled:
        # FIXME: multiline mode doesnt work
        for mo in regex.finditer(text):
            candidates = np.where(mo.start() < line_ends)[0][0:1]
            if len(candidates) == 1:
                lx = candidates[0]
                start = line_ends[lx - 1] if lx > 0 else 0
                matched_lines.append(text[start:line_ends[lx]])
                matched_lxs.append(lx)
    return matched_lines, matched_lxs
def get_class_cnts_by_feature_null(df, class_col, feature, normalize=True):
    """
    Break out class frequencies (in `df[class_col]`) by whether or not
    `df[feature]` is null.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame on which this function will operate.
    class_col : str
        Column name for the class / target.
    feature : str
        Column name for the feature.
    normalize : bool (default=True)
        Passed straight through to `pandas.Series.value_counts`, so counts
        are normalized within the null and non-null subsets respectively.

    Return
    ------
    pandas.DataFrame of class counts, broken out by whether or not
    `df[feature]` is null.
    """
    is_null = df[feature].isnull()
    null_counts = (
        df.loc[is_null, class_col]
        .value_counts(normalize=normalize)
        .rename("null")
        .to_frame()
    )
    not_null_counts = (
        df.loc[~is_null, class_col]
        .value_counts(normalize=normalize)
        .rename("not_null")
    )
    # One top-level column per feature, with null/not_null sub-columns.
    return pd.concat({feature: null_counts.join(not_null_counts)}, axis=1)
def systemic_vel_est(z,param_dict,burn_in,run_dir,plot_param_hist=True):
    """
    Estimates the systemic (stellar) velocity of the galaxy and corrects
    the SDSS redshift (which is based on emission lines).

    Parameters
    ----------
    z : float
        Input (SDSS, emission-line based) redshift.
    param_dict : dict
        Fit-parameter dictionary; 'stel_vel' must hold an MCMC 'chain'
        of stellar velocities in km/s, shaped (walkers, iterations).
    burn_in : int
        Number of iterations to discard; capped at 50% of the chain length.
    run_dir : str
        Output directory (histogram figure is written beneath it).
    plot_param_hist : bool
        If True, save a histogram + chain diagnostic figure.

    Returns
    -------
    tuple
        ((z_best, z_best_low, z_best_upp), z_dict) where the sigmas are the
        16th/84th-percentile offsets and z_dict holds the full chain + flag.
    """
    # Speed of light in km/s.
    c = 299792.458
    # Get measured stellar velocity
    stel_vel = np.array(param_dict['stel_vel']['chain'])
    # Calculate new redshift
    z_best = (z+1)*(1+stel_vel/c)-1
    # Burned-in + Flattened (along walker axis) chain
    # If burn_in is larger than the size of the chain, then
    # take 50% of the chain length instead.
    if (burn_in >= np.shape(z_best)[1]):
        burn_in = int(0.5*np.shape(z_best)[1])
        # print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n')
    flat = z_best[:,burn_in:]
    flat = flat.flat
    # Old confidence interval stuff; replaced by np.quantile
    # 16th/50th/84th percentiles -> median and +/- 1-sigma offsets.
    p = np.percentile(flat, [16, 50, 84])
    pdfmax = p[1]
    low1 = p[1]-p[0]
    upp1 = p[2]-p[1]
    # Flag the result if the distribution extends to (unphysical) z < 0
    # within 3 sigma of the median.
    if ((pdfmax-(3.0*low1))<0):
        flag = 1
    else: flag = 0
    if (plot_param_hist==True):
        # Initialize figures and axes
        # Make an updating plot of the chain
        fig = plt.figure(figsize=(10,8))
        gs = gridspec.GridSpec(2, 2)
        gs.update(wspace=0.35, hspace=0.35) # set the spacing between axes.
        ax1  = plt.subplot(gs[0,0])
        ax2  = plt.subplot(gs[0,1])
        ax3  = plt.subplot(gs[1,0:2])
        # Plot 1: Histogram plots
        # Histogram; 'Doane' binning produces the best results from tests.
        n, bins, patches = ax1.hist(flat, bins='doane', density=True, facecolor='xkcd:aqua green', alpha=0.75)
        ax1.axvline(pdfmax,linestyle='--',color='white',label='$\mu=%0.6f$\n' % pdfmax)
        ax1.axvline(pdfmax-low1,linestyle=':',color='white',label='$\sigma_-=%0.6f$\n' % low1)
        ax1.axvline(pdfmax+upp1,linestyle=':',color='white',label='$\sigma_+=%0.6f$\n' % upp1)
        # ax1.plot(xvec,yvec,color='white')
        ax1.set_xlabel(r'$z_{\rm{best}}$',fontsize=12)
        ax1.set_ylabel(r'$p(z_{\rm{best}})$',fontsize=12)
        # Plot 2: best fit values (legend-only axis; lines are invisible)
        ax2.axvline(pdfmax,linestyle='--',color='black',alpha=0.0,label='$\mu=%0.6f$\n' % pdfmax)
        ax2.axvline(pdfmax-low1,linestyle=':',color='black',alpha=0.0,label='$\sigma\_=%0.6f$\n' % low1)
        ax2.axvline(pdfmax+upp1,linestyle=':',color='black',alpha=0.0,label='$\sigma_{+}=%0.6f$\n' % upp1)
        ax2.legend(loc='center left',frameon=False,fontsize=14)
        ax2.axis('off')
        # Plot 3: Chain plot
        for w in range(0,np.shape(z_best)[0],1):
            ax3.plot(range(np.shape(z_best)[1]),z_best[w,:],color='white',linewidth=0.5,alpha=0.5,zorder=0)
        # Calculate median and median absolute deviation of walkers at each iteration; we have depreciated
        # the average and standard deviation because they do not behave well for outlier walkers, which
        # also don't agree with histograms.
        c_med = np.median(z_best,axis=0)
        c_madstd = mad_std(z_best)
        ax3.plot(range(np.shape(z_best)[1]),c_med,color='xkcd:red',alpha=1.,linewidth=2.0,label='Median',zorder=10)
        ax3.fill_between(range(np.shape(z_best)[1]),c_med+c_madstd,c_med-c_madstd,color='xkcd:aqua',alpha=0.5,linewidth=1.5,label='Median Absolute Dev.',zorder=5)
        ax3.axvline(burn_in,linestyle='--',color='xkcd:orange',label='burn-in  = %d' % burn_in)
        ax3.set_xlim(0,np.shape(z_best)[1])
        ax3.set_xlabel('$N_\mathrm{iter}$',fontsize=12)
        ax3.set_ylabel(r'$z_{\rm{best}}$',fontsize=12)
        ax3.legend(loc='upper left')
        # Save the figure
        plt.savefig(run_dir+'histogram_plots/param_histograms/'+'z_best_MCMC.png' ,bbox_inches="tight",dpi=300,fmt='png')
        # Close plot window
        fig.clear()
        plt.close()
        # Collect garbage
        del fig
        del ax1
        del ax2
        del ax3
        del flat
        gc.collect()
    z_dict = {'par_best':pdfmax,'sig_low':low1,'sig_upp':upp1,'chain':z_best,'flag':flag}
    #
    z_best = pdfmax
    z_best_low = low1
    z_best_upp = upp1
    return (z_best,z_best_low,z_best_upp),z_dict
def bookmark_fn(outdir):
    """Return the path of the checkpoint bookmark file under *outdir*.

    The file is a single-line text file storing the epoch, brick and batch
    number of the last checkpoint.
    """
    return os.path.join(outdir, 'ckpts', 'last_epoch_brick_batch.txt')
def aps_pause():
    """Pause the APScheduler job registered under the id 'interval_task'.

    :return: None
    """
    scheduler.pause_job('interval_task')
def give_register_permission(username, base_uri):
    """Grant *username* register permission on *base_uri*.

    Prints an error in red on stderr and exits with status 1 when the base
    URI is unknown, the user is unknown, or the permission already exists.
    """
    def _abort(message):
        # Report the error and terminate the CLI with a failure status.
        click.secho(message, fg="red", err=True)
        sys.exit(1)

    if not base_uri_exists(base_uri):
        _abort("Base URI '{}' not registered".format(base_uri))
    if not user_exists(username):
        _abort("User '{}' not registered".format(username))
    permissions = get_permission_info(base_uri)
    if username in permissions["users_with_register_permissions"]:
        _abort("User '{}' already has register permissions".format(username))
    permissions["users_with_register_permissions"].append(username)
    update_permissions(permissions)
def set_model_weights(model, weights):
    """Copy the given weight arrays into a Keras model, layer by layer.

    Args:
        model : Keras model instance
        weights (dict): Mapping of layer name -> weight arrays

    Return:
        The same Keras model instance, with the weights set
    """
    for layer_name, layer_weights in weights.items():
        model.get_layer(layer_name).set_weights(layer_weights)
    return model
def test_reference_links_extra_03e():
    """
    Test case extra 03e:  variation of 3 with autolink

    Verifies that a link reference definition whose label contains a URI
    autolink is matched by the corresponding shortcut link, and that the
    autolink renders as a nested anchor inside the outer link.
    """
    # Arrange
    source_markdown = """[bar<http://autolink.com>foo]: /uri

[bar<http://autolink.com>foo]"""
    expected_tokens = [
        "[link-ref-def(1,1):True::bar<http://autolink.com>foo:: :/uri:::::]",
        "[BLANK(2,1):]",
        "[para(3,1):]",
        "[link(3,1):shortcut:/uri:::::bar<http://autolink.com>foo:::::]",
        "[text(3,2):bar:]",
        "[uri-autolink(3,5):http://autolink.com]",
        "[text(3,26):foo:]",
        "[end-link::]",
        "[end-para:::True]",
    ]
    expected_gfm = """<p><a href="/uri">bar<a href="http://autolink.com">http://autolink.com</a>foo</a></p>"""

    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
def check_history(history_file, inputList):
    """
    Return the subset of *inputList* that is not yet recorded in *history_file*.

    Args:
        history_file (str): path of a text file with one processed file id
            per line.
        inputList (list): items whose first element is a dict holding a
            "filename" key (falls back to the basename of "fileid" when
            "filename" is None).

    Returns:
        list: the items of ``inputList`` whose id is absent from the
        history file. If the history file does not exist yet, all items
        are returned.
    """
    unprocessed = list(inputList)
    if not (os.path.isfile(history_file)):
        LOG.warning(history_file + " does not exist yet!")
        return unprocessed
    # Read checklist file; a context manager guarantees the handle is
    # closed even if reading raises (the original leaked on error).
    with open(history_file, 'r') as fr:
        fileList = fr.read().split("\n")
    for current_input in inputList:
        current_fileid = current_input[0]["filename"]
        if (current_fileid is None):
            current_fileid = os.path.basename(current_input[0]["fileid"])
        if (current_fileid in fileList):
            unprocessed.remove(current_input)
            LOG.info(current_fileid + " already processed.")
        else:
            LOG.info(current_fileid + " not processed.")
    return unprocessed
def extract_manage_vlan(strValue):
    """Parse the output of ``show manage-vlan`` into a list of dicts.

    The output consists of repeated sections, each introduced by a
    ``Manage name : <name>`` line (surrounded by dashed rules) followed by
    ``key : value`` lines such as Svlan, Scos, Port, Device, Unit,
    Ethernet address and the RX/TX counters.

    Args:
        strValue(str): raw text produced by ``show manage-vlan``.

    Returns:
        list: one dict per management-VLAN section, with keys/values run
        through ``auto_convert``.
    """
    keyValueExp = re.compile('([\w\s]+):\s(.+)')
    vlans = []
    for line in strValue.splitlines():
        matched = keyValueExp.match(line)
        if not matched:
            continue
        key, value = matched.groups()
        key = auto_convert(key)
        value = auto_convert(value)
        # A "Manage name" line starts a new section.
        if key == 'Manage name':
            vlans.append({ })
        vlans[-1][key] = value
    return vlans
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer, loss_fn, metrics, params, model_dir,
                       restore_file=None, lr_scheduler=None):
    """Train the model and evaluate every epoch.

    Args:
        model: (torch.nn.Module) the neural network
        train_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
        val_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches validation data
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
        metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
        params: (Params) hyperparameters
        model_dir: (string) directory containing config, weights and log
        restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)
        lr_scheduler: (optim.lr_scheduler) learning rate scheduler
    """
    # reload weights from restore_file if specified
    if restore_file is not None:
        # BUG FIX: previously built the path from the global ``args`` object
        # instead of this function's own ``model_dir``/``restore_file``
        # parameters, which broke any caller not named ``args``.
        restore_path = os.path.join(model_dir, restore_file + '.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_path))
        utils.load_checkpoint(restore_path, model, optimizer)

    # PSNR: higher is better, so the best-seen metric starts at 0.
    best_val_metric = 0

    for epoch in range(params.num_epochs):
        # Run one epoch
        logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))

        # learning rate scheduler
        if lr_scheduler:
            lr_scheduler.step()

        # compute number of batches in one epoch (one full pass over the training set)
        train(model, optimizer, loss_fn, train_dataloader, metrics, params)

        # Evaluate for one epoch on validation set
        val_metrics = evaluate(model, loss_fn, val_dataloader, metrics, params)

        val_metric = val_metrics['psnr']
        is_best = val_metric >= best_val_metric

        # Save weights
        utils.save_checkpoint({'epoch': epoch + 1,
                               'state_dict': model.state_dict(),
                               'optim_dict': optimizer.state_dict()},
                              is_best=is_best,
                              checkpoint=model_dir)

        # If best_eval, best_save_path
        if is_best:
            logging.info("- Found new best validation metric")
            best_val_metric = val_metric

            # Save best val metrics in a json file in the model directory
            best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
            utils.save_dict_to_json(val_metrics, best_json_path)

        # Save latest val metrics in a json file in the model directory
        last_json_path = os.path.join(model_dir, "metrics_val_last_weights.json")
        utils.save_dict_to_json(val_metrics, last_json_path)
def etree_demo2():
    """
    2. Use '//*' to select every node in the document.
    """
    html = etree.parse('./lxml_test.html', etree.HTMLParser())
    # '//*' matches any node anywhere in the tree; a list is returned.
    all_nodes = html.xpath('//*')
    print(type(all_nodes))
    print(all_nodes, '\n\n')
    # To fetch only a specific element, name it in the path instead.
    ul_nodes = html.xpath('//ul')
    print(ul_nodes)
def generate_sample_space_plot_detailed(
    run_directory: str,
    step: int = 0,
    agent_ids: List[int] = [0],
    contour_z: str = "action_value",
    circle_size: str = "action_visits",
) -> List[dcc.Graph]:
    """Generates detailed sample space plots for the given agents.

    Parameters
    ----------
    run_directory : str
        directory of the mcts evaluator output.
    step : int, optional
        the currently selected step, by default 0
    agent_ids : List[int], optional
        list of agent ids for each of which a plot should be generated, by default [0]
    contour_z : str, optional
        string indicating which dataframe column the contour should display, by default "action_value"
    circle_size : str, optional
        string indicating which dataframe column determines the circle size, by default "action_visits"

    Returns
    -------
    List[dcc.Graph]
        plotly figure for every given agent.
    """
    # NOTE(review): the mutable default ``agent_ids=[0]`` is safe here
    # because the list is only read, never mutated.
    figs: List[dcc.Graph] = []
    step_data = load_step(run_directory, step)
    if step_data is None:
        # No exported childMap data: return a single placeholder figure
        # telling the user how to enable the visualization.
        return [
            dcc.Graph(
                figure=go.Figure(
                    {
                        "layout": {
                            "xaxis": {"visible": False},
                            "yaxis": {"visible": False},
                            "annotations": [
                                {
                                    "text": "Enable childMap export for sample space visualization.",
                                    "xref": "paper",
                                    "yref": "paper",
                                    "showarrow": False,
                                    "font": {"size": 28},
                                }
                            ],
                        }
                    }
                )
            )
        ]
    elif not agent_ids:
        # list agent_ids is empty, so return an empty figure list
        return figs
    df = get_step_dataframe(step_data)
    # this variable must be set here because the variable circle_size may change
    # NOTE(review): when circle_size == "action_value" and an agent has a
    # negative minimum, circle_size is rebound to that agent's shifted
    # Series below; a later agent whose minimum is >= 0 would then reuse
    # the previous agent's Series — verify this is intended.
    use_action_value_circles = circle_size == "action_value"
    for agent_id in agent_ids:
        agent_df = df[df["id"] == agent_id]
        # drop unvisited actions
        agent_df = agent_df[agent_df["action_visits"] != 0]
        # finally chosen action
        chosen_x = agent_df[agent_df["action_chosen"] > 0]["d_velocity"]
        chosen_y = agent_df[agent_df["action_chosen"] > 0]["d_lateral"]
        # Human-readable axis / hover labels.
        labels = {
            "d_velocity": "Velocity change",
            "d_lateral": "Lateral change",
            "action_visits": "Action visit count",
            "action_value": "Action value",
        }
        if use_action_value_circles:
            # action values can be negative, so transform them to positive values for the circle size
            min_value = agent_df["action_value"].min()
            if min_value < 0:
                circle_size = agent_df["action_value"] - min_value
        # Scatter of sampled actions with marginal histograms.
        fig = px.scatter(
            agent_df,
            x="d_velocity",
            y="d_lateral",
            marginal_x="histogram",
            marginal_y="histogram",
            hover_data=["action_visits", "action_value"],
            labels=labels,
            size=circle_size,
        ).update_traces(
            marker=dict(
                line=dict(width=1, color="black"),
                opacity=0.5,
                symbol="circle-dot",
                color="grey",
            ),
            selector=dict(type="scatter"),
        )
        # Rearrange into a (d_lateral x d_velocity) grid for the contour.
        pivot_df = agent_df.pivot(
            index="d_lateral", columns="d_velocity", values=contour_z
        )
        fig.add_trace(
            go.Contour(
                z=pivot_df.values,
                x=pivot_df.columns.values,
                y=pivot_df.index.values,
                contours_coloring="heatmap",  # "fill"
                connectgaps=True,
                # line_smoothing=1.3,
                colorscale=px.colors.sequential.Plasma,
                xaxis="x",
                yaxis="y",
                hoverinfo="skip",
                colorbar=dict(title=labels[contour_z], titleside="right"),
            )
        )
        # Mark the finally selected action with a red cross.
        fig.add_trace(
            go.Scatter(
                x=chosen_x,
                y=chosen_y,
                xaxis="x",
                yaxis="y",
                mode="markers",
                name="Selected Action",
                marker=dict(
                    line=dict(width=2, color="DarkSlateGrey"),
                    color="red",
                    size=15,
                    symbol="x",
                ),
            )
        )
        # determine min/max x/y values to specify the axes ranges manually
        min_x = agent_df.loc[:, "d_velocity"].min()
        max_x = agent_df.loc[:, "d_velocity"].max()
        min_y = agent_df.loc[:, "d_lateral"].min()
        max_y = agent_df.loc[:, "d_lateral"].max()
        fig.update_layout(
            title=dict(
                text=f"Agent: {agent_id}, Step: {step}",
                x=0.5,
            ),
            margin_t=110,  # default: 100
            height=460,  # default: 450
            xaxis_range=[min_x, max_x],
            yaxis_range=[min_y, max_y],
            legend=dict(
                orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1
            ),
        )
        figs.append(fig)
    return [
        dcc.Graph(figure=fig, className="col-sm-12 col-md-6 col-lg-4") for fig in figs
    ]
def raw_path_basename(path):
    """Return the basename of a raw (Windows-style) path string.

    The path is first normalized via ``raw`` and then split with
    ``ntpath`` so backslash separators are honoured on any platform.
    """
    import ntpath
    normalized = raw(path)
    return ntpath.basename(normalized)
def prepare_request(
    url: str,
    access_token: str = None,
    user_agent: str = None,
    ids: MultiInt = None,
    params: RequestParams = None,
    headers: Dict = None,
    json: Dict = None,
) -> Tuple[str, RequestParams, Dict, Optional[Dict]]:
    """Translate some ``pyinaturalist``-specific params into standard params and headers,
    and other request param preprocessing. This is made non-``requests``-specific
    so it could potentially be reused for ``aiohttp`` requests.

    Returns:
        Tuple of ``(URL, params, headers, data)``
    """
    # Normalize request params (dates, booleans, etc.).
    params = preprocess_request_params(params)

    # User-agent / accept / authentication headers (mutates the caller's
    # dict when one is supplied, matching the previous behavior).
    headers = headers or {}
    headers['User-Agent'] = user_agent or pyinaturalist.user_agent
    headers['Accept'] = 'application/json'
    if access_token:
        headers['Authorization'] = f'Bearer {access_token}'

    # When resources are requested by ID, validate them and append the
    # comma-joined IDs to the endpoint path.
    if ids:
        url = f"{url.rstrip('/')}/{validate_ids(ids)}"

    # Convert any datetimes to strings in the request body.
    if json:
        headers['Content-type'] = 'application/json'
        json = preprocess_request_body(json)

    return url, params, headers, json
def send_folio_detail(channel):
    """Post the FOLIO asset-graph images to a Slack channel.

    Arguments:
        channel {str} -- Slack channel ID to post into.
    """
    print('[info] called service method. name=[send_folio_detail]')
    # Fetch and save each graph image to post; the repository returns the
    # file paths (and a status) in a dict.
    result = folio_repository.fetch_graph_images()
    if result['status'] == 'NG':
        post_message = '```データの取得に失敗しました```'
        slack_messenger.post(post_message, channel)
    # Post the image (theme assets).
    slack_uploader.upload_image(
        result['path']['theme'],
        channel,
        initial_comment='取得元: https://folio-sec.com/mypage/assets',
        title='transition graph about theme assets.',
        as_user=False,
        icon_emoji=':moneybag:',
        username='foliobot')
    # Post the image (robo-advisor / "omakase" assets).
    slack_uploader.upload_image(
        result['path']['roboad'],
        channel,
        initial_comment='取得元: https://folio-sec.com/mypage/assets/omakase',
        title='transition graph about roboad assets.',
        as_user=False,
        icon_emoji=':moneybag:',
        username='foliobot')
    # Delete the local images once the upload is done.
    os_manager.remove_file(result['path']['theme'])
    os_manager.remove_file(result['path']['roboad'])
    print('[info] finish service method. name=[send_folio_detail]')
def require_permission(permission):
    """Pyramid decorator factory that enforces *permission* on a request.

    The wrapped view runs when the current user holds the permission; an
    authenticated user without it gets a 403, and anonymous users are
    redirected to the login page with the current route remembered.
    """
    def handler(f, *args, **kwargs):
        request = args[0]
        if check_permission(request, request.current_user, permission):
            return f(*args, **kwargs)
        if request.current_user:
            raise HTTPForbidden()
        raise HTTPFound(request.route_url('user.login', _query={'redirect': encode_route(request)}))
    return decorator(handler)
def _collapse_subgraph(graph_def, inputs, outputs, op_definition):
  """Substitute a custom op for the subgraph delimited by inputs and outputs.

  Args:
    graph_def: ``GraphDef`` proto containing the subgraph to collapse.
    inputs: names of the nodes feeding the subgraph.
    outputs: names of the nodes produced by the subgraph.
    op_definition: text-format ``NodeDef`` merged into the fused node;
      lets the caller override attributes (including the output dtypes).

  Returns:
    A new ``GraphDef`` in which the delimited subgraph is replaced by a
    single ``CustomTfLiteOp`` node.
  """
  # Random unique name so the fused node cannot collide with existing nodes.
  name = _uuid.uuid1().hex
  # We need a default type, but it can be changed using 'op_definition'.
  default_type = types_pb2.DT_FLOAT
  new_graph = fuse_op(
      graph_def=graph_def,
      input_nodes=inputs,
      output_nodes=outputs,
      output_dtypes=[default_type for _ in outputs],
      output_quantized=False,
      op_name=name,
      op_type="CustomTfLiteOp")
  # Merge the caller-supplied attributes into the freshly fused node.
  node_def = node_def_pb2.NodeDef()
  text_format.Parse(op_definition, node_def)
  for node in new_graph.node:
    if node.name == name:
      node.MergeFrom(node_def)
  return new_graph
def downsample_filter_simple(data_in, n_iter=1, offset=0):
    """
    Downsample ``data_in`` (..., nsamps) by 2**n_iter using
    nearest-neighbor remixing: each output sample is a normalized
    1-2-1 mix of the central input sample and its two neighbors.
    Returns None when ``n_iter <= 0``.
    """
    if n_iter <= 0:
        return None
    ns_in = data_in.shape[-1]
    ns_out = ns_in // 2
    out_shape = data_in.shape[:-1] + (ns_out,)
    data_out = np.empty(out_shape, dtype=data_in.dtype)
    # Central sample carries double weight.
    data_out[..., :] = data_in[..., offset:ns_out * 2 + offset:2] * 2
    # Left neighbor: contributes to all output samples except maybe the first.
    l_start = 1 - offset
    l_count = ns_out - l_start
    data_out[..., l_start:] += data_in[..., (1 - offset):2 * l_count:2]
    # Right neighbor: contributes to all output samples except maybe the
    # last, depending on whether 2*ns_out + offset <= ns_in.
    r_count = (ns_in - offset) // 2
    data_out[..., :r_count] += data_in[..., offset + 1::2]
    # Normalize the 1-2-1 mix; edge samples that missed one neighbor are
    # rescaled so their weights still sum to one.
    data_out[..., :] /= 4
    if l_start > 0:
        data_out[..., 0] *= 4. / 3
    if r_count < ns_out:
        data_out[..., -1] *= 4. / 3
    if n_iter <= 1:
        return data_out
    # Recurse for the remaining factors of 2.
    return downsample_filter_simple(data_out, n_iter - 1, offset)
def _session_setup(calling_function_name='[FUNCTION NAME NOT GIVEN]'):
    """ Typically called at the top of lightcurve workflow functions, to collect commonly required data.
    :param calling_function_name: caller's name, written to the session log header. [string]
    :return: tuple of data elements: context [tuple], defaults_dict [py dict],
        session_dict [py dict], log_file [file object]. Returns None (implicitly)
        when no session context is available; callers must handle that case.
    """
    context = _get_session_context()
    if context is None:
        # No active session: bail out (implicit None return).
        return
    this_directory, mp_string, an_string, filter_string = context
    defaults_dict = ini.make_defaults_dict()
    session_dict = ini.make_session_dict(defaults_dict, this_directory)
    log_filename = defaults_dict['session log filename']
    # NOTE(review): the log file is intentionally left open and returned;
    # the caller is responsible for closing it.
    log_file = open(log_filename, mode='a')  # set up append to log file.
    log_file.write('\n===== ' + calling_function_name + '() ' +
                   '{:%Y-%m-%d  %H:%M:%S utc}'.format(datetime.now(timezone.utc)) + '\n')
    return context, defaults_dict, session_dict, log_file
def find_center(image, center_guess, cutout_size=30, max_iters=10):
    """
    Find the centroid of a star from an initial guess of its position. Originally
    written to find star from a mouse click.

    Parameters
    ----------

    image : numpy array or CCDData
        Image containing the star.

    center_guess : array or tuple
        The position, in pixels, of the initial guess for the position of
        the star. The coordinates should be horizontal first, then vertical,
        i.e. opposite the usual Python convention for a numpy array.

    cutout_size : int, optional
        The default width of the cutout to use for finding the star.

    max_iters : int, optional
        Maximum number of iterations to go through in finding the center.

    Returns
    -------
    numpy array
        (x, y) centroid position; raises RuntimeError when centroiding
        produces NaN.
    """
    # Half-width of the square cutout around the current guess.
    pad = cutout_size // 2
    x, y = center_guess

    # Keep track of iterations
    cnt = 0

    # Grab the cutout...
    sub_data = image[y - pad:y + pad, x - pad:x + pad]  # - med

    # ...do stats on it...
    # (sigma-clipped median serves as the local background estimate)
    _, sub_med, _ = sigma_clipped_stats(sub_data)
    # sub_med = 0

    # ...and centroid.
    # NOTE(review): centroid_com presumably computes a center-of-mass
    # centroid (photutils) — confirm against the module imports.
    x_cm, y_cm = centroid_com(sub_data - sub_med)

    # Translate centroid back to original image (maybe use Cutout2D instead)
    cen = np.array([x_cm + x - pad, y_cm + y - pad])

    # ceno is the "original" center guess, set it to something nonsensical here
    ceno = np.array([-100, -100])

    # Iterate until the centroid is near the cutout center and has moved
    # less than 0.1 pixel since the previous iteration (or max_iters hit).
    while (cnt <= max_iters and
           (np.abs(np.array([x_cm, y_cm]) - pad).max() > 3
            or np.abs(cen - ceno).max() > 0.1)):

        # Update x, y positions for subsetting
        x = int(np.floor(x_cm)) + x - pad
        y = int(np.floor(y_cm)) + y - pad
        sub_data = image[y - pad:y + pad, x - pad:x + pad]  # - med
        _, sub_med, _ = sigma_clipped_stats(sub_data)
        # sub_med = 0
        # Mask below-background pixels so noise does not bias the centroid.
        mask = (sub_data - sub_med) < 0
        x_cm, y_cm = centroid_com(sub_data - sub_med, mask=mask)
        ceno = cen
        cen = np.array([x_cm + x - pad, y_cm + y - pad])
        if not np.all(~np.isnan(cen)):
            raise RuntimeError('Centroid finding failed, '
                               'previous was {}, current is {}'.format(ceno, cen))
        cnt += 1

    return cen
def zGetTraceArray(numRays, hx=None, hy=None, px=None, py=None, intensity=None,
                   waveNum=None, mode=0, surf=-1, want_opd=0, timeout=5000):
    """Trace large number of rays defined by their normalized field and pupil
    coordinates on lens file in the LDE of main Zemax application (not in the DDE server)

    Parameters
    ----------
    numRays : integer
        number of rays to trace. ``numRays`` should be equal to the length
        of the lists (if provided) ``hx``, ``hy``, ``px``, etc.
    hx : list, optional
        list of normalized field heights along x axis, of length ``numRays``;
        if ``None``, a list of 0.0s for ``hx`` is created.
    hy : list, optional
        list of normalized field heights along y axis, of length ``numRays``;
        if ``None``, a list of 0.0s for ``hy`` is created
    px : list, optional
        list of normalized heights in pupil coordinates, along x axis, of
        length ``numRays``; if ``None``, a list of 0.0s for ``px`` is created.
    py : list, optional
        list of normalized heights in pupil coordinates, along y axis, of
        length ``numRays``; if ``None``, a list of 0.0s for ``py`` is created
    intensity : float or list, optional
        initial intensities. If a list of length ``numRays`` is given it is
        used. If a single float value is passed, all rays use the same value for
        their initial intensities. If ``None``, all rays use a value of ``1.0``
        as their initial intensities.
    waveNum : integer or list (of integers), optional
        wavelength number. If a list of integers of length ``numRays`` is given
        it is used. If a single integer value is passed, all rays use the same
        value for wavelength number. If ``None``, all rays use wavelength
        number equal to 1.
    mode : integer, optional
        0 = real (Default), 1 = paraxial
    surf : integer, optional
        surface to trace the ray to. Usually, the ray data is only needed at
        the image surface (``surf = -1``, default)
    want_opd : integer, optional
        0 if OPD data is not needed (Default), 1 if it is. See Zemax manual
        for details.
    timeout : integer, optional
        command timeout specified in milli-seconds

    Returns
    -------
    error : list of integers
        0 = ray traced successfully;
        +ve number = the ray missed the surface;
        -ve number = the ray total internal reflected (TIR) at surface
        given by the absolute value of the ``error``
    vigcode : list of integers
        the first surface where the ray was vignetted. Unless an error occurs
        at that surface or subsequent to that surface, the ray will continue
        to trace to the requested surface.
    x, y, z : list of reals
        x, or , y, or z, coordinates of the ray on the requested surface
    l, m, n : list of reals
        the x, y, and z direction cosines after refraction into the media
        following the requested surface.
    l2, m2, n2 : list of reals
        list of x or y or z surface intercept direction normals at requested
        surface
    opd : list of reals
        computed optical path difference if ``want_opd > 0``
    intensity : list of reals
        the relative transmitted intensity of the ray, including any pupil
        or surface apodization defined.

    If ray tracing fails, a single integer error code is returned,
    which has the following meaning: -1 = Couldn't retrieve data in
    PostArrayTraceMessage, -999 = Couldn't communicate with Zemax,
    -998 = timeout reached

    Examples
    --------
    >>> n = 9**2
    >>> nx = np.linspace(-1, 1, np.sqrt(n))
    >>> hx, hy = np.meshgrid(nx, nx)
    >>> hx, hy = hx.flatten().tolist(), hy.flatten().tolist()
    >>> rayData = at.zGetTraceArray(numRays=n, hx=hx, hy=hy, mode=0)
    >>> err, vig = rayData[0], rayData[1]
    >>> x, y, z = rayData[2], rayData[3], rayData[4]

    Notes
    -----
    The opd can only be computed if the last surface is the image surface,
    otherwise, the opd value will be zero.
    """
    rd = getRayDataArray(numRays, tType=0, mode=mode, endSurf=surf)
    # Default each coordinate list to all-zeros when not supplied.
    hx = hx if hx else [0.0] * numRays
    hy = hy if hy else [0.0] * numRays
    px = px if px else [0.0] * numRays
    py = py if py else [0.0] * numRays
    # Scalar intensity / wave number values are broadcast to all rays.
    if intensity:
        intensity = intensity if isinstance(intensity, list) else [intensity]*numRays
    else:
        intensity = [1.0] * numRays
    if waveNum:
        waveNum = waveNum if isinstance(waveNum, list) else [waveNum]*numRays
    else:
        waveNum = [1] * numRays
    want_opd = [want_opd] * numRays

    # fill up the structure (element 0 of rd is the header record)
    for i in xrange(1, numRays+1):
        rd[i].x = hx[i-1]
        rd[i].y = hy[i-1]
        rd[i].z = px[i-1]
        rd[i].l = py[i-1]
        rd[i].intensity = intensity[i-1]
        rd[i].wave = waveNum[i-1]
        rd[i].want_opd = want_opd[i-1]

    # call ray tracing
    ret = zArrayTrace(rd, timeout)
    if ret != 0:
        # Communication / timeout error; propagate the raw code.
        return ret

    # Gather per-ray results into one list per field.
    # (Replaces the previous exec()-based list creation, which was fragile
    # and needlessly slow.)
    reals = ['x', 'y', 'z', 'l', 'm', 'n', 'l2', 'm2', 'n2', 'opd',
             'intensity']
    ints = ['error', 'vigcode']
    d = {key: [0.0] * numRays for key in reals}
    d.update({key: [0] * numRays for key in ints})
    for i in xrange(1, numRays+1):
        d["x"][i-1] = rd[i].x
        d["y"][i-1] = rd[i].y
        d["z"][i-1] = rd[i].z
        d["l"][i-1] = rd[i].l
        d["m"][i-1] = rd[i].m
        d["n"][i-1] = rd[i].n
        d["opd"][i-1] = rd[i].opd
        d["intensity"][i-1] = rd[i].intensity
        d["l2"][i-1] = rd[i].Exr
        d["m2"][i-1] = rd[i].Eyr
        d["n2"][i-1] = rd[i].Ezr
        d["error"][i-1] = rd[i].error
        d["vigcode"][i-1] = rd[i].vigcode
    return (d["error"], d["vigcode"], d["x"], d["y"], d["z"],
            d["l"], d["m"], d["n"], d["l2"], d["m2"], d["n2"],
            d["opd"], d["intensity"])
def bin2bytes(binvalue):
    """Convert a binary string to bytes.

    The input should be BYTE aligned; it is converted to hex first and
    then unhexlified.
    """
    return unhexlify(bin2hex(binvalue))
def bind_and_listen_on_posix_socket(socket_name, accept_callback):
    """
    Bind a unix-domain socket, start listening and register an accept
    handler with the event loop.

    :param socket_name: path of the socket to bind, or ``None`` to let
        ``_bind_posix_socket`` choose one.
    :param accept_callback: Called with `PosixSocketConnection` when a new
        connection is established.
    :return: the (possibly generated) socket name.
    """
    assert socket_name is None or isinstance(socket_name, six.text_type)
    assert callable(accept_callback)
    # Py2 uses 0027 and Py3 uses 0o027, but both know
    # how to create the right value from the string '0027'.
    # (Restrictive umask so the socket is not world-accessible.)
    old_umask = os.umask(int('0027', 8))
    # Bind socket.
    socket_name, socket = _bind_posix_socket(socket_name)
    # Restore the caller's umask immediately after binding.
    _ = os.umask(old_umask)
    # Listen on socket.
    socket.listen(0)
    # Invoked by the event loop whenever the listening fd is readable.
    def _accept_cb():
        connection, client_address = socket.accept()
        # Note: We don't have to put this socket in non blocking mode.
        # This can cause crashes when sending big packets on OS X.
        posix_connection = PosixSocketConnection(connection)
        accept_callback(posix_connection)
    get_event_loop().add_reader(socket.fileno(), _accept_cb)
    logger.info('Listening on %r.' % socket_name)
    return socket_name
def _report_maker(
    *,
    tback: str,
    func_name: Optional[str] = None,
    header: Optional[str] = None,
    as_attached: bool = False,
) -> Report:
    """Build a :class:`Report` from a traceback and presentation options.

    Args:
        tback(str): traceback text for the report.
        func_name(str, optional): name of the function in which the error
            was raised.
        header(str, optional): first line of the report message; the
            Report default ("Your program has crashed ☠️") applies when
            omitted.
        as_attached(bool, optional): when True, prepare the report for
            sending as a file. Default - False.

    Returns:
        A Report instance.
    """
    return Report(tback, func_name, header, as_attached)
def moments(data, x0=None, y0=None):
    """Estimate 2D-Gaussian parameters of *data* from its image moments.

    Returns ``(height, x, y, width_x, width_y, 0.0, 0.0)``.  When the
    coordinate axes *x0*/*y0* are supplied, the centroid and widths are
    rescaled from pixel indices into those coordinates (assuming a
    uniform step given by the first two axis values).
    """
    total = data.sum()
    ii, jj = np.indices(data.shape)
    # First moments give the centroid in index units.
    x = (ii * data).sum() / total
    y = (jj * data).sum() / total
    # Second moment of the central column/row profile -> widths.
    col = data[:, int(y)]
    width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
    row = data[int(x), :]
    width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
    height = data.max()
    if x0 is None:
        return height, x, y, width_x, width_y, 0.0, 0.0
    xstep = x0[1] - x0[0]
    ystep = y0[1] - y0[0]
    return (height, x * xstep + x0[0], y * ystep + y0[0],
            width_x * xstep, width_y * ystep, 0.0, 0.0)
def draw_trajectory(tr):
    """Draw a list of trajectory tuples on the current matplotlib axes.

    Each entry is serialized (by the C++ side) as
    ``(final_x, final_y, final_rot, circle_cx, circle_cy, circle_r,
    achieved_x, achieved_y, achieved_rot)``.  Final poses are drawn in
    red (faint footprint + solid centre dot), rotation circles in green,
    achieved-velocity poses in blue.
    """
    for entry in tr:
        axes = plt.gca()
        # Final pose footprint and its centre marker.
        axes.add_artist(plt.Circle((entry[0], entry[1]), 0.3, color='red', alpha=0.1))
        axes.add_artist(plt.Circle((entry[0], entry[1]), 0.01, color='red', alpha=1))
        # Rotation circle.
        axes.add_artist(plt.Circle((entry[3], entry[4]), entry[5], color='green', alpha=0.1))
        # Achieved velocity pose.
        axes.add_artist(plt.Circle((entry[6], entry[7]), 0.3, color='blue', alpha=0.1))
def string_to_bytes(size, assume_binary=False):
    """Convert human-readable size str to bytes and return an int.

    Args:
        size: a value such as ``"1.5 GB"`` or a bare number; parsed with
            the module-level ``REGEX_SIZE_STRING`` pattern.
        assume_binary (bool): treat decimal units as binary (scale 1024).

    Returns:
        int: the size in bytes.

    Raises:
        ValueError: if *size* cannot be parsed as a size string.
    """
    LOG.debug('size: %s, assume_binary: %s', size, assume_binary)
    size = str(size)
    tmp = REGEX_SIZE_STRING.search(size.upper())

    # Raise exception if string can't be parsed as a size
    if not tmp:
        raise ValueError(f'Invalid size string: {size}')

    # Binary units (e.g. KiB) scale by 1024, decimal ones by 1000.
    scale = 1024 if (tmp.group('binary') or assume_binary) else 1000

    # Exponent per unit prefix; 'B' (or an unrecognized unit) means plain
    # bytes.  Replaces the previous inconsistent if/if/elif ladder.
    exponents = {'P': 5, 'T': 4, 'G': 3, 'M': 2, 'K': 1, 'B': 0}
    units = tmp.group('units')
    size = int(float(tmp.group('size')) * scale ** exponents.get(units, 0))

    # Done
    LOG.debug('bytes: %s', size)
    return size
def set_online(rg = None):
    """
    Sets the desired CPU cores online.

    Parameters
    ----------
    rg : int, list
        An integer or list of integers with the indices of CPU cores that
        will be set online. If None, all CPU cores will be set online.
    """
    if isinstance(rg, int):
        rg = [rg]
    if rg is None:
        _cpu.enable_all_cpu()
        print("All CPUs enabled.")
        return
    for core in rg:
        # Best-effort per core, but catch Exception (not a bare except)
        # so KeyboardInterrupt / SystemExit still propagate.
        try:
            _cpu.enable_cpu(core)
            if _verbose:
                print(f"CPU {core} set online.")
        except Exception:
            print(f"ERROR: An exception occurred. Check if CPU {core} exists.")
def VecStack(vector_list, axis=0):
    """Stack a list of vectors along *axis*, padding each to the common
    maximum shape first."""
    # Common (padded) shape of a single vector.
    single_shape = [
        max(shape(v)[0] for v in vector_list),
        max(shape(v)[1] for v in vector_list),
    ]
    stacked_shape = dcopy(single_shape)
    stacked_shape[axis] *= len(vector_list)

    # Allocate output using the dtype / backend of the first vector.
    stacked = zeros(stacked_shape, getDatatype(vector_list[0]), getBackend(vector_list[0]))

    # Copy each padded vector into its row-slot.
    for idx, v in enumerate(vector_list):
        stacked[idx * single_shape[0]:(idx + 1) * single_shape[0], :] = pad(v, single_shape)

    return stacked
def precook(s, n=4, out=False):
    """
    Compute n-gram term-frequency counts for a sentence.

    Takes a string as input and returns an object that can be given to
    either cook_refs or cook_test. This is optional: cook_refs and cook_test
    can take string arguments as well.

    :param s: string : sentence to be converted into ngrams
    :param n: int : maximum n-gram length for which counts are computed
    :param out: unused; kept for backward compatibility with callers
    :return: term frequency vector (dict) for occurring ngrams
    """
    words = s.split()
    counts = defaultdict(int)
    # Count every n-gram of length 1..n (1,2,3,4 by default).
    # `xrange` was Python 2 only; this file uses f-strings elsewhere, so it
    # runs on Python 3 where `range` is the lazy equivalent.
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            ngram = tuple(words[i:i + k])
            counts[ngram] += 1
    return counts
def get_full_path(path, nx_list_subgraph):
    """Creates a numpy array of the line result.
    Args:
        path (list): Result of ``nx.shortest_path`` (sequence of node ids).
        nx_list_subgraph (list): See ``create_shortest path`` function
    Returns:
        ndarray: Coordinate pairs along a path.
    """
    p_list = []
    curp = None  # last coordinate emitted; used to orient the next segment
    for i in range(len(path)-1):
        # Coordinates of the edge between consecutive path nodes.
        p = get_path(path[i], path[i+1], nx_list_subgraph)
        if curp is None:
            curp = p
        # If the segment's far end is closer to where we left off than its
        # near end, the segment is stored backwards — reverse it so the
        # stacked coordinates form one continuous line.
        if np.sum((p[0]-curp)**2) > np.sum((p[-1]-curp)**2):
            p = p[::-1, :]
        p_list.append(p)
        curp = p[-1]
    return np.vstack(p_list)
def get_compliance_site_case_notifications(data, request):
    """
    Annotate each serialized case dict in ``data`` with
    ``exporter_user_notification_count``: the notification count for the
    compliance site case itself plus all visit cases under it.
    Returns the mutated ``data`` list.
    """
    ids = [item["id"] for item in data]
    # Notifications attached directly to the site cases, counted per case.
    notifications = (
        ExporterNotification.objects.filter(
            user_id=request.user.pk, organisation_id=get_request_user_organisation_id(request), case_id__in=ids
        )
        .values("case")
        .annotate(count=Count("case"))
    )
    cases_with_notifications = {str(notification["case"]): notification["count"] for notification in notifications}
    # Notifications attached to visit cases, grouped by their parent site case.
    visit_notifications = list(
        ExporterNotification.objects.filter(
            user_id=request.user.pk,
            organisation_id=get_request_user_organisation_id(request),
            case__compliancevisitcase__site_case__id__in=ids,
        )
        .values("case__compliancevisitcase__site_case_id")
        .annotate(count=Count("case__compliancevisitcase__site_case_id"))
    )
    visit_cases_with_notifications = {
        str(notification["case__compliancevisitcase__site_case_id"]): notification["count"]
        for notification in visit_notifications
    }
    # Merge: direct count first (0 when absent), then add visit-case counts.
    for item in data:
        if item["id"] in cases_with_notifications:
            item["exporter_user_notification_count"] = cases_with_notifications[item["id"]]
        else:
            item["exporter_user_notification_count"] = 0
        if item["id"] in visit_cases_with_notifications:
            item["exporter_user_notification_count"] += visit_cases_with_notifications[item["id"]]
    return data
def copy(src, dst):
    """
    Copy ``src`` to ``dst``, creating ``dst``'s parent directory first.

    If the source is a folder, it will copy the contents of the folder.
    Otherwise, Windows will error out due to permissions problems.

    NOTE(review): when ``src`` is a directory, every file returned by
    ``list_files`` is written to the *same* ``dst`` path, so each copy
    overwrites the previous one — confirm whether ``dst`` should instead be
    a per-file destination inside a directory.
    """
    dst = expand(dst)
    mkdir(dirname(dst))  # ensure the destination's parent directory exists
    if is_dir(src):
        src = list_files(src)
    else:
        src = [src]
    for s in src:
        shutil.copyfile(expand(s), dst)
def _call_function(self, *args, **kwargs):
    """
    Monkey-patched replacement for Odoo 11.0's request dispatch.

    Facebook's webhook currently sends requests of both 'http' and 'json'
    types, which makes the stock implementation raise BadRequest. This
    override removes that request-type check.

    TODO: find a better solution, e.g. a dedicated server that receives the
    webhook, normalizes it to a single request type and forwards it to Odoo.
    https://github.com/odoo/odoo/issues/7766#issuecomment-230753503
    """
    request = self
    # Removed section (the original request-type check):
    # if self.endpoint.routing['type'] != self._request_type:
    #     msg = "%s, %s: Function declared as capable of handling request of type '%s' but called with a request of type '%s'"
    #     params = (self.endpoint.original, self.httprequest.path, self.endpoint.routing['type'], self._request_type)
    #     _logger.info(msg, *params)
    #     raise werkzeug.exceptions.BadRequest(msg % params)
    if self.endpoint_arguments:
        kwargs.update(self.endpoint_arguments)
    # Backward for 7.0
    if self.endpoint.first_arg_is_req:
        args = (request,) + args
    # Correct exception handling and concurency retry
    @service_model.check
    def checked_call(___dbname, *a, **kw):
        # The decorator can call us more than once if there is an database error. In this
        # case, the request cursor is unusable. Rollback transaction to create a new one.
        if self._cr:
            self._cr.rollback()
            self.env.clear()
        result = self.endpoint(*a, **kw)
        if isinstance(result, Response) and result.is_qweb:
            # Early rendering of lazy responses to benefit from @service_model.check protection
            result.flatten()
        return result
    if self.db:
        return checked_call(self.db, *args, **kwargs)
    return self.endpoint(*args, **kwargs)
def compare_two_data_lists(data1, data2):
    """
    Return the size of the set difference of ``data1`` relative to ``data2``.

    If either argument is None (e.g. a file failed to load), None is
    returned instead of a count.
    """
    if data1 is None or data2 is None:
        return None
    return len(set(data1).difference(data2))
def get_on_request(field: Any, default_value: Any) -> Any:
    """
    Return ``field`` when it holds a meaningful value, otherwise ``default_value``.

    Args:
        field: the value to inspect
        default_value: substituted when ``field`` is falsy or a near-epoch datetime
    Return:
        the field value or the default
    """
    # Datetimes within 10 seconds of the epoch are treated as "unset".
    if isinstance(field, datetime) and field.timestamp() < 10:
        return default_value
    return field if field else default_value
def aptamer(ligand, piece='whole', liu=False):
    """
    Construct aptamer sequences.
    Parameters
    ----------
    ligand: 'theo'
        Specify the aptamer to generate. Right now only the theophylline
        aptamer is known.
    piece: 'whole', '5', '3', or 'splitter'
        Specify which part of the aptamer to generate. The whole aptamer
        is returned by default, but each aptamer can be broken into a
        5' half, a 3' half, and a splitter between those halves.
    liu: bool
        For the theophylline aptamer only: use the Liu variant of the
        sequence instead of the canonical one.
    Returns
    -------
    aptamer: Construct
        The returned construct is given constraints, which can be used to force
        RNAfold to approximate a ligand bound state.
    """
    affinity_uM = float('inf')
    # Get the right sequence for the requested aptamer.
    if ligand in ('th', 'theo', 'theophylline'):
        sequence_pieces = 'AUACCAGCC', 'GAAA', 'GGCCCUUGGCAG'
        if liu: sequence_pieces = 'AUACCACGC', 'GAAA', 'GCGCCUUGGCAG'
        constraint_pieces = '.((((.(((', '....', ')))....)))).'
        affinity_uM = 0.32
    # Bug fix: `in ('gtheoc')` was a substring test on the string 'gtheoc',
    # so single-character ligands like 'g' (guanine) wrongly matched this
    # branch. A 1-tuple restores exact-match membership.
    elif ligand in ('gtheoc',):
        # The theophylline aptamer, bracketed by a GC base pair.  This
        # construct is more convenient to use with ViennaRNA, because a
        # bracketing base pair is required to make a constraint.
        sequence_pieces = 'GAUACCAGCC', 'GAAA', 'GGCCCUUGGCAGC'
        constraint_pieces = '(.((((.(((', '....', ')))....)))).)'
        affinity_uM = 0.32
    elif ligand in ('3', '3mx', '3-methylxanthine'):
        # Soukup, Emilsson, Breaker. Altering molecular recognition of RNA
        # aptamers by allosteric selection. J. Mol. Biol. (2000) 298, 623-632.
        sequence_pieces = 'AUACCAGCC', 'GAAA', 'GGCCAUUGGCAG'
        constraint_pieces = '.(.((((((', '....', ')))...))).).'
    elif ligand in ('r', 'tmr', 'tetramethylrosamine', 'mg', 'malachite green'):
        # Baugh, Grate, Wilson. 2.8Å structure of the malachite green aptamer.
        # JMB (2000) 301:1:117-128.
        # This aptamer was used to make riboswitches, but with luciferase and
        # not RFP, possibly because TMR is a fluorescent dye: Borujeni et al.
        # Automated physics-based design of synthetic riboswitches from diverse
        # RNA aptamers. Nucl. Acids Res. (2016) 44:1:1-13.
        # I can't find any commercial TMR.  Sigma used to sell it as product
        # number T1823, but has since discontinued it.
        sequence_pieces = 'CCGACUGGCGAGAGCCAGGUAACGAAUG',
        constraint_pieces = '(...(((((....))))).........)',
    elif ligand in ('tpp', 'thiamine', 'thiamine pyrophosphate'):
        # Winkler, Hahvi, Breaker. Thiamine derivatives bind messenger RNAs
        # directly to regulate bacterial gene expression. Nature (2002)
        # 419:952-956.
        # The sequence I've copied here is the ThiM 91 fragment from Winkler et
        # al.  Weiland et al. used almost the same sequence, but they mutated
        # the last nucleotide from A to U to break a base pair.
        # Winker et al used "M9 glucose minimal media (plus 50 μg/mL vitamin
        # assay Casamino acids; Difco)" with or without 100 μM thiamine for
        # their in vivo assays (figure 4b, bottom).  The "vitamin assay" means
        # the casein digest was treated to remove certain vitamins; presumably
        # this is an important detail.
        # Weiland et al. used M63 media with or without 1 mM thiamine for their
        # in vivo assays.  This is a little confusing to me because the M63
        # recipe I found contains thiamine.  Also, the titrations in figure 3B
        # and 3C only go to 50 μM (and saturate around 1 μM).
        # My plan is to use M9 media with glucose and "vitamin assay" Casamino
        # acids, with and without 100 μM thiamine.
        sequence_pieces = 'UCGGGGUGCCCUUCUGCGUGAAGGCUGAGAAAUACCCGUAUCACCUGAUCUGGAUAAUGCCAGCGUAGGGAA',
        constraint_pieces = '(..(((((.(((((.....)))))........)))))......((((..((((......))))..))))..)',
        affinity_uM = 0.0324  # (interpolated from figure 2b in Winkler et al.)
    elif ligand in ('a', 'add', 'adenine'):
        # Serganov et al. Structural Basis for discriminative regulation of
        # gene expression by adenine- and guanine-sensing mRNAs. Chemistry &
        # Biology (2004) 11:1729-1741.
        # I truncated 7 base pairs that weren't interacting with the ligand
        # from the end of the construct.  I haven't been able to find an
        # example of the adenine aptamer being used in a riboswitch to see if
        # this is what other people have done, but Nomura et al. made
        # essentially the same truncation to the highly homologous guanine
        # aptamer when using it to make an allosteric ribozyme, so I'm pretty
        # confident that this could work.
        # Dixon et al. used M9 + 0.4% glucose + 2 mg/mL cas-amino acids + 0.1
        # mg/mL thiamine.  This is a higher concentration of cas-amino acids
        # than Winkler et al. use for the TPP aptamer, but this is much more in
        # line with the standard protocols.
        #
        # The ligand was also in some amount of DMSO, but I'm not sure how
        # much.  The solubility of adenine in water is 7.6 mM, so maybe the
        # DMSO was only necessary for some of their other ligands.
        sequence_pieces = 'UAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAGCCUUAAACUCUUGAUUA',
        constraint_pieces = '((...(((((((.......)))))))........((((((.......))))))..))',
    elif ligand in ('b', 'amm', 'ammeline'):
        # Dixon et al. Reengineering orthogonally selective riboswitches. PNAS
        # (2010) 107:7:2830-2835.
        # This is the M6 construct, which is just the adenine aptamer from
        # above with U47C and U51C.  The affinity measurement is actually for
        # M6'', because it was not measured for M6.
        sequence_pieces = 'UAUAAUCCUAAUGAUAUGGUUUGGGAGCUUCCACCAAGAGCCUUAAACUCUUGAUUA',
        constraint_pieces = '((...(((((((.......)))))))........((((((.......))))))..))',
        affinity_uM = 1.19
    elif ligand in ('g', 'gua', 'guanine'):
        # Nomura, Zhou, Miu, Yokobayashi. Controlling mammalian gene expression
        # by allosteric Hepatitis Delta Virus ribozymes. ACS Synth. Biol.
        # (2013) 2:684-689.
        # Nomura et al. used guanine at 500 μM, but I still see toxicity at
        # this concentration.  I think I'm going to use 250 μM instead.
        sequence_pieces = 'UAUAAUCGCGUGGAUAUGGCACGCAAGUUUCUACCGGGCACCGUAAAUGUCCGACUA',
        constraint_pieces = '((...(.(((((.......))))).)........((((((.......))))))..))',
        affinity_uM = 0.005
    elif ligand in ('fmn', 'flavin', 'flavin mononucleotide'):
        # Soukup, Breaker. Engineering precision RNA molecular switches. PNAS
        # (1999) 96:3584-3589.
        # I can't find any examples of anyone using this aptamer in vivo.
        sequence_pieces = 'GAGGAUAUGCUUCGGCAGAAGGC',
        constraint_pieces = '(......(((....))).....)',
    elif ligand in ('m', 'ms2', 'ms2 coat protein'):
        # Qi, Lucks, Liu, Mutalik, Arkin. Engineering naturally occurring
        # trans-acting non-coding RNAs to sense molecular signals. Nucl. Acids
        # Res. (2012) 40:12:5775-5786.  Sequence in supplement.
        # I can't really figure out which MS2 aptamer people use for synthetic
        # biology.  All the papers I've read agree that the aptamer has one
        # stem and three unpaired adenosines.  The sequences from Romaniuk,
        # Convery, and Qi actually have the same stem, they just differ in the
        # loop.  The sequences from Batey and Culler are exactly the same, but
        # different from those in the other papers.
        # The loop from Romaniuk and Convery is AUUA (the wildtype sequence)
        # while the loop from Qi is ACCA.  I'm inclined to use ACCA because Qi
        # was doing synthetic biology and because Convery mentions that the
        # natural consensus sequence for the loop is ANYA, a standard tetraloop
        # which doesn't preclude ACCA.
        # I should consider making the N55K mutation to the coat protein
        # itself.  One of the plasmids on AddGene mentioned that this mutation
        # increases affinity for the aptamer.  That plasmid was for mammalian
        # expression, and so far I haven't seen this assertion corroborated for
        # bacterial systems.
        sequence_pieces = 'AACAUGAGGACCACCCAUGUU',
        constraint_pieces = '((((((.((....))))))))',
    elif ligand in ('bca', 'beta-catenin'):
        # Culler, Hoff, Smolke. Reprogramming cellular behavior with rna
        # controllers responsive to endogenous proteins. Science (2010)
        # 330:6008:1251-1255.
        sequence_pieces = 'AGGCCGATCTATGGACGCTATAGGCACACCGGATACTTTAACGATTGGCT',
        raise NotImplementedError
    elif ligand in ('tc', 'tet', 'tetracycline'):
        # Wittmann and Suess. Selection of tetracycline inducible
        # self-cleaving ribozymes as synthetic devices for gene regulation in
        # yeast. Mol BioSyst (2011) 7:2419-2427.
        # The authors used 100 μM tetracycline in yeast.  I've seen other
        # papers that used as much as 250 μM.
        sequence_pieces = 'AAAACAUACCAGAUUUCGAUCUGGAGAGGUGAAGAAUACGACCACCU',
        constraint_pieces = '(.......((((((....))))))...((((...........)))))',
        # Müller, Weigand, Weichenrieder, Suess. Thermodynamic characterization
        # of an engineered tetracycline-binding riboswitch. Nucleic Acids
        # Research (2006) 34:9:2607-2617.
        affinity_uM = 0.00077  # 770 pM
    elif ligand in ('neo', 'neomycin'):
        # Weigand, Sanchez, Gunnesch, Zeiher, Schroeder, Suess. Screening for
        # engineered neomycin riboswitches that control translation initiation.
        # RNA (2008) 14:89-97.
        # The authors show that the aptamer consists of two domains: one that
        # binds neomycin and one which is just a stem.  Both are important for
        # regulating gene expression in their system, which is the 5'-UTR of an
        # mRNA.  However, here I only include the ligand-binding domain.  The
        # length and composition of the stem domain is probably application
        # dependent, and that's what I need to pull out of directed evolution.
        #
        # The authors used 100 μM neomycin.  Yeast were grown at 28°C for 48h
        # in 5 mL minimal media.
        sequence_pieces = 'GCUUGUCCUUUAAUGGUCC',
        constraint_pieces = '(.....((......))..)',
    elif ligand in ('asp', 'aspartame'):
        # Ferguson et al. A novel strategy for selection of allosteric
        # ribozymes yields RiboReporter™ sensors for caffeine and aspartame.
        # Nucl. Acids Res. (2004) 32:5
        sequence_pieces = 'CGGTGCTAGTTAGTTGCAGTTTCGGTTGTTACG',
        constraint_pieces = '((.............................))',
    elif ligand in ('caf', 'caffeine'):
        # Ferguson et al. A novel strategy for selection of allosteric
        # ribozymes yields RiboReporter™ sensors for caffeine and aspartame.
        # Nucl. Acids Res. (2004) 32:5
        sequence_pieces = 'GATCATCGGACTTTGTCCTGTGGAGTAAGATCG',
        constraint_pieces = '.................................',
    else:
        raise ValueError("no aptamer for '{}'".format(ligand))
    # Check for obvious entry errors in the aptamer sequences.
    if len(sequence_pieces) not in (1, 3):
        raise AssertionError("{} has {} sequence pieces, not 1 or 3.".format(ligand, len(sequence_pieces)))
    if len(sequence_pieces) != len(constraint_pieces):
        raise AssertionError("{} has {} sequence pieces and {} constraint pieces.".format(ligand, len(sequence_pieces), len(constraint_pieces)))
    if len(''.join(sequence_pieces)) != len(''.join(constraint_pieces)):
        raise AssertionError("the {} sequence has a different length than its constraints.".format(ligand))
    # Define the domains that make up the aptamer.
    if len(sequence_pieces) == 1:
        aptamer = Domain("aptamer", sequence_pieces[0])
        aptamer.constraints = constraint_pieces[0]
        aptamer.style = 'yellow', 'bold'
        aptamer.kd = affinity_uM
    if len(sequence_pieces) == 3:
        aptamer_5 = Domain("aptamer/5'", sequence_pieces[0])
        aptamer_S = Domain("aptamer/splitter", sequence_pieces[1])
        aptamer_3 = Domain("aptamer/3'", sequence_pieces[2])
        aptamer_5.constraints = constraint_pieces[0]
        aptamer_S.constraints = constraint_pieces[1]
        aptamer_3.constraints = constraint_pieces[2]
        aptamer_5.style = 'yellow', 'bold'
        aptamer_S.style = 'yellow', 'bold'
        aptamer_3.style = 'yellow', 'bold'
        aptamer_S.mutable = True
    # Assemble the aptamer domains into a single construct and return it.
    construct = Construct('aptamer')
    if len(sequence_pieces) == 1:
        construct += aptamer
    if len(sequence_pieces) == 3:
        if piece == 'whole':
            construct += aptamer_5
            construct += aptamer_S
            construct += aptamer_3
        elif str(piece) == '5':
            construct += aptamer_5
        elif piece == 'splitter':
            construct += aptamer_S
        elif str(piece) == '3':
            construct += aptamer_3
        else:
            raise ValueError("must request 'whole', '5', '3', or 'splitter' piece of aptamer, not {}.".format(piece))
    return construct
def bootstrap_ci(dataframe, kind='basic'):
    """Generate confidence intervals on the 1-sigma level for bootstrapped data
    given in a DataFrame.

    Parameters
    ----------
    dataframe: DataFrame
        DataFrame with the results of each bootstrap fit on a row. If the
        t-method is to be used, a Panel is required, with the data in
        the panel labeled 'data' and the uncertainties labeled 'stderr'
    kind: str, optional
        Selects which method to use: percentile, basic, or t-method (student).
        Unknown values fall back to 'basic'.

    Returns
    -------
    DataFrame
        Dataframe containing the left and right limits for each column as rows.
    """
    # pd.Panel was removed in pandas 1.0; only take this branch when the
    # installed pandas still provides it.
    if hasattr(pd, 'Panel') and isinstance(dataframe, pd.Panel):
        data = dataframe['data']
        stderrs = dataframe['stderr']
        args = (data, stderrs)
    else:
        data = dataframe
        # Bug fix: the original `args = (data)` is not a tuple, so
        # `method(*args)` unpacked the DataFrame itself (iterating over its
        # column labels). A 1-tuple passes the DataFrame as one argument.
        args = (data,)

    def percentile(data, stderrs=None):
        # Percentile bootstrap: read the 15.865/84.135 percentiles directly.
        CI = pd.DataFrame(index=['left', 'right'], columns=data.columns)
        CI.loc['left'] = data.apply(lambda col: np.percentile(col, 15.865), axis=0)
        CI.loc['right'] = data.apply(lambda col: np.percentile(col, 84.135), axis=0)
        return CI

    def basic(data, stderrs=None):
        # Basic bootstrap: reflect the percentiles of the replicates
        # (rows 1..n) around the original fit value (row 0).
        CI = pd.DataFrame(index=['left', 'right'], columns=data.columns)
        CI.loc['left'] = data.apply(
            lambda col: 2 * col[0] - np.percentile(col[1:], 84.135), axis=0)
        CI.loc['right'] = data.apply(
            lambda col: 2 * col[0] - np.percentile(col[1:], 15.865), axis=0)
        return CI

    def student(data, stderrs=None):
        # Studentized bootstrap: scale residuals by the reported stderrs.
        CI = pd.DataFrame(index=['left', 'right'], columns=data.columns)
        R = (data - data.loc[0]) / stderrs
        left = R.apply(lambda col: np.percentile(col[1:], 84.135), axis=0)
        right = R.apply(lambda col: np.percentile(col[1:], 15.865), axis=0)
        CI.loc['left'] = data.loc[0] - stderrs.loc[0] * left
        CI.loc['right'] = data.loc[0] - stderrs.loc[0] * right
        return CI

    methods = {'basic': basic, 'percentile': percentile, 't': student}
    method = methods.get(kind.lower(), basic)
    return method(*args)
def get_Trinity_gene_name(transcript_name):
    """
    Extract the gene name from a Trinity transcript identifier.

    Trinity transcript ids end with an isoform suffix (e.g. ``_i1``); the
    gene name is the identifier with that suffix removed.

    Raises:
        RuntimeError: if the name does not end in exactly one ``_i<N>`` suffix.
    """
    # Raw string: "_i\d" is an invalid escape sequence in Python 3 source.
    (gene_name, count) = re.subn(r"_i\d+$", "", transcript_name)
    if count != 1:
        errmsg = "Error, couldn't extract gene_id from transcript_id: {}".format(transcript_name)
        logger.critical(errmsg)
        raise RuntimeError(errmsg)
    return gene_name
def k_means(k):
    """
    K-Means clustering algorithm.

    Clusters the module-level ``dataset`` into ``k`` clusters, iterating at
    most ``ITERATIONS`` times or until the centroids stop moving, then plots
    the result to ``INPUT_FILE + '_k_means.png'``.
    """
    global dataset
    print("Running k-Means for {} clusters..".format(k))
    X = np.array(dataset)
    # Pick k distinct data points (by index) as the initial centroids.
    init_centroids = random.sample(range(0, len(dataset)), k)
    centroids, cluster_labels = [], []
    for i in init_centroids:
        centroids.append(dataset[i])
    # converting to 2D - array
    # centroids = np.array(centroids)
    # get_centroids = assign_clusters(centroids, X)
    prev_centroids = centroids.copy()
    for i in range(ITERATIONS):
        print("For iteration {}: ".format(i))
        prev_centroids = np.array(prev_centroids)
        # Assign each point to its nearest centroid, then recompute centroids.
        cluster_labels = assign_clusters(prev_centroids, X)
        centroids = calc_centroids(cluster_labels, k)
        # print(prev_centroids)
        print(centroids)
        # Stop early once the centroids no longer change between iterations.
        if match_centroids(centroids,prev_centroids):
            print("Converged ...")
            break
        else:
            prev_centroids = centroids.copy()
    plotClusters(dataset, cluster_labels, k, INPUT_FILE+'_k_means.png')
def exclude_block_notation(pattern: str, text: str) -> str:
    """
    Strip the block-element notation from the text representing a line.

    Passing the stripped result on to the inline parser keeps the
    block-level and inline-level processing separated.

    :param pattern: notation pattern (regex with groups)
    :param text: line text
    :return: the line text with the block-element notation removed
    """
    return regex.extract_from_group(pattern, text, [INDEX_TEXT])
def envfile_to_params(data):
    """
    Converts environment file content into a dictionary with all the parameters.
    If your input looks like:
    # comment
    NUMBER=123
    KEY="value"
    Then the generated dictionary will be the following:
    {
        "NUMBER": "123",
        "KEY": "value"
    }
    Double quotes surrounding a value are stripped; lines without an '='
    (comments, blanks) are ignored.
    """
    # Split on the FIRST '=' only, so values containing '=' (URLs, base64,
    # connection strings) are kept intact instead of the whole line being
    # silently dropped by the len == 2 filter.
    params = filter(lambda x: len(x) == 2, map(lambda x: x.strip().split("=", 1), data.splitlines()))
    return { k: v[1:-1] if v.startswith('"') and v.endswith('"') else v for (k, v) in params }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.