content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def validate_besseli(nu, z, n):
    """
    Compares the results of besseli function with scipy.special. If the return
    is zero, the result matches with scipy.special.

    :param nu: order of the Bessel function (may be negative / non-integer).
    :param z: argument of the Bessel function; may be real or complex.
    :param n: derivative order; ``0`` compares the function value itself.
    :return: ``True`` if a genuine mismatch was detected (also printed),
        ``False`` otherwise.

    .. note::
        Scipy cannot compute this special case: ``scipy.special.iv(nu, 0)``,
        where nu is negative and non-integer. The correct answer is -inf, but
        scipy's result is +inf. This issue also affects derivatives of the
        iv function at ``z = 0``. For example, ``scipy.special.ivp(nu, 0, n)``.
        However, the results for *complex* argument ``z = 0j`` is correctly
        returned by scipy (which is ``nan``).
    """
    # Compute using special_functions package
    i_specf = besseli(nu, z, n)
    # Compute using scipy.special package.  i0/i1 are the fast real-argument
    # specializations; iv/ivp handle the general case and derivatives.
    if n == 0:
        if not isinstance(z, complex) and nu == 0:
            i_scipy = i0(z)
        elif not isinstance(z, complex) and nu == 1:
            i_scipy = i1(z)
        else:
            i_scipy = iv(nu, z)
    else:
        i_scipy = ivp(nu, z, n)
    # Whitelist false scipy results. See note in docstring above.
    ignore_scipy = False
    if (nu < 0) and (round(nu) != nu) and (z.real == 0) and (z.imag == 0):
        ignore_scipy = True
    if (round(nu) != nu) and (z.real == 0) and (z.imag == 0) and (n > 0):
        ignore_scipy = True
    # Compare.  Matching infinities (same sign of the real part) and matching
    # NaNs are accepted; otherwise both real and imaginary parts of the
    # difference must fall within +/- tolerance.
    error = i_specf - i_scipy
    tolerance = 1e-14
    if ignore_scipy:
        error_detected = False
    elif isinstance(error, float) and isinf(i_specf) and isinf(i_scipy) \
            and (copysign(1, i_specf) == copysign(1, i_scipy)):
        error_detected = False
    elif isinstance(error, complex) and isinf(i_specf.real) and \
            isinf(i_scipy.real) and \
            (copysign(1, i_specf.real) == copysign(1, i_scipy.real)):
        error_detected = False
    elif isinstance(error, float) and isnan(i_specf) and isnan(i_scipy):
        error_detected = False
    elif isinstance(error, complex) and isnan(i_specf.real) and \
            isnan(i_scipy.real):
        error_detected = False
    elif error.real < tolerance and error.real > -tolerance and \
            error.imag < tolerance and error.imag > -tolerance:
        error_detected = False
    else:
        error_detected = True
        # Report the mismatch on one line; complex values show both parts,
        # real values show dots as a placeholder for the imaginary part.
        if isinstance(z, complex):
            print('ERROR: nu: %+0.2f, z: (%+0.2f,%+0.2f), n: %d, '
                  % (nu, z.real, z.imag, n), end=" ")
        else:
            print('ERROR: nu: %+0.2f, z: (%+0.2f,.....), n: %d, '
                  % (nu, z.real, n), end=" ")
        if isinstance(i_specf, complex):
            print('i_nu: (%+0.3f,%+0.3f) '
                  % (i_specf.real, i_specf.imag), end=" ")
        else:
            print('i_nu: (%+0.3f,......) ' % (i_specf), end=" ")
        if isinstance(i_scipy, complex):
            print('!= (%+0.3f,%+0.3f), '
                  % (i_scipy.real, i_scipy.imag), end=" ")
        else:
            print('!= (%+0.3f,......), ' % (i_scipy), end=" ")
        if isinstance(error, complex):
            print('error: (%+0.3e,%+0.3e)'
                  % (error.real, error.imag))
        else:
            print('error: (%+0.3e,..........)' % (error))
    return error_detected
from typing import Any
from typing import List
def delete_by_ip(*ip_address: Any) -> List:
    """
    Remove the rules connected to specific ip_address.

    :param ip_address: one or more source addresses; any rule whose ``src``
        is among them is deleted.
    :return: list of the rule objects that were removed.
    """
    removed_rules = []
    counter = 1
    for rule in rules():
        if rule.src in ip_address:
            removed_rules.append(rule)
            execute("delete", counter, force=True)
        else:
            # NOTE(review): the position counter only advances for rules that
            # are kept — after a deletion the following rules shift up one
            # position, so the current index stays valid. This assumes
            # ``rules()`` yields the pre-deletion ordering; verify against
            # the firewall backend.
            counter += 1
    return removed_rules
def findMaxWindow(a, w):
    """
    Compute the maximum value of every contiguous window of size ``w``.

    Uses the classic monotonically-decreasing deque technique: the deque
    holds indices whose values are strictly decreasing, so its front is
    always the maximum of the current window. Every index is pushed and
    popped at most once, giving O(n) time overall.

    Fixes a defect in the previous implementation, which never recomputed
    the maximum once the current maximum slid out of the window (e.g.
    ``findMaxWindow([5, 1, 1], 2)`` returned ``[5, 5]`` instead of
    ``[5, 1]``). Also stops shadowing the builtin ``max``.

    :param a: input array of integers
    :param w: window size (assumed ``1 <= w <= len(a)``)
    :return: array of max val in every window, length ``len(a) - w + 1``
    """
    from collections import deque

    result = []
    candidates = deque()  # indices into `a`; corresponding values decrease
    for i, value in enumerate(a):
        # Drop candidates dominated by the new element — they can never
        # again be a window maximum.
        while candidates and a[candidates[-1]] <= value:
            candidates.pop()
        candidates.append(i)
        # Evict the front candidate once it falls outside the window.
        if candidates[0] <= i - w:
            candidates.popleft()
        # Record a maximum once the first full window has been seen.
        if i >= w - 1:
            result.append(a[candidates[0]])
    return result
import logging
def filtering_news(news: list, filtered_news: list):
    """
    Drop unwanted articles from a list of news articles.

    Args:
        news (list): List of article dictionaries to remove from
        filtered_news (list): List of articles whose titles mark news as unwanted

    Returns:
        news (list): The same list, mutated in place, with at most one
        article removed per filtered title
    """
    for unwanted in filtered_news:
        title = unwanted["title"]
        # Locate the first article carrying the unwanted title, if any.
        match = next(
            (article for article in news if article["title"] == title), None
        )
        if match is not None:
            news.remove(match)
            logging.info("News filtered, removed {}".format(title))
    return news
def extract_subsequence(sequence, start_time, end_time):
    """Extracts a subsequence from a NoteSequence.
    Notes starting before `start_time` are not included. Notes ending after
    `end_time` are truncated.
    Args:
        sequence: The NoteSequence to extract a subsequence from.
        start_time: The float time in seconds to start the subsequence.
        end_time: The float time in seconds to end the subsequence.
    Returns:
        A new NoteSequence that is a subsequence of `sequence` in the specified time
        range.
    """
    # Copy all metadata, then rebuild the notes list from scratch.
    subsequence = music_pb2.NoteSequence()
    subsequence.CopyFrom(sequence)
    del subsequence.notes[:]
    for note in sequence.notes:
        # Keep only notes that start inside [start_time, end_time).
        if note.start_time < start_time or note.start_time >= end_time:
            continue
        new_note = subsequence.notes.add()
        new_note.CopyFrom(note)
        # Truncate notes that ring past the end of the window.
        new_note.end_time = min(note.end_time, end_time)
        # NOTE(review): total_time is assigned inside the loop, so it is only
        # clamped when at least one note falls in the window; with no
        # qualifying notes the copied original total_time survives — confirm
        # this is intended.
        subsequence.total_time = min(sequence.total_time, end_time)
    return subsequence
import re
def read_data(filename):
    """Read the raw tweet data from a file. Replace Emails etc with special tokens.

    Each line is lower-cased and stripped, emoticons are mapped through the
    module-level ``rep`` lookup table, then user mentions, emails, URLs,
    numbers and remaining punctuation are replaced with the placeholder
    tokens USER, EMAIL, URL, NUM and PUN. Runs of three or more identical
    characters are squeezed to two. All cleaned lines are joined and split
    into one flat token list.

    :param filename: path to the raw tweet file (one tweet per line).
    :return: list of token strings for the whole file.
    """
    with open(filename, 'r') as f:
        all_lines = f.readlines()
    padded_lines = []
    for line in all_lines:
        # Emoticons first, so the later punctuation pass cannot mangle them.
        line = emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], line.lower().strip())
        line = userMentionsRegex.sub(' USER ', line)
        line = emailsRegex.sub(' EMAIL ', line)
        line = urlsRegex.sub(' URL ', line)
        line = numsRegex.sub(' NUM ', line)
        line = punctuationNotEmoticonsRegex.sub(' PUN ', line)
        # Squeeze 3+ repeated characters down to two (e.g. "soooo" -> "soo").
        line = re.sub(r'(.)\1{2,}', r'\1\1', line)
        words_tokens = [token for token in TweetTokenizer().tokenize(line)]
        line = ' '.join(token for token in words_tokens)
        padded_lines.append(line)
    padded_data = ' '.join(line for line in padded_lines)
    encoded_data = tf.compat.as_str(padded_data).split()
    return encoded_data
def client():
    """AlgodClient for testing.

    Returns an algod client configured with a flat fee of 1000 microAlgos
    so fee calculations in tests are deterministic.
    """
    client = _algod_client()
    client.flat_fee = True
    client.fee = 1000
    print("fee ", client.fee)
    return client
def GRU_sent_encoder(batch_size, max_len, vocab_size, hidden_dim, wordembed_dim,
                     dropout=0.0, is_train=True, n_gpus=1):
    """
    Implementing the GRU of skip-thought vectors.
    Use masks so that sentences at different lengths can be put into the same batch.

    Symbolic inputs (bound at execution time):
        sent_seq: sequence of tokens consisting a sentence, shape: batch_size x max_len
        mask: 1 indicating valid, 0 invalid, shape: batch_size x max_len
        embed_weight: word embedding, shape: vocab_size x wordembed_dim

    :param batch_size: number of sentences per batch.
    :param max_len: maximum (padded) sentence length.
    :param vocab_size: vocabulary size of the embedding table.
    :param hidden_dim: GRU hidden state size.
    :param wordembed_dim: word embedding dimensionality.
    :param dropout: dropout rate forwarded to GRU_unroll.
    :param is_train: NOTE(review): accepted but unused here — presumably the
        train/eval switch is handled inside GRU_unroll; confirm.
    :param n_gpus: number of GPUs forwarded to GRU_unroll.
    :return: symbol producing the sentence vector(s).
    """
    sent_seq = mx.sym.Variable('sent_seq')
    mask = mx.sym.Variable('mask')
    embed_weight = mx.sym.Variable('embed_weight')
    embeded_seq = mx.sym.Embedding(data=sent_seq, input_dim=vocab_size, weight=embed_weight,
                                   output_dim=wordembed_dim, name='sent_embedding')
    sent_vec = GRU_unroll(batch_size, embeded_seq, mask=mask,
                          in_dim=wordembed_dim, seq_len=max_len,
                          num_hidden=hidden_dim, dropout=dropout,
                          prefix='sent', n_gpus=n_gpus)
    return sent_vec
def process_contours(frame_resized):
    """Get contours of the object detected.

    Blurs the frame, converts it to HSV, thresholds to the configured blue
    range, cleans the mask with erosion/dilation, and returns the external
    contours found in the mask.

    :param frame_resized: BGR frame (as produced by cv2) to search.
    :return: sequence of cv2 contour arrays.
    """
    blurred = cv2.GaussianBlur(frame_resized, (11, 9), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # Keep only pixels inside the configured blue HSV range.
    mask = cv2.inRange(hsv, constants.blueLower, constants.blueUpper)
    # Erode then dilate to remove small speckles without shrinking blobs.
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    return contours
from typing import Optional
from typing import List
import itertools
def add_ignore_file_arguments(files: Optional[List[str]] = None) -> List[str]:
    """Adds ignore file variables to the scope of the deployment.

    Builds a flat ``["--ignore-file", <name>, ...]`` argument list from the
    default ignore files plus any caller-supplied extras (appended after the
    defaults, in order).

    :param files: extra file names to ignore, or None for defaults only.
    :return: flat list of CLI arguments.
    """
    default_ignores = ["config.json", "Dockerfile", ".dockerignore"]
    # Combine default files and files (fixed misspelled local `ingore_files`).
    ignore_files = default_ignores + (files or [])
    return list(
        itertools.chain.from_iterable(
            ["--ignore-file", filename] for filename in ignore_files
        )
    )
import torch
def compute_accuracy(logits, targets):
    """Return the fraction of rows whose argmax class matches the target."""
    with torch.no_grad():
        predicted_classes = logits.argmax(dim=1)
        hits = predicted_classes.eq(targets).float()
        return hits.mean().item()
def division_by_zero(number: int):
    """Divide by zero. Should raise exception.
    Try requesting http://your-app/_divide_by_zero/7

    :param number: numerator to divide by zero.
    :return: message string containing the sentinel result ``-1``, since the
        division always raises ZeroDivisionError, which is logged and
        swallowed.
    """
    result = -1
    try:
        result = number / 0
    except ZeroDivisionError:
        # exc_info is redundant with logger.exception but harmless.
        logger.exception("Failed to divide by zero", exc_info=True)
    # Fixed typo in the user-facing message: "zeor" -> "zero".
    return f"{number} divided by zero is {result}"
def is_ELF_got_pointer_to_external(ea):
    """Similar to `is_ELF_got_pointer`, but requires that the eventual target
    of the pointer is an external.

    :param ea: effective address of the candidate GOT entry.
    :return: True iff `ea` is a GOT pointer whose referenced target lies in
        an external segment.
    """
    if not is_ELF_got_pointer(ea):
        return False
    target_ea = get_reference_target(ea)
    return is_external_segment(target_ea)
from pathlib import Path
def _normalise_dataset_path(input_path: Path) -> Path:
    """
    Dataset path should be either the direct imagery folder (mtl+bands) or a tar path.
    Translate other inputs (example: the MTL path) to one of the two.
    >>> tmppath = Path(tempfile.mkdtemp())
    >>> ds_path = tmppath.joinpath('LE07_L1GT_104078_20131209_20161119_01_T1')
    >>> ds_path.mkdir()
    >>> mtl_path = ds_path / 'LC08_L1TP_090084_20160121_20170405_01_T1_MTL.txt'
    >>> mtl_path.write_text('<mtl content>')
    13
    >>> _normalise_dataset_path(ds_path).relative_to(tmppath).as_posix()
    'LE07_L1GT_104078_20131209_20161119_01_T1'
    >>> _normalise_dataset_path(mtl_path).relative_to(tmppath).as_posix()
    'LE07_L1GT_104078_20131209_20161119_01_T1'
    >>> tar_path = tmppath / 'LS_L1GT.tar.gz'
    >>> tar_path.write_text('fake tar')
    8
    >>> _normalise_dataset_path(tar_path).relative_to(tmppath).as_posix()
    'LS_L1GT.tar.gz'
    >>> _normalise_dataset_path(Path(tempfile.mkdtemp()))
    Traceback (most recent call last):
    ...
    ValueError: No MTL files within input path .... Not a dataset?
    """
    input_path = normalise_nci_symlinks(input_path)
    if input_path.is_file():
        # A tar archive is already an acceptable dataset path.
        if ".tar" in input_path.suffixes:
            return input_path
        # Any other file (e.g. the MTL itself): fall back to its folder.
        input_path = input_path.parent
    # A dataset folder must contain exactly one MTL file (searched
    # recursively).
    mtl_files = list(input_path.rglob("*_MTL.txt"))
    if not mtl_files:
        raise ValueError(
            "No MTL files within input path '{}'. Not a dataset?".format(input_path)
        )
    if len(mtl_files) > 1:
        raise ValueError(
            "Multiple MTL files in a single dataset (got path: {})".format(input_path)
        )
    return input_path
def get_customers():
    """returns an array of dicts with the customers
    Returns:
        Array[Dict]: returns an array of dicts of the customers
        (or, on any database error, the ``Exception`` class itself — see
        review notes below)
    """
    try:
        # NOTE(review): `openConnection` is referenced but never called
        # (missing parentheses), so this statement is a no-op; `conn` below
        # must therefore come from module state. Verify the connection setup.
        openConnection
        with conn.cursor() as cur:
            result = cur.run_query('SELECT * FROM customer')
            cur.close()
        conn.close()
    except:
        # NOTE(review): this returns the Exception *class* instead of
        # raising or returning an error value, and the bare except hides
        # the original failure. Consider re-raising instead.
        return Exception
    customers = []
    for row in result:
        # Row id 1 is skipped — presumably a reserved/system account; confirm.
        if row[0] == 1:
            continue
        customer = {'id': row[0], 'name': row[1], 'credit': 0, 'rfid': row[2]}
        customers.append(customer)
    return customers
def fixtureid_es_server(fixture_value):
    """
    Return a fixture ID to be used by pytest for fixture `es_server()`.

    Parameters:
      fixture_value (:class:`~easy_server.Server`):
        The server the test runs against.

    Returns:
      str: ID of the form ``es_server=<nickname>``.
    """
    es_obj = fixture_value
    assert isinstance(es_obj, easy_server.Server)
    return "es_server={0}".format(es_obj.nickname)
def topn_vocabulary(document, TFIDF_model, topn=100):
    """
    Find the top n most important words in a document.

    Parameters
    ----------
    `document` : The document to find important words in; either a single
        string or a list of strings (joined with spaces first).
    `TFIDF_model` : The TF-IDF model that will be used.
    `topn`: Default = 100. Amount of top words.

    Returns
    -------
    `dictionary` : A dictionary containing words and their importance as a `float`.
    """
    # isinstance (rather than a type() comparison) also accepts list
    # subclasses.
    if isinstance(document, list):
        document = " ".join(document)
    weight_list = TFIDF_list_of_weigths(TFIDF_model=TFIDF_model, abstract=document)
    # Keep only the `topn` heaviest terms (weight_list is presumably sorted
    # by descending weight — confirm in TFIDF_list_of_weigths).
    return utils.tuples_to_dict(weight_list[:topn])
def embedding_table(inputs, vocab_size, embed_size, zero_pad=False,
                    trainable=True, scope="embedding", reuse=None):
    """ Generating Embedding Table with given parameters
    :param inputs: A 'Tensor' with type 'int8' or 'int16' or 'int32' or 'int64'
        containing the ids to be looked up in 'lookup table'.
    :param vocab_size: An int. Vocabulary size.
    :param embed_size: An int. Number of size of embedding vector.
    :param zero_pad: A boolean. If True, all the values of the first row (id 0)
        should be constant zeros.
    :param trainable: A boolean. Whether freeze the embedding matrix or not.
    :param scope: A str, Optional scope for 'variable_scope'.
    :param reuse: A boolean. Whether to reuse the weights of a previous layer
        by the same name.
    :return: A 'Tensor' with the embedded inputs, as produced by
        ``tf.nn.embedding_lookup``.
    """
    with tf.variable_scope(scope, reuse=reuse):
        # `_init` is the module-level initializer for the table.
        embed_table = tf.get_variable('embedding_table',
                                      shape=[vocab_size, embed_size],
                                      initializer=_init,
                                      trainable=trainable,
                                      dtype=tf.float32)
        if zero_pad:
            # Replace row 0 (the padding id) with constant zeros.
            embed_table = tf.concat((tf.zeros(shape=[1, embed_size]), embed_table[1:, :]),
                                    axis=0)
        return tf.nn.embedding_lookup(embed_table, inputs)
def get_trading_dates(start_date, end_date):
    """
    Get the list of trading days of a country's market; the start and end
    dates are included in the check. Currently only the Chinese market is
    supported.

    :param start_date: start date
    :type start_date: `str` | `date` | `datetime` | `pandas.Timestamp`
    :param end_date: end date
    :type end_date: `str` | `date` | `datetime` | `pandas.Timestamp`
    :return: list[`datetime.date`]
    :example:
    .. code-block:: python3
        :linenos:
        [In]get_trading_dates(start_date='2016-05-05', end_date='20160505')
        [Out]
        [datetime.date(2016, 5, 5)]
    """
    return DataProxy.get_instance().get_trading_dates(start_date, end_date)
def gm_put(state, b1, b2):
    """
    If goal is ('pos',b1,b2) and we're holding b1,
    Generate either a putdown or a stack subtask for b1.
    b2 is b1's destination: either the table or another block.
    Returns None when the method is not applicable.
    """
    # Applicable only when b1 is currently held and b2 is a real destination.
    if b2 == 'hand' or state.pos[b1] != 'hand':
        return None
    if b2 == 'table':
        return [('a_putdown', b1)]
    if state.clear[b2]:
        return [('a_stack', b1, b2)]
    return None
import os
def cs_management_client(context):
    """Return Cloud Services mgmt client.

    Credentials are read from the F5_CS_USER / F5_CS_PWD environment
    variables; the created client is also cached on the supplied context.
    """
    context.cs_mgmt_client = CSManagementClient(user=os.environ['F5_CS_USER'],
                                                password=os.environ['F5_CS_PWD'])
    return context.cs_mgmt_client
def pad_to_shape_label(label, shape):
    """
    Pad the label array to the given shape.

    Only the two spatial axes (1 and 2) are padded; the batch and class
    axes must already match. The background channel (class 0) is padded
    with ones and every other channel with zeros, so padded voxels are
    labelled as background.

    :param label: The label for padding, of shape [n_batch, *vol_shape, n_class].
    :param shape: The shape of the padded array, of value [n_batch, *vol_shape, n_class].
    :return: The padded label array.
    """
    # Element-wise check. The previous `label.shape <= shape` compared a
    # tuple against a list (TypeError) or, given a tuple, compared
    # lexicographically — which can pass even when a later dim is too large.
    assert all(dim <= target for dim, target in zip(label.shape, shape)), \
        "The shape of array to be padded is larger than the target shape."
    offset1 = (shape[1] - label.shape[1]) // 2
    offset2 = (shape[2] - label.shape[2]) // 2
    # Odd differences put the extra row/column at the trailing edge.
    remainder1 = (shape[1] - label.shape[1]) % 2
    remainder2 = (shape[2] - label.shape[2]) % 2
    pad_width = ((0, 0),
                 (offset1, offset1 + remainder1),
                 (offset2, offset2 + remainder2))
    class_pred = []
    for k in range(label.shape[-1]):
        # Background channel (k == 0) pads with 1, all others with 0.
        fill = 1 if k == 0 else 0
        class_pred.append(np.pad(label[..., k], pad_width,
                                 'constant', constant_values=fill))
    return np.stack(class_pred, axis=-1)
def download_report(
    bucket_name: str, client: BaseClient, report: str, location: str
) -> bool:
    """
    Downloads the original report
    to the temporary work area.

    :param bucket_name: name of the bucket holding the report.
    :param client: storage client used to perform the download.
    :param report: object name of the report file.
    :param location: local destination for the downloaded file.
    :return: whatever ``client.download_file`` returns. NOTE(review): the
        annotation says ``bool``, but boto3-style clients typically return
        ``None`` here — confirm against the concrete client.
    """
    response = client.download_file(
        Bucket=bucket_name, FileName=report, Location=location
    )
    return response
import os
def _collect_exit_info(container_dir):
    """Read exitinfo, check if app was aborted and why.

    :param container_dir: container directory holding the ``exitinfo`` and
        ``aborted`` marker files.
    :return: tuple ``(exitinfo, aborted, aborted_reason)``;
        ``aborted_reason`` is the content of the ``aborted`` file, or
        ``None`` if the container was not aborted.
    """
    exitinfo_file = os.path.join(container_dir, 'exitinfo')
    exitinfo = _read_exitinfo(exitinfo_file)
    _LOGGER.info('check for exitinfo file %r: %r', exitinfo_file, exitinfo)
    # Mere presence of the `aborted` file marks an aborted container.
    aborted_file = os.path.join(container_dir, 'aborted')
    aborted = os.path.exists(aborted_file)
    _LOGGER.info('check for aborted file: %s, %s', aborted_file, aborted)
    aborted_reason = None
    if aborted:
        with open(aborted_file) as f:
            aborted_reason = f.read()
    return exitinfo, aborted, aborted_reason
from typing import Dict
from typing import List
import click
def main(  # pylint: disable=too-many-arguments,too-many-locals
    private_key: PrivateKey,
    state_db: str,
    web3: Web3,
    contracts: Dict[str, Contract],
    start_block: BlockNumber,
    confirmations: BlockTimeout,
    host: str,
    port: int,
    service_fee: TokenAmount,
    operator: str,
    info_message: str,
    enable_debug: bool,
    matrix_server: List[str],
    accept_disclaimer: bool,
    enable_tracing: bool,
    tracing_sampler: str,
    tracing_param: str,
) -> int:
    """The Pathfinding service for the Raiden Network.

    Starts the pathfinding service and its HTTP API, blocks until the
    service greenlet exits or the process is interrupted, then shuts both
    down in reverse start order. Returns 0.
    """
    log.info("Starting Raiden Pathfinding Service")
    click.secho(PFS_DISCLAIMER, fg="yellow")
    # Require an explicit acknowledgement unless pre-accepted via CLI flag.
    if not accept_disclaimer:
        click.confirm(CONFIRMATION_OF_UNDERSTANDING, abort=True)
    log.info("Using RPC endpoint", rpc_url=get_web3_provider_info(web3))
    hex_addresses = {
        name: to_checksum_address(contract.address) for name, contract in contracts.items()
    }
    log.info("Contract information", addresses=hex_addresses, start_block=start_block)
    if enable_tracing:
        tracing_config = Config(
            config={"sampler": {"type": tracing_sampler, "param": tracing_param}, "logging": True},
            service_name="pfs",
            scope_manager=GeventScopeManager(),
            validate=True,
        )
        # Tracer is stored in `opentracing.tracer`
        tracing_config.initialize_tracer()
        assert isinstance(web3.provider, HTTPProvider), MYPY_ANNOTATION
        assert web3.provider.endpoint_uri is not None, MYPY_ANNOTATION
        # Set `Web3` requests Session to use `SessionTracing`
        cache_session(
            web3.provider.endpoint_uri,
            SessionTracing(propagate=False, span_tags={"target": "ethnode"}),
        )
    # Initialized to None so the `finally` block can safely guard cleanup
    # even when construction fails part-way through.
    service = None
    api = None
    try:
        service = PathfindingService(
            web3=web3,
            contracts=contracts,
            sync_start_block=start_block,
            required_confirmations=confirmations,
            private_key=private_key,
            poll_interval=DEFAULT_POLL_INTERVALL,
            db_filename=state_db,
            matrix_servers=matrix_server,
            enable_tracing=enable_tracing,
        )
        service.start()
        log.debug("Waiting for service to start before accepting API requests")
        try:
            service.startup_finished.get(timeout=PFS_START_TIMEOUT)
        except gevent.Timeout:
            raise Exception("PFS did not start within time.")
        log.debug("Starting API")
        api = PFSApi(
            pathfinding_service=service,
            service_fee=service_fee,
            debug_mode=enable_debug,
            one_to_n_address=to_canonical_address(contracts[CONTRACT_ONE_TO_N].address),
            operator=operator,
            info_message=info_message,
            enable_tracing=enable_tracing,
        )
        api.run(host=host, port=port)
        # Block until the service greenlet exits (or raises).
        service.get()
    except (KeyboardInterrupt, SystemExit):
        print("Exiting...")
    finally:
        log.info("Stopping Pathfinding Service...")
        # Stop in reverse start order; the guards handle early failures.
        if api:
            api.stop()
        if service:
            service.stop()
    return 0
def get_service(hass, config):
    """Get the Google Voice SMS notification service.

    Returns ``None`` (after ``validate_config`` logs the problem) when the
    required username/password keys are missing from the config.
    """
    if not validate_config({DOMAIN: config},
                           {DOMAIN: [CONF_USERNAME,
                                     CONF_PASSWORD]},
                           _LOGGER):
        return None
    return GoogleVoiceSMSNotificationService(config[CONF_USERNAME],
                                             config[CONF_PASSWORD])
import random
import time
def hammer_op(context, chase_duration):
    """Burn CPU for `chase_duration` seconds by chasing pointers through a
    shuffled permutation array, then return the duration."""
    size = context.op_config["chase_size"]
    chain = list(range(0, size))
    random.shuffle(chain)
    cursor = random.randint(0, size - 1)
    # and away we go
    began = time.time()
    while (time.time() - began) < chase_duration:
        cursor = chain[cursor]
    context.log.info("Hammered - start %d end %d" % (began, time.time()))
    return chase_duration
import os
import re
import warnings
def validate_sourcedata(path, source_type, pattern='sub-\\d+'):
    """
    This function validates the "sourcedata/" directory provided by user to
    see if it's contents are consistent with the pipeline's requirements.

    Checks that ``path`` contains a ``sourcedata/`` directory, that the
    subject directories inside it match ``pattern``, that every subject has
    the expected data directories (``source_type``), and that data file
    names match ``<pattern>_<label>_<source>.*``.

    :param path: parent directory expected to contain ``sourcedata/``
        (defaults to the current directory).
    :param source_type: list of data directory names (defaults to ['eeg']).
    :param pattern: regex for valid subject directory names.
    :return: dict summarising the validation results.
    :raises ValueError: if ``path`` has no ``sourcedata/`` directory.
    """
    # Local import fix: numpy was used below without ever being imported,
    # raising NameError as soon as an inconsistency was detected.
    import numpy as np

    if not path:
        path = './'
    if not source_type:
        source_type = ['eeg']
    # construct relative sourcedata path
    sourcedata_dir = os.path.join(path, 'sourcedata')
    source_name = None
    sub_dirs = []
    n_subs = 0
    subject_names = None
    data_dirs = []
    data_files = []
    # Defaults so the summary at the end is always well-defined, even when
    # no data files are found (previously an unbound-name NameError).
    file_patterns = [pattern + '_\\w+_' + source + '.*'
                     for source in source_type]
    file_names = None
    # browse through directories in parent_dir
    for root, directories, files in os.walk(path):
        if root == path:
            if 'sourcedata' not in directories:
                raise ValueError('The provided directory does not contain "sourcedata/".')  # noqa: E501
            else:
                source_name = 'OK'
        if root == sourcedata_dir:
            sub_dirs.extend([os.path.join(root, sub) for sub in directories])
            n_subs = len(sub_dirs)
            # NOTE(review): the `len(set(...)) > 1` test only fires on a MIX
            # of valid/invalid names; uniformly invalid names pass as 'OK'.
            valid_names = []
            for sub in sub_dirs:
                if re.findall(pattern=pattern,
                              string=os.path.basename(sub)):
                    valid_names.append(True)
                else:
                    valid_names.append(False)
            if len(set(valid_names)) > 1:
                bad_names = np.where(np.logical_not(valid_names))[0]
                bad_names = [sub_dirs[bn] for bn in bad_names]
                warnings.warn('The subject directory names in "sourcedata/" are inconsistent.')  # noqa: E501
                subject_names = 'error in %s' \
                    % (', '.join([str(bn) for bn in bad_names]))
            else:
                subject_names = 'OK'
        elif root in sub_dirs:
            # Collect the recognised data directories per subject and warn
            # when subjects disagree on how many they have.
            data_dirs.append([data_dir for data_dir in directories
                              if data_dir in source_type])
            n_dirs = [len(d) for d in data_dirs]
            if len(set(n_dirs)) > 1:
                warnings.warn("Subject directories contain different number of data directories.")  # noqa: E501
        elif root in [os.path.join(s_dir, source)
                      for s_dir in sub_dirs for source in source_type]:
            data_files.extend([os.path.join(root, file) for file in files])
            valid_file_names = []
            for file in data_files:
                for file_pattern in file_patterns:
                    if re.findall(pattern=file_pattern,
                                  string=os.path.basename(file)):
                        valid_file_names.append(True)
                    else:
                        valid_file_names.append(False)
            if len(set(valid_file_names)) > 1:
                bad_files = np.where(np.logical_not(valid_file_names))[0]
                bad_files = [data_files[bf] for bf in bad_files]
                warnings.warn("Some file names are not named accordance to the pipeline's requirements.")
                file_names = 'error in %s' % (', '.join([str(bn)
                                                         for bn in bad_files]))
            else:
                file_names = 'OK'
    data_val = {
        'source_data_path':
            {
                'path': sourcedata_dir,
                'naming': source_name,
                'dirs_in_sourcedata': n_subs
            },
        'subject_directories':
            {
                'name_pattern': pattern,
                'naming': subject_names
            },
        'data_files':
            {
                'file_pattern': (', '.join([f_pat for f_pat in file_patterns])),  # noqa
                'naming': file_names  # noqa
            }
    }
    return data_val
from typing import Callable
from typing import Mapping
import copy
import torch
def generate_optimization_fns(
    loss_fn: Callable,
    opt_fn: Callable,
    k_fn: Callable,
    normalize_grad: bool = False,
    optimizations: Mapping = None,
):
    """Directly generates upper/outer bilevel program derivative functions.

    Args:
        loss_fn: loss_fn(z, *params), upper/outer level loss
        opt_fn: opt_fn(*params) = z, lower/inner argmin function
        k_fn: k_fn(z, *params) = 0, lower/inner implicit function
        normalize_grad: whether to normalize the gradient by its norm
        optimizations: optional mapping of solver tweaks; shallow-copied so
            the caller's mapping is never mutated
    Returns:
        ``f_fn(*params), g_fn(*params), h_fn(*params)``
        parameters-only upper/outer level loss, gradient and Hessian.
    """
    # Shared cache of inner solutions, filled by the fn_with_sol_cache
    # decorator so f/g/h reuse the same solve.
    sol_cache = dict()
    # Detach the inner solution so gradients do not flow through opt_fn.
    opt_fn_ = lambda *args, **kwargs: opt_fn(*args, **kwargs).detach()
    # Bug fix: with `import copy`, bare `copy(...)` called the module object
    # and raised TypeError; use copy.copy for the intended shallow copy.
    optimizations = {} if optimizations is None else copy.copy(optimizations)

    @fn_with_sol_cache(opt_fn_, sol_cache)
    def f_fn(z, *params):
        # Upper-level loss evaluated at the (cached) inner solution.
        z = z.detach() if isinstance(z, torch.Tensor) else z
        params = _detach_args(*params)
        return loss_fn(z, *params)

    @fn_with_sol_cache(opt_fn_, sol_cache)
    def g_fn(z, *params):
        # Total gradient wrt params via the implicit function theorem:
        # direct term g[1:] plus the implicit term Dp through z.
        z = z.detach() if isinstance(z, torch.Tensor) else z
        params = _detach_args(*params)
        g = JACOBIAN(loss_fn, (z, *params))
        Dp = implicit_jacobian(
            k_fn, z.detach(), *params, Dg=g[0], optimizations=optimizations
        )
        Dp = Dp if len(params) != 1 else [Dp]
        ret = [Dp + g for (Dp, g) in zip(Dp, g[1:])]
        if normalize_grad:
            ret = [(z / (torch.norm(z) + 1e-7)).detach() for z in ret]
        ret = [ret.detach() for ret in ret]
        return ret[0] if len(ret) == 1 else ret

    @fn_with_sol_cache(opt_fn_, sol_cache)
    def h_fn(z, *params):
        # Total Hessian wrt params; the z-Hessian routine may be overridden
        # through optimizations["Hz_fn"].
        z = z.detach() if isinstance(z, torch.Tensor) else z
        params = _detach_args(*params)
        g = JACOBIAN(loss_fn, (z, *params))
        if optimizations.get("Hz_fn", None) is None:
            optimizations["Hz_fn"] = lambda z, *params: HESSIAN_DIAG(
                lambda z: loss_fn(z, *params), (z,)
            )[0]
        Hz_fn = optimizations["Hz_fn"]
        Hz = Hz_fn(z, *params)
        H = [Hz] + HESSIAN_DIAG(lambda *params: loss_fn(z, *params), params)
        Dp, Dpp = implicit_hessian(
            k_fn,
            z,
            *params,
            Dg=g[0],
            Hg=H[0],
            optimizations=optimizations,
        )
        Dpp = Dpp if len(params) != 1 else [Dpp]
        ret = [Dpp + H for (Dpp, H) in zip(Dpp, H[1:])]
        ret = [ret.detach() for ret in ret]
        return ret[0] if len(ret) == 1 else ret

    return f_fn, g_fn, h_fn
def get_weighted_spans(doc, vec, feature_weights):
    # type: (Any, Any, FeatureWeights) -> Optional[WeightedSpans]
    """ If possible, return a dict with preprocessed document and a list
    of spans with weights, corresponding to features in the document.

    Returns ``None`` when the document's weighted spans cannot be derived
    for the given vectorizer.
    """
    if isinstance(vec, FeatureUnion):
        # A union delegates to each sub-vectorizer and merges the results.
        return _get_weighted_spans_from_union(doc, vec, feature_weights)
    else:
        result = _get_doc_weighted_spans(doc, vec, feature_weights)
        if result is not None:
            found_features, doc_weighted_spans = result
            return WeightedSpans(
                [doc_weighted_spans],
                other=_get_other(feature_weights, [('', found_features)]),
            )
        # Falls through to an implicit None when no spans were found.
def get_child(parent, child_index):
    """
    Get the child at the given index, or return None if it doesn't exist.
    """
    children = parent.childNodes
    if 0 <= child_index < len(children):
        return children[child_index]
    return None
def testAtomicSubatomic():
    """
    Test atomic/subatomic links defined in memes.

    Creates three parent entities whose children use different mixes of
    atomic/subatomic links and verifies the counterpart counts returned
    for each link type. Returns a result set of
    ``[case_no, testcase, result, expected, errata]`` rows.
    """
    method = moduleName + '.' + 'testAtomicSubatomic'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    resultSet = []
    errata = []
    # Results are tracked as the strings "True"/"False", not booleans.
    testResult = "True"
    expectedResult = "True"
    errorMsg = ""
    # The testcase entities
    try:
        parentMeme1 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme1")  # Both child entities have subatomic links
        parentMeme2 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme2")  # One child has an atomic link, the other subatomic
        parentMeme3 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme3")  # Both child entities have atomic links
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error creating test entities!  Traceback = %s' % (e))
        errata.append(errorMsg)
    # NOTE(review): in the three blocks below errorMsg is overwritten (not
    # appended to errata) when a count check fails; only exception paths
    # record the message into errata. Confirm that is the intended report.
    try:
        pm1aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme1, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
        pm1sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme1, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
        if len(pm1sChildren) < 2:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have two subatomic children.  It actually has %s\n" % (len(pm1sChildren))
        if len(pm1aChildren) > 0:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have no atomic children.  It actually has %s\n" % (len(pm1aChildren))
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme1!  Traceback = %s' % (e))
        errata.append(errorMsg)
    try:
        pm2aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme2, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
        pm2sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme2, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
        if len(pm2sChildren) != 1:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme2 should have one subatomic child.  It actually has %s\n" % (len(pm2sChildren))
        if len(pm2aChildren) != 1:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme2 should have one atomic child.  It actually has %s\n" % (len(pm2aChildren))
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme2!  Traceback = %s' % (e))
        errata.append(errorMsg)
    try:
        pm3aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme3, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
        pm3sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme3, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
        if len(pm3sChildren) > 0:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have no subatomic children.  It actually has %s\n" % (len(pm3sChildren))
        if len(pm3aChildren) < 2:
            testResult = "False"
            errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have two atomic children.  It actually has %s\n" % (len(pm3aChildren))
    except Exception as e:
        testResult = "False"
        errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme3!  Traceback = %s' % (e))
        errata.append(errorMsg)
    testcase = "testAtomicSubatomic()"
    results = [1, testcase, testResult, expectedResult, errata]
    resultSet.append(results)
    Graph.logQ.put([logType, logLevel.INFO, method, "Finished testcase %s" % (1)])
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    return resultSet
def extinction(species, adj, z, independent):
    """
    Returns the presence/absence of each species after taking into account
    the secondary extinctions.
    Parameters
    ----------
    species : numpy array of shape (nbsimu, S) with nbsimu being the number
        of simulations (decompositions). This array contains the information
        about the presence (1) or absence (0) of each species (columns) in
        each simulation (rows). NOTE(review): this array is modified in
        place as well as returned.
    adj : numpy array of size (S,S) with S being the species richness
        Adjacency matrix.
    z : float
        Number of species which might not undergo secondary extinction.
    independent : bool
        Should the species having no incoming links be considered as
        independent (i.e. not undergo secondary extinction)?
    Returns
    -------
    Numpy array of shape (nbsimu, S) containing, for each decomposition (row),
    the presence (1) or absence (0) of each species (columns).
    """
    # -------- Extinction of dependent species --------
    # Basic rule for dependent species :
    # they need to be linked to another species to be part of the network
    left = np.sum(adj, axis=2)[:, z:]  # Number of neighbours left
    Psurvival = (left > 0).astype(int)  # Survival if at least 1 neighbour
    # Extinction cascade through trophic levels: repeat until no species'
    # survival state changes any more (fixed point).
    while np.sum(species[:, z:] != Psurvival) != 0:
        # ## Extinction(s) ###
        # Removal of non surviving species
        species[:, z:] = (species[:, z:]) * Psurvival
        # Removal of non surviving links (i.e. links of the extinct species)
        adj = cancel(adj, species)
        # ## Check for higher order extinctions ###
        left = np.sum(adj, axis=2)[:, z:]  # Number of neighbours left
        Psurvival = (left > 0).astype(int)  # Survival if at least 1 neighbour
    # -------- Extinction of independent species --------
    if independent == False:  # If there is no independent species
        # Species having no incoming link undergo secondary extinction
        interact = np.sum(cancel(adj, species), axis=1)[:, :z]  # Outgoing links
        (species[:, :z])[interact == 0] = 0  # Removed if no outgoing links left
    return(species)
def fix_variable_mana(card):
    """
    Fix mana costs whose 'Variable' text was not converted to {X}.

    This function was created to fix a problem in the dataset: 'Variable'
    mana texts were not correctly converted to {X}.  Every mana symbol of
    *card* is inspected and the ones still containing the literal word
    'Variable' are rewritten.  The card is modified in place.

    :param card: object exposing a ``mana_cost`` iterable of symbol
        strings such as ``'{R}'`` or ``'{VariableGreen}'``.
    :return: None (``card.mana_cost`` is replaced with the fixed list).
    """
    def correct_field(symbol):
        if 'Variable' in symbol:
            # strip out brackets:
            symbol = symbol[1:-1]
            # remove 'Variable'
            symbol = symbol.replace('Variable', '').strip()
            # get the correct color-letter
            symbol = alt_text_to_curly_bracket(symbol)
            # 'insert' X and return the corrected symbol.
            return f'{{X{symbol[1:-1]}}}'
        else:
            return symbol
    # Bug fix: the original comprehension copied each symbol verbatim and
    # never called correct_field, so broken 'Variable' costs were kept.
    card.mana_cost = [correct_field(x) for x in card.mana_cost]
def cli_arg(
    runner: CliRunner,
    notebook_path: Path,
    mock_terminal: Mock,
    remove_link_ids: Callable[[str], str],
    mock_tempfile_file: Mock,
    mock_stdin_tty: Mock,
    mock_stdout_tty: Mock,
) -> Callable[..., str]:
    """Return function that applies arguments to cli."""
    def _cli_arg(
        *args: Union[str, None],
        truecolor: bool = True,
        paging: Union[bool, None] = False,
        material_theme: bool = True,
        images: bool = True,
        **kwargs: Union[str, None],
    ) -> str:
        """Apply given arguments to cli.
        Args:
            *args (Union[str, None]): Extra arguments passed through to
                the command; ``None`` entries are dropped.
            truecolor (bool): Whether to pass
                '--color-system=truecolor'. By default True.
            paging (Union[bool, None]): Whether to pass '--paging'
                (True) or '--no-paging' (False); ``None`` passes
                neither. By default False.
            material_theme (bool): Whether to set the theme to
                'material'. By default True.
            images (bool): Whether to pass '--images'. By default True.
            **kwargs (Union[str, None]): Environmental variables to
                set; names are uppercased, ``None`` values dropped.
        Returns:
            str: The output of the invoked command.
        """
        # Start from the notebook path, then the caller-supplied extras.
        invocation_args = [os.fsdecode(notebook_path)]
        invocation_args.extend(arg for arg in args if arg is not None)
        if images:
            invocation_args.append("--images")
        if material_theme:
            invocation_args.append("--theme=material")
        if truecolor:
            invocation_args.append("--color-system=truecolor")
        if paging is True:
            invocation_args.append("--paging")
        elif paging is False:
            invocation_args.append("--no-paging")
        env_vars = {
            name.upper(): value
            for name, value in kwargs.items()
            if value is not None
        }
        result = runner.invoke(
            __main__.typer_click_object,
            args=invocation_args,
            color=True,
            env=env_vars,
        )
        return remove_link_ids(result.output)
    return _cli_arg
from sklearn.cluster import KMeans
from sklearn.model_selection import StratifiedKFold
import os
def grid_search(x_train, y_train, x_val=None, y_val=None, args=None, config_filename: str = None, folds: int = 5,
                verbose: int = 0, default_config: str = CONFIG_PATH_MLP, working_dir: str = WORKING_DIR,
                save_path: str = os.path.join(WORKING_DIR, 'logs')):
    """ Optimize MLP through grid searching
    Args:
        working_dir: (str) path to working directory
        save_path: (str) where to store intermediate stuff
        config_filename: (str) path to config file. Best hyperparameters will be saved under this name
        x_train: array of train data
        y_train: array of train data labels, default = None
        x_val: array of validation data
        y_val: array of validation labels, default = None
        args: (dict) hyperparameters of the MLP (see config file)
        folds: (int) number of cross-validation folds, default = 5
        default_config: (str) path to default config file
        verbose: (bool) printout all training steps
    Returns: trained model with best hyperparameters
    """
    # If no args are given, get the default settings to optimize
    if args is None:
        args = utils.get_config(default_config)
    if isinstance(y_train, list):
        y_train = np.array(y_train)
    # Create the grid-search grid
    grid = get_grid(args)
    # Bug fix: the message hard-coded 5 models per hyperparameter set;
    # use the actual number of folds.
    print(f"--- Optimizing {folds * len(grid)} models with {folds}-fold cross-validation")
    # Get shuffled train/test folds based on kmeans clustering of the training molecules
    kmeans = KMeans(n_clusters=10, random_state=RANDOM_SEED).fit(x_train)
    skf = StratifiedKFold(n_splits=folds, random_state=RANDOM_SEED, shuffle=True)
    # Stored under a new name so the ``folds`` parameter is not shadowed.
    fold_splits = [{'train': tr, 'test': tst} for tr, tst in skf.split(x_train, kmeans.labels_)]
    # Train n folds for every set of hyperparameters
    results = []
    for hyperparameters in grid:
        fold_errors = []
        for fold in fold_splits:
            # Get data from this split.
            fold_x_train = x_train[fold['train']]
            fold_y_train = y_train[fold['train']]
            fold_x_test = x_train[fold['test']]
            fold_y_test = y_train[fold['test']]
            # Train model
            model = train_mlp(fold_x_train, fold_y_train, args=hyperparameters, verbose=verbose, save_path=save_path)
            # Held-out mean squared error for this fold.
            predictions = list(model.predict(fold_x_test).flatten())
            mse = np.square(np.subtract(fold_y_test, predictions)).mean()
            fold_errors.append(mse)
        # Append the mean test results from the n fold cross-validation + corresponding hyperparameters
        results.append((np.mean(fold_errors), hyperparameters))
    # Get the best (lowest mean error) hyperparameters
    results.sort(key=lambda item: item[0])
    best_hyperparameters = results[0][1]
    # Write them as a yml
    if config_filename is None:
        config_filename = os.path.join(working_dir, 'configures', 'MLP.yml')
    utils.write_config(config_filename, best_hyperparameters)
    # Train a model on all training data with the best hyperparameters
    model = train_mlp(x_train, y_train, x_val, y_val, args=best_hyperparameters, verbose=verbose, save_path=save_path)
    return model
def distinguish_system_application(vulner_info):
    """
    Test whether CVE has system CIA loss or application CIA loss.
    :param vulner_info: object of class Vulnerability from cve_parser.py
    :return: list of impact description strings
    """
    impacts = []
    # System-level checks run first, in C/I/A order.
    system_checks = (
        (system_confidentiality_changed, "System confidentiality loss"),
        (system_integrity_changed, "System integrity loss"),
        (system_availability_changed, "System availability loss"),
    )
    for check, label in system_checks:
        if check(vulner_info.description, vulner_info.cvssv2, vulner_info.cpe_type):
            impacts.append(label)
    if not impacts:
        # No system impact found: fall back to application-level CIA from
        # CVSSv3 (original append order preserved: integrity,
        # availability, confidentiality).
        application_checks = (
            ('i', "Application integrity loss"),
            ('a', "Application availability loss"),
            ('c', "Application confidentiality loss"),
        )
        for metric, label in application_checks:
            if vulner_info.cvssv3[metric] != "NONE":
                impacts.append(label)
    return impacts
import os
def split_missions_and_dates(fname):
    """
    Decode mission names and MJD limits from a comparison file name.

    File names look like ``mission1-mission2[_gtMJD][_ltMJD].ext``.

    Examples
    --------
    >>> fname = 'nustar-nicer_gt55000_lt58000.csv'
    >>> outdict = split_missions_and_dates(fname)
    >>> outdict['mission1']
    'nustar'
    >>> outdict['mission2']
    'nicer'
    >>> outdict['mjdstart']
    'MJD 55000'
    >>> outdict['mjdstop']
    'MJD 58000'
    >>> fname = 'nustar-nicer.csv'
    >>> outdict = split_missions_and_dates(fname)
    >>> outdict['mjdstart']
    'Mission start'
    >>> outdict['mjdstop']
    'Today'
    """
    stem = os.path.splitext(fname)[0]
    missions_part, *date_tokens = stem.split('_')
    mission1, mission2 = missions_part.split('-')
    # Defaults apply when no gt/lt tokens are present in the name.
    mjdstart = 'Mission start'
    mjdstop = 'Today'
    for token in date_tokens:
        if 'gt' in token:
            mjdstart = 'MJD ' + token.replace('gt', '')
        elif 'lt' in token:
            mjdstop = 'MJD ' + token.replace('lt', '')
    return {'mission1': mission1, 'mission2': mission2,
            'mjdstart': mjdstart, 'mjdstop': mjdstop}
import platform
import os
def run_in_windows_bash(conanfile, command, cwd=None, env=None):
    """Run a unix command inside a bash terminal on Windows.

    It requires MSYS2, CYGWIN, or WSL, configured through the
    ``tools.microsoft.bash:subsystem`` and ``tools.microsoft.bash:path``
    configuration entries.

    :param conanfile: the calling conanfile; supplies conf, settings,
        generators_folder, output and the runner.
    :param command: the unix command line to execute inside the shell.
    :param cwd: working directory for the command; defaults to the
        current directory. Relative paths are resolved against it.
    :param env: environment script name(s) to wrap the outer (Windows)
        invocation with; when given, the default conanenv scripts are
        not used.
    :raises ConanException: when not running on Windows, or when the
        subsystem/path configuration is missing.
    """
    if env:
        # Passing env invalidates the conanfile.environment_scripts
        env_win = [env] if not isinstance(env, list) else env
        env_shell = []
    else:
        # Default wrappers: .sh applied inside bash, .bat applied outside.
        env_shell = ["conanenv.sh"]
        env_win = ["conanenv.bat"]
    subsystem = conanfile.conf["tools.microsoft.bash:subsystem"]
    shell_path = conanfile.conf["tools.microsoft.bash:path"]
    if not platform.system() == "Windows":
        raise ConanException("Command only for Windows operating system")
    if not subsystem or not shell_path:
        raise ConanException("The config 'tools.microsoft.bash:subsystem' and 'tools.microsoft.bash:path' are "
                             "needed to run commands in a Windows subsystem")
    if subsystem == MSYS2:
        # Configure MSYS2 to inherit the PATH from the caller environment.
        msys2_mode_env = Environment(conanfile)
        _msystem = {"x86": "MINGW32"}.get(conanfile.settings.get_safe("arch"), "MINGW64")
        msys2_mode_env.define("MSYSTEM", _msystem)
        msys2_mode_env.define("MSYS2_PATH_TYPE", "inherit")
        path = os.path.join(conanfile.generators_folder, "msys2_mode.bat")
        msys2_mode_env.save_bat(path)
        env_win.append(path)
    # Quote the shell executable only when its path contains spaces.
    wrapped_shell = '"%s"' % shell_path if " " in shell_path else shell_path
    if env_win:
        # Apply the Windows-side environment scripts around the shell launch.
        wrapped_shell = environment_wrap_command(conanfile, env_win, shell_path,
                                                cwd=conanfile.generators_folder)
    # Normalize cwd to an absolute Windows path, then translate it to the
    # subsystem's unix-style path for use inside bash.
    cwd = cwd or os.getcwd()
    if not os.path.isabs(cwd):
        cwd = os.path.join(os.getcwd(), cwd)
    cwd_inside = unix_path(conanfile, cwd)
    wrapped_user_cmd = command
    if env_shell:
        # Wrapping the inside_command enables prioritizing our environment,
        # otherwise /usr/bin goes first and there could be commands that we
        # want to skip
        wrapped_user_cmd = environment_wrap_command(conanfile, env_shell, command,
                                                    cwd=conanfile.generators_folder)
    # Needed to change to that dir inside the bash shell
    inside_command = 'cd "{cwd_inside}" && ' \
                     '{wrapped_user_cmd}'.format(cwd_inside=cwd_inside,
                                                 wrapped_user_cmd=wrapped_user_cmd)
    inside_command = escape_windows_cmd(inside_command)
    final_command = 'cd "{cwd}" && {wrapped_shell} --login -c {inside_command}'.format(
        cwd=cwd,
        wrapped_shell=wrapped_shell,
        inside_command=inside_command)
    conanfile.output.info('Running in windows bash: %s' % final_command)
    return conanfile._conan_runner(final_command, output=conanfile.output, subprocess=True)
from sys import path
def remove_uploaded_records(db):
    """
    Removes all records archived and uploaded.
    :param db: DB Connection to Pony
    :return: List of Records removed, or 0 when there was nothing to
        process (kept for backward compatibility with existing callers).
    """
    # Bug fix: the module did ``from sys import path`` so ``path.isfile``
    # was called on ``sys.path`` (a list, no ``isfile``) and ``remove`` was
    # unresolved; use os.path.isfile / os.remove instead.
    import os
    list_of_local_records = query.get_records_uploaded(db)
    if len(list_of_local_records) == 0:
        return 0
    removed_records_list = list()
    for record in list_of_local_records:
        record_path = record.path
        if not record.removed and os.path.isfile(record_path):
            os.remove(record_path)
            record.removed = True
            removed_records_list.append(record)
    return removed_records_list
from typing import Tuple
def nearest_with_mask_regrid(
    distances: ndarray,
    indexes: ndarray,
    surface_type_mask: ndarray,
    in_latlons: ndarray,
    out_latlons: ndarray,
    in_classified: ndarray,
    out_classified: ndarray,
    vicinity: float,
) -> Tuple[ndarray, ndarray]:
    """
    Main regridding function for the nearest distance option.
    Some inputs are needed only for handling island-like points.
    Args:
        distances:
            Distance array from each target grid point to its source grid points.
        indexes:
            Source grid point indexes for each target grid point.
        surface_type_mask:
            Boolean true if source point type matches target point type.
        in_latlons:
            Source points' latitude-longitudes.
        out_latlons:
            Target points' latitude-longitudes.
        in_classified:
            Land/sea type for source grid points (land -> True).
        out_classified:
            Land/sea type for target grid points (land -> True).
        vicinity:
            Radius of specified searching domain, in meter.
    Returns:
        - Updated distances - array from each target grid point to its source grid points.
        - Updated indexes - source grid point number for all target grid points.
    """
    # Check if there are output points with mismatched surface types.
    # NOTE(review): the "< 4" threshold assumes four candidate source points
    # per target point (one row of surface_type_mask) — confirm against the
    # caller that builds ``indexes``.
    matched_nearby_points_count = np.count_nonzero(surface_type_mask, axis=1)
    points_with_mismatches = (np.where(matched_nearby_points_count < 4))[0]
    # Look for nearest input points for the output points with mismatched surface
    indexes, distances, surface_type_mask = update_nearest_points(
        points_with_mismatches,
        in_latlons,
        out_latlons,
        indexes,
        distances,
        surface_type_mask,
        in_classified,
        out_classified,
    )
    # Handle island and lake like output points - find more distant same surface type input points
    # Note: surface_type_mask has been updated above
    matched_nearby_points_count = np.count_nonzero(surface_type_mask, axis=1)
    fully_mismatched_points = (np.where(matched_nearby_points_count == 0))[0]
    if fully_mismatched_points.shape[0] > 0:
        indexes, surface_type_mask = lakes_islands(
            fully_mismatched_points,
            indexes,
            surface_type_mask,
            in_latlons,
            out_latlons,
            in_classified,
            out_classified,
            vicinity,
        )
    # Convert mask to be true where input points should not be considered
    inverse_surface_mask = np.logical_not(surface_type_mask)
    # Replace distances with infinity where they should not be used, so the
    # plain nearest-point selection downstream never picks a mismatched point.
    masked_distances = np.where(inverse_surface_mask, np.float64(np.inf), distances)
    # Distances and indexes have been prepared to handle the mask, so can now
    # call the non-masked regrid function in process
    return masked_distances, indexes
from typing import List
from typing import Literal
from pathlib import Path
import logging
import shutil
import json
def _validator(
    directory: str,
    output_types: List[str] = OUTPUT_TYPES,
    log_level: Literal["INFO", "DEBUG"] = "INFO",
    coverages: dict = {},  # NOTE(review): mutable default; only read here, but shared across calls
    schemas_path: Path = Path(__file__).parent.joinpath(r"./schemas"),
    raise_error: bool = False,
) -> dict:
    """
    Parameters
    ----------
    directory : str
        Directory with datasets sub-directory and validation_rules.json
    output_types : List[str], optional
        The types of output files that will be written. Options are
        ["geojson", "csv", "geopackage"]. By default all will be written
    log_level : Literal['INFO', 'DEBUG'], optional
        Level for logger. The default is "INFO".
    coverages : dict, optional
        Location of coverages. E.g. {"AHN: path_to_ahn_dir} The default is {}.
    schemas_path : Path, optional
        Path to the HyDAMO and validation_rules schemas.
        The default is Path(__file__).parent.joinpath(r"./schemas").
    raise_error: bool, optional
        Will raise an error (or not) when Exception is raised. The default is False
    Returns
    -------
    HyDAMO, LayersSummary, ResultSummary
        Will return a tuple with a filled HyDAMO datamodel, layers_summary and result_summary
    """
    timer = Timer()
    try:
        # Initialised before any work so the except-branch can test it safely.
        results_path = None
        logger = logging.getLogger(__name__)
        logger.setLevel(getattr(logging, log_level))
        date_check = pd.Timestamp.now().isoformat()
        result_summary = ResultSummary(date_check=date_check)
        layers_summary = LayersSummary(date_check=date_check)
        # check if all files are present
        dir_path = Path(directory)
        # create a results_path
        if dir_path.exists():
            results_path = dir_path.joinpath("results")
            if results_path.exists():
                try:
                    shutil.rmtree(results_path)
                except PermissionError:
                    # Best-effort cleanup: a locked results dir is reused as-is.
                    pass
            results_path.mkdir(parents=True, exist_ok=True)
        else:
            raise FileNotFoundError(f"{dir_path.absolute().resolve()} does not exist")
        dataset_path = dir_path.joinpath("datasets")
        validation_rules_json = dir_path.joinpath("validationrules.json")
        missing_paths = []
        for path in [dataset_path, validation_rules_json]:
            if not path.exists():
                missing_paths += [str(path)]
        if missing_paths:
            result_summary.error = f'missing_paths: {",".join(missing_paths)}'
            raise FileNotFoundError(f'missing_paths: {",".join(missing_paths)}')
        else:
            # Parse and schema-check the validation rules before touching data.
            try:
                validation_rules_sets = json.loads(validation_rules_json.read_text())
            except Exception as e:
                result_summary.error = "the file with validationrules is not a valid JSON (see exception)"
                raise e
            try:
                rules_version = validation_rules_sets["schema"]
                schema = _read_schema(rules_version, schemas_path)
            except Exception as e:
                result_summary.error = "schema version cannot be read from validation rules (see exception)"
                raise e
            try:
                validate(validation_rules_sets, schema)
            except ValidationError as e:
                # NOTE(review): f-prefix below has no placeholders; harmless.
                result_summary.error = (
                    f"validation rules invalid according to json-schema (see exception)"
                )
                raise e
            # check if output-files are supported
            unsupported_output_types = [
                item for item in output_types if item not in OUTPUT_TYPES
            ]
            if unsupported_output_types:
                error_message = (
                    r"unsupported output types: " f'{",".join(unsupported_output_types)}'
                )
                result_summary.error = error_message
                raise TypeError(error_message)
            # set coverages
            if coverages:
                for key, item in coverages.items():
                    logical_validation.general_functions._set_coverage(key, item)
            # start validation
            # read data-model
            result_summary.status = "define data-model"
            try:
                hydamo_version = validation_rules_sets["hydamo_version"]
                datamodel = HyDAMO(version=hydamo_version)
            except Exception as e:
                result_summary.error = "datamodel cannot be defined (see exception)"
                raise e
            # validate dataset syntax
            result_summary.status = "syntax-validation (layers)"
            datasets = DataSets(dataset_path)
            result_summary.dataset_layers = datasets.layers
            ## validate syntax of datasets on layers-level and append to result
            logger.info("syntax-validation of object-layers")
            valid_layers = datamodel_layers(datamodel.layers, datasets.layers)
            result_summary.missing_layers = missing_layers(
                datamodel.layers, datasets.layers
            )
            ## validate valid_layers on fields-level and add them to data_model
            result_summary.status = "syntax-validation (fields)"
            syntax_result = []
            ## get status_object if any
            status_object = None
            if "status_object" in validation_rules_sets.keys():
                status_object = validation_rules_sets["status_object"]
            for layer in valid_layers:
                logger.info(f"syntax-validation of fields in {layer}")
                # NOTE(review): ``schema`` is reused here for the per-layer
                # dataset schema; the json-schema above is no longer needed.
                gdf, schema = datasets.read_layer(
                    layer, result_summary=result_summary, status_object=status_object
                )
                layer = layer.lower()
                for col in INCLUDE_COLUMNS:
                    if not col in gdf.columns:
                        gdf[col] = None
                        schema["properties"][col] = "str"
                if not INDEX in gdf.columns:
                    result_summary.error = f"Index-column '{INDEX}' is compulsory and not defined for layer '{layer}'."
                    raise KeyError(f"{INDEX} not in columns")
                gdf, result_gdf = fields_syntax(
                    gdf,
                    schema,
                    datamodel.validation_schemas[layer],
                    INDEX,
                    keep_columns=INCLUDE_COLUMNS,
                )
                # Add the syntax-validation result to the results_summary
                layers_summary.set_data(result_gdf, layer, schema["geometry"])
                # Add the corrected datasets_layer data to the datamodel.
                datamodel.set_data(gdf, layer, index_col=INDEX)
                syntax_result += [layer]
            # do logical validation: append result to layers_summary
            result_summary.status = "logical validation"
            layers_summary, result_summary = logical_validation.execute(
                datamodel,
                validation_rules_sets,
                layers_summary,
                result_summary,
                log_level,
                raise_error,
            )
            # finish validation and export results
            logger.info("exporting results")
            result_summary.status = "export results"
            result_layers = layers_summary.export(results_path, output_types)
            result_summary.result_layers = result_layers
            result_summary.error_layers = [
                i for i in datasets.layers if i.lower() not in result_layers
            ]
            result_summary.syntax_result = syntax_result
            result_summary.validation_result = [
                i["object"] for i in validation_rules_sets["objects"] if i["object"] in result_layers
            ]
            result_summary.success = True
            result_summary.status = "finished"
            result_summary.duration = timer.report()
            result_summary.to_json(results_path)
            logger.info(f"finished in {result_summary.duration:.2f} seconds")
            return datamodel, layers_summary, result_summary
    except Exception as e:
        # Flatten the exception message to a single line for the summary.
        e_str = str(e).replace("\n", " ")
        e_str = " ".join(e_str.split())
        if result_summary.error is not None:
            result_summary.error = fr"{result_summary.error} Python Exception: '{e_str}'"
        else:
            result_summary.error = fr"Python Exception: '{e_str}'"
        if results_path is not None:
            result_summary.to_json(results_path)
        if raise_error:
            raise e
        else:
            # NOTE(review): the to_dict() result is discarded and the function
            # returns None on handled errors (unlike the success-path tuple) —
            # confirm whether ``return result_summary.to_dict()`` was intended.
            result_summary.to_dict()
def get_renaming(mappers, year):
    """Get original to final column namings.

    ``year`` is accepted for interface compatibility but not used here.
    """
    return {code: attr['df_name'] for code, attr in mappers.items()}
async def clap(text, args):
    """ Puts clap emojis between words.

    The first element of ``args``, when present, replaces the default
    clap emoji as the separator.
    """
    separator = args[0] if args != [] else "👏"
    return f" {separator} ".join(text.split(" "))
from typing import Union
from typing import Callable
from typing import List
def apply_binary_str(
    a: Union[pa.Array, pa.ChunkedArray],
    b: Union[pa.Array, pa.ChunkedArray],
    *,
    func: Callable,
    output_dtype,
    parallel: bool = False,
):
    """
    Apply an element-wise numba-jitted function on two Arrow columns.
    The supplied function must return a numpy-compatible scalar.
    Handling of missing data and chunking of the inputs is done automatically.

    Args:
        a: Left operand column (``pa.Array`` or ``pa.ChunkedArray``).
        b: Right operand column; must have the same length as ``a``.
        func: Element-wise callable applied to each pair of values.
        output_dtype: dtype of the resulting array.
        parallel: Whether the jitted kernel may run in parallel.

    Raises:
        ValueError: If the lengths differ or either operand is neither a
            ``pa.Array`` nor a ``pa.ChunkedArray``.
    """
    if len(a) != len(b):
        raise ValueError("Inputs don't have the same length.")
    if isinstance(a, pa.ChunkedArray):
        if isinstance(b, pa.ChunkedArray):
            # Align both inputs on a common chunk layout, then process pairwise.
            in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
            new_chunks: List[pa.Array] = []
            for a_offset, b_offset in zip(in_a_offsets, in_b_offsets):
                a_slice = a.chunk(a_offset[0])[a_offset[1] : a_offset[1] + a_offset[2]]
                b_slice = b.chunk(b_offset[0])[b_offset[1] : b_offset[1] + b_offset[2]]
                new_chunks.append(
                    _apply_binary_str_array(
                        a_slice,
                        b_slice,
                        func=func,
                        output_dtype=output_dtype,
                        parallel=parallel,
                    )
                )
            return pa.chunked_array(new_chunks)
        elif isinstance(b, pa.Array):
            # Walk a's chunks, slicing the matching window out of b.
            new_chunks = []
            offsets = _calculate_chunk_offsets(a)
            for chunk, offset in zip(a.iterchunks(), offsets):
                new_chunks.append(
                    _apply_binary_str_array(
                        chunk,
                        b[offset : offset + len(chunk)],
                        func=func,
                        output_dtype=output_dtype,
                        parallel=parallel,
                    )
                )
            return pa.chunked_array(new_chunks)
        else:
            # Bug fix: this branch rejects ``b``, so the message must name
            # the right operand (it previously said "left operand").
            raise ValueError(f"right operand has unsupported type {type(b)}")
    elif isinstance(a, pa.Array):
        if isinstance(b, pa.ChunkedArray):
            # Walk b's chunks, slicing the matching window out of a.
            new_chunks = []
            offsets = _calculate_chunk_offsets(b)
            for chunk, offset in zip(b.iterchunks(), offsets):
                new_chunks.append(
                    _apply_binary_str_array(
                        a[offset : offset + len(chunk)],
                        chunk,
                        func=func,
                        output_dtype=output_dtype,
                        parallel=parallel,
                    )
                )
            return pa.chunked_array(new_chunks)
        elif isinstance(b, pa.Array):
            return _apply_binary_str_array(
                a, b, func=func, output_dtype=output_dtype, parallel=parallel
            )
        else:
            # Bug fix: same as above — ``b`` is the right operand.
            raise ValueError(f"right operand has unsupported type {type(b)}")
    else:
        raise ValueError(f"left operand has unsupported type {type(a)}")
def product_review(product_id: str):
    """
    Shows review statistics for a product.
    Returns a python dictionary with content-type: application/json

    Query string:
        date: optional review-time filter (BIGINT unixReviewTime).
    Responds with 404 ("Error") when the product has no matching reviews.
    """
    session = Session()
    date = request.args.get('date')  # query string formatted as BIGINT unixReviewTime
    # SELECT AVG(overall) AS average, COUNT(overall) AS total FROM reviews WHERE productID=<product_id> (AND unixReviewTime=date);
    query_1 = (
        session.query(
            func.avg(reviews.columns.overall)
            .label('average'),
            func.count(reviews.columns.overall)
            .label('total')
        )
        .filter(reviews.columns.productID == product_id)
    )
    if date:
        query_1 = query_1.filter(reviews.columns.unixReviewTime == date)
    query_1 = query_1.first()
    # SELECT overall AS stars, COUNT(overall) AS count FROM reviews WHERE productID=<product_id> (AND unixReviewTime=date) GROUP BY overall;
    query_2 = (
        session.query(
            reviews.columns.overall
            .label('stars'),
            func.count(reviews.columns.overall)
            .label('count')
        )
        .filter(reviews.columns.productID == product_id)
    )
    if date:
        query_2 = query_2.filter(reviews.columns.unixReviewTime == date)
    query_2 = (
        query_2.group_by(reviews.columns.overall)
        .all()
    )
    try:
        # AVG() is NULL (None) when no rows matched, so round() raises
        # TypeError. The original swallowed *every* exception with a bare
        # ``except``; catch only the "no reviews" failure modes so genuine
        # bugs still surface.
        payload = {
            "productID": product_id,
            "average": round(query_1.average, 1),
            "percent_breakdown": {f"{int(row.stars)}_star": round((row.count * 100) / query_1.total) for row in query_2},
            "count_breakdown": {f"{int(row.stars)}_star": row.count for row in query_2},
            "total": query_1.total
        }
        return payload
    except (TypeError, ZeroDivisionError):
        return Response("Error", 404)
    finally:
        session.close()
def remove_extra_two_spaces(text: str) -> str:
    """Collapse every run of consecutive spaces in *text* into one space.

    Bug fix: a single ``replace("  ", " ")`` pass left double spaces
    behind for runs of four or more spaces; repeat until none remain.
    """
    while "  " in text:
        text = text.replace("  ", " ")
    return text
def reflect_table(table_name, engine):
    """Reflect a table definition from the database via sqlalchemy.

    Args:
        table_name (str): Name of the table to extract.
        engine (sqlalchemy.engine.base.Engine): Engine to extract from.
    Returns:
        sqlalchemy.Table: The reflected table, ready for reading from
        the database.
    """
    return Table(table_name, MetaData(), autoload=True, autoload_with=engine)
def load_utt_list(utt_list):
    """Load a list of utterances.

    Args:
        utt_list (str): path to a file containing one utterance id per
            line; blank lines are skipped and whitespace is stripped.
    Returns:
        List[str]: list of utterances
    """
    with open(utt_list) as f:
        raw_lines = f.readlines()
    stripped = (line.strip() for line in raw_lines)
    return [utt_id for utt_id in stripped if len(utt_id) > 0]
def kfunc_vals(points, area):
    """
    Compute L(d) values for a range of radii (Ripley's K analysis).

    Input
      points: a list of Point objects
      area: an Extent object
    Return
      ds: list of radii
      lds: L(d) values for each radius in ds
    """
    # Adapted from the kfunction file in the spatialanalysis library.
    n = len(points)
    density = n / area.area()
    tree = kdtree2(points)
    # Base radius: two thirds of the smaller extent dimension, split in 10.
    step = min([area.xmax - area.xmin, area.ymax - area.ymin]) * 2 / 3 / 10
    ds = [step * (i + 1) for i in range(10)]
    # Average L(d) over all points, once per radius.
    lds = [sum(kfunc(tree, p, d, density)[1] for p in points) / n for d in ds]
    return ds, lds
from datetime import datetime
async def get_locations():
    """
    Retrieves the locations from the categories. The locations are cached for 1 hour.
    :returns: The locations.
    :rtype: List[Location]
    """
    # Fetch the per-category datasets (recovered is currently disabled).
    confirmed = await get_category("confirmed")
    deaths = await get_category("deaths")
    # recovered = await get_category("recovered")
    confirmed_locations = confirmed["locations"]
    death_locations = deaths["locations"]
    # recovered_locations = recovered["locations"]

    def _as_timeline(history):
        # Re-key 'm/d/yy' dates as ISO timestamps with a trailing 'Z'.
        return Timeline(
            {
                datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
                for date, amount in history.items()
            }
        )

    locations = []
    for index, location in enumerate(confirmed_locations):
        coordinates = location["coordinates"]
        locations.append(
            TimelinedLocation(
                # General info.
                index,
                location["country"],
                location["province"],
                # Coordinates.
                Coordinates(coordinates["lat"], coordinates["long"]),
                # Last update.
                datetime.utcnow().isoformat() + "Z",
                # Timelines (confirmed/deaths share the location index).
                {
                    "confirmed": _as_timeline(confirmed_locations[index]["history"]),
                    "deaths": _as_timeline(death_locations[index]["history"]),
                    "recovered": Timeline({}),
                },
            )
        )
    return locations
def MDAPE(y_true, y_pred, multioutput='raw_values'):
    """
    calculate Median Absolute Percentage Error (MDAPE).
    :param y_true: array-like of shape = (n_samples, *)
        Ground truth (correct) target values.
    :param y_pred: array-like of shape = (n_samples, *)
        Estimated target values.
    :param multioutput: string in ['raw_values', 'uniform_average']
    :return: float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    """
    y_true, y_pred, original_shape = _standardize_input(y_true, y_pred, multioutput)
    # Percentage errors; EPSILON guards against division by zero.
    percentage_errors = 100 * np.abs((y_true - y_pred) / (y_true + EPSILON))
    per_target_median = np.median(percentage_errors, axis=0)
    if multioutput == 'raw_values':
        return per_target_median.reshape(original_shape)
    return np.mean(per_target_median)
import pandas
import numpy
def fast_spearman(x, y=None):
    """calculate the spearman correlation matrix for the columns of x (MxN), or
    optionally, the spearman correlation matrix between x and y (OxP).
    In the language of statistics the columns are the variables and the rows
    are the observations.
    Args:
        x (numpy array-like) MxN in shape
        y (optional, numpy array-like) OxP in shape
    returns:
        (numpy array-like) array of the covariance values
            for defaults (y=None), shape is NxN
            if y is provided, shape is NxP
    """
    logger.debug("x.shape: {}".format(x.shape))
    if hasattr(y, "shape"):
        logger.debug("y.shape: {}".format(y.shape))
    # Spearman = Pearson correlation of the column-wise average ranks.
    rank_x = pandas.DataFrame(x).rank(method="average").values
    logger.debug("some min and max ranks of x_ranks:\n{}\n{}".format(numpy.min(rank_x[:10], axis=0), numpy.max(rank_x[:10], axis=0)))
    if y is None:
        rank_y = None
    else:
        rank_y = pandas.DataFrame(y).rank(method="average").values
    return fast_corr(rank_x, rank_y)
import matplotlib
from pycocotools.cocoeval import COCOeval
import copy
def analyze_individual_category(k, cocoDt, cocoGt, catId, iou_type, areas=None):
    """Analyze one category's precision when sub-category confusion and
    when all-category confusion are ignored.

    Refer to https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/coco_error_analysis.py#L174

    Args:
        k (int): Index of the category being analyzed.
        cocoDt (pycocotools.coco.COCO): Detection results stored as a COCO object.
        cocoGt (pycocotools.coco.COCO): Ground truth stored as a COCO object.
        catId (int): Dataset category id of the category being analyzed.
        iou_type (str): IoU computation mode: 'bbox' for detection boxes,
            'segm' for pixel-level segmentation results.
        areas (sequence, optional): Three area thresholds used to build the
            small/medium/large area ranges. TODO confirm expected units
            (appears to be squared pixels, as in COCOeval's areaRng).

    Returns:
        int: The category index ``k``, unchanged.
        dict: With keys 'ps_supercategory' and 'ps_allcategory'. The value
            of 'ps_supercategory' is the precision when confusion between
            sub-categories (same supercategory) is ignored; the value of
            'ps_allcategory' is the precision when confusion between all
            categories is ignored.
    """
    # matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
    # or matplotlib.backends is imported for the first time
    # pycocotools import matplotlib
    matplotlib.use('Agg')
    nm = cocoGt.loadCats(catId)[0]
    print(f'--------------analyzing {k + 1}-{nm["name"]}---------------')
    ps_ = {}
    dt = copy.deepcopy(cocoDt)
    nm = cocoGt.loadCats(catId)[0]
    imgIds = cocoGt.getImgIds()
    # Keep only the detections of the analyzed category.
    dt_anns = dt.dataset['annotations']
    select_dt_anns = []
    for ann in dt_anns:
        if ann['category_id'] == catId:
            select_dt_anns.append(ann)
    dt.dataset['annotations'] = select_dt_anns
    dt.createIndex()
    # compute precision but ignore superclass confusion: relabel sibling
    # sub-categories as the analyzed category and mark them ignored.
    gt = copy.deepcopy(cocoGt)
    child_catIds = gt.getCatIds(supNms=[nm['supercategory']])
    for idx, ann in enumerate(gt.dataset['annotations']):
        if ann['category_id'] in child_catIds and ann['category_id'] != catId:
            gt.dataset['annotations'][idx]['ignore'] = 1
            gt.dataset['annotations'][idx]['iscrowd'] = 1
            gt.dataset['annotations'][idx]['category_id'] = catId
    cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
    cocoEval.params.imgIds = imgIds
    cocoEval.params.maxDets = [100]
    cocoEval.params.iouThrs = [0.1]
    cocoEval.params.useCats = 1
    if areas:
        cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
                                   [areas[0], areas[1]], [areas[1], areas[2]]]
    cocoEval.evaluate()
    cocoEval.accumulate()
    ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :]
    ps_['ps_supercategory'] = ps_supercategory
    # compute precision but ignore any class confusion: relabel *all* other
    # categories as the analyzed category and mark them ignored.
    gt = copy.deepcopy(cocoGt)
    for idx, ann in enumerate(gt.dataset['annotations']):
        if ann['category_id'] != catId:
            gt.dataset['annotations'][idx]['ignore'] = 1
            gt.dataset['annotations'][idx]['iscrowd'] = 1
            gt.dataset['annotations'][idx]['category_id'] = catId
    cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
    cocoEval.params.imgIds = imgIds
    cocoEval.params.maxDets = [100]
    cocoEval.params.iouThrs = [0.1]
    cocoEval.params.useCats = 1
    if areas:
        cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
                                   [areas[0], areas[1]], [areas[1], areas[2]]]
    cocoEval.evaluate()
    cocoEval.accumulate()
    ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :]
    ps_['ps_allcategory'] = ps_allcategory
    return k, ps_
def read_line1(line):
    """! Function read_line1
    Reads as argument a string formatted as a Line 1 in SEISAN's Nordic format
    Returns a Hypocenter dataclass with all the fields in a SEISAN's Line 1
    @param[in] line string with SEISAN's Nordic hypocenter format (Line 1)
    @return Hypocenter dataclass
    """
    def _opt_float(field):
        # Fixed-width fields are blank-padded; all-blank means "not reported".
        return None if field.isspace() else float(field)

    def _magnitude(value_field, type_char, agency_field):
        # A magnitude block is value + scale letter + reporting agency;
        # an all-blank value means the magnitude is absent.
        if value_field.isspace():
            return None, ' ', ' '
        return float(value_field), 'M' + type_char, agency_field

    if len(line) != 81:
        print('ERROR: invalid line length')
    if line[79] != '1':
        print('ERROR: invalid line type')
    year = int(line[1:5])
    month = int(line[6:8])
    day = int(line[8:10])
    fixed_time = line[10]  # 'F' if origin time fixed
    hour = int(line[11:13])
    minute = int(line[13:15])
    second = _opt_float(line[16:20])
    location_model = line[20]
    distance_indicator = line[21]
    event_type = line[22]  # blank for earthquake, 'E' for explosion
    latitude = _opt_float(line[23:30])
    longitude = _opt_float(line[30:38])
    depth = _opt_float(line[38:43])
    depth_indicator = line[43]  # blank free depth, 'F' fixed, 'S' starting
    locating_indicator = line[44]  # blank free, 'F' fixed, 'S' starting, '*' do not locate
    locating_agency = line[45:48]
    num_sta = None if line[48:51].isspace() else int(line[48:51])
    rms = _opt_float(line[51:55])
    mag1, mag_type1, mag_agency1 = _magnitude(line[55:59], line[59], line[60:63])
    mag2, mag_type2, mag_agency2 = _magnitude(line[63:67], line[67], line[68:71])
    mag3, mag_type3, mag_agency3 = _magnitude(line[71:75], line[75], line[76:79])
    return Hypocenter(year, month, day, fixed_time, hour, minute, second,
                      location_model, distance_indicator, event_type, latitude, longitude, depth, depth_indicator,
                      locating_indicator, locating_agency, num_sta, rms,
                      mag1, mag_type1, mag_agency1, mag2, mag_type2, mag_agency2, mag3, mag_type3, mag_agency3)
import os
import tempfile
import subprocess
import time
import json
def ghidra_headless(address,
                    xml_file_path,
                    bin_file_path,
                    ghidra_headless_path,
                    ghidra_plugins_path):
    """
    Call Ghidra in headless mode and run the plugin
    FunctionDecompile.py to decompile the code of the function.

    Args:
        address: address string of the function to decompile; passed to the
            plugin and used to prefix the temporary output file.
        xml_file_path: XML program description imported by Ghidra.
        bin_file_path: path of the binary (not used directly here; kept for
            interface compatibility).
        ghidra_headless_path: path to Ghidra's ``analyzeHeadless`` launcher.
        ghidra_plugins_path: directory containing FunctionDecompile.py.

    Returns:
        The decompiled code as a string, or None on error, timeout, or
        user cancellation.
    """
    try:
        if not os.path.isfile(ghidra_headless_path):
            print("GhIDA:: [!] ghidra analyzeHeadless not found.")
            raise Exception("analyzeHeadless not found")
        decompiled_code = None
        idaapi.show_wait_box("Ghida decompilation started")
        # Temporary file the Ghidra plugin writes its JSON result to.
        prefix = "%s_" % address
        output_temp = tempfile.NamedTemporaryFile(prefix=prefix, delete=False)
        output_path = output_temp.name
        # print("GhIDA:: [DEBUG] output_path: %s" % output_path)
        # Close immediately so the Ghidra process can (re)open it, which
        # would otherwise fail on Windows while this handle is open.
        output_temp.close()
        cmd = [ghidra_headless_path,
               ".",
               "Temp",
               "-import",
               xml_file_path,
               '-scriptPath',
               ghidra_plugins_path,
               '-postScript',
               'FunctionDecompile.py',
               address,
               output_path,
               "-noanalysis",
               "-deleteProject"]
        # Options to 'safely' terminate the process
        if os.name == 'posix':
            # Start a new session so the whole process group can be signalled.
            kwargs = {
                'preexec_fn': os.setsid
            }
        else:
            kwargs = {
                'creationflags': subprocess.CREATE_NEW_PROCESS_GROUP,
                'shell': True
            }
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             **kwargs)
        stop = False
        counter = 0
        print("GhIDA:: [INFO] Ghidra headless (timeout: %ds)" % TIMEOUT)
        print("GhIDA:: [INFO] Waiting Ghidra headless analysis to finish...")
        # Poll the subprocess, allowing user cancellation and a hard timeout.
        while not stop:
            time.sleep(SLEEP_LENGTH)
            counter += 1
            p.poll()
            # Process terminated
            if p.returncode is not None:
                stop = True
                print("GhIDA:: [INFO] Ghidra analysis completed!")
                continue
            # User terminated action
            if idaapi.user_cancelled():
                # Terminate the process!
                terminate_process(p.pid)
                stop = True
                print("GhIDA:: [!] Ghidra analysis interrupted.")
                continue
            # Process timeout
            if counter > COUNTER_MAX:
                terminate_process(p.pid)
                stop = True
                print("GhIDA:: [!] Decompilation error - timeout reached")
                continue
        # Check if JSON response is available
        if os.path.isfile(output_path):
            with open(output_path) as f_in:
                j = json.load(f_in)
                if j['status'] == "completed":
                    decompiled_code = j['decompiled']
                else:
                    print("GhIDA:: [!] Decompilation error -",
                          " JSON response is malformed")
            # Remove the temporary JSON response file
            os.remove(output_path)
        else:
            print("GhIDA:: [!] Decompilation error - JSON response not found")
            idaapi.warning("Ghidra headless decompilation error")
    except Exception as e:
        print("GhIDA:: [!] %s" % e)
        print("GhIDA:: [!] Ghidra headless analysis failed")
        idaapi.warning("Ghidra headless analysis failed")
        decompiled_code = None
    finally:
        # Always dismiss the IDA wait box, even on failure.
        idaapi.hide_wait_box()
    return decompiled_code
import logging
from datetime import datetime
def validate_id(
        endpoint_name,
        type_id,
        cache_buster=False,
        config=api_config.CONFIG,
        logger=logging.getLogger('publicAPI'),
):
    """Check EVE Online CREST as source-of-truth for id lookup
    Args:
        endpoint_name (str): desired endpoint for data lookup
        type_id (int): id value to look up at endpoint (NOTE: only SDE replacement)
        cache_buster (bool, optional): skip caching, fetch from internet
        config (:obj:`prosper.common.ProsperConfig`): configuration object
        logger (:obj:`logging.logger`): logging handle
    Returns:
        int: HTTP status code for error validation
    Raises:
        exceptions.IDValidationError: when the CREST/ESI fetch fails
    """
    ## Check local cache for value ##
    # NOTE(review): if setup_cache_file() raises, the error is only logged
    # and `db_handle` stays unbound, so the cache search / write below would
    # raise NameError -- confirm intended behaviour.
    try:
        db_handle = setup_cache_file(endpoint_name)
    except Exception as err_msg: # pragma: no cover
        logger.error(
            'ERROR: unable to connect to local tinyDB cache' +
            '\n\tendpoint_name: {0}'.format(endpoint_name) +
            '\n\tcache_path: {0}'.format(CACHE_PATH),
            exc_info=True
        )
    if not cache_buster:
        logger.info('--searching cache for id: %s', type_id)
        logger.debug('endpoint_name=%s', endpoint_name)
        logger.debug('type_id=%s', type_id)
        # TTL check: only entries newer than `sde_cache_limit` seconds count.
        cache_time = datetime.utcnow().timestamp() - int(config.get('CACHING', 'sde_cache_limit'))
        cache_val = db_handle.search(
            (Query().cache_datetime >= cache_time) &
            (Query().index_key == type_id)
        )
        if cache_val:
            logger.info('--found type_id cache for id: {0}'.format(type_id))
            logger.debug(cache_val)
            return cache_val[0]['payload'] #skip CREST
    ## Request info from CREST ##
    logger.info('--fetching CREST ID information')
    logger.debug('endpoint_name=%s', endpoint_name)
    logger.debug('type_id=%s', type_id)
    try:
        kwarg_pair = endpoint_to_kwarg(
            endpoint_name,
            type_id
        )
        # Default so the name is bound even if fetch_esi_endpoint raises.
        type_info = None
        type_info = fetch_esi_endpoint(
            endpoint_name,
            **kwarg_pair,
            config=config
        )
    except Exception as err_msg:
        logger.warning(
            'ERROR: unable to connect to CREST' +
            '\n\tendpoint_name: {0}'.format(endpoint_name) +
            '\n\ttype_id: {0}'.format(type_id),
            exc_info=True
        )
        # Surface the failure to the API layer as a 404-style error.
        raise exceptions.IDValidationError(
            status=404,
            message='Unable to validate {0}:{1}'.format(
                endpoint_name,
                type_id
            )
        )
    ## Update cache ##
    logger.info('--updating cache')
    # Best-effort cache write: failures are logged, not raised.
    try:
        write_cache_entry(
            db_handle,
            type_id,
            type_info
        )
    except Exception as err_msg: # pragma: no cover
        logger.error(
            'ERROR: unable to write to cache' +
            '\n\ttype_id: {0}'.format(type_id) +
            '\n\ttype_info: {0}'.format(type_info),
            exc_info=True
        )
    db_handle.close()
    return type_info
import socket
import os
def init_server_socket() -> socket.socket:
    """Create, bind, and configure the server's unix datagram socket."""
    address = get_socket_address()
    # Remove any stale socket file left behind by a previous run; a missing
    # file is not an error.
    try:
        os.unlink(address)
    except (OSError, EnvironmentError):
        pass
    server = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_DGRAM)
    server.bind(address)
    server.settimeout(1)
    return server
def getInfo_insert(sql : str, tableInfo : table_info_module.TableInfo) -> tuple:
    """Return the target table name and its columns for an INSERT statement.

    The SQL is normalised first, then the table name is taken from the text
    between "INSERT INTO " and the following space; the columns are looked
    up in the supplied table info.
    """
    cleaned = string_module.removeNoise(sql)
    table = string_module.getParenthesesContext2(cleaned, "INSERT INTO ", " ")
    return (table, tableInfo[table])
def compute_flow_for_supervised_loss(
    feature_model,
    flow_model,
    batch,
    training
):
  """Compute flow for an image batch.
  Args:
    feature_model: A model to compute features for flow.
    flow_model: A model to compute flow.
    batch: A tf.tensor of shape [b, seq, h, w, c] holding a batch of triplets.
    training: bool that tells the model to use training or inference code.
  Returns:
    A tuple consisting of the images, the extracted features, the estimated
    flows, and the upsampled refined flows.
  """
  # Extract features from the first two frames of each triplet, then feed
  # them to the flow model.
  image_a = batch[:, 0]
  image_b = batch[:, 1]
  features = feature_model(image_a, image_b, training=training)
  return flow_model(features, training=training)
import os
def _get_filename_from_request(request):
"""
Gets the filename from an url request.
:param request: url request to get filename from
:type request: urllib.requests.Request or urllib2.Request
:rtype: str
"""
try:
headers = request.headers
content = headers["content-disposition"]
filename_str = content.split("filename=")[1]
return filename_str.strip("\"")
except (KeyError, AttributeError):
return os.path.basename(request.url) | 51d2f79ebc5f2abf57d5b12d0271d6d704a24297 | 3,637,860 |
def farey_sequence(n):
    """Return the nth Farey sequence as ordered pairs (N, D), where `N' is
    the numerator and `D' is the denominator.

    Uses the standard next-term recurrence. All arithmetic is done with
    integer floor division (``//``) so the result stays exact for
    arbitrarily large ``n``; the previous ``int((n + b) / d)`` went through
    a float and could round wrongly once ``n + b`` exceeded 2**53.
    """
    a, b, c, d = 0, 1, 1, n
    sequence = [(a, b)]
    while c <= n:
        k = (n + b) // d
        a, b, c, d = c, d, (k * c - a), (k * d - b)
        sequence.append((a, b))
    return sequence
def make_vgg19_block(block):
    """Builds a vgg19 block from a dictionary

    Each entry maps a layer name to its configuration; names containing
    'pool' become MaxPool2d layers, everything else becomes a Conv2d
    followed by an in-place ReLU.

    Args:
        block: a dictionary
    """
    modules = []
    for layer_spec in block:
        for layer_name, cfg in layer_spec.items():
            if 'pool' in layer_name:
                modules.append(nn.MaxPool2d(kernel_size=cfg[0], stride=cfg[1],
                                            padding=cfg[2]))
            else:
                modules.append(nn.Conv2d(in_channels=cfg[0], out_channels=cfg[1],
                                         kernel_size=cfg[2], stride=cfg[3],
                                         padding=cfg[4]))
                modules.append(nn.ReLU(inplace=True))
    return nn.Sequential(*modules)
import os
def process_one(f, mesh_directory, dataset_directory, skip_existing, log_level):
    """Processes a single mesh, adding it to the dataset."""
    # Path relative to the mesh root; expected to start with '/'.
    relpath = f.replace(mesh_directory, '')
    print('relpath:', relpath)
    assert relpath[0] == '/'
    relpath = relpath[1:]
    split, synset = relpath.split('/')[:2]
    log.verbose(f'The split is {split} and the synset is {synset}')
    name, extension = os.path.splitext(os.path.basename(f))
    valid_extensions = ['.ply']
    if extension not in valid_extensions:
        raise ValueError(f'File with unsupported extension {extension} found: {f}.'
                         f' Only {valid_extensions} are supported.')
    output_dir = f'{dataset_directory}/{split}/{synset}/{name}/'
    # This is the last file the processing writes, if it already exists the
    # example has already been processed.
    final_file_written = f'{output_dir}/depth_and_normals.npz'
    ldif_root = os.path.join(path_util.get_path_to_ldif_parent(), 'ldif')
    make_example.mesh_to_example(
        ldif_root, f,
        f'{dataset_directory}/{split}/{synset}/{name}/', skip_existing, log_level)
    return output_dir
def tld():
    """
    Return a random tld (Top Level Domain) from the tlds list below
    :return: str
    """
    choices = ('com', 'org', 'edu', 'gov', 'co.uk', 'net', 'io', 'ru', 'eu',)
    return pickone(choices)
def validate_boolean(option, value):
    """Validates that 'value' is 'true' or 'false'.
    """
    # Real booleans pass straight through.
    if isinstance(value, bool):
        return value
    if isinstance(value, basestring):
        if value in ('true', 'false'):
            return value == 'true'
        raise ConfigurationError("The value of '%s' must be "
                                 "'true' or 'false'" % (option,))
    raise TypeError("Wrong type for %s, value must "
                    "be a boolean or string representation" % (option,))
def A2RT(room_size, A_wall_all, F_abs, c=343, A_air=None, estimator='Norris_Eyring'):
    """ Estimate reverberation time based on room acoustic parameters,
    translated from matlab code developed by Douglas R Campbell
    Args:
        room_size: three-dimension measurement of shoebox room [Lx, Ly, Lz]
        A_wall_all: sound absorption coefficients of six wall surfaces,
            one row per frequency band
        F_abs: center frequency of each frequency band
        c: sound speed, default to 343 m/s
        A_air: absorption coefficients of air, if not specified, it will
            calculated based on humidity of 50
        estimator: estimate methods, choose from [Sabine, SabineAir,
            SabineAirHiAbs, Norris_Eyring], default to Norris_Eyring
    Returns:
        RT60 estimate per frequency band
    Raises:
        ValueError: if `estimator` is not one of the supported names
            (previously this fell through and crashed with
            UnboundLocalError)
    """
    if A_air is None:
        humidity = 50
        A_air = (5.5e-4)*(50/humidity)*((F_abs/1000)**1.7)
    Lx, Ly, Lz = room_size
    V_room = np.prod(room_size)  # Volume of room m^3
    S_wall_all = [Lx*Lz, Ly*Lz, Lx*Ly]
    S_room = 2.*np.sum(S_wall_all)  # Total area of shoebox room surfaces
    # Effective absorbing area of room surfaces at each frequency:
    # each of the three wall-pair areas weighted by its two absorption
    # coefficients.
    Se = (S_wall_all[1]*(A_wall_all[:, 0] + A_wall_all[:, 1])
          + S_wall_all[0]*(A_wall_all[:, 2] + A_wall_all[:, 3])
          + S_wall_all[2]*(A_wall_all[:, 4] + A_wall_all[:, 5]))
    A_mean = Se/S_room  # Mean absorption of wall surfaces
    # Reverberation time estimate.
    # Detect anechoic case (all absorption ~1) and force RT60 all zeros.
    if np.linalg.norm(1-A_mean) < EPSILON:
        RT60 = np.zeros(F_abs.shape)
    elif estimator == 'Sabine':
        # Sabine equation
        RT60 = np.divide((55.25/c)*V_room, Se)
    elif estimator == 'SabineAir':
        # Sabine equation (SI units) adjusted for air
        RT60 = np.divide((55.25/c)*V_room, (4*A_air*V_room+Se))
    elif estimator == 'SabineAirHiAbs':
        # Sabine equation (SI units) adjusted for air and high absorption
        RT60 = np.divide(55.25/c*V_room,
                         4*A_air*V_room+np.multiply(Se, (1+A_mean/2)))
    elif estimator == 'Norris_Eyring':
        # Norris-Eyring estimate adjusted for air absorption; EPSILON keeps
        # the log argument positive when A_mean approaches 1.
        RT60 = np.divide(55.25/c*V_room,
                         4*A_air*V_room-S_room*np.log(1-A_mean+EPSILON))
    else:
        raise ValueError('unknown estimator: %s' % estimator)
    return RT60
def GetVideoFromRate(content):
    """
    Extract video information from the HTML source of a video search page.

    Args:
        content: HTML source of the search-result page.

    Returns:
        list of Video objects populated from the page.
    """
    # av id and title
    regular1 = r'<a href="/video/av(\d+)/" target="_blank" class="title" [^>]*>(.*)</a>'
    info1 = GetRE(content, regular1)
    # view count
    regular2 = r'<i class="b-icon b-icon-v-play" title=".+"></i><span number="([^"]+)">\1</span>'
    info2 = GetRE(content, regular2)
    # favourites count
    regular3 = r'<i class="b-icon b-icon-v-fav" title=".+"></i><span number="([^"]+)">\1</span></span>'
    info3 = GetRE(content, regular3)
    # danmaku (bullet comment) count
    regular4 = r'<i class="b-icon b-icon-v-dm" title=".+"></i><span number="([^"]+)">\1</span>'
    info4 = GetRE(content, regular4)
    # upload date
    regular5 = r'<span class="v-date" title=".+">(.+)</span>'
    info5 = GetRE(content, regular5)
    # cover image
    regular6 = r'<img data-img="(.+)" [^>]*>'
    info6 = GetRE(content, regular6)
    # uploader id and name
    regular7 = r'<a class="v-author" href=".+/(\d+).+">(.+)</a>'
    info7 = GetRE(content, regular7)
    # NOTE: all the info lists should have equal length; an assertion here
    # would catch page-layout changes early.
    videoNum = len(info1) # number of videos found
    videoList = []
    for i in range(videoNum):
        video_t = Video()
        video_t.aid = getint(info1[i][0])
        video_t.title = info1[i][1]
        video_t.guankan = getint(info2[i])     # views
        video_t.shoucang = getint(info3[i])    # favourites
        video_t.danmu = getint(info4[i])       # danmaku
        video_t.date = info5[i]
        video_t.cover = info6[i]
        video_t.author = User(info7[i][0], info7[i][1])
        videoList.append(video_t)
    return videoList
def PPVfn(Mw, fc, Rho, V):
    """Calculates the peak-particle-velocity (PPV) at the source
    for a given homogeneous density and velocity model.
    :param Mw: the moment magnitude
    :type Mw: float
    :param fc: the corner frequency in Hz
    :type fc: float
    :param Rho: Density at the source in kg/m**3
    :type Rho: float
    :param V: Seismic velocity at the source in m/s
    :type V: float
    :returns: the PPV
    :rtype: float
    """
    # Seismic moment from the moment magnitude, and angular corner frequency.
    seismic_moment = Mw2M0(Mw)
    omega0 = 2 * np.pi * fc
    return omega0 ** 2 * seismic_moment / (4 * np.pi * Rho * (V ** 3))
import math
def extract_feature_label(feat_path, lab_path, audio_sr=22050, hop_size=1024):
    """Load a CQT feature matrix and build its frame-aligned chord labels.

    Parameters
    ----------
    feat_path: Path
        Path to the raw feature file; its name ends with
        ``_pitch_shift=<k>.npy``, which encodes the applied pitch shift.
    lab_path: Path
        Path to the corresponding label folder.
    audio_sr: int
        sampling rate, default=22050.
    hop_size: int
        number of samples between successive CQT columns, default=1024.

    Returns
    -------
    data:
        Processed (reshaped) data with 'cqt', 'chord' and 'transition'.
    """
    annotations = load_label(lab_path)
    cqt = load_feature(feat_path)
    stem = feat_path.replace(".npy", "")
    pitch_shift = int(stem.split('_pitch_shift=')[-1])
    beatles_id = stem
    n_frames = cqt.shape[0]
    # Frame-wise chord labels: map each annotated time span onto CQT frames.
    frame_chords = np.zeros(n_frames, dtype=np.int32)
    for annot in annotations:
        start = int(annot['onset'] * audio_sr / hop_size)
        stop = int(math.ceil(annot['end'] * audio_sr / hop_size))
        frame_chords[start:stop] = CHORD_INT_MAPPING_2[annot['chord']]
    # Modulate the chord labels to match the feature's pitch shift, then
    # derive the transition sequence.
    shifted = _shift_chord_labels(frame_chords, pitch_shift)
    transition = _get_chord_transition(shifted)
    data = {'cqt': cqt, 'chord': shifted, 'transition': transition}
    return reshape_data(data, beatles_id)
import os
def get_local_episodes(anime_folder, name):
    """return a list of files of a anime-folder inside ANIME_FOLDER"""
    episodes = []
    # Apostrophes are not allowed in folder names; mirror the download side.
    folder = os.path.join(anime_folder, name.replace("'", "_"))
    if not os.path.isdir(folder):
        os.makedirs(folder)
        return episodes
    for filename in os.listdir(folder):
        full_path = os.path.join(folder, filename)
        if not os.path.isfile(full_path):
            continue
        entry = parse_name(filename)
        entry["size"] = os.stat(full_path).st_size
        episodes.append(entry)
    return episodes
import os
def _find_pkg_info(directory):
"""find and return the full path to a PKG-INFO file or None if not found"""
for root, dirs, files in os.walk(directory):
for filename in files:
if filename == 'PKG-INFO':
return os.path.join(root, filename)
# no PKG-INFO file found
return None | ada0afe963cb859a5c5b19813ebbdea03cda7db3 | 3,637,871 |
import re
def get_m3u8_url(text):
    # type: (str) -> Union[str, None]
    """Attempts to get the first m3u8 url from the given string"""
    playlist_match = re.search(r"https[^\"]*\.m3u8", text)
    sig_match = re.search(r"(\?sig=[^\"]*)", text)
    # Both the playlist url and its signature must be present.
    if not (playlist_match and sig_match):
        return None
    return "{}{}".format(clean_uri(playlist_match.group()), sig_match.group())
def xavier_init(fan_in, fan_out, constant=1):
    """ Xavier initialization of network weights"""
    # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    # Symmetric uniform bound sqrt(6 / (fan_in + fan_out)), scaled by `constant`.
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=-bound, maxval=bound, dtype=tf.float32)
from typing import Union
from typing import Optional
def object_bbox_flip(
    bbox: remote_blob_util.BlobDef,
    image_size: remote_blob_util.BlobDef,
    flip_code: Union[int, remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """This operator flips the object bounding box.
    The flip code corresponds to the different flip mode:
    0 (0x00): Non Flip
    1 (0x01): Horizontal Flip
    16 (0x10): Vertical Flip
    17 (0x11): Both Horizontal and Vertical Flip
    Args:
        bbox (BlobDef): The bounding box.
        image_size (BlobDef): The size of input image.
        flip_code (Union[int, BlobDef]): The flip code.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        BlobDef: The result Blob
    For example:
    .. code-block:: python
        import numpy as np
        import oneflow as flow
        import oneflow.typing as tp
        def _of_object_bbox_flip(bbox_list, image_size, flip_code):
            bbox_shape = _get_bbox_static_shape(bbox_list)
            func_config = flow.FunctionConfig()
            func_config.default_data_type(flow.float)
            func_config.default_logical_view(flow.scope.mirrored_view())
            @flow.global_function(function_config=func_config)
            def object_bbox_flip_job(
                bbox_def: tp.ListListNumpy.Placeholder(
                    shape=tuple(bbox_shape), dtype=flow.float
                ),
                image_size_def: tp.ListNumpy.Placeholder(
                    shape=image_size.shape, dtype=flow.int32
                ),
            ) -> tp.ListListNumpy:
                bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
                flip_bbox = flow.object_bbox_flip(bbox_buffer, image_size_def, flip_code)
                return flow.tensor_buffer_to_tensor_list(
                    flip_bbox, shape=bbox_shape[1:], dtype=flow.float
                )
            input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
            bbox_tensor = object_bbox_flip_job([input_bbox_list], [image_size])
            return bbox_tensor[0]
        def _get_bbox_static_shape(bbox_list):
            bbox_shapes = [bbox.shape for bbox in bbox_list]
            bbox_static_shape = np.amax(bbox_shapes, axis=0)
            assert isinstance(
                bbox_static_shape, np.ndarray
            ), "bbox_shapes: {}, bbox_static_shape: {}".format(
                str(bbox_shapes), str(bbox_static_shape)
            )
            bbox_static_shape = bbox_static_shape.tolist()
            bbox_static_shape.insert(0, len(bbox_list))
            return bbox_static_shape
        if __name__ == "__main__":
            bbox = np.array([[[20.0, 40.0, 80.0, 160.0],
                            [30.0, 50.0, 70.0, 100.0]]]).astype(np.single) # [x1, y1, x2, y2]
            image_size = np.array([[480, 620]]).astype(np.int32)
            bbox_flip = _of_object_bbox_flip(bbox,
                                            image_size,
                                            flip_code=1) # Horizontal Flip
            print(bbox_flip[0][0])
            # [[399.  40. 459. 160.]
            #  [409.  50. 449. 100.]]
    """
    assert isinstance(bbox, remote_blob_util.BlobDef)
    assert isinstance(image_size, remote_blob_util.BlobDef)
    # bbox and image_size must agree on the leading dimension.
    assert bbox.shape[0] == image_size.shape[0]
    if name is None:
        name = id_util.UniqueStr("ObjectBboxFlip_")
    if not isinstance(flip_code, remote_blob_util.BlobDef):
        # A plain int flip code is broadcast to a constant blob with one
        # entry per element of the leading dimension.
        assert isinstance(flip_code, int)
        flip_code = flow.constant(
            flip_code,
            shape=(bbox.shape[0],),
            dtype=flow.int8,
            name="{}_FlipCode".format(name),
        )
    else:
        assert bbox.shape[0] == flip_code.shape[0]
    # Assemble and run the user op with the three inputs.
    op = (
        flow.user_op_builder(name)
        .Op("object_bbox_flip")
        .Input("bbox", [bbox])
        .Input("image_size", [image_size])
        .Input("flip_code", [flip_code])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
def compile(spec):
    """
    Build a JsonMatcher from a breaking-spec dictionary.

    NOTE: this function shadows the builtin ``compile`` within this module.

    Args:
        spec (dict): A specification dict that attempts to "break" test dicts
    Returns:
        JsonMatcher.
    """
    return JsonMatcher(spec)
import itertools
def labels_to_intervals(labels_list):
    """
    labels_to_intervals() converts list of labels of each frame into set of time intervals where a tag occurs
    Args:
        labels_list: list of labels of each frame
            e.g. [{'person'}, {'person'}, {'person'}, {'surfboard', 'person'}]
    Returns:
        tags - set of time intervals where a tag occurs:
            { (label, start, end) }, a video from time 0 (inclusive) to time T (exclusive)
            e.g. {('cat', 3, 9), ('dog', 5, 8), ('people', 0, 6)}
    """
    # Collect, per tag, the sorted list of frame indices where it appears.
    frames_by_tag = {}
    for idx, frame_tags in enumerate(labels_list):
        for tag in frame_tags:
            frames_by_tag.setdefault(tag, []).append(idx)
    intervals = set()
    for tag, frames in frames_by_tag.items():
        # Frames are already in increasing order; split them into maximal
        # runs of consecutive indices, each yielding one half-open interval.
        run_start = prev = frames[0]
        for frame in frames[1:]:
            if frame != prev + 1:
                intervals.add((tag, run_start, prev + 1))
                run_start = frame
            prev = frame
        intervals.add((tag, run_start, prev + 1))
    return intervals
def get_wolfram_query_url(query):
    """Get Wolfram query URL.

    The query is percent-encoded (spaces become '+') so characters such as
    '+', '&' or '=' survive as query content instead of being reinterpreted
    by the server.

    :param query: free-form query string (may be None or empty)
    :return: the Wolfram|Alpha URL as a str
    """
    from urllib.parse import quote_plus

    base_url = 'www.wolframalpha.com'
    if not query:
        # No query -> just point at the home page.
        return 'http://{0}'.format(base_url)
    return 'http://{0}/input/?i={1}'.format(base_url, quote_plus(query))
from typing import Sequence
def center_of_mass(points: Sequence[Sequence[float]]) -> np.ndarray:
    """Gets the center of mass of the points in space.

    Parameters
    ----------
    points
        The points to find the center of mass from. Each point is a
        sequence of coordinates; all points share one dimensionality.

    Returns
    -------
    np.ndarray
        The center of mass of the points.

    Raises
    ------
    ValueError
        If *points* is empty (the center of mass is undefined; previously
        this crashed with ZeroDivisionError).
    """
    if len(points) == 0:
        raise ValueError("center_of_mass() requires at least one point")
    coords = [np.array(point).astype("float") for point in points]
    return sum(coords) / len(coords)
def get_posts(session, client_id, now=None):
    """Returns all posts.

    :param session: database session used to run the query.
    :param client_id: id of the client whose post query is built.
    :param now: optional timestamp, normalized through ``_utcnow``.
        NOTE(review): ``now`` is computed but never used afterwards --
        looks like a leftover; confirm whether it should filter results.
    :returns: PaginatedSequence of posts ordered newest-first.
    :raises db_util.DbException: when the query hits an integrity error
        (the session is rolled back first).
    """
    now = _utcnow(now)
    try:
        # Newest-first ordering by creation time.
        results = _get_post_query(session, client_id)\
            .order_by(MappedPost.created_datetime.desc())
        posts = tuple(_make_post(*result) for result in results)
        return PaginatedSequence(posts)
    except sa.exc.IntegrityError:
        session.rollback()
        raise db_util.DbException._chain()
def tessellate_cell(csn, children, acells, position, parent, cell_params):
    """
    Tessellate a cell.
    :param int csn: Cell number.
    :param ndarray children: Array specifying children of each cell.
    :param ndarray acells: Array specifying the adjacent cells of each cell.
    :param ndarray position: Array specifying the position of each cell.
    :param ndarray parent: Array specifying the parent of each cell.
    :param ndarray cell_params: Array specifying the corner parameters of each
        cell.
    :return: The number of triangles and the parameter values for each of the
        three triangle vertices (ntri, triangles). The *triangles* list will
        be *ntri* x 3, where each row is a triangle and contains a list of
        three tuples containing two parameter values on the original surface.
        These parameter values represent a vertex of the triangle. The order of
        the parameter values should result in a normal vector oriented the same
        as the original surface.
    :rtype: tuple
    Reference: Anderson, J., Khamayseh, A., and Jean, B. "Adaptive Resolution
    Refinement," Technical Report, Los Alamos National Laboratory.
    """
    # Determine number of interior points and parameters on each edge.
    nprms = []
    edge_prms = []
    # Edges are visited in this fixed order so the interior points come out
    # in the counter-clockwise order used when assembling the fan below.
    edge_order = [1, 3, 4, 2]
    simple = False
    for i in edge_order:
        adj_cells = _find_neighbors(csn, i, children, acells, position, parent)
        prms = _find_edge_params(i, adj_cells, cell_params)
        nprms.append(len(prms))
        edge_prms.append(prms)
        # Any edge with more than one interior point forces the fallback
        # fan triangulation instead of the 16-case lookup table.
        if len(prms) > 1:
            simple = True
    # Use simple triangulation if any edge has more than one interior point.
    if simple:
        # Make a single list of parameters in counter-clockwise order.
        # Edge 1
        all_params = [cell_params[csn, 0]]
        for uv in edge_prms[0]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 1])
        # Edge 3
        for uv in edge_prms[1]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 2])
        # Edge 4
        for uv in edge_prms[2]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 3])
        # Edge 2
        for uv in edge_prms[3]:
            all_params.append(uv)
        all_params.append(cell_params[csn, 0])
        # Middle parameter.
        uv0 = cell_params[csn, 0]
        uv1 = cell_params[csn, 2]
        uvc = 0.5 * (uv0 + uv1)
        # Generate triangles: one fan triangle per boundary segment, all
        # sharing the central parameter uvc.
        triangles = []
        for i in range(len(all_params) - 1):
            uv0 = all_params[i]
            uv1 = all_params[i + 1]
            triangles.append([uv0, uv1, uvc])
        return len(triangles), triangles
    # Use predefined triangles.
    # Determine triangulation case by the number of interior points on each
    # edge.
    triangles = []
    triapp = triangles.append
    cprms = cell_params[csn, :]
    # First (and, at this point, only) interior parameter of each
    # subdivided edge, in edge_order.
    eprms = [row[0] for row in edge_prms if len(row) > 0]
    # Each of the four decimal digits of `case` is the interior-point count
    # (0 or 1 here) of one edge: ones digit = edge 1, tens = edge 3,
    # hundreds = edge 4, thousands = edge 2. This yields the 16 cases below.
    case = nprms[0] + nprms[1] * 10 + nprms[2] * 100 + nprms[3] * 1000
    # Case 0
    if case == 0:
        triapp([cprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 1
    if case == 1:
        triapp([cprms[0], eprms[0], cprms[3]])
        triapp([eprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], cprms[3], eprms[0]])
        return len(triangles), triangles
    # Case 2
    if case == 10:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], cprms[2], cprms[3]])
        triapp([cprms[3], cprms[0], eprms[0]])
        return len(triangles), triangles
    # Case 3
    if case == 11:
        triapp([cprms[0], eprms[0], cprms[3]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], cprms[3]])
        triapp([cprms[3], eprms[0], eprms[1]])
        return len(triangles), triangles
    # Case 4
    if case == 100:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([cprms[1], cprms[2], eprms[0]])
        triapp([eprms[0], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 5
    if case == 101:
        triapp([cprms[0], eprms[0], cprms[3]])
        triapp([eprms[0], eprms[1], cprms[3]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([cprms[1], cprms[2], eprms[1]])
        return len(triangles), triangles
    # Case 6
    if case == 110:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], cprms[2], eprms[1]])
        triapp([eprms[1], cprms[0], eprms[0]])
        triapp([eprms[1], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 7
    if case == 111:
        triapp([cprms[0], eprms[0], eprms[2]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], eprms[2]])
        triapp([eprms[2], eprms[0], eprms[1]])
        triapp([eprms[2], cprms[3], cprms[0]])
        return len(triangles), triangles
    # Case 8
    if case == 1000:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([cprms[1], cprms[2], eprms[0]])
        triapp([cprms[2], cprms[3], eprms[0]])
        return len(triangles), triangles
    # Case 9
    if case == 1001:
        triapp([cprms[0], eprms[0], eprms[1]])
        triapp([eprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], eprms[1], eprms[0]])
        triapp([cprms[2], cprms[3], eprms[1]])
        return len(triangles), triangles
    # Case 10
    if case == 1010:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], cprms[2], eprms[1]])
        triapp([cprms[2], cprms[3], eprms[1]])
        triapp([eprms[1], cprms[0], eprms[0]])
        return len(triangles), triangles
    # Case 11
    if case == 1011:
        triapp([cprms[0], eprms[0], eprms[2]])
        triapp([eprms[0], eprms[1], eprms[2]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], eprms[2]])
        triapp([cprms[2], cprms[3], eprms[2]])
        return len(triangles), triangles
    # Case 12
    if case == 1100:
        triapp([cprms[0], cprms[1], eprms[1]])
        triapp([cprms[1], cprms[2], eprms[0]])
        triapp([eprms[0], eprms[1], cprms[1]])
        triapp([eprms[0], cprms[3], eprms[1]])
        return len(triangles), triangles
    # Case 13
    if case == 1101:
        triapp([cprms[0], eprms[0], eprms[2]])
        triapp([eprms[0], cprms[1], cprms[2]])
        triapp([cprms[2], eprms[1], eprms[0]])
        triapp([eprms[1], eprms[2], eprms[0]])
        triapp([eprms[1], cprms[3], eprms[2]])
        return len(triangles), triangles
    # Case 14
    if case == 1110:
        triapp([cprms[0], cprms[1], eprms[0]])
        triapp([eprms[0], eprms[2], cprms[0]])
        triapp([eprms[0], cprms[2], eprms[1]])
        triapp([eprms[1], eprms[2], eprms[0]])
        triapp([eprms[1], cprms[3], eprms[2]])
        return len(triangles), triangles
    # Case 15
    if case == 1111:
        triapp([cprms[0], eprms[0], eprms[3]])
        triapp([eprms[0], eprms[1], eprms[3]])
        triapp([eprms[0], cprms[1], eprms[1]])
        triapp([eprms[1], cprms[2], eprms[2]])
        triapp([eprms[2], eprms[3], eprms[1]])
        triapp([eprms[2], cprms[3], eprms[3]])
        return len(triangles), triangles
    # Return empty list (unreachable for valid 0/1 interior-point counts).
    return 0, []
import torch
def add_eig_vec(g, pos_enc_dim):
    """
    Graph positional encoding v/ Laplacian eigenvectors
    This func is for eigvec visualization, same code as positional_encoding() func,
    but stores value in a diff key 'eigvec'
    """
    # Laplacian: L = I - D^{-1/2} A D^{-1/2}; degrees are clipped to >= 1
    # so isolated nodes do not divide by zero.
    A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    L = sp.eye(g.number_of_nodes()) - N * A * N
    # Eigenvectors with numpy (dense decomposition of the full Laplacian).
    EigVal, EigVec = np.linalg.eig(L.toarray())
    idx = EigVal.argsort() # increasing order
    EigVal, EigVec = EigVal[idx], np.real(EigVec[:, idx])
    # Skip the first eigenvector and keep the next pos_enc_dim ones.
    g.ndata["eigvec"] = torch.from_numpy(EigVec[:, 1 : pos_enc_dim + 1]).float()
    # zero padding to the end if n < pos_enc_dim
    # NOTE(review): `F` is presumably torch.nn.functional -- confirm import.
    n = g.number_of_nodes()
    if n <= pos_enc_dim:
        g.ndata["eigvec"] = F.pad(
            g.ndata["eigvec"], (0, pos_enc_dim - n + 1), value=float("0")
        )
    return g
import numpy as np
from scipy import ndimage
from skimage.morphology import ball
def _advanced_clip(
    data, p_min=35, p_max=99.98, nonnegative=True, dtype="int16", invert=False
):
    """
    Clip intensity outliers at both tails and fit the image into a compact dtype.

    Emulates ANTs-style intensity massaging: percentile thresholds are
    measured on a median-filtered (despiked) copy of the data so spikes do
    not bias them, then applied to the original image, which is normalised
    to [0, 1], optionally inverted, and finally scaled to 0-255 for the
    integer dtypes.
    """
    # Despike with a spherical median filter before measuring percentiles;
    # the smoothed copy is only used for the thresholds.
    smoothed = ndimage.median_filter(data, footprint=ball(3))
    sample = smoothed[smoothed > 0] if nonnegative else smoothed
    lower = np.percentile(sample, p_min)
    upper = np.percentile(sample, p_max)
    # Apply the thresholds to the original (not the denoised) data.
    clipped = np.clip(data, a_min=lower, a_max=upper)
    clipped -= clipped.min()
    clipped /= clipped.max()
    if invert:
        clipped = 1.0 - clipped
    if dtype in ("uint8", "int16"):
        clipped = np.round(255 * clipped).astype(dtype)
    return clipped
def allocate_buffers(engine):
    """Allocate host/device buffers for every binding of a TensorRT engine.

    Returns a tuple ``(inputs, outputs, bindings, stream)`` where ``inputs``
    and ``outputs`` hold HostDeviceMem pairs, ``bindings`` holds the device
    pointers as ints, and ``stream`` is a fresh CUDA stream.
    """
    inputs, outputs, bindings = [], [], []
    for binding in engine:
        # Element count for this binding at the engine's max batch size.
        count = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        # Numpy-equivalent dtype of the binding.
        np_dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Page-locked (pinned) host buffer and a matching device allocation.
        host_mem = cuda.pagelocked_empty(count, np_dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        bindings.append(int(device_mem))
        # Route the pair into the inputs or outputs list.
        target = inputs if engine.binding_is_input(binding) else outputs
        target.append(HostDeviceMem(host_mem, device_mem))
    # Stream used later for async copies and inference execution.
    stream = cuda.Stream()
    return inputs, outputs, bindings, stream
def D_to_M(D, ecc):
    """Mean anomaly from parabolic eccentric anomaly.

    Parameters
    ----------
    D : float
        Parabolic eccentric anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    M : float
        Mean anomaly (rad).
    """
    # Evaluating the parabolic Kepler equation with M = 0 yields M directly.
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        return _kepler_equation_parabolic(D, 0.0 * u.rad, ecc)
def dict_merge(a, b):
    """Merge ``b`` into a shallow copy of ``a`` and return the result.

    Parameters
    ----------
    a
        One dictionary that will be merged (left untouched).
    b
        Other dictionary that will be merged.
    """
    base = dict(a)  # shallow copy so the caller's mapping is not mutated
    return _merge(base, b)
import copy
def merge_dictionary(src: dict, dest: dict) -> dict:
    """
    Recursively merge ``src`` into ``dest`` and return ``dest``.

    - Keys missing from ``dest`` are copied over; dict and list values are
      deep-copied so the two dictionaries never share mutable state.
    - When the source value is a dict, it is merged recursively into the
      destination value.
    - When both values are lists, the (deep-copied) source items are appended.
    - Otherwise the existing ``dest`` value wins.

    :param src: A dictionary with the values to merge.
    :param dest: A dictionary where to merge the values (mutated in place).
    :return: ``dest``.
    """
    for name, value in src.items():
        if name not in dest:
            if isinstance(value, (dict, list)):
                # Deep-copy containers to avoid keeping references to src.
                dest[name] = copy.deepcopy(value)
            else:
                dest[name] = value
        elif isinstance(value, dict):
            # When field exists in destination and is dict merge the source value
            merge_dictionary(value, dest[name])
        elif isinstance(value, list) and isinstance(dest[name], list):
            # When both values are a list merge them
            dest[name].extend(copy.deepcopy(value))
    return dest
def create_from_source(wp_config, source: Location):
    """
    Build the LuhSql accessor matching a Location and the parsed WP config.

    SSH locations carry their own user/host; local locations connect without
    SSH. Any other Location subclass is rejected.
    """
    if isinstance(source, SshLocation):
        ssh_user, ssh_host = source.user, source.host
    elif isinstance(source, LocalLocation):
        ssh_user = ssh_host = None
    else:
        raise LuhError(f"Unknown source type: {source.__class__.__name__}")

    return LuhSql(
        host=wp_config["db_host"],
        user=wp_config["db_user"],
        password=wp_config["db_password"],
        db_name=wp_config["db_name"],
        ssh_user=ssh_user,
        ssh_host=ssh_host,
    )
def eye(N, M=None, k=0, dtype=DEFAULT_FLOAT_DTYPE):
    """
    Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.

    Args:
        N (int): Number of rows in the output, must be larger than 0.
        M (int, optional): Number of columns in the output. If None, defaults
            to N; if defined, must be larger than 0. Default is None.
        k (int, optional): Index of the diagonal: 0 (the default) refers to
            the main diagonal, a positive value refers to an upper diagonal,
            and a negative value to a lower diagonal. Default is 0.
        dtype (Union[mstype.dtype, str], optional): Designated tensor dtype,
            can be in format of np.float32, or `float32`. Default is
            mstype.float32.

    Returns:
        result (Tensor): A tensor of shape (N,M) where all elements are zero,
        except for the k-th diagonal, whose values are equal to one.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.eye(2, 2))
        [[1. 0.]
        [0. 1.]]
    """
    dtype = _check_dtype(dtype)
    make_eye = P.Eye()
    if M is None:
        M = N
    N, M, k = int(N), int(M), int(k)
    if k != 0 or N == 0 or M == 0:
        # The Eye op only covers the main diagonal of a non-empty matrix;
        # fall back to host-side numpy for the remaining cases.
        out = onp.eye(N, M, k)
    else:
        out = make_eye(N, M, dtype)
    return asarray(out, dtype=dtype)
import os
def create_inception_graph():
    """
    Create a graph from the saved GraphDef file.

    :return: the Inception-trained graph together with the bottleneck,
        JPEG-input and resized-input tensors.
    """
    with tf.Session() as sess:
        # Prefer 'def.pb'; fall back to the stock Inception graph file.
        graph_path = os.path.join(model_dir, 'def.pb')
        if not os.path.exists(graph_path):
            graph_path = os.path.join(model_dir, 'classify_image_graph_def.pb')
        with gfile.FastGFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            # Import the serialized graph and grab the tensors we need later.
            bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
                tf.import_graph_def(graph_def, name='', return_elements=[
                    BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
                    RESIZED_INPUT_TENSOR_NAME]))
        return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
import requests
def team_game_log(request, team_id, season):
    """Render the regular-season game-log page for one team."""
    api_url = f'http://{request.get_host()}/api/teams/{team_id}/{season}/Regular'
    payload = requests.get(api_url).json()
    return render(request, 'main/team_games.html', context=payload)
def infer_labels(fn_pickle, testdata, fout_pickle, weak_lower, weak_upper):
    """
    Infer labels for new spectra (linear model case).

    Historically good weak-line windows: log g (0.95, 0.98), Teff (0.95, 0.99),
    [Fe/H] (0.935, 0.98).

    :param fn_pickle: path to the trained-model pickle containing
        (dataall, metaall, labels, offsets, coeffs, covs, scatters, chis, chisqs)
    :param testdata: array of shape (npix, nstars, 3) holding wavelength,
        normalized flux and flux uncertainty per pixel and star
    :param fout_pickle: path where (Params_all, MCM_rotate_all) is dumped
    :param weak_lower: lower flux bound selecting weak-line pixels
    :param weak_upper: upper flux bound selecting weak-line pixels
    :return: tuple (Params_all, MCM_rotate_all) with the inferred labels and
        the per-star normal matrices
    :raises ValueError: if a test spectrum is not on the training wavelength grid
    """
    # Slow because the whole model pickle is read on every call.
    with open(fn_pickle, 'rb') as file_in:
        dataall, metaall, labels, offsets, coeffs, covs, scatters, chis, chisqs = \
            pickle.load(file_in)
    nstars = testdata.shape[1]
    nlabels = len(labels)
    Params_all = np.zeros((nstars, nlabels))
    MCM_rotate_all = np.zeros((nstars, nlabels, nlabels))
    for jj in range(nstars):
        # Test spectra must share the training wavelength grid.
        if np.any(testdata[:, jj, 0] != dataall[:, 0, 0]):
            print(testdata[:5, jj, 0], dataall[:5, 0, 0])
            raise ValueError("test spectrum %d is not on the training wavelength grid" % jj)
        ydata = testdata[:, jj, 1]
        ysigma = testdata[:, jj, 2]
        ydata_norm = ydata - coeffs[:, 0]  # subtract the mean (zeroth coefficient)
        # Keep only the label columns of the coefficient matrix; the index must
        # be an int (a float slice index is a TypeError).
        cut_to = -metaall.shape[1]
        coeffs_slice = coeffs[:, cut_to:]
        # Restrict the fit to weak-line pixels within the flux window.
        ind1 = np.logical_and(ydata > weak_lower, ydata < weak_upper)
        # Inverse variance combining measurement noise and model scatter.
        Cinv = 1. / (ysigma ** 2 + scatters ** 2)
        MCM_rotate = np.dot(coeffs_slice[ind1].T,
                            Cinv[:, None][ind1] * coeffs_slice[ind1])
        MCy_vals = np.dot(coeffs_slice[ind1].T, Cinv[ind1] * ydata_norm[ind1])
        Params = np.linalg.solve(MCM_rotate, MCy_vals) + offsets
        Params_all[jj, :] = Params
        MCM_rotate_all[jj, :, :] = MCM_rotate
    with open(fout_pickle, 'wb') as file_out:
        pickle.dump((Params_all, MCM_rotate_all), file_out)
    return Params_all, MCM_rotate_all
import argparse
from pathlib import Path
def main():
    """Console script for vaqc: dispatch QC processing for a derivatives folder."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'derivatives_dir',
        type=Path,
        action='store',
        help='the root folder of a BIDS derivative dataset '
             '(sub-XXXXX folders '
             'should be found at the top level in this folder).',
    )
    opts = parser.parse_args()
    print("using input directory as ", opts.derivatives_dir)
    # Only the fmriprep pipeline is handled so far.
    if opts.derivatives_dir.name == 'fmriprep':
        process_fmriprep(opts.derivatives_dir)
    return 0
import pylab as pl
def figure(*args, grid=True, style='default', figsize=(9, 5), **kwargs):
    """
    Create a new figure with the given style/grid and return its axis object.
    """
    # Public styles only (private styles start with an underscore).
    valid_styles = [s for s in pl.style.available + ['default'] if not s.startswith('_')]
    if style not in valid_styles:
        raise ValueError(f'\n\n Valid Styles are {valid_styles}')
    pl.style.use(style)
    pl.figure(*args, figsize=figsize, **kwargs)
    pl.grid(grid)
    return pl.gca()
def boundcond(stato):
    """Extend the state string with boundary cells according to the chosen BCs.

    Reads the module-level ``bc`` ('const', 'refl' or 'period') and ``n``
    (assumed to be len(stato) — TODO confirm against the caller) and returns
    ``stato`` with one extra cell prepended and appended.

    :param stato: state string to be evolved
    :raises ValueError: if ``bc`` is not a recognized boundary condition
    """
    if bc == 'const':
        # Constant boundaries: pad with the fixed '.' cell.
        status = ''.join(('.', stato, '.'))
    elif bc == 'refl':
        # Reflective boundaries: mirror the first/last cells.
        status = ''.join((stato[0], stato, stato[n - 1]))
    elif bc == 'period':
        # Periodic boundaries: wrap the last/first cells around.
        status = ''.join((stato[n - 1], stato, stato[0]))
    else:
        # Previously this only printed and then crashed with UnboundLocalError
        # on `return status`; fail fast with a clear exception instead.
        raise ValueError('Invalid BCs')
    return status
import logging
def general_string_parser(content_string, location):
    """
    Parse one endpoint/method/header/body string for parameter tags.

    Every parameter occurrence — enumerate (default ``<1,2,3>``), local
    variable (default ``<>``) or global variable (default ``<:name:>``) — is
    located, recorded in ``globe.all_parameters``-compatible dicts and replaced
    in the string by the non-priority start/end tags::

        before: https://mydomain/addUser/<:user:>/<>/<1,2,3,4,5>
        after:  https://mydomain/addUser/<>/<>/<>

    Each parameter receives a unique id from ``globe.param_id_counter``.

    :param content_string: raw endpoint/method/header/body text
    :param location: config section name used to look up the tag definitions;
        also stored with every parameter
    :return: tuple ``(tagged_string, parameters)`` where ``parameters`` is the
        list of parameter dicts in order of appearance, e.g.
        ``{'location': ..., 'type': 'global_variable', 'name': ..., 'id': ...}``,
        ``{'location': ..., 'type': 'local_variable', 'id': ...}`` or
        ``{'location': ..., 'type': 'enumerate', 'content': ..., 'id': ...}``
    """
    logging.debug('Calling the general_string_parser function with a following parameters: [{}, {}]'.format(content_string, location))
    cfg = getattr(globe.config, location)
    # Tag pairs: enumerate/local variable (default '<' / '>') and global
    # variable (default '<:' / ':>'). By default one start tag is a substring
    # of the other; the configured priority tag wins when both match at the
    # same position.
    enum_start_tag = cfg.enum.start
    enum_end_tag = cfg.enum.end
    variable_start_tag = cfg.variable.start
    variable_end_tag = cfg.variable.end
    prio_start_tag = cfg.priority_start
    # Replacement tags written back into the string for every parameter found.
    non_prio_start = cfg.non_priority_start
    non_prio_end = cfg.non_priority_end

    parameters = []
    position = 0  # everything before this index has already been parsed
    e_idx = content_string.find(enum_start_tag)
    v_idx = content_string.find(variable_start_tag)
    while e_idx != -1 or v_idx != -1:
        # Decide which tag type to consume next: the earliest match wins;
        # when both start at the same index the priority tag wins.
        if e_idx == v_idx:
            if prio_start_tag == enum_start_tag:
                consume_enum = True
            elif prio_start_tag == variable_start_tag:
                consume_enum = False
            else:
                message = "Should have never gotten here"
                raise EndpointSemanticError(__name__, "general_string_parser", message)
        elif e_idx == -1 or (v_idx != -1 and v_idx < e_idx):
            consume_enum = False
        else:
            consume_enum = True

        if consume_enum:
            start_tag, end_tag = enum_start_tag, enum_end_tag
        else:
            start_tag, end_tag = variable_start_tag, variable_end_tag

        # find_between(tail, start, end, repl_start, repl_end) returns
        # (modified tail with the tag replaced, the tag's content, the index
        # just past the replacement). Only the unparsed tail is searched so
        # already-inserted non-priority tags are never re-matched.
        resulted_tuple = find_between(content_string[position:], start_tag, end_tag, non_prio_start, non_prio_end)
        original_string = content_string
        content_string = content_string[:position] + resulted_tuple[0]
        content = resulted_tuple[1]
        position = len(original_string[:position]) + resulted_tuple[2]

        # Record the parameter that was just consumed.
        if consume_enum:
            if len(content) == 0:
                # '<>' with no content is a local variable
                p = {"location": location, "type": "local_variable", "id": globe.param_id_counter}
            else:
                # '<a,b,c>' is an enumerated type
                p = {"location": location, "type": "enumerate", "content": content, "id": globe.param_id_counter}
        else:
            p = {"location": location, "type": "global_variable", "name": content, "id": globe.param_id_counter}
        parameters.append(p)

        # Continue searching strictly after the start index of the tag that
        # was just consumed (both searches, exactly as the original four
        # branches all set e_change and v_change to that same index).
        consumed_idx = e_idx if consume_enum else v_idx
        e_idx = content_string.find(enum_start_tag, consumed_idx + 1)
        v_idx = content_string.find(variable_start_tag, consumed_idx + 1)
        globe.param_id_counter += 1
    return content_string, parameters
def get_compare_collection(name, csv_line):
    """Get compare collection data for the named collection.

    Returns a dict with at least ``isExist``; on success it also carries the
    fields filled in by ``get_collection_data_dirs`` plus ``hasNext``
    (whether more CSV lines follow — presumably ``nextCsv`` is set by
    ``get_collection_data_dirs``; verify against that helper).
    """
    session = tables.get_session()
    if session is None:
        return {'isExist': False}
    response = {}
    try:
        table = CollectionTable()
        cid = table.get_field_by_key(CollectionTable.collection_id,
                                     CollectionTable.collection_name, name, session)
        cip = table.get_field_by_key(CollectionTable.collection_ip,
                                     CollectionTable.collection_name, name, session)
        get_collection_data_dirs(cip, cid, csv_line, response, session)
        response['hasNext'] = csv_line < response['nextCsv']
    except SQLAlchemyError as err:
        LOGGER.error('Get compare collection data failed: %s', err)
        return {'isExist': False}
    finally:
        session.close()
    response['isExist'] = True
    return response
from qutepart.indenter.base import IndentAlgNormal as indenterClass
from qutepart.indenter.base import IndentAlgBase as indenterClass
from qutepart.indenter.base import IndentAlgNormal as indenterClass
from qutepart.indenter.cstyle import IndentAlgCStyle as indenterClass
from qutepart.indenter.python import IndentAlgPython as indenterClass
from qutepart.indenter.ruby import IndentAlgRuby as indenterClass
from qutepart.indenter.xmlindent import IndentAlgXml as indenterClass
from qutepart.indenter.haskell import IndenterHaskell as indenterClass
from qutepart.indenter.lilypond import IndenterLilypond as indenterClass
from qutepart.indenter.lisp import IndentAlgLisp as indenterClass
from qutepart.indenter.scheme import IndentAlgScheme as indenterClass
def _getSmartIndenter(indenterName, qpart, indenter):
    """Get indenter by name.

    Available indenters are none, normal, cstyle, haskell, lilypond, lisp,
    python, ruby, scheme and xml. The indenter name is not case sensitive.

    Raise KeyError if not found.
    """
    indenterName = indenterName.lower()
    # Each branch imports its implementation lazily so only the module for the
    # chosen language is ever loaded.
    if indenterName in ('haskell', 'lilypond'):  # not supported yet
        logger.warning('Smart indentation for %s not supported yet. But you could be a hero who implemented it' % indenterName)
        from qutepart.indenter.base import IndentAlgNormal as indenterClass
    elif 'none' == indenterName:
        from qutepart.indenter.base import IndentAlgBase as indenterClass
    elif 'normal' == indenterName:
        from qutepart.indenter.base import IndentAlgNormal as indenterClass
    elif 'cstyle' == indenterName:
        from qutepart.indenter.cstyle import IndentAlgCStyle as indenterClass
    elif 'python' == indenterName:
        from qutepart.indenter.python import IndentAlgPython as indenterClass
    elif 'ruby' == indenterName:
        from qutepart.indenter.ruby import IndentAlgRuby as indenterClass
    elif 'xml' == indenterName:
        from qutepart.indenter.xmlindent import IndentAlgXml as indenterClass
    elif 'lisp' == indenterName:
        from qutepart.indenter.lisp import IndentAlgLisp as indenterClass
    elif 'scheme' == indenterName:
        from qutepart.indenter.scheme import IndentAlgScheme as indenterClass
    else:
        raise KeyError("Indenter %s not found" % indenterName)
    return indenterClass(qpart, indenter)
import json
def scheming_multiple_choice_output(value):
    """
    Return the stored multiple-choice value as a proper Python object.

    Lists pass through untouched; strings are decoded as JSON; anything that
    fails to decode is wrapped into a one-element list.
    """
    if isinstance(value, list):
        return value
    try:
        decoded = json.loads(value)
    except ValueError:
        decoded = [value]
    return decoded
def logmap(x, x0):
    """
    Map a point lying on the manifold into the tangent space of a second
    point of the manifold (sphere logarithmic map).

    Parameters
    ----------
    :param x: point on the manifold (1-D vector or matrix of column vectors)
    :param x0: basis point of the tangent space where x will be mapped

    Returns
    -------
    :return: vector(s) in the tangent space of x0; columns whose angle to x0
        is (numerically) zero are mapped to the zero vector
    """
    if np.ndim(x0) < 2:
        x0 = x0[:, None]
    if np.ndim(x) < 2:
        x = x[:, None]
    # Clip into the arccos domain: rounding can push |<x0, x>| slightly
    # above 1 for unit vectors, which would otherwise yield NaN angles that
    # bypass the small-angle handling below.
    theta = np.arccos(np.clip(np.dot(x0.T, x), -1.0, 1.0))
    # theta/sin(theta) is 0/0 at theta == 0; the offending columns are
    # overwritten just below, so silence the expected warnings.
    with np.errstate(divide='ignore', invalid='ignore'):
        u = (x - x0 * np.cos(theta)) * theta / np.sin(theta)
    u[:, theta[0] < 1e-16] = np.zeros((u.shape[0], 1))
    return u
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.