| content (string, length 22–815k) | id (int64, 0–4.91M) |
|---|---|
def test_edit_format(sp, tempfile, setup_edit_patches, cleandir, fake_db, funk_dict):
"""Tests that the edit command reformats command strings when needed."""
edited_cmd_string = 'EDITED CMD STRING'
setup_edit_patches(sp, tempfile, edited_cmd_string)
some_funk = list(funk_dict.keys())[0]
cmd = commands.Edit([some_funk])
cmd()
loaded_funks = load_funks()
assert loaded_funks[some_funk] == '{} "$@"'.format(edited_cmd_string)
| 17,800
|
def test_alert_get_by_id_command(mocker, grafana_client):
"""
Given:
- All relevant arguments for the command that is executed
When:
- alert-get-by-id command is executed
Then:
- The http request is called with the right arguments
"""
http_request = mocker.patch.object(grafana_client, '_http_request')
args = {'alert_id': "4"}
alert_get_by_id_command(grafana_client, args)
http_request.assert_called_with('GET', 'api/alerts/4', headers=None)
| 17,801
|
def log(message, cmap="INFO", type=None, verbosity_check=False, **kwargs):
"""Utility function to log a `message` to stdout
Args:
message (typing.Any): an object that supports `__str__()`
        cmap (str, optional): what colormap to use. "INFO" corresponds to blue,
            "WARN" to bold yellow, "ANNOUNCE" to bold green and "ERR" to bold red.
            Defaults to "INFO".
type (str, optional): Type of message, for user knowledge. If provided, will be used as the
tag for this output (e.g. "info"). If no value is provided, the same string as `cmap` is
used as the tag. Defaults to None.
verbosity_check (bool, optional): Whether to check for a "VERBOSE" environment flag before
outputting. If false, always output text regardless of verbosity setting. Defaults to False.
"""
if verbosity_check and not verbose():
return
_message = str(message)
class T:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
if cmap == "INFO":
c = T.OKBLUE
elif cmap == "WARN":
c = T.BOLD + T.WARNING
elif cmap == "ANNOUNCE":
c = T.BOLD + T.OKGREEN
elif cmap == "ERR":
c = "\n" + T.BOLD + T.FAIL
else:
c = T.OKBLUE
timestamp = f"{time() - START_TIME():.2f}s"
lines = textwrap.wrap(
_message + T.ENDC,
width=shutil.get_terminal_size((120, 24))[0] - 1,
initial_indent=c + "%" * 3 + f" [{type or cmap.lower()} @ {timestamp}] ",
subsequent_indent=". " * 6 + "",
)
tqdm.write("\n".join(lines), file=stderr)
# print(*lines, sep='\n', file=stderr)
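# Hedged usage sketch, not part of the original snippet; it assumes the module-level
# helpers referenced above (verbose, START_TIME, tqdm, textwrap, shutil, stderr) exist.
log("loading configuration")                            # blue "[info @ ...]" tag
log("checkpoint missing, using defaults", cmap="WARN")  # bold yellow "[warn @ ...]" tag
log("training finished", cmap="ANNOUNCE", type="done")
log("verbose-only detail", verbosity_check=True)        # printed only when VERBOSE is set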
| 17,802
|
def get_psi_part(v, q):
"""Return the harmonic oscillator wavefunction for level v on grid q."""
Hr = make_Hr(v + 1)
return N(v) * Hr[v](q) * np.exp(-q * q / 2.0)
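# Illustrative evaluation (assumption: make_Hr and N are the Hermite-polynomial and
# normalization helpers defined elsewhere in this module).
q = np.linspace(-4.0, 4.0, 200)
psi0 = get_psi_part(0, q)   # ground state: a Gaussian ~ exp(-q**2 / 2) up to normalization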
| 17,803
|
def get_tempdir() -> str:
"""Get the directory where temporary files are stored."""
return next((os.environ[var] for var in (
'XDG_RUNTIME_DIR', 'TMPDIR', 'TMP', 'TEMP'
) if var in os.environ), '/tmp')
| 17,804
|
def get_pij(d, scale, i, optim = "fast"):
"""
Compute probabilities conditioned on point i from a row of distances
d and a Gaussian scale (scale = 2*sigma^2). Vectorized and unvectorized
versions available.
"""
if optim == "none":
#
# TO BE DONE
#
return get_pij(d, scale, i, optim = "fast")
else:
d_scaled = -d/scale
d_scaled -= np.max(d_scaled)
exp_D = np.exp(d_scaled)
exp_D[i] = 0
return exp_D/np.sum(exp_D)
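# Illustrative call (values are made up): conditional probabilities p_{j|i} for point i
# from its squared distances, as used in SNE/t-SNE-style embeddings; scale = 2 * sigma**2.
d = np.array([0.0, 1.0, 4.0, 9.0])
p = get_pij(d, scale=2.0, i=0)
# p[0] == 0 and p sums to 1; closer points receive higher probability.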
| 17,805
|
def print_stack_trace():
"""Print the current stack trace"""
for line in traceback.format_stack():
print(line.strip())
| 17,806
|
def makePlot(counts, files):
"""
It takes the list with files names and list with alle the counts.
Then it loops through the count list and adds these to a bar object.
The height calculates the bottom of the next bar so it gets stacked on
top of each other. outside the loop the plt.bar object gets finished
with labels for the x & y-axsis and the plot is shown in a sepparate window.
"""
r = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
barWidth = 0.75
for i in range(0,len(counts)):
height = 0
for j in range(len(counts[i])):
plt.bar(r[i], counts[i][j], bottom = height ,color=LS_COLOR[j],
edgecolor='#000000', width=barWidth, label=LS_CHECK[j])
height = np.add(height, counts[i][j])
plt.xticks(r, files)
plt.xlabel("Sample", fontweight='bold', fontsize='large')
plt.ylabel("Count", fontweight='bold', fontsize='large')
plt.legend()
plt.yticks(np.arange(0, 2000, 50))
plt.show()
| 17,807
|
def hash_file(path):
"""
Returns the hash of a file.
Based on https://stackoverflow.com/questions/22058048/hashing-a-file-in-python
"""
# Return error as hash, if file does not exist
if not os.path.exists(path):
return f"error hashing file, file does not exist: {path}"
# BUF_SIZE is totally arbitrary, change for your app!
BUF_SIZE = 65536 # lets read stuff in 64kb chunks!
# Use sha1
sha1 = hashlib.sha1()
# Read and hash file (with buffering)
with open(path, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
# Return hash
return sha1.hexdigest()
| 17,808
|
def get_section_names(target_dir, dir_name, ext="wav"):
"""
Get section name (almost equivalent to machine ID).
target_dir : str
base directory path
dir_name : str
sub directory name
    ext : str (default="wav")
file extension of audio files
return :
section_names : list [ str ]
list of section names extracted from the names of audio files
"""
    # build a glob query for the audio files
query = os.path.abspath(
"{target_dir}/{dir_name}/*.{ext}".format(
target_dir=target_dir, dir_name=dir_name, ext=ext
)
)
file_paths = sorted(glob.glob(query))
# extract section names
section_names = sorted(
list(
set(
itertools.chain.from_iterable(
[re.findall("section_[0-9][0-9]", ext_id) for ext_id in file_paths]
)
)
)
)
return section_names
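# Hypothetical usage (the directory layout is an assumption): files named like
# "<target_dir>/<dir_name>/section_00_source_test_normal_0001.wav" yield ["section_00", ...].
section_names = get_section_names("./dev_data/fan", "test", ext="wav")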
| 17,809
|
def get_user_input(prompt: str, current_setting: str):
"""
Get user input
:param prompt: prompt to display
:param current_setting: current value
    :return: the user's input, the current setting (on empty input), or 'q' to quit
"""
if current_setting != '':
print(f'-- Current setting: {current_setting}')
use_current = '/return to use current'
else:
use_current = ''
user_ip = ''
while user_ip == '':
user_ip = input(f'{prompt} [q to quit{use_current}]: ')
if user_ip.lower() == 'q':
break
if user_ip == '' and current_setting != '':
user_ip = current_setting
return user_ip
| 17,810
|
def about():
"""Provide a simple description of the package."""
    msg = f'''
# ===== nbev3devsim, version: {__version__} =====
The `nbev3devsim` package loads a simple 2D robot simulator based on ev3devsim into a Jupyter notebook widget.
You can test that key required packages are installed by running the command: nbev3devsim.test_install()
'''
print(msg)
| 17,811
|
def storeCalibrationParams(summary, xyzOff, xyzSlope, xyzSlopeT):
"""Store calibration parameters to output summary dictionary
:param dict summary: Output dictionary containing all summary metrics
:param list(float) xyzOff: intercept [x, y, z]
:param list(float) xyzSlope: slope [x, y, z]
:param list(float) xyzSlopeT: temperature slope [x, y, z]
:return: Calibration summary values written to dict <summary>
:rtype: void
"""
# store output to summary dictionary
summary['calibration-xOffset(g)'] = accUtils.formatNum(xyzOff[0], 4)
summary['calibration-yOffset(g)'] = accUtils.formatNum(xyzOff[1], 4)
summary['calibration-zOffset(g)'] = accUtils.formatNum(xyzOff[2], 4)
summary['calibration-xSlope'] = accUtils.formatNum(xyzSlope[0], 4)
summary['calibration-ySlope'] = accUtils.formatNum(xyzSlope[1], 4)
summary['calibration-zSlope'] = accUtils.formatNum(xyzSlope[2], 4)
summary['calibration-xSlopeTemp'] = accUtils.formatNum(xyzSlopeT[0], 4)
summary['calibration-ySlopeTemp'] = accUtils.formatNum(xyzSlopeT[1], 4)
summary['calibration-zSlopeTemp'] = accUtils.formatNum(xyzSlopeT[2], 4)
| 17,812
|
def class_is_u16_len(cls):
"""
    Return True if the class named by cls uses an initial uint16 length member
"""
ofclass = loxi_globals.unified.class_by_name(cls)
if not ofclass:
return False
if len(ofclass.members) < 1:
return False
m = ofclass.members[0]
if not isinstance(m, ir.OFLengthMember):
return False
if m.oftype != "uint16_t":
return False
return True
| 17,813
|
def uploadfile(ticket_id):
"""
    Attach a file to the ticket.
"""
if "file" not in request.files:
return "arquivo inválido"
filename = request.files.get("file").filename
maxfilesize = int(cfg("attachments", "max-size"))
blob = b""
filesize = 0
while True:
chunk = request.files.get("file").file.read(4096)
if not chunk:
break
chunksize = len(chunk)
if filesize + chunksize > maxfilesize:
return "erro: arquivo maior do que máximo permitido"
filesize += chunksize
blob += chunk
log.debug(type(blob))
blob = zlib.compress(blob)
username = current_user()
with db_trans() as c:
c.execute(
"""
insert into files (
ticket_id,
name,
user,
size,
contents
)
values (
:ticket_id,
:filename,
:username,
:filesize,
:blob
)
""",
{
"ticket_id": ticket_id,
"filename": filename,
"username": username,
"filesize": filesize,
"blob": blob,
},
)
c.execute(
"""
update tickets
set datemodified = datetime('now', 'localtime')
where id = :ticket_id
""",
{"ticket_id": ticket_id},
)
return redirect("/ticket/%s" % ticket_id)
| 17,814
|
def urlretrieve(url, path):
"""
Same as 'urllib.urlretrieve()', but with a nice reporthook to show
a progress bar.
If 'path' exists, doesn't download anything.
Args:
url (str): the url to retrieve
path (str): the path where to save the result of the url
"""
if os.path.exists(path):
print("Skipping: " + url)
else:
print("Downloading: " + url)
urllib.request.urlretrieve(url, path, reporthook)
| 17,815
|
def split(data, train_ids, test_ids, valid_ids=None):
"""Split data into train, test (and validation) subsets."""
datasets = {
"train": (
tuple(map(lambda x: x[train_ids], data[0])),
data[1][train_ids],
),
"test": (tuple(map(lambda x: x[test_ids], data[0])), data[1][test_ids]),
}
if valid_ids is not None:
datasets["valid"] = (
tuple(map(lambda x: x[valid_ids], data[0])),
data[1][valid_ids],
)
else:
datasets["valid"] = None
return datasets
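# Minimal sketch (data layout is an assumption): data[0] is a tuple of feature arrays
# and data[1] is the label array, all indexable with the id arrays.
import numpy as np
X = (np.arange(10).reshape(10, 1),)
y = np.arange(10)
parts = split((X, y), train_ids=np.arange(6), test_ids=np.arange(6, 8), valid_ids=np.arange(8, 10))
# parts["train"][1] -> array([0, 1, 2, 3, 4, 5])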
| 17,816
|
def parse_value_file(path):
"""return param: [(value type, value)]"""
data = {}
samples = [x.strip("\n").split("\t") for x in open(path)]
for row in samples:
parameter = row[0]
values = [x for x in row[1:] if x != SKIP_VAL]
if values != []:
if parameter not in data:
data[parameter] = []
data[parameter] += values
return data
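# Illustrative use (file contents are an assumption): a tab-separated file such as
#   temperature\t21.5\t22.0
#   pressure\t1013\t<SKIP_VAL>
# would parse to {'temperature': ['21.5', '22.0'], 'pressure': ['1013']}.
values = parse_value_file("values.tsv")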
| 17,817
|
def fizzbuzz(num):
"""
>>> fizzbuzz(15)
FizzBuzz
1
2
Fizz
4
Buzz
Fizz
7
8
Fizz
Buzz
11
Fizz
13
14
FizzBuzz
"""
for num in range(num + 1):
if num % 3 == 0 and num % 5 == 0:
print('FizzBuzz')
elif num % 3 == 0:
print('Fizz')
elif num % 5 == 0:
print('Buzz')
else:
print(num)
| 17,818
|
def WriteGroupedImages(info, group_name, images, blacklist = []):
""" Write a group of partition images to the OTA package,
and add the corresponding flash instructions to the recovery
script. Skip any images that do not have a corresponding
entry in recovery.fstab."""
for i in images:
if i.name not in blacklist:
WritePartitionImage(info, i, group_name)
| 17,819
|
def get_stop_words(stop_text, filename='ChineseStopWords.txt'):
"""读取指定停用词文件"""
_fp = os.path.join(stopwords_path, filename)
with open(_fp, 'r', encoding='utf-8') as f:
lines = f.readlines()
stop_words = [word.strip() for word in lines]
if stop_text:
input_stop_words = stop_text.strip().split('\n')
if input_stop_words:
stop_words.extend(input_stop_words)
return stop_words
| 17,820
|
def _is_comments_box(shape):
""" Checks if this shape represents a Comments question; RECTANGLE with a green outline """
if shape.get('shapeType') != 'RECTANGLE':
return False
color = get_dict_nested_value(shape, 'shapeProperties', 'outline', 'outlineFill', 'solidFill', 'color', 'rgbColor')
return 'blue' not in color and 'red' not in color and 'green' in color and color.get('green') == 1
| 17,821
|
def read_tree(path):
"""Returns a dict with {filepath: content}."""
if not os.path.isdir(path):
return None
out = {}
for root, _, filenames in os.walk(path):
for filename in filenames:
p = os.path.join(root, filename)
with open(p, 'rb') as f:
out[os.path.relpath(p, path)] = f.read()
return out
| 17,822
|
def test_solving_data_route_has_all_algorithms_at_each_complexity(testapp):
"""Test solving data route has all algorithms at each complexity."""
response = testapp.get("/api/data/solve")
assert len(response.json) == 32
assert 'tree' in response.json['0']
assert 'greedy' in response.json['0']
assert 'a_star' in response.json['0']
| 17,823
|
def get_config(cfg, name):
"""Given the argument name, read the value from the config file.
The name can be multi-level, like 'optimizer.lr'
"""
name = name.split('.')
suffix = ''
for item in name:
        assert item in cfg, f'attribute {item} not in cfg{suffix}'
cfg = cfg[item]
suffix += f'.{item}'
return cfg
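# Illustrative lookup (cfg contents are made up): nested keys are addressed with dots.
cfg = {'optimizer': {'lr': 0.001, 'momentum': 0.9}}
assert get_config(cfg, 'optimizer.lr') == 0.001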
| 17,824
|
def get_current_frame_content_entire_size(driver):
# type: (AnyWebDriver) -> ViewPort
"""
:return: The size of the entire content.
"""
try:
width, height = driver.execute_script(_JS_GET_CONTENT_ENTIRE_SIZE)
except WebDriverException:
raise EyesError('Failed to extract entire size!')
return dict(width=width, height=height)
| 17,825
|
def conv_slim_capsule(input_tensor,
input_dim,
output_dim,
layer_name,
input_atoms=8,
output_atoms=8,
stride=2,
kernel_size=5,
padding='SAME',
**routing_args):
"""Builds a slim convolutional capsule layer.
This layer performs 2D convolution given 5D input tensor of shape
`[batch, input_dim, input_atoms, input_height, input_width]`. Then refines
the votes with routing and applies Squash non linearity for each capsule.
Each capsule in this layer is a convolutional unit and shares its kernel over
the position grid and different capsules of layer below. Therefore, number
of trainable variables in this layer is:
kernel: [kernel_size, kernel_size, input_atoms, output_dim * output_atoms]
bias: [output_dim, output_atoms]
Output of a conv2d layer is a single capsule with channel number of atoms.
Therefore conv_slim_capsule is suitable to be added on top of a conv2d layer
with num_routing=1, input_dim=1 and input_atoms=conv_channels.
Args:
    input_tensor: tensor, of rank 5. Last two dimensions representing height
and width position grid.
input_dim: scalar, number of capsules in the layer below.
output_dim: scalar, number of capsules in this layer.
layer_name: string, Name of this layer.
input_atoms: scalar, number of units in each capsule of input layer.
output_atoms: scalar, number of units in each capsule of output layer.
stride: scalar, stride of the convolutional kernel.
kernel_size: scalar, convolutional kernels are [kernel_size, kernel_size].
padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.
**routing_args: dictionary {leaky, num_routing}, args to be passed to the
update_routing function.
Returns:
Tensor of activations for this layer of shape
`[batch, output_dim, output_atoms, out_height, out_width]`. If padding is
'SAME', out_height = in_height and out_width = in_width. Otherwise, height
and width is adjusted with same rules as 'VALID' in tf.nn.conv2d.
"""
with tf.variable_scope(layer_name):
# convolution. return [batch_size, 1, 32, 8, 6, 6]
kernel = variables.weight_variable(shape=[
kernel_size, kernel_size, input_atoms, output_dim * output_atoms
])
biases = variables.bias_variable([output_dim, output_atoms, 1, 1])
votes, votes_shape, input_shape = _depthwise_conv3d(
input_tensor, kernel, input_dim, output_dim, input_atoms, output_atoms,
stride, padding)
# convolution End
with tf.name_scope('routing'):
logit_shape = tf.stack([
input_shape[0], input_dim, output_dim, votes_shape[2], votes_shape[3]
])
biases_replicated = tf.tile(biases,
[1, 1, votes_shape[2], votes_shape[3]])
activations = _update_routing(
votes=votes,
biases=biases_replicated,
logit_shape=logit_shape,
num_dims=6,
input_dim=input_dim,
output_dim=output_dim,
**routing_args)
return activations
| 17,826
|
def main():
"""Main"""
argument_spec = vmware_argument_spec()
argument_spec.update(
database=dict(
type='dict',
options=dict(
max_connections=dict(type='int', default=50),
task_cleanup=dict(type='bool', default=True),
task_retention=dict(type='int', default=30),
event_cleanup=dict(type='bool', default=True),
event_retention=dict(type='int', default=30),
),
default=dict(
max_connections=50,
task_cleanup=True,
task_retention=30,
event_cleanup=True,
event_retention=30,
),
),
runtime_settings=dict(
type='dict',
options=dict(
unique_id=dict(type='int'),
managed_address=dict(type='str'),
vcenter_server_name=dict(type='str'),
),
),
user_directory=dict(
type='dict',
options=dict(
timeout=dict(type='int', default=60),
query_limit=dict(type='bool', default=True),
query_limit_size=dict(type='int', default=5000),
validation=dict(type='bool', default=True),
validation_period=dict(type='int', default=1440),
),
default=dict(
timeout=60,
query_limit=True,
query_limit_size=5000,
validation=True,
validation_period=1440,
),
),
mail=dict(
type='dict',
options=dict(
server=dict(type='str'),
sender=dict(type='str'),
),
default=dict(
server='',
sender='',
),
),
snmp_receivers=dict(
type='dict',
options=dict(
snmp_receiver_1_url=dict(type='str', default='localhost'),
snmp_receiver_1_enabled=dict(type='bool', default=True),
snmp_receiver_1_port=dict(type='int', default=162),
snmp_receiver_1_community=dict(type='str', default='public'),
snmp_receiver_2_url=dict(type='str', default=''),
snmp_receiver_2_enabled=dict(type='bool', default=False),
snmp_receiver_2_port=dict(type='int', default=162),
snmp_receiver_2_community=dict(type='str', default=''),
snmp_receiver_3_url=dict(type='str', default=''),
snmp_receiver_3_enabled=dict(type='bool', default=False),
snmp_receiver_3_port=dict(type='int', default=162),
snmp_receiver_3_community=dict(type='str', default=''),
snmp_receiver_4_url=dict(type='str', default=''),
snmp_receiver_4_enabled=dict(type='bool', default=False),
snmp_receiver_4_port=dict(type='int', default=162),
snmp_receiver_4_community=dict(type='str', default=''),
),
default=dict(
snmp_receiver_1_url='localhost',
snmp_receiver_1_enabled=True,
snmp_receiver_1_port=162,
snmp_receiver_1_community='public',
snmp_receiver_2_url='',
snmp_receiver_2_enabled=False,
snmp_receiver_2_port=162,
snmp_receiver_2_community='',
snmp_receiver_3_url='',
snmp_receiver_3_enabled=False,
snmp_receiver_3_port=162,
snmp_receiver_3_community='',
snmp_receiver_4_url='',
snmp_receiver_4_enabled=False,
snmp_receiver_4_port=162,
snmp_receiver_4_community='',
),
),
timeout_settings=dict(
type='dict',
options=dict(
normal_operations=dict(type='int', default=30),
long_operations=dict(type='int', default=120),
),
default=dict(
normal_operations=30,
long_operations=120,
),
),
logging_options=dict(default='info', choices=['none', 'error', 'warning', 'info', 'verbose', 'trivia']),
advanced_settings=dict(type='dict', default=dict(), required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
host_snmp = VmwareVcenterSettings(module)
host_snmp.ensure()
| 17,827
|
def p_op_mean0_update(prev_p_op_mean0: float, p_op_var0: float, op_choice: int):
"""0-ToM updates mean choice probability estimate"""
# Input variable transforms
p_op_var0 = np.exp(p_op_var0)
# Update
new_p_op_mean0 = prev_p_op_mean0 + p_op_var0 * (
op_choice - inv_logit(prev_p_op_mean0)
)
# For numerical purposes, according to the VBA package
new_p_op_mean0 = logit(inv_logit(new_p_op_mean0))
return new_p_op_mean0
| 17,828
|
def register_deployable_on_tier(ts, deployable, attributes):
"""
Deployable registration callback.
'deployable' is from table 'deployables'.
"""
# Add a route to this deployable.
pk = {'tier_name': deployable['tier_name'], 'deployable_name': deployable['deployable_name']}
row = ts.get_table('routing').get(pk)
if row is None:
row = ts.get_table('routing').add(pk)
# Apply optional attributes wholesale.
row.update(attributes)
| 17,829
|
def get_last_code(entry_point_name):
"""Return a `Code` node of the latest code executable of the given entry_point_name in the database.
    The database will be queried for the existence of an inpgen node.
    If none exists, a NotExistent error is raised.
:param entry_point_name: string
:return: the uuid of a inpgen `Code` node
:raise: aiida.common.exceptions.NotExistent
"""
from aiida.orm import QueryBuilder, Code
from aiida.common.exceptions import NotExistent
filters = {'attributes.input_plugin': {'==': entry_point_name}}
builder = QueryBuilder().append(Code, filters=filters)
builder.order_by({Code: {'ctime': 'asc'}})
results = builder.first()
if not results:
raise NotExistent(f'ERROR: Could not find any Code in the database with entry point: {entry_point_name}!')
else:
inpgen = results[0]
return inpgen.uuid
| 17,830
|
def main():
"""Primary S3 upload function."""
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
# walk the source directory. Ignore .git and any file in IGNORE
namelist = []
for root, dirs, files in os.walk(src_folder):
dirs[:] = [d for d in dirs if d not in IGNORE_FOLDERS]
for f in files:
if f not in IGNORE_FILES:
path = os.path.relpath(root, src_folder)
namelist.append(os.path.normpath(os.path.join(path, f)))
for name in namelist:
key = bucket.get_key(name)
if(key):
full_path = src_folder + "/" + name
full_path = os.path.abspath(full_path)
remote = (iso8601_to_timestamp(key.last_modified) - server_offset)
if (os.stat(full_path).st_mtime > remote):
stage_file = 'Updating File:'
else:
stage_file = False
else:
stage_file = "Creating File:"
if stage_file:
content = open(os.path.join(src_folder, name))
key = bucket.new_key(os.path.join(prefix, name))
type, encoding = mimetypes.guess_type(name)
type = type or 'application/octet-stream'
headers = {'Content-Type': type, 'x-amz-acl': 'public-read'}
states = [type]
# We only use HTTP 1.1 headers because they are
# relative to the time of download
# instead of being hardcoded.
headers['Cache-Control'] = 'max-age=%d' % (3600 * 24 * 365)
if type in COMPRESSIBLE:
headers['Content-Encoding'] = 'gzip'
compressed = BytesIO()
gz = gzip.GzipFile(filename=name, fileobj=compressed, mode='w')
gz.writelines(content)
gz.close()
                content.close()
content = BytesIO(compressed.getvalue())
states.append('gzipped')
states = ', '.join(states)
print('{} > {} ({})'.format(stage_file, key.name, states))
key.set_contents_from_file(content, headers)
content.close()
else:
print('Not updated: {}'.format(name))
| 17,831
|
def save_birthday_to_dynamodb(fields: dict) -> None:
"""
Saves birthday to dynamodb, with proper time and an auto generated unsubscribe key.
:param fields: Fields to be saved in dynamodb.
:return: None.
"""
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('BirthdaysTable')
table.put_item(Item=fields)
| 17,832
|
def digamma(x):
"""Digamma function.
Parameters
----------
x : array-like
Points on the real line
out : ndarray, optional
Output array for the values of `digamma` at `x`
Returns
-------
ndarray
Values of `digamma` at `x`
"""
return _digamma(x)
| 17,833
|
def get_time_index_dataset_from_file(the_file: h5py.File) -> h5py.Dataset:
"""Return the dataset for time indices from the H5 file object."""
return the_file[TIME_INDICES]
| 17,834
|
def get_database_connection():
"""Возвращает соединение с базой данных Redis, либо создаёт новый, если он ещё не создан."""
global _database
if _database is None:
database_password = os.getenv("DB_PASSWORD", default=None)
database_host = os.getenv("DB_HOST", default='localhost')
database_port = os.getenv("DB_PORT", default=6379)
_database = Client(
host=database_host,
port=database_port,
password=database_password,
decode_responses=True
)
return _database
| 17,835
|
def main():
"""
    Create the hdf5 file + datasets, iterate through the folders' DICOM imgs
Normalize the imgs, create mini patches and write them to the hdf5 file system
"""
with h5py.File(PREPROCESSED_PATH + str(PATCH_DIM) + 'x' + str(PATCH_DIM) + 'x' + str(NUM_SLICES) + '-patch.hdf5', 'w') as HDF5:
# Datasets for 3d patch tensors & class_id/x,y,z coords
total_patch_dim = PATCH_DIM * PATCH_DIM * NUM_SLICES
patch_dset = HDF5.create_dataset('input', (1,total_patch_dim), maxshape=(None,total_patch_dim)) #patches = inputs
class_dset = HDF5.create_dataset('output', (1,1), maxshape=(None,1), dtype=int) #classes = outputs
notrain_dset = HDF5.create_dataset('notrain', (1,1), maxshape=(None,1), dtype=int) # test holdout
centroid_dset = HDF5.create_dataset('centroid', (1,3), maxshape=(None,3), dtype=float)
uuid_dset = HDF5.create_dataset('uuid', (1,1), maxshape=(None,None), dtype=h5py.special_dtype(vlen=bytes))
subset_dset = HDF5.create_dataset('subsets', (1,1), maxshape=(None,1), dtype=int)
HDF5['input'].attrs['lshape'] = (PATCH_DIM, PATCH_DIM, NUM_SLICES, CHANNELS) # (Height, Width, Depth)
print("Successfully initiated the HDF5 file. Ready to recieve data!")
#### ---- Iterating through a CT scan ---- ####
counter = 0
scan_number = 1
first_patch = True # flag for saving first img to hdf5
for img_file, subset_id in tqdm(zip(FILE_LIST,SUBSET_LIST)):
print("Processing CT Scan: {}".format(scan_number))
base=os.path.basename(img_file) # Strip the filename out
seriesuid = os.path.splitext(base)[0] # Get the filename without the extension
mini_df = DF_NODE[DF_NODE["seriesuid"] == seriesuid]
#### ---- Downsampling Class 0s ---- ####
mini_df = downsample_class_0(mini_df)
# Load the CT scan (3D .mhd file)
# Numpy is z,y,x and SimpleITK is x,y,z -- (note the ordering of dimesions)
itk_img = sitk.ReadImage(img_file)
# Normalize the image spacing so that a voxel is 1x1x1 mm in dimension
itk_img = normalize_img(itk_img)
# SimpleITK keeps the origin and spacing information for the 3D image volume
img_array = sitk.GetArrayFromImage(itk_img) # indices are z,y,x (note the ordering of dimesions)
img_array = np.pad(img_array, int(PATCH_DIM), mode="constant", constant_values=-2000)#, constant_values=0) #0 padding 3d array for patch clipping issue
slice_z, height, width = img_array.shape
origin = np.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm) - Not same as img_array
spacing = np.array(itk_img.GetSpacing()) # spacing of voxels in world coordinates (mm)
scan_number += 1
#### ---- Iterating through a CT scan's slices ---- ####
for candidate_idx, cur_row in mini_df.iterrows(): # Iterate through all candidates (in dataframe)
# This is the real world x,y,z coordinates of possible nodule (in mm)
class_id = cur_row["class"] #0 for false, 1 for true nodule
no_train = cur_row["no_train"]
candidate_x = cur_row["coordX"] + PATCH_DIM
candidate_y = cur_row["coordY"] + PATCH_DIM
candidate_z = cur_row["coordZ"] + PATCH_DIM
center = np.array([candidate_x, candidate_y, candidate_z]) # candidate center
voxel_center = np.rint(np.abs(center / spacing - origin)).astype(int) # candidate center in voxels
#### ---- Generating the 2d/2.5d/3d Patch ---- ####
bbox = make_bbox(voxel_center, width, height, slice_z, origin, class_id) #return bounding box
patch = img_array[
bbox[2][0]:bbox[2][1],
bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1]]
# DEBUG print(patch.shape) #uncomment to debug shape size being written
#### ---- Prepare Data for HDF5 insert ---- ####
patch = patch.ravel().reshape(1,-1) #flatten img to (1 x N)
if patch.shape[1] != total_patch_dim: # Catch any class 0 bbox issues and pass them
counter += 1
continue
#minor fix to subtract the PATCH_DIM from each centroid when saving to HDF5 to match candidates_V2.csv
centroid_data = np.array([candidate_x - PATCH_DIM,candidate_y - PATCH_DIM,candidate_z - PATCH_DIM]).ravel().reshape(1,-1)
seriesuid_str = np.string_(seriesuid) #set seriesuid str to numpy.bytes_ type
#### ---- Write Data to HDF5 insert ---- ####
hdf5_dsets = [patch_dset, class_dset, notrain_dset, uuid_dset, subset_dset, centroid_dset]
hdf5_data = [patch, class_id, no_train, seriesuid_str, subset_id, centroid_data]
for dset_and_data in zip(hdf5_dsets,hdf5_data):
if first_patch == True:
write_to_hdf5(dset_and_data,first_patch=True)
else:
write_to_hdf5(dset_and_data)
first_patch = False
print("Did not write: " + str(counter) + " patches to HDF5")
print("All {} CT Scans Processed and Individual Patches written to HDF5!".format(scan_number))
print('\a')
| 17,836
|
def is_feature_enabled(feature_name):
"""A short-form method for server-side usage. This method evaluates and
returns the values of the feature flag, using context from the server only.
Args:
feature_name: str. The name of the feature flag that needs to
be evaluated.
Returns:
bool. The value of the feature flag, True if it's enabled.
"""
return _evaluate_feature_flag_value_for_server(feature_name)
| 17,837
|
def str_to_size(size_str):
"""
Receives a human size (i.e. 10GB) and converts to an integer size in
mebibytes.
Args:
size_str (str): human size to be converted to integer
Returns:
int: formatted size in mebibytes
Raises:
        ValueError: in case the size provided is invalid
"""
if size_str is None:
return None
# no unit: assume mebibytes as default and convert directly
if size_str.isnumeric():
return int(size_str)
size_str = size_str.upper()
# check if size is non-negative number
if size_str.startswith('-'):
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# decimal units are converted to bytes and then to mebibytes
dec_units = ('KB', 'MB', 'GB', 'TB')
for index, unit in enumerate(dec_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(size_str[:-2]) * pow(1000, index+1)
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# result is returned in mebibytes
return int(size_int / pow(1024, 2))
    # binary units are just divided/multiplied by powers of 2
bin_units = ('KIB', 'MIB', 'GIB', 'TIB')
for index, unit in enumerate(bin_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(int(size_str[:-3]) * pow(1024, index-1))
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
return size_int
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
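# Quick sanity checks (illustrative, not from the source):
assert str_to_size('1024') == 1024   # no unit: already mebibytes
assert str_to_size('1GB') == 953     # 10**9 bytes -> mebibytes, truncated
assert str_to_size('1GIB') == 1024   # binary unit: 1 GiB = 1024 MiB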
| 17,838
|
def point_line_distance(point, line):
"""Distance between a point and great circle arc on a sphere."""
start, end = line
if start == end:
dist = great_circle_distance(point, start, r=1)/np.pi*180
else:
dist = cross_track_distance(point, line, r=1)
dist = abs(dist/np.pi*180)
return dist
| 17,839
|
def experiment(save_key, model, data_splits_file, batch_size, active_str, muxrate):
"""
This should be common code for all experiments
"""
exper_dir = config.exper_output
(save_path, _, plot_save_path,
model_scores_path, _, _
) = utils_train.get_paths(exper_dir, save_key)
model_save_path = '/scratch/hc2945/data/models/'
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
model_save_path = os.path.join(model_save_path, "{}.pkl".format(save_key))
'''
# create data splits file if it doesnt exist
if not os.path.exists(
os.path.join(exper_dir, 'data_splits.json')):
create_data_splits(path_to_metadata_file='./mtracks_info.json', exper_dir=exper_dir)
'''
model, history, dat = train(model, model_save_path, data_splits_file,
batch_size, active_str, muxrate)
run_evaluation(exper_dir, save_key, history, dat, model)
print("Done! Results saved to {}".format(save_path))
| 17,840
|
def uniq_by(array, iteratee=None):
"""This method is like :func:`uniq` except that it accepts iteratee which
is invoked for each element in array to generate the criterion by which
uniqueness is computed. The order of result values is determined by the
order they occur in the array. The iteratee is invoked with one argument:
``(value)``.
Args:
array (list): List to process.
iteratee (mixed, optional): Function to transform the elements of the
arrays. Defaults to :func:`.identity`.
Returns:
list: Unique list.
Example:
>>> uniq_by([1, 2, 3, 1, 2, 3], lambda val: val % 2)
[1, 2]
.. versionadded:: 4.0.0
"""
return list(iterunique(array, iteratee=iteratee))
| 17,841
|
def process_lvq_pak(dataset_name='lvq-pak', kind='all', numeric_labels=True, metadata=None):
"""
kind: {'test', 'train', 'all'}, default 'all'
numeric_labels: boolean (default: True)
if set, target is a vector of integers, and label_map is created in the metadata
to reflect the mapping to the string targets
"""
untar_dir = interim_data_path / dataset_name
unpack_dir = untar_dir / 'lvq_pak-3.1'
if kind == 'train':
data, target = read_space_delimited(unpack_dir / 'ex1.dat', skiprows=[0,1])
elif kind == 'test':
data, target = read_space_delimited(unpack_dir / 'ex2.dat', skiprows=[0])
elif kind == 'all':
data1, target1 = read_space_delimited(unpack_dir / 'ex1.dat', skiprows=[0,1])
data2, target2 = read_space_delimited(unpack_dir / 'ex2.dat', skiprows=[0])
data = np.vstack((data1, data2))
target = np.append(target1, target2)
else:
raise Exception(f'Unknown kind: {kind}')
if numeric_labels:
if metadata is None:
metadata = {}
mapped_target, label_map = normalize_labels(target)
metadata['label_map'] = label_map
target = mapped_target
dset_opts = {
'dataset_name': dataset_name,
'data': data,
'target': target,
'metadata': metadata
}
return dset_opts
| 17,842
|
def StrToPtkns(path_string):
""" The inverse of PtknsToStr(), this function splits a string like
'/usr/local/../bin/awk' into ['usr','local','..','bin','awk'].
For illustrative purposes only. Use text.split('/') directly instead."""
    return path_string.split('/')
| 17,843
|
def _check_symmgroup(autom, symmgroup):
"""Asserts that symmgroup consists of automorphisms listed in autom and has no duplicate elements."""
for el in symmgroup.to_array():
assert group.Permutation(el) in autom.elems
assert symmgroup == symmgroup.remove_duplicates()
assert isinstance(symmgroup[0], group.Identity)
| 17,844
|
def load_key(file, callback=util.passphrase_callback):
# type: (AnyStr, Callable) -> EC
"""
Factory function that instantiates a EC object.
:param file: Names the filename that contains the PEM representation
of the EC key pair.
:param callback: Python callback object that will be invoked
if the EC key pair is passphrase-protected.
"""
with BIO.openfile(file) as bio:
return load_key_bio(bio, callback)
| 17,845
|
def glob_path_match(path: str, pattern_list: list) -> bool:
"""
Checks if path is in a list of glob style wildcard paths
:param path: path of file / directory
:param pattern_list: list of wildcard patterns to check for
:return: Boolean
"""
return any(fnmatch(path, pattern) for pattern in pattern_list)
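# Example (illustrative patterns): ignore anything under a build/ directory or any *.pyc file.
assert glob_path_match("src/build/lib/module.py", ["*/build/*", "*.pyc"])
assert not glob_path_match("src/main.py", ["*/build/*", "*.pyc"])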
| 17,846
|
def sample_df(df, col_name='family', n_sample_per_class=120, replace = False):
"""
    samples the dataframe per class of col_name; when replace is False and a class
    has fewer rows than n_sample_per_class, all of its rows are kept without duplication
"""
samples = df.groupby(col_name)
list_cls = df[col_name].unique()
df_lst = []
for cls in list_cls:
cls_df = samples.get_group(cls)
if (cls_df.shape[0] < n_sample_per_class) and (replace==False):
cls_sample = cls_df
else:
cls_sample = cls_df.sample(n=n_sample_per_class,replace=replace,random_state=42)
df_lst.append(cls_sample)
df_sampled = pd.concat(df_lst, sort=False)
df_sampled = shuffle(df_sampled)
return df_sampled
| 17,847
|
def inventory_user_policies_header(encode):
"""generate output header"""
if encode == 'on':
return misc.format_line((
base64.b64encode(str("Account")),
base64.b64encode(str("UserName")),
base64.b64encode(str("PolicyName")),
base64.b64encode(str("Policy"))
))
else:
return misc.format_line((
str("Account"),
str("UserName"),
str("PolicyName"),
str("Policy")
))
| 17,848
|
def you_rock(N, R, d):
"""
N: int, number of samples, e.g., 1000.
R: int, maximum feature value, e.g., 100.
d: int, number of features, e.g., 3.
"""
numpy.random.seed() # re-random the seed
hits = 0
for _ in range(N):
X = numpy.random.randint(1, R, (8, d)) # generate training samples
y = numpy.array([+1, +1, +1, +1, -1, -1, -1, -1])
_, feature_id, bts = grid_search_split_midpoint(X, y)
clf = sklearn.tree.DecisionTreeClassifier(max_depth=1)
clf = clf.fit(X, y)
if clf.tree_.feature[0] == feature_id and clf.tree_.threshold[0] == bts:
hits += 1
print("your Decision tree is {:2.2%} consistent with Scikit-learn's result.".format(hits / N))
| 17,849
|
def fields_dict(cls):
"""
Return an ordered dictionary of ``attrs`` attributes for a class, whose
keys are the attribute names.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: an ordered dict where keys are attribute names and values are
`attr.Attribute`\\ s. This will be a `dict` if it's
naturally ordered like on Python 3.6+ or an
:class:`~collections.OrderedDict` otherwise.
.. versionadded:: 18.1.0
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return ordered_dict(((a.name, a) for a in attrs))
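# Minimal sketch (assumes the attrs package is installed and provides the names used above):
import attr

@attr.s
class Point:
    x = attr.ib()
    y = attr.ib()

fields_dict(Point)   # {'x': Attribute(name='x', ...), 'y': Attribute(name='y', ...)}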
| 17,850
|
def cdlxsidegap3methods(
client,
symbol,
timeframe="6m",
opencol="open",
highcol="high",
lowcol="low",
closecol="close",
):
"""This will return a dataframe of upside/downside gap three methods for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
opencol (string): column to use to calculate
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
val = t.CDLXSIDEGAP3METHODS(
df[opencol].values.astype(float),
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
)
return pd.DataFrame(
{
opencol: df[opencol].values,
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"cdlxsidegap3methods": val,
}
)
| 17,851
|
def add_execution_path(new_path):
"""
Add a path to sys.path
With a given path
- verify if path already in sys.path
- if not add it
With a given list of paths
- do the same for each path
    :param new_path: a path or a list of paths to add
    :type new_path: str or unicode or list
:return: Nothing
:rtype: None or ValueError if path is neither str nor unicode
"""
if isinstance(new_path, str) or isinstance(new_path, unicode):
new_path = [new_path]
if isinstance(new_path, list):
for p in new_path:
if DEBUG:
print("Trying to add {} to execution path".format(p))
if not p in sys.path:
sys.path.append(p)
if DEBUG:
print("Path {} added !".format(p))
else:
if DEBUG:
print("Path {} already in execution path ! Nothing done...".format(p))
else:
raise ValueError("'new_path' parameter : {}, is neither str nor list !".format(new_path))
| 17,852
|
def process_triggers_task(**kwargs):
"""Task form - wraps call to testable function `fire_trigger_events` """
# Include within function as not all applications include the blueprint
from portal.trigger_states.empro_states import fire_trigger_events
fire_trigger_events()
| 17,853
|
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _pyre().finditer(pattern, string, flags)
| 17,854
|
def nufft_j(x, y, freq = None, period_max=1., period_min=.5/24, window=False, oversamp=10.):
"""
nufft_j(x, y, period_max=1.,
period_min=.5/24, window=False, oversamp=10.):
Basic STFT algorithm
for evenly sampled data
"""
srt = np.argsort(x)
x = x[srt] # get sorted x, y arrays
y = y[srt]
if freq is None:
# Get a good frequency sampling, based on scargle in IDL
# freq = LombScargle(x,y).autofrequency()
# minimum_frequency=1./period_max,maximum_frequency=1./period_min)
freq = freq_grid(x,fmin=1./period_max,fmax=1./period_min,oversamp=oversamp)
# create array to hold fft results
fft = np.zeros_like(freq)
if window:
np.absolute(nufft.nufft3(x,y/y,freq*np.pi*2),out=fft)
else:
np.absolute(nufft.nufft3(x,y-np.nanmean(y),freq*np.pi*2),out=fft)
return fft,freq
| 17,855
|
def logger(verbosity=levels['error'], log_file=None):
"""Create a logger which streams to the console, and optionally a file."""
# create/get logger for this instance
logger = logging.getLogger(__name__)
logger.setLevel(levels['debug'])
fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# with stream (console) handle
ch = logging.StreamHandler()
ch.setLevel(verbosity)
ch.setFormatter(fmt)
logger.addHandler(ch)
# optionally with file handle
if log_file:
log_file.setFormatter(fmt)
logger.addHandler(log_file)
return logger
| 17,856
|
def head(input_file, in_format, nrows):
"""
    Print the first nrows rows of a table stored in PARQUET, TSV/CSV or HDF5 format (guessed when in_format is AUTO)
"""
# Guess format if not specified:
if in_format.upper() == "AUTO":
in_format = utils.guess_format(input_file)
# Read PARQUET:
if in_format.upper() == "PARQUET":
df = pd.read_parquet(input_file)
print(df.head(nrows), file=sys.stdout)
if in_format.upper() == "TSV" or in_format.upper() == "CSV":
df = pd.read_csv(
input_file, sep="\t" if in_format.upper() == "TSV" else ",", nrows=nrows
)
print(df, file=sys.stdout)
elif in_format.upper() == "HDF5":
h = h5py.File(input_file, "r")
dct = {k: h[k][:nrows] for k in h.keys()}
h.close()
df = pd.DataFrame.from_dict(dct)
print(df, file=sys.stdout)
return 0
| 17,857
|
def getNamespacePermissions(paths):
"""Get L{Namespace}s and L{NamespacePermission}s for the specified paths.
@param paths: A sequence of L{Namespace.path}s to get L{Namespace}s and
L{NamespacePermission}s for.
@return: A C{ResultSet} yielding C{(Namespace, NamespacePermission)}
2-tuples for the specified L{Namespace.path}s.
"""
store = getMainStore()
return store.find((Namespace, NamespacePermission),
NamespacePermission.namespaceID == Namespace.id,
Namespace.path.is_in(paths))
| 17,858
|
def serialize_engine(engine, output_path: str):
"""
Serializes engine to store it on disk
:param engine: engine to serialize
:param output_path: path to save the engine
:return: None
"""
assert os.path.exists(output_path)
trt.utils.write_engine_to_file(output_path, engine.serialize())
print("Model serialized at:", output_path)
| 17,859
|
def plot(x, y, ey=[], ex=[], frame=[], kind="scatter", marker_option=".",
ls="-", lw=1, label="", color="royalblue", zorder=1, alpha=1.,
output_folder="", filename=""):
"""
Erstellt einen Plot (plot, scatter oder errorbar).
Parameters
----------
x : array-like
x-Werte
y : array-like
y-Werte
ey : array_like
Fehler auf die y-Werte
ex : array_like
Fehler auf die x-Werte
kind : string
Die Art des plots
Möglich sind "plot" (default), "scatter" und "errorbar".
marker_option : string
Definiert die Option marker bei Plottyp "plot" oder "scatter" sowie
die Option fmt bei Plottyp "errorbar".
ls : string
linestyle
lw : float
linewidth
zorder : int
Die "Ebene" der zu plottenden Daten
return frame
"""
#error arrays
if len(ex)==1:
ex = np.ones(len(x))*ex[0]
elif ex==[]:
ex = np.zeros(len(x))
if len(ey)==1:
ey = np.ones(len(y))*ey[0]
#plotting
fig, plot = plt.subplots(1,1) if frame == [] else frame
if kind=="plot":
plot.plot(x, y, color=color, marker=marker_option, ls=ls, lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="scatter":
plot.scatter(x, y, color=color, marker=marker_option, lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="errorbar":
plot.errorbar(x, y, ey, ex, color=color, fmt=marker_option, ls="", lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="bar":
plot.bar(x, y, color=color, label=label, zorder=zorder, alpha=alpha)
#saving plot
if filename!="":
fig.savefig(output_folder+filename,bbox_inches='tight',pad_inches=pad_inches)
return [fig,plot]
| 17,860
|
def find_nominal_hv(filename, nominal_gain):
"""
Finds nominal HV of a measured PMT dataset
Parameters
----------
filename: string
nominal gain: float
gain for which the nominal HV should be found
Returns
-------
nominal_hv: int
nominal HV
"""
f = h5py.File(filename, "r")
gains = []
hvs = []
keys = f.keys()
for key in keys:
gains.append(f[key]["fit_results"]["gain"][()])
hvs.append(int(key))
f.close()
gains = np.array(gains)
hvs = np.array(hvs)
diff = abs(np.array(gains) - nominal_gain)
nominal_hv = int(hvs[diff == np.min(diff)])
return nominal_hv
| 17,861
|
def parse_match(field, tokens):
"""Parses a match or match_phrase node
:arg field: the field we're querying on
:arg tokens: list of tokens to consume
:returns: list of match clauses
"""
clauses = []
while tokens and tokens[-1] not in (u'OR', u'AND'):
token = tokens.pop()
if token.startswith(u'"'):
clauses.append(build_match_phrase(field, token[1:-1]))
else:
clauses.append(build_match(field, token))
return clauses
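# Illustrative call (build_match and build_match_phrase are assumed module helpers):
# tokens are consumed from the end of the list until an OR/AND operator is reached.
tokens = [u'AND', u'"exact phrase"', u'quick']
clauses = parse_match(u'title', tokens)   # two clauses; tokens is left as [u'AND']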
| 17,862
|
async def test_nersc_mover_do_work_no_results(config, mocker):
"""Test that _do_work goes on vacation when the LTA DB has no work."""
logger_mock = mocker.MagicMock()
dwc_mock = mocker.patch("lta.nersc_mover.NerscMover._do_work_claim", new_callable=AsyncMock)
dwc_mock.return_value = False
p = NerscMover(config, logger_mock)
await p._do_work()
dwc_mock.assert_called()
| 17,863
|
def get_filesystem(namespace):
"""
Returns a patched pyfilesystem for static module storage based on
`DJFS_SETTINGS`. See `patch_fs` documentation for additional details.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
"""
if DJFS_SETTINGS['type'] == 'osfs':
return get_osfs(namespace)
elif DJFS_SETTINGS['type'] == 's3fs':
return get_s3fs(namespace)
else:
raise AttributeError("Bad filesystem: " + str(DJFS_SETTINGS['type']))
| 17,864
|
def sym_normalize_adj(adj):
"""symmetrically normalize adjacency matrix"""
adj = sp.coo_matrix(adj)
degree = np.array(adj.sum(1)).flatten()
d_inv_sqrt = np.power(np.maximum(degree, np.finfo(float).eps), -0.5)
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
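# Small worked example (illustrative): D^-1/2 A D^-1/2 for a 2-node graph with
# self-loops, so every degree is 2 and every entry becomes 1/2.
import numpy as np
A = np.array([[1.0, 1.0], [1.0, 1.0]])
sym_normalize_adj(A).toarray()   # [[0.5, 0.5], [0.5, 0.5]]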
| 17,865
|
def prepare_label(input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=n_classes)
return input_batch
| 17,866
|
def test_quantity_pickelability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
| 17,867
|
def find_index_halfmax(data1d):
"""
Find the two indices at half maximum for a bell-type curve (non-parametric). Uses center of mass calculation.
:param data1d:
:return: xmin, xmax
"""
# normalize data between 0 and 1
data1d = data1d / float(np.max(data1d))
# loop across elements and stops when found 0.5
for i in range(len(data1d)):
if data1d[i] > 0.5:
break
# compute center of mass to get coordinate at 0.5
xmin = i - 1 + (0.5 - data1d[i - 1]) / float(data1d[i] - data1d[i - 1])
# continue for the descending slope
for i in range(i, len(data1d)):
if data1d[i] < 0.5:
break
# compute center of mass to get coordinate at 0.5
xmax = i - 1 + (0.5 - data1d[i - 1]) / float(data1d[i] - data1d[i - 1])
# display
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(src1d)
# plt.plot(xmin, 0.5, 'o')
# plt.plot(xmax, 0.5, 'o')
# plt.savefig('./normalize1d.png')
return xmin, xmax
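# Illustrative check (made-up data): for a Gaussian bell curve the returned indices
# bracket the full width at half maximum (~2.355 * sigma).
data = np.exp(-((np.arange(100) - 50.0) ** 2) / (2 * 10.0 ** 2))
xmin, xmax = find_index_halfmax(data)
# xmax - xmin is approximately 23.5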
| 17,868
|
def format_autoupdate_jira_msg(
message_body: str, header_body: Optional[str] = None
) -> str:
"""
Format a JIRA message with useful headers.
An "Automated JIRA Update" title will be added,
as well as either a URL link if a ``BUILD_URL`` env variable is present,
or a note indicating a manual run with user id otherwise.
Args:
message_body: the body of the message
header_body: a header to be added with ``h2`` tag
Returns:
a formatted message with headers
"""
message = "h2. {}".format(header_body) if header_body else ""
message += "\n\nAutomated JIRA Update:\n\n{}\n\n{}".format(
_build_source(), message_body
)
return message
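# Illustrative output (assumes _build_source() returns a manual-run note when BUILD_URL is unset):
print(format_autoupdate_jira_msg("Deployed build 1.2.3", header_body="Deployment"))
# h2. Deployment
#
# Automated JIRA Update:
#
# Manual run by <user>
#
# Deployed build 1.2.3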
| 17,869
|
def unbind_contextvars(*args):
"""
Remove keys from the context-local context.
Use this instead of :func:`~structlog.BoundLogger.unbind` when you want to
remove keys from a global (context-local) context.
.. versionadded:: 20.1.0
"""
ctx = _get_context()
for key in args:
ctx.pop(key, None)
| 17,870
|
def _unescape_token(escaped_token):
"""Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
"""
def match(m):
if m.group(1) is None:
return "_" if m.group(0) == "\\u" else "\\"
try:
return chr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return "\u3013" # Unicode for undefined character.
trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
return _UNESCAPE_REGEX.sub(match, trimmed)
| 17,871
|
def create_option_learner(action_space: Box) -> _OptionLearnerBase:
"""Create an option learner given its name."""
if CFG.option_learner == "no_learning":
return KnownOptionsOptionLearner()
if CFG.option_learner == "oracle":
return _OracleOptionLearner()
if CFG.option_learner == "direct_bc":
return _DirectBehaviorCloningOptionLearner(action_space)
if CFG.option_learner == "implicit_bc":
return _ImplicitBehaviorCloningOptionLearner(action_space)
if CFG.option_learner == "direct_bc_nonparameterized":
return _DirectBehaviorCloningOptionLearner(action_space,
is_parameterized=False)
raise NotImplementedError(f"Unknown option_learner: {CFG.option_learner}")
| 17,872
|
def close() -> None:
"""
Should be called at the end of the program - however, not required unless Tensorboard is used
"""
global summary_writer
if summary_writer:
summary_writer.close()
| 17,873
|
def parse_foochow_romanized_phrase(phrase, allow_omit_ingbing = True):
"""Parse a dash-separated phrase / word in Foochow Romanized."""
syllables = phrase.strip().split('-')
result = []
for syllable in syllables:
try:
parsed = FoochowRomanizedSyllable.from_string(syllable, allow_omit_ingbing)
result.append(parsed)
except:
raise ValueError("%s is not a valid Foochow Romanized syllable.", syllable)
return result
| 17,874
|
def set_surf_file(filename):
"""
Function prepares h5py file for storing loss function values
:param filename: Filename of a surface file
"""
xmin, xmax, xnum = -1, 2, 20
ymin, ymax, ynum = -1, 2, 20
if filename.exists():
return
with h5py.File(filename, 'a') as fd:
xcoord = np.linspace(xmin, xmax, xnum)
fd["xcoordinates"] = xcoord
ycoord = np.linspace(ymin, ymax, ynum)
fd["ycoordinates"] = ycoord
shape = (len(xcoord), len(ycoord))
losses = -np.ones(shape=shape)
fd["loss"] = losses
| 17,875
|
async def kick(ctx, member : discord.Member):
"""| Kicks a member. Don't try this!"""
try:
await member.kick(reason=None)
await ctx.send("🦵 Get lost, "+member.mention) # Kickee kickee, heheee XD
except:
await ctx.send("""Why should I? 🤷♂️""")
| 17,876
|
def _update(dict_merged: _DepDict, dict_new: _DepDict) -> _DepDict:
"""
    Merge a dictionary `dict_new` into `dict_merged`, raising a ValueError if
    there is a conflicting (key, value) pair.
"""
for k, v in dict_new.items():
v = dict_new[k]
if k in dict_merged:
if v != dict_merged[k]:
raise ValueError(
"Key '%s' is assigned to different values '%s' and '%s'"
% (k, v, dict_merged[k])
)
else:
dict_merged[k] = v
return dict_merged
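# Illustrative behaviour (made-up dicts):
merged = _update({"a": 1}, {"b": 2})    # -> {'a': 1, 'b': 2}
# _update({"a": 1}, {"a": 2})           # would raise ValueError: conflicting values for 'a'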
| 17,877
|
def date_convert(value):
"""
    Convert a date string to the database date type.
    :param value: date string in '%Y/%m/%d' format
    :return: the parsed date, or today's date if parsing fails
"""
try:
create_date = datetime.strptime(value, '%Y/%m/%d').date()
except Exception as e:
create_date = datetime.now().date()
return create_date
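# Illustrative calls (dates are made up): valid strings are parsed, anything else
# falls back to today's date.
date_convert('2021/03/15')    # datetime.date(2021, 3, 15)
date_convert('not-a-date')    # today's date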
| 17,878
|
def discriminator_txt2img_resnet(input_images, t_txt, is_train=True, reuse=False):
""" 64x64 + (txt) --> real/fake """
# https://github.com/hanzhanggit/StackGAN/blob/master/stageI/model.py
# Discriminator with ResNet : line 197 https://github.com/reedscot/icml2016/blob/master/main_cls.lua
w_init = tf.random_normal_initializer(stddev=0.02)
gamma_init=tf.random_normal_initializer(1., 0.02)
df_dim = 64 # 64 for flower, 196 for MSCOCO
s = 64 # output image size [64]
s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)
tl.layers.set_name_reuse(reuse)
net_in = Input(input_images)
net_h0 = Conv2d(df_dim, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2), padding='SAME', W_init=w_init, name='d_h0/conv2d')(net_in)
net_h1 = Conv2d(df_dim * 2, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h1/conv2d')(net_h0)
net_h1 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h1/batchnorm')(net_h1)
net_h2 = Conv2d(df_dim * 4, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h2/conv2d')(net_h1)
net_h2 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h2/batchnorm')(net_h2)
net_h3 = Conv2d(df_dim * 8, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h3/conv2d')(net_h2)
net_h3 = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h3/batchnorm')(net_h3)
net = Conv2d(df_dim * 2, (1, 1), (1, 1), act=None, padding='VALID', W_init=w_init, b_init=None, name='d_h4_res/conv2d')(net_h3)
net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm')(net)
net = Conv2d(df_dim * 2, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d2')(net)
net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm2')(net)
net = Conv2d(df_dim * 8, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d3')(net)
net = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm3')(net)
net_h4 = Elementwise(act=lambda x: tl.act.lrelu(x, 0.2), combine_fn=tf.add, name='d_h4/add')([net_h3, net])
# net_h4.outputs = tl.act.lrelu(net_h4.outputs, 0.2)
if t_txt is not None:
net_in2 = Input(t_txt)
#net_txt = Dense(n_units=t_dim, act=lambda x: tl.act.lrelu(x, 0.2), W_init=w_init, name='d_reduce_txt/dense')(net_txt)
net_txt = ExpandDims(1, name='d_txt/expanddim1')(net_in2)
net_txt = ExpandDims(1, name='d_txt/expanddim2')(net_txt)
net_txt = Tile([1, 4, 4, 1], name='d_txt/tile')(net_txt)
net_h4_concat = Concat(concat_dim=3, name='d_h3_concat')([net_h4, net_txt])
# 243 (ndf*8 + 128 or 256) x 4 x 4
net_h4 = Conv2d(df_dim * 8, (1, 1), (1, 1), padding='VALID', W_init=w_init, b_init=None, name='d_h3/conv2d_2')(net_h4_concat)
net_h4 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h3/batch_norm_2')(net_h4)
net_ho = Conv2d(1, (s16, s16), (s16, s16), act=tf.nn.sigmoid, padding='VALID', W_init=w_init, name='d_ho/conv2d')(net_h4)
# 1 x 1 x 1
net_ho = Flatten()(net_ho)
# logits = net_ho.outputs
# net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)
return tl.models.Model(inputs=[net_in,net_in2], outputs=net_ho)
| 17,879
|
def get_img_content(session,
file_url,
extension=None,
max_retry=3,
req_timeout=5):
"""
Returns:
(data, actual_ext)
"""
retry = max_retry
while retry > 0:
try:
response = session.get(file_url, timeout=req_timeout)
except Exception as e:
print(f'Exception caught when downloading file {file_url}, '
f'error: {e}, remaining retry times: {retry - 1}')
else:
if response.status_code != 200:
print(f'Response status code {response.status_code}, '
f'file {file_url}')
break
# get the response byte
data = response.content
if isinstance(data, str):
print('Converting str to byte, later remove it.')
                data = data.encode()
actual_ext = imghdr.what(extension, data)
actual_ext = 'jpg' if actual_ext == 'jpeg' else actual_ext
# do not download original gif
if actual_ext == 'gif' or actual_ext is None:
return None, actual_ext
return data, actual_ext
finally:
retry -= 1
return None, None
| 17,880
|
def nextbus(a, r, c="vehicleLocations", e=0):
"""Returns the most recent latitude and
longitude of the selected bus line using
the NextBus API (nbapi)"""
nbapi = "http://webservices.nextbus.com"
nbapi += "/service/publicXMLFeed?"
nbapi += "command=%s&a=%s&r=%s&t=%s" % (c,a,r,e)
xml = minidom.parse(urllib.urlopen(nbapi))
bus=xml.getElementsByTagName("vehicle")
if bus:
        at = bus[0].attributes
return(at["lat"].value, at["lon"].value)
else: return (False, False)
| 17,881
|
def xml_translate(callback, value):
""" Translate an XML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
try:
root = parse_xml(value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
return serialize_xml(result)
except etree.ParseError:
# fallback for translated terms: use an HTML parser and wrap the term
root = parse_html(u"<div>%s</div>" % value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
# remove tags <div> and </div> from result
return serialize_xml(result)[5:-6]
| 17,882
|
def check_sc_sa_pairs(tb, pr_sc, pr_sa, ):
"""
Check whether pr_sc, pr_sa are allowed pairs or not.
agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
"""
bS = len(pr_sc)
check = [False] * bS
for b, pr_sc1 in enumerate(pr_sc):
pr_sa1 = pr_sa[b]
hd_types1 = tb[b]['types']
hd_types11 = hd_types1[pr_sc1]
if hd_types11 == 'text':
            if pr_sa1 == 0 or pr_sa1 == 3:  # only '' (no aggregate) or COUNT is allowed on a text column
check[b] = True
else:
check[b] = False
elif hd_types11 == 'real':
check[b] = True
else:
raise Exception("New TYPE!!")
return check
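
# Hedged usage sketch with made-up headers and predictions: column 0 is a text
# column, column 1 a real-valued one, following the convention described above.
tb_example = [{'types': ['text', 'real']}, {'types': ['text', 'real']}]
pr_sc_example = [0, 1]   # predicted select-column per example
pr_sa_example = [3, 5]   # COUNT on the text column, AVG on the real column
print(check_sc_sa_pairs(tb_example, pr_sc_example, pr_sa_example))  # [True, True]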
| 17,883
|
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper
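
# Hedged usage sketch: 'assign_issue' is a made-up function; it assumes the
# surrounding module's Issue/Project classes (with a .key attribute) are in scope.
@translate_resource_args
def assign_issue(issue, assignee):
    # receives issue.key (a string) whenever an Issue resource is passed in
    return issue, assignee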
| 17,884
|
def nan_jumps_dlc(files, max_jump=200):
"""Nan stretches in between large jumps, assuming most of the trace is correct"""
# copy the data
corrected_trace = files.copy()
# get the column names
column_names = corrected_trace.columns
# run through the columns
for column in column_names:
# skip the index column if it's there
if column == 'index':
continue
# find the jumps
jump_length = np.diff(corrected_trace[column], axis=0)
jump_location = np.argwhere(abs(jump_length) > max_jump)
if jump_location.shape[0] == 0:
continue
jump_location = [el[0] for el in jump_location]
# initialize a flag
pair_flag = True
# go through pairs of jumps
for idx, jump in enumerate(jump_location[:-1]):
# if this is the second member of a pair, skip
if not pair_flag:
# reset the pair flag
pair_flag = True
continue
# if this jump and the next have the same sign, skip
if (jump_length[jump]*jump_length[jump_location[idx+1]]) > 0:
continue
# nan the segment in between
corrected_trace.loc[jump+1:jump_location[idx+1]+1, column] = np.nan
# set the pair flag
pair_flag = False
return corrected_trace
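
# Hedged usage sketch: a tiny synthetic DLC-style trace with one spurious
# jump-and-return excursion; the column name 'nose_x' is made up.
import numpy as np
import pandas as pd

trace = pd.DataFrame({'nose_x': [10.0, 11.0, 12.0, 500.0, 501.0, 13.0, 14.0]})
cleaned = nan_jumps_dlc(trace, max_jump=200)
print(cleaned)  # the out-of-range excursion between the opposite-signed jumps is NaN'd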
| 17,885
|
def index():
"""View: Site Index Page"""
return render_template("pages/index.html")
| 17,886
|
def view(
ctx,
pathspec,
hash=None,
type=None,
id=None,
follow_resumed=False,
):
"""
View the HTML card in browser based on the pathspec.\n
The pathspec can be of the form:\n
- <stepname>\n
- <runid>/<stepname>\n
- <runid>/<stepname>/<taskid>\n
"""
card_id = id
available_card_paths, card_datastore, pathspec = resolve_card(
ctx,
pathspec,
type=type,
hash=hash,
card_id=card_id,
follow_resumed=follow_resumed,
)
if len(available_card_paths) == 1:
open_in_browser(card_datastore.cache_locally(available_card_paths[0]))
else:
list_available_cards(
ctx,
pathspec,
available_card_paths,
card_datastore,
command="view",
)
| 17,887
|
def export_transformed_profile(kind, scenario_info, grid, ct, filepath, slice=True):
"""Apply transformation to the given kind of profile and save the result locally.
:param str kind: which profile to export. This parameter is passed to
:meth:`TransformProfile.get_profile`.
:param dict scenario_info: a dict containing the profile version, with
key in the form base_{kind}
:param powersimdata.input.grid.Grid grid: a Grid object previously
transformed.
:param dict ct: change table.
:param str filepath: path to save the result, including the filename
:param bool slice: whether to slice the profiles by the Scenario's time range.
"""
tp = TransformProfile(scenario_info, grid, ct, slice)
profile = tp.get_profile(kind)
print(f"Writing scaled {kind} profile to {filepath} on local machine")
profile.to_csv(filepath)
| 17,888
|
def test_multisurfstar_pipeline_cont_endpoint():
"""Ensure that MultiSURF* works in a sklearn pipeline with continuous endpoint data"""
np.random.seed(320931)
clf = make_pipeline(MultiSURFstar(n_features_to_select=2, n_jobs=-1),
RandomForestRegressor(n_estimators=100, n_jobs=-1))
assert abs(np.mean(cross_val_score(clf, features_cont_endpoint, labels_cont_endpoint, cv=3))) < 0.5
| 17,889
|
async def get_latest_digest_from_registry(
repository: str,
tag: str,
credentials: Optional[meadowrun.credentials.RawCredentials],
) -> str:
"""
Queries the Docker Registry HTTP API to get the current digest of the specified
repository:tag. The output of this function should always match the output of
`docker inspect --format='{{.RepoDigests}}' [repository]:[tag]` AFTER calling
`docker pull [repository]:[tag]`. Example output is something like
sha256:76eaa9e5bd357d6983a88ddc9c4545ef4ad64c50f84f081ba952c7ed08e3bdd6. Note that
this hash is also part of the output when `docker pull` is run.
This function gets the latest version of that hash without pulling the image first.
The docker command line/client does not provide this capability
(https://stackoverflow.com/questions/56178911/how-to-obtain-docker-image-digest-from-tag/56178979#56178979),
so we have to resort to the Docker Registry HTTP API.
Note that we could also use skopeo for this: https://github.com/containers/skopeo
The functionality we're implementing is exactly the same as `skopeo inspect
[repository]:[tag]` and then reading the "digest" field.
"""
# The correct implementation is to read the manifest for a repository/tag and
# compute the sha256 hash of the content of the manifest.
#
# At the time of writing this comment, the accepted answers on the first couple
# Google results on this topic were out of date, incomplete, or incorrect:
#
# https://stackoverflow.com/questions/39375421/can-i-get-an-image-digest-without-downloading-the-image/39376254#39376254
# https://stackoverflow.com/questions/41808763/how-to-determine-the-docker-image-id-for-a-tag-via-docker-hub-api/41830007#41830007
# https://ops.tips/blog/inspecting-docker-image-without-pull/
#
# Part of the confusion seems to be that there are many different digests for the
# sub-parts of the image (e.g. for different architectures, different layers,
# etc.). We only care about the digest that we get from `docker inspect` (as
# described above) because this is what we'll use to figure out if we have the
# latest version of the image or not. Correctness can be easily verified using the
# docker command line as described above.
#
# Reading the docker-content-digest header in the response is an alternative to
# computing the hash ourselves, but this header is not in the responses from AWS
# ECR.
registry_domain, repository = get_registry_domain(repository)
manifests_url = f"https://{registry_domain}/v2/{repository}/manifests/{tag}"
headers = {"Accept": _MANIFEST_ACCEPT_HEADER_FOR_DIGEST}
if credentials is None:
basic_auth = None
elif isinstance(credentials, meadowrun.credentials.UsernamePassword):
basic_auth = aiohttp.BasicAuth(credentials.username, credentials.password)
else:
raise ValueError(f"Unexpected type of credentials {type(credentials)}")
manifests_text: Optional[bytes] = None
# First, try requesting the manifest without any authentication. It might work, and
# if it doesn't, the response will tell us how the authentication should work.
# TODO add logic to "remember" which repositories require what kinds of
# authentication, as well as potentially tokens as well
async with aiohttp.request("GET", manifests_url, headers=headers) as response:
if response.ok:
manifests_text = await response.read()
else:
# Regardless of the type of error, try again with authentication as long as
# we have a www-authenticate header. response.headers is case insensitive:
if "www-authenticate" not in response.headers:
# we don't know how to authenticate, so just propagate the error
response.raise_for_status()
authentication_header = response.headers["www-authenticate"]
authentication_header_error_message = (
"Don't know how to interpret authentication header "
+ authentication_header
)
scheme, space, auth_params = authentication_header.partition(" ")
if not space:
raise ValueError(authentication_header_error_message)
elif scheme.lower() == "basic":
if basic_auth is None:
raise ValueError(
f"Basic auth is required to access {manifests_url} but no "
"username/password was provided"
)
# We've already set the basic_auth variable above, so we'll leave it as
# is so that it gets used directly in the next request for the manifest
# below.
elif scheme.lower() == "bearer":
# For bearer auth, we need to request a token. Parsing the
# www-authenticate header should tell us everything we need to know to
# construct the request for the token. Example of a www-authenticate
# header is
# `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/python:pull"` # noqa E501
auth_params_parsed = urllib.request.parse_keqv_list(
urllib.request.parse_http_list(auth_params)
)
# a bit hacky, but we're going to use auth_params to create the query
# string, so we remove realm from it because that's the only one we
# don't need
# TODO should this be case insensitive?
realm = auth_params_parsed["realm"]
del auth_params_parsed["realm"]
token_request_url = (
f"{realm}?{urllib.parse.urlencode(auth_params_parsed)}"
)
# Even if no username_password was provided (i.e. basic_auth is None)
# it's worth trying this. E.g. DockerHub requires an anonymous token for
# public repositories
async with aiohttp.request(
"GET", token_request_url, auth=basic_auth
) as token_response:
if not token_response.ok:
token_response.raise_for_status()
# TODO should this be case insensitive?
token = (await token_response.json())["token"]
# Now we add the Bearer token to headers which will get used in the
# next request for the manifest. We also need to unset basic_auth as
# we've used that to get the token, and it should not be used in
# subsequent requests.
headers["Authorization"] = f"Bearer {token}"
basic_auth = None
else:
raise ValueError(authentication_header_error_message)
# now exactly one of basic_auth or headers["Authorization"] should be set
async with aiohttp.request(
"GET", manifests_url, headers=headers, auth=basic_auth
) as response_authenticated:
if not response_authenticated.ok:
response_authenticated.raise_for_status()
manifests_text = await response_authenticated.read()
if not manifests_text:
raise ValueError(
"Programming error: manifests_text should not be None/empty string"
)
# compute the digest from the manifest text
digest = hashlib.sha256(manifests_text).hexdigest()
return f"sha256:{digest}"
# TODO test case where image doesn't exist
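
# Hedged usage sketch: 'python' / '3.9' are illustrative DockerHub coordinates;
# anonymous access (credentials=None) is assumed and a real HTTP request is made.
import asyncio

async def _print_latest_digest():
    digest = await get_latest_digest_from_registry("python", "3.9", None)
    print(digest)  # e.g. "sha256:76eaa9e5bd35..."

asyncio.run(_print_latest_digest())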
| 17,890
|
def test_list_repository_contents(capsys, folder_data):
"""Test the `list_repository_contents` method."""
list_repository_contents(folder_data, path='', color=True)
assert capsys.readouterr().out == 'file.txt\nnested\n'
| 17,891
|
def histogram(x, bins, bandwidth, epsilon=1e-10):
"""
Function that estimates the histogram of the input tensor.
The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.
"""
pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)
return pdf
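
# Hedged usage sketch: assumes the module's marginal_pdf helper is available and
# that x is shaped (batch, num_samples); the bin grid and bandwidth are made up.
import torch

x = torch.rand(2, 1000)
bins = torch.linspace(0.0, 1.0, steps=64)
pdf = histogram(x, bins, bandwidth=torch.tensor(0.01))
print(pdf.shape)  # expected: torch.Size([2, 64])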
| 17,892
|
def revisions_upload(deployment_name, version_name, zip_path, format_):
"""Create a revision of a deployment version by uploading a ZIP.
Please, specify the deployment package `<zip_path>` that should be uploaded.
"""
project_name = get_current_project(error=True)
client = init_client()
revision = client.revisions_file_upload(project_name=project_name, deployment_name=deployment_name,
version=version_name, file=zip_path)
client.api_client.close()
print_item(revision, row_attrs=['revision', 'build'], fmt=format_)
| 17,893
|
def file_deal(paths, set_list: list, list_search: list, list_enter: list, file_path: dict, clear_list: bool = False,
pattern=r'^[.\n]*$', is_file=True, replace_str: str = '', names: dict = None):
"""
:param clear_list: is need clear the list
:param paths: DirPicker path or FilePicker files
:param set_list: the list save the keys
:param list_search: the list to search
:param list_enter: the list to show
:param file_path: the dict of loaded files
:param pattern: the pattern of the base_filename
:param is_file: a bool of the load type bool True->FilePicker,False->DirPicker
:param replace_str: the need replace string in the base filename
:param names: the Chinese-base_name dict
:return: if do not raise any error and worked bool->True,else bool->False
"""
try:
if names is None:
names = {}
pattern_re = re.compile(pattern)
if not is_file:
dict_path = paths.copy()
paths = paths.keys()
num = len(paths)
else:
dict_path = {}
num = len(set_list)
if clear_list:
set_list.clear()
list_enter.clear()
list_search.clear()
num = 0
if not len(paths) == 0:
path = filter(lambda x: pattern_re.match(os.path.basename(x)) is not None, paths)
path = list(path)
info_write = info_write_builder(is_file, dict_path, replace_str, set_list, file_path, list_enter,
names, list_search)
path_len = len(path)
paths = zip(path, range(path_len))
paths = list(paths)
num += len(list(map(info_write, list(paths))))
            if path_len == 0:
                return False, 'Import finished, no new items were added!'
        else:
            return False, 'Import failed, there was nothing to import!'
    except (TypeError, KeyError, RuntimeError) as info:
        return False, 'Import failed, an error occurred! %s' % info
    else:
        return True, 'Import succeeded! %d item(s) imported!' % num
| 17,894
|
def load_variables(data, nvariables, variables):
"""TODO."""
for i in range(nvariables):
# TODO: read types from struct?
# TODO: byteswap only if system is little-endian
buf = data[(27 * i):(27 * i + 8)]
reverse_array(buf)
variableId = np.frombuffer(buf, dtype=np.int64)[0]
isEvidence = data[27 * i + 8]
buf = data[(27 * i + 9):(27 * i + 17)]
reverse_array(buf)
initialValue = np.frombuffer(buf, dtype=np.int64)[0]
buf = data[(27 * i + 17):(27 * i + 19)]
reverse_array(buf)
dataType = np.frombuffer(buf, dtype=np.int16)[0]
buf = data[(27 * i + 19):(27 * i + 27)]
reverse_array(buf)
cardinality = np.frombuffer(buf, dtype=np.int64)[0]
variables[variableId]["isEvidence"] = isEvidence
variables[variableId]["initialValue"] = initialValue
variables[variableId]["dataType"] = dataType
variables[variableId]["cardinality"] = cardinality
print("LOADED VARS")
| 17,895
|
def generate_kam(
kam_path: str
) -> nx.DiGraph:
"""
Generates the knowledge assembly model as a NetworkX graph.
:param kam_path: Path to the file containing the source, relationship and the target nodes of a knowledge
assembly model (KAM).
:return: KAM graph as a NetworkX DiGraph.
"""
# Read the file containing the kam file
kam_df = pd.read_csv(kam_path, sep='\t', header=None)
    # Rename the column headers to Source, Relationship and Target
kam_df.columns = ['Source', 'Relationship', 'Target']
# Map relationships between the nodes as either +1 or -1 based on the interaction
rlsp_mapping = {
'activates': 1,
'inhibits': -1
}
# Add the data to a directed graph
kam = nx.DiGraph()
for edge in kam_df.index:
kam.add_edge(
kam_df.at[edge, 'Source'],
kam_df.at[edge, 'Target'],
effect=rlsp_mapping[kam_df.at[edge, 'Relationship']]
)
return kam
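
# Hedged usage sketch: writes a tiny illustrative two-edge KAM file; the gene
# names and the temporary path are made up.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as handle:
    handle.write('TP53\tactivates\tMDM2\n')
    handle.write('MDM2\tinhibits\tTP53\n')
    tmp_kam_path = handle.name

kam_graph = generate_kam(tmp_kam_path)
print(kam_graph['TP53']['MDM2'])  # {'effect': 1}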
| 17,896
|
def toDrive(collection, folder, namePattern='{id}', scale=30,
dataType="float", region=None, datePattern=None,
extra=None, verbose=False, **kwargs):
""" Upload all images from one collection to Google Drive. You can use
the same arguments as the original function
ee.batch.export.image.toDrive
:param collection: Collection to upload
:type collection: ee.ImageCollection
:param folder: Google Drive folder to export the images to
:type folder: str
:param namePattern: pattern for the name. See make_name function
:type namePattern: str
    :param region: area to upload. Defaults to the footprint of the first
        image in the collection
:type region: ee.Geometry.Rectangle or ee.Feature
    :param scale: scale of the image (side of one pixel). Defaults to 30
        (Landsat resolution)
:type scale: int
:param maxImgs: maximum number of images inside the collection
:type maxImgs: int
:param dataType: as downloaded images **must** have the same data type
in all bands, you have to set it here. Can be one of: "float",
"double", "int", "Uint8", "Int8" or a casting function like
*ee.Image.toFloat*
:type dataType: str
:param datePattern: pattern for date if specified in namePattern.
Defaults to 'yyyyMMdd'
:type datePattern: str
:return: list of tasks
:rtype: list
"""
# empty tasks list
tasklist = []
# get region
region = tools.geometry.getRegion(region)
# Make a list of images
img_list = collection.toList(collection.size())
n = 0
while True:
try:
img = ee.Image(img_list.get(n))
name = makeName(img, namePattern, datePattern, extra)
name = name.getInfo()
description = utils.matchDescription(name)
# convert data type
img = utils.convertDataType(dataType)(img)
task = ee.batch.Export.image.toDrive(image=img,
description=description,
folder=folder,
fileNamePrefix=name,
region=region,
scale=scale, **kwargs)
task.start()
if verbose:
print("exporting {} to folder '{}' in GDrive".format(name, folder))
tasklist.append(task)
n += 1
except Exception as e:
error = str(e).split(':')
if error[0] == 'List.get':
break
else:
raise e
return tasklist
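
# Hedged usage sketch: the collection id, date range, folder name, and region
# are illustrative; assumes the Earth Engine client is already authenticated.
import ee

ee.Initialize()
col = (ee.ImageCollection('COPERNICUS/S2_SR')
       .filterDate('2021-01-01', '2021-01-15')
       .limit(3))
region = ee.Geometry.Rectangle([-70.1, -20.1, -70.0, -20.0])
tasks = toDrive(col, folder='ee_exports', namePattern='{id}', scale=10,
                dataType='float', region=region, verbose=True)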
| 17,897
|
def area(box):
"""Computes area of boxes.
B: batch_size
N: number of boxes
Args:
box: a float Tensor with [N, 4], or [B, N, 4].
Returns:
a float Tensor with [N], or [B, N]
"""
with tf.name_scope('Area'):
y_min, x_min, y_max, x_max = tf.split(
value=box, num_or_size_splits=4, axis=-1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), axis=-1)
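
# Hedged usage sketch: two illustrative boxes in [y_min, x_min, y_max, x_max]
# order, matching the split order used above.
import tensorflow as tf

boxes = tf.constant([[0.0, 0.0, 2.0, 3.0],
                     [1.0, 1.0, 4.0, 5.0]])
print(area(boxes).numpy())  # [ 6. 12.]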
| 17,898
|
def read(fn):
"""
return a list of the operating systems and a list of the groups in
the given fingerbank config file
"""
cfg = parse_config_with_heredocs(fn)
return create_systems_and_groups(cfg)
| 17,899
|