# NOTE: dataset-table header lines ("content stringlengths ... | id ...|" and
# "|---|---|") removed — they are extraction artifacts, not code.
def get_single_io_arg(info):
    """Extract a single input/output argument dict from io info.

    :param info: dict describing one input/output; must contain a 'valid' key.
    :return: the (mutated) info dict with 'valid'/'name' removed and any
        open-ended range bound (-1) normalised to None, or None when the
        entry is marked invalid.
    :raises ValueError: when the mandatory 'valid' key is missing.
    """
    if 'valid' not in info:
        raise ValueError("Json string Errors, key:valid not found.")
    if not info['valid']:
        return None
    check_arg_info(info)
    # Strip bookkeeping keys; note the caller's dict is modified in place.
    del info['valid']
    del info['name']
    # A -1 upper bound means "unbounded" — represent that as None.
    for bounds in info.get('range', []):
        if bounds[1] == -1:
            bounds[1] = None
    return info
def new_credentials(
    client_id: str, consumer_secret: str, data: Dict[str, Any]
) -> Credentials:
    """Create Credentials from config and json.

    :param client_id: OAuth client id from configuration.
    :param consumer_secret: OAuth consumer secret from configuration.
    :param data: decoded JSON token payload; must supply access_token,
        expires_in, token_type, refresh_token and userid.
    :return: a populated Credentials object.
    """
    return Credentials(
        access_token=str_or_raise(data.get("access_token")),
        # NOTE(review): assumes arrow's `timestamp` is a numeric property;
        # in arrow >= 1.0 it became a method, which would make this addition
        # fail — confirm the pinned arrow version (or use int_timestamp).
        token_expiry=arrow.utcnow().timestamp + data.get("expires_in"),
        token_type=str_or_raise(data.get("token_type")),
        refresh_token=str_or_raise(data.get("refresh_token")),
        userid=int_or_raise(data.get("userid")),
        client_id=str_or_raise(client_id),
        consumer_secret=str_or_raise(consumer_secret),
    )
def get_chains(table, ipv6=False):
    """Return the names of the chains currently defined in *table*.

    :param table: iptables table name to inspect.
    :param ipv6: use the ip6tables variant when True.
    :return: list of chain name strings.
    """
    iptc_table = _iptc_gettable(table, ipv6)
    return [chain.name for chain in iptc_table.chains]
def check_file(file):
    """Resolve *file* to an existing path.

    If *file* is an existing path (or the empty string) it is returned
    unchanged; otherwise the current project tree is searched recursively
    and the unique match is returned.

    :param file: file name or path to resolve.
    :return: the resolved file path string.
    :raises AssertionError: when no file, or more than one file, matches.
    """
    # Existing path (or empty string): nothing to resolve.
    if file == '' or os.path.isfile(file):
        return file
    # Otherwise search the whole project tree for a matching name.
    matches = glob.glob('./**/' + file, recursive=True)
    assert len(matches), 'File Not Found: %s' % file
    assert len(matches) == 1, "Multiple files match '%s', specify exact path: %s" % (file, matches)
    return matches[0]
def unscale_fundamental_matrix(fundamental_matrix, M):
    """Undo coordinate normalisation on a fundamental matrix.

    :param fundamental_matrix: 3x3 fundamental matrix estimated in
        coordinates scaled by *M*.
    :param M: coordinate scaling factor.
    :return: fundamental matrix valid for the unscaled coordinates.
    """
    scale = np.diag([1.0 / M, 1.0 / M, 1.0])
    # F_unscaled = T^T F T, with T the (inverse) normalisation transform.
    return scale.T @ fundamental_matrix @ scale
def initialize_server_request(request):
    """Build (oauth_server, oauth_request) from a Django request.

    :param request: Django request object.
    :return: tuple of (OAuthServer or None, OAuthRequest or falsy).
    """
    # Django converts Authorization header in HTTP_AUTHORIZATION
    # Warning: it doesn't happen in tests but it's useful, do not remove!
    auth_header = {}
    for meta_key in ('Authorization', 'HTTP_AUTHORIZATION'):
        if meta_key in request.META:
            auth_header = {'Authorization': request.META[meta_key]}
            break
    oauth_request = OAuthRequest.from_request(
        request.method,
        request.build_absolute_uri(),
        headers=auth_header,
        parameters=dict(request.REQUEST.items()),
        query_string=request.environ.get('QUERY_STRING', ''))
    if not oauth_request:
        return None, oauth_request
    oauth_server = OAuthServer(DataStore(oauth_request))
    oauth_server.add_signature_method(OAuthSignatureMethod_PLAINTEXT())
    oauth_server.add_signature_method(OAuthSignatureMethod_HMAC_SHA1())
    return oauth_server, oauth_request
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
    """
    Parse the single unnamed arg supplied to SkyCoord.  This must be:
    - Coordinate frame with data
    - Representation
    - SkyCoord
    - List or tuple of:
      - String which splits into two values
      - Iterable with two values
      - SkyCoord, frame, or representation objects.

    Returns ``(skycoord_kwargs, components)``: extra frame attributes picked
    up from the input, and a dict mapping frame component attribute names to
    validated values (or lists of values).

    NOTE: the caller-supplied ``units`` list is mutated in lockstep with the
    internal attribute-name lists below.
    """
    from .sky_coordinate import SkyCoord

    is_scalar = False  # Differentiate between scalar and list input
    components = {}  # Returned mapping of frame component name -> value
    skycoord_kwargs = {}  # Extra frame attributes harvested from the input

    # Parallel lists describing the frame's representation components; these
    # are pruned/extended together (with `units`) as cases below demand.
    frame_attr_names = list(frame.representation_component_names.keys())
    repr_attr_names = list(frame.representation_component_names.values())
    repr_attr_classes = list(frame.representation.attr_classes.values())
    n_attr_names = len(repr_attr_names)

    # Turn a single string into a list of strings for convenience
    if isinstance(coords, str):
        is_scalar = True
        coords = [coords]

    if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
        # Note that during parsing of `frame` it is checked that any coordinate
        # args have the same frame as explicitly supplied, so don't worry here.

        if not coords.has_data:
            raise ValueError('Cannot initialize from a frame without coordinate data')

        data = coords.data.represent_as(frame.representation_type)

        values = []  # List of values corresponding to representation attrs
        repr_attr_name_to_drop = []
        for repr_attr_name in repr_attr_names:
            # If coords did not have an explicit distance then don't include in initializers.
            if (isinstance(coords.data, UnitSphericalRepresentation) and
                    repr_attr_name == 'distance'):
                repr_attr_name_to_drop.append(repr_attr_name)
                continue

            # Get the value from `data` in the eventual representation
            values.append(getattr(data, repr_attr_name))

        # drop the ones that were skipped because they were distances;
        # note this also trims the caller's `units` list at the same index.
        for nametodrop in repr_attr_name_to_drop:
            nameidx = repr_attr_names.index(nametodrop)
            del repr_attr_names[nameidx]
            del units[nameidx]
            del frame_attr_names[nameidx]
            del repr_attr_classes[nameidx]

        if coords.data.differentials and 's' in coords.data.differentials:
            orig_vel = coords.data.differentials['s']
            # NOTE(review): uses `frame.representation` here while the data
            # path above uses `frame.representation_type` — presumably
            # aliases of the same thing; confirm.
            vel = coords.data.represent_as(frame.representation, frame.get_representation_cls('s')).differentials['s']
            for frname, reprname in frame.get_representation_component_names('s').items():
                # Skip d_distance when the original velocity had no distance
                # information (unit-spherical differential).
                if (reprname == 'd_distance' and not hasattr(orig_vel, reprname) and
                        'unit' in orig_vel.get_name()):
                    continue
                values.append(getattr(vel, reprname))
                units.append(None)
                frame_attr_names.append(frname)
                repr_attr_names.append(reprname)
                repr_attr_classes.append(vel.attr_classes[reprname])

        # Carry over frame attributes (e.g. obstime/equinox-like values) that
        # were explicitly set on the input.
        for attr in frame_transform_graph.frame_attributes:
            value = getattr(coords, attr, None)
            use_value = (isinstance(coords, SkyCoord)
                         or attr not in coords._attr_names_with_defaults)
            if use_value and value is not None:
                skycoord_kwargs[attr] = value

    elif isinstance(coords, BaseRepresentation):
        if coords.differentials and 's' in coords.differentials:
            diffs = frame.get_representation_cls('s')
            data = coords.represent_as(frame.representation_type, diffs)
            values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
            for frname, reprname in frame.get_representation_component_names('s').items():
                values.append(getattr(data.differentials['s'], reprname))
                units.append(None)
                frame_attr_names.append(frname)
                repr_attr_names.append(reprname)
                repr_attr_classes.append(data.differentials['s'].attr_classes[reprname])
        else:
            data = coords.represent_as(frame.representation)
            values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]

    elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if'
          and coords.ndim == 2 and coords.shape[1] <= 3):
        # 2-d array of coordinate values. Handle specially for efficiency.
        values = coords.transpose()  # Iterates over repr attrs

    elif isinstance(coords, (Sequence, np.ndarray)):
        # Handles list-like input.
        vals = []
        is_ra_dec_representation = ('ra' in frame.representation_component_names and
                                    'dec' in frame.representation_component_names)
        coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
        if any(isinstance(coord, coord_types) for coord in coords):
            # this parsing path is used when there are coordinate-like objects
            # in the list - instead of creating lists of values, we create
            # SkyCoords from the list elements and then combine them.
            scs = [SkyCoord(coord, **init_kwargs) for coord in coords]

            # Check that all frames are equivalent
            for sc in scs[1:]:
                if not sc.is_equivalent_frame(scs[0]):
                    raise ValueError("List of inputs don't have equivalent "
                                     "frames: {0} != {1}".format(sc, scs[0]))

            # Now use the first to determine if they are all UnitSpherical
            allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)

            # get the frame attributes from the first coord in the list, because
            # from the above we know it matches all the others. First copy over
            # the attributes that are in the frame itself, then copy over any
            # extras in the SkyCoord
            for fattrnm in scs[0].frame.frame_attributes:
                skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
            for fattrnm in scs[0]._extra_frameattr_names:
                skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)

            # Now combine the values, to be used below
            values = []
            for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names):
                if allunitsphrepr and repr_attr_name == 'distance':
                    # if they are *all* UnitSpherical, don't give a distance
                    continue
                data_vals = []
                for sc in scs:
                    data_val = getattr(sc, data_attr_name)
                    data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val)
                concat_vals = np.concatenate(data_vals)
                # Hack because np.concatenate doesn't fully work with Quantity
                if isinstance(concat_vals, u.Quantity):
                    concat_vals._unit = data_val.unit
                values.append(concat_vals)
        else:
            # none of the elements are "frame-like"
            # turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
            for coord in coords:
                if isinstance(coord, str):
                    coord1 = coord.split()
                    if len(coord1) == 6:
                        # Six tokens: sexagesimal "h m s d m s" pair.
                        coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
                    elif is_ra_dec_representation:
                        coord = _parse_ra_dec(coord)
                    else:
                        coord = coord1
                vals.append(coord)  # Assumes coord is a sequence at this point

            # Do some basic validation of the list elements: all have a length and all
            # lengths the same
            try:
                n_coords = sorted(set(len(x) for x in vals))
            except Exception:
                raise ValueError('One or more elements of input sequence does not have a length')

            if len(n_coords) > 1:
                raise ValueError('Input coordinate values must have same number of elements, found {0}'
                                 .format(n_coords))
            n_coords = n_coords[0]

            # Must have no more coord inputs than representation attributes
            if n_coords > n_attr_names:
                raise ValueError('Input coordinates have {0} values but '
                                 'representation {1} only accepts {2}'
                                 .format(n_coords,
                                         frame.representation_type.get_name(),
                                         n_attr_names))

            # Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
            # (ok since we know it is exactly rectangular). (Note: can't just use zip(*values)
            # because Longitude et al distinguishes list from tuple so [a1, a2, ..] is needed
            # while (a1, a2, ..) doesn't work.
            values = [list(x) for x in zip(*vals)]

            if is_scalar:
                values = [x[0] for x in values]
    else:
        raise ValueError('Cannot parse coordinates from first argument')

    # Finally we have a list of values from which to create the keyword args
    # for the frame initialization. Validate by running through the appropriate
    # class initializer and supply units (which might be None).
    try:
        for frame_attr_name, repr_attr_class, value, unit in zip(
                frame_attr_names, repr_attr_classes, values, units):
            components[frame_attr_name] = repr_attr_class(value, unit=unit,
                                                          copy=False)
    except Exception as err:
        raise ValueError('Cannot parse first argument data "{0}" for attribute '
                         '{1}'.format(value, frame_attr_name), err)

    return skycoord_kwargs, components
def build_names(dependency: Dependency, version_in_url: bool = True) -> Tuple[RemoteResolver, str, str]:
    """
    Build directory and file names based on the given dependency.
    :param dependency: the dependency to create the file container for.
    :param version_in_url: a flag noting whether the dependency version should be included
    in the URL we build.
    :return: a tuple containing an appropriate remote resolver, a classified base file name
    and a base file name.
    """
    version_for_url = dependency.version if version_in_url else None
    resolver = create_remote_resolver(dependency.group, dependency.name, version_for_url)
    base_name = f'{dependency.name}-{dependency.version}'
    if dependency.classifier:
        classified_name = f'{base_name}-{dependency.classifier}'
    else:
        classified_name = base_name
    return resolver, classified_name, base_name
def convert_apc_examples_to_features(examples, label_list, max_seq_len, tokenizer, opt=None):
    """Loads a data file into a list of `InputBatch`s.

    :param examples: example objects exposing text_a (context tokens),
        text_b (aspect tokens), IOB_label and polarity.
    :param label_list: IOB label names; indices are assigned starting at 1
        (0 is used for padding below).
    :param max_seq_len: maximum sequence length; longer inputs are truncated.
    :param tokenizer: subword tokenizer providing bos_token/eos_token.
    :param opt: options namespace; mutated — `IOB_label_to_index` is set on it.
    :return: list of InputFeatures, one per example.
    """
    configure_spacy_model(opt)

    bos_token = tokenizer.bos_token
    eos_token = tokenizer.eos_token
    # Label ids start at 1; 0 is reserved for padding.
    label_map = {label: i for i, label in enumerate(label_list, 1)}
    opt.IOB_label_to_index = label_map
    features = []
    for (ex_index, example) in enumerate(examples):
        text_tokens = example.text_a[:]
        aspect_tokens = example.text_b[:]
        IOB_label = example.IOB_label
        # aspect_label = example.aspect_label
        # Every aspect token is labelled B-ASP (the stored aspect_label above
        # is intentionally ignored).
        aspect_label = ['B-ASP'] * len(aspect_tokens)
        # NOTE(review): -999 appears to be an "ignore" sentinel padding the
        # polarity for the bos/eos positions — confirm against the consumer.
        polarity = [-999] + example.polarity + [-999]
        positions = np.where(np.array(polarity) > 0)[0].tolist()
        tokens = []
        labels = []
        valid = []
        label_mask = []
        # SPC-style input: <bos> context <eos> aspect <eos>, labels aligned.
        enum_tokens = [bos_token] + text_tokens + [eos_token] + aspect_tokens + [eos_token]
        IOB_label = [bos_token] + IOB_label + [eos_token] + aspect_label + [eos_token]
        enum_tokens = enum_tokens[:max_seq_len]
        IOB_label = IOB_label[:max_seq_len]
        aspect = ' '.join(example.text_b)
        try:
            # Split the raw text around the aspect phrase.
            text_left, _, text_right = [s.strip() for s in ' '.join(example.text_a).partition(aspect)]
        except:
            # NOTE(review): bare except — silently falls back when the aspect
            # cannot be located in the text.
            text_left = ' '.join(example.text_a)
            text_right = ''
            aspect = ''
        text_raw = text_left + ' ' + aspect + ' ' + text_right
        validate_example(text_raw, aspect, '')
        prepared_inputs = prepare_input_for_atepc(opt, tokenizer, text_left, text_right, aspect)
        lcf_cdm_vec = prepared_inputs['lcf_cdm_vec']
        lcf_cdw_vec = prepared_inputs['lcf_cdw_vec']
        # Subword-tokenize; only the first piece of each word is "valid" and
        # carries the word's IOB label.
        for i, word in enumerate(enum_tokens):
            token = tokenizer.tokenize(word)
            tokens.extend(token)
            cur_iob = IOB_label[i]
            for m in range(len(token)):
                if m == 0:
                    label_mask.append(1)
                    labels.append(cur_iob)
                    valid.append(1)
                else:
                    valid.append(0)
        # Reserve 2 positions (special tokens) when truncating.
        tokens = tokens[0:min(len(tokens), max_seq_len - 2)]
        labels = labels[0:min(len(labels), max_seq_len - 2)]
        valid = valid[0:min(len(valid), max_seq_len - 2)]
        # Segment 0 for the context, segment 1 for the rest (aspect part).
        segment_ids = [0] * len(example.text_a[:]) + [1] * (max_seq_len - len([0] * len(example.text_a[:])))
        segment_ids = segment_ids[:max_seq_len]
        label_ids = []
        for i, token in enumerate(tokens):
            if len(labels) > i:
                label_ids.append(label_map[labels[i]])
        input_ids_spc = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids_spc)
        label_mask = [1] * len(label_ids)
        # Zero-pad everything out to max_seq_len.
        while len(input_ids_spc) < max_seq_len:
            input_ids_spc.append(0)
            input_mask.append(0)
            label_ids.append(0)
            label_mask.append(0)
        while len(valid) < max_seq_len:
            valid.append(1)
        while len(label_ids) < max_seq_len:
            label_ids.append(0)
            label_mask.append(0)
        assert len(input_ids_spc) == max_seq_len
        assert len(input_mask) == max_seq_len
        assert len(segment_ids) == max_seq_len
        assert len(label_ids) == max_seq_len
        assert len(valid) == max_seq_len
        assert len(label_mask) == max_seq_len
        features.append(
            InputFeatures(input_ids_spc=input_ids_spc,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_ids,
                          polarity=polarity,
                          valid_ids=valid,
                          label_mask=label_mask,
                          tokens=example.text_a,
                          lcf_cdm_vec=lcf_cdm_vec,
                          lcf_cdw_vec=lcf_cdw_vec,
                          aspect=aspect,
                          positions=positions
                          )
        )
    return features
def write_output(opts: "AppOptions", out_lines):
    """
    Write the modified document lines to a new file with "MODIFIED" and
    a date_time tag added to the file name.

    :param opts: application options; only ``opts.doc_path`` is read.
    :param out_lines: iterable of lines to write (a newline is appended
        to each).
    :return: the output file path as a string.
    :raises FileExistsError: if the target file already exists (unlikely,
        given the timestamp in the name).
    """
    ds = datetime.now().strftime("%Y%m%d_%H%M%S")
    doc_path = Path(opts.doc_path)
    out_name = f"{doc_path.stem}_MODIFIED_{ds}{doc_path.suffix}"
    out_path = doc_path.parent.joinpath(out_name)
    # Explicit check instead of `assert`: asserts are stripped under -O.
    if out_path.exists():
        raise FileExistsError(f"Output file already exists: {out_path}")
    print(f"\nSaving '{out_path}'")
    # Explicit encoding so output does not depend on the platform default.
    with open(out_path, "w", encoding="utf-8") as out_file:
        for s in out_lines:
            out_file.write(f"{s}\n")
    return str(out_path)
def ssh_pub_key(key_file):
    """Build the public-key string for the RSA private key in *key_file*.

    :param key_file: path to an RSA private key file.
    :return: "<type> <base64> autogenerated by polyphemus" string.
    """
    key = paramiko.RSAKey(filename=key_file)
    return "{0} {1} autogenerated by polyphemus".format(key.get_name(),
                                                       key.get_base64())
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True, use_zeros=False, init=None):
    """Helper to create an initialized Variable with weight decay.

    The Variable is initialized with Xavier/Glorot, zeros, or a truncated
    normal distribution, and an L2 weight-decay loss is registered when
    requested.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.
      use_xavier: bool, whether to use xavier initializer
      use_zeros: bool, whether to use zero initializer
      init: accepted for interface compatibility but currently unused.
    Returns:
      Variable Tensor
    """
    if use_xavier:
        # tf.contrib is gone; glorot_normal is the equivalent initializer.
        chosen_init = tf.initializers.glorot_normal()
    elif use_zeros:
        chosen_init = tf.constant_initializer(0.0)
    else:
        chosen_init = tf.truncated_normal_initializer(stddev=stddev)
    var = _variable_on_cpu(name, shape, chosen_init)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def test_main_info_with_ble_interface(capsys, reset_globals):
    """Test --info over a (mocked) BLE interface."""
    sys.argv = ['', '--info', '--ble', 'foo']
    Globals.getInstance().set_args(sys.argv)

    mock_iface = MagicMock(autospec=BLEInterface)
    mock_iface.showInfo.side_effect = lambda: print('inside mocked showInfo')

    with patch('meshtastic.ble_interface.BLEInterface', return_value=mock_iface) as mocked_ctor:
        main()
        captured = capsys.readouterr()
        assert re.search(r'Connected to radio', captured.out, re.MULTILINE)
        assert re.search(r'inside mocked showInfo', captured.out, re.MULTILINE)
        assert captured.err == ''
        mocked_ctor.assert_called()
def build_mobile_vit(config):
    """Build MobileViT by reading options in config object.

    Args:
        config: config instance containing the setting options
    Returns:
        model: MobileViT model
    """
    return MobileViT(
        in_channels=config.MODEL.IN_CHANNELS,
        # XS example: dims [16, 32, 48, 48, 48, 64, 80, 96, 384]
        dims=config.MODEL.DIMS,
        # XS example: hidden dims [96, 120, 144] used in mobilevit blocks
        hidden_dims=config.MODEL.HIDDEN_DIMS,
        num_classes=config.MODEL.NUM_CLASSES,
    )
def _openSerialPort(comport):
    """Open the serial port named *comport* at 115200 8N1.

    Returns the open serial.Serial stream, or None when the port could not
    be opened (the error is printed).

    Note: the previous version re-raised SerialException after printing,
    but a ``return`` inside its ``finally`` block silently swallowed the
    exception, so the observable behavior was always "return None on
    failure".  That behavior is preserved here without the dead raise.
    """
    try:
        return serial.Serial(
            port=comport,
            baudrate=115200,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            bytesize=serial.EIGHTBITS,
            timeout=1,
        )
    except serial.SerialException as ex:
        print(f"Failed to capture serial port: {ex}")
        return None
def search_youtube(query, retries=4, max_num_results=-1):
    """Unlimited youtube search by web scraping.

    :param query: iterable of query terms (joined with '+').
    :param retries: how many times to fetch before giving up on a full page.
    :param max_num_results: cap on returned results; <= 0 means no cap.
    :return: list of scraped result dicts.
    """
    # '+'-join the terms ('' for an empty query) — equivalent to the old
    # reduce-based concatenation.
    transformed_query = '+'.join(query) if len(query) != 0 else ''
    num_of_requests = 0
    for _ in range(retries):
        page = get_html(transformed_query)
        num_of_requests += 1
        # A closed result list marks a fully rendered page.
        if "</ol>" in page.text:
            break
    logger.info(f" Number of requests : {num_of_requests}")
    soup = BeautifulSoup(page.content, 'html.parser')
    item_list = soup.find('ol', class_='item-section')
    if item_list is None:
        raise Exception(" Html without list of results ")
    scrapped_data = [entry
                     for entry in map(extract_data, item_list.find_all('li'))
                     if entry is not None]
    return scrapped_data if max_num_results <= 0 else scrapped_data[:max_num_results]
def threaded(count):
    """Main body of each thread.

    Generates `count` log messages (each causing an actor transmit to the
    logger) and then exits.
    """
    try:
        time.sleep(1)
        for msg_num in range(count):
            logging.debug('Msg %s of %s', msg_num, count)
            time.sleep(0.0001)
        time.sleep(1)
        logging.debug('Done')
    except Exception as ex:
        # Top-of-thread boundary: report via both channels, never propagate.
        thesplog('Failed threading because: %s', ex)
        logging.exception('Failed threading')
def assert_less(first, second, msg_fmt="{msg}"):
    """Fail if first is not less than second.
    >>> assert_less('bar', 'foo')
    >>> assert_less(5, 5)
    Traceback (most recent call last):
    ...
    AssertionError: 5 is not less than 5
    The following msg_fmt arguments are supported:
    * msg - the default error message
    * first - the first argument
    * second - the second argument
    """
    if first < second:
        return
    msg = "{!r} is not less than {!r}".format(first, second)
    fail(msg_fmt.format(msg=msg, first=first, second=second))
def Pull(display_sentinel, json_filename, result_filename, first=False, output_stream=sys.stdout):
    """Called prior to committing a change pushed from a client to the local repository."""
    # Thin wrapper: delegate to _Impl with the pull-specific hook method
    # name and parser.
    return _Impl(display_sentinel,
                 json_filename,
                 result_filename,
                 first,
                 output_stream,
                 Constants.HOOK_ENVIRONMENT_PULL_METHOD_NAME,
                 HooksImplParser.Pushed_FromJson)
def show_vars(names: List[str]) -> None:
    """
    List "variables" with values.
    This shows all known configuration settings as "variables" with their
    values, or just the variables that are selected.
    This is a callback of a command.
    """
    config_mapping = etl.config.get_config_map()
    all_keys = sorted(config_mapping)
    if not names:
        keys = all_keys
    else:
        matched = set()
        for pattern in names:
            # fnmatch.filter == [k for k in all_keys if fnmatch.fnmatch(k, pattern)]
            hits = fnmatch.filter(all_keys, pattern)
            if not hits:
                raise InvalidArgumentError("no matching setting for '{}'".format(pattern))
            matched.update(hits)
        keys = sorted(matched)
    values = [config_mapping[key] for key in keys]
    print(etl.text.format_lines(zip(keys, values), header_row=["Name", "Value"]))
def define_empty_source_parallel_buckets(max_seq_len_target: int,
                                         bucket_width: int = 10) -> List[Tuple[int, int]]:
    """
    Returns (source, target) buckets up to (None, max_seq_len_target). The source
    is empty since it is supposed to not contain data that can be bucketized.
    The target is used as reference to create the buckets.
    :param max_seq_len_target: Maximum target bucket size.
    :param bucket_width: Width of buckets on longer side.
    """
    step = max(1, bucket_width)
    # Source side is always 0: there is no source text to bucket; the target
    # side is at least 2.
    target_sides = [max(2, b) for b in define_buckets(max_seq_len_target, step=step)]
    pairs = [(0, t) for t in target_sides]
    # Deduplicate (preserving first occurrence) and sort for the caller.
    unique_buckets = list(OrderedDict.fromkeys(pairs))
    unique_buckets.sort()
    return unique_buckets
def _hue_scaling(args):
    """Return scaled hue values as described in
    http://dlmf.nist.gov/help/vrml/aboutcolor

    args : ndarray of args / angles of complex numbers in [0, 2*pi)
    returns : scaled hue values in the interval [0, 1)
    """
    # Map the angle onto [0, 4), then apply the piecewise-linear hue map.
    q = 4.0 * _np.mod(args / (2 * _np.pi) + 1, 1)
    sixth = 60.0 / 360
    return _np.select(
        [q < 1, q < 2, q < 3, q < 4],
        [sixth * q,
         sixth * (2.0 * q - 1),
         sixth * (q + 1),
         sixth * 2.0 * (q - 1)],
    )
def _java_junit5_test(name,
                      srcs,
                      test_package = None,
                      deps = [],
                      runtime_deps = [],
                      testonly = True,
                      classpath_resources = None,
                      **kwargs):
    """Establish Bazel targets and configuration for a JUnit 5 test case.

    Args:
      name: target name for the generated java_test.
      srcs: test source files.
      test_package: Java package for the JUnit ConsoleLauncher to select;
        required — `fail()`s when omitted.
      deps: compile-time deps; the JUnit Jupiter artifacts are appended.
      runtime_deps: runtime deps; the JUnit Platform artifacts are appended.
      testonly: marks the target test-only (default True).
      classpath_resources: classpath resources; defaults to the shared
        logback config.
      **kwargs: forwarded to _java_test, minus the filtered keys below.
    """
    # These would conflict with the ConsoleLauncher main_class/args set
    # below, so drop them if a caller passes them.
    FILTER_KWARGS = [
        "main_class",
        "use_testrunner",
        "args",
    ]
    for arg in FILTER_KWARGS:
        if arg in kwargs.keys():
            kwargs.pop(arg)
    junit_console_args = []
    if test_package:
        junit_console_args += ["--select-package", test_package]
    else:
        fail("must specify 'test_package'")
    _java_test(
        name = name,
        srcs = srcs,
        # JUnit 5 runs through its own ConsoleLauncher, not Bazel's runner.
        use_testrunner = False,
        main_class = "org.junit.platform.console.ConsoleLauncher",
        args = junit_console_args,
        testonly = testonly,
        deps = dedupe_deps_(deps + [
            maven("org.junit.jupiter:junit-jupiter-api"),
            maven("org.junit.jupiter:junit-jupiter-engine"),
            maven("org.junit.jupiter:junit-jupiter-params"),
            maven("org.junit.platform:junit-platform-suite-api"),
            maven("org.apiguardian:apiguardian-api"),
            maven("org.opentest4j:opentest4j"),
            maven("com.google.guava:guava"),
        ]),
        runtime_deps = dedupe_deps_(runtime_deps + [
            maven("org.junit.platform:junit-platform-commons"),
            maven("org.junit.platform:junit-platform-console"),
            maven("org.junit.platform:junit-platform-engine"),
            maven("org.junit.platform:junit-platform-launcher"),
            maven("org.junit.platform:junit-platform-suite-api"),
            maven("ch.qos.logback:logback-classic"),
        ]),
        classpath_resources = (classpath_resources or [
            "@gust//javatests:logback.xml",
        ]),
        **kwargs
    )
def validate_max_synapse_rates(parsed_args):
    """Run the check.

    Either replays previously saved measurement data (use_saved_data) or
    measures every synapse on hardware and saves the results, then plots.

    :param parsed_args: argparse namespace; only ``use_saved_data`` is read.
    """
    use_saved_data = parsed_args.use_saved_data
    max_rates_2 = load_txt_data(DATA_DIR + "max_rates_2.txt")  # max rates / 2
    syn_n = len(max_rates_2)
    if use_saved_data:
        # Replay a previous run from disk.
        low_rates = load_txt_data(DATA_DIR + "low_rates.txt")
        low_rates_o = load_txt_data(DATA_DIR + "low_rates_o.txt")
        high_rates = load_txt_data(DATA_DIR + "high_rates.txt")
        high_rates_o = load_txt_data(DATA_DIR + "high_rates_o.txt")
    else:
        # Fresh hardware sweep: measure each synapse, then persist results.
        low_rates = np.zeros(syn_n)
        low_rates_o = np.zeros(syn_n)
        high_rates = np.zeros(syn_n)
        high_rates_o = np.zeros(syn_n)
        build_net()
        set_analog()
        set_hal()
        start_time = get_time()
        for syn_idx in range(syn_n):
            print("Testing synapse {}".format(syn_idx))
            # vdata layout: (low_rate, low_rate_o, high_rate, high_rate_o).
            vdata = validate_syn(
                syn_idx, max_rates_2[syn_idx], DEFAULT_TEST_TIME, DEFAULT_SLOP_TIME)
            low_rates[syn_idx], low_rates_o[syn_idx] = vdata[0:2]
            high_rates[syn_idx], high_rates_o[syn_idx] = vdata[2:4]
            report_time_remaining(start_time, syn_idx)
        np.savetxt(DATA_DIR + "low_rates.txt", low_rates)
        np.savetxt(DATA_DIR + "low_rates_o.txt", low_rates_o)
        np.savetxt(DATA_DIR + "high_rates.txt", high_rates)
        np.savetxt(DATA_DIR + "high_rates_o.txt", high_rates_o)
    plot_data(max_rates_2, low_rates, low_rates_o, high_rates, high_rates_o)
    plt.show()
def show(unmask: bool) -> None:
    """Show the account information stored in the plan engine."""
    account = retrieve_account()
    if not account:
        click.secho("Account information is not set")
        return
    # Mask the PAT token unless the caller explicitly asked to reveal it.
    token = account.pat_token if unmask else _mask_token(account.pat_token)
    click.secho("PAT token: %s" % token)
def get_orbs(fp, orbs, truncate=False, tol=1e-8):
    """Return the list of requested Kohn-Sham orbitals.

    Args:
      fp (h5py.File): wf h5 file
      orbs (list): a list of 3-tuples, each tuple specifies the KS state
        by (kpoint/twist, spin, band) i.e. (ik, ispin, ib)
      truncate (bool, optional): remove PWs with ``small'' coefficient
      tol (float, optional): define ``small'' as |ck|^2 < tol
    Returns:
      (list, list): per-orbital k-vectors and PW coefficients.
    """
    from qharv.inspect import axes_pos
    gvecs = get(fp, 'gvectors')
    qvecs = get_twists(fp)
    raxes = axes_pos.raxes(get(fp, 'axes'))
    kvecsl = []
    psigl = []
    for ik, ispin, ib in orbs:
        # PW basis for this twist, in reciprocal-lattice coordinates.
        kvecs = np.dot(gvecs + qvecs[ik], raxes)
        # PW coefficients of the requested orbital.
        psig = get_orb_in_pw(fp, ik, ispin, ib)
        if truncate:
            # Keep only PWs carrying non-negligible weight.
            keep = (psig.conj() * psig).real > tol
        else:
            keep = np.ones(len(kvecs), dtype=bool)
        kvecsl.append(kvecs[keep])
        psigl.append(psig[keep])
    return kvecsl, psigl
def geturlcgivars(baseurl, port):
    """
    Extract CGI variables from baseurl
    >>> geturlcgivars("http://host.org/base", "80")
    ('host.org', '80', '/base')
    >>> geturlcgivars("http://host.org:8000/base", "80")
    ('host.org', '8000', '/base')
    >>> geturlcgivars('/base', 8000)
    ('', '8000', '/base')
    >>> geturlcgivars("base", '8000')
    ('', '8000', '/base')
    >>> geturlcgivars("http://host", '8000')
    ('host', '8000', '/')
    >>> geturlcgivars("http://host/", '8000')
    ('host', '8000', '/')
    """
    parsed = util.url(baseurl)
    host = parsed.host or ''
    # A port embedded in the URL overrides the supplied default.
    effective_port = parsed.port if parsed.port else port
    path = parsed.path or ""
    if not path.startswith('/'):
        path = '/' + path
    return host, str(effective_port), path
def insert_rails(file_path, half_gauge, drive_right = True, mr = 12.5, copy = True): # TODO - print 'progress messages' for done steps
"""
Deduces all rails' vertices from graph data and adds them to the json blueprint
half_gauge represents distance between center of the road and of the vehicle,
drive_right characterizes whether cars in network drive right (eg. Germany) or left (eg. UK)
mr stands for 'minimal radius' - minimal turning radius of the network in meters
(...default - 12.5 meters, German car standard)
Function's present inner structure is:
(0 - define nested strip())
1 - validate input JSON dict (must contain valid Vertices and Edges)
2 - iterate through all intersections (graph vertices)
* load and order all its neighbours (establish edges)
* double each edge into 'intersection_rails' to find new rails' points
* find 'intersection_rails' crossings - Rails: Vertices xys
3 - add found Rails' Vertices into emerging pyJSON dict
4 - deduce rails (edges) from vertices, add into pyJSON dict
5 - add Shapes of complex rails (Rails' Edges) derived from complex graph edges
* divide Shape by type into polyline and bezier graph segments
* double shape segments one by one, then glue them back together in correct driving order
* Chop off multi-segment Shapes' inner overcrossings and insert Shapes to rails_dict
6 - Fix Shapes' incorrect, ignored 1st points and overflowing end bits
* Correct Shape's first, incorrect, ignored point
* Chop off Shapes' overflowing last points
7 - Recompute beziers to fit inside Offsets
8 - Smoothen out all corners and insert crossings
9 - Write finished "Rails" pyJSON into the source .json file
* (OPTIONAL) Create legible .txt copy
Includes: strip()
Uses: abstract_geometry.offset_polyline(), .offset_bezier(), .intersection_point(), .bezier_crossing(), .orient_line(), .bezier_intersection()
read_json.get_dict(), .order_neighbours(), .smoothen_rails()
.replicate_json
Input: json file (path str), half_gauge (int) IN METERS, drive_right (bool), step (float), mr (False or float - meters) copy (bool)
Output: modifies json file - adds "Rails" dictionary
"""
""" ~~~~~~~~~~ (0 - define nested strip()) ~~~~~~~~~~ """
# strip():
# Strips vertice indexes from label ("ACE41" -> "ACE")
#
# Input: label (str)
# Output: stripped label (str)
def strip(label):
label = list(label)
ciphers = set([str(num) for num in range(10)])
for char in label[::-1]:
if char in ciphers:
label.pop()
return "".join(label)
""" ~~~~~~~~~~ 1 - validate input JSON dict (must contain valid Vertices and Edges) ~~~~~~~~~~ """
try:
json_data = get_dict(file_path)
vertices = json_data["Vertices"]
edges = json_data["Edges"]
assert vertices and edges, "JSON invalid (\"Vertices\" or \"Edges\" could not be loaded). ~ write_json.insert_rails()"
except KeyError:
print("JSON invalid (\"Vertices\" or \"Edges\" could not be loaded). ~ write_json.insert_rails()")
assert 1 < half_gauge < 10, "given road width (half_gauge) is out of this world! ~ write_json.insert_rails()"
""" ~~~~~~~~~~ 2 - iterate through all intersections (graph vertices) ~~~~~~~~~~ """
# 2.1 # finding rail points of all intersections:
rails_dict = {"Vertices":{}, "Edges": []} # will be written inside JSON
for vertice_key in vertices:
vertice_points = [] # cross points of intersection' rails
vertice = vertices[vertice_key]
# edges stemming from vertice must be in counter-clockwise order...
intersection_rails = [] # list of [xy1, xy2] pairs (1 pair for each rail)
# 2.2 # doubling edges into rails, ALWAYS: 1, right-of-edge 2, left-of-edge:
# orders neighbour vertices counter-clockwise (clockwise == False)
neighbours = order_neighbours(vertice_key, vertices, False)
for neighbour in neighbours: # go through all neighbours
try:
doubled_edge = next(edge
for edge in edges
if edge["Vertices"] == [vertice_key, neighbour]
or edge["Vertices"] == [neighbour, vertice_key]
)
except StopIteration:
print("Could not find edge for", [vertice_key, neighbour], "- meaning:")
print("Invalid entry data (Vertices' Neighbours don't cross-match). ~ write_json.insert_rails()")
return False
xy1 = vertice["Coordinates"]
neighbour_index = 1 if doubled_edge["Vertices"].index(neighbour) == 1 else -2 # second or second last (is edge oriented vertice -> neighbour?)
if "Shape" not in doubled_edge.keys(): # simple line
xy2 = vertices[neighbour]["Coordinates"]
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, True)) # First append right rail
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, False)) # Then append left rail
elif not isinstance(doubled_edge["Shape"][1-abs(neighbour_index)][0], list): # 1st (or -1st for inverted) segment is a line (Ie. is not Bezier)
xy2 = doubled_edge["Shape"][neighbour_index] # 2nd polyline control point
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, True)) # First append right rail
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, False)) # Then append left rail
else: # edge is bezier, append doubled control points as a rail
points = doubled_edge["Shape"][1-abs(neighbour_index)][::3-abs(neighbour_index)*2] # sliced either ::1 (no change) or ::-1 (reversed)
assert len(points) == 3, "Only quadratic (3-control-points) beziers are allowed. ~ write_json.insert_rails()"
# only append first offset subbeziers:
if mr:
iterations = int((distance(*points[:2]) + distance(*points[1:])) // (mr * 3)) # adjust subdivision to the length of bezier's control polygon
else:
iterations = 2
iterations = 2 if iterations > 2 else iterations
intersection_rails.append(offset_bezier(points, half_gauge, True, split_iterations=iterations)) # First append right rail
intersection_rails.append(offset_bezier(points, half_gauge, False, split_iterations=iterations)) # Then append left rail
# shuffle first rail to the end to change order to L, R-L, R-L, (...) , R-L, R
first_vertice = intersection_rails.pop(0)
intersection_rails.append(first_vertice)
# 2.3 # find 'intersection_rails' crossings - Rails: Vertices xys
# ...first found intersection point (or intersection line) is always between: 1st vertice's left rail and 2nd vertice's right rail
for i in range(len(intersection_rails) // 2):
if len(neighbours) == 1: # this vertice is a dead-end, no intersection point
# find out which end of edge is further from graph:
for rail in intersection_rails:
distances = []
ref_point = vertices[neighbours[0]]["Coordinates"]
if isinstance(rail[0][0], list): # dead-end is bezier
end_points = [rail[0][0], rail[0][-1]]
else: # dead-end is line (edge itself or 1st polyline section)
end_points = rail
for end_point in end_points:
distances.append(distance(end_point, ref_point))
dead_point = rail[0] if distances[0] > distances[1] else rail[1]
vertice_points.append(dead_point)
if isinstance(intersection_rails[2*i][0][0], list) or isinstance(intersection_rails[2*i+1][0][0], list): # at least one bezier in currently computed rail pair
if isinstance(intersection_rails[2*i][0][0], list):
vertice_point = beziers_crossing(intersection_rails[2*i], intersection_rails[2*i+1])[0]
else:
vertice_point = beziers_crossing(intersection_rails[2*i+1], intersection_rails[2*i])[0]
else:
vertice_dict = intersection_point(intersection_rails[2*i], intersection_rails[2*i+1], cut_lines=True)
if vertice_dict:
intersection_rails[2*i] = vertice_dict["line1"]
intersection_rails[2*i+1] = vertice_dict["line2"]
vertice_point = vertice_dict["point"]
else:
vertice_point = False
if vertice_point != False: # point found
vertice_points.append(vertice_point)
else: # lines don't cross
vertice_line = []
for rail in [intersection_rails[2*i], intersection_rails[2*i+1]]:
if isinstance(rail[0][0], list): # line is bezier
rail = orient_line(vertice["Coordinates"], [rail[0][0], rail[0][-1]]) # transforms bezier into properly oriented line
vertice_line.append(rail[0]) # beginning of properly ordered line abstracted from bezier
else:
rail = orient_line(vertice["Coordinates"], rail) # Order lines' points by proximity to vertice point:
vertice_line.append(rail[0]) # beginning of properly ordered line
# Insert beginnings of a rail - [[xy], [xy]] - Vertice Coordinates are a line!
vertice_line = vertice_line[::(drive_right*2)-1] # vertice line counter-clockwise, flip it if drive_right == False (0)
vertice_points.append(vertice_line)
if len(neighbours) == 1: # parallel lines - skip crossing process in step 3 (right below)
for i in range(2):
if isinstance(vertice_points[i][0], list): # vertice_point is bezier
rails_dict["Vertices"][vertice_key + str(i+1)] = {"Coordinates" : vertice_points[i][0]}
else: # vertice_point is part of line
rails_dict["Vertices"][vertice_key + str(i+1)] = {"Coordinates" : vertice_points[i]}
# adhere to "NeighbourVertices" naming convention:
# [first's left rail, counter-clockwise second's right rail] (from present vertice's perspective)
if i == 0:
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"] = [neighbours[0], " "]
else:
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"] = [" ", neighbours[0]]
continue
""" ~~~~~~~~~~ 3 - write Rails' Vertices into emerging pyJSON dict ~~~~~~~~~~ """
# Write JSON vertices:
for i in range(len(vertice_points)):
rails_dict["Vertices"][vertice_key + str(i+1)] = {"Coordinates" : vertice_points[i]}
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"] = [neighbours[i]] # making use of the prior counter-clockwise ordering
try:
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"].append(neighbours[i+1]) # Neighbours: [left rail's, right rails's]
except IndexError: # last intersection - 2nd neighbour is right rail of first neighbour's edge
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"].append(neighbours[0])
""" ~~~~~~~~~~ 4 - deduce rails (edges) from vertices, add into pyJSON dict ~~~~~~~~~~ """
# deduce rails from vertices, thanks to naming convention:
for key in rails_dict["Vertices"].keys(): # add Neighbours list to vertices
rails_dict["Vertices"][key]["Neighbours"] = []
for vertice_label, vertice_data in rails_dict["Vertices"].items():
neighbours = []
label = strip(vertice_label)
# inserting Neighbours in vertices:
searched_neighbours = vertice_data["NeighbourVertices"]
for neighbour_label, neighbour_data in rails_dict["Vertices"].items():
if neighbour_label == vertice_label:
continue
# insert "Rails": "Edges"
if strip(neighbour_label) == searched_neighbours[0] and neighbour_data["NeighbourVertices"][1] == label:
rails_dict["Vertices"][vertice_label]["Neighbours"].insert(0, neighbour_label)
if drive_right == False: # rail Edges format: [start, end]
rails_dict["Edges"].append({"Vertices": [vertice_label, neighbour_label]})
elif strip(neighbour_label) == searched_neighbours[1] and neighbour_data["NeighbourVertices"][0] == label:
rails_dict["Vertices"][vertice_label]["Neighbours"].append(neighbour_label)
if drive_right:
rails_dict["Edges"].append({"Vertices": [vertice_label, neighbour_label]})
""" ~~~~~~~~~~ 5 - add Shapes of complex rails (Rails' Edges) derived from complex graph edges ~~~~~~~~~~ """
# modify the shapes of those Rails edges based on complex graph edges:
# note for direction - first load and compute, eventually reverse order only at the end
complex_edges = {} # format: set(vertice1, vertice2) : [shape]
for edge in json_data["Edges"]: # find complex edges
if "Shape" in edge.keys():
complex_edges[tuple(edge["Vertices"])] = edge["Shape"]
# complex rails' last part wasn't chopped off in step 2, it can only be done ex-post
unchopped_shapes = {} # dict, fotmat: {rail_index : complex rail, ...}
# insert "Shape" into Rails' Edges
for rail_index in range(len(rails_dict["Edges"])): # iterate through all rails
label1 = rails_dict["Edges"][rail_index]["Vertices"][0]
label2 = rails_dict["Edges"][rail_index]["Vertices"][1]
original_labels = [strip(label1), strip(label2)]
if tuple(original_labels) in complex_edges.keys() or tuple(original_labels[::-1]) in complex_edges.keys(): # rail should have complex Shape
original_shape = complex_edges[tuple(original_labels)] if tuple(original_labels) in complex_edges.keys() else complex_edges[tuple(original_labels[::-1])]
# 5.1 # divide doubled Shape into individual polyline and bezier sublines:
shape_segments = [] # Bezier distinguished by virtue of being a nested list, as customary
segment_index = 0
polyline_started = False # algorithm assumes we begin on bezier
for shape_index in range(len(original_shape)): # going through the entire "Shape"
if isinstance(original_shape[shape_index][0], list): # bezier encountered
if polyline_started:
segment_index += 1 # move from polyline to this new segment
polyline_started = False
shape_segments.append([original_shape[shape_index]]) # bezier distinguished by being nested list, as is conventional
segment_index += 1 # move to next segment
else: # polyline point encountered
if polyline_started == False:
shape_segments.append([]) # new segment buffer
polyline_started = True
shape_segments[segment_index].append(original_shape[shape_index])
# 5.2 # double shape segments one by one, then glue them back together in correct driving order (into doubled_segments):
doubled_segments = []
for segment in shape_segments:
if isinstance(segment[0][0], list): # Bezier curve, add shifted control points
if tuple(original_labels) in complex_edges.keys(): # control points are in the right direction
if mr:
iterations = int((distance(*segment[0][:2]) + distance(*segment[0][1:])) // (mr * 3)) # adjust subdivision to the length of bezier's control polygon
else:
iterations = 2
iterations = 2 if iterations > 2 else iterations
subbeziers = offset_bezier(segment[0], half_gauge, drive_right, split_iterations=iterations)
for subbez in subbeziers:
doubled_segments.append([subbez]) # drive_right True -> we want right rail
elif tuple(original_labels[::-1]) in complex_edges.keys(): # control points are in opposite direction - reverse control points, append to start
if mr:
iterations = int((distance(*segment[0][:2]) + distance(*segment[0][1:])) // (mr * 3)) # adjust subdivision to the length of bezier's control polygon
else:
iterations = 2
iterations = 2 if iterations > 2 else iterations
subbeziers = offset_bezier(segment[0][::-1], half_gauge, drive_right, split_iterations=iterations)
wrapped_subbeziers = []
for subbez in subbeziers:
wrapped_subbeziers.append([subbez])
doubled_segments = wrapped_subbeziers + doubled_segments
else: # polyline, add shifted points
if tuple(original_labels) in complex_edges.keys(): # polyline is in right direction
doubled_segments.append(offset_polyline(segment, half_gauge, drive_right)) # drive_right True -> we want right rail
elif tuple(original_labels[::-1]) in complex_edges.keys(): # polyline is in opposite direction - reverse rail points, append to start
doubled_segments = [offset_polyline(segment, half_gauge, 1-drive_right)[::-1]] + doubled_segments # append to front
# 5.4 # Chop off multi-segment Shapes' inner overcrossings and insert Shapes to rails_dict:
if len(doubled_segments) == 1: # just 1 segment
rails_dict["Edges"][rail_index]["Shape"] = doubled_segments[0]
else:
# solve inner crossings, only then insert:
doubled_index = 0
while doubled_index < len(doubled_segments) - 1: # list may dynamically expand, this prevents overflow
segment = doubled_segments[doubled_index]
next_segment = doubled_segments[doubled_index+1]
if isinstance(segment[0][0], list) and isinstance(next_segment[0][0], list): # segments: bezier ; bezier
chop_point = bezier_intersection(segment, next_segment)
last_dict = int(isinstance(segment[0][-1], dict)) # segment ends on dict -> 1 / doesn't -> 0
if chop_point: # interection exists
if chop_point != segment[0][-1]: # interection exists, it's not just a touch
if isinstance(segment[0][-1], dict):
segment[0][-1]["Offsets"][1] = chop_point
else:
segment[0].append({"Offsets":[False, chop_point]})
if chop_point != next_segment[0][-1]: # next bezier needs to be chopped, it's not just a touch
next_segment[0].append({"Offsets":[chop_point, False]})
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
elif segment[0][-1-last_dict] != next_segment[0][0]: # beziers don't touch, insert connecting line (complex_vert alternative)
doubled_segments.insert(doubled_index + 1, [segment[0][-1-last_dict], next_segment[0][0]]) # insert new "poly"line between beziers
doubled_index += 1 # move on to next segment
elif isinstance(segment[0][0], list): # segments: bezier ; line
offsets_dict = segment[0].pop() if isinstance(segment[0][-1], dict) else False
chop_point = bezier_intersection(segment, [next_segment[0], next_segment[1]])
if offsets_dict: # offsets were lost in bezier_intersection()
segment[0].append(offsets_dict)
if chop_point:
if isinstance(segment[0][-1], dict):
segment[0][-1]["Offsets"][1] = chop_point
else:
segment[0].append({"Offsets":[False, chop_point]})
next_segment[0] = chop_point
else:
last_dict = int(isinstance(segment[0][-1], dict)) # segment ends on dict -> 1 / doesn't -> 0
next_segment = [segment[0][-1-last_dict]] + next_segment # insert point at the beginning of next polyline
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
elif isinstance(next_segment[0][0], list): # segments: line ; bezier
chop_point = bezier_intersection(next_segment, [segment[-1], segment[-2]])
if chop_point:
segment[-1] = chop_point
next_segment[0].append({"Offsets":[chop_point, False]})
else:
segment.append(next_segment[0][0]) # append bezier's point to this polyline
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
else: # segments: line ; line
chop_point = intersection_point([segment[-1], segment[-2]], [next_segment[0], next_segment[1]])
if chop_point:
segment[-1] = chop_point
next_segment[0] = chop_point
else:
segment.append(next_segment[0]) # append point to this polyline
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
doubled_index += 1
rails_dict["Edges"][rail_index]["Shape"] = []
for doubled_segment in doubled_segments: # finally insert Shape to rails_dict
if isinstance(doubled_segment[0][0], list): # appending bezier
rails_dict["Edges"][rail_index]["Shape"].append(doubled_segment[0])
else: # appending polyline
rails_dict["Edges"][rail_index]["Shape"] += doubled_segment
unchopped_shapes[rail_index] = rails_dict["Edges"][rail_index] # add for multi-edge corrections in step 6
""" ~~~~~~~~~~ 6 - Fix Shapes' incorrect, ignored 1st points and overflowing end bits ~~~~~~~~~~ """
# chop off last part of complex edges, that ignore intersection point (bit of line or bit of curve):
for rail_index, unchopped_rail in unchopped_shapes.items():
# 6.1 # Correct Shape's first, incorrect, ignored point
vert_label = unchopped_rail["Vertices"][0] # current rail's start vertice label
# load standardized (vertice) xy
if isinstance(rails_dict["Vertices"][vert_label]["Coordinates"][0], list): # Complex vertice [[xy], [xy]]
ignored_start = rails_dict["Vertices"][vert_label]["Coordinates"][1]
else: # Simple point
ignored_start = rails_dict["Vertices"][vert_label]["Coordinates"]
if isinstance(rails_dict["Edges"][rail_index]["Shape"][0][0], list): # correcting 1st control point of bezier
suspect_subbeziers = [] # ignored_start may cut some of those
subbez_index = 0
while isinstance(rails_dict["Edges"][rail_index]["Shape"][subbez_index][0], list): # scans through subbeziers, will run at least once
# create a protected copy to prevent bezier_crossing from overriding Offsets:
if not isinstance(rails_dict["Edges"][rail_index]["Shape"][subbez_index][-1], dict):
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][subbez_index]]
else: # omit offset from creating subbez_copy
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][subbez_index][:-1]]
suspect_subbeziers.append(subbez_copy)
subbez_index += 1
# suspecing last subbez, while condition would overflow
if len(rails_dict["Edges"][rail_index]["Shape"]) == subbez_index:
break
(crossed_start, crossed_index) = beziers_crossing(suspect_subbeziers, ignored_start)
if crossed_start:
# chop off omitted beginning:
rails_dict["Edges"][rail_index]["Shape"] = rails_dict["Edges"][rail_index]["Shape"][crossed_index:] # cut off entirely omitted subbeziers, perhaps TEMP?
offsets = rails_dict["Edges"][rail_index]["Shape"][0][-1] # protect offsets from bezier_intersection()
offsets_defined = isinstance(rails_dict["Edges"][rail_index]["Shape"][0][-1], dict) # Were offsets already inserted?
if offsets_defined:
rails_dict["Edges"][rail_index]["Shape"][0][-1]["Offsets"][0] = ignored_start
else:
rails_dict["Edges"][rail_index]["Shape"][0].append({"Offsets":[ignored_start, False]})
else: # ignored point is not part of bezier
if isinstance(rails_dict["Vertices"][vert_label]["Coordinates"][0], list): # Coordinates already a complex vertice [[xy], [xy]]
rails_dict["Vertices"][vert_label]["Coordinates"][1] = rails_dict["Edges"][rail_index]["Shape"][0][0]
else:
rails_dict["Vertices"][vert_label]["Coordinates"] = [rails_dict["Vertices"][vert_label]["Coordinates"], rails_dict["Edges"][rail_index]["Shape"][0][0]] # Make coordinates a complex vertice
else: # correcting 1st point of polyline
rails_dict["Edges"][rail_index]["Shape"][0] = ignored_start # rewrite 1st polyline point
# 6.2 # Chop off Shapes' overflowing last point
end_vertice = next(vert # finding the vertice at the end of doubled complex edge
for vert in rails_dict["Vertices"]
if vert == unchopped_rail["Vertices"][1]
)
sibling_vertice = next(vert # finding sibling graph vertice (needed to fing its crossed copy)
for vert in rails_dict["Vertices"][end_vertice]["NeighbourVertices"]
if vert != strip(vert_label) # target's NeighbourVertices are *this one* (vert_label) and *the other one*. We want the other one
)
if sibling_vertice != " ": # ignore dead-end vertices, no cutting required there...
try:
ignored_label = next(rail["Vertices"][0] # finding crossed intersection
for rail in rails_dict["Edges"]
if rail["Vertices"][0] == end_vertice
and sibling_vertice in rail["Vertices"][1]
)
except StopIteration:
print("JSON doesn't have properly linked Vertices' Neighbours ~ write_json.insert_rails()")
return False
if isinstance(rails_dict["Vertices"][ignored_label]["Coordinates"][0], list): # is intersection a complex vertice?
ignored_end = rails_dict["Vertices"][ignored_label]["Coordinates"][0]
else:
ignored_end = rails_dict["Vertices"][ignored_label]["Coordinates"]
if type(rails_dict["Edges"][rail_index]["Shape"][-1][-1]) in [list, dict]: # unchopped_rail ends on a bezier
last_dict = isinstance(rails_dict["Edges"][rail_index]["Shape"][-1][-1], dict) # unchopped_rail ends on a bezier with defined Offsets
if last_dict:
offsets = rails_dict["Edges"][rail_index]["Shape"][-1].pop(-1)["Offsets"]
if ignored_end == rails_dict["Edges"][rail_index]["Shape"][-1][-1]: # does part of bezier need to be chopped off at all?
continue
else: # check for beziers' crossing:
end_subbeziers = [] # ignored_start may cut some of those, in end-start order! (respective to vertice)
negative_index = -1
while isinstance(rails_dict["Edges"][rail_index]["Shape"][negative_index][0], list): # iterate through ending subbeziers
# create a protected copy to prevent bezier_crossing from overriding Offsets:
if not isinstance(rails_dict["Edges"][rail_index]["Shape"][negative_index][-1], dict):
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][negative_index]]
else: # omit offset from creating subbez_copy
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][negative_index][:-1]]
end_subbeziers.append(subbez_copy)
negative_index -= 1
# suspecing first subbez, while condition would underflow
if negative_index == -len(rails_dict["Edges"][rail_index]["Shape"]) - 1:
break
(crossed_end, crossed_index) = beziers_crossing(end_subbeziers, ignored_end)
if crossed_end:
# chop off omitted end subbeziers:
if crossed_index != 0: # [:-0] slice would delete whole Shape
rails_dict["Edges"][rail_index]["Shape"] = rails_dict["Edges"][rail_index]["Shape"][:-crossed_index]
if last_dict and crossed_index == 0:
rails_dict["Edges"][rail_index]["Shape"][-1].append({"Offsets":[offsets[0], ignored_end]})
else:
rails_dict["Edges"][rail_index]["Shape"][-1].append({"Offsets":[False, ignored_end]})
elif isinstance(rails_dict["Vertices"][ignored_label]["Coordinates"][0], list): # no intersection, modify complex vertice
rails_dict["Vertices"][ignored_label]["Coordinates"][0] = rails_dict["Edges"][rail_index]["Shape"][-1][-1]
else: # no intersection, complexify vertice
rails_dict["Vertices"][ignored_label]["Coordinates"] = [rails_dict["Edges"][rail_index]["Shape"][-1][-1], rails_dict["Vertices"][ignored_label]["Coordinates"]]
else: # unchopped_rail ends on a polyline:
rails_dict["Edges"][rail_index]["Shape"][-1] = ignored_end
""" ~~~~~~~~~~ 7 - Recompute beziers to fit inside Offsets ~~~~~~~~~~ """
rails_dict["Edges"] = evaluate_offsets(rails_dict["Edges"])
""" ~~~~~~~~~~ 8 - Smoothen out all corners and insert crossings ~~~~~~~~~~ """
rails_dict = smoothen_rails(rails_dict, mr)
json_data["Rails"] = rails_dict
json_data["Vertices"] = vertices
json_data["Edges"] = edges
json_data = add_crossings(json_data, mr, drive_right)
""" ~~~~~~~~~~ 9 - Insert finished "Rails" pyJSON into the source .json file ~~~~~~~~~~ """
with open(file_path, "w", encoding="utf-8") as json_file:
json.dump(json_data, json_file)
print("Added Rails to " + file_path)
# 9.1 #: OPTIONAL 9 - create legible .txt copy)
if copy:
replicate_json(file_path) | 37,227 |
def prepare_log_for_upload(symbolized_output, return_code):
  """Prefix a symbolized log with revision and return-code headers."""
  # Look up the component revisions that produced this build so the
  # uploaded log carries its own provenance.
  app_revision = environment.get_value('APP_REVISION')
  job_name = environment.get_value('JOB_NAME')
  components = revisions.get_component_list(app_revision, job_name)
  component_revisions = (
      revisions.format_revision_list(components, use_html=False) or
      'Not available.\n')

  revisions_header = (
      'Component revisions (build r%s):\n%s\n' % (app_revision,
                                                  component_revisions))
  return_code_header = 'Return code: %s\n\n' % return_code
  return revisions_header + return_code_header + symbolized_output
def bucketize(point, bucket_size):
    """Floor *point* to the next lower multiple of *bucket_size*."""
    bucket_index = math.floor(point / bucket_size)
    return bucket_index * bucket_size
def find_unit(df):
    """Detect the monetary unit used in *df*.

    Adds a boolean ``unit`` column flagging every token that mentions a
    unit, and returns the document-wide unit as a string: one of
    ``"thousand"``, ``"million"``, ``"billion"``, or ``"1"`` when no unit
    (or more than one) was found.
    """
    units = {"thousand": THOUSAND, "million": MILLION, "billion": BILLION}
    # Which unit patterns appear anywhere in the frame?
    matched_units = [
        name for name, pattern in units.items()
        if df.apply(
            lambda col: col.str.contains(pattern, case=True).any(), axis=1
        ).any(axis=None)
    ]
    # Exactly one hit gives a usable unit; zero or several -> undefined ("1").
    doc_unit = matched_units[0] if len(matched_units) == 1 else "1"
    # Flag every token whose text mentions any unit pattern.
    df.loc[:, "unit"] = False
    for pattern in units.values():
        df.loc[df["text"].str.contains(pattern, case=True), "unit"] = True
    return doc_unit
def regular_channels(audio, new_channels):
    """Convert a ``[tensor, sample_rate]`` pair to the target channel count.

    Returns the input unchanged when it already has *new_channels*
    channels. Down-mixing to mono keeps only the first channel's frames;
    otherwise the signal is duplicated along the channel axis
    (``c*f -> 2c*f``).
    """
    sig, sr = audio
    if sig.shape[0] == new_channels:
        return audio
    if new_channels == 1:
        # Keep just the first channel.
        converted = sig[:1, :]
    else:
        # Duplicate the signal to fill the extra channel(s).
        converted = torch.cat([sig, sig], dim=0)
    return [converted, sr]
def is_less_than(maximum: Union[int, float, Decimal]) -> Callable[[Union[int, float, Decimal]], bool]:
    """
    :param maximum: A number
    :return: A predicate that checks if a value is strictly less than
        the given number
    """
    def _check(value: Union[int, float, Decimal]):
        """
        :param value: A number
        :return: Whether the number is less than the maximum
        """
        return value < maximum
    # Give the predicate a descriptive name, e.g. '_is_less_than_5'.
    _check.__name__ = f'_{is_less_than.__name__}_{maximum}'
    return _check
def a_function(my_arg, another):
    """
    Multiply *my_arg* by the number of items in *another*.

    >>> a_function(10, ['a', 'b'])
    20

    :param int my_arg: The scaling factor.
    :param another: Any sized collection.
    :return: ``len(another)`` times *my_arg*.
    :rtype: int
    """
    return len(another) * my_arg
def substitute_vars(oldList, runSet=None, task_file=None):
    """
    Replace special placeholder substrings in a list of strings
    and return the resulting new list.
    """
    # (key, value) pairs: every occurrence of 'key' is replaced by 'value'.
    substitutions = []
    if runSet:
        benchmark = runSet.benchmark
        run_def_name = runSet.real_name if runSet.real_name else ""
        substitutions += [
            ("benchmark_name", benchmark.name),
            ("benchmark_date", benchmark.instance),
            ("benchmark_path", benchmark.base_dir or "."),
            ("benchmark_path_abs", os.path.abspath(benchmark.base_dir)),
            ("benchmark_file", os.path.basename(benchmark.benchmark_file)),
            (
                "benchmark_file_abs",
                os.path.abspath(os.path.basename(benchmark.benchmark_file)),
            ),
            ("logfile_path", os.path.dirname(runSet.log_folder) or "."),
            ("logfile_path_abs", os.path.abspath(runSet.log_folder)),
            ("rundefinition_name", run_def_name),
            ("test_name", run_def_name),
        ]
    if task_file:
        # Task definitions (.yml) and plain input files use different prefixes.
        prefix = "taskdef_" if task_file.endswith(".yml") else "inputfile_"
        substitutions += [
            (prefix + "name", os.path.basename(task_file)),
            (prefix + "path", os.path.dirname(task_file) or "."),
            (prefix + "path_abs", os.path.dirname(os.path.abspath(task_file))),
        ]
    # Every key must be unique.
    assert len({key for (key, _) in substitutions}) == len(substitutions)
    return [util.substitute_vars(s, substitutions) for s in oldList]
def run(csv_file_path, version, local_working_dir):
  """Drive the interactive labeling loop over the rows of a CSV file."""
  label_set, data_set = extract_relevant_columns(csv_file_path)
  # Build a label index and ask the user to input more labels if needed.
  labels_list = _add_labels(list(label_set))
  for row_number, row in enumerate(data_set):
    if row[LABELS_INDEX]:
      continue  # already labeled
    text = row[TEXT_INDEX]
    if not text:
      # Fall back to reading the raw file referenced by the row.
      with open(row[FILE_PATH_INDEX], 'rb') as source:
        text = source.read()
    if not text:
      print('Invalid row {} in file {}'.format(row_number + 1, csv_file_path))
      sys.exit(1)
    # Ask the user for a label id ('d' = done, 's' = skip).
    choice = _get_label_id(text, labels_list)
    if choice == 'd':
      break
    if choice != 's':
      row[LABELS_INDEX] = labels_list[int(choice)]
  create_new_csv(data_set, local_working_dir, version)
def _safe_filename(filename):
    """
    Generates a safe filename that is unlikely to collide with existing objects
    in Google Cloud Storage.

    ``filename.ext`` is transformed into ``filename-YYYY-MM-DD-HHMMSS.ext``;
    a name without an extension becomes ``filename-YYYY-MM-DD-HHMMSS``.
    """
    filename = secure_filename(filename)
    date = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H%M%S")
    if '.' not in filename:
        # Without this guard, the rsplit below yields a single element and
        # the two-name unpacking raises ValueError for extension-less names.
        return "{0}-{1}".format(filename, date)
    basename, extension = filename.rsplit('.', 1)
    return "{0}-{1}.{2}".format(basename, date, extension)
def load_model(model_name, data_dir=''):
    """
    Load and return a trained model
    @param model_name: base name for saved files
    @param data_dir: directory containing trained model
    @return: the reconstructed model with its weights loaded
    """
    # Load json and create model. The with-statement guarantees the file
    # handle is closed even if reading fails (the original manual
    # open()/close() pair leaked the handle on error).
    with open(os.path.join(data_dir, '%s.json' % model_name), 'r') as json_file:
        loaded_model_json = json_file.read()
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights(os.path.join(data_dir, '%s.h5' % model_name))
    return model
def _make_with_custom_variables(func, variables):
  """Calls func while substituting its trainable variables.

  Returns the output of func, but whenever `get_variable` is called it
  replaces each trainable variable with the next tensor from `variables`,
  in order. Non-trainable variables re-use any variables already created.

  Args:
    func: Function to be called.
    variables: A list of tensors replacing the trainable variables.

  Returns:
    The return value of func.
  """
  replacements = collections.deque(variables)

  def _getter(getter, name, **kwargs):
    if not kwargs["trainable"]:
      # Non-trainable: fall through to the real getter, re-using existing vars.
      kwargs["reuse"] = True
      return getter(name, **kwargs)
    # Trainable: hand out the next replacement tensor.
    return replacements.popleft()

  return _wrap_variable_creation(func, _getter)
def create_loss_and_learner(
        model, labels, learning_rate,
        momentum_coef=0.0, wdecay=0.0, nesterov=False,
        gradient_clip_norm=None, gradient_clip_value=None):
    """
    Build the loss function (cross entropy over softmax) and a trainer
    using stochastic gradient descent with momentum.

    Arguments:
        model - imported model
        labels - placeholder for one-hot labels array
        learning_rate - learning rate for trainer
        momentum_coef - coefficient of momentum (default 0.0)
        wdecay - amount of weight decay (default 0.0)
        nesterov - use Nesterov accelerated gradient (default False)
        gradient_clip_norm - target gradient norm (default None)
        gradient_clip_value - value to element-wise clip gradients (default None)

    Returns:
        Loss function (mean for batch)
    """
    # Align the label axes with the model output before comparing them.
    if model.axes.lengths != labels.axes.lengths:
        labels = ng.Transpose(labels)
    assert model.axes.lengths == labels.axes.lengths
    model = ng.cast_axes(model, axes=labels.axes)

    loss = ng.cross_entropy_multi(ng.softmax(model), labels)
    optimizer = GradientDescentMomentum(
        learning_rate, momentum_coef, wdecay,
        gradient_clip_norm, gradient_clip_value, nesterov)
    # Apply the optimizer update, then report the mean batch loss.
    return ng.sequential([optimizer(loss), ng.mean(loss, out_axes=())])
def main_CL():
    """Command line parsing and defaults for the task-branch tool.

    Parses options/arguments, normalizes the code-stream name, then
    dispatches to the per-command handler (addmod, rmcr, merge, clone,
    submit, reject, approve, toscm, ls, walk, status, ...).  Exits the
    process directly on validation errors.  (Python 2 syntax.)
    """
    parser = OptionParser(usage=mainTaskUsage(), version='%s'%version)
    # --- generic options -------------------------------------------------
    parser.add_option("-a", "--cmd", dest="cmd", default="get", help="Command type to use.")
    parser.add_option("-x", "--parm", dest="parm", default="", help="Command parms.")
    parser.add_option("-s", "--cs", dest="cs", default=DEFAULT_STREAM, help="Code Stream of Branch")
    parser.add_option("-b", "--br", dest="br", default="", help="Branch Label")
    parser.add_option("-t", "--tbr", dest="tbr", default="", help="Team Branch Label")
    parser.add_option("-n", "--team", dest="team", default="", help="Team Name")
    parser.add_option("-p", "--priority",dest="priority",default="Low", help="Branch Priority: Low, Medium, High, Urgent, Critical")
    parser.add_option("--status", dest="status", default=None,
                      help="status to set on task branch")
    parser.add_option("--forcenew", dest="force", default=False,
                      action="store_true",
                      help="Bypass certain Mudflow error checks. "
                      "An explanation must be given as to why you are bypassing the check(s)")
    yesnochoices = ['Yes','YES','yes','y','Y','No','NO','no','n','N']
    # --- risk options ----------------------------------------------------
    riskgr = OptionGroup(parser, "Branch Risk Options","Assign risk to the Branches(s) using the following options.")
    riskgr.add_option("-r", "--risk", dest="risk", default=False, action="store_true", help="Designate mod as High Risk")
    riskgr.add_option("-c", "--cmdchg",dest="cmdchg",default=None, action="store", choices=yesnochoices, help="Designate branch as having a Command Change")
    riskgr.add_option("-v", "--volcanochg",dest="volchg",default=None, action="store", choices=yesnochoices, help="Designate branch as having a Volcano Change")
    parser.add_option_group(riskgr)
    # --- description options ---------------------------------------------
    textgr = OptionGroup(parser, "Description Options","Add text description using the options below ")
    textgr.add_option("-d", "--desc", dest="desc", default=False, action="store_true", help="General description details")
    textgr.add_option("-e", "--rdesc", dest="rdesc", default=False, action="store_true", help="Risk description details")
    textgr.add_option("-o", "--cdesc", dest="cdesc", default=False, action="store_true", help="Command Change description details")
    textgr.add_option("-m", "--mdesc", dest="mdesc", default=False, action="store_true", help="Merge description details")
    textgr.add_option("-w", "--part", dest="part", default=False, action="store_true", help="By Pass mudflow")
    textgr.add_option("-f", "--path", dest="path", default="./details.txt", help="Path to details file.")
    parser.add_option_group(textgr)
    parser.add_option("-u", "--user", dest="user", default="my", help="User alias on sForce")
    parser.add_option("-z", "--trace", dest="trace", default="soap.out", help="SOAP output trace file.")
    parser.add_option("--debug", dest='debug', action="count", help="The debug level, use multiple to get more.")
    parser.add_option("-l", "--listBranchPath", dest="listBranchPath", default=None, help="no help available")
    # On Windows, argv is passed explicitly to optparse.
    if (sys.platform == 'win32'):
        (options, args) = parser.parse_args(args=sys.argv)
    else:
        (options, args) = parser.parse_args()
    # NOTE(review): options.debug is None unless --debug is given; `None > 1`
    # is False under Python 2, so this works there but would break on Python 3.
    if options.debug > 1:
        msg = []
        msg.append(' cmd %s, parms %s' %(options.cmd, options.parm))
        msg.append(' stream %s, branch %s, team %s' %(options.cs, options.br, options.tbr))
        msg.append(' risk %s, cmdchg %s' %(options.risk, options.cmdchg))
        msg.append(' desc %s, risk desc %s, cmd desc %s merge desc %s' %(options.desc, options.rdesc, options.cdesc, options.mdesc))
        msg.append(' path %s, trace %s, debug %s' %(options.path,options.trace ,options.debug))
        msg.append(' args: %s' %args)
        print '\n'.join(msg)
    else:
        options.debug = 0
    if len(args) > 0:
        st = time.time()
        parm = options.parm
        query = args[1:]
        # parse the stream
        if options.cs in [None,'']: stream = DEFAULT_STREAM
        else: stream = options.cs
        stream = str(stream).strip()
        stream = stream.lower()
        # Accept "blastNN" aliases: strip the prefix for legacy numeric streams.
        if stream[:5] == "blast" and \
           stream not in ACTIVE_STREAMS and \
           stream[5:] in LEGACY_STREAMS:
            stream = stream[5:]
            print "Stream %s" %stream
        elif stream in LEGACY_STREAMS:
            # honor legacy "number only" streams for 4 and lower.
            if stream == '5':
                stream = 'blast%s' %stream
            #print "Error: '%s' is not a valid stream." %(stream)
            #sys.exit(1)
            pass
        elif "blast%s" %stream in ACTIVE_STREAMS:
            print "Error: '%s' is not a valid stream. You must use the full name, 'blast%s'." %(stream, stream)
            sys.exit(1)
        elif stream not in ACTIVE_STREAMS and stream != '':
            print "Error: '%s' is not a valid stream." %(stream)
            sys.exit(1)
            pass
        # --- per-command validation and dispatch -------------------------
        if options.cmd in ['mkcr','addmod','addcr','lncr']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %addModUsage('You must provide a least a valid stream, branch or label and a CR number!')
                sys.exit()
            elif validateBranch(options.br) is False:
                print '%s' %addModUsage('Branch or label "%s" contains invalid characters!\nOnly letters (upper and lower case), numbers, period (.) and underscore (_)\nare allowed.' %options.br)
                sys.exit()
            elif options.team in [None,'']:
                print '%s' %addModUsage("You must provide a team name using -n.\nView valid team names using the 'teams' command.\nTo really create an individual task branch, use '-n none'")
                sys.exit()
            elif options.cmdchg is None:
                print '%s' %addModUsage("You must specify whether or not your branch has a command change with '-c (yes|no)'")
                sys.exit()
            elif options.volchg is None:
                print '%s' %addModUsage("You must specify whether or not your branch has a volcano change with '-v (yes|no)'")
                sys.exit()
            branch = options.br
            addModCL(branch, stream, query, options, st)
        elif options.cmd in ['rmcr','rmmod','delmod','delcr']:
            print "doing rmcr"
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %rmModUsage('You must provide a least a valid stream and branch or label!')
                sys.exit()
            branch = options.br
            rmModCL(branch, stream, query, options, st)
        elif options.cmd in ['mvbr','merge']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %mvTBUsage('You must provide a least a branch label!')
                sys.exit()
            branch = options.br
            mvBranchCL(branch, stream, query, options)
        elif options.cmd in ['clone']:
            if options.br in [None,''] or stream in [None,''] \
               or parm in [None, '']:
                print '%s' %cloneTbUsage('You must provide a task branch, stream and a new stream to clone into')
                sys.exit()
            branch = options.br
            cloneTbCL(branch, stream, parm, options, st)
        elif options.cmd in ['submit','submitbranch']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %submitBranchUsage('You must provide a least a valid stream and branch or label!')
                sys.exit()
            branch = options.br
            submitBranchCL(branch, stream, query, options)
        elif options.cmd in ['reject','rejectbranch']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %rejectBranchUsage('You must provide a least a valid stream and a branch or label!')
                sys.exit()
            branch = options.br
            rejectBranchCL(branch, stream, query, options)
        elif options.cmd in ['approve','approvebranch']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %approveBranchUsage('You must provide a least a valid stream and a branch or label!')
                sys.exit()
            branch = options.br
            approveBranchCL(branch, stream, query, options)
        elif options.cmd in ['toscm','submitscm']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %submitToSCMUsage('You must provide a least a valid stream and a branch or label!')
                sys.exit()
            branch = options.br
            submitToSCMCL(branch, stream, query, options)
        elif options.cmd in ['ls','list']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %listBranchUsage('You must provide a least a valid stream and branch or label!')
                sys.exit()
            branch = options.br
            listBranchCL(branch, stream, query, options)
        elif options.cmd in ['lsbrs','listbranches']:
            if query in [None,'']:
                print lsBRsUsage()
                sys.exit()
            user = options.user
            if options.parm not in ['min', 'list','info','all']: show = 'list'
            else: show = options.parm
            n = getMyBranches(stream, user, show, options)
            if n == 0:
                print lsBRsUsage()
        elif options.cmd in ['listapprovals']:
            # NOTE(review): `query` is a list (args[1:]), so int(query) always
            # raises and num falls back to 2000 -- confirm the intent here.
            try: num = int(query)
            except: num = 2000
            getALLBranchApprovals(num, options)
        elif options.cmd in ['lsba','myba','myapprovals']:
            if options.parm not in ['del', 'list','info']: do = 'list'
            else: do = options.parm
            if do == 'list':
                getMyBranchApprovals(options)
            else:
                print 'I will not do %s'%do
        elif options.cmd in ['walk','sfwalk']:
            # Default look-back window is 432000 seconds (5 days).
            secsAgo = int(options.parm)
            if secsAgo in [None,'',0]: secsAgo = 432000
            getLatestBA(options, secsAgo)
            #walkSFCL(secsAgo, stream options)
        elif options.cmd in ['scmreject']:
            if options.br in [None,''] or stream in [None,'']:
                print '%s' %scmRejectBranchUsage('You must provide a least a valid stream and a branch or label!')
                sys.exit()
            branch = options.br
            team = None
            if options.team != "":
                team = options.team
            scmRejectBranchCL(branch, stream, options, team=team, st=st)
        elif options.cmd in ['stat', 'status']:
            if options.br in [None,''] or stream in [None,'']:
                msg = 'You must provide at least a valid stream and task branch name!'
                print msg
                sys.exit()
            setTaskBranchStatusCL(stream, options.br, options)
        else:
            # Unknown command with args: fall through to a generic search.
            doSomething('search', query, options.parm, options)
            msg = 'doing nothing yet, I can do it twice if you like'
            print msg
            if options.parm not in ['min', 'line']:
                msg = ' Took a total of %3.2f secs -^' %(time.time()-st)
                print msg
    elif options.cmd in ['taskhelp']:
        print mainTaskUsage()
        sys.exit()
    else:
        msg = '%s' %usage(options.cmd)
        print msg
def get_opentsdb_config():
    """Read and parse OpenTSDB settings from ``config.ini``.

    Returns a dict with keys ``OPENTSDB_URL``, ``OPENTSDB_METRICS`` (a
    list of metric names) and ``OPENTSDB_TOKEN``.  Falls back to
    defaults when the config file is absent; exits the process when the
    file exists but a required option is missing.
    """
    # Compute the path once instead of repeating the join expression.
    config_path = os.path.abspath(os.path.join(__file__, os.pardir, "config.ini"))
    if os.path.exists(config_path):
        config_parser = ConfigParser.SafeConfigParser()
        config_parser.read(config_path)
        try:
            opentsdb_url = config_parser.get('opentsdb', 'opentsdb_server_url')
            opentsdb_token = config_parser.get('opentsdb', 'token')
            opentsdb_metrics = config_parser.get('opentsdb', 'metrics')
        except ConfigParser.NoOptionError:
            logger.error(
                "Agent not correctly configured. Check config file.")
            sys.exit(1)
        if len(opentsdb_url) == 0:
            logger.warning(
                "Agent not correctly configured(OPENTSDB_URL). Check config file. Using \"127.0.0.1:4242\" as default.")
            opentsdb_url = "http://127.0.0.1:4242"
        if len(opentsdb_metrics) != 0:
            opentsdb_metrics = opentsdb_metrics.split(",")
        else:
            opentsdb_metrics = []
        opentsdb_config = {
            "OPENTSDB_URL": opentsdb_url,
            "OPENTSDB_METRICS": opentsdb_metrics,
            "OPENTSDB_TOKEN": opentsdb_token
        }
    else:
        logger.warning("No config file found. Using defaults.")
        opentsdb_config = {
            "OPENTSDB_URL": "http://127.0.0.1:4242",
            # Empty list (not ""), so the type matches the parsed branch.
            "OPENTSDB_METRICS": [],
            "OPENTSDB_TOKEN": ""
        }
    return opentsdb_config
def test_copying():
    """Check section 5.3.8: copy operations must raise UsePythonMethod.

    :return: None
    """
    source = my_object("mo")
    target = my_object("rhs")
    # 5.3.8.1 -- copy() is forbidden in favour of Python-level copying.
    with pytest.raises(error_classes.UsePythonMethod):
        source.copy(target)
    # 5.3.8.2 -- do_copy() likewise.
    with pytest.raises(error_classes.UsePythonMethod):
        source.do_copy(target)
def mesh_subdivide_tri(mesh, k=1):
    """Subdivide a mesh by inserting a vertex into every face.

    Parameters
    ----------
    mesh : Mesh
        The mesh object that will be subdivided.
    k : int
        Optional. The number of levels of subdivision. Default is ``1``.

    Returns
    -------
    Mesh
        A new subdivided mesh.

    Examples
    --------
    >>> box = Box.from_corner_corner_height([0.0, 0.0, 0.0], [1.0, 1.0, 0.0], 1.0)
    >>> mesh = Mesh.from_shape(box)
    >>> k = 2
    >>> subd = mesh_subdivide_tri(mesh, k=k)
    >>> mesh is subd
    False
    >>> type(mesh) is type(subd)
    True
    >>> k1 = sum(len(mesh.face_vertices(fkey)) for fkey in mesh.faces())
    >>> subd.number_of_faces() == (k1 if k == 1 else k1 * 3 ** (k - 1))
    True
    """
    mesh_type = type(mesh)
    working = mesh_fast_copy(mesh)
    for _ in range(k):
        # Snapshot the face keys first: insert_vertex mutates the face set.
        for fkey in list(working.faces()):
            working.insert_vertex(fkey)
    return mesh_type.from_data(working.data)
def make_projection(proj_params):
    """Build a cartopy Lambert azimuthal equal-area projection from proj4 params.

    introduced in read_resample.ipynb

    Parameters
    ----------
    proj_params: dict
        dictionary with parameters lat_0, lon_0 datum and ellps

    Returns
    -------
    cartopy projection object
    """
    import cartopy.crs as ccrs
    globe = ccrs.Globe(datum=proj_params["datum"],
                       ellipse=proj_params['ellps'])
    return ccrs.LambertAzimuthalEqualArea(
        central_latitude=float(proj_params['lat_0']),
        central_longitude=float(proj_params['lon_0']),
        globe=globe)
def _shape(df):
""" Return DataFrame shape even if is not a Pandas dataframe."""
if type(df) == pandas.DataFrame or type(df) == pandas.Series:
return df.shape
try:
shape = (len(df), len(df.columns))
except Exception as e:
logging.error(e)
raise e
return shape | 37,245 |
def hvplot_line(
    df, title, x, y: List[str], output_dir: Path, vlines=None, save_figure=True, **kwargs
):
    """Draw a line plot with optional vertical marker lines.

    Example:

        hvplot_line(
            df,
            title=col,
            x="time",  # This is index name
            y=col_name,
            vlines=outliers,
            output_dir=args.output_dir / "single",
            save_figure=True,
            width=1500,
            height=500,
        )

    Args:
        df ([type]): Input dataframe
        title ([type]): Graph title
        x ([type]): Column name for x-axis, can be index's name
        y (List[str]): Column name for y-axis
        output_dir (Path): Output dir for html files
        vlines ([type], optional): Vertical lines of interest. Defaults to None.
        save_figure (bool, optional): True to save html file. Defaults to True.

    Returns:
        The holoviews plot object.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    p = df.hvplot(
        x=x,
        y=y,
        title=title,
        kind="line",
        xlabel="Time",
        ylabel="Value",
        size=10,
        grid=True,
        legend=True,
        fontsize=15,
        rot=45,
        **kwargs,
    )
    if vlines is not None:
        # Use a dedicated loop name: the original reused the `x` parameter
        # as its loop variable, shadowing the x-axis column name.
        for vline_x in vlines:
            p = p * hv.VLine(pd.to_datetime(vline_x)).opts(color="red", alpha=0.3)
    if save_figure:
        hvplot.save(p, output_dir / f"{title}.html")
    return p
def load_zstack(fn):
    """Read a z-stack binary file.

    Returns
    -------
    (zstack, zrange) where ``zrange`` is ``[zmin, zmax]``.
    """
    # The header (version, shape, zrange) precedes the raw float32 payload.
    with open(fn, "rb") as fh:
        header = np.fromfile(fh, dtype=header_dtype, count=1, sep="")
        version, shape, zrange = header[0]
        body = np.fromfile(fh, dtype='<f4', sep="")
    return body.reshape(shape), zrange
def Download_ALEXI_from_WA_FTP(local_filename, DirFile, filename,
                               lonlim, latlim, yID, xID, TimeStep):
    """Retrieves ALEXI data

    This function retrieves ALEXI data for a given date from the
    `<ftp.wateraccounting.unesco-ihe.org>`_ server.

    Args:
        local_filename (str): name of the temporary file which contains global ALEXI data.
        DirFile (str): name of the end file with the weekly ALEXI data.
        filename (str): name of the end file.
        latlim (list): [ymin, ymax] (values must be between -60 and 70).
        lonlim (list): [xmin, xmax] (values must be between -180 and 180).
        yID (list): latlim to index.
        xID (list): lonlim to index.
        TimeStep (str): 'daily' or 'weekly' (by using here monthly,
            an older dataset will be used).

    :Example:
        >>> print('Example')
        Example
    """
    # Collect account and FTP information
    ftpserver = "ftp.wateraccounting.unesco-ihe.org"
    user = collect.get_user('FTP_WA')
    username = user['username']
    password = user['password']
    # Download data from FTP
    ftp = FTP(ftpserver)
    ftp.login(username, password)
    # Daily and weekly products live in different FTP directories.
    if TimeStep == "weekly":
        directory = "/WaterAccounting/Data_Satellite/Evaporation/ALEXI/World/"
    if TimeStep == "daily":
        directory = "/WaterAccounting/Data_Satellite/Evaporation/ALEXI/World_05182018/"
    ftp.cwd(directory)
    lf = open(local_filename, "wb")
    ftp.retrbinary("RETR " + filename, lf.write)
    lf.close()
    if TimeStep == "daily":
        # Daily files arrive gzipped as flat little-endian float32 rasters
        # laid out as a 3000 x 7200 global grid.
        collect.Extract_Data_gz(local_filename, os.path.splitext(local_filename)[0])
        raw_data = np.fromfile(os.path.splitext(local_filename)[0], dtype="<f4")
        dataset = np.flipud(np.resize(raw_data, [3000, 7200]))
        # Values are in MJ/m2d so convert to mm/d
        data = dataset[yID[0]:yID[1], xID[0]:xID[1]] / 2.45  # mm/d
        data[data < 0] = -9999
    if TimeStep == "weekly":
        # Open global ALEXI data
        dataset = collect.Open_tiff_array(local_filename)
        # Clip extend out of world data
        data = dataset[yID[0]:yID[1], xID[0]:xID[1]]
        data[data < 0] = -9999
    # make geotiff file (0.05 degree pixels, north-up)
    geo = [lonlim[0], 0.05, 0, latlim[1], 0, -0.05]
    collect.Save_as_tiff(name=DirFile, data=data, geo=geo, projection="WGS84")
    return
def parseFile(path):
    """
    Read sections headed by :SectionName into lists by section name in a dictionary

    blank lines, line preceding and ending whitespace and #Comments are stripped
    """
    sections = {}
    current = None
    # `with` guarantees the handle is closed (the original leaked it).
    with open(pathPrefix() + path, 'r') as f:
        for raw in f:
            # Remove comments
            comment_at = raw.find('#')
            if comment_at != -1:
                raw = raw[:comment_at]
            # Strip excess whitespace
            line = raw.strip()
            if not line:
                continue
            if line[0] == ':':
                current = []
                sections[line[1:]] = current
            elif current is not None:
                # Lines before any section header are silently dropped.
                current.append(line)
    return sections
def test_kubernetes():
    """ Test import of system_config file for kubernetes

    Exercises cr.parse_system_config / create_cgcs_config_file / validate
    against a sample kubernetes system_config: the happy path plus several
    invalid or optional CLUSTER_NETWORK / DNS / docker permutations.
    """
    # Create the path to the system_config file
    systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "system_config.kubernetes")
    # Test import and generation of answer file
    _test_system_config(systemfile)
    # Test CLUSTER_NETWORK start address specified without end address
    system_config = cr.parse_system_config(systemfile)
    system_config.set('CLUSTER_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)
    # Test CLUSTER_NETWORK end address specified without start address
    system_config = cr.parse_system_config(systemfile)
    system_config.set('CLUSTER_NETWORK', 'IP_END_ADDRESS', '192.168.204.200')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)
    # Test detection of overspecification of CLUSTER network addresses
    system_config = cr.parse_system_config(systemfile)
    system_config.set('CLUSTER_NETWORK', 'IP_FLOATING_ADDRESS',
                      '192.168.206.103')
    system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_0_ADDRESS',
                      '192.168.206.106')
    system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_1_ADDRESS',
                      '192.168.206.109')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)
    # Test absence of optional DNS configuration (must still validate)
    system_config = cr.parse_system_config(systemfile)
    system_config.remove_section('DNS')
    cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                               validate_only=True)
    validate(system_config, DEFAULT_CONFIG, None, False)
    # Test absence of optional docker proxy configuration
    system_config = cr.parse_system_config(systemfile)
    system_config.remove_section('DOCKER_PROXY')
    cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                               validate_only=True)
    validate(system_config, DEFAULT_CONFIG, None, False)
    # Test absence of optional docker registry configuration
    system_config = cr.parse_system_config(systemfile)
    system_config.remove_section('DOCKER_REGISTRY')
    cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                               validate_only=True)
    validate(system_config, DEFAULT_CONFIG, None, False)
def setName(name):
    """
    Sets the name of the robot.

    This is cleared with a power cycle and displayed on the robot screen
    during idle times.  The name is shortened to at most 11 characters.

    Args:
        name (any): Name to set for the robot. Will be cast to a string

    Returns:
        None
    """
    truncated = str(name)[:11]
    # Append a terminating 0 byte after the encoded name.
    payload = stringToBytes(truncated) + [0]
    return _rc.writeAttribute(OPTYPE.ROBOT_NAME, payload)
def get_cmd_output(cmd):
    """Run *cmd* in a shell and return its combined stdout/stderr as text.

    Output of a failing command is still returned rather than raised.
    Bytes are decoded as UTF-8 with a latin-1 fallback.
    """
    try:
        raw = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        # Non-zero exit: keep whatever the command printed.
        raw = err.output
    for encoding in ("utf-8", "latin1"):
        try:
            return raw.decode(encoding)
        except UnicodeDecodeError:
            continue
def twoBodyCMmom(m_0, m_1, m_2):
    """Relative momentum for the decay 0 -> 1 + 2.

    Evaluates p = sqrt((m0-m1-m2)(m0+m1+m2)(m0-m1+m2)(m0+m1-m2)) / (2 m0),
    the factored Kallen form of the two-body breakup momentum.  Entries
    where the product is non-positive (e.g. below-threshold masses from
    bad data) yield 0.0 instead of a NaN from sqrt.
    """
    M12S = m_1 + m_2
    M12D = m_1 - m_2
    if hasattr(M12S, "dtype"):
        # Match m_0's dtype to the tensor inputs so the TF ops broadcast.
        m_0 = tf.convert_to_tensor(m_0, dtype=M12S.dtype)
    # m_eff = tf.where(m_0 > M12S, m_0, M12S)
    # p = (m_eff - M12S) * (m_eff + M12S) * (m_eff - M12D) * (m_eff + M12D)
    # if p is negative, which results from bad data, the return value is 0.0
    # print("p", tf.where(p==0), m_0, m_1, m_2)
    p = (m_0 - M12S) * (m_0 + M12S) * (m_0 - M12D) * (m_0 + M12D)
    zeros = tf.zeros_like(m_0)
    ret = tf.where(p > 0, tf.sqrt(p) / (2 * m_0), zeros)
    return ret
def svn_repos_post_commit_hook(*args):
    """svn_repos_post_commit_hook(svn_repos_t repos, apr_pool_t pool) -> char

    SWIG-style wrapper: delegates directly to the C binding in ``_repos``.
    """
    return _repos.svn_repos_post_commit_hook(*args)
def add_reconstruction_summaries(images, reconstructions, prebinary,
                                 num_imgs_to_visualize=8):
    """Adds image summaries.

    Args:
        images: Batch of original images.
        reconstructions: Batch of reconstructed images matching `images`.
        prebinary: Pre-binarization code tensor, or None to skip the histogram.
        num_imgs_to_visualize: How many image pairs to include in the summary.
    """
    # Stack originals next to reconstructions into one summary image.
    reshaped_img = stack_images(images, reconstructions, num_imgs_to_visualize)
    tf.summary.image('real_vs_reconstruction', reshaped_img, max_outputs=1)
    if prebinary is not None:
        tf.summary.histogram('prebinary_codes', prebinary)
def run(one_worker_per_rse=False, once=False, rses=None, scheme=None, all_os_rses=False, older_than=30, sleep_time=1):
    """
    Starts up the injector threads.

    :param one_worker_per_rse: If True, one worker per RSE; Otherwise, one worker for all RSEs.
    :param once: If True, only runs one iteration of the main loop.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param all_os_rses: All Objectstore RSEs.
    :param older_than: List control: older objects more than this value of days to list.
    :param sleep_time: Time to sleep between iterations (passed to the
        injector; presumably seconds -- TODO confirm, the original said "Days").
    """
    logging.info('main: starting processes')
    # None instead of a mutable [] default; behaves identically for callers.
    if rses is None:
        rses = []
    if all_os_rses:
        rses = [rse['rse'] for rse in rse_core.list_rses()
                if rse['rse'].endswith('_ES')]
    threads = []
    if one_worker_per_rse:
        total = len(rses)
        for worker, rse in enumerate(rses):
            kwargs = {'once': once, 'rses': [rse], 'scheme': scheme, 'worker_number': worker, 'total_workers': total,
                      'older_than': older_than, 'sleep_time': sleep_time}
            threads.append(threading.Thread(target=injector, kwargs=kwargs,
                                            name='Worker: %s, Total_Workers: %s' % (worker, total)))
    else:
        kwargs = {'once': once, 'rses': rses, 'scheme': scheme, 'older_than': older_than, 'sleep_time': sleep_time}
        threads.append(threading.Thread(target=injector, kwargs=kwargs,
                                        name='Worker: %s, Total_Workers: %s' % (0, 1)))
    for t in threads:
        t.start()
    # Guard against an empty worker list (one_worker_per_rse with no RSEs),
    # which made the original raise IndexError on threads[0].
    while threads and threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
def get_window_size():
    """Return the current display width and height as strings.

    Parses ``xrandr --current`` for the active mode(s).  With several
    active displays, two lists are returned instead of two strings.
    """
    # Single xrandr invocation: the original ran the pipeline twice (once
    # for width, once for height), which could race a resolution change.
    modes = os.popen(
        "xrandr --current | grep '*' | uniq | awk '{print $1}'").read().strip(
        "\n")
    widths, heights = [], []
    for mode in modes.split('\n'):
        # Modes look like "1920x1080".
        w, _, h = mode.partition('x')
        widths.append(w)
        heights.append(h)
    if len(widths) > 1:
        return widths, heights
    return widths[0], heights[0]
def test_geo_ops_smoke(backend, fn_expr):
    """Smoke tests for geo spatial operations.

    Only checks that the expression compiles to a non-empty string; the
    query is not executed and results are not validated here.
    """
    geo_table = backend.table('geo')
    assert fn_expr(geo_table).compile() != ''
def slot_selection_is_free(effect):
    """Return True when the activity behind *effect* uses free slot selection.

    all slots are selected when participant applies
    """
    return effect.instance.activity.slot_selection == 'free'
def get_consumption_tax(amount, tax_rate, decimal_type):
    """Return the consumption tax for *amount* at *tax_rate*.

    :param amount: taxable amount; falsy values (None, 0, '') yield 0.
    :param tax_rate: tax rate to apply.
    :param decimal_type: rounding mode forwarded to ``get_integer``.
    :return: rounded tax value.
    """
    if amount:
        return get_integer(decimal_type, float(amount) * float(tax_rate))
    return 0
def homology(long_sequence, short_sequence):
    """Find the window of *long_sequence* most similar to *short_sequence*.

    Every window of ``len(short_sequence)`` characters is compared
    position-by-position with the short sequence; the earliest window with
    the highest number of matching characters wins.

    :param long_sequence: str
    :param short_sequence: str
    :return: str, the best-matching strand, or '' when nothing matches
    """
    window = len(short_sequence)
    best_score = 0
    best_window = ''
    for start in range(len(long_sequence) - window + 1):
        score = sum(
            1 for offset in range(window)
            if long_sequence[start + offset] == short_sequence[offset]
        )
        # Strict '>' keeps the earliest window on ties, and a score of
        # zero never beats the initial value -- preserving the '' fallback.
        if score > best_score:
            best_score = score
            best_window = long_sequence[start:start + window]
    return best_window
def oneliner_to_phylip(line):
    """Convert a one-liner alignment string to phylip format.

    The input is ``name1,SEQ1,name2,SEQ2,...,;`` -- comma-separated
    name/sequence pairs terminated by a trailing comma and semicolon.

    :raises IndexError: if *line* contains no name/sequence pairs.
    """
    seqs = line.strip(";\n").split(',')
    # list() is required: in Python 3 zip() returns a lazy iterator that
    # supports neither len() nor indexing (the original broke there).
    label_seqs = list(zip(seqs[:-1:2], seqs[1::2]))
    taxa_count = len(label_seqs)
    seq_length = len(label_seqs[0][1])
    # pad all names to length of longest name + 1 space
    max_name_length = max(len(val) for val in seqs[:-1:2]) + 1
    # add header
    header = "%s %s\n" % (taxa_count, seq_length)
    alignment = '\n'.join('%s%s' % (name.ljust(max_name_length), seq)
                          for name, seq in label_seqs)
    return header + alignment
def _no_op_missing_kt_jvm_lib_impl(name, **kwargs):
    """
    This is a help macro for a missing concrete rule implementation.

    This will be used in cases when some dependencies require a Kotlin rule
    implementation.

    Args:
        name: A unique name for this target.
        **kwargs: Anything else. Not used.
    """
    # Deliberately fail analysis with an actionable message instead of
    # silently producing a broken target.
    fail(
        "Unable to create target {} since it is a kt_jvm_library which was not provided. Add argument kt_jvm_library when calling generate_transitive_dependency_targets."
        .format(name),
    )
def get_final_histogram(n_states, logfile, temp):
    """
    This function analyzes the log file and performs the following tasks:
    1. Output the counts of each lambda state at the last time frame (for plotting histogram)
    2. Estimate the uncertainty of free energy difference from the final histogram

    Parameters
    ----------
    n_states : int
        Number of lambda states
    logfile : str
        The filename of the log file
    temp : float or None
        Temperature in K; None prints the kT result and uses 298.15 K.

    Returns
    -------
    counts : np.array
        The counts of each lambda state

    Example
    -------
    >>> get_final_histogram(40, 'solvent_0.log')
    [8678. 8437. 8680. 9007. 8606. 7642. 8269. 7878. 7689. 7906. 7451. 7416.
     7939. 7470. 7540. 7858. 7664. 7423. 7527. 7322. 7325. 7538. 7173. 7034.
     6943. 6910. 6935. 6805. 6463. 6371. 6249. 6425. 6353. 6618. 6789. 6810.
     6426. 6408. 6675. 6271.]
    """
    # `with` closes the handle even on errors (the original leaked it).
    with open(logfile, 'r') as f:
        lines = f.readlines()
    counts = np.zeros(n_states)
    # Scan from the end so we pick up the *last* MC-lambda table.
    for rev_idx, line in enumerate(reversed(lines), start=1):
        if 'MC-lambda information' in line:
            marker = len(lines) - rev_idx  # header index in file order
            for i in range(n_states):
                # The table body starts two lines below the header;
                # column 5 of each state row holds the visit count.
                counts[i] = float(lines[marker + 2 + i].split()[5])
            break
    kb = 1.38064852E-23  # Boltzmann constant (J/K)
    Na = 6.0221409E23    # Avogadro's number
    error = np.abs(np.log(counts[0] / counts[-1]))  # dimensionless (kT units)
    if temp is None:
        print('The uncertainty of the free energy difference is %5.3f kT.' % error)
        temp = 298.15  # default
        # kT -> kJ/mol -> kcal/mol (factor 0.2390...).
        error *= (kb * Na * temp / 1000) * 0.23900573613
        print('Or at 298.15K, the uncertainty is %5.3f kcal/mol' % error)
    else:
        error *= (kb * Na * float(temp) / 1000) * \
            0.23900573613  # unit: kcal/mol
        print('The uncertainty of the free energy difference is %5.3f kcal/mol.' % error)
    return counts
def create_sql_delete_stmt(del_list, name):
    """
    :param del_list: list of record ids that need to be formatted into the SQL delete statement.
    :param name: the name of the table
    :return: SQL statement for deleting the specific records

    NOTE(review): ids are interpolated directly into the statement, so the
    caller must ensure they come from a trusted source.
    """
    id_csv = ", ".join(del_list)
    statement = (
        f"DELETE FROM method_usage.pandas_{name} "
        f"WHERE {name}_id IN ({id_csv})"
    )
    logging.info(f"{len(del_list)} {name} in delete statement")
    return statement
def parse_track(trackelement):
    """Extract info from a single track XML element.

    Handles both layouts: recent-track elements carry the artist name as
    text with an ``mbid`` attribute, while loved/banned-track elements
    nest ``<name>``/``<mbid>`` children under ``<artist>``.

    Returns ``[date, trackname, artistname, albumname, trackmbid,
    artistmbid, albummbid]`` with any ``None`` replaced by ''.
    """
    # (A stray debug print of the raw element was removed here.)
    artist = trackelement.find('artist')
    # len(element) counts child elements; Element.getchildren() was
    # removed in Python 3.9, so it must not be used.
    if len(artist):
        # artist info is nested in loved/banned tracks xml
        artistname = artist.find('name').text
        artistmbid = artist.find('mbid').text
    else:
        artistname = artist.text
        artistmbid = artist.get('mbid')
    album = trackelement.find('album')
    if album is None:
        # no album info for loved/banned tracks
        albumname = ''
        albummbid = ''
    else:
        albumname = album.text
        albummbid = album.get('mbid')
    trackname = trackelement.find('name').text
    trackmbid = trackelement.find('mbid').text
    date = trackelement.find('date').get('uts')
    output = [date, trackname, artistname, albumname, trackmbid, artistmbid, albummbid]
    return ['' if value is None else value for value in output]
def flattencommand(input, separator, sort_keys, style, **kwargs):
    """
    Flattens JSON input with nested or hierarchical structure into a flat (depth 1) hierarchy. Requires valid input.

    Examples:

    \b
    Example: Basic usage:
    $ echo '{"a":{"b":null,"c":"null","d":"","e":{"f":null},"g":{},"h":[]}}' | python -mclifunzone.jsontool flatten -c
    {"a__b":null,"a__c":"null","a__d":"","a__e__f":null,"a__h":[]}
    """
    # Map the style name to json.dumps separators/indent settings.
    if style == 'compact':
        dumps_separators = (',', ':')
        dumps_indent = None
    elif style == 'pretty':
        dumps_separators = None
        dumps_indent = 2
    elif style == 'flat':
        dumps_separators = (',', ': ')
        dumps_indent = 0
    else:
        dumps_separators = None
        dumps_indent = None
    # '-' tells click.open_file to read from stdin.
    if not input:
        input = '-'
    if separator is None:
        separator = '__'
    with click.open_file(input, mode='rb') as f:
        # load_ordered preserves key order so output ordering is stable.
        data = json_utils.load_ordered(f)
        data = flatten(data, separator)
        s = json.dumps(data, indent=dumps_indent, separators=dumps_separators, sort_keys=sort_keys)
        click.echo(s)
def AddMutexEnvVarsFlags(parser):
    """Add flags for creating updating and deleting env vars.

    Adds a mutually-exclusive group: either --set-env-vars / --clear-env-vars,
    or the combinable pair --update-env-vars / --remove-env-vars.
    """
    # TODO(b/119837621): Use env_vars_util.AddUpdateEnvVarsFlags when
    # `gcloud run` supports an env var file.
    key_type = env_vars_util.EnvVarKeyType
    value_type = env_vars_util.EnvVarValueType
    flag_name = 'env-vars'
    long_name = 'environment variables'
    group = parser.add_mutually_exclusive_group()
    # update/remove may be combined with each other, but not with set/clear.
    update_remove_group = group.add_argument_group(
        help=('Only --update-{0} and --remove-{0} can be used together. If both '
              'are specified, --remove-{0} will be applied first.'
              ).format(flag_name))
    map_util.AddMapUpdateFlag(update_remove_group, flag_name, long_name,
                              key_type=key_type, value_type=value_type)
    map_util.AddMapRemoveFlag(update_remove_group, flag_name, long_name,
                              key_type=key_type)
    map_util.AddMapClearFlag(group, flag_name, long_name)
    map_util.AddMapSetFlag(group, flag_name, long_name, key_type=key_type,
                           value_type=value_type)
def _coord_matrix(model, pos, noutp):
    """
    Create an array representing inputs and outputs of a simple model.

    The array has a shape (noutp, model.n_inputs).

    Parameters
    ----------
    model : `astropy.modeling.Model`
        model
    pos : str
        Position of this model in the expression tree.
        One of ['left', 'right'].
    noutp : int
        Number of outputs of the compound model of which the input model
        is a left or right child.

    Returns
    -------
    mat : ndarray
        Coupling matrix of shape (noutp, model.n_inputs).
    """
    if isinstance(model, Mapping):
        # A Mapping just selects/permutes inputs: one unit row per mapped index.
        axes = []
        for i in model.mapping:
            axis = np.zeros((model.n_inputs,))
            axis[i] = 1
            axes.append(axis)
        m = np.vstack(axes)
        mat = np.zeros((noutp, model.n_inputs))
        if pos == 'left':
            mat[: model.n_outputs, :model.n_inputs] = m
        else:
            mat[-model.n_outputs:, -model.n_inputs:] = m
        return mat
    if not model.separable:
        # Non-separable: every output couples to every input (block of ones).
        # this does not work for more than 2 coordinates
        mat = np.zeros((noutp, model.n_inputs))
        if pos == 'left':
            mat[:model.n_outputs, : model.n_inputs] = 1
        else:
            mat[-model.n_outputs:, -model.n_inputs:] = 1
    else:
        # Separable: identity coupling between input i and output i.
        mat = np.zeros((noutp, model.n_inputs))
        for i in range(model.n_inputs):
            mat[i, i] = 1
        if pos == 'right':
            # NOTE(review): np.roll without an axis rolls the *flattened*
            # array; presumably intentional here to shift the identity block
            # into the right-operand position -- confirm against upstream.
            mat = np.roll(mat, (noutp - model.n_outputs))
    return mat
def showp2rev(context, mapping):
    """Integer. The repository-local revision number of the changeset's
    second parent, or -1 if the changeset has no second parent."""
    # Mercurial template keyword: resolve the changectx from the templater
    # resources, then ask its second parent for its revision number.
    ctx = context.resource(mapping, 'ctx')
    return ctx.p2().rev()
def puan_kam(text: str = 'สวัสดี',
             first: Optional[bool] = None,
             keep_tone: Optional[bool] = None,
             all: Optional[bool] = False,
             skip_tokenize: Optional[bool] = None):
    """Puan kum (ผวนคำ) is a Thai toung twister, This API convert string into kampuan
    Play around with the options to see different results.
    -Args:
    - **text** (str): Defaults to 'สวัสดี'.
        - input string 'ไปเที่ยว' -> auto tokenize will apply and split to ไป and เที่ยว
        - list of string which accepted 3 formats: ['ไป','กิน','ข้าว'] | 'ไป','กิน','ข้าว' | ไป,กิน,ข้าว, the list input will also neglect auto tokenization.
    - **first** (bool, optional): if True use the first word to puan together with the last word otherwise will select second word and last word
        (None will let us decide). Defaults to None.
    - **keep_tone** (bool, optional): force whether to keep the tone when doing the puan (None will let us decide). Defaults to None.
    - **all** (bool, optional): if True will provide all 4 puan results. Defaults to False.
    - **skip_tokenize** (bool, optional): if True will skip tokenzation and use user provided list of words (input pure string will force to False or dont skip tokenization). Defaults to None.
    -Returns:
    - **results**: List of คำผวน
    """
    # Reject any input containing non-Thai characters up front.
    if not check_thai_ch(text):
        raise HTTPException(400, detail=f'Input contains non Thai')
    text = process_text_2_list(text)
    try:
        split_words = kp.puan_kam_preprocess(text, skip_tokenize=skip_tokenize)
    except ValueError:
        # First attempt failed: retry once with the opposite tokenization
        # setting before giving up.
        # NOTE(review): when skip_tokenize is None, `not(skip_tokenize)`
        # evaluates to True — presumably intentional; confirm against kp.
        try:
            split_words = kp.puan_kam_preprocess(
                text, skip_tokenize=not(skip_tokenize))
        except ValueError as e:
            raise HTTPException(422, detail=f'Input error: {e}')
    # NOTE: `all` shadows the builtin here; it is the query parameter name
    # exposed by the API, so it cannot be renamed.
    if all is not None and all:
        # Return every puan variant.
        return {'input': text,
                'results': kp.puan_kam_all(text=split_words)}
    else:
        if first is None and keep_tone is None:
            # No preferences given: let the library decide.
            return {'input': text,
                    'results': kp.puan_kam(text=split_words)}
        else:
            # Explicit word-choice / tone preferences.
            return {'input': text,
                    'results': kp.puan_kam_base(text=split_words, keep_tone=keep_tone, use_first=first)}
def _flask_app_from_location(module_name: str) -> flask.app.Flask:
    """
    :param module_name: String specifying path and module name as well as
        actual flask app attribute. e.g., /path/to/module:flask_app
    """
    # The last path component is "module:attribute"; everything before it
    # is the directory that must become importable.
    path_parts = module_name.split('/')
    spec_fields = path_parts[-1].split(':')
    module_file = spec_fields[0]
    flask_app_obj = spec_fields[1]
    directory = '/'.join(path_parts[:-1])
    sys.path.append(directory)
    imported_module = importlib.import_module(module_file)
    return getattr(imported_module, flask_app_obj)
def get_app_wx(*args, **kwargs):
    """Create a new wx app or return an existing one."""
    import wx
    app = wx.GetApp()
    if app is None:
        # Default to no stdout/stderr redirection unless the caller asked.
        kwargs.setdefault('redirect', False)
        app = wx.PySimpleApp(*args, **kwargs)
    return app
def do_part_1():
    """
    Solve the puzzle: fold every interpreted line into a running total,
    print it, and return it.
    """
    total = 0
    for line in input_lines(1):
        value, operation = interpret_line(line)
        total = operation(total, value)
    print(total)
    return total
def handle_rpc_errors(fnc):
    """Decorator to add more context to RPC errors"""
    @wraps(fnc)
    def wrapper(*args, **kwargs):
        try:
            return fnc(*args, **kwargs)
        except grpc.RpcError as exc:
            # lnd might be active, but not possible to contact
            # using RPC if the wallet is encrypted. If we get
            # an rpc error code Unimplemented, it means that lnd is
            # running, but the RPC server is not active yet (only
            # WalletUnlocker server active) and most likely this
            # is because of an encrypted wallet.
            # NOTE(review): the next two statements discard their results —
            # they have no visible effect and look like leftovers.
            exc.code().value
            exc.details()
            if exc.code() == grpc.StatusCode.UNIMPLEMENTED:
                # raise WalletEncryptedError from None
                print("unimplemented")
                raise exc
            elif exc.code() == grpc.StatusCode.UNAVAILABLE:
                print("UNAVAILABLE")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.UNKNOWN and exc.details() == "wallet locked, unlock it to enable full RPC access":
                print("WALLET IS LOCKED!")
                raise exc
            elif exc.code() == grpc.StatusCode.UNKNOWN:
                print("unknown")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.NOT_FOUND:
                print("NOT FOUND")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.PERMISSION_DENIED:
                print("PERMISSION_DENIED")
                print(f"ERROR MESSAGE: {exc.details()}")
            else:
                raise exc
            # NOTE(review): for the non-raising status codes above, the
            # exception OBJECT is returned to the caller instead of a result
            # or a re-raise — callers appear to depend on this; confirm
            # before changing.
            return exc
        except Exception as exc:
            # NOTE(review): this catch-all prints and falls through, so the
            # caller silently receives None — verify this is intended.
            print("unknown exception")
            print(exc)
    return wrapper
def _check_BoolOp_expr(boolop, t, env):
    """Check a boolean operation (`and`/`or` chain) against type `t`."""
    assert boolop.__class__ is ast.BoolOp
    operator = boolop.op
    operands = boolop.values
    assert operator.__class__ in bool_ops, "%s not in bool ops" % cname(operator)
    # (BoolOp) assignment rule: every operand must itself check against t.
    return all(check_expr(operand, t, env) for operand in operands)
def get_matproj(dbpath, cutoff, api_key, dataset_properties):
    """
    Build the Materials Project dataset wrapper.

    Args:
        dbpath (str): path to the local database
        cutoff (float): cutoff radius
        api_key (str): personal api_key for materialsproject.org
        dataset_properties (list): properties of the dataset

    Returns:
        AtomsData object
    """
    dataset = MaterialsProject(dbpath, cutoff, api_key,
                               properties=dataset_properties)
    return dataset
def parse_change_values_from_opts(opts):
    """
    Convert optparse style options into a dictionary for changing.

    :param opts: optparse style options
    :returns: a dictionary with change values to filter devices;
              supported parameters are ip, port, replication_ip,
              replication_port, device and meta
    """
    supported_opts = ('change_ip', 'change_port', 'change_replication_ip',
                      'change_replication_port', 'change_device',
                      'change_meta')
    change_values = {}
    for opt_name in supported_opts:
        opt_value = getattr(opts, opt_name, None)
        if not opt_value:
            continue
        # Addresses get validated/normalized before being stored.
        if opt_name in ('change_ip', 'change_replication_ip'):
            opt_value = validate_and_normalize_address(opt_value)
        change_values[opt_name.replace('change_', '')] = opt_value
    return change_values
def save_annotations(index_value):
    """
    Append the annotation for the wav file at ``index_value`` to the CSV file.

    The row written is ``[wav_filename, label1, label2, ...]`` where the
    labels come from the comma-separated annotation entry widget
    (ANNOTATION_ENTRY_VAR). A header row is written first when the CSV file
    does not exist yet, and a "SUBMITTED" label is shown in the UI on
    success.

    :param index_value: index into the global FOLDER_WAV_FILES list
    :raises: shows an error dialog (instead of raising) when the required
             globals are not defined yet, i.e. no save path was specified
    """
    try:
        wav_name = FOLDER_WAV_FILES[index_value].split("\\")[-1]
        labels = ANNOTATION_ENTRY_VAR.get().split(",")
        file_exists = os.path.exists(CSV_FILENAME)
        # newline='' is required when handing a text file to csv.writer;
        # without it, extra blank rows appear in the CSV on Windows.
        with open(CSV_FILENAME, "a" if file_exists else "w", newline="",
                  encoding='utf-8') as file_object:
            wavfile_information_object = csv.writer(file_object)
            if not file_exists:
                # First write: emit the header row once.
                wavfile_information_object.writerow(
                    ["Filename", "Label1", "Label2", "Label3", "Label4"])
            wavfile_information_object.writerow([wav_name] + labels)
        # Give visual feedback that the annotation was stored.
        Label(root, text="SUBMITTED",
              font=FONT_STYLE_BUTTON).grid(row=11, column=10,
                                           sticky=(N, S, W, E), pady=10)
    except NameError:
        messagebox.showerror("No Path", "Specify path to save annotations!")
def chooseMove(board,gameState):
    """called once per turn. Calls either escapeTrail or approachOpponent to determine move choice"""
    def escapeTrail():
        """returns a command to move to the next space if we are in danger of an explosion Trail, or None if we are safe"""
        # if we are not currently on a space that is slated to contain a trail, we don't need to do anything
        if (not board[int(gameState['player']['x'])][int(gameState['player']['y'])].containsUpcomingTrail):
            return None
        # Path away from any space flagged containsUpcomingTrail, avoiding
        # soft blocks and the opponent.
        escapePath = util.findPath(gameState,board,board[int(gameState['player']['x'])][int(gameState['player']['y'])],"containsUpcomingTrail",False,allowSoftBlocks=False,allowOpponent=False)
        # NOTE(review): if findPath returns None this print raises before
        # the None check below — the guard ordering looks inverted.
        print("escape path: {0}\nnext block is: {1}".format(escapePath,escapePath[-1]))
        if (escapePath == None): # todo: we should probably do something here even though we couldn't find a path to escape
            return ''
        if (not escapePath[-1].containsTrail):
            if (escapePath[-1].type == SpaceType.softBlock):
                # todo: we should probably do something here even though the next space in our path is currently a soft block
                return ''
            return util.moveTo(gameState,board,escapePath[-1])
        else:
            # todo: we should probably do something here even though the next space in our path is currently lethal
            return ''
    def approachOpponent():
        """returns a command to move to the next space in order to approach the opponent, or a bomb command if in range to hit opponent"""
        approachPath = util.findPath(gameState,board,board[int(gameState['player']['x'])][int(gameState['player']['y'])],"containsOpponent")
        # NOTE(review): same print-before-None-check hazard as escapeTrail.
        print("approach path: {0}\nnext block is: {1}".format(approachPath,approachPath[-1]))
        if (approachPath == None): # todo: we should probably do something here even though we couldn't find a path to approach (this state may be unreachable though depending on implementation)
            return ''
        if (not (approachPath[-1].containsTrail or approachPath[-1].containsUpcomingTrail)): #don't approach into a trail OR an upcoming trail todo: check number of ticks on upcoming trail instead
            if (approachPath[-1].type == SpaceType.softBlock or approachPath[-1].containsOpponent): # place a bomb if we are right next to a soft block or the opponent
                return "b" # todo: this assumes that we currently have a bomb available. Account for case when we do not have any bombs available to use
            return util.moveTo(gameState,board,approachPath[-1])
        else:
            # todo: we should probably do something here even though the next space in our path is currently lethal
            return ''
    def tryPurchaseUpgrade():
        # attempt to select an upgrade to purchase
        # we only buy pierce up til 3 (max pierce for range 3)
        if(gameState['player']['bombPierce'] < 3):
            return "buy_pierce"
        return ''
    # Priority order: escape danger first, then approach the opponent,
    # finally spend spare money on upgrades. Note that an empty-string
    # result from escapeTrail skips approachOpponent entirely.
    move = escapeTrail()
    if (move == None):
        move = approachOpponent()
    if (move == None or move == ""):
        move = tryPurchaseUpgrade()
    return move
def xr_linear_trends_2D(da, dim_names, with_nans=False):
    """ calculate linear trend of 2D field in time
    ! slow, use xr_2D_trends instead
    input:
    da .. 3D xr DataArray with (dim_names) dimensions
    dim_names .. tuple of 2 strings: e.g. lat, lon dimension names
    output:
    da_trend .. slope of linear regression
    """
    # Convert datetime time coordinates to floats so polyfit can use them.
    # NOTE(review): `x` and `time_` are never used below — presumably
    # datetime_to_float mutates `da.time` in place; confirm upstream.
    if type(da.time.values[0]) in [np.datetime64, cftime._cftime.Datetime360Day]:
        x, time_ = datetime_to_float(da)
        # time_to_float = True
    def xr_linear_trend_with_nans(x):
        """ function to compute a linear trend coefficient of a timeseries """
        if np.isnan(x).any():
            # Drop NaNs first; fall back to NaN coefficients when fewer
            # than two valid points remain.
            x = x.dropna(dim='time')
            if x.size>1:
                pf = np.polynomial.polynomial.polyfit(x.time, x, 1)
            else:
                pf = np.array([np.nan, np.nan])
        else:
            pf = np.polynomial.polynomial.polyfit(x.time, x, 1)
        # pf[1] is the slope (first-order coefficient).
        return xr.DataArray(pf[1])
    (dim1, dim2) = dim_names
    # stack lat and lon into a single dimension called allpoints
    stacked = da.stack(allpoints=[dim1, dim2])
    # apply the function over allpoints to calculate the trend at each point
    # NOTE(review): if with_nans is neither True nor False, neither branch
    # runs and `da_trend` below raises NameError.
    if with_nans==False:
        trend = stacked.groupby('allpoints').apply(xr_linear_trend)
        # unstack back to lat lon coordinates
        da_trend = trend.unstack('allpoints')
    if with_nans==True:
        trend = stacked.groupby('allpoints').apply(xr_linear_trend_with_nans)
        # unstack back to lat lon coordinates
        da_trend = trend.unstack('allpoints')
    # if time_to_float: da_trend.time.values = time_
    # print(da_trend)
    # Older xarray versions name the unstacked levels generically; rename
    # them back to the original dimension names.
    if 'allpoints_level_0' in da_trend.coords.keys():
        da_trend = da_trend.rename({'allpoints_level_0':dim1, 'allpoints_level_1':dim2})
    return da_trend
def sanitize_email(email):
    """
    Returns an e-mail address in lower-case and strip leading and trailing
    whitespaces.
    >>> sanitize_email(' MyEmailAddress@example.com ')
    'myemailaddress@example.com'
    """
    lowered = email.lower()
    return lowered.strip()
def draw_perm_reps(data_1, data_2, func, size=1, args=()):
    """
    Generate permutation replicates of `func` from `data_1` and
    `data_2`

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.
    func : function
        Function, with call signature `func(x, y, *args)` to compute
        replicate statistic from permutation sample. It must return
        a single, scalar value.
    size : int, default 1
        Number of pairs bootstrap replicates to draw.
    args : tuple, default ()
        Arguments to be passed to `func`.

    Returns
    -------
    output : ndarray
        Permutation replicates.
    """
    # Convert to Numpy arrays
    data_1 = utils._convert_data(data_1)
    data_2 = utils._convert_data(data_2)
    # Fast paths: dedicated implementations for the two common statistics
    # when no extra arguments were supplied.
    if args == ():
        if func == diff_of_means:
            return _draw_perm_reps_diff_of_means(data_1, data_2, size=size)
        elif func == studentized_diff_of_means:
            if len(data_1) == 1 or len(data_2) == 1:
                raise RuntimeError("Data sets must have at least two entries")
            return _draw_perm_reps_studentized_diff_of_means(data_1, data_2, size=size)
    # Make a Numba'd function for drawing reps.
    f, numba_success = utils._make_two_arg_numba_func(func, args)
    if numba_success:
        jit = numba.jit
    else:
        # Fall back to a no-op decorator when Numba cannot compile `func`.
        jit = utils._dummy_jit
    @jit(nopython=True)
    def _draw_perm_reps(data_1, data_2):
        # Pool both samples, then repeatedly shuffle and split at the
        # original boundary to form permutation samples.
        n1 = len(data_1)
        x = np.concatenate((data_1, data_2))
        perm_reps = np.empty(size)
        for i in range(size):
            np.random.shuffle(x)
            perm_reps[i] = f(x[:n1], x[n1:], args)
        return perm_reps
    return _draw_perm_reps(data_1, data_2)
def construct(template_name, parameter_dict, path=""):
    """Construct an HTML file using a given template and parameters.
    Handles all necessary tasks for generating finished HTML files in output directory.
    Likely the tuscon function that the user will call most often in their code.

    :param template_name: Path and name of the template file
    :type template_name: str
    :param parameter_dict: Dictionary of parameters and their values which will be used for filling the template
    :type parameter_dict: dict
    :param path: Path and name of the newly constructed HTML file (If path == "", no file is output)
    :type path: str
    :return: Final HTML string that would go into the newly constructed HTML file
    :rtype: str
    :raises Exception: If tuscon_params HTML tag is absent in template or if a parameter demanded by tuscon_params is
        not found in dictionary"""
    template_name = check_path(os.path.join(templates_dir, template_name))
    final_html = ""
    with open(template_name) as template_file:
        template = BeautifulSoup(template_file, "lxml")
        # A template must declare its parameters in a <tuscon_params> tag.
        if len(template.find_all("tuscon_params")) == 0:
            raise Exception(
                "<tuscon_params> tag must be present in a template. If generation is not needed, then serve() "
                "as a static file instead.")
        parameter_names = str(template.tuscon_params.string).split(",")
        # Remove spaces from parameter names and ensure they all exist within the dictionary
        for p in range(len(parameter_names)):
            parameter_names[p] = parameter_names[p].replace(" ", "")
            if parameter_names[p] not in parameter_dict:
                raise Exception("Parameter \"" + parameter_names[p] + "\" demanded by template not found in dictionary")
        # Substitute parameters throughout the tree, strip tuscon markup,
        # and serialize the finished document.
        parse_children(template, parameter_dict, template)
        cleanup(template)
        final_html = template.prettify()
    # Only write the result to disk when an output path was requested.
    if path != "":
        path = check_path(os.path.join(output_dir + path), True)
        with open(path, "w") as output_file:
            output_file.write(final_html)
        print("Successfully generated " + path + " using template " + template_name)
    return final_html
def get_attachment_form(parser, token):
    """
    Get a (new) form object to upload a new attachment

    Syntax::

        {% get_attachment_form for [object] as [varname] %}
        {% get_attachment_for for [app].[model] [object_id] as [varname] %}
    """
    # Delegate all token parsing to the node class.
    node = AttachmentFormNode.handle_token(parser, token)
    return node
def greet(name):
    """Greet message, formatted differently for johnny."""
    is_johnny = name == "Johnny"
    return "Hello, my love!" if is_johnny else "Hello, {name}!".format(name=name)
def get_naca_points(naca_digits, number_of_points=100,
                    sharp_trailing_edge=True,
                    abscissa_map=lambda x: 0.03*x+0.97*x**2,
                    verbose=False):
    """
    Return a list of coordinates of NACA 4-digit and 5-digit series
    airfoils.
    """
    # `explain` prints progress when verbose, otherwise is a no-op.
    if verbose:
        def explain(*s):
            print(" ".join(str(s_i) for s_i in s))
    else:
        def explain(*s):
            pass
    explain("Airfoil: NACA-%s" % naca_digits)
    if sharp_trailing_edge:
        explain("Sharp trailing edge")
        edge_coeff = 0.1036
    else:
        explain("Blunt trailing edge")
        edge_coeff = 0.1015
    # Map uniformly spaced raw abscissae through abscissa_map (default
    # clusters points near the leading edge).
    raw_abscissae = numpy.linspace(0, 1, number_of_points, endpoint=True)
    abscissae = numpy.empty_like(raw_abscissae)
    for i in range(number_of_points):
        abscissae[i] = abscissa_map(raw_abscissae[i])
    digits_int = int(naca_digits)
    if len(naca_digits) == 4:
        # Decode NACA MPXX by place value: XX = thickness, P = position of
        # maximum camber, M = maximum camber.
        thickness = (digits_int % 100)
        max_camber_pos = (digits_int % 1000) - thickness
        max_camber = (digits_int % 10000) - max_camber_pos - thickness
        thickness = thickness / 1e2
        max_camber_pos = max_camber_pos / 1e3
        max_camber = max_camber / 1e5
        explain("Thickness:", thickness)
        explain("Position of maximum camber:", max_camber_pos)
        explain("Maximum camber:", max_camber)
        if max_camber == 0 and max_camber_pos == 0:
            explain("Symmetric 4-digit airfoil")
            points = FourDigitsSymmetric(thickness, edge_coeff)
        elif max_camber != 0 and max_camber_pos != 0:
            explain("Cambered 4-digit airfoil")
            points = FourDigitsCambered(thickness, max_camber,
                                        max_camber_pos, edge_coeff)
        else:
            raise NotImplementedError(
                "You must decide whether your airfoil shall be cambered or not!")
    elif len(naca_digits) == 5:
        thickness = (digits_int % 100)
        max_camber_pos = (digits_int % 10000) - thickness
        thickness = thickness / 1e2
        max_camber_pos = max_camber_pos / 2e4
        explain("Thickness:", thickness)
        explain("Position of maximum camber:", max_camber_pos)
        # The leading three digits select tabulated (m, k1) coefficients.
        identifier = digits_int // 100
        if identifier == 210:
            m = 0.058
            k1 = 361.4
        elif identifier == 220:
            m = 0.126
            k1 = 51.64
        elif identifier == 230:
            m = 0.2025
            k1 = 15.957
        elif identifier == 240:
            m = 0.29
            k1 = 6.643
        elif identifier == 250:
            m = 0.391
            k1 = 3.23
        else:
            raise NotImplementedError("5-digit series only implemented for "
                                      "the first three digits in 210, 220, 230, 240, 250!")
        explain("5-digit airfoil")
        points = FiveDigits(thickness, m, k1, edge_coeff)
    else:
        raise NotImplementedError(
            "Only the 4-digit and 5-digit series are implemented!")
    # Evaluate the upper and lower surface at every abscissa.
    points_upper = numpy.zeros((len(abscissae), 2))
    points_lower = numpy.zeros((len(abscissae), 2))
    for i in range(len(abscissae)):
        points_upper[i] = points(abscissae[i], "upper")
        points_lower[i] = points(abscissae[i], "lower")
    # Concatenate upper and reversed lower surfaces into one outline,
    # dropping duplicated end points for a sharp trailing edge.
    if sharp_trailing_edge:
        return list(points_upper)[1:-1] + list(points_lower[::-1])
    else:
        return list(points_upper)[1:] + list(points_lower[::-1])
def get_request_now():
    """
    When constructing the SOAP request, the timestamps have to be naive but with localtime values.
    E.g. if the current offset is utc+1 and the utc now is 2016/03/30 0:00, the SOAP endpoint expects 2016/03/30 1:00
    without tzinfo. That's pretty ugly but ¯\_(ツ)_/¯
    In order to do that, this function gets the utc value, translates it into a local one and makes it naive by
    deleting the tzinfo.
    """
    local_now = timezone.localtime(timezone.now())
    naive_now = timezone.make_naive(local_now)
    return naive_now
def test_2_2():
    """Testing PWBS Config Manager"""
    # Import
    from ..config.pwbs_config import PWBS_ConfigManager
    from ..config.config_manager import PWBSConfigFileDontExistError
    from ..config.config_manager import PWBSInvalidConfigFile
    # Values
    manager = PWBS_ConfigManager()
    manager.log.log("Test 2.2")
    try:
        manager.config_file()
        manager.commands_to_commandlist()
    except (PWBSConfigFileDontExistError, PWBSInvalidConfigFile):
        # Either configuration error is acceptable here — it means the
        # manager raised as expected.
        assert True
def _make_decorator(
obj: Wrappable,
to_wrap: tp.Iterable[str]
) -> tp.Callable[[tp.Type[WrapperInjector]], tp.type[WrapperInjector]]:
"""Makes the decorator function to use for wrapping.
Parameters
----------
obj : :obj:`ModuleType`, :obj:`type` or :obj:`object`
The source object to wrap the `to_wrap` attributes of.
to_wrap : Iterable[str]
The names of the attributes of `obj` to wrap.
Returns
-------
Callable[[Type[WrapperInjector]], Type[WrapperInjector]]
The decorator to use for wrapping a new :obj:`WrapperInjector`
class.
"""
def _wrapper(cls: tp.Type[WrapperInjector]) -> tp.Type[WrapperInjector]:
cls.__wrapped__ = tuple(to_wrap)
to_wrap = {x: getattr(obj, x) for x in to_wrap}
for k, v in to_wrap.items():
if isinstance(v, FunctionType):
setattr(cls, k, cls.__wrap_function(v))
else:
setattr(cls, k, cls.__wrap_method(v))
return cls
return _wrapper | 37,290 |
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
default_weights = False
else:
default_weights = (class_weight is None)
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
daal_ready = use_daal and solver in ['lbfgs', 'newton-cg'] and not sparse.issparse(X)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if daal_ready:
w0 = np.zeros(n_features + 1, dtype=X.dtype)
y_bin[~mask] = 0.
else:
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
else:
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if solver not in ['sag', 'saga']:
if daal_ready:
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
if daal_ready:
w0 = np.zeros((classes.size, n_features + 1),
order='C', dtype=X.dtype)
else:
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
if daal_ready:
w0[-coef.size:] = np.roll(coef, 1, -1) if coef.size != n_features else coef
else:
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if daal_ready:
w0[:, -coef.shape[1]:] = np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
else:
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
C_daal_multiplier = 1
# commented out because this is Py3 feature
#def _map_to_binary_logistic_regression():
# nonlocal C_daal_multiplier
# nonlocal w0
# C_daal_multiplier = 2
# w0 *= 2
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
if daal_ready and classes.size == 2:
w0_saved = w0
w0 = w0[-1:, :]
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_and_grad
else:
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
if daal_ready:
func = _daal4py_loss_and_grad
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
if daal_ready:
daal_extra_args_func = _daal4py_logistic_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
if daal_ready:
extra_args = daal_extra_args_func(classes.size, w0, X, target, 0., 0.5 / C / C_daal_multiplier,
fit_intercept, value=True, gradient=True, hessian=False)
else:
extra_args = (X, target, 1. / C, sample_weight)
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=extra_args,
iprint=iprint, pgtol=tol, maxiter=max_iter)
if daal_ready and C_daal_multiplier == 2:
w0 *= 0.5
if info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
n_iter_i = min(info['nit'], max_iter)
elif solver == 'newton-cg':
if daal_ready:
def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
daal_penaltyL2 = 0.5 / C / C_daal_multiplier
_obj_, X_, y_, n_samples = daal_extra_args_func(
classes.size, w0, X, target, 0., daal_penaltyL2, fit_intercept,
value=value, gradient=gradient, hessian=hessian)
_func_ = lambda x, *args: f(x, _obj_, *args)
return _func_, (X_, y_, n_samples, daal_penaltyL2)
loss_func, extra_args = make_ncg_funcs(func, value=True)
grad_func, _ = make_ncg_funcs(grad, gradient=True)
grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
w0, n_iter_i = _newton_cg(grad_hess_func, loss_func, grad_func, w0, args=extra_args,
maxiter=max_iter, tol=tol)
else:
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.
beta = 1. / C
else:
alpha = 1. / C
beta = 0.
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
if daal_ready:
if classes.size == 2:
multi_w0 = w0[np.newaxis, :]
else:
multi_w0 = np.reshape(w0, (classes.size, -1))
else:
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(np.require(multi_w0, requirements='O'))
else:
coefs.append(np.require(w0, requirements='O'))
n_iter[i] = n_iter_i
if daal_ready:
if fit_intercept:
for i, ci in enumerate(coefs):
coefs[i] = np.roll(ci, -1, -1)
else:
for i, ci in enumerate(coefs):
coefs[i] = np.delete(ci, 0, axis=-1)
return coefs, np.array(Cs), n_iter | 37,291 |
def merge_labels_below_minsize(labels: np.array,
                               min_size: int,
                               connectivity: int = 8) -> np.array:
    """
    Merge every label smaller than `min_size` into one of its connected
    neighbors (chosen with the given connectivity). Label 0 is treated as
    background and left untouched.

    Parameters
    ----------
    labels : np.array
        2d label array; 0 is background and is ignored.
    min_size : int
        Segments below this pixel count are merged away.
    connectivity : int
        4 or 8 connectivity. Default 8. If `apply_mask_buffer` was used to
        compute distance, then connectivity must be 8.
        See: https://en.wikipedia.org/wiki/Pixel_connectivity

    Returns
    -------
    np.array:
        Relabeled 2d array with small segments merged.

    Note
    ----
    Sizes are computed once up front; a merged segment's neighbor is chosen
    from the initial labeling, without recursively re-checking sizes.
    """
    segment_sizes = get_superpixel_area_as_features(labels)
    all_labels = np.arange(0, labels.max() + 1)
    small_labels = list(all_labels[segment_sizes.ravel() < min_size])
    adjacency = get_RAG_neighbors(labels,
                                  label_subset=small_labels,
                                  connectivity=connectivity)

    def assign_to_neighbor(label_arr):
        label = label_arr[0]
        # Background and sufficiently large segments keep their label.
        if (label == 0) or (label not in small_labels):
            return label
        neighbors = adjacency.get(label)
        # Isolated small segments (no neighbors) fall back to background.
        return neighbors[0] if len(neighbors) > 0 else 0

    merged_features = apply_func_to_superpixels(assign_to_neighbor,
                                                labels,
                                                labels, dtype=int)
    labels = get_array_from_features(labels, merged_features)
    labels, _, _ = relabel_sequential(labels)
    return labels
def create_model_error_grid(
        func: MultiFidelityFunction,
        instances: Sequence[Instance],
        mfbo_options: Dict[str, Any],
        save_dir: Path,
        extra_attributes=None,
        plot_1d: bool=False,
        record_values: bool=False
) -> None:
    """Create a grid of model errors for the given MFF-function case at the
    given list of instances.

    The results are saved in a NETCDF .nc file at the specified `save_dir`.

    :param func: multi-fidelity function to evaluate
    :param instances: (num_high, num_low, rep) instance specifications
    :param mfbo_options: options for mlcs.MultiFidelityBO. NOTE: this dict is
                         mutated in place ('test_sample' is added).
    :param save_dir: directory where the output .nc file is written
    :param extra_attributes: extra attributes stored in the output dataset
    :param plot_1d: if True, save a 1d visualization plot per instance
    :param record_values: if True, also record the raw model predictions
    """
    # Fix mutable-default-argument pitfall: the previous default
    # `extra_attributes=dict()` was a single dict shared between calls.
    if extra_attributes is None:
        extra_attributes = dict()

    start = datetime.now()
    print(f"Timestamp: {start}")
    print(f"Starting case {func}")
    print(f"{len(instances)} instances passed in")

    Results = namedtuple('Results', 'mses r2 values')

    # Determine unique output path for this experiment
    surr_name = repr_surrogate_name(mfbo_options)
    fname = standardize_fname_for_file(func.name)
    output_path = save_dir / f"{surr_name}-{func.ndim}d-{fname}.nc"

    # Don't redo any prior data that already exists
    if output_path.exists():
        print(f"existing file '{output_path.name}' found, loading instances...")
        num_orig_instances = len(instances)
        with xr.open_mfdataset(f'{output_path}*') as ds:
            with ds['mses'].load() as da:
                instances = filter_instances(instances, da.sel(model='high_hier'))
        print(f"{len(instances)} out of {num_orig_instances} instances left to do")

    # Return early if there is nothing left to do
    if not instances:
        return

    # Setup some (final) options for the hierarchical model
    # NOTE: mutates the caller's `mfbo_options` dict.
    mfbo_options['test_sample'] = get_test_sample(func.ndim, save_dir)

    results = []
    print('starting loops')
    for i, (num_high, num_low, rep) in enumerate(instances):
        # Progress report every 100 instances
        if i % 100 == 0:
            print(f'{i}/{len(instances)}')

        # Deterministic per-instance seeding for reproducibility
        set_seed_by_instance(num_high, num_low, rep)

        # Create Multi-Fidelity DoE in- and output according to instance specification
        high_x, low_x = mlcs.bi_fidelity_doe(func.ndim, num_high, num_low)
        high_x, low_x = scale_to_function(func, [high_x, low_x])
        high_y, low_y = func.high(high_x), \
                        func.low(low_x)

        # Create an archive from the MF-function and MF-DoE data
        archive = mlcs.CandidateArchive.from_multi_fidelity_function(func, ndim=func.ndim)
        archive.addcandidates(low_x, low_y, fidelity='low')
        archive.addcandidates(high_x, high_y, fidelity='high')

        # (Automatically) Create the hierarchical model
        mfbo = mlcs.MultiFidelityBO(func, archive, **mfbo_options)

        # Get the results we're interested in from the model for this instance
        mses = mfbo.getMSE()
        r2s = mfbo.getR2()
        if record_values:
            values = [model.predict(mfbo.test_sample).flatten()
                      for model in [mfbo.models['high'],
                                    mfbo.direct_models['high'],
                                    mfbo.models['low']]
                      ]
        else:
            values = None

        if plot_1d:
            X = np.linspace(0, 1, 1001).reshape((-1, 1))
            plt.scatter(low_x, low_y, s=20)
            plt.scatter(high_x, high_y, s=15)
            plt.plot(X, mfbo.models['high'].predict(X))
            plt.plot(X, func.high(X))
            plt.savefig(save_dir / f'1d-forrester-visualization-{num_high}-{num_low}.png')
            plt.close()

        # Store the results
        results.append(Results(mses, r2s, values))
    print(f'{len(instances)}/{len(instances)}')

    # Create attributes dictionary
    attributes = dict(experiment='create_model_error_grid',
                      function=func.name,
                      ndim=func.ndim,
                      kernel=mfbo_options.get('kernel', 'N/A'),
                      surrogate_name=mfbo_options.get('surrogate_name', 'Kriging'),
                      scaling=mfbo_options['scaling'],
                      **extra_attributes,
                      )

    ## Iteration finished, arranging data into xr.Dataset
    output = results_to_dataset(results, instances, mfbo_options, attributes,
                                record_values=record_values)
    store_output(output, output_path)

    end = datetime.now()
    print(f"Ended case {func} at {end}\n"
          f"Time spent: {str(end - start)}")
def plot_testfn():
    """Make contour plots of all 6 test functions in 2d.

    Evaluates each test function on a 100x100 grid over the unit square and
    writes the six contour panels to 'test_genz.contours.pdf' in the
    working directory. Sets the module-level `c` and `w` parameter arrays
    as a side effect.
    """
    assert plt_loaded, 'Matplotlib not installed'
    dim = 2
    global c, w
    c = np.array([0.5] * dim)
    c = c / sum(c) * 9.
    w = np.array([0.5] * dim)
    xi = np.linspace(0., 1., 100)
    xx = mylib.meshgrid_flatten(xi, xi)
    fig = plt.figure(figsize=(12, 8))
    for i in range(1, 7):
        fn = get_fn(i)
        ax = fig.add_subplot(2, 3, i)
        F = np.zeros(xx.shape[0])
        # Use a distinct index for grid points: the original reused `i`,
        # shadowing the subplot/function index inside the loop.
        for j, x in enumerate(xx):
            F[j] = fn(np.array(x))
        # (Removed a bare `F.reshape(...)` statement whose result was
        # discarded; the reshape below is the one actually used.)
        ax.contour(xi, xi, F.reshape(xi.size, xi.size).T)
    fig.savefig('test_genz.contours.pdf')
def custom_cnn_model(config, labels, model_weights=None):
    """
    Convolutional Neural network architecture based on 'Photonic Human
    Identification based on Deep Learning of Back Scattered Laser Speckle
    Patterns' paper: one conv/pool stage followed by two dense blocks
    (Dense -> BatchNorm -> ReLU -> Dropout) and a softmax output.

    :param config: Configuration list of models hyper & learning params
    :param labels: List of data labels
    :param model_weights: Weights of pre-trained model
    :return: compiled CNN model
    """
    layers = tf.keras.layers
    model = tf.keras.models.Sequential()

    # Convolution + pooling front-end on single-channel square inputs.
    model.add(layers.Conv2D(config[1], config[2],
                            input_shape=(config[0], config[0], 1)))
    model.add(layers.MaxPooling2D(pool_size=(config[3], config[3])))
    model.add(layers.Dropout(config[4]))
    model.add(layers.Flatten())

    # Two identical fully-connected blocks, parameterized by config indices.
    for units_idx, dropout_idx in ((5, 6), (7, 8)):
        model.add(layers.Dense(config[units_idx]))
        model.add(layers.BatchNormalization())
        model.add(layers.Activation(activation=tf.nn.relu))
        model.add(layers.Dropout(config[dropout_idx]))

    model.add(layers.Dense(len(labels), activation=tf.nn.softmax))

    if model_weights is not None:
        print('loading pre-trained model')
        model.load_weights(model_weights, by_name=True)

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def check_day_crossover(tRxSeconds, tTxSeconds):
    """
    Check the signal propagation time for a day crossover.

    :param tRxSeconds: received time in seconds of week
    :param tTxSeconds: transmitted time in seconds of week
    :return: corrected propagation time
    """
    tau = tRxSeconds - tTxSeconds
    if tau <= DAYSEC / 2:
        # No crossover detected; propagation time is used as-is.
        return tau
    # Strip whole-day offsets from the apparent propagation time.
    residual = tau - round(tau / DAYSEC) * DAYSEC
    # A residual above 10 s is not a plausible propagation delay,
    # so it is treated as invalid and zeroed out.
    return residual if residual <= 10 else 0.0
def matrix2xyx_extrinsic(rotation_matrices: np.ndarray) -> np.ndarray:
    """
    Recover extrinsic XYX Euler angles (in degrees) from rotation matrices.

    Rx(k3) @ Ry(k2) @ Rx(k1) = [[c2, s1s2, c1s2],
                                [s2s3, -s1c2s3+c1c3, -c1c2s3-s1c3],
                                [-s2c3, s1c2c3+c1s3, c1c2c3-s1s3]]
    """
    mats = rotation_matrices.reshape((-1, 3, 3))
    angles = np.zeros((mats.shape[0], 3))

    # The middle angle follows directly from the (0, 0) element.
    angles[:, 1] = np.arccos(mats[:, 0, 0])

    # Degenerate (gimbal-locked) matrices are detected via |r13| below
    # a small tolerance; there only angle 1 is determined, angle 3 is
    # conventionally set to zero.
    tolerance = 1e-4
    locked = np.abs(mats[:, 0, 2]) < tolerance
    angles[locked, 0] = np.arctan2(-mats[locked, 1, 2], mats[locked, 1, 1])
    angles[locked, 2] = 0

    # Regular case: both outer angles from the first row / first column.
    regular = ~locked
    angles[regular, 0] = np.arctan2(mats[regular, 0, 1], mats[regular, 0, 2])
    angles[regular, 2] = np.arctan2(mats[regular, 1, 0], -mats[regular, 2, 0])

    # convert to degrees
    return np.rad2deg(angles)
def _merge_low_rank_eigendecomposition(S1, V1, S2, V2, rank=None):
    """Merge two SVD-based low rank approximations into one.

    Given singular values and right factors (S1, V1) of shape [K1], [M, K1]
    and (S2, V2) of shape [K2], [M, K2] from decompositions

        A1 = U1 @ np.diag(S1) @ V1.T
        A2 = U2 @ np.diag(S2) @ V2.T

    compute factors S, V of an approximate decomposition
    A = U @ np.diag(S) @ V.T, where A is the concatenation of A1 and A2
    along the first axis — without ever forming U1, U2 or U. This is
    useful for merging eigendecompositions V @ np.diag(S**2) @ V.T of
    autocorrelation (or covariance) matrices A.T @ A, optionally keeping
    only a truncated number of singular values.

    Parameters
    ----------
    S1, S2 : array
        Singular values of the first and second matrix.
    V1, V2 : array
        Right factors of the singular value decompositions.
    rank : int
        Number of singular values kept after merging. `None` keeps all
        K1 + K2 values (subject to the dimensional limit M).

    Returns
    -------
    S : array
        (Truncated) singular values of the merged decomposition.
    V : array
        Right factor of the merged decomposition.

    Notes
    -----
    The algorithm for combining SVD based low rank approximations is
    described in more detail in [1]_.

    References
    ----------
    .. [1] Radim, Rehurek,
       "Scalability of Semantic Analysis in Natural Language Processing",
       2011.
    """
    k1, k2 = S1.size, S2.size
    if not rank or rank > k1 + k2:
        rank = k1 + k2
    rank = min(rank, V1.shape[0], V2.shape[0])

    # Overlap of the two subspaces, plus an orthonormal basis Q for the
    # component of V2 not already spanned by V1.
    overlap = np.matmul(V1.T, V2)
    Q, R = np.linalg.qr(V2 - np.matmul(V1, overlap), mode="reduced")

    # Small core matrix whose SVD combines both sets of singular values.
    upper = np.concatenate([np.diag(S1), np.matmul(overlap, np.diag(S2))], axis=1)
    lower = np.concatenate([np.zeros([k2, k1]), np.matmul(R, np.diag(S2))], axis=1)
    core = np.concatenate([upper, lower], axis=0)

    U, S, VT = _truncated_svd(core, rank=rank)
    V = np.matmul(V1, U[:k1, :]) + np.matmul(Q, U[k1:, :])
    return S, V
def usgs(path):
    """Reads USGS-formatted ASCII files.

    Reads the ascii format spectral data from USGS and returns an object with the mean
    and +/- standard deviation. Reference: https://www.sciencebase.gov/catalog/item/5807a2a2e4b0841e59e3a18d

    Args:
        path: file path to the USGS spectra text file.

    Returns:
        s: an earthlib spectralObject with the USGS reflectance data.
    """
    # Initialize so the final `if spectrum_name` check cannot raise a
    # NameError when the file has no "Name:" header record.
    spectrum_name = None

    # open the file and read header info
    with open(path, "r") as f:
        # Sentinel that never matches a line; replaced by the actual first
        # X value once the header reports it, which ends the header loop.
        x_start = "gibberish"
        for line in f:
            if x_start in line:
                break

            if "Name:" in line:
                spectrum_name = line.strip().split("Name:")[-1].strip()

            if "X Units:" in line:
                band_unit = line.strip().split()
                band_unit = band_unit[-1].strip("()").capitalize()

            if "Y Units:" in line:
                refl_unit = line.strip().split()
                refl_unit = refl_unit[-1].strip("()").capitalize()

            if "First X Value:" in line:
                x_start = line.strip().split()[-1]

            if "Number of X Values:" in line:
                n_values = int(line.strip().split()[-1])

        # now that we got our header info, create the arrays
        # NOTE(review): assumes the header always provides "X Units:",
        # "Y Units:" and "Number of X Values:" before the data block;
        # otherwise the variables below are unbound — confirm with format.
        band_centers = _np.empty(n_values)
        reflectance = _np.empty(n_values)

        # `line` still holds the first data row (the one that broke the loop)
        line = line.strip().split()
        band_centers[0] = float(line[0])
        reflectance[0] = float(line[1])

        # resume reading through file
        i = 1
        for line in f:
            line = line.strip().split()
            band_centers[i] = float(line[0])
            reflectance[i] = float(line[1])
            i += 1

    # some files read last -> first wavelength
    if band_centers[0] > band_centers[-1]:
        band_centers = band_centers[::-1]
        # bug fix: was `reflectance[::1]` (a no-op slice), which left the
        # reflectance values misaligned with the reversed band centers
        reflectance = reflectance[::-1]

    # convert units to nanometers and scale 0-1
    if band_unit.lower() == "micrometers":
        band_centers *= 1000.0
        band_unit = "Nanometers"

    if refl_unit.lower() == "percent":
        reflectance /= 100.0

    # create the spectral object
    s = spectralObject(
        1,
        n_values,
        band_centers=band_centers,
        band_unit=band_unit,
        band_quantity="Wavelength",
    )

    # assign relevant values
    s.spectra[0] = reflectance
    if spectrum_name:
        s.names[0] = spectrum_name

    return s
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.