| content (string, 22-815k chars) | id (int64, 0-4.91M) |
|---|---|
def XMLToPython (pattern):
"""Convert the given pattern to the format required for Python
regular expressions.
@param pattern: A Unicode string defining a pattern consistent
with U{XML regular
expressions<http://www.w3.org/TR/xmlschema-2/index.html#regexs>}.
@return: A Unicode string specifying a Python regular expression
that matches the same language as C{pattern}."""
new_pattern_elts = []
new_pattern_elts.append('^')
position = 0
while position < len(pattern):
cg = MaybeMatchCharacterClass(pattern, position)
if cg is None:
new_pattern_elts.append(pattern[position])
position += 1
else:
(cps, position) = cg
new_pattern_elts.append(cps.asPattern())
new_pattern_elts.append('$')
return ''.join(new_pattern_elts)
| 22,500
|
def test_bad_color_array():
"""Test adding shapes to InfiniteLineList."""
np.random.seed(0)
data = np.random.random(1)
vert = VerticalLine(data)
line_list = InfiniteLineList()
line_list.add(vert)
# test setting color with a color array of the wrong shape
bad_color_array = np.array([[0, 0, 0, 1], [1, 1, 1, 1]])
with pytest.raises(ValueError):
setattr(line_list, "color", bad_color_array)
| 22,501
|
def write_deterministic_to_file(configuration, nowcast_data, filename=None, metadata=None):
"""Write deterministic output in ODIM HDF5 format..
Input:
configuration -- Object containing configuration parameters
nowcast_data -- generated deterministic nowcast
filename -- filename for output deterministic HDF5 file
metadata -- dictionary containing nowcast metadata (optional)
"""
#Output filename
if filename is None:
filename = os.path.join(defaults["output_options"]["path"], "deterministic.h5")
_write(nowcast_data, filename, metadata, configuration=configuration, optype="det")
| 22,502
|
def __termios(fd):
"""Try to discover terminal width with fcntl, struct and termios."""
#noinspection PyBroadException
try:
import fcntl
import termios
import struct
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except Exception:
return None
return cr
| 22,503
|
def encode(numbers, GCE=GCE):
"""
do extended encoding on a list of numbers for the google chart api
>>> encode([1690, 90,1000])
'chd=e:aaBaPo'
"""
encoded = []
for number in numbers:
if number > 4095: raise ValueError('too large')
first, second = divmod(number, len(GCE))
encoded.append("%s%s" % (GCE[first], GCE[second]))
return "chd=e:%s" % ''.join(encoded)
| 22,504
|
def compute_f_mat(mat_rat,user_count,movie_count):
"""
compute the f matrix
:param mat_rat: user`s rating matrix([user number,movie number]) where 1 means user likes the index movie.
:param user_count: statistics of moive numbers that user have watch.
:param movie_count: statistics of user numbers that movie have been rated.
:return: f matrix
"""
temp = (mat_rat / user_count.reshape([-1,1]) )/ movie_count.reshape([1,-1])
D = np.dot(mat_rat.T, temp)
f = np.dot(D, mat_rat.T).T
return f
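# Usage sketch (added for illustration; assumes numpy is available as np, as the
# function itself requires). A 2-user x 3-movie 0/1 rating matrix and its counts:
if __name__ == "__main__":
    import numpy as np
    mat_rat = np.array([[1., 0., 1.],
                        [1., 1., 0.]])
    user_count = mat_rat.sum(axis=1)   # movies watched per user -> [2., 2.]
    movie_count = mat_rat.sum(axis=0)  # ratings per movie -> [2., 1., 1.]
    print(compute_f_mat(mat_rat, user_count, movie_count).shape)  # (2, 3)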
| 22,505
|
def main(device, transfer_json, test_mode):
"""
Main
"""
release_josn = f"metadata/release-{device}.json"
if test_mode:
log.warn("Test mode is active")
mount_point = "test/"
else:
mount_point = MOUNT_POINT
    define_tag_from_json(release_json)
copy_metadata_files()
copy_release_files(mount_point, transfer_json)
| 22,506
|
def metadata(path="xdress/metadata.json"):
"""Build a metadata file."""
md = {}
md.update(INFO)
# FIXME: Add the contents of CMakeCache.txt to the metadata dictionary
# write the metadata file
with open(path, 'w') as f:
json.dump(md, f, indent=2)
return md
| 22,507
|
def query_assemblies(organism, output, quiet=False):
"""from a taxid or a organism name, download all refseq assemblies
"""
logger = logging.getLogger(__name__)
assemblies = []
genomes = Entrez.read(Entrez.esearch(
"assembly",
term=f"{organism}[Organism]",
retmax=10000))["IdList"]
logger.info(
f"Found {len(genomes)} organisms in ncbi assemblies for {organism}")
logger.info("Downloading the assemblies. Please be patient.")
for id in tqdm(genomes, disable=quiet):
try:
entrez_assembly = Entrez.read(
Entrez.esummary(
db="assembly",
id=id))["DocumentSummarySet"]["DocumentSummary"][0]
except KeyError as e:
entrez_assembly = Entrez.read(
Entrez.esummary(db="assembly", id=id))["DocumentSummarySet"]
print(entrez_assembly.keys())
raise
else:
assembly = Assembly(entrez_assembly)
output_file = f"{output}/{assembly.accession}.fasta"
download(assembly.ftp_refseq, output_file)
assemblies.append(assembly)
return assemblies
| 22,508
|
def setup_autoscale():
"""Setup AWS autoscale"""
my_id = 'wedding_plattform'
kwargs = {
"ami_id": 'ami-3fec7956', # Official Ubuntu 12.04.1 LTS US-EAST-1
"instance_type": "t1.micro",
"key_name": my_id,
"security_groups": [my_id],
"availability_zones": ["us-east-1a", "us-east-1b", "us-east-1d"],
"min_instances": 0,
"sp_up_adjustment": 1,
"load_balancers": [my_id]
}
ec2.setup_autoscale(my_id, **kwargs)
| 22,509
|
def main(params):
"""Loads the file containing the collation results from Wdiff.
Then, identifies various kinds of differences that can be observed.
Assembles this information for each difference between the two texts."""
print("\n== coleto: running text_analyze. ==")
difftext = get_difftext(params["wdiffed_file"])
analysisresults = analyse_diffs(difftext, params)
analysissummary = save_summary(difftext, analysisresults,
params["analysissummary_file"])
save_analysis(analysisresults, params["analysis_file"])
return analysissummary
| 22,510
|
def test__get_title_failure(_root_errors, _ns):
"""
    GIVEN an XML root Element of a METS/XML file with missing title values
and a dictionary with the mets namespace
WHEN calling the method _get_title()
THEN check that the returned string is correct
"""
expected = ""
actual = _get_title(_root_errors, _ns)
assert actual == expected
| 22,511
|
def test_stacking():
"""Assert that the stacking method creates a Stack model."""
atom = ATOMClassifier(X_bin, y_bin, experiment="test", random_state=1)
pytest.raises(NotFittedError, atom.stacking)
atom.run(["LR", "LGB"])
atom.stacking()
assert hasattr(atom, "stack")
assert "Stack" in atom.models
assert atom.stack._run
| 22,512
|
def matrix_horizontal_stack(matrices: list, _deepcopy: bool = True):
"""
stack matrices horizontally.
:param matrices: (list of Matrix)
:param _deepcopy: (bool)
:return: (Matrix)
"""
assert matrices
for _i in range(1, len(matrices)):
assert matrices[_i].basic_data_type() == matrices[0].basic_data_type()
assert matrices[_i].size()[0] == matrices[0].size()[0]
if _deepcopy:
_matrices = deepcopy(matrices)
else:
_matrices = matrices
_kernel = []
for _i in range(_matrices[0].size()[0]):
_kernel.append([])
for _j in range(len(_matrices)):
for _k in range(_matrices[_j].size()[1]):
_kernel[_i].append(_matrices[_j].kernel[_i][_k])
return Matrix(_kernel)
| 22,513
|
def inds_to_invmap_as_array(inds: np.ndarray):
"""
Returns a mapping that maps global indices to local ones
as an array.
Parameters
----------
inds : numpy.ndarray
An array of global indices.
Returns
-------
numpy.ndarray
Mapping from global to local.
"""
res = np.zeros(inds.max() + 1, dtype=inds.dtype)
for i in prange(len(inds)):
res[inds[i]] = i
return res
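# Usage sketch (added for illustration; `prange` above is assumed to come from
# numba, in which case it behaves like plain `range` when run without the JIT):
if __name__ == "__main__":
    import numpy as np
    inds = np.array([4, 2, 7])
    inv = inds_to_invmap_as_array(inds)
    print(inv[4], inv[2], inv[7])  # 0 1 2 (unused slots stay 0)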
| 22,514
|
def install_plugin_urls():
"""
urlpatterns - bCTF original urlpatterns
"""
urls = []
for plugin in list_plugins():
urls.append(path('{0}/'.format(plugin), include('plugins.{0}.urls'.format(plugin))))
return urls
| 22,515
|
def ident_keys(item, cfg):
"""Returns the list of keys in item which gives its identity
:param item: dict with type information
:param cfg: config options
:returns: a list of fields for item that give it its identity
:rtype: list
"""
try:
return content.ident_keys(item)
except Exception as e:
        logger.error('Failed to extract ident keys for %s: %s', item, e)
        raise
| 22,516
|
def playlists_by_date(formatter, albums):
"""Returns a single playlist of favorite tracks from albums
sorted by decreasing review date.
"""
sorted_tracks = []
sorted_albums = sorted(albums, key=lambda x: x["date"], reverse=True)
for album in sorted_albums:
if album["picks"] is None:
continue
tracks = [
{
"artist_tag": album["artist_tag"],
"album_tag": album["album_tag"],
"artist": album["artist"],
"album": album["album"],
"track": album["tracks"][p],
}
for p in album["picks"]
]
sorted_tracks.extend(tracks)
return formatter.parse_list(sorted_tracks, formatter.format_track)
| 22,517
|
def cryptdisks_start(target, context=None):
"""
Execute :man:`cryptdisks_start` or emulate its functionality.
:param target: The mapped device name (a string).
:param context: See :func:`.coerce_context()` for details.
:raises: :exc:`~executor.ExternalCommandFailed` when a command fails,
:exc:`~exceptions.ValueError` when no entry in `/etc/crypttab`_
matches `target`.
"""
context = coerce_context(context)
logger.debug("Checking if `cryptdisks_start' program is installed ..")
if context.find_program('cryptdisks_start'):
logger.debug("Using the real `cryptdisks_start' program ..")
context.execute('cryptdisks_start', target, sudo=True)
else:
logger.debug("Emulating `cryptdisks_start' functionality (program not installed) ..")
for entry in parse_crypttab(context=context):
if entry.target == target and 'luks' in entry.options:
logger.debug("Matched /etc/crypttab entry: %s", entry)
if entry.is_unlocked:
logger.debug("Encrypted filesystem is already unlocked, doing nothing ..")
else:
unlock_filesystem(context=context,
device_file=entry.source_device,
key_file=entry.key_file,
options=entry.options,
target=entry.target)
break
else:
msg = "Encrypted filesystem not listed in /etc/crypttab! (%r)"
raise ValueError(msg % target)
| 22,518
|
def attach_ipv6_raguard_policy_to_interface(device, interface, policy_name):
""" Attach IPv6 RA Guard Policy to an interface
Args:
device (`obj`): Device object
interface (`str`): Interface to attach policy
policy_name (`str`): Policy name to be attached to interface
Returns:
None
Raises:
SubCommandFailure
"""
cmd_1 = "interface {intf}".format(intf=interface)
cmd_2 = "ipv6 nd raguard attach-policy {policy_name}".format(policy_name=policy_name)
# Configure device
try:
device.configure([cmd_1, cmd_2])
except SubCommandFailure as e:
raise SubCommandFailure(
"Failed to attach IPv6 RA Guard policy {policy_name} on interface "
"{interface} on device {dev}. Error:\n{error}".format(
policy_name=policy_name,
interface=interface,
dev=device.name,
error=e,
)
)
| 22,519
|
def merge_two_sorted_array(l1, l2):
"""
Time Complexity: O(n+m)
Space Complexity: O(n+m)
:param l1: List[int]
:param l2: List[int]
:return: List[int]
"""
if not l1:
return l2
if not l2:
return l1
merge_list = []
i1 = 0
i2 = 0
l1_len = len(l1) - 1
l2_len = len(l2) - 1
while i1 <= l1_len and i2 <= l2_len:
if l1[i1] < l2[i2]:
merge_list.append(l1[i1])
i1 += 1
else:
merge_list.append(l2[i2])
i2 += 1
while i1 <= l1_len:
merge_list.append(l1[i1])
i1 += 1
while i2 <= l2_len:
merge_list.append(l2[i2])
i2 += 1
return merge_list
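# Quick usage check (added for illustration):
if __name__ == "__main__":
    assert merge_two_sorted_array([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]
    assert merge_two_sorted_array([], [7, 8]) == [7, 8]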
| 22,520
|
def regression_metrics(y_true,y_pred):
"""
param1: pandas.Series/pandas.DataFrame/numpy.darray
param2: pandas.Series/pandas.DataFrame/numpy.darray
return: dictionary
Function accept actual prediction labels from the dataset and predicted values from the model and utilizes this
two values/data to calculate r2 score, mean absolute error, mean squared error, and root mean squared error at same time add them to result dictionary.
Finally return the result dictionary
"""
result=dict()
result['R2']=round(r2_score(y_true, y_pred),3)
result['MAE']=round(mean_absolute_error(y_true, y_pred),3)
result['MSE']=round(mean_squared_error(y_true, y_pred),3)
result['RMSE']=round(mean_squared_error(y_true, y_pred,squared=False),3)
return result
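# Usage sketch (added for illustration; assumes the sklearn metrics r2_score,
# mean_absolute_error and mean_squared_error are imported at module level):
if __name__ == "__main__":
    y_true = [3.0, -0.5, 2.0, 7.0]
    y_pred = [2.5, 0.0, 2.0, 8.0]
    print(regression_metrics(y_true, y_pred))
    # {'R2': 0.949, 'MAE': 0.5, 'MSE': 0.375, 'RMSE': 0.612}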
| 22,521
|
def test_auto_id():
"""Tests auto_id option."""
acc = mm.MOTAccumulator(auto_id=True)
acc.update([1, 2, 3, 4], [], [])
acc.update([1, 2, 3, 4], [], [])
assert acc.events.index.levels[0][-1] == 1
acc.update([1, 2, 3, 4], [], [])
assert acc.events.index.levels[0][-1] == 2
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], [], frameid=5)
acc = mm.MOTAccumulator(auto_id=False)
with pytest.raises(AssertionError):
acc.update([1, 2, 3, 4], [], [])
| 22,522
|
def convert_from_quint8(arr):
"""
Dequantize a quint8 NumPy ndarray into a float one.
:param arr: Input ndarray.
"""
assert isinstance(arr, np.ndarray)
assert (
"mgb_dtype" in arr.dtype.metadata
and arr.dtype.metadata["mgb_dtype"]["name"] == "Quantized8Asymm"
), "arr should be a ndarray with quint8 dtype"
scale, zp = (
arr.dtype.metadata["mgb_dtype"]["scale"],
arr.dtype.metadata["mgb_dtype"]["zero_point"],
)
return (arr.astype(np.float32) - zp) * scale
| 22,523
|
def _process_payment_id(state: State, tsx_data: MoneroTransactionData):
"""
Writes payment id to the `extra` field under the TX_EXTRA_NONCE = 0x02 tag.
The second tag describes if the payment id is encrypted or not.
If the payment id is 8 bytes long it implies encryption and
therefore the TX_EXTRA_NONCE_ENCRYPTED_PAYMENT_ID = 0x01 tag is used.
If it is not encrypted, we use TX_EXTRA_NONCE_PAYMENT_ID = 0x00.
Since Monero release 0.13 all 2 output payments have encrypted payment ID
to make BC more uniform.
See:
- https://github.com/monero-project/monero/blob/ff7dc087ae5f7de162131cea9dbcf8eac7c126a1/src/cryptonote_basic/tx_extra.h
"""
# encrypted payment id / dummy payment ID
view_key_pub_enc = None
if not tsx_data.payment_id or len(tsx_data.payment_id) == 8:
view_key_pub_enc = _get_key_for_payment_id_encryption(
tsx_data, state.change_address(), state.client_version > 0
)
if not tsx_data.payment_id:
return
elif len(tsx_data.payment_id) == 8:
view_key_pub = crypto.decodepoint(view_key_pub_enc)
payment_id_encr = _encrypt_payment_id(
tsx_data.payment_id, view_key_pub, state.tx_priv
)
extra_nonce = payment_id_encr
extra_prefix = 1 # TX_EXTRA_NONCE_ENCRYPTED_PAYMENT_ID
# plain text payment id
elif len(tsx_data.payment_id) == 32:
extra_nonce = tsx_data.payment_id
extra_prefix = 0 # TX_EXTRA_NONCE_PAYMENT_ID
else:
raise ValueError("Payment ID size invalid")
lextra = len(extra_nonce)
if lextra >= 255:
raise ValueError("Nonce could be 255 bytes max")
# write it to extra
extra_buff = bytearray(3 + lextra)
extra_buff[0] = 2 # TX_EXTRA_NONCE
extra_buff[1] = lextra + 1
extra_buff[2] = extra_prefix
extra_buff[3:] = extra_nonce
state.extra_nonce = extra_buff
| 22,524
|
def classpartial(*args, **kwargs):
"""Bind arguments to a class's __init__."""
cls, args = args[0], args[1:]
class Partial(cls):
__doc__ = cls.__doc__
        def __new__(cls):
return cls(*args, **kwargs)
Partial.__name__ = cls.__name__
return Partial
| 22,525
|
def load_xml(xml_path):
"""
Загружает xml в etree.ElementTree
"""
if os.path.exists(xml_path):
xml_io = open(xml_path, 'rb')
else:
raise ValueError(xml_path)
xml = objectify.parse(xml_io)
xml_io.close()
return xml
| 22,526
|
def alphabetize_concat(input_list):
"""
Takes a python list.
List can contain arbitrary objects with .__str__() method
(so string, int, float are all ok.)
Sorts them alphanumerically.
Returns a single string with result joined by underscores.
"""
array = np.array(input_list, dtype=str)
array.sort()
return '_'.join(array)
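# Quick illustration (added): everything is cast to str before sorting.
if __name__ == "__main__":
    print(alphabetize_concat(['b', 3, 'a']))  # '3_a_b'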
| 22,527
|
def kick(state, ai, ac, af, cosmology=cosmo, dtype=np.float32, name="Kick",
**kwargs):
"""Kick the particles given the state
Parameters
----------
state: tensor
Input state tensor of shape (3, batch_size, npart, 3)
ai, ac, af: float
"""
with tf.name_scope(name):
state = tf.convert_to_tensor(state, name="state")
        fac = 1 / (ac ** 2 * E(cosmology, ac)) * (Gf(cosmology, af) - Gf(cosmology, ai)) / gf(cosmology, ac)
indices = tf.constant([[1]])
#indices = tf.constant([1])
Xjl = tf.multiply(fac, state[2])
update = tf.expand_dims(Xjl, axis=0)
shape = state.shape
update = tf.scatter_nd(indices, update, shape)
state = tf.add(state, update)
return state
| 22,528
|
def check_similarity(var1, var2, error):
"""
    Check the similarity between two numbers, considering an error margin.
Parameters:
-----------
var1: float
var2: float
error: float
Returns:
-----------
similarity: boolean
"""
    return (var1 <= (var2 + error)) and (var1 >= (var2 - error))
| 22,529
|
def model_type_by_code(algorithm_code):
"""
    Return the algorithm type name for the given algorithm code.
    algorithm_code MUST be a value convertible to int.
    :param algorithm_code: code of algorithm
    :return: algorithm type name for the code, or None
"""
# invalid algorithm code case
if algorithm_code not in ALGORITHM[ALGORITHM_CODE].keys():
return None
return ALGORITHM[TYPE][algorithm_code]
| 22,530
|
def getCasing(word):
""" Returns the casing of a word"""
if len(word) == 0:
return 'other'
elif word.isdigit(): #Is a digit
return 'numeric'
elif word.islower(): #All lower case
return 'allLower'
elif word.isupper(): #All upper case
return 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
return 'initialUpper'
return 'other'
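# Quick illustration (added):
if __name__ == "__main__":
    for w in ("", "2021", "hello", "HELLO", "Hello", "hEllo"):
        print(w or "<empty>", "->", getCasing(w))
    # other, numeric, allLower, allUpper, initialUpper, other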
| 22,531
|
def normalize_angle(deg):
"""
Take an angle in degrees and return it as a value between 0 and 360
:param deg: float or int
:return: float or int, value between 0 and 360
"""
angle = deg
while angle > 360:
angle -= 360
    while angle < 0:
angle += 360
return angle
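# Quick illustration (added, assuming the wrap-around fix above):
if __name__ == "__main__":
    print(normalize_angle(370))  # 10
    print(normalize_angle(-30))  # 330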
| 22,532
|
def test_staging_different_volumes_with_the_same_staging_target_path():
"""Staging different volumes with the same staging_target_path."""
| 22,533
|
def current_global_irradiance(site_properties, solar_properties, timestamp):
"""Calculate the clear-sky POA (plane of array) irradiance for a specific time (seconds timestamp)."""
dt = datetime.datetime.fromtimestamp(timestamp=timestamp, tz=tz.gettz(site_properties.tz))
n = dt.timetuple().tm_yday
sigma = math.radians(solar_properties.tilt)
rho = solar_properties.get('rho', 0.0)
    C = 0.095 + 0.04 * math.sin(math.radians(360 * (n - 100) / 365))
sin_sigma = math.sin(sigma)
cos_sigma = math.cos(sigma)
altitude = get_altitude(latitude_deg=site_properties.latitude, longitude_deg=site_properties.longitude, when=dt)
beta = math.radians(altitude)
sin_beta = math.sin(beta)
cos_beta = math.cos(beta)
azimuth = get_azimuth(latitude_deg=site_properties.latitude, longitude_deg=site_properties.longitude, when=dt)
phi_s = math.radians(180 - azimuth)
phi_c = math.radians(180 - solar_properties.azimuth)
phi = phi_s - phi_c
cos_phi = math.cos(phi)
    # Workaround for a quirk of pysolar: the airmass for a sun elevation (altitude) of zero
    # is infinite, and very small numbers close to zero result in NaNs being returned rather
    # than zero
if altitude < 0.0:
altitude = -1.0
cos_theta = cos_beta * cos_phi * sin_sigma + sin_beta * cos_sigma
ib = get_radiation_direct(when=dt, altitude_deg=altitude)
ibc = ib * cos_theta
idc = C * ib * (1 + cos_sigma) / 2
irc = rho * ib * (sin_beta + C) * ((1 - cos_sigma) / 2)
igc = ibc + idc + irc
# If we still get a bad result just return 0
if math.isnan(igc):
igc = 0.0
return igc
| 22,534
|
def agglomeration_energy_gather(bundle_activities, nonbundle_activities,
n_bundles, agglomeration_energy):
"""
    Accumulate the energy binding a new feature to an existing bundle.
This formulation takes advantage of loops and the sparsity of the data.
The original arithmetic looks like
coactivities = bundle_activities * nonbundle_activities.T
agglomeration_energy += coactivities * agglomeration_energy_rate
Parameters
----------
bundle_activities : array of floats
The activity level of each bundle.
nonbundle_activities : array of floats
The current activity of each input feature that is not explained by
or captured in a bundle.
n_bundles : int
The number of bundles that have been created so far.
agglomeration_energy : 2D array of floats
The total energy that has been accumulated between each input feature
and each bundle.
Results
-------
    Returned indirectly by modifying `agglomeration_energy`.
"""
for i_col, _ in enumerate(nonbundle_activities):
activity = nonbundle_activities[i_col]
if activity > 0.:
            # Only accumulate energy for bundles that have been created
for i_row in range(n_bundles):
if bundle_activities[i_row] > 0.:
coactivity = activity * bundle_activities[i_row]
agglomeration_energy[i_row, i_col] += coactivity
| 22,535
|
def add_glider(i, j, grid):
"""adds a glider with top left cell at (i, j)"""
glider = np.array([[0, 0, 255],
[255, 0, 255],
[0, 255, 255]])
grid[i:i+3, j:j+3] = glider
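# Usage sketch (added for illustration; assumes numpy is available as np):
if __name__ == "__main__":
    import numpy as np
    grid = np.zeros((10, 10), dtype=int)
    add_glider(1, 1, grid)
    print(np.argwhere(grid == 255))  # the five live cells of the glider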
| 22,536
|
def date_formatting(format_date, date_selected):
"""Date formatting management.
Arguments:
format_date {str} -- Date
date_selected {str} -- Date user input
Returns:
str -- formatted date
"""
if len(date_selected) == 19:
date_selected = datetime.strptime(
date_selected, "%d/%m/%Y %H:%M:%S")
elif len(date_selected) == 10:
date_selected = datetime.strptime(date_selected, "%d/%m/%Y")
try:
if "yyyy" in format_date:
format_date = format_date.replace(
"yyyy", date_selected.strftime("%Y"))
elif "yy" in format_date:
format_date = format_date.replace(
"yy", date_selected.strftime("%y"))
if "mm" in format_date:
format_date = format_date.replace(
"mm", date_selected.strftime("%m"))
if "dd" in format_date:
format_date = format_date.replace(
"dd", date_selected.strftime("%d"))
if "hh" in format_date:
format_date = format_date.replace(
"hh", date_selected.strftime("%H"))
if "nn" in format_date:
format_date = format_date.replace(
"nn", date_selected.strftime("%M"))
if "ss" in format_date:
format_date = format_date.replace(
"ss", date_selected.strftime("%S"))
return (format_date, None)
except AttributeError:
return (
None,
_("Date entry error, format is dd/mm/yyyy or dd/mm/yyyy hh:mm:ss")
)
| 22,537
|
def update_validationsets_sources(validation_dict, date_acquired=False):
"""Adds or replaces metadata dictionary of validation reference dataset to
the validation sets sources file
:param validation_dict: dictionary of validation metadata
:param date_acquired:
"""
if not date_acquired:
date = datetime.today().strftime('%d-%b-%Y')
validation_dict['Date Acquired'] = date
v_table = read_ValidationSets_Sources()
existing = v_table.loc[(v_table['Inventory'] == validation_dict['Inventory']) &
(v_table['Year'] == validation_dict['Year'])]
if len(existing)>0:
i = existing.index[0]
v_table = v_table.loc[~v_table.index.isin(existing.index)]
line = pd.DataFrame.from_records([validation_dict], index=[(i)])
else:
inventories = list(v_table['Inventory'])
i = max(loc for loc, val in enumerate(inventories) if val == validation_dict['Inventory'])
line = pd.DataFrame.from_records([validation_dict], index=[(i+0.5)])
v_table = v_table.append(line, ignore_index=False)
v_table = v_table.sort_index().reset_index(drop=True)
log.info('updating ValidationSets_Sources.csv with %s %s',
validation_dict['Inventory'], validation_dict['Year'])
v_table.to_csv(data_dir + 'ValidationSets_Sources.csv', index=False)
| 22,538
|
def test_search(runner):
"""
Test search
"""
res = runner.invoke(search, ["author=wangonya"])
assert res.exit_code == 0
assert "Status code: 200" in res.output
res = runner.invoke(search, ["auonya"])
assert res.exit_code == 0
assert "Status code: 400" in res.output
| 22,539
|
def parse_activity_from_metadata(metadata):
"""Parse activity name from metadata
Args:
metadata: List of metadata from log file
Returns
Activity name from metadata"""
return _parse_type_metadata(metadata)[1]
| 22,540
|
def mask_data_by_FeatureMask(eopatch, data_da, mask):
"""
Creates a copy of array and insert 0 where data is masked.
:param data_da: dataarray
:type data_da: xarray.DataArray
:return: dataaray
:rtype: xarray.DataArray
"""
mask = eopatch[FeatureType.MASK][mask]
if len(data_da.values.shape) == 4:
mask = np.repeat(mask, data_da.values.shape[-1], -1)
else:
mask = np.squeeze(mask, axis=-1)
data_da = data_da.copy()
data_da.values[~mask] = 0
return data_da
| 22,541
|
def makeFields(prefix, n):
"""Generate a list of field names with this prefix up to n"""
    return [prefix + str(i) for i in range(1, n + 1)]
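# Quick illustration (added):
if __name__ == "__main__":
    print(makeFields("q", 3))  # ['q1', 'q2', 'q3']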
| 22,542
|
def process_input_dir(input_dir):
"""
Find all image file paths in subdirs, convert to str and extract labels from subdir names
:param input_dir Path object for parent directory e.g. train
:returns: list of file paths as str, list of image labels as str
"""
file_paths = list(input_dir.rglob('*.png'))
file_path_strings = [str(path) for path in file_paths]
label_strings = [path.parent.name for path in file_paths]
return file_path_strings, label_strings
| 22,543
|
def test_kappa_RTA_aln_with_sigma(aln_lda):
"""Test RTA with smearing method by AlN."""
aln_lda.sigmas = [
0.1,
]
aln_lda.sigma_cutoff = 3
kappa_P_RTA, kappa_C = _get_kappa_RTA(aln_lda, [7, 7, 5])
np.testing.assert_allclose(aln_lda_kappa_P_RTA_with_sigmas, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(aln_lda_kappa_C_with_sigmas, kappa_C, atol=0.02)
aln_lda.sigmas = None
aln_lda.sigma_cutoff = None
| 22,544
|
def plot_2d_projection_many_mp(ax, mp):
"""
Plot many motion primitives projected onto the x-y axis, independent of
original dimension.
"""
n_dim = mp[0]['p0'].shape[0]
for m in mp:
if m['is_valid']:
st, sj, sa, sv, sp = min_time_bvp.uniformly_sample(
m['p0'], m['v0'], m['a0'], m['t'], m['j'], dt=0.001)
if n_dim > 1:
ax.plot(sp[0,:], sp[1,:])
else:
ax.plot(sp[0,:], np.zeros_like(sp[0,:]))
ax.axis('equal')
| 22,545
|
def main():
"""
Entry of module
:return:
"""
setup_logger()
config = None
try:
config = create_config(FLAGS.config_source, FLAGS.json_file)
_LOGGER.info('Begin to compile model, config: %s', config)
except Exception as error: # pylint:disable=broad-except
_LOGGER.fatal("create config fail, error: %s, and can't send response", error)
exit(1)
try:
compiler = create_compiler(config)
_LOGGER.info('Create compiler: %s', compiler)
result = compiler.compile()
_LOGGER.info('Compile result: %s', result)
send_response(config.get_attribute('callback'), result)
except Exception as error: # pylint:disable=broad-except
_LOGGER.error('Model compiler failed, error: %s', error)
_LOGGER.exception(error)
send_response(config.get_attribute('callback'), fail(str(error)))
| 22,546
|
def fak(n):
""" Berechnet die Fakultaet der ganzen Zahl n. """
erg = 1
for i in range(2, n+1):
erg *= i
return erg
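# Quick check (added for illustration):
if __name__ == "__main__":
    print(fak(5))  # 120
    print(fak(0))  # 1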
| 22,547
|
def save(data, destination_path, **kwargs):
"""Generate a csv file from a datastructure.
:param data: Currently data must be a list of dicts.
:type data: list of dict
:param destination_path: Path of the resulting CSV file.
:type destination_path: str
:raises ValueError: If the format of data cannot be determined.
:return: Returns True on success.
:rtype: bool
:Keyword Arguments:
Currently None
"""
if isinstance(data, list):
if isinstance(data[0], dict):
with open(destination_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=data[0].keys(), dialect=csv.excel)
writer.writeheader()
writer.writerows(data)
log.debug(
f"successfully written {len(data)} lines of data to {destination_path}"
)
return True
raise ValueError(f"csv save not implemented for list of {type(data[0])}")
raise ValueError(f"csv save not implemented for {type(data)}")
| 22,548
|
def create_sequences_sonnets(sonnets):
"""
This creates sequences as done in Homework 6, by mapping each word
to an integer in order to create a series of sequences. This function
specifically makes entire sonnets into individual sequences
and returns the list of processed sonnets back to be used in the basic
HMM notebook for generation.
"""
sequences = []
obs_counter = 0
obs_map = {}
for sonnet in sonnets:
sequence = []
for i, line in enumerate(sonnet):
split = line.split()
for word in split:
word = re.sub(r'[^\w]', '', word).lower()
if word not in obs_map:
# Add unique words to the observations map.
obs_map[word] = obs_counter
obs_counter += 1
# Add the encoded word.
sequence.append(obs_map[word])
# Add the encoded sequence.
sequences.append(sequence)
return obs_map, sequences
| 22,549
|
def detect_venv_command(command_name: str) -> pathlib.Path:
"""Detect a command in the same venv as the current utility."""
venv_path = pathlib.Path(sys.argv[0]).parent.absolute()
expected_command_path = venv_path / command_name
if expected_command_path.is_file():
result = expected_command_path
else:
# assume command in the PATH when run...
found = shutil.which(command_name)
if found:
result = pathlib.Path(found)
else:
raise CommandError(
"Command not in users path or venv, {0}".format(command_name)
)
return result
| 22,550
|
def l_to_rgb(img_l):
"""
Convert a numpy array (l channel) into an rgb image
:param img_l:
:return:
"""
lab = np.squeeze(255 * (img_l + 1) / 2)
return color.gray2rgb(lab) / 255
| 22,551
|
def plotLogsInteract(d2, d3, rho1, rho2, rho3, v1, v2, v3, usingT=False):
"""
interactive wrapper of plotLogs
"""
d = np.array((0., d2, d3), dtype=float)
rho = np.array((rho1, rho2, rho3), dtype=float)
v = np.array((v1, v2, v3), dtype=float)
plotLogs(d, rho, v, usingT)
| 22,552
|
def blockchain_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp):
"""
Checks if current time in millis exceeds the time specified in condition
"""
try:
expected_mili_time = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
current_time = timestamp
if current_time <= expected_mili_time:
return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
return None
| 22,553
|
def test_check_phi():
"""Tests the _check_phi function"""
numeaf = 175
def set_phi(f):
tm.phi = f
# First check that None is properly converted
tm._phi = None
assert_is(tm.phi, None)
tm.phi = np.ones(numeaf)
assert_array_equal(tm.phi, np.ones(numeaf))
# Check that incorrect number of entries raises an exception
assert_raises(ValueError, set_phi, np.ones((50, 1)))
# Check that a negative entry raises an exception
x = np.ones(numeaf)
x[123] = -1
assert_raises(ValueError, set_phi, x)
| 22,554
|
def show_aip(mets_file):
"""Show a METS file"""
mets_instance = METS.query.filter_by(metsfile='%s' % (mets_file)).first()
level = mets_instance.level
original_files = mets_instance.metslist
dcmetadata = mets_instance.dcmetadata
divs = mets_instance.divs
filecount = mets_instance.originalfilecount
aip_uuid = mets_file
for element in dcmetadata:
tag = element.get('element')
if tag and tag == 'ark identifier':
aip_uuid = element['value']
break
return render_template(
'aip.html', original_files=original_files,
mets_file=mets_file, level=level, dcmetadata=dcmetadata, divs=divs,
filecount=filecount, aip_uuid=aip_uuid
)
| 22,555
|
def testCartesianEpehemeris(
ephemeris_actual,
ephemeris_desired,
position_tol=1*u.m,
velocity_tol=(1*u.mm/u.s),
magnitude=True,
raise_error=True
):
"""
Tests that the two sets of cartesian ephemeris are within the desired absolute tolerances
of each other. The absolute difference is calculated as |actual - desired|.
Parameters
----------
ephemeris_actual : `~numpy.ndarray` (N, 3) or (N, 6)
Array of ephemeris to compare to the desired ephemeris, may optionally
include velocities.
Assumed units for:
positions : AU,
velocities : AU per day
ephemeris_desired : `~numpy.ndarray` (N, 3) or (N, 6)
Array of desired ephemeris to which to compare the actual ephemeris to, may optionally
include velocities.
Assumed units for:
positions : AU,
velocities : AU per day
position_tol : `~astropy.units.quantity.Quantity` (1)
Absolute tolerance positions need to satisfy (x, y, z, r).
velocity_tol : `~astropy.units.quantity.Quantity` (1)
Absolute tolerance velocity need to satisfy. (vx, vy, vz, v).
magnitude : bool
Test the magnitude of the position difference
and velocity difference vectors as opposed to testing per individual coordinate.
Raises
------
AssertionError:
If |ephemeris_actual - ephemeris_desired| > tolerance.
ValueError:
If ephemeris shapes are not equal.
ValueError:
If coordinate dimensions are not one of 3 or 6.
Returns
-------
None
"""
any_error = False
error_message = "\n"
differences = {}
statistics = {}
if ephemeris_actual.shape != ephemeris_desired.shape:
err = (
"The shapes of the actual and desired ephemeris should be the same."
)
raise ValueError(err)
N, D = ephemeris_actual.shape
if D not in (3, 6):
err = (
"The number of coordinate dimensions should be one of 3 or 6.\n"
"If 3 then the expected inputs are x, y, z positions in AU.\n"
"If 6 then the expected inputs are x, y, z postions in AU\n"
"and vx, vy, vz velocities in AU per day."
)
raise ValueError(err)
# Test positions
if magnitude:
names = ["r"]
else:
names = ["x", "y", "z"]
diff, stats, error = _evaluateDifference(
ephemeris_actual[:, :3],
ephemeris_desired[:, :3],
u.AU,
position_tol,
magnitude=magnitude
)
for i, n in enumerate(names):
differences[n] = diff[:, i]
statistics[n] = {k : v[i] for k, v in stats.items()}
# If any of the differences between desired and actual are
# greater than the allowed tolerance set any_error to True
# and build the error message
if error:
any_error = True
error_message += "{} difference (|actual - desired|) is not within {}.\n".format(names, position_tol)
error_message = __statsToErrorMessage(
stats,
error_message
)
if D == 6:
# Test velocities
if magnitude:
names = ["v"]
else:
names = ["vx", "vy", "vz"]
diff, stats, error = _evaluateDifference(
ephemeris_actual[:, 3:],
ephemeris_desired[:, 3:],
(u.AU / u.d),
velocity_tol,
magnitude=magnitude
)
for i, n in enumerate(names):
differences[n] = diff[:, i]
statistics[n] = {k : v[i] for k, v in stats.items()}
# If any of the differences between desired and actual are
# greater than the allowed tolerance set any_error to True
# and build the error message
if error:
any_error = True
error_message += "{} difference (|actual - desired|) is not within {}.\n".format(names, velocity_tol)
error_message = __statsToErrorMessage(
stats,
error_message
)
if any_error and raise_error:
raise AssertionError(error_message)
return differences, statistics, error
| 22,556
|
def test_fuse_circuit_two_qubit_gates(backend):
"""Check circuit fusion in circuit with two-qubit gates only."""
c = Circuit(2)
c.add(gates.CNOT(0, 1))
c.add(gates.RX(0, theta=0.1234).controlled_by(1))
c.add(gates.SWAP(0, 1))
c.add(gates.fSim(1, 0, theta=0.1234, phi=0.324))
c.add(gates.RY(1, theta=0.1234).controlled_by(0))
fused_c = c.fuse()
K.assert_allclose(fused_c(), c())
| 22,557
|
def mixin_post_create(cls, sender, instance, created, *args, **kwargs):
"""
    Create or update services without identifiers
"""
if not created:
return
user = instance
service_classes = tuple(
c for c in SERVICES if not getattr(c, 'IDENTIFIER_FIELD', None)
)
for service_class in service_classes:
values = {'user': user}
try:
d = service_class.objects.get(**values)
except service_class.DoesNotExist:
d = None
if not d:
service_class.objects.create(**values)
| 22,558
|
def get_longitude_latitude(city_info, station):
"""
利用高德地图查询对应的地铁站经纬度信息,下面的key需要自己去高德官网申请
https://lbs.amap.com/api/webservice/guide/api/georegeo
:param city_info: 具体城市的地铁,如:广州市地铁
:param station: 具体的地铁站名称,如:珠江新城站
:return: 经纬度
"""
addr = city_info + station
    print('*Looking up location: ' + addr)
parameters = {'address': addr, 'key': '98a3444618af14c0f20c601f5a442000'}
base = 'https://restapi.amap.com/v3/geocode/geo'
    response = requests.get(base, parameters, timeout=10)  # 10 s timeout; may be slower when going through a global proxy
if response.status_code == 200:
answer = response.json()
x, y = answer['geocodes'][0]['location'].split(',')
coor = (float(x), float(y))
        print('*Coordinates of ' + station + ':', coor)
return coor
else:
return (None, None)
| 22,559
|
def make_resource_object(resource_type, credentials_path):
"""Creates and configures the service object for operating on resources.
Args:
resource_type: [string] The Google API resource type to operate on.
credentials_path: [string] Path to credentials file, or none for default.
"""
try:
api_name, resource = resource_type.split('.', 1)
except ValueError:
raise ValueError('resource_type "{0}" is not in form <api>.<resource>'
.format(resource_type))
version = determine_version(api_name)
service = make_service(api_name, version, credentials_path)
path = resource.split('.')
node = service
for elem in path:
try:
node = getattr(node, elem)()
except AttributeError:
path_str = '.'.join(path[0:path.index(elem)])
raise AttributeError('"{0}{1}" has no attribute "{2}"'.format(
api_name, '.' + path_str if path_str else '', elem))
return node
| 22,560
|
def AreBenchmarkResultsDifferent(result_dict_1, result_dict_2, test=MANN,
significance_level=0.05):
"""Runs the given test on the results of each metric in the benchmarks.
Checks if the dicts have been created from the same benchmark, i.e. if
metric names match (e.g. first_non_empty_paint_time). Then runs the specified
statistical test on each metric's samples to find if they vary significantly.
Args:
result_dict_1: Benchmark result dict of format {metric: list of values}.
result_dict_2: Benchmark result dict of format {metric: list of values}.
test: Statistical test that is used.
significance_level: The significance level the p-value is compared against.
Returns:
test_outcome_dict: Format {metric: (bool is_different, p-value)}.
"""
AssertThatKeysMatch(result_dict_1, result_dict_2)
test_outcome_dict = {}
for metric in result_dict_1:
is_different, p_value = AreSamplesDifferent(result_dict_1[metric],
result_dict_2[metric],
test, significance_level)
test_outcome_dict[metric] = (is_different, p_value)
return test_outcome_dict
| 22,561
|
def _load_pascal_annotation(image_index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
#image_index = _load_image_set_index()
classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
num_classes = len(classes)
_class_to_ind = dict(zip(classes, xrange(num_classes)))
_data_path = "/var/services/homes/kchakka/py-faster-rcnn/VOCdevkit/VOC2007"
image_index = [image_index]
for index in image_index:
filename = os.path.join(_data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if True:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
##
# commented below by chaitu
##
#overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
##
# commented above by chaitu
##
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = _class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
#overlaps[ix, cls] = 1.0
#seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
#overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes, 'gt_classes' : gt_classes}
| 22,562
|
def virsh_memtune_conf(params, env):
"""
"""
pass
| 22,563
|
def create_global_var(shape,
value,
dtype,
persistable=False,
force_cpu=False,
name=None):
"""
This function creates a new tensor variable with value in the global block(block 0).
Parameters:
shape (list of int): Shape of the variable
value (float): The value of the variable. The new created
variable will be filled with it.
dtype (str): Data type of the variable
persistable (bool, optional): If this variable is persistable.
Default: False
force_cpu (bool, optional): Force this variable to be on CPU.
Default: False
name (str, optional): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
Returns:
Variable: The created Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
persistable=True, force_cpu=True, name='new_var')
"""
helper = LayerHelper("global_var", **locals())
var = helper.create_global_variable(
dtype=dtype,
shape=shape,
persistable=persistable,
name=name,
stop_gradient=True)
helper.set_variable_initializer(
var, initializer=Constant(
value=float(value), force_cpu=force_cpu))
return var
| 22,564
|
def select(population, fitness_val):
"""
    Selection step using roulette-wheel selection.
    :param population: genotypes of the population
    :param fitness_val: fitness of each individual in the population
    :return selected_pop: the population after selection
"""
f_sum = sum(fitness_val)
cumulative = []
for i in range(1, len(fitness_val)+1):
cumulative.append(sum(fitness_val[:i]) / f_sum)
selected_pop = []
for i in range(len(fitness_val)):
rand = np.random.rand()
prand = [(c - rand) for c in cumulative]
j = 0
while prand[j] < 0:
j = j+1
selected_pop.append(population[j])
return selected_pop
| 22,565
|
def _nan_helper(y, nan=False, inf=False, undef=None):
"""
Helper to handle indices and logical indices of NaNs, Infs or undefs.
Definition
----------
def _nan_helper(y, nan=False, inf=False, undef=None):
Input
-----
y 1d numpy array with possible missing values
Optional Input
--------------
At least one of the following has to be given
nan if True, check only for NaN and not Inf.
inf if True, check only for Inf and not NaN.
undef if given then check for undef value rather than NaN and Inf.
Output
------
ind logical indices of missing values
find function, with signature indices = find(ind),
to convert logical indices of NaNs to 'equivalent' indices
Examples
--------
>>> # linear interpolation of NaNs
>>> y = np.array([1, np.nan, 3])
>>> nans, z = _nan_helper(y, nan=True)
>>> y[nans] = np.interp(z(nans), z(~nans), y[~nans])
History
-------
Written, Matthias Cuntz, Jul 2013 - modified from
http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
Modified, Matthias Cuntz, Apr 2014 - assert
Matthias Cuntz, Sep 2021 - code refactoring
"""
assert not ((not nan) & (not inf) & (undef is None)), (
'at least one of nan, inf or undef has to be given.')
out = np.zeros(y.shape, dtype=bool)
if nan:
out = out | np.isnan(y)
if inf:
out = out | np.isinf(y)
if undef is not None:
out = out | (y == undef)
return out, lambda ind: ind.nonzero()[0]
| 22,566
|
def unpacking(block_dets, *, repeat=False, **_kwargs):
"""
Identify name unpacking e.g. x, y = coord
"""
unpacked_els = block_dets.element.xpath(ASSIGN_UNPACKING_XPATH)
if not unpacked_els:
return None
title = layout("""\
### Name unpacking
""")
summary_bits = []
for unpacked_el in unpacked_els:
unpacked_names = [
name_el.get('id') for name_el in unpacked_el.xpath('elts/Name')]
if not unpacked_names:
continue
nice_str_list = gen_utils.get_nice_str_list(unpacked_names, quoter='`')
summary_bits.append(layout(f"""\
Your code uses unpacking to assign names {nice_str_list}
"""))
summary = ''.join(summary_bits)
if not repeat:
unpacking_msg = get_unpacking_msg()
else:
unpacking_msg = ''
message = {
conf.Level.BRIEF: title + summary,
conf.Level.EXTRA: unpacking_msg,
}
return message
| 22,567
|
def load_training_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train)`.
License:
The copyright for Fashion-MNIST is held by Zalando SE.
Fashion-MNIST is licensed under the [MIT license](
https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
"""
download_directory = tempfile.mkdtemp()
base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
files = [
"train-labels-idx1-ubyte.gz",
"train-images-idx3-ubyte.gz",
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=download_directory))
with gzip.open(paths[0], "rb") as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], "rb") as imgpath:
x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
return x_train, y_train
| 22,568
|
def set_difference(tree, context, attribs):
"""A meta-feature that will produce the set difference of two boolean features
(will have keys set to 1 only for those features that occur in the first set but not in the
second).
@rtype: dict
@return: dictionary with keys for key occurring with the first feature but not the second, and \
keys equal to 1
"""
ret = {}
for key, val in context['feats'][attribs[0]].items():
if key not in context['feats'][attribs[1]]:
ret[key] = val
return ret
| 22,569
|
def _output_to_new(parsed_file: parse.PyTestGenParsedFile,
output_dir: str,
include: List[str] = []) -> None:
"""Output the tests in 'parsed_file' to an output directory, optionally
only including a whitelist of functions to output tests for.
Args:
parsed_file: The parsed file to output.
output_dir: The path to the dir to output test files in.
include: The list of function names to generate tests for. If empty,
all functions will be used.
"""
test_file_path = parsed_file.input_file.get_test_file_path(output_dir)
module_name = parsed_file.input_file.get_module()
_ensure_dir(test_file_path)
with open(test_file_path, "w", encoding="utf-8") as test_file:
test_file.write(
generator.generate_test_file(TEST_FILE_MODULES, module_name))
for testable_func in parsed_file.testable_funcs:
if testable_func.function_def.name in UNTESTABLE_FUNCTIONS:
continue
# skip the function if it isn't in the include list (if we have one)
if any(include) and testable_func.function_def.name not in include:
continue
test_file.write(
generator.generate_test_func(testable_func, module_name))
| 22,570
|
def create_STATES(us_states_location):
"""
Create shapely files of states.
Args:
us_states_location (str): Directory location of states shapefiles.
Returns:
States data as cartopy feature for plotting.
"""
proj = ccrs.LambertConformal(central_latitude = 25,
central_longitude = 265,
standard_parallels = (25, 25))
reader = shpreader.Reader(
f'{us_states_location}/ne_50m_admin_1_states_provinces_lines.shp')
states = list(reader.geometries())
STATES = cfeature.ShapelyFeature(states, ccrs.PlateCarree())
return STATES
| 22,571
|
def get_name_by_url(url):
"""Returns the name of a stock from the instrument url. Should be located at ``https://api.robinhood.com/instruments/<id>``
where <id> is the id of the stock.
:param url: The url of the stock as a string.
:type url: str
:returns: Returns the simple name of the stock. If the simple name does not exist then returns the full name.
"""
data = helper.request_get(url)
if not data:
return(None)
# If stock doesn't have a simple name attribute then get the full name.
filter = helper.filter(data, info = 'simple_name')
if not filter or filter == "":
filter = helper.filter(data, info = 'name')
return(filter)
| 22,572
|
def reducer(event, context):
"""Combine results from workers into a single result."""
format_reducer_fn = FORMAT_HANDLERS[event["format"]]["reducer"]
format_reducer_fn(event["request_id"])
increment_state_field(event["request_id"], "CompletedReducerExecutions", 1)
record_timing_event(event["request_id"], "ReduceComplete")
| 22,573
|
def recursively_replace(original, replacements, include_original_keys=False):
"""Clones an iterable and recursively replaces specific values."""
# If this function would be called recursively, the parameters 'replacements' and 'include_original_keys' would have to be
# passed each time. Therefore, a helper function with a reduced parameter list is used for the recursion, which nevertheless
# can access the said parameters.
def _recursion_helper(obj):
#Determine if the object should be replaced. If it is not hashable, the search will throw a TypeError.
try:
if obj in replacements:
return replacements[obj]
except TypeError:
pass
# An iterable is recursively processed depending on its class.
if hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
if isinstance(obj, dict):
contents = {}
for key, val in obj.items():
new_key = _recursion_helper(key) if include_original_keys else key
new_val = _recursion_helper(val)
contents[new_key] = new_val
else:
contents = []
for element in obj:
new_element = _recursion_helper(element)
contents.append(new_element)
# Use the same class as the original.
return obj.__class__(contents)
# If it is not replaced and it is not an iterable, return it.
return obj
return _recursion_helper(original)
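# Usage sketch (added for illustration): values are replaced recursively,
# container types are preserved, and dict keys stay untouched by default.
if __name__ == "__main__":
    data = {"a": [1, 2, (3, 1)], "b": 1}
    print(recursively_replace(data, {1: "one"}))
    # {'a': ['one', 2, (3, 'one')], 'b': 'one'}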
| 22,574
|
def test_indexLengthCheck(wkwdataset):
"""Test whether the dataset length matches the length of train, validation and test datasets"""
indexStrings = getIndsPropString(wkwdataset)
indexList = getIndices(wkwdataset, indexStrings)
lengthSeparate = [len(index) for index in indexList]
assert sum(lengthSeparate) == len(wkwdataset)
| 22,575
|
def set_default_node_as(as_num):
"""
Set the default node BGP AS Number.
:param as_num: The default AS number
:return: None.
"""
client.set_default_node_as(as_num)
| 22,576
|
def destructure(hint: t.Any) -> t.Tuple[t.Any, t.Tuple[t.Any, ...]]:
"""Return type hint origin and args."""
return get_origin(hint), get_args(hint)
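# Quick illustration (added; relies on typing.get_origin/get_args, which the
# original module presumably imports):
if __name__ == "__main__":
    from typing import Dict, List
    print(destructure(Dict[str, int]))  # (dict, (str, int))
    print(destructure(List[int]))       # (list, (int,))
    print(destructure(int))             # (None, ())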
| 22,577
|
def f_x_pbe(x, kappa=0.804, mu=0.2195149727645171):
"""Evaluates PBE exchange enhancement factor.
10.1103/PhysRevLett.77.3865 Eq. 14.
    F_X(x) = 1 + kappa - kappa / (1 + mu * s^2 / kappa)
kappa, mu = 0.804, 0.2195149727645171 (PBE values)
s = c x, c = 1 / (2 (3pi^2)^(1/3) )
Args:
x: Float numpy array with shape (num_grids,), the reduced density gradient.
kappa: Float, parameter.
mu: Float, parameter.
Returns:
Float numpy array with shape (num_grids,), the PBE exchange enhancement
factor.
"""
c = 1 / (2 * (3 * jnp.pi ** 2) ** (1 / 3))
s = c * x
f_x = 1 + kappa - kappa / (1 + mu * s ** 2 / kappa)
return f_x
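# Quick sanity check (added for illustration; assumes jax.numpy is available as
# jnp, as the function body requires). F_X -> 1 as x -> 0 and approaches
# 1 + kappa (~1.804) for large reduced gradients:
if __name__ == "__main__":
    import jax.numpy as jnp
    print(f_x_pbe(jnp.array([0.0, 1.0, 100.0])))  # ~[1.0, 1.006, 1.79]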
| 22,578
|
def index():
"""
if no browser and no platform: it's a CLI request.
"""
if g.client['browser'] is None or g.client['platform'] is None:
string = "hello from API {} -- in CLI Mode"
msg = {'message': string.format(versions[0]),
'status': 'OK',
'mode': 200}
r = Response(j.output(msg))
r.headers['Content-type'] = 'application/json; charset=utf-8'
return r, 200
"""
ELSE: it's obviously on a web browser
"""
string = "<h1>hello from API v1 | {} | {} | {} | {}</h1>"
return string.format(g.client['browser'],
g.client['platform'],
g.client['version'],
g.client['language'])
| 22,579
|
def getKeyWait(opCode):
"""
A key press is awaited, and then stored in VX. (Blocking Operation. All instruction halted until next key event)
OPCODE: FX0A
Parameters:
opCode: (16 bit hexadecimal number)
"""
global PC, register
press = False # STATE OF KEYSTROKE
for keys in KEYBOARD:
if current_key[KEYBOARD[keys]]: # CHECK IF ON
press = True
register[(opCode & 0x0F00) >> 8] = keys # VX TO KEYSTROKE
    if not press: PC -= 4  # NO KEY PRESSED: REWIND PC SO THIS INSTRUCTION REPEATS (BLOCKING)
| 22,580
|
def prompt_yes_no(question, default=None):
"""Asks a yes/no question and returns either True or False."""
prompt = (default is True and 'Y/n') or (default is False and 'y/N') or 'y/n'
valid = {'yes': True, 'ye': True, 'y': True, 'no': False, 'n': False}
while True:
choice = input(question + prompt + ': ').lower()
if not choice and default is not None:
return default
if choice in valid:
return valid[choice]
else:
sys.stdout.write("Invalid reponse\n")
| 22,581
|
def attention_resnet20(**kwargs):
"""Constructs a ResNet-20 model.
"""
model = CifarAttentionResNet(CifarAttentionBasicBlock, 3, **kwargs)
return model
| 22,582
|
def download_data(download_path: str) -> None:
"""
Downloads data from kaggle.
Requires kaggle API key to be set up.
Args:
download_path : str
Download destination
"""
system(
"kaggle competitions download "
+ "-c tpu-getting-started "
+ f'-p "{download_path}"'
)
| 22,583
|
def get_basename(name):
""" [pm/cmds] オブジェクト名からベースネームを取得する """
fullpath = get_fullpath(name)
return re(r"^.*\|", "", fullpath)
| 22,584
|
def get_live_token(resource_uri: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLiveTokenResult:
"""
The response to a live token query.
:param str resource_uri: The identifier of the resource.
"""
__args__ = dict()
__args__['resourceUri'] = resource_uri
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:insights/v20200602preview:getLiveToken', __args__, opts=opts, typ=GetLiveTokenResult).value
return AwaitableGetLiveTokenResult(
live_token=__ret__.live_token)
| 22,585
|
def load_subspace_vectors(embd, subspace_words):
"""Loads all word vectors for the particular subspace in the list of words as a matrix
Arguments
    embd : Dictionary of word-to-embedding for all words
subspace_words : List of words representing a particular subspace
Returns
subspace_embd_mat : Matrix of word vectors stored row-wise
"""
subspace_embd_mat = []
ind = 0
for word in subspace_words:
if word in embd:
subspace_embd_mat.append(embd[word])
ind = ind+1
return subspace_embd_mat
| 22,586
|
def _safe_filename(filename):
"""
Generates a safe filename that is unlikely to collide with existing
objects in Google Cloud Storage.
``filename.ext`` is transformed into ``filename-YYYY-MM-DD-HHMMSS.ext``
"""
filename = secure_filename(filename)
date = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H%M%S")
basename, extension = filename.rsplit('.', 1)
return "{0}-{1}.{2}".format(basename, date, extension)
| 22,587
|
def get_raw(contents: List[str]) -> Tuple[sections.Raw, List[str]]:
"""Parse the \\*RAW section"""
raw_dict, rest = get_section(contents, "raw")
remarks = raw_dict[REMARKS] if REMARKS in raw_dict else ""
raw_info = sections.Raw(
remarks=remarks,
raw=raw_dict,
)
return raw_info, rest
| 22,588
|
async def add(ctx):
"""Country Name
Add a player to the game as country"""
command, country, player = ctx.message.content.split(" ")
if country in Country.__members__:
member = discord.utils.find(lambda m: m.name == player, ctx.message.channel.server.members)
if member != None:
with session_scope() as session:
if session.query(Movelist).filter(Movelist.country == country).one_or_none() is None:
new_movelist = Movelist(country=country,
playername=player,
discord_id=member.id,
moveset=None)
session.add(new_movelist)
userlist.append(member)
await bot.say('Player added')
else:
await bot.say('That country has already been allocated')
else:
await bot.say('Invalid Player')
else:
await bot.say('Invalid Country')
| 22,589
|
def requireIsExisting( path ):
"""
Throws an AssertionError if 'path' does not exist.
"""
requireIsTextNonEmpty( path )
requireMsg( isExisting( path ), path + ": No such file or directory" )
| 22,590
|
def transfers_from_stops(
stops,
stop_times,
transfer_type=2,
trips=False,
links_from_stop_times_kwargs={'max_shortcut': False, 'stop_id': 'stop_id'},
euclidean_kwargs={'latitude': 'stop_lat', 'longitude': 'stop_lon'},
seek_traffic_redundant_paths=True,
seek_transfer_redundant_paths=True,
max_distance=800,
euclidean_speed=5 * 1000 / 3600 / 1.4,
geometry=False,
gtfs_only=False
):
"""
Builds a relevant footpath table from the stop_times and stops tables of a transitfeed.
The trips table may be used to spot the "dominated" footpaths that offer no new connection
(compared to the pool of stops), for example:
* line A stops at station i and station k;
* line B stops at station j and station k;
* no other line stops at a or b;
* the footpath F goes from i to j;
* In our understanding : F is dominated by the station k
:param stops: DataFrame consistent with the GTFS table "trips"
:param stop_times: DataFrame consistent with the GTFS table "trips"
:param transfer_type: how to fill the 'transfer_type' column of the feed
:param trips: DataFrame consistent with the GTFS table "trips"
:param links_from_stop_times_kwargs: kwargs to pass to transitlinks.links_from_stop_times, called on stop_times
:param euclidean_kwargs: kwargs to pass to skims.euclidean (the name of the latitude and longitude column)
:param seek_traffic_redundant_paths: if True, only the footpaths that do not belong to the transit links are kept.
the transit links are built from the stop times using transitlinks.links_from_stop_times. The maximum number of
transit links to concatenate in order to look for redundancies may be passed in the kwargs ('max_shortcut').
For example, if max_shortcut = 5: the footpath that can be avoided be taking a five stations ride will be tagged
as "dominated".
:param seek_transfer_redundant_paths: if True, the "trips" table is used to look for the dominated footpaths
:param max_distance: maximum distance of the footpaths (meters as the crows fly)
:param euclidean_speed: speed as the crows fly on the footpaths.
    :param geometry: If True, a geometry column (shapely.geometry.linestring.Linestring object) is added to the table
    :param gtfs_only: If True, only the non-dominated paths are kept and only the GTFS transfer columns are returned
    :return: footpaths data with optional "dominated" tag
"""
stop_id = links_from_stop_times_kwargs['stop_id']
origin = stop_id + '_origin'
destination = stop_id + '_destination'
euclidean = skims.euclidean(stops.set_index(stop_id), **euclidean_kwargs)
euclidean.reset_index(drop=True, inplace=True)
euclidean['tuple'] = pd.Series(list(zip(list(euclidean['origin']), list(euclidean['destination']))))
short_enough = euclidean[euclidean['euclidean_distance'] < max_distance]
short_enough = short_enough[short_enough['origin'] != short_enough['destination']]
footpath_tuples = {tuple(path) for path in short_enough[['origin', 'destination']].values.tolist()}
    paths = euclidean[euclidean['tuple'].isin(footpath_tuples)].copy()
    paths['dominated'] = False
_stop_times = stop_times
if stop_id in stops.columns and stop_id not in stop_times.columns:
_stop_times = pd.merge(
stop_times, stops[['id', stop_id]], left_on='stop_id', right_on='id', suffixes=['', '_merged'])
if seek_traffic_redundant_paths:
links = feed_links.link_from_stop_times(_stop_times, **links_from_stop_times_kwargs).reset_index()
in_links_tuples = {tuple(path) for path in links[[origin, destination]].values.tolist()}
        paths['traffic_dominated'] = paths['tuple'].isin(in_links_tuples)
        paths['dominated'] = paths['dominated'] | paths['traffic_dominated']
stop_routes = {}
stop_set = set(_stop_times[stop_id])
    # If two routes are connected by several footpaths, we only keep the shortest one.
    # If routes a and b are each connected to routes c, d and e by several footpaths,
    # we keep only the shortest one that does the job.
if trips is not False:
grouped = pd.merge(_stop_times, trips, left_on='trip_id', right_on='id').groupby(stop_id)['route_id']
stop_routes = grouped.aggregate(lambda x: frozenset(x)).to_dict()
def get_routes(row):
return tuple((stop_routes[row['origin']], stop_routes[row['destination']]))
        paths = paths[(paths['origin'].isin(stop_set) & paths['destination'].isin(stop_set))].copy()
        paths['trips'] = paths.apply(get_routes, axis=1)
        paths = paths.sort_values('euclidean_distance').groupby(['trips', 'dominated'], as_index=False).first()
paths['min_transfer_time'] = paths['euclidean_distance'] / euclidean_speed
paths = paths[paths['origin'] != paths['destination']]
if seek_transfer_redundant_paths:
paths['frozen'] = paths['trips'].apply(lambda a: frozenset(a[0]).union(frozenset(a[1])))
max_length = max([len(f) for f in list(paths['frozen'])])
to_beat = []
for length in range(max_length + 1):
for stop in stop_routes.values():
for c in list(itertools.combinations(stop, length)):
to_beat.append(frozenset(c))
to_beat = set(to_beat)
paths['transfer_dominated'] = paths['frozen'].apply(lambda f: f in to_beat)
paths['dominated'] = paths['dominated'] | paths['transfer_dominated']
if geometry and not gtfs_only:
paths['geometry'] = paths.apply(linestring_geometry, axis=1)
paths['from_stop_id'] = paths['origin']
paths['to_stop_id'] = paths['destination']
paths['transfer_type'] = transfer_type
if gtfs_only:
paths = paths[~paths['dominated']]
paths = paths[['from_stop_id', 'to_stop_id', 'transfer_type', 'min_transfer_time']]
return paths
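
# Usage sketch, not a definitive recipe: the 'gtfs/' paths below are hypothetical, the tables
# are assumed to follow standard GTFS column names, and the trips table is assumed to expose
# an 'id' column matching stop_times.trip_id (as the merge above expects).
def build_transfers_example():
    import pandas as pd
    stops = pd.read_csv('gtfs/stops.txt')
    stop_times = pd.read_csv('gtfs/stop_times.txt')
    trips = pd.read_csv('gtfs/trips.txt').rename(columns={'trip_id': 'id'})
    transfers = transfers_from_stops(
        stops,
        stop_times,
        trips=trips,
        max_distance=500,  # keep footpaths shorter than 500 m as the crow flies
        gtfs_only=True     # drop dominated paths, keep only the transfers.txt columns
    )
    transfers.to_csv('gtfs/transfers.txt', index=False)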
| 22,591
|
def export_retinanet(args: argparse.Namespace) -> None:
""" Loads, converts and export retinanet as tf serving model. """
weights_path = args.weights
version = args.version
num_classes = args.num_classes
export_path = args.output_path
backbone_name = args.backbone_name
anchors_file = args.anchors
anchors = AnchorParametersWrap.from_conf(anchors_file)
input_model = string_input_model()
retina_model = load_retina(
weights_path, num_classes, anchors, backbone_name)
final_model = merge_models(retina_model, input_model)
    log.info('merged retinanet and input model')
final_model.summary()
retina_export = RetinaServingExporter(final_model, ServingSignature())
retina_export.export(export_path, version)
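
# Usage sketch: export_retinanet takes an argparse.Namespace, so the CLI parser can be bypassed
# by building one directly. Every value below (file names, class count, backbone) is a
# hypothetical placeholder rather than a value from the original project.
def export_retinanet_example():
    import argparse
    args = argparse.Namespace(
        weights='snapshots/retinanet_inference.h5',  # hypothetical Keras weights file
        version=1,                                   # tf serving model version
        num_classes=80,
        output_path='serving/retinanet',
        backbone_name='resnet50',
        anchors='config/anchors.ini',                # hypothetical anchor configuration file
    )
    export_retinanet(args)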
| 22,592
|
def PySequence_ITEM(space, w_obj, i):
"""Return the ith element of o or NULL on failure. Macro form of
PySequence_GetItem() but without checking that
PySequence_Check(o)() is true and without adjustment for negative
indices.
This function used an int type for i. This might require
changes in your code for properly supporting 64-bit systems."""
# XXX we should call Py*_GET_ITEM() instead of Py*_GetItem()
# from here, but we cannot because we are also called from
# PySequence_GetItem()
py_obj = as_pyobj(space, w_obj)
if isinstance(w_obj, tupleobject.W_TupleObject):
from pypy.module.cpyext.tupleobject import PyTuple_GetItem
py_res = PyTuple_GetItem(space, py_obj, i)
incref(space, py_res)
keepalive_until_here(w_obj)
return py_res
if isinstance(w_obj, W_ListObject):
from pypy.module.cpyext.listobject import PyList_GetItem
py_res = PyList_GetItem(space, py_obj, i)
incref(space, py_res)
keepalive_until_here(w_obj)
return py_res
as_sequence = py_obj.c_ob_type.c_tp_as_sequence
if as_sequence and as_sequence.c_sq_item:
ret = generic_cpy_call(space, as_sequence.c_sq_item, w_obj, i)
return make_ref(space, ret)
w_ret = space.getitem(w_obj, space.newint(i))
return make_ref(space, w_ret)
| 22,593
|
def run_model_pipeline(process_number, uuid_list):
"""
Run the modeling stage of the pipeline.
"""
for uuid in uuid_list:
if uuid is None:
continue
try:
run_model_pipeline_for_user(uuid)
except Exception as e:
print "dang flabbit failed on error %s" % e
| 22,594
|
def decode_hex(data):
"""Decodes a hex encoded string into raw bytes."""
try:
return codecs.decode(data, 'hex_codec')
except binascii.Error:
raise TypeError()
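
# Quick checks for decode_hex, assuming Python 3 semantics for the codecs/binascii modules
# it already relies on.
def decode_hex_example():
    assert decode_hex(b'48656c6c6f') == b'Hello'
    try:
        decode_hex(b'zz')  # not valid hex: the binascii.Error is re-raised as TypeError
    except TypeError:
        pass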
| 22,595
|
def network(ipaddress, info_type):
"""Show IP (IPv4) BGP network"""
command = 'sudo vtysh -c "show ip bgp'
if ipaddress is not None:
if '/' in ipaddress:
            # For a network prefix, all info_type options are available.
            pass
        else:
            # For a plain IP address, check info_type and abort if the specified option is not supported.
if info_type in ['longer-prefixes']:
click.echo('The parameter option: "{}" only available if passing a network prefix'.format(info_type))
click.echo("EX: 'show ip bgp network 10.0.0.0/24 longer-prefixes'")
raise click.Abort()
command += ' {}'.format(ipaddress)
# info_type is only valid if prefix/ipaddress is specified
if info_type is not None:
command += ' {}'.format(info_type)
command += '"'
run_command(command)
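
# Example invocations (a sketch; assumes this function is registered as the
# "show ip bgp network" subcommand of a click-based CLI, as the messages above suggest):
#   show ip bgp network                              -> dump the whole IPv4 BGP table
#   show ip bgp network 10.0.0.0/24 longer-prefixes  -> prefix: every info_type is accepted
#   show ip bgp network 10.0.0.1 longer-prefixes     -> plain address: aborted with a hint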
| 22,596
|
def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox
x1, y1, x2, y2 = crop_coords
cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
return normalize_bbox(cropped_bbox, crop_height, crop_width)
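
# Worked example of the crop arithmetic above (a sketch with hypothetical numbers;
# denormalize_bbox scales by the image size, normalize_bbox by the crop size):
#   image: rows=100, cols=200; normalized bbox = (0.25, 0.20, 0.75, 0.80)
#   denormalized (pixels): (0.25*200, 0.20*100, 0.75*200, 0.80*100) = (50, 20, 150, 80)
#   crop_coords = (40, 10, 190, 90), so crop_width=150 and crop_height=80
#   shifted into the crop frame: (50-40, 20-10, 150-40, 80-10) = (10, 10, 110, 70)
#   re-normalized by the crop size: (10/150, 10/80, 110/150, 70/80) ~= (0.067, 0.125, 0.733, 0.875)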
| 22,597
|
def computeDateGranularity(ldf):
"""
Given a ldf, inspects temporal column and finds out the granularity of dates.
Example
----------
['2018-01-01', '2019-01-02', '2018-01-03'] -> "day"
['2018-01-01', '2019-02-01', '2018-03-01'] -> "month"
['2018-01-01', '2019-01-01', '2020-01-01'] -> "year"
Parameters
----------
ldf : lux.luxDataFrame.LuxDataFrame
LuxDataFrame with a temporal field
Returns
-------
field: str
A str specifying the granularity of dates for the inspected temporal column
"""
dateFields = ["day", "month", "year"]
if ldf.dataType["temporal"]:
        dateColumn = ldf[ldf.dataType["temporal"][0]]  # assumes only one temporal column, may need to change this function to receive multiple temporal columns in the future
dateIndex = pd.DatetimeIndex(dateColumn)
for field in dateFields:
if hasattr(dateIndex,field) and len(getattr(dateIndex, field).unique()) != 1 : #can be changed to sum(getattr(dateIndex, field)) != 0
return field
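
# Minimal standalone sketch of the same granularity check on a plain list of dates,
# bypassing the LuxDataFrame wrapper (a hypothetical helper; assumes pandas is importable).
def dateGranularityExample():
    import pandas as pd
    dates = pd.DatetimeIndex(['2018-01-01', '2019-02-01', '2018-03-01'])
    for field in ["day", "month", "year"]:
        if len(getattr(dates, field).unique()) != 1:
            return field  # -> "month": every day equals 1, but the months differ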
| 22,598
|
def next_pending_location(user_id: int, current_coords: Optional[Tuple[int, int]] = None) -> Optional[Tuple[int, int]]:
"""
Retrieves the next pending stone's coordinates. If current_coords is not specified (or is not pending),
retrieves the longest-pending stone's coordinates. The order for determining which stone is "next" is
defined by how long stones have been pending -- successive applications of this function will retrieve
successively younger pending stones. If there is no younger pending stone, the coordinates of the
oldest pending stone are returned. If there are no pending stones at all, None is returned.
"""
with sqlite3.connect(db_file) as db:
cur = db.cursor()
current_stone_pending_since = 0 # Will always be older than any stone.
if current_coords is not None:
current_stone = get_stone(*current_coords)
if current_stone is not None and current_stone["player"] == user_id and current_stone["status"] == "Pending":
# The current stone belongs to the player and is pending.
current_stone_pending_since = current_stone["last_status_change_time"]
query = """SELECT
x, y
FROM
stones
WHERE
player = ? AND
status = 'Pending' AND
last_status_change_time > ?
ORDER BY
last_status_change_time ASC;"""
cur.execute(query, [user_id, current_stone_pending_since])
next_pending_coords = cur.fetchone()
# A younger pending stone exists.
if next_pending_coords is not None:
return next_pending_coords
# Otherwise, a younger pending stone does not exist.
# Retrieve the oldest stone.
cur.execute(query, [user_id, 0])
next_pending_coords = cur.fetchone()
# Return either the coords of the oldest pending stone, or None if no such stone exists.
return next_pending_coords
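
# Usage sketch (the player id 42 and a populated 'stones' table are assumptions, not values
# from this codebase): walk a player's pending stones from the longest-pending one onwards.
def pending_walkthrough_example():
    oldest = next_pending_location(42)               # longest-pending stone, or None
    if oldest is not None:
        younger = next_pending_location(42, oldest)  # next-younger pending stone
        # calling with the youngest pending stone wraps back around to `oldest`
        return oldest, younger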
| 22,599
|