content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def almost_equal_ignore_nan(a, b, rtol=None, atol=None):
    """Check near-equality of two NumPy arrays while masking out NaNs.

    Positions that are NaN in *either* input are zeroed in both working
    copies, so they never contribute to the comparison.  The underlying
    check combines a relative and an absolute measure of approximate
    equality: if either passes, the arrays are considered equal.  The
    absolute check avoids false failures when all values are near zero.

    Parameters
    ----------
    a : np.ndarray
    b : np.ndarray
    rtol : None or float
        The relative threshold. Default threshold will be used if set to ``None``.
    atol : None or float
        The absolute threshold. Default threshold will be used if set to ``None``.
    """
    a_work = np.copy(a)
    b_work = np.copy(b)
    nan_positions = np.isnan(a_work) | np.isnan(b_work)
    a_work[nan_positions] = 0
    b_work[nan_positions] = 0
    return almost_equal(a_work, b_work, rtol, atol)
def make_commands(manager):
    """Prototype"""
    # pylint: disable=no-member
    # Instantiate every concrete follower-command subclass lazily.
    command_classes = AbstractTwitterFollowersCommand.__subclasses__()
    return (command_class(manager) for command_class in command_classes)
def copy_rds_snapshot(
    target_snapshot_identifier: str,
    source_snapshot_identifier: str,
    target_kms: str,
    wait: bool,
    rds,
):
    """Copy an RDS cluster snapshot and encrypt the copy with ``target_kms``.

    Parameters
    ----------
    target_snapshot_identifier : str
        Identifier for the new snapshot.
    source_snapshot_identifier : str
        Identifier of the snapshot to copy.
    target_kms : str
        KMS key id used to encrypt the copy.
    wait : bool
        When True, block until the new snapshot becomes available.
    rds
        boto3 RDS client (anything exposing ``copy_db_cluster_snapshot``
        and ``get_waiter``).

    Returns
    -------
    dict or None
        The ``DBClusterSnapshot`` description, or ``None`` when waiting
        for the snapshot to become available failed.
    """
    logger = logging.getLogger("copy_rds_snapshot")
    snapshot = rds.copy_db_cluster_snapshot(
        SourceDBClusterSnapshotIdentifier=source_snapshot_identifier,
        TargetDBClusterSnapshotIdentifier=target_snapshot_identifier,
        KmsKeyId=target_kms,
    )["DBClusterSnapshot"]
    if not wait:
        return snapshot
    # Brief pause before polling so the snapshot is registered server-side.
    sleep(5)
    waiter = rds.get_waiter("db_cluster_snapshot_available")
    logger.warning(
        "Waiting for snapshot {} to be created...".format(
            snapshot["DBClusterSnapshotIdentifier"]
        )
    )
    try:
        waiter.wait(
            DBClusterSnapshotIdentifier=snapshot["DBClusterSnapshotIdentifier"],
            SnapshotType="manual",
            Filters=[
                {
                    "Name": "db-cluster-id",
                    "Values": [
                        snapshot["DBClusterIdentifier"],
                    ],
                },
            ],
            WaiterConfig={"Delay": 10, "MaxAttempts": 100},
        )
    except Exception:  # was a bare except: -- narrow so Ctrl-C etc. still propagate
        logger.exception(
            "Unable to wait for snapshot {} to be created for cluster {}".format(
                snapshot["DBClusterSnapshotIdentifier"], snapshot["DBClusterIdentifier"]
            )
        )
        # Original implicitly fell through and returned None on failure;
        # keep that contract but make it explicit.
        return None
    return snapshot
def test_load_yaml_without_yaml_support():
    """
    Test that YAML files are not loaded if YAML is not installed.
    """
    @ddt
    class NoYAMLInstalledTest(object):
        @file_data('test_data_dict.yaml')
        def test_file_data_yaml_dict(self, value):
            assert_true(has_three_elements(value))

    instance = NoYAMLInstalledTest()
    # Every generated test method should raise because YAML is unavailable.
    for test_name in filter(_is_test, NoYAMLInstalledTest.__dict__):
        bound_method = getattr(instance, test_name)
        assert_raises(ValueError, bound_method)
def get_other_menuitems():
    """
    Return the menu items of every menu, keyed by the menu's primary key:
    {0: QuerySet, 1: QuerySet, ..}
    """
    return {menu.pk: menu.menuitem_set.all() for menu in Menu.objects.all()}
def buildlxc(host):
    """Create an LXC container in Proxmox from the given host resource configuration.

    Looks up Proxmox credentials and the primary hypervisor via
    ``proxmoxutil``, reads container sizing and network settings from the
    host resource, creates the container, then resizes its rootfs.

    Raises:
        ValueError: if the host template, proxmox user, or primary
            hypervisor is missing.
    """
    if not exists(host):
        raise ValueError("Host template is missing. Please create host template")
    container = Container.getContainer(HOST_CONTAINER)
    hostresource = container.loadResource(host)
    #get proxmox user and hypervisor
    userresource = proxmoxutil.listuser()
    if userresource is None:
        raise ValueError("No proxmox user found!! Please use proxmoxutil command to update user credentials")
    user = userresource.properties[PROPERTIES_USER]
    password = userresource.properties[PROPERTIES_PASSWORD]
    authrealm = userresource.properties[PROPERTIES_AUTHREALM]
    # Proxmox logins are of the form user@realm (e.g. root@pam).
    puser = user+'@'+authrealm
    primary = proxmoxutil.listprimary()
    if primary is None:
        raise ValueError("Primary proxmox hypervisor not found!! Please use proxmoxutil command to update primary hypervisor")
    hypervisor = primary.properties[PROPERTIES_HYPERVISOR]
    print "Authenticating "+puser +" on "+ hypervisor
    proxmox = ProxmoxAPI(hypervisor, user=puser, password=password, verify_ssl=False)
    node = proxmox.nodes(hostresource.properties[HYPERVISOR])
    # Container identity and sizing parameters, all read from the host resource.
    hostname = hostresource.properties[HOSTNAME]
    vmid = int(hostresource.properties[HOSTID])
    ostemplate = str(hostresource.properties[PROPERTIES_OSTEMPLATE])
    cpulimit = int(hostresource.properties[PROPERTIES_CPULIMIT])
    cpuunits = int(hostresource.properties[PROPERTIES_CPUUNITS])
    memory = int(hostresource.properties[PROPERTIES_MEMORY])
    swap = int(hostresource.properties[PROPERTIES_SWAP])
    storage = hostresource.properties[PROPERTIES_STORAGE]
    disk = int(hostresource.properties[PROPERTIES_DISK])
    # Proxmox expects sizes like "8G".
    disksize="%dG"%(disk)
    interfaces = hostresource.properties[INTERFACES]
    # Build net0, net1, ... keyword arguments for the Proxmox create call.
    i=0
    netconfig = dict()
    for interface in interfaces:
        print "Configuring %s" %interface
        netconfig["net"+str(i)] = hostresource.properties[interface]
        i=i+1
    print "Building LXC with the following parameters:"
    print "Vmid: %d" %vmid
    print "Template: %s" %ostemplate
    print "Cpu Limit: %d" %cpulimit
    print "Cpu Units: %d" %cpuunits
    print "Memory: %d" %memory
    print "Swap: %d" %swap
    print "Storage: %s" %storage
    print "Disk: %d" %disk
    node.lxc.create(vmid=vmid, hostname=hostname, ostemplate=ostemplate, password=DEFAULT_PASSWORD, cpuunits=cpuunits, cpulimit=cpulimit, memory=memory, swap=swap, **netconfig)
    print "Creating LXC....."
    # NOTE(review): fixed 30s sleeps assume the create/resize tasks finish in
    # time -- confirm on slow hypervisors; a task-status poll would be safer.
    time.sleep(30)
    print "Resizing rootfs"
    node.lxc(vmid).resize.put(disk='rootfs', size=disksize)
    time.sleep(30)
    print "LXC created"
def event_loop(handle_key, delay=10):
    """Poll pygame events forever, dispatching key presses to *handle_key*.

    Sleeps ``delay`` milliseconds between polls to avoid busy-waiting.
    """
    while True:
        pygame.event.pump()
        next_event = pygame.event.poll()
        if next_event.type == KEYDOWN:
            handle_key(next_event.key)
        pygame.time.delay(delay)
def gather_simulation_file_paths(in_folder: str, filePrefix: str = "",
                                 fileSuffixes: Union[str, List[str]] = [".tre", ".tre.tar.gz"],
                                 files_per_folder: int = 1,
                                 verbose: bool = False) -> List[str]:
    """Collect simulation output files below a directory tree.

    Walks ``in_folder`` recursively and collects files whose names contain
    ``filePrefix`` and any of ``fileSuffixes``.  Directories whose path ends
    in a digit and whose basename starts with ``eq`` (equilibration runs)
    are skipped.  Only directories containing exactly ``files_per_folder``
    matching files contribute.

    Parameters
    ----------
    in_folder : str
        Directory where the files should be searched.
    filePrefix : str, optional
        Prefix of the file name pattern (default "").
    fileSuffixes : Union[str, List[str]]
        Suffixes of the file name pattern (default [".tre", ".tre.tar.gz"]).
    files_per_folder : int, optional
        Number of files per folder (default 1).
    verbose : bool, optional
        Verbose output (default False).

    Returns
    -------
    List[str]
        Files sorted by the trailing number in each file name; discovery
        order (with a warning) when not every file carries one.

    Raises
    ------
    ValueError
        If no matching file was found.
    """
    files = []
    if isinstance(fileSuffixes, str):
        fileSuffixes = [fileSuffixes]
    if verbose:
        print("SEARCH PATTERN: " + filePrefix + " + * +" + str(fileSuffixes))

    for dirname, dirnames, filenames in os.walk(in_folder):
        # Skip equilibration directories such as ".../eq3".
        if str(dirname[-1]).isdigit() and os.path.basename(dirname).startswith("eq"):
            continue
        # Check the current directory for the file pattern.
        tmp_files = [file for file in filenames if
                     (filePrefix in file and any(suffix in file for suffix in fileSuffixes))]
        if len(tmp_files) == files_per_folder:
            files.extend(dirname + "/" + f for f in tmp_files)
        if verbose:
            print("walking to in_dir: ", os.path.basename(dirname), "found: ", len(tmp_files))

    try:
        # Sort by the last decimal "_"-separated component, e.g. "sim_3_x.tre" -> 3.
        # Only IndexError can occur here (a name without any decimal component);
        # the original bare `except:` was needlessly broad.
        keys = [[int(y) for y in x.split("_") if y.isdecimal()][-1] for x in files]
        sorted_files = [f for _, f in sorted(zip(keys, files), key=lambda pair: pair[0])]
    except IndexError:
        warnings.warn("Files are not all enumerated! no file sorting.")
        sorted_files = files

    if verbose:
        print("\nfoundFiles:\n")
        print("\t" + "\n\t".join(sorted_files))
    if len(sorted_files) == 0:
        raise ValueError("could not find any file with the prefix: " + filePrefix + " in folder : \n" + in_folder)
    return sorted_files
def create_deck(shuffle=False):
    """Build a fresh 52-card deck of (suit, rank) tuples, optionally shuffled."""
    deck = []
    for rank in RANKS:
        for suit in SUITS:
            deck.append((suit, rank))
    if shuffle:
        random.shuffle(deck)
    return deck
def mock_gate_util_provider_oldest_namespace_feed_sync(
    monkeypatch, mock_distromapping_query
):
    """
    Mocks for anchore_engine.services.policy_engine.engine.policy.gate_util_provider.GateUtilProvider.oldest_namespace_feed_sync

    Returns a callable that installs the monkeypatches; tests invoke it with
    optional feed/grype-db metadata to choose which code path the provider sees.
    """
    # required for FeedOutOfDateTrigger.evaluate
    # setup for anchore_engine.services.policy_engine.engine.feeds.feeds.FeedRegistry.registered_vulnerability_feed_names
    init_feed_registry()
    @contextmanager
    def mock_session_scope():
        """
        Mock context manager for anchore_engine.db.session_scope.
        """
        yield None
    def raise_no_active_grypedb(session):
        # Stand-in for get_most_recent_active_grypedb when no DB is active.
        raise NoActiveGrypeDB
    def _setup_mocks(feed_group_metadata=None, grype_db_feed_metadata=None):
        """Install the monkeypatches.

        Args:
            feed_group_metadata: value the mocked get_feed_group_detached
                returns (may be None).
            grype_db_feed_metadata: when truthy, the mocked
                get_most_recent_active_grypedb returns it; otherwise the
                mock raises NoActiveGrypeDB.
        """
        # required for FeedOutOfDateTrigger.evaluate
        # mocks anchore_engine.services.policy_engine.engine.feeds.db.get_feed_group_detached
        monkeypatch.setattr(
            "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.session_scope",
            mock_session_scope,
        )
        if grype_db_feed_metadata:
            monkeypatch.setattr(
                "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_most_recent_active_grypedb",
                lambda x: grype_db_feed_metadata,
            )
        else:
            monkeypatch.setattr(
                "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_most_recent_active_grypedb",
                raise_no_active_grypedb,
            )
        # mocks anchore_engine.db.db_grype_db_feed_metadata.get_most_recent_active_grypedb
        # if feed_group_metadata:
        monkeypatch.setattr(
            "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_feed_group_detached",
            lambda x, y: feed_group_metadata,
        )
    return _setup_mocks
def ESMP_LocStreamGetBounds(locstream, localDe=0):
    """
    Return the exclusive bounds of an ESMP_LocStream's local DE.

    Preconditions: An ESMP_LocStream has been created.
    Postconditions: The exclusive lower bound (shifted to 0-based) and the
        exclusive upper bound of the local DE have been returned.

    Arguments:
        locstream: the ESMP_LocStream to query
        localDe:   index of the local DE (default 0)
    Returns:
        (exLB, exUB): two one-element numpy int32 arrays
    """
    local_de = ct.c_int(localDe)
    # A locstream always has rank one.
    rank = 1
    lower_bound = np.zeros(rank, dtype=np.int32)
    upper_bound = np.zeros(rank, dtype=np.int32)
    rc = _ESMF.ESMC_LocStreamGetBounds(locstream.ptr, local_de, lower_bound, upper_bound)
    if rc != constants._ESMP_SUCCESS:
        raise ValueError('ESMC_LocStreamGetBounds() failed with rc = '+str(rc)+'. '+
                         constants._errmsg)
    # Shift the lower bound from Fortran 1-based to 0-based indexing.
    return lower_bound - 1, upper_bound
def PlotEStarRStarBasins(DataDirectory, FilenamePrefix, PlotDirectory, Sc = 0.8):
    """
    Function to make an E*R* plot for a series of drainage basins.
    Changing so that we calculate E* and R* in the python script following
    Martin's example, so that we can test sensitivity to Sc.

    Args:
        DataDirectory (str): the data directory
        FilenamePrefix (str): the file name prefix
        PlotDirectory (str): The directory into which the plots are saved
        Sc (float): The critical slope to be used in the analysis

    Author: FJC
    """
    # SMM: It is not clear where this file comes from
    input_csv = PlotDirectory+FilenamePrefix+'_basin_hillslope_data.csv'
    df = pd.read_csv(input_csv)
    # set up the figure
    fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True, figsize=(5,5))
    PlotEStarRStarTheoretical()
    #choose colormap
    ColourMap = cm.viridis
    # get the basins
    basins = df['basin_keys'].unique()
    MinMChi = df.mchi_median.min()
    MaxMChi = df.mchi_median.max()
    # Colour normalisation is loop-invariant, so build it once.
    cNorm = colors.Normalize(vmin=MinMChi, vmax=MaxMChi)
    for basin_key in basins:
        Data = CalculateEStarRStar(DataDirectory,FilenamePrefix,basin_key, Sc=Sc)
        # colour code by basin median mchi
        #colour = float(basin_key)/float(NoBasins)
        colour = df.mchi_median[df.basin_keys == basin_key].values[0]
        EStarMedian = Data.EStar.median()
        RStarMedian = Data.RStar.median()
        # Interquartile range for the error bars.
        # .to_numpy() replaces Series.as_matrix(), which was removed in pandas 1.0.
        EStar_lower_err = np.percentile(Data.EStar.to_numpy(), 25)
        EStar_upper_err = np.percentile(Data.EStar.to_numpy(), 75)
        RStar_lower_err = np.percentile(Data.RStar.to_numpy(), 25)
        RStar_upper_err = np.percentile(Data.RStar.to_numpy(), 75)
        # plot the rstar vs estar
        sc = ax.scatter(EStarMedian,RStarMedian,c=colour,s=50, edgecolors='k', zorder=100, norm=cNorm)
        ax.errorbar(EStarMedian,RStarMedian,xerr=[[EStarMedian-EStar_lower_err],[EStar_upper_err-EStarMedian]], yerr=[[RStarMedian-RStar_lower_err],[RStar_upper_err-RStarMedian]],fmt='o', zorder=1, ecolor='0.5',markersize=1,mfc='white',mec='k')
    # Finalise the figure
    plt.xlabel('$E^*={{-2\:C_{HT}\:L_H}/{S_C}}$')
    plt.ylabel('$R^*=S/S_C$')
    plt.xlim(0.1,20)
    plt.ylim(0.05,1)
    # add colour bar
    cbar = plt.colorbar(sc,cmap=ColourMap)
    colorbarlabel='Basin ID'
    cbar.set_label(colorbarlabel, fontsize=10)
    # tick_locator = ticker.MaxNLocator(nbins=5)
    # cbar.locator = tick_locator
    # cbar.update_ticks()
    #save output
    plt.savefig(PlotDirectory+FilenamePrefix +"_estar_vs_rstar{}.png".format(Sc), dpi=300)
    plt.clf()
def reverse(collection):
    """
    Reverses a collection.
    Args:
      collection: `dict|list|depset` - The collection to reverse
    Returns:
      `dict|list|depset` - A new collection of the same type, with items in the reverse order
      of the input collection.
    """
    forward_list = None
    # Starlark's type() returns a string name, so compare against literals.
    collection_type = type(collection)
    if collection_type == "dict":
        # items() yields (key, value) pairs in insertion order.
        forward_list = collection.items()
    elif collection_type == "list":
        forward_list = collection
    elif collection_type == "depset":
        forward_list = collection.to_list()
    else:
        fail("Unsupported collection type: " + collection_type)
    # Build the reversed sequence by prepending each item in turn.
    reverse_list = []
    for value in forward_list:
        reverse_list.insert(0, value)
    # Reassemble a collection of the same type as the input.
    ret = None
    if collection_type == "dict":
        ret = dict(reverse_list)
    elif collection_type == "list":
        ret = reverse_list
    elif collection_type == "depset":
        # NOTE(review): the depset's `order` attribute is not preserved here --
        # confirm callers only use default-order depsets.
        ret = depset(reverse_list)
    else:
        fail("Unsupported collection type: " + collection_type)
    return ret
def A_fast_full5(S, phase_factors, r, r_min, MY, MX):
    """ Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included
    :param S: B x NY x NX
    :param phase_factors: K x B
    :param r: K x 2
    :param out: K x MY x MX
    :return: exit waves in out
    """
    # NOTE(review): r_min is unused in this version -- confirm whether it can
    # be dropped from the signature or is kept for interface parity.
    # B: number of beams; K: number of scan positions.
    B = S.shape[0]
    K, _ = r.shape
    # Per-position, per-beam crops of the S-matrix, filled by the CUDA kernel.
    out = th.zeros((K, B, MY, MX), dtype=th.complex64, device=S.device)
    K, B, MY, MX = out.shape
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    # NOTE(review): half the device maximum -- presumably tuned for register
    # pressure in split_kernel4; confirm on other GPU architectures.
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK // 2
    blockspergrid = m.ceil(np.prod(np.array((K, B, MY, MX))) / threadsperblock)
    # 1 - get crops from S-matrix
    split_kernel4[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, th.view_as_real(out))
    # threadsperblock = 128 # gpu.MAX_THREADS_PER_BLOCK
    # blockspergrid = m.ceil(np.prod(np.array((K, B))) / threadsperblock)
    # # 1 - get crops from S-matrix
    # split_kernel2[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, out)
    out = out.view((K, B, MY * MX))
    # 1.5 - convert to cupy
    # 2 - complex batched matmul: K x 1 x B @ K x B x MY*MX --> K x 1 x MY * MX
    # print(out.shape)
    # print(phase_factors2.shape)
    # print(out.dtype)
    # print(phase_factors2.dtype)
    phase_factors2 = phase_factors.unsqueeze(1)
    exitwaves = phase_factors2 @ out
    # 3 - reshape
    exitwaves = exitwaves.view((K, MY, MX))
    #4 convert to pytorch
    return exitwaves
def get_metadata_for_druid(druid, redownload_mods):
    """Obtains a .mods metadata file for the roll specified by DRUID either
    from the local mods/ folder or the Stanford Digital Repository, then
    parses the XML to build the metadata dictionary for the roll.

    Returns the metadata dict, or None when the MODS record cannot be parsed.
    """
    def get_value_by_xpath(xpath):
        # Return the first match of the xpath, or None when nothing matches.
        try:
            return xml_tree.xpath(
                xpath,
                namespaces=NS,
            )[0]
        except IndexError:
            return None
    # Takes an array of potential xpaths, returns the first one that matches,
    # or None
    def get_value_by_xpaths(xpaths):
        for xpath in xpaths:
            value = get_value_by_xpath(xpath)
            if value is not None:
                return value
        return None
    mods_filepath = Path(f"input/mods/{druid}.mods")
    if not mods_filepath.exists() or redownload_mods:
        response = requests.get(f"{PURL_BASE}{druid}.mods")
        try:
            xml_tree = etree.fromstring(response.content)
        except etree.XMLSyntaxError:
            logging.error(
                f"Unable to parse MODS metadata for {druid} - record is likely missing."
            )
            return None
        # Cache the downloaded record for subsequent runs.
        with mods_filepath.open("w") as _fh:
            _fh.write(etree.tostring(xml_tree, encoding="unicode", pretty_print=True))
    else:
        xml_tree = etree.parse(mods_filepath.open())
    # The representation of the roll type in the MODS metadata continues to
    # evolve. Hopefully this logic covers all cases.
    roll_type = "NA"
    type_note = get_value_by_xpath(
        "x:physicalDescription/x:note[@displayLabel='Roll type']/text()"
    )
    scale_note = get_value_by_xpath(
        "x:physicalDescription/x:note[@displayLabel='Scale']/text()"
    )
    if type_note is not None and type_note in ROLL_TYPES:
        roll_type = ROLL_TYPES[type_note]
    # The scale note wins when the roll type is still unknown or generic.
    if (
        scale_note is not None
        and scale_note in ROLL_TYPES
        and (roll_type == "NA" or type_note == "standard")
    ):
        roll_type = ROLL_TYPES[scale_note]
    if roll_type == "NA" or type_note == "standard":
        for note in xml_tree.xpath("(x:note)", namespaces=NS):
            if note is not None and note.text in ROLL_TYPES:
                roll_type = ROLL_TYPES[note.text]
    metadata = {
        "title_prefix": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:nonSort)[1]/text()"
        ),
        "title": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:title)[1]/text()"
        ),
        "title_part_number": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:partNumber)[1]/text()"
        ),
        "title_part_name": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:partName)[1]/text()"
        ),
        "subtitle": get_value_by_xpath("(x:titleInfo/x:subTitle)[1]/text()"),
        "composer": get_value_by_xpaths(
            [
                "x:name[descendant::x:roleTerm[text()='composer']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='Composer']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='composer.']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='cmp']]/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "performer": get_value_by_xpaths(
            [
                "x:name[descendant::x:roleTerm[text()='instrumentalist']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='instrumentalist.']]/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "arranger": get_value_by_xpaths(
            [
                "x:name[descendant::x:roleTerm[text()='arranger of music']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='arranger']]/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "original_composer": get_value_by_xpaths(
            [
                "x:relatedItem[@displayLabel='Based on (work) :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Based on']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                # Fixed typo: was @displayLabele, which could never match.
                "x:relatedItem[@displayLabel='Adaptation of (work) :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Adaptation of']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Arrangement of :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Arrangement of']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "label": get_value_by_xpaths(
            [
                "x:identifier[@type='issue number' and @displayLabel='Roll number']/text()",
                "x:identifier[@type='issue number']/text()",
            ]
        ),
        "publisher": get_value_by_xpaths(
            [
                "x:identifier[@type='publisher']/text()",
                "x:originInfo[@eventType='publication']/x:publisher/text()",
                "x:name[@type='corporate']/x:nameType/text()",
                "x:name[descendant::x:roleTerm[text()='publisher.']]/x:namePart/text()",
            ]
        ),
        "number": get_value_by_xpath("x:identifier[@type='publisher number']/text()"),
        "publish_date": get_value_by_xpaths(
            [
                "x:originInfo[@eventType='publication']/x:dateIssued[@keyDate='yes']/text()",
                "x:originInfo[@eventType='publication']/x:dateIssued/text()",
                "x:originInfo/x:dateIssued[@point='start']/text()",
                "x:originInfo[@displayLabel='publisher']/x:dateIssued/text()",
            ]
        ),
        "publish_place": get_value_by_xpaths(
            [
                "x:originInfo[@eventType='publication']/x:place/x:placeTerm[@type='text']/text()",
                "x:originInfo[@displayLabel='publisher']/x:place/x:placeTerm/text()",
            ]
        ),
        "recording_date": get_value_by_xpaths(
            [
                "x:note[@type='venue']/text()",
                "x:originInfo[@eventType='publication']/x:dateCaptured/text()",
            ]
        ),
        # The call number is not consistently available in all MODS variants
        # "call_number": get_value_by_xpath("x:location/x:shelfLocator/text()"),
        "type": roll_type,
        "PURL": PURL_BASE + druid,
    }
    return metadata
def logistic_dataset_gen_data(num, w, dim, temp, rng_key):
    """Draw standard-Gaussian inputs with noisy binary labels.

    Args:
        num: number of data points to sample.
        w: weight array of size dim x odim used to generate the labels.
        dim: number of input dimensions.
        temp: temperature controlling the label noise.
        rng_key: JAX random number generator key.

    Returns:
        x: array of size dim x num with the sampled inputs.
        y_pm: array of size num x odim with +/-1 labels.
    """
    key_x, key_y, _ = jax.random.split(rng_key, 3)
    x = jax.random.normal(key_x, (dim, num))
    # Bernoulli probability from a sigmoid of the (negated, tempered) logits.
    label_prob = jax.nn.sigmoid(-(1 / temp) * w.T.dot(x))
    labels = jax.random.bernoulli(key_y, (label_prob))
    # Map {0, 1} labels to {-1, +1}.
    return x, 2. * labels - 1
def sech(x):
    """Return the hyperbolic secant of *x* (the reciprocal of cosh)."""
    hyperbolic_cosine = cosh(x)
    return 1 / hyperbolic_cosine
def generate_output_files(variants, output_consequences, output_dataframe):
    """Postprocess and output final tables.

    Writes two files: the full variants dataframe (for debugging) to
    ``output_dataframe``, and a six-column consequences table (consumed by
    the evidence string generation pipeline) to ``output_consequences``.
    Returns early, writing only the dataframe, when no record is complete.
    """
    # Rearrange order of dataframe columns
    variants = variants[
        ['Name', 'RCVaccession', 'GeneSymbol', 'HGNC_ID',
         'RepeatUnitLength', 'CoordinateSpan', 'IsProteinHGVS', 'TranscriptID',
         'EnsemblGeneID', 'EnsemblGeneName', 'EnsemblChromosomeName', 'GeneAnnotationSource',
         'RepeatType', 'RecordIsComplete']
    ]
    # Write the full dataframe. This is used for debugging and investigation purposes.
    # Bug fix: sort_values() returns a new frame; the original discarded it,
    # so the output was never actually sorted.
    variants = variants.sort_values(by=['Name', 'RCVaccession', 'GeneSymbol'])
    variants.to_csv(output_dataframe, sep='\t', index=False)
    # Generate consequences table
    consequences = variants[variants['RecordIsComplete']] \
        .groupby(['RCVaccession', 'EnsemblGeneID', 'EnsemblGeneName'])['RepeatType'] \
        .apply(set).reset_index(name='RepeatType')
    if consequences.empty:
        logger.info('There are no records ready for output')
        return
    # Check that for every (RCV, gene) pair there is only one consequence type
    assert consequences['RepeatType'].str.len().dropna().max() == 1, 'Multiple (RCV, gene) → variant type mappings!'
    # Get rid of sets
    consequences['RepeatType'] = consequences['RepeatType'].apply(list)
    consequences = consequences.explode('RepeatType')
    # Form a six-column file compatible with the consequence mapping pipeline, for example:
    # RCV000005966	1	ENSG00000156475	PPP2R2B	trinucleotide_repeat_expansion	0
    consequences['PlaceholderOnes'] = 1
    consequences['PlaceholderZeroes'] = 0
    consequences = consequences[['RCVaccession', 'PlaceholderOnes', 'EnsemblGeneID', 'EnsemblGeneName', 'RepeatType',
                                 'PlaceholderZeroes']]
    consequences.sort_values(by=['RepeatType', 'RCVaccession', 'EnsemblGeneID'], inplace=True)
    # Check that there are no empty cells in the final consequences table
    assert consequences.isnull().to_numpy().sum() == 0
    # Write the consequences table. This is used by the main evidence string generation pipeline.
    consequences.to_csv(output_consequences, sep='\t', index=False, header=False)
    # Output statistics
    logger.info(f'Generated {len(consequences)} consequences in total:')
    logger.info(f'  {sum(consequences.RepeatType == "trinucleotide_repeat_expansion")} trinucleotide repeat expansion')
    logger.info(f'  {sum(consequences.RepeatType == "short_tandem_repeat_expansion")} short tandem repeat expansion')
def main(country='Spain',
         randomized=True,
         num_points=2,
         date_begin='2018-01-01', date_end='2018-01-02',
         hourly_data=True,
         # SECURITY(review): hardcoded default API key -- rotate it and
         # prefer passing the key explicitly (e.g. from an env variable).
         api_key='953235aa4e74fcb593bd59c2b548d03a',
         specific_coordinates=None,
         interval=6):
    """
    Generates hourly or daily weather data for a given country over
    specific time period. Data sourced from visualcrossing

    Function will generate either random weather data over country or will
    generate weather data at a given point set of points chosen by the user.

    Parameters:
        country (str): country of interest
        randomized (bool): specify whether user desires randomly placed points
            within country or specific location
        num_points (int): number of points over country which are desired
        date_begin (str): beginning date YYYY-MM-DD
        date_end (str): end date YYYY-MM-DD
        hourly_data (bool): hourly (True) or daily (False) data
        api_key (str): visualcrossing API key
        specific_coordinates (list):
            list of lists of floats [[lat1, long1], [lat2, long2], ....]
            at which you would like to extract the weather
        interval (int): sampling interval passed to the conditions API

    Returns:
        saves weather data as one CSV per coordinate
    """
    # Generate 'num_points' randomly sampled coordinates in country
    if randomized:
        coordinates = generate_coordinates(country=country,
                                           num_points=num_points)
    else:  # use specified coordinates
        # Mutable-default fix: None (the new default) behaves like the old [].
        coordinates = specific_coordinates if specific_coordinates is not None else []
    plot_points(coordinates)  # Plot points on map for reference
    for coordinate in coordinates:
        latitude, longitude = coordinate
        json_conditions = get_historical_conditions(
            latitude=latitude,
            longitude=longitude,
            date_begin=date_begin,
            date_end=date_end,
            api_key=api_key,
            hourly_data=hourly_data,
            interval=interval)
        clean_conditions(json_conditions,
                         hourly_data=hourly_data
                         ).to_csv(
            '{}_{}_weather_data_@_{:.2f}_{:.2f}.csv'.format(
                country,
                'hourly' if hourly_data else 'daily',
                latitude,
                longitude))
    # location_data_list.append(clean_conditions(json_conditions))
def _map_triples_elements_to_ids(
    triples: LabeledTriples,
    entity_to_id: EntityMapping,
    relation_to_id: RelationMapping,
) -> MappedTriples:
    """Map entities and relations to pre-defined ids.

    Triples containing entities or relations absent from the mappings are
    filtered out (with a warning).  The resulting id-triples are
    deduplicated; this implicitly balances training samples but does not
    preserve the input order.
    """
    if triples.size == 0:
        logger.warning('Provided empty triples to map.')
        return torch.empty(0, 3, dtype=torch.long)
    heads, relations, tails = slice_triples(triples)
    # When triples that don't exist are trying to be mapped, they get the id "-1"
    entity_getter = np.vectorize(entity_to_id.get)
    head_column = entity_getter(heads, [-1])
    tail_column = entity_getter(tails, [-1])
    relation_getter = np.vectorize(relation_to_id.get)
    relation_column = relation_getter(relations, [-1])
    # Filter all non-existent triples
    head_filter = head_column < 0
    relation_filter = relation_column < 0
    tail_filter = tail_column < 0
    num_no_head = head_filter.sum()
    num_no_relation = relation_filter.sum()
    num_no_tail = tail_filter.sum()
    if (num_no_head > 0) or (num_no_relation > 0) or (num_no_tail > 0):
        logger.warning(
            f"You're trying to map triples with {num_no_head + num_no_tail} entities and {num_no_relation} relations"
            f" that are not in the training set. These triples will be excluded from the mapping.",
        )
        non_mappable_triples = (head_filter | relation_filter | tail_filter)
        head_column = head_column[~non_mappable_triples, None]
        relation_column = relation_column[~non_mappable_triples, None]
        tail_column = tail_column[~non_mappable_triples, None]
        logger.warning(
            f"In total {non_mappable_triples.sum():.0f} from {triples.shape[0]:.0f} triples were filtered out",
        )
    triples_of_ids = np.concatenate([head_column, relation_column, tail_column], axis=1)
    # np.long was deprecated in NumPy 1.20 and removed in 1.24; int64 matches torch.long.
    triples_of_ids = np.array(triples_of_ids, dtype=np.int64)
    # Note: Unique changes the order of the triples
    # Note: Using unique means implicit balancing of training samples
    unique_mapped_triples = np.unique(ar=triples_of_ids, axis=0)
    return torch.tensor(unique_mapped_triples, dtype=torch.long)
def pinf_two_networks(grgd: Tuple[float, float],
                      k: Tuple[float, float] = (3, 3),
                      alpha_i: Tuple[float, float] = (1, 1),
                      solpoints: int = 10,
                      eps: float = 1e-5,
                      method: str = "hybr"):
    """Find the fixed points for two recovery coupled ER networks (not-symmetric).

    Args:
        grgd (Tuple[float, float]): gamma_r / gamma_d ratio in each network
        k (Tuple[float, float], optional): avg degree in each network. Defaults to (3, 3).
        alpha_i (Tuple[float, float], optional): coupling strength in each network. Defaults to (1, 1).
        solpoints (int, optional): number of guesses to feed solver. Defaults to 10.
        eps (float, optional): precision of solution. Defaults to 1e-5.
        method (str, optional): method to pass to `root`. Defaults to "hybr".

    Returns:
        List[np.ndarray]: a list of all solutions found
    """
    generating_fns = [u_factory(degree) for degree in k]
    # Coupling terms mu_i(p) = 1 - alpha_i * g_i(1 - p), one per network.
    mu = (
        lambda p: 1 - alpha_i[0] * generating_fns[0](1 - p),
        lambda p: 1 - alpha_i[1] * generating_fns[1](1 - p),
    )

    def self_consistency(fixed_point):
        # Each network's occupancy depends on the other network's coupling term.
        residual_0 = 1 / (1 + grgd[0] * mu[1](fixed_point[1])) - fixed_point[0]
        residual_1 = 1 / (1 + grgd[1] * mu[0](fixed_point[0])) - fixed_point[1]
        return np.array([residual_0, residual_1], dtype=float).squeeze()

    return get_all_sols_two_networks(
        self_consistency,
        eps=eps,
        method=method,
        solpoints=solpoints,
    )
def uncapped_flatprice_goal_reached(chain, uncapped_flatprice, uncapped_flatprice_finalizer, preico_funding_goal, preico_starts_at, customer) -> Contract:
    """Fixture: an ICO contract whose minimum funding goal has already been met."""
    # Advance past the pre-ICO start, then buy exactly the funding goal.
    time_travel(chain, preico_starts_at + 1)
    uncapped_flatprice.functions.buy().transact(
        {"from": customer, "value": preico_funding_goal}
    )
    return uncapped_flatprice
def depfile_name(request, tmp_path_factory):
    """A fixture for a temporary doit database file(s) that will be removed after running"""
    db_path = str(tmp_path_factory.mktemp('x', True) / 'testdb')
    # Clean the database up once the requesting test finishes.
    request.addfinalizer(lambda: remove_db(db_path))
    return db_path
def _convert_v3_response_to_v2(pbx_name, termtype, command, v3_response):
    """
    Convert the v3 response to the legacy v2 xml format.
    """
    logger.debug(v3_response)
    document = {
        'command': {'@cmd': command, '@cmdType': termtype, '@pbxName': pbx_name}
    }
    error = v3_response.get('error')
    screens = v3_response.get('screens')
    ossi = v3_response.get('ossi_objects')
    if error is not None:
        document['command']['error'] = 'ERROR: {}'.format(error)
    elif screens is not None:
        # One <screen page="N"> element per returned screen, 1-based.
        document['command']['screen'] = [
            OrderedDict([('@page', page), ('#text', text)])
            for page, text in enumerate(screens, start=1)
        ]
    elif ossi is not None:
        converted = []
        for index, entry in enumerate(ossi, start=1):
            fields = [
                OrderedDict([('@fid', fid), ('#text', entry[fid])])
                for fid in entry
            ]
            converted.append(OrderedDict([('@i', index), ('field', fields)]))
        # The legacy format uses an empty mapping rather than an empty list.
        document['command']['ossi_object'] = converted if converted else {}
    logger.debug(document)
    return xmltodict.unparse(document, pretty=True, indent=' ')
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, broadcast=True, parameters=None, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to create a grid.
        If False, assume state variables are given as equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
        If this is not specified, points will be generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    model_dict = unpack_kwarg(kwargs.pop('model', FallbackModel), default_arg=FallbackModel)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        # Sort for a deterministic symbol/value ordering.
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    param_symbols = tuple(parameters.keys())
    # BUGFIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; use the builtin directly.
    param_values = np.atleast_1d(np.array(list(parameters.values()), dtype=float))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, str):
        comps = [comps]
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    components = [x for x in sorted(comps) if not x.startswith('VA')]
    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = collections.OrderedDict((v.StateVariable(key), unpack_condition(value)) \
                                            for (key, value) in sorted(kwargs.items()))
    # XXX: CompiledModel assumes P, T are the only state variables
    if statevar_dict.get(v.P, None) is None:
        statevar_dict[v.P] = 101325
    if statevar_dict.get(v.T, None) is None:
        statevar_dict[v.T] = 300
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    all_phase_data = []
    comp_sets = {}
    largest_energy = 1e30
    maximum_internal_dof = 0
    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
                         for name in unpack_phases(phases))
    # First pass: build Model objects and find the largest internal dof so
    # every phase's point array can be padded to a common width.
    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                model_dict[phase_name] = mod = mod(dbf, comps, phase_name, parameters=parameters)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                warnings.warn("""Suspending specified phase {} due to
                some sublattices containing only unspecified components""".format(phase_name))
                continue
        if points_dict[phase_name] is None:
            maximum_internal_dof = max(maximum_internal_dof, sum(len(x) for x in mod.constituents))
        else:
            maximum_internal_dof = max(maximum_internal_dof, np.asarray(points_dict[phase_name]).shape[-1])
    # Second pass: sample each phase and evaluate the requested property.
    for phase_name, phase_obj in sorted(active_phases.items()):
        try:
            mod = model_dict[phase_name]
        except KeyError:
            continue
        # this is a phase model we couldn't construct for whatever reason; skip it
        if isinstance(mod, type):
            continue
        if (not isinstance(mod, CompiledModel)) or (output != 'GM'):
            # Fall back to the symbolic Model when the fast CompiledModel
            # cannot produce the requested output.
            if isinstance(mod, CompiledModel):
                mod = Model(dbf, comps, phase_name, parameters=parameters)
            # Construct an ordered list of the variables
            variables, sublattice_dof = generate_dof(phase_obj, mod.components)
            # Build the "fast" representation of that model
            if callable_dict[phase_name] is None:
                try:
                    out = getattr(mod, output)
                except AttributeError:
                    raise AttributeError('Missing Model attribute {0} specified for {1}'
                                         .format(output, mod.__class__))
                # As a last resort, treat undefined symbols as zero
                # But warn the user when we do this
                # This is consistent with TC's behavior
                undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
                for undef in undefs:
                    out = out.xreplace({undef: float(0)})
                    warnings.warn('Setting undefined symbol {0} for phase {1} to zero'.format(undef, phase_name))
                comp_sets[phase_name] = build_functions(out, list(statevar_dict.keys()) + variables,
                                                        include_obj=True, include_grad=False, include_hess=False,
                                                        parameters=param_symbols)
            else:
                comp_sets[phase_name] = callable_dict[phase_name]
            # BUGFIX: dtype=np.float removed in NumPy 1.24; use builtin float.
            phase_record = PhaseRecord_from_cython(comps, list(statevar_dict.keys()) + variables,
                                                   np.array(dbf.phases[phase_name].sublattices, dtype=float),
                                                   param_values, comp_sets[phase_name], None, None)
        else:
            variables = sorted(set(mod.variables) - {v.T, v.P}, key=str)
            sublattice_dof = mod.sublattice_dof
            phase_record = PhaseRecord_from_compiledmodel(mod, param_values)
        points = points_dict[phase_name]
        if points is None:
            points = _sample_phase_constitution(phase_name, phase_obj.constituents, sublattice_dof, comps,
                                                tuple(variables), sampler_dict[phase_name] or point_sample,
                                                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)
        # Fake (hyperplane) points are only attached to the first phase.
        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(phase_obj, components, variables, str_statevar_dict,
                                         points, phase_record, output,
                                         maximum_internal_dof, broadcast=broadcast,
                                         largest_energy=float(largest_energy), fake_points=fp)
        all_phase_data.append(phase_ds)
    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        final_ds = _fast_concat(all_phase_data, dim='points')
        final_ds['points'].values = np.arange(len(final_ds['points']))
        final_ds.coords['points'].values = np.arange(len(final_ds['points']))
    else:
        final_ds = all_phase_data[0]
    return final_ds
def is_negative(value):
    """Checks if `value` is negative.
    Args:
        value (mixed): Value to check.
    Returns:
        bool: Whether `value` is negative.
    Example:
        >>> is_negative(-1)
        True
        >>> is_negative(0)
        False
        >>> is_negative(1)
        False
    .. versionadded:: 2.0.0
    """
    # Non-numbers are never negative; zero is not negative either.
    if not is_number(value):
        return False
    return value < 0
def get_optimizer_noun(lr, decay, mode, cnn_features, role_features):
    """Build the Adam optimizer for noun/role training.

    mode 0: training from scratch
    mode 1: cnn fix, verb fix, role training
    mode 2: cnn fix, verb fine tune, role training
    mode 3: cnn finetune, verb finetune, role training

    Raises:
        ValueError: if `mode` is not one of 0-3.  Previously an unknown mode
            fell through every branch and crashed with UnboundLocalError at
            the final `return optimizer`.
    """
    if mode == 0 or mode == 3:
        # NOTE(review): modes 0 and 3 configure identical parameter groups
        # and hyperparameters in the original code -- confirm whether mode 3
        # was meant to differ (e.g. separate learning rates).
        set_trainable_param(cnn_features, True)
        set_trainable_param(role_features, True)
        optimizer = torch.optim.Adam(
            [{'params': cnn_features}, {'params': role_features}],
            lr=lr, weight_decay=decay)
    elif mode == 1:
        set_trainable_param(role_features, True)
        optimizer = torch.optim.Adam(
            [{'params': role_features}],
            lr=lr, weight_decay=decay)
    elif mode == 2:
        set_trainable_param(role_features, True)
        # NOTE(review): this branch ignores `lr`/`decay` and hard-codes
        # lr=1e-3 with no weight decay, unlike the other modes -- kept
        # as-is to preserve behavior; confirm intent.
        optimizer = torch.optim.Adam([{'params': role_features}], lr=1e-3)
    else:
        raise ValueError('unknown optimizer mode: {}'.format(mode))
    return optimizer
def lan_manifold(
    parameter_df=None,
    vary_dict={"v": [-1.0, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0]},
    model="ddm",
    n_rt_steps=200,
    max_rt=5,
    fig_scale=1.0,
    save=False,
    show=True,
):
    """Plots lan likelihoods in a 3d-plot.
    :Arguments:
        parameter_df: pandas.core.frame.DataFrame <default=None>
            DataFrame that holds a parameter vector and has parameter names as keys.
        vary_dict: dict <default={'v': [-1.0, -0.75, -.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0]}>
            Dictionary where key is a valid parameter name, and value is either a list of numpy.ndarray() of values
            of the respective parameter that you want to plot.
        model: str <default='ddm'>
            String that specifies the model to be used to plotting. (The plot loads the corresponding LAN)
        n_rt_steps: int <default=200>
            Numer of rt steps to include (x-axis)
        max_rt: numeric <default=5.0>
            The n_rt_steps argument splits the reaction time axis in to n_rt_step from 0 to max_rt.
        fig_scale: numeric <default=1.0>
            Basic handle to scale the figure.
        save: bool <default=False>
            Whether to save the plot.
        show: bool <default=True>
            Whether to show the plot.
    :Returns:
        empty
    """
    # mpl.rcParams.update(mpl.rcParamsDefault)
    # mpl.rcParams['text.usetex'] = True
    # #matplotlib.rcParams['pdf.fonttype'] = 42
    # mpl.rcParams['svg.fonttype'] = 'none'
    assert (
        model_config[model]["n_choices"] == 2
    ), "This plot works only for 2-choice models at the moment. Improvements coming!"
    # Only the first row of the supplied parameters is plotted.
    # NOTE(review): `.shape[0]` is read before the DataFrame type check below,
    # so a plain ndarray also reaches this branch -- confirm intended.
    if parameter_df.shape[0] > 0:
        parameters = parameter_df.iloc[0, :]
        print("Using only the first row of the supplied parameter array !")
    if type(parameter_df) == pd.core.frame.DataFrame:
        # Keep only the model's parameters, in model_config order.
        parameters = np.squeeze(
            parameters[model_config[model]["params"]].values.astype(np.float32)
        )
    else:
        parameters = parameter_df
    # Load Keras model and initialize batch container
    torch_model = get_torch_mlp(model=model)
    # Prepare data structures
    # Data template: column 0 = rt grid, column 1 = choice (-1 / 1).
    plot_data = np.zeros((n_rt_steps * 2, 2))
    # rt axis runs max_rt..0 for choice -1, then 0..max_rt for choice +1,
    # so (rt * choice) later yields a continuous signed axis.
    plot_data[:, 0] = np.concatenate(
        (
            [(i * (max_rt / n_rt_steps)) for i in range(n_rt_steps, 0, -1)],
            [(i * (max_rt / n_rt_steps)) for i in range(1, n_rt_steps + 1, 1)],
        )
    )
    plot_data[:, 1] = np.concatenate(
        (np.repeat(-1, n_rt_steps), np.repeat(1, n_rt_steps))
    )
    n_params = model_config[model]["n_params"]
    # NOTE(review): assumes the vary_dict value supports `.shape` -- a plain
    # list (allowed per docstring) would fail here; confirm.
    n_levels = vary_dict[list(vary_dict.keys())[0]].shape[0]
    # Layout per row: [params | rt | choice | likelihood].
    data_var = np.zeros(((n_rt_steps * 2) * n_levels, n_params + 3))
    cnt = 0
    vary_param_name = list(vary_dict.keys())[0]
    for par_tmp in vary_dict[vary_param_name]:
        tmp_begin = (n_rt_steps * 2) * cnt
        tmp_end = (n_rt_steps * 2) * (cnt + 1)
        # Overwrite the varied parameter for this slice of the manifold.
        parameters[model_config[model]["params"].index(vary_param_name)] = par_tmp
        data_var[tmp_begin:tmp_end, :n_params] = parameters
        data_var[tmp_begin:tmp_end, n_params : (n_params + 2)] = plot_data
        # Network emits log-likelihood; exponentiate for the plot.
        data_var[tmp_begin:tmp_end, (n_params + 2)] = np.squeeze(
            np.exp(torch_model(data_var[tmp_begin:tmp_end, :-1].astype(np.float32)))
        )
        cnt += 1
    fig = plt.figure(figsize=(8 * fig_scale, 5.5 * fig_scale))
    ax = fig.add_subplot(111, projection="3d")
    # x = signed rt (rt * choice), y = varied parameter, z = likelihood.
    ax.plot_trisurf(
        data_var[:, -2] * data_var[:, -3],
        data_var[:, model_config[model]["params"].index(vary_param_name)],
        data_var[:, -1],
        linewidth=0.5,
        alpha=1.0,
        cmap=cm.coolwarm,
    )
    ax.set_ylabel(vary_param_name.upper().replace("_", "-"), fontsize=16, labelpad=20)
    ax.set_xlabel("RT", fontsize=16, labelpad=20)
    ax.set_zlabel("Likelihood", fontsize=16, labelpad=20)
    ax.set_zticks(
        np.round(np.linspace(min(data_var[:, -1]), max(data_var[:, -1]), 5), 1)
    )
    ax.set_yticks(
        np.round(
            np.linspace(
                min(data_var[:, model_config[model]["params"].index(vary_param_name)]),
                max(data_var[:, model_config[model]["params"].index(vary_param_name)]),
                5,
            ),
            1,
        )
    )
    ax.set_xticks(
        np.round(
            np.linspace(
                min(data_var[:, -2] * data_var[:, -3]),
                max(data_var[:, -2] * data_var[:, -3]),
                5,
            ),
            1,
        )
    )
    ax.tick_params(labelsize=16)
    ax.set_title(
        model.upper().replace("_", "-") + " - MLP: Manifold", fontsize=20, pad=20
    )
    # NOTE(review): `w_xaxis` etc. are deprecated in matplotlib >= 3.4
    # (removed in 3.8) in favor of `ax.xaxis` -- verify target mpl version.
    ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
    ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
    ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
    # Save plot
    if save:
        if os.path.isdir("figures/"):
            pass
        else:
            os.mkdir("figures/")
        plt.savefig("figures/mlp_manifold_" + model + ".png", format="png")
    if show:
        return plt.show()
    plt.close()
    return
def test_parse_optional_type_in_docstring():
    """Parse optional types in docstring."""
    # The parsed payload must stay byte-identical to the expected fixture.
    docstring = """
    Parameters:
        x (int): X value.
        y (int, optional): Y value.
    Keyword Args:
        z (int, optional): Z value.
    """
    arguments = Arguments()
    specs = (
        ("x", inspect.Parameter.POSITIONAL_ONLY, "1"),
        ("y", inspect.Parameter.POSITIONAL_ONLY, "None"),
        ("z", inspect.Parameter.POSITIONAL_OR_KEYWORD, "None"),
    )
    for arg_name, arg_kind, arg_default in specs:
        arguments.add(Argument(name=arg_name, annotation=None, kind=arg_kind, default=arg_default))
    function = Function("func", arguments=arguments, returns=None)
    sections, warnings = parse(docstring, function)
    assert len(sections) == 2
    assert not warnings
    assert sections[0].kind is DocstringSectionKind.arguments
    assert sections[1].kind is DocstringSectionKind.keyword_arguments
    argx, argy = sections[0].value
    (argz,) = sections[1].value  # noqa: WPS460
    expectations = (
        (argx, "x", "X value.", "1"),
        (argy, "y", "Y value.", "None"),
        (argz, "z", "Z value.", "None"),
    )
    for arg, expected_name, expected_desc, expected_value in expectations:
        assert arg.name == expected_name
        assert arg.annotation == "int"
        assert arg.description == expected_desc
        assert arg.value == expected_value
def test_gc_cmd(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test gc command."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    # Feed each command chunk into the vim command line.
    for keystrokes in cmd_list:
        qtbot.keyClicks(cmd_line, keystrokes)
    # Command line should be consumed, text toggled, cursor repositioned.
    assert cmd_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
def test_his_write_with_args(mock):
    """
    Args:
        mock:
    """
    # GIVEN a ping provider and a two-point time series passed as request args
    envs = {'HAYSTACK_PROVIDER': 'shaystack.providers.ping'}
    time_serie = [
        (datetime(2020, 1, 1, tzinfo=pytz.utc).isoformat() + " UTC", 100),
        (datetime(2020, 1, 2, tzinfo=pytz.utc).isoformat() + " UTC", 200),
    ]
    mock.return_value = ping._PingGrid
    mime_type = DEFAULT_MIME_TYPE
    request = HaystackHttpRequest()
    request.args['id'] = str(Ref("1234"))
    request.args['ts'] = str(time_serie)
    # WHEN the his_write operation is invoked
    response = shaystack.his_write(envs, request, "dev")
    # THEN the provider receives the parsed grid and the response is valid
    result_ts = Grid(version=VER_3_0, columns=["date", "val"])
    result_ts.extend(
        [{"date": parse_hs_datetime_format(d, pytz.UTC), "val": v} for d, v in time_serie]
    )
    mock.assert_called_once_with(Ref("1234"), result_ts, None)
    assert response.status_code == 200
    assert response.headers["Content-Type"].startswith(mime_type)
    assert shaystack.parse(response.body, mime_type) is not None
def _loc_str_to_pars(loc, x=None, y=None, halign=None, valign=None, pad=_PAD):
    """Convert from a string location specification to the specifying parameters.
    If any of the specifying parameters: {x, y, halign, valign}, are 'None', they are set to
    default values.
    Returns
    -------
    x : float
    y : float
    halign : str
    valign : str
    """
    _valid_loc = [['t', 'u', 'b', 'l', 'c'], ['l', 'r', 'c']]
    for ii, (ll, vv) in enumerate(zip(loc, _valid_loc)):
        if ll not in vv:
            err = "Unrecognized `loc`[{}] = '{}' (`loc` = '{}').".format(ii, ll, loc)
            err += "\n\t`loc`[{}] must be one of '{}'".format(ii, vv)
            raise ValueError(err)
    # Normalize pad to a (horizontal, vertical) pair.
    pad = np.atleast_1d(pad)
    if pad.size == 1:
        pad = np.concatenate([pad, pad])
    # Default (valign, y) keyed by the vertical location character; every
    # character admitted by the validation above has an entry.
    vertical_defaults = {
        't': ('top', 1 - pad[1]),
        'u': ('top', 1 - pad[1]),
        'b': ('bottom', pad[1]),
        'l': ('bottom', pad[1]),
        'c': ('center', 0.5),
    }
    # Default (halign, x) keyed by the horizontal location character.
    horizontal_defaults = {
        'l': ('left', pad[0]),
        'r': ('right', 1 - pad[0]),
        'c': ('center', 0.5),
    }
    default_valign, default_y = vertical_defaults[loc[0]]
    default_halign, default_x = horizontal_defaults[loc[1]]
    if valign is None:
        valign = default_valign
    if y is None:
        y = default_y
    if halign is None:
        halign = default_halign
    if x is None:
        x = default_x
    return x, y, halign, valign
def query_yes_no(question, default="no"):
    """
    Ask a yes/no question via raw_input() and return their answer.
    :param str question: a string that is presented to the user.
    :param str default: the presumed answer if the user just hits <Enter>.
    :return bool: True for "yes" or False for "no"
    """
    answers = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    prompts = {None: "[y/n]", "yes": "[Y/n]", "no": "[y/N]"}
    try:
        prompt = prompts[None if default is None else default.lower()]
    except (AttributeError, KeyError):
        raise ValueError("invalid default answer: {}".format(default))
    msg = "{q} {p} ".format(q=question, p=prompt)
    # Loop until the user gives a recognizable yes/no answer.
    while True:
        sys.stdout.write(msg)
        try:
            return answers[(_read_from_user() or default).lower()]
        except KeyError:
            sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def skeda_from_skedadict(line_dict, filing_number, line_sequence, is_amended):
    """
    We can either pass the header row in or not; if not, look it up.

    Mutates and returns `line_dict`: truncates the transaction id to the
    schema's 20-char limit, stamps bookkeeping fields, and best-effort
    parses the contribution date.
    """
    line_dict.update({
        'transaction_id': line_dict['transaction_id'][:20],
        'line_sequence': line_sequence,
        'superseded_by_amendment': is_amended,
        'filing_number': filing_number,
    })
    if line_dict['contribution_date']:
        try:
            line_dict['contribution_date_formatted'] = parser.parse(line_dict['contribution_date'])
        except ValueError:
            # Unparseable date: leave the formatted field absent.
            pass
    return line_dict
def oracle_to_date(string2convert, fmt, nlsparam=None):
    """
    https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions183.htm
    TO_DATE(char [, fmt [, 'nlsparam' ] ])
    TO_DATE converts char of CHAR, VARCHAR2, NCHAR, or NVARCHAR2 datatype to a value of DATE datatype.
    The fmt is a datetime model format specifying the format of char. If you omit fmt, then char must be in the default date format.
    If fmt is J, for Julian, then char must be an integer.
    On SQLite date are in iso-8601 format: 'YYYY-MM-DD HH:MM:SS'
    Also, the supported format is the C standard (1989 version)

    NOTE(review): the original doc claims the function is cached for
    performance, but no cache is visible at this definition -- presumably a
    decorator is applied where it is registered with SQLite; confirm.
    """
    # `nlsparam` is accepted for Oracle signature compatibility but unused.
    parsed = datetime.datetime.strptime(string2convert, fmt)
    # SQLite-friendly 'YYYY-MM-DD HH:MM:SS' string.
    return parsed.isoformat(sep=" ", timespec="seconds")
def replace_whitespace(s, rep=' '):
    """Replace any length white spaces in the given string with a replacement.
    Parameters
    ----------
    s : str
        The string in which any length whitespaces should be replaced.
    rep : Optional[str]
        The string with which all whitespace should be replaced. By default,
        the plain ASCII space ( ) is used.
    Returns
    -------
    str
        The string in which whitespaces have been replaced.
    """
    # Each maximal run of whitespace collapses to a single `rep`.
    return re.sub(r'\s+', rep, s)
def add_s3_prefix(s3_bucket):
    """
    Ensure a bucket has the s3:// prefix
    :param s3_bucket: string - The bucket name
    """
    # Delegates to the shared prefix helper so the scheme is added only once.
    return prefix(s3_bucket, 's3://')
def main(args=None):
    """Command-line entry point: normalize one or more '.emt' files.

    :param args: argument list to parse (defaults to ``sys.argv[1:]``);
        when no paths are given on the command line, paths are read from
        stdin, one per line.
    :return: None; results of each processed path are printed to stdout.
    """
    args = sys.argv[1:] if args is None else args
    parser = argparse.ArgumentParser()
    parser.add_argument('emg_path',
                        nargs='*',
                        default=sys.stdin,
                        help="The path to '.emt' file or a directory containing '.emt' files.")
    parser.add_argument('--by-track',
                        action='store_true',
                        default=False,
                        help='normalize track by track instead of all tracks together')
    parser.add_argument('--min',
                        type=float,
                        help='the min value to use for the normalization '
                             '(default use the min value of the matrix or column)')
    parser.add_argument('--max',
                        type=float,
                        help='the max value to use for the normalization '
                             '(default use the max value of the matrix or column)')
    parser.add_argument('--dyn-cal',
                        help='The path to the file to use for the dynamic calibration.')
    parser.add_argument('--version',
                        action=argparse_utils.VersionAction,
                        version=get_version_message(),
                        help='Display version and exit.')
    parser.add_argument('-v', '--verbosity',
                        action='count',
                        default=0,
                        help="Set the output verbosity. can be set several times -vv for instance.")
    args = parser.parse_args(args)
    args.verbosity = max(10, 30 - (10 * args.verbosity))
    emg_analyzer.logger_set_level(args.verbosity)
    _log = colorlog.getLogger('emg_analyzer')
    if not isinstance(args.emg_path, list):
        # args must be read from stdin
        if sys.stdin.isatty():
            # stdin is empty
            # BUGFIX: the message used to be an empty string, so the user got
            # a blank error line and no explanation before the exit.
            msg = "No emt path given: pass paths as arguments or pipe them via stdin."
            _log.error(msg)
            parser.print_help()
            sys.exit(msg)
        else:
            args.emg_path = [p.strip() for p in args.emg_path.readlines()]
    if not args.emg_path:
        parser.print_help()
        sys.exit(1)
    # Dynamic calibration implies per-track normalization.
    norm_method = 'norm_by_track' if args.by_track or args.dyn_cal else 'norm'
    # (typo 'morm_method' fixed in the debug message below)
    _log.debug("norm_method = '{}'".format(norm_method))
    options = {}
    if args.min is not None:
        options['min'] = args.min
    if args.max is not None:
        options['max'] = args.max
    if args.dyn_cal:
        _log.info("Loading dynamic calibration file '{}'".format(args.dyn_cal))
        dyn_cal = pd.read_table(args.dyn_cal, comment='#', index_col=0)
        # Keep only the 'min'/'max' rows, preserving orientation.
        dyn_cal = dyn_cal.T[['min', 'max']].T
        # NOTE(review): this print looks like leftover debugging -- confirm
        # whether it should be demoted to _log.debug.
        print(dyn_cal)
        options['dyn_cal'] = dyn_cal
    for path in args.emg_path:
        path = path.strip()
        if os.path.isdir(path):
            processed = process_dir(path,
                                    norm_method,
                                    method_args=tuple(),
                                    method_kwargs=options,
                                    suffix='norm'
                                    )
        else:
            processed = process_one_emt_file(path,
                                             norm_method,
                                             method_args=tuple(),
                                             method_kwargs=options,
                                             suffix='norm'
                                             )
        print(processed)
def test_provider_system_hook_block_loop_empty(change_dir):
    """Verify the hook call works properly."""
    result = tackle('.', context_file='loop_empty.yaml', no_input=True)
    # An empty loop source must yield an empty result collection.
    assert len(result['empty']) == 0
def test_tree_intersection_third(balanced_bst):
    """Test tree intersection."""
    other_tree = BST([10, 12, 14, 16])
    assert tree_intersection(balanced_bst, other_tree) == {10, 12, 16}
async def handle_xml_response(request):
    """ Faking response """
    # Serve a canned XML fixture as the fake equipment payload.
    payload = load_data("equipment_data.xml")
    return aiohttp.web.Response(body=payload, content_type="text/xml")
def plugin_prefs(parent, cmdr, is_beta):
    """
    Return a TK Frame for adding to the EDMC settings dialog.

    Builds two lookup columns (faction and system), each an entry box with a
    suggestion listbox underneath, plus a button that triggers a history
    scrape.  Stores the EntryLookup helpers on the module-level `this`
    object so other plugin hooks can read the selections.
    """
    global listbox
    frame = nb.Frame(parent)
    # Column headers for the two lookup widgets.
    nb.Label(frame, text="Faction Name:").grid(row=0,column=0)
    nb.Label(frame, text="System Name").grid(row=0,column=1)
    # Faction entry + suggestion list, backed by faction names from the DB.
    faction_entry = nb.Entry(frame,width=35)
    faction_entry.grid(row=2,column=0)
    faction_listbox = tk.Listbox(frame,width=35)
    faction_listbox.grid(row=3,column=0)
    this.faction_el = entry_lookup.EntryLookup(faction_entry,faction_listbox, db_connection.get_faction_names(),this.faction_name.get())
    # System entry + suggestion list, backed by system names from the DB.
    system_entry = nb.Entry(frame,width=35)
    system_entry.grid(row=2,column=1)
    system_listbox = tk.Listbox(frame,width=35)
    system_listbox.grid(row=3,column=1)
    this.system_el = entry_lookup.EntryLookup(system_entry,system_listbox, db_connection.get_system_names(),this.system_name.get())
    # Long-running operation: warn the user next to the trigger button.
    b = nb.Button(frame, text="Scrape history", command=scrape_history)
    b.grid(row=4, column=1)
    nb.Label(frame,text="Warning, this will take a while. Shut down ED before running").grid(row=4,column=0)
    return frame
def test_runner_handle_general_exception_in_module_setup(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    Check that if we got exception in the module setup no one test executed.
    :return:
    """
    import pytest

    # Prepare an isolated var dir with an empty xunit report.
    var_dir = _ensure_var_dir(tmpdir)
    xunit_file = _ensure_xunit_file_empty(var_dir)
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))

    config = deepcopy(local_config)
    config.update({
        'artifacts': {},
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': config_path,
    })

    ssh_pool = LocalPool(local_config['ssh'])
    # Single mock module whose setup raises a general exception.
    modules = {
        'mock3.mock_test_module_with_exceptions_in_setup': {
            'path': '%s/mock3/mock_test_module_with_exceptions_in_setup.py' % config['suite_dir'],
            'module_short_name': 'mock_test_module_with_general_exceptions_in_setup',
        },
    }
    runner = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    # The setup failure must abort the whole run.
    with pytest.raises(SystemExit):
        runner.process_tests()

    results = runner.get_tests_results()
    executed = results.get_tests()
    print(executed)
    # No test may have been executed or counted under any status.
    assert len(executed) == 0
    for status in results.statuses:
        assert results.get_tests_num(status) == 0
def Conv1D_positive_r(x, kernel_size):
    """index of r is hard-coded to 2!"""
    # Three parallel 1-filter convolutions; only the third (r) is
    # constrained to be non-negative via relu.
    activations = ('linear', 'linear', 'relu')
    branches = [
        Conv1D(1, kernel_size=kernel_size, padding='valid', activation=act)(x)
        for act in activations
    ]
    return Concatenate()(branches)
def remove_innermost_template_usage(raw_code: str) -> str:
    """
    If the code does not include templates, should return the exact same code
    FIXME: check if any task is templated
    """
    template_types = get_all_template_types(raw_code)
    # Pipeline: rewrite the template type, inject fake definitions for the
    # collected type names, then replace remaining usages.
    code = replace_template_type(raw_code, template_types)
    code = add_fake_template_types_def(code, template_types)
    return replace_template_types(code, template_types)
def decrypt(text, key):
    """Decrypt the supplied text and return the result.
    Args:
        text (str): The text to decrypt.
        key (str): The key with which to perform the decryption.
    """
    # The trailing flag selects the inverse (decrypt) direction of transform.
    return transform(text, key, True)
def modify_replication_task(ReplicationTaskArn=None, ReplicationTaskIdentifier=None, MigrationType=None, TableMappings=None, ReplicationTaskSettings=None, CdcStartTime=None, CdcStartPosition=None, CdcStopPosition=None, TaskData=None):
"""
Modifies the specified replication task.
You can\'t modify the task endpoints. The task must be stopped before you can modify it.
For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.modify_replication_task(
ReplicationTaskArn='string',
ReplicationTaskIdentifier='string',
MigrationType='full-load'|'cdc'|'full-load-and-cdc',
TableMappings='string',
ReplicationTaskSettings='string',
CdcStartTime=datetime(2015, 1, 1),
CdcStartPosition='string',
CdcStopPosition='string',
TaskData='string'
)
:type ReplicationTaskArn: string
:param ReplicationTaskArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the replication task.\n
:type ReplicationTaskIdentifier: string
:param ReplicationTaskIdentifier: The replication task identifier.\nConstraints:\n\nMust contain from 1 to 255 alphanumeric characters or hyphens.\nFirst character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
:type MigrationType: string
:param MigrationType: The migration type. Valid values: full-load | cdc | full-load-and-cdc
:type TableMappings: string
:param TableMappings: When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file:// . When working with the DMS API, provide the JSON as the parameter value, for example: --table-mappings file://mappingfile.json
:type ReplicationTaskSettings: string
:param ReplicationTaskSettings: JSON file that contains settings for the task, such as task metadata settings.
:type CdcStartTime: datetime
:param CdcStartTime: Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.\nTimestamp Example: --cdc-start-time \xe2\x80\x9c2018-03-08T12:12:12\xe2\x80\x9d\n
:type CdcStartPosition: string
:param CdcStartPosition: Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.\nThe value can be in date, checkpoint, or LSN/SCN format.\nDate Example: --cdc-start-position \xe2\x80\x9c2018-03-08T12:12:12\xe2\x80\x9d\nCheckpoint Example: --cdc-start-position 'checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93'\nLSN Example: --cdc-start-position \xe2\x80\x9cmysql-bin-changelog.000024:373\xe2\x80\x9d\n\nNote\nWhen you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS .\n\n
:type CdcStopPosition: string
:param CdcStopPosition: Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.\nServer time example: --cdc-stop-position \xe2\x80\x9cserver_time:3018-02-09T12:12:12\xe2\x80\x9d\nCommit time example: --cdc-stop-position \xe2\x80\x9ccommit_time: 3018-02-09T12:12:12 \xe2\x80\x9c\n
:type TaskData: string
:param TaskData: Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.
:rtype: dict
ReturnsResponse Syntax
{
'ReplicationTask': {
'ReplicationTaskIdentifier': 'string',
'SourceEndpointArn': 'string',
'TargetEndpointArn': 'string',
'ReplicationInstanceArn': 'string',
'MigrationType': 'full-load'|'cdc'|'full-load-and-cdc',
'TableMappings': 'string',
'ReplicationTaskSettings': 'string',
'Status': 'string',
'LastFailureMessage': 'string',
'StopReason': 'string',
'ReplicationTaskCreationDate': datetime(2015, 1, 1),
'ReplicationTaskStartDate': datetime(2015, 1, 1),
'CdcStartPosition': 'string',
'CdcStopPosition': 'string',
'RecoveryCheckpoint': 'string',
'ReplicationTaskArn': 'string',
'ReplicationTaskStats': {
'FullLoadProgressPercent': 123,
'ElapsedTimeMillis': 123,
'TablesLoaded': 123,
'TablesLoading': 123,
'TablesQueued': 123,
'TablesErrored': 123,
'FreshStartDate': datetime(2015, 1, 1),
'StartDate': datetime(2015, 1, 1),
'StopDate': datetime(2015, 1, 1),
'FullLoadStartDate': datetime(2015, 1, 1),
'FullLoadFinishDate': datetime(2015, 1, 1)
},
'TaskData': 'string'
}
}
Response Structure
(dict) --
ReplicationTask (dict) --
The replication task that was modified.
ReplicationTaskIdentifier (string) --
The user-assigned replication task identifier or name.
Constraints:
Must contain from 1 to 255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
SourceEndpointArn (string) --
The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
TargetEndpointArn (string) --
The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
ReplicationInstanceArn (string) --
The Amazon Resource Name (ARN) of the replication instance.
MigrationType (string) --
The type of migration.
TableMappings (string) --
Table mappings specified in the task.
ReplicationTaskSettings (string) --
The settings for the replication task.
Status (string) --
The status of the replication task.
LastFailureMessage (string) --
The last error (failure) message generated for the replication instance.
StopReason (string) --
The reason the replication task was stopped.
ReplicationTaskCreationDate (datetime) --
The date the replication task was created.
ReplicationTaskStartDate (datetime) --
The date the replication task is scheduled to start.
CdcStartPosition (string) --
Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want the CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position \xe2\x80\x9c2018-03-08T12:12:12\xe2\x80\x9d
Checkpoint Example: --cdc-start-position "checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93"
LSN Example: --cdc-start-position \xe2\x80\x9cmysql-bin-changelog.000024:373\xe2\x80\x9d
CdcStopPosition (string) --
Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position \xe2\x80\x9cserver_time:3018-02-09T12:12:12\xe2\x80\x9d
Commit time example: --cdc-stop-position \xe2\x80\x9ccommit_time: 3018-02-09T12:12:12 \xe2\x80\x9c
RecoveryCheckpoint (string) --
Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition parameter to start a CDC operation that begins at that checkpoint.
ReplicationTaskArn (string) --
The Amazon Resource Name (ARN) of the replication task.
ReplicationTaskStats (dict) --
The statistics for the task, including elapsed time, tables loaded, and table errors.
FullLoadProgressPercent (integer) --
The percent complete for the full load migration task.
ElapsedTimeMillis (integer) --
The elapsed time of the task, in milliseconds.
TablesLoaded (integer) --
The number of tables loaded for this task.
TablesLoading (integer) --
The number of tables currently loading for this task.
TablesQueued (integer) --
The number of tables queued for this task.
TablesErrored (integer) --
The number of errors that have occurred during this task.
FreshStartDate (datetime) --
The date the replication task was started either with a fresh start or a target reload.
StartDate (datetime) --
The date the replication task was started either with a fresh start or a resume. For more information, see StartReplicationTaskType .
StopDate (datetime) --
The date the replication task was stopped.
FullLoadStartDate (datetime) --
The date the replication task full load was started.
FullLoadFinishDate (datetime) --
The date the replication task full load was completed.
TaskData (string) --
Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration User Guide.
Exceptions
DatabaseMigrationService.Client.exceptions.InvalidResourceStateFault
DatabaseMigrationService.Client.exceptions.ResourceNotFoundFault
DatabaseMigrationService.Client.exceptions.ResourceAlreadyExistsFault
DatabaseMigrationService.Client.exceptions.KMSKeyNotAccessibleFault
:return: {
'ReplicationTask': {
'ReplicationTaskIdentifier': 'string',
'SourceEndpointArn': 'string',
'TargetEndpointArn': 'string',
'ReplicationInstanceArn': 'string',
'MigrationType': 'full-load'|'cdc'|'full-load-and-cdc',
'TableMappings': 'string',
'ReplicationTaskSettings': 'string',
'Status': 'string',
'LastFailureMessage': 'string',
'StopReason': 'string',
'ReplicationTaskCreationDate': datetime(2015, 1, 1),
'ReplicationTaskStartDate': datetime(2015, 1, 1),
'CdcStartPosition': 'string',
'CdcStopPosition': 'string',
'RecoveryCheckpoint': 'string',
'ReplicationTaskArn': 'string',
'ReplicationTaskStats': {
'FullLoadProgressPercent': 123,
'ElapsedTimeMillis': 123,
'TablesLoaded': 123,
'TablesLoading': 123,
'TablesQueued': 123,
'TablesErrored': 123,
'FreshStartDate': datetime(2015, 1, 1),
'StartDate': datetime(2015, 1, 1),
'StopDate': datetime(2015, 1, 1),
'FullLoadStartDate': datetime(2015, 1, 1),
'FullLoadFinishDate': datetime(2015, 1, 1)
},
'TaskData': 'string'
}
}
:returns:
Must contain from 1 to 255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
"""
pass | 5,334,546 |
def psi_gauss_1d(x, a: float = 1.0, x_0: float = 0.0, k_0: float = 0.0):
    """Return a 1D Gaussian wave packet evaluated at x.

    The packet has width ``a``, carrier momentum ``k_0`` and is centered
    at ``x_0``; the prefactor normalizes the squared modulus.

    :param x: evaluation point(s), scalar or array
    :param a: width of the packet
    :param x_0: center of the packet in x
    :param k_0: momentum (phase wave number) of the packet
    """
    norm_factor = (a * np.sqrt(np.pi)) ** (-0.5)
    envelope = -0.5 * ((x - x_0) / a) ** 2
    phase = 1j * k_0 * x
    return norm_factor * np.exp(envelope + phase)
def get(address, limit=LIMIT):
    """
    Recursively dereferences an address.

    Returns:
        A list containing ``address``, followed by up to ``limit`` valid pointers.
    """
    chain = []
    for _ in range(limit):
        # Stop once a value has already been recorded twice, so cycles are
        # kept up to (and including) their second occurrence.
        if chain.count(address) >= 2:
            break
        chain.append(address)
        try:
            address = int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address))
        except gdb.MemoryError:
            # Unreadable memory terminates the pointer chain.
            break
    return chain
def elemwise_checker(op, expected_f, gap=None, test_dtypes=None,
                     grad_test=True, name=None, gap_grad=True):
    """Return the appropriate test class for the elemwise on sparse.

    :param op: Op to test.
    :expected_f: Function use to compare. This function must act
                 on dense matrix. If the op is structured
                 see the `structure_function` decorator to make
                 this function structured.
    :param gap: Tuple for the range of the random sample. When
                length is 1, it is assumed to be the exclusive
                max, when `gap` = (`a`, `b`) it provide a sample
                from [a, b[. If `None` is used, it provide [0, 1]
                for float dtypes and [0, 50[ for integer dtypes.
    :param test_dtypes: Particular dtypes for testing the op.
                        If `None`, this is set to the most common
                        dtypes.
    :param grad_test: True for testing the grad. False will
                      skip this test.
    :param gap_grad: If None, we reuse gap. Otherwise it is the same as gap
                     but for testing the gradiant of the op.
    :return: The class that perform the tests, not an instance
             of the class.
    """
    if test_dtypes is None:
        test_dtypes = sparse.all_dtypes

    # NOTE: this is a test-class *factory*: `op`, `expected_f`, `gap`, etc.
    # are captured by closure inside the methods of the generated class.
    class Tester(unittest.TestCase):

        def setUp(self):
            super(Tester, self).setUp()
            self.op = op
            self.expected_f = expected_f
            self.gap = gap
            if gap_grad is not None:
                self.gap_grad = gap_grad
            else:
                self.gap_grad = gap
            # Ensure the test's name is correct.
            # (The generated class must be reachable in globals under the
            # name assigned at the bottom of this factory.)
            utt.seed_rng()
            assert eval(self.__class__.__name__) is self.__class__

        def test_op(self):
            for format in sparse.sparse_formats:
                for dtype in test_dtypes:
                    # 8-bit dtypes are handled separately below.
                    if dtype == 'int8' or dtype == 'uint8':
                        continue

                    # When testing with unsigned integers,
                    # we must check if the gap contains
                    # negative numbers.
                    if dtype.startswith('uint'):
                        if self.gap and len(self.gap) == 2 and self.gap[0] < 0:
                            if self.gap[1] >= 1:
                                self.gap = (0, self.gap[1])
                            else:
                                raise TypeError('Gap not suitable for',
                                                dtype, self.__name__)

                    variable, data = sparse_random_inputs(
                        format,
                        shape=(4, 7),
                        out_dtype=dtype,
                        gap=self.gap)

                    f = theano.function(variable, self.op(*variable))

                    tested = f(*data)

                    # Compare against the dense reference implementation.
                    data = [m.toarray() for m in data]
                    expected = self.expected_f(*data)

                    assert tested.format == format
                    tested = tested.toarray()

                    try:
                        utt.assert_allclose(expected, tested)
                    except AssertionError:
                        # Re-raise with the test class name so failures are
                        # attributable to the specific op under test.
                        raise AssertionError(self.__name__)

                # Test with int8 as dtype
                # These tests are not in the loop for two reasons.
                # First, in recent version of numpy, when a numpy
                # function have int8 as input dtype, it returns a
                # float16 as output dtype. Since this does not provide
                # enough precision, we upcast the data before we apply the
                # function.
                # Second, the tolerance for the checkup in DebugMode
                # is too high.
                for dtype in ['int8', 'uint8']:
                    if dtype in test_dtypes:
                        if self.gap:
                            domain = self.gap
                            # When testing with unsigned integers,
                            # we must check if the gap contains
                            # negative numbers.
                            if dtype == 'uint8':
                                if len(domain) == 2 and domain[0] < 0:
                                    if domain[1] >= 1:
                                        domain = (0, domain[1])
                                    else:
                                        raise TypeError('Gap not suitable for',
                                                        dtype, self.__name__)
                        else:
                            domain = (0, 5)

                        variable, data = sparse_random_inputs(
                            format,
                            shape=(4, 7),
                            out_dtype=dtype,
                            gap=domain)

                        f = theano.function(variable, self.op(*variable))

                        # Temporarily loosen the global comparison
                        # tolerances for the low-precision 8-bit case;
                        # restored in the `finally` below.
                        old_value = (tensor.basic.float32_atol,
                                     tensor.basic.float32_rtol,
                                     tensor.basic.float64_atol,
                                     tensor.basic.float64_rtol)
                        tensor.basic.float32_atol = 1e-4
                        tensor.basic.float32_rtol = 1e-3
                        tensor.basic.float64_atol = 1e-3
                        tensor.basic.float64_rtol = 1e-4
                        try:
                            tested = f(*data)
                        finally:
                            (tensor.basic.float32_atol,
                             tensor.basic.float32_rtol,
                             tensor.basic.float64_atol,
                             tensor.basic.float64_rtol) = old_value

                        # Upcast before applying the reference function
                        # (see the comment block above).
                        data = [m.toarray().astype('float32') for m in data]
                        expected = self.expected_f(*data)

                        assert tested.format == format
                        tested = tested.toarray()

                        try:
                            utt.assert_allclose(tested, expected, rtol=1e-2)
                        except AssertionError:
                            raise AssertionError(self.__name__)

        # The gradient test is only defined when requested; note this `if`
        # runs at class-body creation time.
        if grad_test:
            def test_grad(self):
                for format in sparse.sparse_formats:
                    for dtype in sparse.float_dtypes:
                        variable, data = sparse_random_inputs(
                            format,
                            shape=(4, 7),
                            out_dtype=dtype,
                            gap=self.gap_grad)

                        verify_grad_sparse(self.op,
                                           data,
                                           structured=True)

    # Set proper class name to uniquely identify tests.
    # Note that it is important to run this code *outside* of the `Tester`
    # class itself, otherwise it will not work properly for some reason.
    if name is None:
        name = op.__name__.capitalize() + 'Tester'
    Tester.__name__ = name
    assert 'Roundhalftoeven' not in Tester.__name__

    return Tester
def plot_segments(track_generator, get_figure=False, plot_3D=False):
    """Plot the characteristic track segments from an OpenMOC simulation.

    This method requires that tracks have been generated by a TrackGenerator.
    Each segment is colored by the ID of the unique FSR it is within.

    Parameters
    ----------
    track_generator : openmoc.TrackGenerator
        A TrackGenerator with the track segments to plot
    get_figure : bool
        Whether or not to return the Matplotlib figure
    plot_3D : bool
        Whether to render the segments in 3D (only meaningful when
        track_generator is an openmoc.TrackGenerator3D)

    Returns
    -------
    fig : matplotlib.Figure or None
        The Matplotlib figure is returned if get_figure is True

    Examples
    --------
    A user may invoke this function from an OpenMOC Python file as follows:

    >>> openmoc.plotter.plot_segments(track_generator)
    """
    cv.check_type('track_generator', track_generator, openmoc.TrackGenerator)

    if not track_generator.containsTracks():
        py_printf('ERROR', 'Unable to plot Track segments since the ' +
                  'TrackGenerator has not yet generated Tracks.')

    global subdirectory, matplotlib_rcparams
    directory = openmoc.get_output_directory() + subdirectory

    # Ensure that normal settings are used even if called from ipython
    curr_rc = matplotlib.rcParams.copy()
    update_rc_param(curr_rc)

    # Make directory if it does not exist
    try:
        os.makedirs(directory)
    except OSError:
        pass

    py_printf('NORMAL', 'Plotting the track segments...')

    # Retrieve data from TrackGenerator
    vals_per_segment = openmoc.NUM_VALUES_PER_RETRIEVED_SEGMENT
    num_azim = track_generator.getNumAzim()
    spacing = track_generator.getDesiredAzimSpacing()
    if plot_3D and isinstance(track_generator, openmoc.TrackGenerator3D):
        num_polar = track_generator.getNumPolar()
        z_spacing = track_generator.getDesiredZSpacing()
    num_segments = int(track_generator.getNumSegments())
    num_fsrs = int(track_generator.getGeometry().getNumTotalFSRs())
    coords = \
        track_generator.retrieveSegmentCoords(num_segments*vals_per_segment)

    # Convert data to NumPy arrays.
    # Each segment contributes vals_per_segment values: the FSR id followed
    # by start (x, y, z) and end (x, y, z) coordinates -- see the unpacking
    # offsets in the loop below.
    coords = np.array(coords)
    x = np.zeros(num_segments*2)
    y = np.zeros(num_segments*2)
    z = np.zeros(num_segments*2)
    fsrs = np.zeros(num_segments)

    for i in range(num_segments):
        fsrs[i] = coords[i*vals_per_segment]
        x[i*2] = coords[i*vals_per_segment+1]
        y[i*2] = coords[i*vals_per_segment+2]
        z[i*2] = coords[i*vals_per_segment+3]
        x[i*2+1] = coords[i*vals_per_segment+4]
        y[i*2+1] = coords[i*vals_per_segment+5]
        z[i*2+1] = coords[i*vals_per_segment+6]

    # Create array of equally spaced randomized floats as a color map for plots
    # Seed the NumPy random number generator to ensure reproducible color maps
    numpy.random.seed(1)
    color_map = np.linspace(0., 1., num_fsrs, endpoint=False)
    numpy.random.shuffle(color_map)

    # Make figure of line segments for each track
    fig = plt.figure(constrained_layout=True)
    fig.patch.set_facecolor('none')

    # Create a color map corresponding to FSR IDs
    if plot_3D:
        ax = fig.gca(projection = '3d')
        for i in range(num_segments):
            cNorm = colors.Normalize(vmin=0, vmax=max(color_map))
            scalarMap = cmx.ScalarMappable(norm=cNorm)
            color = scalarMap.to_rgba(color_map[int(fsrs[i]) % num_fsrs])
            plt.plot(x[i*2:(i+1)*2], y[i*2:(i+1)*2], z[i*2:(i+1)*2], c=color)
        # Avoid a degenerate z-axis range when all segments share one z value
        if z.min() != z.max():
            ax.set_zlim(z.min(), z.max())
    else:
        for i in range(num_segments):
            cNorm = colors.Normalize(vmin=0, vmax=max(color_map))
            scalarMap = cmx.ScalarMappable(norm=cNorm)
            color = scalarMap.to_rgba(color_map[int(fsrs[i]) % num_fsrs])
            plt.plot(x[i*2:(i+1)*2], y[i*2:(i+1)*2], c=color)

    plt.xlim([x.min(), x.max()])
    plt.ylim([y.min(), y.max()])

    suptitle = 'Segments ({0} angles, {1} cm spacing)'.format(num_azim,
                                                              spacing)
    if plot_3D and isinstance(track_generator, openmoc.TrackGenerator3D):
        suptitle = 'Segments ({0}/{1} azimuthal/polar angles\n and {2}/{3} cm '\
            'azimuthal/axial spacings'.format(num_azim, num_polar, spacing,
                                              z_spacing)
    title = 'z = {0}'.format(z[0])
    plt.suptitle(suptitle)
    if not plot_3D:
        plt.title(title)

    # Restore settings if called from ipython
    update_rc_param(curr_rc)

    # Only the root domain writes/returns the figure in a domain-decomposed run
    if track_generator.getGeometry().isRootDomain():
        if get_figure:
            return fig
        else:
            filename = 'segments-{0}-angles-{1}-spacing'.format(num_azim,
                                                                spacing)
            filename = '{0}-z-{1}.png'.format(filename, z[0])
            if plot_3D and isinstance(track_generator, openmoc.TrackGenerator3D):
                filename = '3d-segments-{0}-azimuthal-{1}-polar-angles-{2}-'\
                    'azimuthal-{3}-z-spacing.png'.format(num_azim, num_polar,
                                                         spacing, z_spacing)
            fig.savefig(directory+filename, bbox_inches='tight')
            plt.close(fig)
def find_availability_by_year(park, campground, year, months=range(1, 13)):
    """
    Collect weekend availability for every requested month of a year.

    Parameters
    ----------
    park : str
    campground : str
    year : str
    months : iterable
        Iterable of months as str or int. Default is ``range(1, 13)``.

    Returns
    -------
    list
        Flattened list of weekend availability at the given park's campground
        over the requested months. Collection stops at the first month whose
        lookup raises (e.g. a month with no published data yet); months after
        it are not queried.
    """
    monthly_results = []
    for month in months:
        if isinstance(month, int):
            month = str(month)
        try:
            monthly_results.append(
                find_availability_by_month(park, campground, year, month))
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; any lookup failure ends the collection.
            break
    # Flatten the per-month lists into a single list.
    return [slot for month_slots in monthly_results for slot in month_slots]
def seconds(value=None, utc=True, **kwargs):
    """
    Convert ``value`` to seconds.

    Numeric input is treated as milliseconds and converted by dividing by
    1000. Any other input (timedelta, struct_time, datetime, or named
    arguments as accepted by the timedelta function) is delegated to
    ``_convert_time``; datetime instances yield seconds since epoch (UTC).
    """
    if isinstance(value, (int, float)):
        # Numbers are assumed to be milliseconds; int() truncates toward zero.
        milliseconds = float(value)
        return int(milliseconds / 1000.0)
    return _convert_time(value, utc, **kwargs)
def fix_mocov2_state_dict(state_dict):
    """
    Strip the ``model.encoder_q.`` prefix from MoCo-v2 checkpoint keys.

    Ref: https://bit.ly/3cDfGVA
    """
    fixed = {}
    for key, value in state_dict.items():
        if key.startswith("model.encoder_q."):
            key = key.replace("model.encoder_q.", "")
        fixed[key] = value
    return fixed
def do_flatten(lists):
    """Flatten multiple lists

    Takes a list of lists and yields each element of every sub-list, in order.
    """
    for sublist in lists:
        for element in sublist:
            yield element
def write_metadata(audio_format, filepath, title, artist=None, album=None):
    """Write the metadata to the audiofile"""
    # MP3 has its own writer; everything else goes through the generic path.
    if audio_format != 'mp3':
        write_metadata_other_formats(
            audio_format, filepath, title, artist, album)
    else:
        write_mp3_metadata(filepath, title, artist, album)
def get_perspective(image, contours, ratio):
    """
    Warp the quadrilateral described by ``contours`` to a top-down view.

    :param image: image, numpy array
    :param contours: contour of 4 corner points, numpy array
    :param ratio: rescaling parameter to the original image
    :return: warped image
    """
    corners = contours.reshape(4, 2) * ratio

    # Order corners as top-left, top-right, bottom-right, bottom-left:
    # TL has the smallest x+y sum, BR the largest; TR has the smallest
    # (y - x) difference, BL the largest.
    ordered = np.zeros(shape=(4, 2), dtype='float32')
    sums = corners.sum(axis=1)
    ordered[0] = corners[np.argmin(sums)]
    ordered[2] = corners[np.argmax(sums)]
    diffs = np.diff(corners, axis=1)
    ordered[1] = corners[np.argmin(diffs)]
    ordered[3] = corners[np.argmax(diffs)]

    (tl, tr, br, bl) = ordered
    # Output size: the longer of each opposing edge pair.
    max_width = max(int(norm(br - bl)), int(norm(tr - tl)))
    max_height = max(int(norm(tr - br)), int(norm(tl - bl)))

    destination = np.array([[0, 0],
                            [max_width - 1, 0],
                            [max_width - 1, max_height - 1],
                            [0, max_height - 1]], dtype='float32')
    transform = cv2.getPerspectiveTransform(src=ordered, dst=destination)
    return cv2.warpPerspective(src=image, M=transform,
                               dsize=(max_width, max_height))
def init_integer(m):
    """Initialize module weights with integer values.

    For ``nn.Linear`` and ``nn.Conv2d`` modules, weights (and biases when
    present) are sampled from U(-1, 1) and then rounded up in place,
    producing values in {0, 1} (with -1 possible only at the boundary).
    Intended for use with ``module.apply(init_integer)``.

    :param m: a torch module (non-Linear/Conv2d modules are left untouched)
    """
    a = -1.
    b = 1.
    # isinstance instead of `type(m) ==` so subclasses are handled too.
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.uniform_(m.weight, a=a, b=b)
        m.weight.data.ceil_()
        # Modules created with bias=False have bias=None; skip them instead
        # of crashing.
        if m.bias is not None:
            nn.init.uniform_(m.bias, a=a, b=b)
            m.bias.data.ceil_()
def processFilesOpen(filename, filetype='file', subname='', zptr=None,
                     **kwargs):
    """
    Open a file for processing.  If it is a compressed file, open for
    decompression.

    :param filetype: 'zip' if this is a zip archive.
    :param filename: name of the file (if a zip archive, this is the archive).
    :param subname: name within an archive.
    :param zptr: a pointer to a zip archive if appropriate.
    :returns: a file-like object and the display filename.
    """
    import shlex  # local import: only needed for the gunzip branch

    lower = filename.lower()
    if filetype == 'zip':
        fptr = zptr.open(subname)
        filename += ' - ' + subname
    elif lower.endswith('.bz2'):
        fptr = bz2.BZ2File(filename)
        filename = filename.rsplit('.', 1)[0]
    elif lower.endswith(('.gz', '.gz.tmp')):
        # fptr = gzip.open(filename)
        # Using the command line utility lets a second core be used a little.
        # Quote the path so shell metacharacters in file names are harmless.
        fptr = os.popen('gunzip < %s' % shlex.quote(filename))
        filename = filename.rsplit('.', 1)[0]
    else:
        fptr = open(filename)
    return fptr, filename
def test_from_settings():
    """Test that from_settings converts application settings."""
    settings = {'DATABASE_A': 1, 'DATABASE_B': 2}
    converted = from_settings(settings)
    assert converted == {'a': 1, 'b': 2}
def load_model_selector(folder_path):
    """Load information about a stored model selection.

    Parameters
    ----------
    folder_path : str
        Path where ``.model_selector_result`` files are stored.

    Returns
    -------
    ModelSelector
        Information about model selection for each partition.
    """
    results = []
    for result_file in Path(folder_path).glob("*.model_selector_result"):
        results.append(
            load_model_selector_result(
                path=result_file.parent, partition_hash=result_file.stem
            )
        )
    # All partitions share the same horizon/frequency settings, so the first
    # result is used to configure the selector.
    first = results[0]
    model_selector = ModelSelector(
        horizon=first.horizon,
        frequency=first.frequency,
        country_code_column=first.country_code_column,
    )
    model_selector.results = results
    return model_selector
def train_run():
    """
    Runs the loop that trains the agent.

    Trains the agent on the goal-oriented chatbot task. Training of the agent's neural network occurs every episode that
    TRAIN_FREQ is a multiple of. Terminates when the episode reaches NUM_EP_TRAIN.
    """
    print('Training Started...')
    episode = 0
    # Running totals accumulated over the current TRAIN_FREQ-episode period;
    # all are reset after each evaluation/training step below.
    period_reward_total = 0
    period_ireward_total = 0
    period_success_total = 0
    period_mood_success_total = 0
    success_rate_best = 0.0
    episode_efficiency_total = 0.0
    # best_avg_reward = 0.0
    period_step = 0
    period_mood_total = 0
    quality_metric = 0.0
    best_quality_metric = 0.0
    # Initialize tensorflow summary
    tf.summary.experimental.set_step(episode)
    while episode < NUM_EP_TRAIN:
        user_action = episode_reset()
        episode += 1
        # Update Tensorflow
        tf.summary.experimental.set_step(episode)
        # Print the conversation if RENDER==TRUE
        if RENDER:
            print('\n********** Episode {} ********************'.format(episode))
            print('User Goal: {}'.format(user.goal))
            print(user.user_mood.current_mood)
            print('-----------------------------------------------------------------------------------------------------------')
            print('Initial User Utterance: {}'.format(user_action))
        done = False
        state = state_tracker.get_state()
        # Run one dialogue episode round by round until the user/agent ends it.
        while not done:
            next_state, reward, done, success, intrinsisc_r = run_round(state)
            period_reward_total += reward
            period_ireward_total += intrinsisc_r
            state = next_state
            period_step += 1
            # Emotion index scaled by 2 to map onto a 0..1 mood score
            # (assumes user_mood.emotions has 3 ordered entries -- TODO confirm).
            period_mood_total += user.user_mood.emotions.index(user.user_mood.current_mood['emotion']) / 2
        # Per-episode bookkeeping: `success` holds the final round's outcome.
        period_success_total += success
        episode_efficiency_total += efficiency_metric(state_tracker.history, user.user_mood.current_mood['goal_desire'] )
        if user.user_mood.current_mood['emotion']=="positiv":
            period_mood_success_total += 1
        # Train
        if episode % TRAIN_FREQ == 0:
            # Check success rate
            success_rate = period_success_total / TRAIN_FREQ
            mood_success_rate = period_mood_success_total / TRAIN_FREQ
            avg_reward = period_reward_total / TRAIN_FREQ
            avg_ireward = period_ireward_total / TRAIN_FREQ
            avg_mood = period_mood_total / (period_step)
            quality_metric = avg_mood * avg_reward * success_rate
            avg_efficiancy = episode_efficiency_total / TRAIN_FREQ
            # Flush
            # if success_rate >= success_rate_best and success_rate >= SUCCESS_RATE_THRESHOLD:
            # NOTE(review): the replay memory is emptied once at a hard-coded
            # episode 120 -- presumably replacing the commented threshold
            # condition above; confirm this is intentional.
            if episode == 120:
                dqn_agent.empty_memory()
            # Update current best success rate
            print('Episode: {} SUCCESS RATE: {} MOOD SUCCESS RATE: {} Avg Reward: {} QUALITY METRIC: {}' .format(episode, success_rate, mood_success_rate, avg_reward, quality_metric))
            # Logg data to tensorboard
            tf.summary.scalar(name="succes rate", data=success_rate)
            tf.summary.scalar(name="mood sucess rate", data=mood_success_rate)
            tf.summary.scalar(name="avg reward", data=avg_reward)
            tf.summary.scalar(name="avg intrinisc reward", data=avg_ireward)
            tf.summary.scalar(name="avg mood", data=avg_mood)
            tf.summary.scalar(name="quality metric", data=quality_metric)
            tf.summary.scalar(name="avg efficiency", data=avg_efficiancy)
            writer.flush()
            #if success_rate > success_rate_best:
            # Checkpoint whenever the combined quality metric improves.
            if quality_metric > best_quality_metric:
                print('**********************Episode: {} NEW BEST SUCCESS RATE: {} MOOD SUCCESS RATE: {} Avg Reward: {} QUALITY METRIC: {}********************************' .format(episode, success_rate, mood_success_rate, avg_reward, quality_metric))
                best_quality_metric = quality_metric
                dqn_agent.save_weights()
            # Reset all period accumulators for the next TRAIN_FREQ window.
            period_success_total = 0
            period_reward_total = 0
            period_ireward_total = 0
            period_mood_success_total = 0
            period_mood_total = 0
            avg_mood = 0
            episode_efficiency_total = 0
            period_step = 0
            # Copy
            dqn_agent.copy()
            # Train
            dqn_agent.train()
            # for e-greedy exploration and exploitation wich is GLIE we need eps to decay to zero
            # if episode % 200 == 0:
            #     k = episode / 200
            #     dqn_agent.eps = 1 / (k + 1)
    print('...Training Ended')
    tf.summary.flush()
def do_handshake(holdtime):
    """Does a handshake with the binary"""
    optparam = "\x02\x06\x01\x04\x00\x01\x00\x01"
    open_body = flat(
        4,
        "AA",
        p16b(holdtime),
        "AAAA",
        len(optparam),
        optparam,
        word_size = 8,
    )
    send_pkt(BGP_OPEN, open_body)
    # Consume the peer's OPEN and KEEPALIVE replies that complete the handshake.
    recv_packet(BGP_OPEN)
    recv_packet(BGP_KEEPALIVE)
def generate_expired_date():
    """Generate a datetime object NB_DAYS_BEFORE_DELETING_LIVE_RECORDINGS days in the past."""
    retention = timedelta(days=settings.NB_DAYS_BEFORE_DELETING_LIVE_RECORDINGS)
    return timezone.now() - retention
def make_dummy_authentication_request_args():
    """Create a factory for request arguments that emulate a login request.

    Returns:
        A zero-argument callable that produces the argument dictionary; each
        value is a single-element list of UTF-8 encoded bytes, matching the
        shape of web-framework request arguments.  (The previous
        ``-> Dict[str, bytes]`` annotation was wrong on both counts: this
        returns a factory, and the dict values are lists of bytes.)
    """
    def _make_dummy_authentication_request_args():
        return {
            "username": [b"foobar"],
            "password": [b"mypassword"],
            "assignment_name": [b"lab101"],
            "course_id": [b"intro101"],
            "lms_user_id": [b"abc123"],
            "user_role": [b"Student"],
        }

    return _make_dummy_authentication_request_args
def add_experiment_images_to_image_info_csv(image_info_df, experiment_xml_file):
    """
    Goes through the xml file of the experiment and adds the info of its
    images to the image info dataframe.

    If the gene name is missing in the experiment, the experiment is
    considered invalid and ``image_info_df`` is returned unchanged.
    (Previously this case still ran the row-building loop and crashed with a
    NameError on the undefined gene fields.)

    :param image_info_df: the image info dataframe to append the new images
    :param experiment_xml_file: the xml file of the experiment that we want
        to add its images
    :return: (image_info_df, invalid) -- the (possibly extended) dataframe and
        a boolean which determines whether this experiment is invalid.
    """
    tree = et.parse(experiment_xml_file)
    root = tree.getroot()
    section_data_set = root.find('section-data-sets').find('section-data-set')
    experiment_id = section_data_set.find('id').text

    # Reject experiments with no gene annotation before any further parsing.
    gene = section_data_set.find('genes').find('gene')
    if gene is None:
        print("experiment " + experiment_id + " is invalid")
        return image_info_df, True

    specimen_id = section_data_set.find('specimen-id').text
    section_images = section_data_set.find('section-images')
    specimen = section_data_set.find('specimen')
    donor = specimen.find('donor')
    structure = specimen.find('structure')
    condition = donor.find('conditions').find('condition')

    # Fields shared by every image row of this experiment.
    row_template = {
        'gene_symbol': gene.find('acronym').text,
        'entrez_id': gene.find('entrez-id').text,
        'alias_tags': gene.find('alias-tags').text,
        'original_name': gene.find('original-name').text,
        'original_symbol': gene.find('original-symbol').text,
        'experiment_id': experiment_id,
        'specimen_id': specimen_id,
        'description': condition.find('description').text,
        'donor_id': donor.find('name').text,
        'donor_sex': donor.find('sex').text,
        'donor_age': donor.find('age-id').text,
        'donor_race': donor.find('race-only').text,
        'smoker': donor.find('smoker').text,
        'chemotherapy': donor.find('chemotherapy').text,
        'radiation_therapy': donor.find('radiation-therapy').text,
        'tumor_status': donor.find('tumor-status').text,
        'region': structure.find('name').text,
        'region_acronym': structure.find('acronym').text,
        'tissue_ph': specimen.find('tissue-ph').text,
        'pmi': donor.find('pmi').text,
    }

    # One row per section image.  DataFrame.append is deprecated/removed in
    # modern pandas, so build all rows first and concatenate once.
    rows = []
    for section_image in section_images.findall('section-image'):
        row = {'image_id': section_image.find('id').text}
        row.update(row_template)
        rows.append(row)
    if rows:
        image_info_df = pd.concat([image_info_df, pd.DataFrame(rows)],
                                  ignore_index=True)
    return image_info_df, False
def dedup(iterable: Iterable[T], key: Optional[Callable[[T], U]] = None) -> Iterator[T]:
    """
    List unique elements (sorted, duplicates removed).

    >>> tuple(dedup([5, 4, 3, 5, 3, 3]))
    (3, 4, 5)
    """
    # Sorting groups equal elements so `uniq` can drop adjacent duplicates.
    ordered = sorted(iterable, key=key)
    return uniq(ordered, key)
def get_product(barcode):
    """
    Return information of a given product.
    """
    endpoint = 'api/v0/product/%s' % barcode
    return utils.fetch(endpoint)
def knn_matcher(arr2, arr1, neighbours=2, img_id=0, ratio_threshold=0.75):
    """Compute inlier matches between two descriptor arrays.

    Arguments:
        arr2 {np.ndarray} -- Descriptors of the train image (matches are
            searched from these rows)
        arr1 {np.ndarray} -- Descriptors of the test image

    Keyword Arguments:
        neighbours {int} -- Number of neighbours per descriptor; must be 2
            (default: {2})
        img_id {int} -- Id of the train image (default: {0})
        ratio_threshold {float} -- Threshold for Lowe's ratio test
            (default: {0.75}).  If 0 or None, matches are not filtered.

    Returns:
        list(matches) -- List of lists of cv2.DMatch objects
    """
    assert neighbours == 2
    # Pairwise L2 distances via ||a||^2 + ||b||^2 - 2 a.b, then sqrt.
    sq_train = np.square(arr2).sum(axis=1)[:, np.newaxis]
    sq_query = np.square(arr1).sum(axis=1)
    dist = np.sqrt(sq_train + sq_query - 2 * arr2.dot(arr1.T))
    # Indices of the K nearest columns for every row.
    nearest = np.argsort(dist, axis=1)[:, :neighbours]

    matches = []
    for row, cols in enumerate(nearest):
        matches.append([
            cv2.DMatch(_trainIdx=col, _queryIdx=row,
                       _distance=dist[row, col], _imgIdx=img_id)
            for col in cols
        ])

    # Perform the ratio test to keep inliers only.
    if ratio_threshold:
        matches = filter_matches(matches, ratio_threshold)
    return matches
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
                                                resource_group_name: Optional[str] = None,
                                                virtual_network_gateway_name: Optional[str] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call.


    :param str peer: The IP address of the peer to retrieve the status of.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    # NOTE: auto-generated pulumi SDK shim; argument keys use the provider's
    # camelCase wire names.
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # `typ` tells the runtime how to deserialize the raw invoke response.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20200601:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value

    return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
        value=__ret__.value)
def check_wrs2_tiles(wrs2_tile_list=None, path_list=None, row_list=None):
    """Setup path/row lists

    Populate the separate path and row lists from wrs2_tile_list.
    Filtering by path and row lists separately seems to be faster than
    creating a new path/row field and filtering directly.

    :param wrs2_tile_list: WRS2 tile strings like 'p043r033' (zero padding
        optional on input)
    :param path_list: Landsat path numbers (int or numeric str)
    :param row_list: Landsat row numbers (int or numeric str)
    :return: (wrs2_tile_list, path_list, row_list) with tiles normalized to
        zero-padded strings and paths/rows as sorted integer lists.
    """
    # None (not mutable []) defaults; normalize to empty lists here.
    wrs2_tile_list = wrs2_tile_list or []
    path_list = path_list or []
    row_list = row_list or []

    wrs2_tile_fmt = 'p{:03d}r{:03d}'
    # Raw strings so the \d escapes are real regex tokens, not string escapes.
    wrs2_tile_re = re.compile(r'p(?P<PATH>\d{1,3})r(?P<ROW>\d{1,3})')

    # Force path/row list to zero padded three digit numbers
    if wrs2_tile_list:
        wrs2_tile_list = sorted([
            wrs2_tile_fmt.format(int(m.group('PATH')), int(m.group('ROW')))
            for pr in wrs2_tile_list
            for m in [wrs2_tile_re.match(pr)] if m])

    # If path_list and row_list were specified, force to integer type
    try:
        path_list = list(sorted(map(int, path_list)))
    except ValueError:
        logging.error(
            '\nERROR: The path list could not be converted to integers, '
            'exiting\n {}'.format(path_list))
        sys.exit()
    try:
        row_list = list(sorted(map(int, row_list)))
    except ValueError:
        logging.error(
            '\nERROR: The row list could not be converted to integers, '
            'exiting\n {}'.format(row_list))
        sys.exit()

    # Convert wrs2_tile_list to path_list and row_list if not set
    # Pre-filtering on path and row separately is faster than building wrs2_tile
    if wrs2_tile_list and not path_list:
        path_list = sorted(list(set([
            int(wrs2_tile_re.match(pr).group('PATH'))
            for pr in wrs2_tile_list if wrs2_tile_re.match(pr)])))
    if wrs2_tile_list and not row_list:
        row_list = sorted(list(set([
            int(wrs2_tile_re.match(pr).group('ROW'))
            for pr in wrs2_tile_list if wrs2_tile_re.match(pr)])))
    if path_list:
        logging.debug(' Paths: {}'.format(
            ' '.join(list(map(str, path_list)))))
    if row_list:
        logging.debug(' Rows: {}'.format(' '.join(list(map(str, row_list)))))
    if wrs2_tile_list:
        logging.debug(' WRS2 Tiles: {}'.format(
            ' '.join(list(map(str, wrs2_tile_list)))))

    return wrs2_tile_list, path_list, row_list
def cat(file_path: str) -> Optional[str]:
    """Shortcut for ``pathlib.Path(file_path).read_text()``.

    Args:
        file_path (str): path of the file to read
    Returns:
        Optional[str]: the file's contents, or ``None`` when ``file_path``
        is not an existing regular file.  (The previous ``-> str`` annotation
        missed the ``None`` case.)
    Example:
        >>> cat('unknown.txt')
    """
    path = pathlib.Path(file_path)
    if path.is_file():
        return path.read_text()
    return None
def test_check_gdf(gdf, exc, pattern):
    """Test GeoDataFrame validation function."""
    expected_msg = re.escape(pattern)
    with pytest.raises(exc) as err:
        check_gdf(gdf)
    assert err.match(expected_msg)
    # A well-formed frame must pass validation silently.
    valid_gdf = GeoDataFrame(geometry=[POLY])
    assert check_gdf(valid_gdf) is None
def test_deep_index(tree: Group):
    """Test deep indexing: a node fetched by a path tuple reports the
    same path back from Group.index()."""
    # (1, 0) addresses the first child of the second child of the root.
    node = tree[(1, 0)]
    assert tree.index(node) == (1, 0)
def _get_role_by_name(role_name):
    """Look up an application membership role by its name.

    Args:
        role_name (str): Role name to search for.

    Returns:
        int: Id of the matching application membership role.

    Raises:
        exceptions.BalenaApplicationMembershipRoleNotFound: If no role
            with the given name exists.
    """
    settings = Settings()
    request = BaseRequest()
    # Pine filter: application_membership_role where name == role_name.
    query = {'filter': 'name', 'eq': role_name}
    matches = request.request(
        'application_membership_role', 'GET', params=query,
        endpoint=settings.get('pine_endpoint')
    )['d']
    if not matches:
        raise exceptions.BalenaApplicationMembershipRoleNotFound(role_name=role_name)
    return matches[0]['id']
def parse_filename(filename, is_adversarial=False, **kwargs):
    """Parse an experiment result filename into a dictionary of settings.

    Args:
        filename: filename string to parse.
        is_adversarial: True when the file comes from
            experiments/GIB_node_adversarial_attack.
        **kwargs: forwarded to the adversarial parser only.
    """
    # Dispatch to the parser matching the experiment family.
    if not is_adversarial:
        return parse_filename_standard(filename)
    return parse_filename_adversarial(filename, **kwargs)
def WHo_mt(dist, sigma):
    """
    Speed-accuracy (WHo) model for generating finger movement time.

    :param dist: Euclidean distance between points.
    :param sigma: speed-accuracy trade-off variance.
    :return: mt: movement time.
    """
    # Fitted model constants.
    x0 = 0.092      # minimum movement-time offset
    y0 = 0.0018     # minimum variance offset
    alpha = 0.6     # speed-accuracy trade-off exponent
    k_alpha = 0.12  # scaling constant
    # (removed unused x_min / x_max constants)
    # Guard against division by zero for coincident points.
    if dist == 0:
        dist = 1e-7
    mt = (k_alpha * ((sigma - y0) / dist) ** (alpha - 1)) ** (1 / alpha) + x0
    return mt
def find_next_sibling_position(element, tag_type):
    """
    Return the character position, in the HTML document, of ``element``'s
    next sibling of the given tag type.

    :param element: Whose sibling to look for, type: bs4.Tag
    :param tag_type: sibling tag's type (e.g. p, h2, div, span etc.), a string
    :return: An integer character position, or infinity when no sibling exists
    """
    sibling = element.find_next_sibling(tag_type)
    if sibling is None:
        # Infinity sorts such an element after every real position.
        return float("inf")
    return sibling.sourcepos
def on_create(data):
    """Create a game lobby.

    Expects ``data`` with keys 'username' and 'players_number'; registers
    the requesting socket session in the module-level USERS/ROOMS maps,
    creates a room, and emits 'joined_user' back to the creator.
    """
    user = data['username']
    session_id = request.sid
    # 'join' means the client is joining an existing lobby, not creating one.
    if data['players_number'] == 'join':
        return
    players_number = int(data['players_number'])
    # A session already inside a room may not create another.
    if session_id in USERS and USERS[session_id]['room'] != '':
        print (session_id + " already in a room")
        return
    # First contact from this session: register an empty user record.
    if not session_id in USERS:
        USERS[session_id] = {'username': '', 'room': ''}
    room_id = generate_room_id()
    join_room(room_id)
    # Notify the creator of the new room (payload is JSON text).
    message = {'username': user, "session": session_id, 'room': room_id}
    message = json.dumps(message)
    emit('joined_user', message)
    # Record the room: capacity and its first (creator) player.
    ROOMS[room_id] = {}
    ROOMS[room_id]["room_size"] = players_number
    ROOMS[room_id]["players"] = [session_id]
    USERS[session_id]['username'] = user
    USERS[session_id]['room'] = room_id
    print(ROOMS)
    print(USERS)
    update_client_users_list(room_id)
    send_print_users()
def CheckOutput(cmd, **kwargs):
    """Run a command via subprocess.check_output and return its output.

    subprocess.check_output returns ``bytes`` under python 3, so the raw
    output is decoded to a string before being handed back.

    Args:
        cmd: String of command.
        **kwargs: dictionary of keyword based args to pass to func.

    Return:
        String of the command output.
    """
    raw_output = subprocess.check_output(cmd, **kwargs)
    return raw_output.decode()
def one_hot(arr, n_class=0):
    """Change labels to one-hot expression.

    Args:
        arr [np.array]: array-like of non-negative integer labels, or None.
        n_class [int]: number of classes; 0 means infer as max label + 1.

    Returns:
        oh [np.array]: one-hot array of shape ``(*arr.shape, n_class)``,
            or None when ``arr`` is None.

    Raises:
        ValueError: if ``n_class`` is smaller than max label + 1.
    """
    if arr is None:
        return None
    # np.asarray accepts lists, tuples and ndarrays alike (the previous
    # isinstance check silently skipped tuples).
    arr = np.asarray(arr)
    ishape = arr.shape
    arr = arr.flatten()
    if n_class == 0:
        n_class = arr.max() + 1
    elif n_class < arr.max() + 1:
        # Raise explicitly: `assert cond, ValueError(...)` only ever raised
        # AssertionError and vanished entirely under `python -O`.
        raise ValueError("Value of 'n_class' is too small.")
    # Integer-array indexing sets one 1 per row.
    oh = np.zeros((arr.size, n_class), dtype=int)
    oh[np.arange(arr.size), arr] = 1
    return np.reshape(oh, (*ishape, -1))
def boost_nfw_at_R(R, B0, R_scale):
    """NFW boost factor model.

    Args:
        R (float or array like): Distances on the sky in the same units as R_scale. Mpc/h comoving suggested for consistency with other modules.
        B0 (float): NFW profile amplitude.
        R_scale (float): NFW profile scale radius.

    Returns:
        float or array like: NFW boost factor profile; B = (1-fcl)^-1.
    """
    # Wrap input and allocate the output buffer; the C routine fills
    # `boost` in place through the cast pointers.
    R = _ArrayWrapper(R, 'R')
    boost = _ArrayWrapper.zeros_like(R)
    cluster_toolkit._lib.boost_nfw_at_R_arr(R.cast(), len(R), B0, R_scale,
                                            boost.cast())
    # finish() unwraps back to a scalar or array matching the input shape.
    return boost.finish()
def get_swatches(root):
    """Get swatch elements in the SVG, keyed by their extracted class name."""
    swatches = {}
    for node in descendants(root):
        # Text nodes lack hasAttribute; skip them and any element
        # without an id attribute.
        usable = "hasAttribute" in dir(node) and node.hasAttribute("id")
        if not usable:
            continue
        name = extract_class_name(node.getAttribute("id"))
        if name:
            swatches[name] = node
    return swatches
def chunks(l, n):
    """
    Split list in chunks of size ``n`` - useful for controlling memory usage.
    A chunk size below 1 is treated as 1.
    """
    size = max(n, 1)
    return [l[start:start + size] for start in range(0, len(l), size)]
def sync_active_stable_monitors(db_session: SessionLocal, project: Project):
    """Syncs incident monitors.

    Runs the project's enabled "monitor" plugin over every active and
    stable incident; a no-op (with a warning) when no monitor plugin is
    enabled for the project.
    """
    monitor_plugin = plugin_service.get_active_instance(
        db_session=db_session, project_id=project.id, plugin_type="monitor"
    )
    if not monitor_plugin:
        log.warning(f"No monitor plugin is enabled. ProjectId: {project.id}")
        return

    # we get all active and stable incidents
    active_incidents = incident_service.get_all_by_status(
        db_session=db_session, project_id=project.id, status=IncidentStatus.active
    )
    stable_incidents = incident_service.get_all_by_status(
        db_session=db_session, project_id=project.id, status=IncidentStatus.stable
    )
    incidents = active_incidents + stable_incidents
    # notify=True so monitor state changes fan out to subscribers.
    run_monitors(db_session, project, monitor_plugin, incidents, notify=True)
def services() -> None:
    """Services.

    NOTE(review): body is intentionally empty beyond this docstring —
    presumably a CLI command-group placeholder; confirm against the
    decorator/framework applied at the call site.
    """
def patch_d2_meta_arch():
    """
    D2Go requires interfaces like prepare_for_export/prepare_for_quant from meta-arch in
    order to do export/quant, this function applies the monkey patch to the original
    D2's meta-archs.
    """
    def _check_and_set(cls_obj, method_name, method_func):
        # Only attach when absent; if already present it must be the very
        # same function (guards against double/conflicting patching).
        if hasattr(cls_obj, method_name):
            assert getattr(cls_obj, method_name) == method_func
        else:
            setattr(cls_obj, method_name, method_func)

    def _apply_patch(dst_cls, src_cls):
        # The patch class declares which of its methods to copy over.
        assert hasattr(src_cls, "METHODS_TO_PATCH")
        for method_name in src_cls.METHODS_TO_PATCH:
            assert hasattr(src_cls, method_name)
            _check_and_set(dst_cls, method_name, getattr(src_cls, method_name))

    _apply_patch(GeneralizedRCNN, GeneralizedRCNNPatch)
    # TODO: patch other meta-archs defined in D2
def env_config(magic_dir):
    """Provides a config file with environment variables.

    Copies the platform-appropriate YAML config and a dotenv file from the
    test ``files`` directory into ``magic_dir``, yields the directory, and
    removes both files on teardown.
    """
    if os.sys.platform == 'win32':
        config_filename = 'envs_win.yaml'
    else:
        config_filename = 'envs.yaml'
    dotenv_filename = 'test.env'
    config = magic_dir / config_filename
    dotenv = magic_dir / dotenv_filename
    files_dir = Path(__file__).parent.joinpath('files')
    config.write_text(files_dir.joinpath(config_filename).read_text())
    dotenv.write_text(files_dir.joinpath(dotenv_filename).read_text())
    yield magic_dir
    # Previously `os.remove(magic_dir / config)`: joining an absolute path
    # discards the left operand, so it only worked by accident. Remove the
    # created paths directly.
    config.unlink()
    dotenv.unlink()
def UserLevelAuthEntry(val=None):
    """Provide a 2-tuple of user and level
    * user: string
    * level: oneof(ACCESS_LEVELS)
      currently: GUEST, USER, ADMIN

    Raises ValueError when ``val`` is not a 2-element sequence.
    NOTE(review): the default ``val=None`` makes ``len(val)`` raise
    TypeError — callers apparently always pass a sequence; confirm.
    """
    if len(val) != 2:
        raise ValueError('UserLevelAuthEntry entry needs to be a 2-tuple '
                         '(name, accesslevel)')
    # Reuse the (user, password, level) validator with an empty password;
    # only the validated user and level are kept.
    # pylint: disable=unbalanced-tuple-unpacking
    user, _p, level = UserPassLevelAuthEntry((val[0], '', val[1]))
    return tuple((user, level))
def schedule_registration_notif(subscriber, *args, **kwargs):
    """
    Schedule a one-off registration notification.

    :param models.Subscriber subscriber: subscriber to send notification to.
    :kwargs seconds: seconds to enqueue notification in (default 5).
    """
    delay_seconds = kwargs.pop('seconds', 5)
    payload = generate_registration_notification_payload(subscriber)
    # Hand the delayed job to the scheduler queue.
    scheduler.enqueue_in(
        timedelta(seconds=delay_seconds),
        send_notification,
        subscriber,
        **payload,
    )
def test_has_expected_attributes():
    """Ensure registers mapped to Batteries/BMS are represented in the model."""
    expected_attributes = set()
    for i in range(60):
        # Battery registers occupy input registers 60..119.
        name = InputRegister(i + 60).name.lower()
        if name.endswith('_h'):
            # presumably the high half of a 32-bit pair; the _l half
            # supplies the attribute name — confirm against the register map.
            continue
        elif name.endswith('_l'):
            name = name[:-2]
        elif name.startswith('status_') or name.startswith('warning_'):
            # Kept verbatim as individual attributes.
            pass
        elif name.endswith('_1_2'):
            # Multi-register group: first pair names the attribute ...
            name = name[:-4]
        elif name.endswith('_3_4') or name.endswith('_5_6') or name.endswith('_7_8') or name.endswith('_9_10'):
            # ... and the remaining pairs are folded into it.
            continue
        elif name.startswith('input_reg'):
            # Unnamed/reserved registers are not modelled.
            continue
        expected_attributes.add(name)
    assert expected_attributes == set(Battery.__fields__.keys())
def parseCookie(headers):
    """Parse Cookie header values into a list of Cookie objects.

    Bleargh, the cookie spec sucks.
    This surely needs interoperability testing.

    There are two specs that are supported:
    Version 0) http://wp.netscape.com/newsref/std/cookie_spec.html
    Version 1) http://www.faqs.org/rfcs/rfc2965.html

    Args:
        headers: sequence of Cookie header value strings.

    Returns:
        list of Cookie objects (version 1 for RFC2965 input, version 0
        otherwise).
    """
    cookies = []
    # There can't really be multiple cookie headers according to RFC, because
    # if multiple headers are allowed, they must be joinable with ",".
    # Neither new RFC2965 cookies nor old netscape cookies are.
    header = ';'.join(headers)
    if header[0:8].lower() == "$version":
        # RFC2965 cookie: tokenize, then split on ',' (cookie boundaries)
        # and ';' (attribute boundaries within one cookie).
        h = tokenize([header], foldCase=False)
        r_cookies = split(h, Token(','))
        for r_cookie in r_cookies:
            last_cookie = None
            rr_cookies = split(r_cookie, Token(';'))
            for cookie in rr_cookies:
                # Each piece is either "name=value" or a bare "name".
                nameval = tuple(split(cookie, Token('=')))
                if len(nameval) == 2:
                    (name,), (value,) = nameval
                else:
                    (name,), = nameval
                    value = None

                name = name.lower()
                if name == '$version':
                    continue
                if name[0] == '$':
                    # '$'-prefixed attributes modify the preceding cookie.
                    if last_cookie is not None:
                        if name == '$path':
                            last_cookie.path = value
                        elif name == '$domain':
                            last_cookie.domain = value
                        elif name == '$port':
                            # No value means "any port from the list" -> ().
                            if value is None:
                                last_cookie.ports = ()
                            else:
                                last_cookie.ports = tuple([int(s) for s in value.split(',')])
                else:
                    # A plain name starts a new cookie.
                    last_cookie = Cookie(name, value, version=1)
                    cookies.append(last_cookie)
    else:
        # Oldstyle cookies don't do quoted strings or anything sensible.
        # All characters are valid for names except ';' and '=', and all
        # characters are valid for values except ';'. Spaces are stripped,
        # however.
        r_cookies = header.split(';')
        for r_cookie in r_cookies:
            name, value = r_cookie.split('=', 1)
            name = name.strip(' \t')
            value = value.strip(' \t')
            cookies.append(Cookie(name, value))
    return cookies
def get_system_metrics():
    """Collect a snapshot of host metrics (CPU, memory, disk, network) via psutil.

    For keys in fields

    >>> from serverstats import get_system_metrics
    >>> fields = dict()
    >>> dl = get_system_metrics()
    >>> _fields = {
    ...     'cpu': ['usage_percent', 'idle_percent', 'iowait',
    ...             'avg_load_15_min', 'avg_load_5_min', 'avg_load_1_min'],
    ...     'cpu_times': ['user', 'nice', 'system', 'idle', 'iowait',
    ...                   'irq', 'softirq', 'steal', 'guest', 'guest_nice'],
    ...     'cpu_stats': ['ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls'],
    ...     'cpu_times_percent': ['user', 'nice', 'system', 'idle',
    ...                           'iowait', 'irq', 'softirq', 'steal', 'guest', 'guest_nice'],
    ...     'ram': ['total', 'available', 'percent', 'used', 'free',
    ...             'active', 'inactive', 'buffers', 'cached', 'shared', 'slab'],
    ...     'swap': ['total', 'used', 'free', 'percent', 'sin', 'sout'],
    ...     'disk': ['total', 'free', 'used', 'percent'],
    ...     'disk_partitions': ['sda1', 'sda15'],
    ...     'disk_io_counters': ['sda1', 'sda15'],
    ...     'network_traffic': ['lo', 'eth0']}
    >>> for key, value in dl.items():
    ...     lst = list()
    ...     if type(value) is dict:
    ...         for t , c in value.items():
    ...             lst.append(t)
    ...         fields[key] = lst
    ...
    >>> _fields == fields
    True
    """
    # System load averages over the last 1, 5 and 15 minutes.
    load1, load5, load15 = psutil.os.getloadavg()
    cpu_percent = psutil.cpu_percent()
    cpu_times = psutil.cpu_times()._asdict()
    cpu_stats = psutil.cpu_stats()._asdict()
    percpu_percent = psutil.cpu_percent(interval=None, percpu=True)
    cpu_times_percent = psutil.cpu_times_percent(interval=None, percpu=False)._asdict()
    cpu_count = psutil.cpu_count(logical=True)
    cpu_freq = [freq._asdict() for freq in psutil.cpu_freq(percpu=True)]
    network_traffic_info = psutil.net_io_counters(pernic=True)
    memory = psutil.virtual_memory()._asdict()
    swap_mem = psutil.swap_memory()._asdict()
    # Per-partition usage, keyed by short device name (e.g. 'sda1').
    disk_partitions = {}
    fs_types = set()
    for part in psutil.disk_partitions(all=False):
        usage = {}
        if os.name == 'nt':
            # On Windows, skip CD-ROM drives and drives without a filesystem.
            if 'cdrom' in part.opts or part.fstype == '':
                continue
        usage = part._asdict()
        usage.pop("opts")
        device = usage["device"].split("/")[-1]
        fs_types.add(device)
        _usage = psutil.disk_usage(part.mountpoint)
        disk_partitions.update({device: {**usage, **_usage._asdict()}})
    # Aggregate disk totals across all partitions; percent is averaged.
    disk = {}
    disk["total"] = 0
    disk["used"] = 0
    disk["percent"] = 0
    for key, val in disk_partitions.items():
        disk["total"] += val.get("total")
        disk["used"] += val.get("used")
        disk["percent"] += val.get("percent")
    disk["free"] = disk["total"]-disk["used"]
    # NOTE(review): raises ZeroDivisionError when no partitions were found —
    # presumably never the case on supported hosts; confirm.
    disk["percent"] = disk["percent"]/len(disk_partitions)
    # I/O counters only for devices that back a mounted partition.
    disk_io_counters = {}
    for key, val in psutil.disk_io_counters(perdisk=True, nowrap=False).items():
        if key in fs_types:
            disk_io_counters[key] = val._asdict()
    network_traffic = dict()
    for interface in network_traffic_info:
        # Skip virtual interfaces (veth pairs, docker/bridge devices).
        if any(st in interface for st in ["veth", "docker", "br"]):
            continue
        network_traffic[interface] = {
            "bytes_sent": float(network_traffic_info[interface].bytes_sent),
            "bytes_received": float(network_traffic_info[interface].bytes_recv),
            "packets_sent": float(network_traffic_info[interface].packets_sent),
            "packets_recv": float(network_traffic_info[interface].packets_recv)
        }
    net_connections = psutil.net_connections(kind='inet')
    num_pids = len(psutil.pids())
    num_users = len(psutil.users())
    return dict(
        # load_avg info
        cpu=dict(
            usage_percent=float(cpu_percent),
            idle_percent=float(100.00 - cpu_percent),
            iowait=float(cpu_times.get("iowait")),
            avg_load_15_min=float(load15),
            avg_load_5_min=float(load5),
            avg_load_1_min=float(load1),
        ),
        # cpu times
        cpu_times=cpu_times,
        # cpu stats
        cpu_stats=cpu_stats,
        # percpu pervents
        percpu_percent=percpu_percent,
        # cpu times percent
        cpu_times_percent=cpu_times_percent,
        # number of cpu
        cpu_count=cpu_count,
        # cpu frequency
        cpu_freq=cpu_freq,
        # ram info
        ram=memory,
        # swap memory info
        swap=swap_mem,
        # disk info
        disk=disk,
        # disk partitions info
        disk_partitions = disk_partitions,
        # disk io counter
        disk_io_counters = disk_io_counters,
        # network traffic
        network_traffic=network_traffic,
        # number of net connections
        num_net_connections=len(net_connections),
        # number of pids
        num_pids=num_pids,
        # number of users
        num_users=num_users
    )
def FilterKeptAttachments(
        is_description, kept_attachments, comments, approval_id):
    """Filter kept attachments to be a subset of last description's attachments.

    Args:
      is_description: bool, if the comment is a change to the issue description.
      kept_attachments: list of ints with the attachment ids for attachments
          kept from previous descriptions, if the comment is a change to the
          issue description.
      comments: list of IssueComment PBs for the issue we want to edit.
      approval_id: int id of the APPROVAL_TYPE fielddef, if we're editing an
          approval description, or None otherwise.

    Returns:
      A list of kept_attachment ids that are a subset of the last description.
    """
    if not is_description:
        return None

    # Walk newest-first to find the most recent description for this
    # approval (or for the issue itself when approval_id is None).
    allowed_ids = set()
    for comment in reversed(comments):
        if comment.is_description and comment.approval_id == approval_id:
            allowed_ids = {a.attachment_id for a in comment.attachments}
            break

    return [aid for aid in kept_attachments if aid in allowed_ids]
def test_ptyshell_file_stage () :
    """ Test pty_shell file staging: write a string to a remote temp file,
    read it back, verify round-trip, then clean up. """
    conf = rut.get_test_config ()
    shell = sups.PTYShell (saga.Url(conf.job_service_url), conf.session)
    txt = "______1______2_____3_____"
    # Round-trip the payload through the remote file system.
    shell.write_to_remote (txt, "/tmp/saga-test-staging")
    out = shell.read_from_remote ("/tmp/saga-test-staging")
    assert (txt == out) , "%s == %s" % (repr(out), repr(txt))
    # Cleanup must succeed (exit 0) and produce no output.
    ret, out, _ = shell.run_sync ("rm /tmp/saga-test-staging")
    assert (ret == 0) , "%s" % (repr(ret))
    assert (out == "") , "%s == ''" % (repr(out))
def scan_fixtures(path):
    """Scan for fixture files on the given path.

    :param path: The path to scan.
    :type path: str

    :rtype: list
    :returns: A list of three-element tuples; the app name, file name, and
        relative path.
    """
    found = list()
    for root, _dirs, filenames in os.walk(path):
        rel = root.replace(path + "/", "")
        # Static assets and theme directories never hold fixtures.
        if rel.startswith("static") or rel.startswith("theme"):
            continue
        # The app name is the parent directory of the fixtures directory.
        app_name = os.path.basename(os.path.dirname(rel))
        for filename in filenames:
            if filename.endswith(".json"):
                found.append((app_name, filename, rel))
    return found
def invocations():
    """Do an inference on a single batch of data. In this sample server, we take data as CSV, convert
    it to a pandas data frame for internal use and then convert the predictions back to CSV (which really
    just means one prediction per line, since there's a single column.

    NOTE(review): despite the paragraph above, this handler actually parses a
    JSON body with keys 'bucket', 's3_url' and 'type', and runs YOLOv4
    inference on a picture or video — the CSV description looks stale; confirm.
    """
    data = None
    print("================ INVOCATIONS =================")
    # Parse the JSON request body.
    print ("<<<< flask.request.content_type", flask.request.content_type)
    data = flask.request.data.decode('utf-8')
    data = json.loads(data)
    print(data)
    bucket = data['bucket']
    s3_url = data['s3_url']
    download_file_name = s3_url.split('/')[-1]
    print ("<<<<download_file_name ", download_file_name)
    # s3_client.download_file(bucket, s3_url, download_file_name)
    # Local test: use the provided path directly instead of the S3 download
    # (this overwrites the basename computed above).
    download_file_name= data['s3_url']
    print('Download finished!')
    # inference and send result to RDS and SQS
    print('Start to inference:')
    # LOAD MODEL
    weight = './yolov4.weights'
    names = './coco.names'
    cfg = './yolov4.cfg'
    # Make sure the model parameter files exist; stop checking at the
    # first missing one (inference below is attempted regardless).
    for i in [weight,names,cfg]:
        if os.path.exists(i):
            print ("<<<<pretrained model exists for :", i)
        else:
            print ("<<< make sure the model parameters exist for: ", i)
            break
    # Make inference on a picture
    if data['type'] == 'pic':
        print('infer pic')
        classes, confidences, boxes = yolo_infer(bucket, weight, names, cfg, download_file_name)
        print ("Done inference picture! ")
        inference_result = {
            'classes':classes.tolist(),
            'confidences':confidences.tolist(),
            'boxes':boxes.tolist()
        }
        _payload = json.dumps(inference_result,ensure_ascii=False)
    else:
        print('infer video')
        # detect_objects(bucket, weight, names, cfg, download_file_name)
        output_s3_path = 'xxxxx'
        # Run video inference in a background thread so the HTTP response
        # returns immediately.
        _thread.start_new_thread(detect_objects, (bucket, weight, names, cfg, download_file_name))
        print ("Done inference video! ")
        inference_result = {
            'vidoe':'infer is done!!',
            'output_s3_path':output_s3_path
        }
        _payload = json.dumps(inference_result,ensure_ascii=False)
    return flask.Response(response=_payload, status=200, mimetype='application/json')
def main(argv=None):
    """The main event.

    Delegates to piaplib.book.__main__.main(argv), first ensuring that
    module is imported (or re-bound from sys.modules if already loaded).
    """
    try:
        if 'piaplib.book.__main__' not in sys.modules:
            import piaplib.book.__main__
        else:
            # Re-bind the attribute from the already-loaded module.
            piaplib.book.__main__ = sys.modules["""piaplib.book.__main__"""]
        if piaplib.book.__main__.__name__ is None:
            raise ImportError("Failed to import piaplib.book.__main__")
    except Exception as importErr:
        # Fall back to a plain import attempt on any failure above.
        del importErr
        import piaplib.book.__main__
    return piaplib.book.__main__.main(argv)
def today() -> date:
    """
    **today**
        returns today's date

    :return: the present calendar date
    """
    current_moment = datetime.datetime.now()
    return current_moment.date()
def get_word_size(word,font_size):
    """Return the (width, height) dimensions of ``word`` rendered with the
    module FONT at the given font size."""
    font = ImageFont.truetype(FONT, font_size)
    return font.getsize(word)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.