| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def setxlabels(dreameqsys, ax=None):
"""
Set x labels of current plot based on the given DREAMEqsys object.
"""
if ax is None:
ax = plt.gca()
ax.set_xticks(dreameqsys.getoffsets())
ax.set_xticklabels(dreameqsys.getnames())
plt.gcf().canvas.draw()
| 15,800
|
def is_docker_reachable(docker_client):
"""
Checks if Docker daemon is running.
:param docker_client : docker.from_env() - docker client object
:returns True, if Docker is available, False otherwise.
"""
errors = (
docker.errors.APIError,
requests.exceptions.ConnectionError,
)
if platform.system() == "Windows":
import pywintypes # pylint: disable=import-error
errors += (pywintypes.error,) # pylint: disable=no-member
try:
docker_client.ping()
return True
    # When Docker is not installed, a requests.exceptions.ConnectionError is thrown,
    # as are Windows-specific errors.
except errors:
LOG.debug("Docker is not reachable", exc_info=True)
return False
| 15,801
|
def get_string(entry):
"""
    Return the part of the entry before the first " / " separator, with trailing whitespace stripped.
    :param entry: raw entry string, possibly containing a " / "-separated suffix
    :return: the extracted value string
"""
value = entry.split(" / ")[0].rstrip()
return value
| 15,802
|
def data_getfilenode(ctx, all, filespec):
"""Retrieve file(s) from a compute node"""
ctx.initialize_for_batch()
convoy.fleet.action_data_getfilenode(
ctx.batch_client, ctx.config, all, filespec)
| 15,803
|
def tabtagged(files = 'chunked', basedir= None):
"""
@param files: One or more treebank files to be processed
@type files: L{string} or L{tuple(string)}
@return: iterator over lines in Malt-TAB input format
"""
if type(files) is str: files = (files,)
if not basedir: basedir = os.environ['NLTK_DATA']
for file in files:
        path = os.path.join(basedir, "treebank", file)
f = open(path).read()
for sent in tokenize.blankline(f):
l = []
for t in tokenize.whitespace(sent):
if (t != '[' and t != ']'):
l.append(tag2tab(t))
#add a blank line as sentence separator
l.append('\n')
yield l
| 15,804
|
def read_yaml_env(fname: str) -> Any:
"""Parse YAML file with environment variable substitution.
Parameters
----------
fname : str
yaml file name.
Returns
-------
table : Any
the object returned by YAML.
"""
content = read_file(fname)
# substitute environment variables
content = string.Template(content).substitute(os.environ)
    return yaml.load(content, Loader=yaml.SafeLoader)
| 15,805
|
def get_top_words(words):
"""
    Get a list of the most frequently occurring words, together with their counts.
    :param words: list of words to analyse
    :return: [(word1, number of occurrences of word1), ...]
return collections.Counter(words).most_common()
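# A quick usage sketch of the helper above (assuming `collections` is imported,
# as the function requires):
words = "the cat sat on the mat the end".split()
print(get_top_words(words))
# -> [('the', 3), ('cat', 1), ('sat', 1), ('on', 1), ('mat', 1), ('end', 1)]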
| 15,806
|
def get_slope(x, y, L):
"""
    Return the slope of a linear fit over the first L points of the time series (x, y).
    """
    try:
        x = np.array(x).reshape(-1, 1)
        y = np.array(y).reshape(-1, 1)
        lr = LinearRegression()
        lr.fit(x[:L], y[:L])
        return lr.coef_[0][0]
    except Exception:
        return 0
| 15,807
|
def _minimize_price(price: Dict[str, Any]) -> Price:
"""
Return only the keys and values of a price the end user would be interested in.
"""
keys = ['id', 'recurring', 'type', 'currency', 'unit_amount', 'unit_amount_decimal', 'nickname',
'product', 'metadata']
return {k: price[k] for k in keys}
| 15,808
|
def test_workflow(workflow_files, monkeypatch):
"""Given a list of input files (ini_file, jsonfile, bodyfile,
    renderedfile) that are a record of the interaction with a particular
cluster, check that replay of that interaction yields the same
communication with the cluster (without actually connecting).
Specifically, the role of the input files is as follows:
* `ini_file`: Configures the job attributes (including the
settings for the cluster)
* `jsonfile`: Contains a record of the expected communication
with the cluster, and previously recorded responses
* `bodyfile`: The body of the job script
* `renderedfile`: The expected rendering of `bodyfile` for the given
backend and attributes.
"""
logger = logging.getLogger(__name__)
(ini_file, jsonfile, bodyfile, renderedfile) = workflow_files
# Monkeypatch the JobScript class to suppress actual communication in
# replay mode
monkeypatch.setenv('HOME', '/home/clusterjob_test')
def dummy_write_script(self, scriptbody, filename, remote):
filepath = os.path.split(filename)[0]
if len(filepath) > 0:
self._run_cmd(['mkdir', '-p', filepath], remote,
ignore_exit_code=False, ssh=self.ssh)
monkeypatch.setattr(JobScript, '_write_script', dummy_write_script)
# disable file transfer
monkeypatch.setattr(JobScript, '_upload_file',
staticmethod(lambda *args, **kwargs: None))
    # wrap _run_cmd to check communication against jsonfile
monkeypatch.setattr(JobScript, '_run_cmd',
staticmethod(_wrap_run_cmd(jsonfile, 'replay')))
monkeypatch.setattr(AsyncResult, '_run_cmd',
staticmethod(JobScript._run_cmd))
monkeypatch.setattr(AsyncResult, '_min_sleep_interval', 0)
# configure job script
with open(bodyfile) as in_fh:
body = in_fh.read()
jobname = 'test_clj'
for key in JOB_NAMES:
if ini_file.endswith(key):
jobname = JOB_NAMES[key]
break
job = JobScript(body, jobname=jobname)
job.read_settings(ini_file)
stdout = 'clusterjob_test.out'
job.resources['stdout'] = stdout
    if len(job.prologue) > 0:
        logger.warning("prologue will be disabled")
        job.prologue = ''
    if len(job.epilogue) > 0:
        logger.warning("epilogue will be disabled")
        job.epilogue = ''
# check that job is rendered as expected
with open(renderedfile) as in_fh:
assert str(job) == in_fh.read(), "Unexpected renderedfile"
# run through the complete workflow
_run_testing_workflow(job, prompt=False)
| 15,809
|
def project(signals, q_matrix):
"""
Project the given signals on the given space.
Parameters
----------
signals : array_like
Matrix with the signals in its rows
q_matrix : array_like
Matrix with an orthonormal basis of the space in its rows
Returns
-------
proj_signals : ndarray
Matrix with the projected signals in its rows
"""
signals = np.asarray(signals)
q_matrix = np.asarray(q_matrix)
return q_matrix.T.dot(q_matrix.dot(signals.T)).T
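# Minimal usage sketch: project a single signal onto the subspace spanned by
# the first standard basis vector (assuming numpy is imported as np).
signals = np.array([[3.0, 4.0]])      # one signal per row
q_matrix = np.array([[1.0, 0.0]])     # orthonormal basis, one basis vector per row
print(project(signals, q_matrix))     # -> [[3. 0.]]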
| 15,810
|
def all_movies():
"""
Returns all movie in the database for Movies
service
"""
movies = ut.get_movies()
if len(movies) == 0:
abort(404)
return make_response(jsonify({"movies":movies}),200)
| 15,811
|
def test():
"""Test with sample inputs.
"""
u = Universe()
world = [
[Cell(AliveState()), Cell(AliveState())],
[Cell(AliveState()), Cell(AliveState())],
]
u.seed(world)
    print('INPUT (Block Pattern)')
    print(u)
    u.nextGeneration()
    print('OUTPUT')
    print(u, '\n')
world = [
[Cell(AliveState()), Cell(AliveState()), Cell(DeadState())],
[Cell(AliveState()), Cell(DeadState()), Cell(AliveState())],
[Cell(DeadState()), Cell(AliveState()), Cell(DeadState())],
]
u.seed(world)
    print('INPUT (Boat Pattern)')
    print(u)
    u.nextGeneration()
    print('OUTPUT')
    print(u, '\n')
world = [
[Cell(DeadState()), Cell(AliveState()), Cell(DeadState())],
[Cell(DeadState()), Cell(AliveState()), Cell(DeadState())],
[Cell(DeadState()), Cell(AliveState()), Cell(DeadState())],
]
u.seed(world)
    print('INPUT (Blinker Pattern)')
    print(u)
    u.nextGeneration()
    print('OUTPUT')
    print(u, '\n')
world = [
[Cell(DeadState()), Cell(DeadState()), Cell(DeadState()), Cell(DeadState())],
[Cell(DeadState()), Cell(AliveState()), Cell(AliveState()), Cell(AliveState())],
[Cell(AliveState()), Cell(AliveState()), Cell(AliveState()), Cell(DeadState())],
[Cell(DeadState()), Cell(DeadState()), Cell(DeadState()), Cell(DeadState())]
]
u.seed(world)
    print('INPUT (Toad Pattern)')
    print(u)
    u.nextGeneration()
    print('OUTPUT')
    print(u, '\n')
    print('4 generations of Toad Pattern')
    for i in range(4):
        u.nextGeneration()
        print(u, '\n')
    print('4 generations of a Random Cell Pattern')
    u = Universe(auto=True, expand=True)
    for i in range(4):
        u.nextGeneration()
        print(u, '\n')
| 15,812
|
def remove_condition(self, node=None):
"""Check that user is able to see the table after row policy condition has been removed."""
table_name = f"table_{getuid()}"
pol_name = f"pol_{getuid()}"
if node is None:
node = self.context.node
with table(node, table_name):
with Given("I have a row policy"):
row_policy(name=pol_name, table=table_name)
with And("The row policy has a condition"):
node.query(
f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1"
)
with And("The table has some values"):
node.query(f"INSERT INTO {table_name} (y) VALUES (1)")
with When("I alter a row policy to not have a condition"):
node.query(
f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING NONE"
)
with Then("I try to select from the table"):
output = node.query(f"SELECT * FROM {table_name}").output
assert "1" in output, error()
| 15,813
|
def format_map(mapping, st):
"""
Format string st with given map.
"""
return st.format_map(mapping)
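# Trivial usage sketch of the wrapper above:
print(format_map({"name": "Ada"}, "Hello, {name}!"))   # -> Hello, Ada!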
| 15,814
|
def communities_greedy_modularity(G,f):
"""
Adds a column to the dataframe f with the community of each node.
    The communities are detected using greedy modularity.
    G: a networkx graph.
    f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
    """
    if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
| 15,815
|
def load_loglin_stats(infile_path):
"""read in data in json format"""
# convert all 'stats' to pandas data frames
with open(infile_path) as infile:
data = json.load(infile)
new_data = {}
for position_set in data:
try:
new_key = eval(position_set)
except NameError:
new_key = position_set
new_data[new_key] = {}
for key, value in list(data[position_set].items()):
if key == "stats":
value = read_json(value)
new_data[new_key][key] = value
return new_data
| 15,816
|
def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
CONV_THRESH=1.e-3,MAXIT=500):
"""
Faster than logistic_regression when there is only one predictor.
"""
    if len(x) != len(y):
        raise ValueError("x and y should be the same length!")
if beta_start is None:
beta_start = NA.zeros(2,x.dtype.char)
iter = 0; diff = 1.; beta = beta_start # initial values
    if verbose:
        print('iteration beta log-likelihood |beta-beta_old|')
while iter < MAXIT:
beta_old = beta
p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)]) # scoring function
# information matrix
J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
[NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print(iter+1, beta, l, diff)
if diff <= CONV_THRESH: break
iter = iter + 1
return beta, J_bar, l
| 15,817
|
def preemphasis(signal,coeff=0.95):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
:returns: the filtered signal.
"""
return np.append(signal[0],signal[1:]-coeff*signal[:-1])
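# Usage sketch (assuming numpy is imported as np): y[0] = x[0] and
# y[n] = x[n] - coeff * x[n-1] for n > 0.
x = np.array([1.0, 2.0, 3.0, 4.0])
print(preemphasis(x))   # filtered values: 1.0, 1.05, 1.1, 1.15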
| 15,818
|
def build_moses_tokenizer(tokenizer: MosesTokenizerSpans,
normalizer: MosesPunctNormalizer = None) -> Callable[[str], List[Token]]:
"""
    Wrap a Moses tokenizer to build a tokenizer for the Sentence class.
    :param tokenizer: a MosesTokenizerSpans instance
    :param normalizer: an optional MosesPunctNormalizer applied before tokenization
    :return: a tokenizer function to provide to the Sentence class constructor
"""
try:
from sacremoses import MosesTokenizer
from sacremoses import MosesPunctNormalizer
except ImportError:
raise ImportError(
"Please install sacremoses or better before using the Spacy tokenizer, otherwise you can use segtok_tokenizer as advanced tokenizer."
)
moses_tokenizer: MosesTokenizerSpans = tokenizer
if normalizer:
normalizer: MosesPunctNormalizer = normalizer
def tokenizer(text: str) -> List[Token]:
if normalizer:
text = normalizer.normalize(text=text)
doc = moses_tokenizer.span_tokenize(text=text, escape=False)
previous_token = None
tokens: List[Token] = []
for word, (start_pos, end_pos) in doc:
word: str = word
token = Token(
text=word, start_position=start_pos, whitespace_after=True
)
tokens.append(token)
if (previous_token is not None) and (
token.start_pos - 1
== previous_token.start_pos + len(previous_token.text)
):
previous_token.whitespace_after = False
previous_token = token
return tokens
return tokenizer
| 15,819
|
def setup_lithops_logger(log_level=constants.LOGGER_LEVEL,
log_format=constants.LOGGER_FORMAT,
stream=None, filename=None):
"""Setup logging for lithops."""
if log_level is None or str(log_level).lower() == 'none':
return
if stream is None:
stream = constants.LOGGER_STREAM
if filename is None:
filename = os.devnull
if type(log_level) is str:
log_level = logging.getLevelName(log_level.upper())
config_dict = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': log_format
},
},
'handlers': {
'console_handler': {
'level': log_level,
'formatter': 'standard',
'class': 'logging.StreamHandler',
'stream': stream
},
'file_handler': {
'level': log_level,
'formatter': 'standard',
'class': 'logging.FileHandler',
'filename': filename,
'mode': 'a',
},
},
'loggers': {
'lithops': {
'handlers': ['console_handler'],
'level': log_level,
'propagate': False
},
}
}
    if filename != os.devnull:
config_dict['loggers']['lithops']['handlers'] = ['file_handler']
logging.config.dictConfig(config_dict)
| 15,820
|
def callback_query_stats(update: Update, context: CallbackContext):
"""
generate json file and send it back to poll's owner.
"""
query: CallbackQuery = update.callback_query
poll_id = int(context.match.groups()[0])
poll = Poll.load(poll_id)
if poll.owner.id != query.from_user.id:
logger.debug("user id %d attempted to access stats on poll id %d owner %d",
query.from_user.id, poll.id, poll.owner.id)
return
message: Message = query.message
# select
data = {
'answers': [{
'id': answer.id,
'text': answer.text,
'voters': {
'total': len(answer.voters()),
'_': [{
k: v
for k, v in {
'id': voter.id,
'first_name': voter.first_name,
'last_name': voter.last_name,
'username': voter.username,
}.items()
if v
} for voter in answer.voters()]
}
} for answer in poll.answers()]
}
content = json.dumps(data, indent=4, ensure_ascii=False)
raw = BytesIO(content.encode('utf-8'))
name = "statistics for poll #{}.json".format(poll.id)
context.bot.send_document(poll.owner.id, raw, filename=name)
query.answer()
| 15,821
|
def print_n(s: str, n: int):
"""Exibe na tela 'n' vezes a string 's'.
Args:
s (str): String fornecida pelo usuário.
n (int): Número de vezes que string será exibida.
"""
if n <= 0:
return # nota: se não dermos um valor para a instrução return, a função retorna None
else:
print(s)
print_n(s, n - 1)
| 15,822
|
def horizontal_block_reduce(
obj: T_DataArray_or_Dataset,
coarsening_factor: int,
reduction_function: Callable,
x_dim: Hashable = "xaxis_1",
y_dim: Hashable = "yaxis_1",
coord_func: Union[str, CoordFunc] = coarsen_coords_coord_func,
) -> T_DataArray_or_Dataset:
"""A generic horizontal block reduce function for xarray data structures.
This is a convenience wrapper around block_reduce for applying coarsening
over n x n patches of array elements. It should only be used if a dask
implementation of the reduction method has not been implemented (e.g. for
median) or if a custom reduction method is used that is not implemented in
xarray. Otherwise, block_coarsen should be used.
Args:
obj: Input Dataset or DataArray.
coarsening_factor: Integer coarsening factor to use.
reduction_function: Array reduction function which accepts a tuple of
axes to reduce along.
x_dim: x dimension name (default 'xaxis_1').
y_dim: y dimension name (default 'yaxis_1').
coord_func: function that is applied to the coordinates, or a
mapping from coordinate name to function. See `xarray's coarsen
method for details
<http://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html>`_.
Returns:
xr.Dataset or xr.DataArray.
"""
block_sizes = {x_dim: coarsening_factor, y_dim: coarsening_factor}
return xarray_block_reduce(
obj, block_sizes, reduction_function, coord_func=coord_func,
)
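# Illustration only (not the project's xarray implementation): the same kind of
# n x n block reduction on a plain numpy array, here taking the median over
# 2 x 2 patches, the example the docstring gives of a reduction without a dask
# implementation. Assumes numpy is imported as np.
a = np.arange(16, dtype=float).reshape(4, 4)
factor = 2
blocks = a.reshape(4 // factor, factor, 4 // factor, factor)
coarse = np.median(blocks, axis=(1, 3))   # one value per 2 x 2 patch
print(coarse)                             # 2 x 2 array of patch medians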
| 15,823
|
def statements_api(context, request):
"""List all the statements for a period."""
dbsession = request.dbsession
owner = request.owner
owner_id = owner.id
period = context.period
inc_case = case([(AccountEntry.delta > 0, AccountEntry.delta)], else_=None)
dec_case = case([(AccountEntry.delta < 0, AccountEntry.delta)], else_=None)
statement_rows = (
dbsession.query(
Statement.id,
Statement.source,
Statement.filename,
func.count(inc_case).label('inc_count'),
func.count(dec_case).label('dec_count'),
func.sum(inc_case).label('inc_total'),
func.sum(dec_case).label('dec_total'),
)
.outerjoin(AccountEntry, AccountEntry.statement_id == Statement.id)
.filter(
Statement.owner_id == owner_id,
Statement.file_id == period.file_id,
Statement.period_id == period.id,
)
.group_by(Statement.id)
.order_by(Statement.id)
.all()
)
statements = [{
'id': str(row.id),
'source': row.source,
'filename': row.filename,
'inc_count': row.inc_count,
'dec_count': row.dec_count,
'inc_total': row.inc_total,
'dec_total': row.dec_total,
} for row in statement_rows]
now = dbsession.query(now_func).scalar()
return {
'now': now,
'statements': statements,
}
| 15,824
|
def fp(x):
"""Function used in **v(a, b, th, nu, dimh, k)** for **analytic_solution_slope()**
    :param x: input value(s)
    :type x: float or numpy.ndarray
    :return: fp value
    :rtype: complex or numpy.ndarray
"""
rx = np.sqrt(x * 2 / np.pi)
s_fresnel, c_fresnel = sp.fresnel(rx)
return - 2 * 1j * np.sqrt(x) * np.exp(-1j * x) * np.sqrt(np.pi / 2.) \
* (.5 - c_fresnel + 1j * (.5 - s_fresnel))
| 15,825
|
def get_truck_locations(given_address):
"""
Get the location of the food trucks in Boston TODAY within 1 mile
of a given_address
    :param given_address: a dict with 'x' and 'y' coordinates
:return: a list of features with unique food truck locations
"""
formatted_address = '{x_coordinate}, {y_coordinate}'.format(
x_coordinate=given_address['x'],
y_coordinate=given_address['y']
)
QUERY["geometry"] = formatted_address
trucks = gis_utils.get_features_from_feature_server(BASE_URL, QUERY)
truck_unique_locations = []
for t in trucks:
if t['attributes']['Day'] == DAY:
truck_unique_locations.append(t)
return truck_unique_locations
| 15,826
|
def _getRelevantKwds(method, kwds):
"""return kwd args for the given method, and remove them from the given kwds"""
    import inspect
    import warnings
    argspec = inspect.getfullargspec(method)
d = dict()
for a in kwds:
if a not in argspec.args:
warnings.warn("Unrecognized kwd: {!r}".format(a))
for a in argspec.args:
if a in kwds:
d[a] = kwds[a]
del kwds[a]
return d
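# Usage sketch: split a kwargs dict into the part a callable accepts and the
# rest (assumes the `warnings` module is available to the helper above).
def resize(width=None, height=None):
    pass

kwds = {"width": 200, "height": 100, "colour": "red"}
resize_kwds = _getRelevantKwds(resize, kwds)
# resize_kwds == {"width": 200, "height": 100}; kwds is left with {"colour": "red"}
# (a warning is emitted for the unrecognized "colour" key)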
| 15,827
|
def heatmap_contingency_triggers_01(df_devs=None, df_acts=None, df_con_tab_01=None, figsize=None,
idle=True, z_scale=None, numbers=None, file_path=None
):
"""
Plot the device on and off triggers against the activities
Parameters
----------
df_devs : pd.DataFrame, optional
recorded devices from a dataset. For more information refer to
:ref:`user guide<device_dataframe>`. If the parameter *df_devs* is not set,
the parameter *df_con_tab* has to be set.
df_acts : pd.DataFrame, optional
recorded activities from a dataset. Fore more information refer to the
:ref:`user guide<activity_dataframe>`. If the parameter *df_acts* is not set,
the parameter *df_con_tab* has to be set.
df_con_tab_01 : pd.DataFrame, optional
A precomputed contingency table. If the *df_con_tab* parameter is given, parameters
*df_acts* and *df_devs* are ignored. The contingency table can be computed
in :ref:`stats <stats_dna_con_dur>`.
figsize : (float, float), optional
        width, height in inches. If not provided, the figsize is inferred automatically.
z_scale : {"log", "linear"}, default: 'log'
The axis scale type to apply.
numbers : bool, default: True
Whether to display numbers inside the heatmaps fields or not.
idle : bool, default: False
Determines whether gaps between activities should be assigned
the activity *idle* or be ignored.
file_path : str, optional
If set, saves the plot under the given file path and return *None* instead
of returning the figure.
Examples
--------
>>> from pyadlml.plot import plot_hm_contingency_trigger_01
>>> plot_hm_contingency_trigger_01(data.df_devs, data.df_activities)
.. image:: ../_static/images/plots/cont_hm_trigger_01.png
:height: 300px
:width: 500 px
:scale: 100 %
:alt: alternate text
:align: center
Returns
-------
fig : Figure or None
If the parameter file_path is specified, the method return None rather than a matplotlib figure.
"""
assert (df_devs is not None and df_acts is not None) or df_con_tab_01 is not None
title = 'On/Off triggers'
cbarlabel = 'counts'
textcolors = ("white", "black")
log = (z_scale == 'log')
# if log than let automatically infer else
valfmt = (None if log else "{x:.0f}")
if df_con_tab_01 is None:
df_con = contingency_triggers_01(df_devs.copy(), df_acts, idle=idle)
else:
df_con = df_con_tab_01.copy()
# rename labels
df_con = df_con.reset_index(drop=False)
df_con['index'] = df_con['index'].apply(lambda x: x if "Off" in x else "On")
df_con = df_con.set_index('index')
vals = df_con.values.T
acts = df_con.columns
devs = list(df_con.index)
heatmap_contingency(acts, devs, vals, title, cbarlabel, textcolors=textcolors,
valfmt=valfmt, z_scale=z_scale, numbers=numbers, figsize=figsize)
| 15,828
|
def find_overview_details(park_code):
""" Find overview details from park code """
global API_KEY
fields = "&fields=images,entranceFees,entrancePasses,operatingHours,exceptions"
url = "https://developer.nps.gov/api/v1/parks?parkCode=" + park_code + "&api_key=" + API_KEY + fields
response = requests.get(url)
json_object = response.json()
overview = json_object['data']
return {'overview': overview}
| 15,829
|
def test_command_line_with_vowel_preserve_case():
""" foo -> fii """
out = getoutput(f'{prg} "APPLES AND BANANAS" --vowel i')
assert out.strip() == 'IPPLIS IND BININIS'
| 15,830
|
def p_expression_ID(p):
"""expression : FIELD operation value
"""
lookup = compa2lookup[p[2]]
try:
field = get_shortcut(p[1])
except KeyError:
field = p[1]
if lookup:
field = '%s__%s' % (field, lookup)
# In some situations (which ones?), python
# refuses unicode strings as dict keys for
# Q(**d)
field = str(field)
d = {field: p[3]}
p[0] = Q(**d)
| 15,831
|
def embed_nomenclature(
D,
embedding_dimension,
loss="rank",
n_steps=1000,
lr=10,
momentum=0.9,
weight_decay=1e-4,
ignore_index=None,
):
"""
Embed a finite metric into a target embedding space
Args:
D (tensor): 2D-cost matrix of the finite metric
embedding_dimension (int): dimension of the target embedding space
loss (str): embedding loss to use distortion base (loss='disto') or rank based (loss='rank')
n_steps (int): number of gradient iterations
lr (float): learning rate
momentum (float): momentum
weight_decay (float): weight decay
Returns:
embedding (tensor): embedding of each vertex of the finite metric space, shape n_vertex x embedding_dimension
"""
n_vertex = D.shape[0]
mapping = torch.rand(
(n_vertex, embedding_dimension), requires_grad=True, device=D.device
)
if loss == "rank":
crit = RankLoss(D, n_triplets=1000)
elif loss == "disto":
crit = DistortionLoss(D, scale_free=False)
else:
raise ValueError
optimizer = torch.optim.SGD(
[mapping], lr=lr, momentum=momentum, weight_decay=weight_decay
)
print("Embedding nomenclature . . .")
for i in range(n_steps):
loss = crit(mapping)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        print(
            "Step {}: loss {:.4f} ".format(i + 1, loss.cpu().detach().numpy()), end="\r"
        )
print("Final loss {:.4f}".format(crit(mapping).cpu().detach().numpy()))
return mapping.detach()
| 15,832
|
def blitLevelData(playerList, level, goldCount, time, animate=False):
"""Draw the level data to the screen.
This includes the time remaining, gold remaining, players' lives, black hole sprites, and the level image.
Black hole sprites are included, as they are the only sprites drawn before the level begins.
If animate is True, this also includes all gold sprites.
Args:
playerList: A list of all PlayerSprite objects in the game.
level: A Level object representing the current level being played.
goldCount: An integer representing how many gold sprites are currently unrevealed (either invisible or
face-down).
time: An integer representing the time the players have remaining to complete the level.
animate: A boolean indicating if this function should update the gold and black hole sprites, causing
them to be animated.
This is only to be set to True when the level is completed or all players have run out of lives, as
the main playLevel function automatically updates all sprites on its own in all other cases.
"""
playerLivesData = []
for num, player in enumerate(playerList):
playerLivesData.append([c.FONT.render("<", False, playerFontColors[num]),
c.FONT.render("{}".format(min(player.lives, 9)), False, c.WHITE),
c.FONT.render(">", False, playerFontColors[num])])
timeText = c.FONT.render("TIME,{:03d}".format(time), False, c.WHITE)
# The location of where the players' lives are shown depends on the number of players.
# If there are one or two players, player one's lives are displayed on the left and player two's on the right
# If there are three or four players, player one's lives are displayed on the far left, player two's on the
# mid-left, player three's on the mid-right, and player four's on the far right.
if len(playerList) < 3:
livesDataCoordinates = [(42, 16), (428, 16)]
else:
livesDataCoordinates = [(5, 16), (62, 16), (408, 16), (467, 16)]
c.SCREEN.blit(level.image, (0, 0))
# Bonus levels blit the time count in a different location, and blit the word 'BONUS!' instead of the gold
# count (Also in a different location from the standard gold count location).
if isinstance(level, BonusLevel):
bonusWordText = c.FONT.render("BONUS!", False, c.WHITE)
c.SCREEN.blit(bonusWordText, (210, 210))
c.SCREEN.blit(timeText, (192, 242))
else:
goldText = c.FONT.render("LAST,{:02d}".format(goldCount), False, c.WHITE)
c.SCREEN.blit(goldText, (132, 16))
c.SCREEN.blit(timeText, (262, 16))
if animate:
GoldSprite.globalFrameCount += 1
for hole in c.blackHoleGroup:
hole.update()
for gold in c.goldGroup:
gold.update()
c.SCREEN.blit(gold.image, gold.coordinates)
for textSprite in c.textGroup:
textSprite.update()
c.SCREEN.blit(textSprite.image, textSprite.coordinates)
for hole in c.blackHoleGroup:
c.SCREEN.blit(hole.image, hole.coordinates)
for trap in c.rubberGroup:
c.SCREEN.blit(trap.image, trap.coordinates)
    # Because the < > symbols should be slightly closer to the number of lives than the standard text width would
    # allow, the life count is placed 13 pixels after the <, and the > is placed 15 pixels after the life count.
for fontData, coords in zip(playerLivesData, livesDataCoordinates):
for num, text in enumerate(fontData):
c.SCREEN.blit(text, coords)
coords = (coords[0] + 13, coords[1]) if num == 0 else (coords[0] + 15, coords[1])
| 15,833
|
def raw_env():
"""
To support the AEC API, the raw_env() function just uses the from_parallel
function to convert from a ParallelEnv to an AEC env
"""
env = parallel_env()
env = parallel_to_aec(env)
return env
| 15,834
|
def cmd_line(preprocessor: Preprocessor, args: str) -> str:
"""the line command - prints the current line number"""
if args.strip() != "":
preprocessor.send_warning("extra-arguments", "the line command takes no arguments")
context = preprocessor.context.top
pos = context.true_position(preprocessor.current_position.begin)
return str(context.file.line_number(pos)[0])
| 15,835
|
def install_from_zip(pkgpath, install_path, register_func, delete_after_install=False):
"""Install plugin from zipfile."""
logger.debug("%s is a file, attempting to load zip", pkgpath)
pkgtempdir = tempfile.mkdtemp(prefix="honeycomb_")
try:
with zipfile.ZipFile(pkgpath) as pkgzip:
pkgzip.extractall(pkgtempdir)
except zipfile.BadZipfile as exc:
logger.debug(str(exc))
raise click.ClickException(str(exc))
if delete_after_install:
logger.debug("deleting %s", pkgpath)
os.remove(pkgpath)
logger.debug("installing from unzipped folder %s", pkgtempdir)
return install_dir(pkgtempdir, install_path, register_func, delete_after_install=True)
| 15,836
|
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
| 15,837
|
def create_hash256(max_length=None):
"""
Generate a hash that can be used as an application secret
    Warning: this is not sufficiently secure for tasks like encryption
Currently, this is just meant to create sufficiently random tokens
"""
hash_object = hashlib.sha256(force_bytes(get_random_string(32)))
hash_object.update(force_bytes(settings.SECRET_KEY))
output_hash = hash_object.hexdigest()
if max_length is not None and len(output_hash) > max_length:
return output_hash[:max_length]
return output_hash
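# For context, a standard-library-only sketch of the same idea (hash a random
# token together with an application secret). This is an illustration, not the
# Django-based implementation above, and the helper name here is made up.
import hashlib
import secrets

def create_hash256_stdlib(secret_key: str, max_length=None):
    h = hashlib.sha256(secrets.token_urlsafe(32).encode())
    h.update(secret_key.encode())
    digest = h.hexdigest()
    return digest[:max_length] if max_length else digest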
| 15,838
|
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
b2caps=None, **kwargs):
"""add parts containing listkeys namespaces to the requested bundle"""
listkeys = kwargs.get('listkeys', ())
for namespace in listkeys:
part = bundler.newpart('listkeys')
part.addparam('namespace', namespace)
keys = repo.listkeys(namespace).items()
part.data = pushkey.encodekeys(keys)
| 15,839
|
def test_get_exectype_from_xmlfile_with_unsupported_type():
"""Gets the exectype values for testcases from the testsuite.xml file """
filepath = os.path.join(os.path.split(__file__)[0], "exmp_suite_file.xml")
exectype = xml_Utils.getChildAttributebyParentTag = MagicMock(return_value='junk_value')
result = testsuite_utils.get_exectype_from_xmlfile(filepath)
assert result == 'sequential_testcases'
del xml_Utils.getChildAttributebyParentTag
| 15,840
|
def assert_content_in_file(file_name, expected_content):
"""
Fabric assertion: Check if some text is in the specified file (result of installing a test product)
Provision dir: PROVISION_ROOT_PATH
:param file_name: File name
:param expected_content: String to be found in file
:return: True if given content is in file (dir: PROVISION_ROOT_PATH).
"""
file_path = PROVISION_ROOT_PATH.format(file_name)
fd = StringIO()
get(file_path, fd)
file_content = fd.getvalue()
return expected_content in file_content
| 15,841
|
def generate_substrate_fasta(df):
""" gemerates fasta sequence files containing sequences of
all proteins that contain phosphosites that do not have kinase
annotations in PSP or Networkin. The outputs of the function
will be used as input to run Networkin locally and predict kinases
Parameters
----------
df : pandas dataframe
subset of phoproteomics data (metadata) that do
not have kinase annotations
Returns
-------
substrate_fasta : list of strings
each pair of elements in the list is a uniprot id (eg: '>P01345')
followed by the sequence
df2 : pandas dataframe
dataframe with uniprot id, amino acid and site of each phosphosite
"""
substrate_fasta = []
ids, aa, pos = [], [], []
obsolete_entries = []
for ind, substrate in enumerate(df.Uniprot_Id.tolist()):
r = requests.get('http://www.uniprot.org/uniprot/%s.fasta' %
substrate)
# substrate_fasta.append(r.text)
seq_lines = r.text.split('\n')
sequence = ''.join(seq_lines[1:])
id_line = seq_lines[0]
try:
# id = re.search('>(.*)HUMAN', id_line).group(1) + 'HUMAN'
            id = re.search(r'>(?:sp|tr)\|(.*)\|', id_line).group(1)
ids.append(id)
# seq_lines[0] = id
substrate_fasta.append(">%s\n%s\n" % (id, sequence))
site = df.Site.iloc[ind]
aa.append(site[0])
pos.append(site[1:])
except AttributeError:
obsolete_entries.append(substrate)
df2 = pd.DataFrame(list(zip(ids, pos, aa)))
if obsolete_entries:
with open(os.path.join(resource_path,
'obsolete_entries.txt'), 'a') as f:
for s in list(set(obsolete_entries)):
f.write("%s\n" % s)
return substrate_fasta, df2
| 15,842
|
def _calculate_hwp_storage_fut(
hwp_shapes, base_dataset_uri, c_hwp_uri, bio_hwp_uri, vol_hwp_uri,
yr_cur, yr_fut, process_pool=None):
"""Calculates carbon storage, hwp biomassPerPixel and volumePerPixel due to
harvested wood products in parcels on current landscape.
hwp_shapes - a dictionary containing the current and/or future harvest
maps (or nothing)
hwp_shapes['cur'] - oal shapefile indicating harvest map from the
current landscape
hwp_shapes['fut'] - oal shapefile indicating harvest map from the
future landscape
c_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for current calculation
bio_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for land cover under interest
vol_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for land cover under interest
yr_cur - year of the current landcover map
    yr_fut - year of the future landcover map
process_pool - a process pool for parallel processing (can be None)
No return value"""
############### Start
pixel_area = pygeoprocessing.geoprocessing.get_cell_size_from_uri(base_dataset_uri) ** 2 / 10000.0 #convert to Ha
nodata = -5.0
c_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
bio_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
vol_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, c_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, bio_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, vol_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
#Create a temporary shapefile to hold values of per feature carbon pools
#HWP biomassPerPixel and volumePerPixel, will be used later to rasterize
#those values to output rasters
calculatedAttributeNames = ['c_hwp_pool', 'bio_hwp', 'vol_hwp']
if 'cur' in hwp_shapes:
hwp_shape = ogr.Open(hwp_shapes['cur'])
hwp_shape_copy = \
ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
hwp_shape_layer_copy = \
hwp_shape_copy.GetLayer()
#Create fields in the layers to hold hardwood product pools,
#biomassPerPixel and volumePerPixel
for fieldName in calculatedAttributeNames:
field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
hwp_shape_layer_copy.CreateField(field_def)
#Visit each feature and calculate the carbon pool, biomassPerPixel,
#and volumePerPixel of that parcel
for feature in hwp_shape_layer_copy:
#This makes a helpful dictionary to access fields in the feature
#later in the code
field_args = _get_fields(feature)
#If start date and/or the amount of carbon per cut is zero, it
#doesn't make sense to do any calculation on carbon pools or
#biomassPerPixel/volumePerPixel
if field_args['start_date'] != 0 and field_args['cut_cur'] != 0:
time_span = (yr_fut + yr_cur) / 2.0 - field_args['start_date']
start_years = yr_fut - field_args['start_date']
#Calculate the carbon pool due to decaying HWP over the
#time_span
feature_carbon_storage_per_pixel = (
pixel_area * _carbon_pool_in_hwp_from_parcel(
field_args['cut_cur'], time_span, start_years,
field_args['freq_cur'], field_args['decay_cur']))
                #Calculate biomassPerPixel and volumePerPixel of harvested wood
numberOfHarvests = \
math.ceil(time_span / float(field_args['freq_cur']))
#The measure of biomass is in terms of Mg/ha
biomassInFeaturePerArea = field_args['cut_cur'] * \
numberOfHarvests / float(field_args['c_den_cur'])
biomassPerPixel = biomassInFeaturePerArea * pixel_area
volumePerPixel = biomassPerPixel / field_args['bcef_cur']
#Copy biomassPerPixel and carbon pools to the temporary
#feature for rasterization of the entire layer later
for field, value in zip(calculatedAttributeNames,
[feature_carbon_storage_per_pixel,
biomassPerPixel, volumePerPixel]):
feature.SetField(feature.GetFieldIndex(field), value)
#This saves the changes made to feature back to the shape layer
hwp_shape_layer_copy.SetFeature(feature)
#burn all the attribute values to a raster
for attributeName, raster_uri in zip(calculatedAttributeNames,
[c_hwp_cur_uri, bio_hwp_cur_uri, vol_hwp_cur_uri]):
nodata = -1.0
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, raster_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
raster = gdal.Open(raster_uri, gdal.GA_Update)
gdal.RasterizeLayer(raster, [1], hwp_shape_layer_copy, options=['ATTRIBUTE=' + attributeName])
raster.FlushCache()
raster = None
#handle the future term
if 'fut' in hwp_shapes:
hwp_shape = ogr.Open(hwp_shapes['fut'])
hwp_shape_copy = \
ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
hwp_shape_layer_copy = \
hwp_shape_copy.GetLayer()
#Create fields in the layers to hold hardwood product pools,
#biomassPerPixel and volumePerPixel
for fieldName in calculatedAttributeNames:
field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
hwp_shape_layer_copy.CreateField(field_def)
#Visit each feature and calculate the carbon pool, biomassPerPixel,
#and volumePerPixel of that parcel
for feature in hwp_shape_layer_copy:
#This makes a helpful dictionary to access fields in the feature
#later in the code
field_args = _get_fields(feature)
#If start date and/or the amount of carbon per cut is zero, it
#doesn't make sense to do any calculation on carbon pools or
#biomassPerPixel/volumePerPixel
if field_args['cut_fut'] != 0:
time_span = yr_fut - (yr_fut + yr_cur) / 2.0
start_years = time_span
#Calculate the carbon pool due to decaying HWP over the
#time_span
feature_carbon_storage_per_pixel = pixel_area * \
_carbon_pool_in_hwp_from_parcel(
field_args['cut_fut'], time_span, start_years,
field_args['freq_fut'], field_args['decay_fut'])
                #Calculate biomassPerPixel and volumePerPixel of harvested wood
numberOfHarvests = \
math.ceil(time_span / float(field_args['freq_fut']))
biomassInFeaturePerArea = field_args['cut_fut'] * \
numberOfHarvests / float(field_args['c_den_fut'])
biomassPerPixel = biomassInFeaturePerArea * pixel_area
volumePerPixel = biomassPerPixel / field_args['bcef_fut']
#Copy biomassPerPixel and carbon pools to the temporary
#feature for rasterization of the entire layer later
for field, value in zip(calculatedAttributeNames,
[feature_carbon_storage_per_pixel,
biomassPerPixel, volumePerPixel]):
feature.SetField(feature.GetFieldIndex(field), value)
#This saves the changes made to feature back to the shape layer
hwp_shape_layer_copy.SetFeature(feature)
#burn all the attribute values to a raster
for attributeName, (raster_uri, cur_raster_uri) in zip(
calculatedAttributeNames, [(c_hwp_uri, c_hwp_cur_uri), (bio_hwp_uri, bio_hwp_cur_uri), (vol_hwp_uri, vol_hwp_cur_uri)]):
temp_filename = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.new_raster_from_base_uri(
base_dataset_uri, temp_filename, 'GTiff',
nodata, gdal.GDT_Float32, fill_value=nodata)
temp_raster = gdal.Open(temp_filename, gdal.GA_Update)
gdal.RasterizeLayer(temp_raster, [1], hwp_shape_layer_copy,
options=['ATTRIBUTE=' + attributeName])
temp_raster.FlushCache()
temp_raster = None
#add temp_raster and raster cur raster into the output raster
nodata = -1.0
base_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
raster_uri)
cur_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
cur_raster_uri)
def add_op(base, current):
"""add two rasters"""
nodata_mask = (base == base_nodata) | (current == cur_nodata)
return numpy.where(nodata_mask, nodata, base+current)
pixel_size_out = (
pygeoprocessing.geoprocessing.get_cell_size_from_uri(
raster_uri))
pygeoprocessing.geoprocessing.vectorize_datasets(
[cur_raster_uri, temp_filename], add_op, raster_uri,
gdal.GDT_Float32, nodata,
pixel_size_out, "intersection", dataset_to_align_index=0,
vectorize_op=False)
| 15,843
|
def draw_boxes(
img, bbox_tlbr, class_prob=None, class_idx=None, class_names=None
):
"""
Draw bboxes (and class names or indices for each bbox) on an image.
Bboxes are drawn in-place on the original image.
If `class_prob` is provided, the prediction probability for each bbox
will be displayed along with the bbox. If `class_idx` is provided, the
class index of each bbox will be displayed along with the bbox. If both
`class_idx` and `class_names` are provided, `class_idx` will be used to
determine the class name for each bbox and the class name of each bbox
will be displayed along with the bbox.
If `class_names` is provided, a unique color is used for each class.
Args:
img (np.ndarray): Image on which to draw bboxes.
bbox_tlbr (np.ndarray): Mx4 array of M detections.
class_prob (np.ndarray): Array of M elements corresponding to predicted
class probabilities for each bbox.
class_idx (np.ndarray): Array of M elements corresponding to the
class index with the greatest probability for each bbox.
class_names (list): List of all class names in order.
"""
    colors = None
    if class_names is not None:
        num_colors = len(class_names)
        colors = list(unique_colors(num_colors))
for i, (tl_x, tl_y, br_x, br_y) in enumerate(bbox_tlbr):
bbox_text = []
if colors is not None:
color = colors[class_idx[i]]
else:
color = (0, 255, 0)
if class_names is not None:
bbox_text.append(class_names[class_idx[i]])
elif class_idx is not None:
bbox_text.append(str(class_idx[i]))
if class_prob is not None:
bbox_text.append("({:.2f})".format(class_prob[i]))
bbox_text = " ".join(bbox_text)
cv2.rectangle(
img, (tl_x, tl_y), (br_x, br_y), color=color, thickness=2
)
if bbox_text:
cv2.rectangle(
img, (tl_x + 1, tl_y + 1),
(tl_x + int(8 * len(bbox_text)), tl_y + 18),
color=(20, 20, 20), thickness=cv2.FILLED
)
cv2.putText(
img, bbox_text, (tl_x + 1, tl_y + 13),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), thickness=1
)
| 15,844
|
def test_function(client: MsGraphClient, args):
"""
Performs basic GET request to check if the API is reachable and authentication is successful.
Returns ok if successful.
"""
response = client.ms_client.http_request(
method='GET', url_suffix='security/alerts', params={'$top': 1}, resp_type='response')
try:
data = response.json() if response.text else {}
if not response.ok:
return_error(f'API call to MS Graph Security failed. Please check authentication related parameters.'
f' [{response.status_code}] - {demisto.get(data, "error.message")}')
params: dict = demisto.params()
if params.get('isFetch'):
fetch_time = params.get('fetch_time', '1 day')
fetch_providers = params.get('fetch_providers', '')
fetch_filter = params.get('fetch_filter', '')
filter_query = create_filter_query(fetch_filter, fetch_providers)
timestamp_format = '%Y-%m-%dT%H:%M:%S.%fZ'
time_from = parse_date_range(fetch_time, date_format=timestamp_format)[0]
time_to = datetime.now().strftime(timestamp_format)
try:
client.search_alerts(last_modified=None, severity=None, category=None, vendor=None, time_from=time_from,
time_to=time_to, filter_query=filter_query)['value']
except Exception as e:
if 'Invalid ODATA query filter' in e.args[0]:
raise DemistoException("Wrong filter format, correct usage: {property} eq '{property-value}'"
"\n\n" + e.args[0])
raise e
return 'ok', None, None
except TypeError as ex:
demisto.debug(str(ex))
return_error(f'API call to MS Graph Security failed, could not parse result. '
f'Please check authentication related parameters. [{response.status_code}]')
| 15,845
|
def load(path='db'):
"""Recursivly load a db directory"""
if not os.path.isabs(path):
path = os.path.abspath(path)
env["datastore"].update({
"type": "yamldir",
"path": path,
})
return loaddir(path)
| 15,846
|
def wcxf2arrays_symmetrized(d):
"""Convert a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values to a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values.
In contrast to `wcxf2arrays`, here the numpy arrays fulfill the same
symmetry relations as the operators (i.e. they contain redundant entries)
and they do not contain undefined indices.
Zero arrays are added for missing coefficients."""
C = wcxf2arrays(d)
C = symmetrize_nonred(C)
C = add_missing(C)
return C
| 15,847
|
def main(port, dir):
"""Load [Fog of World] data from DIR.
DIR is the path to the [Fog of World] folder, the given folder should contain a subfolder [Sync].
"""
fog_map = parser.FogMap(dir)
tmp_folder = tempfile.TemporaryDirectory()
def exit_handler():
tmp_folder.cleanup()
atexit.register(exit_handler)
m = folium.Map()
with Pool(4) as pool:
pool.starmap(generate_image, zip(fog_map.tile_map.values(), itertools.repeat(tmp_folder)))
for tile in fog_map.tile_map.values():
folium.raster_layers.ImageOverlay(
"http://127.0.0.1:{}/{}.png".format(port, tile.id), tile.bounds(), opacity=1).add_to(m)
print(tile.bounds())
m.save(os.path.join(tmp_folder.name, "index.html"))
serve_http(port, tmp_folder.name)
| 15,848
|
def prepare_storage_paths(): # pragma: no cover
"""Ensures that the folder structure exists."""
if not os.path.exists(HELHEIMR_LOG_DIR):
_logger.info(f'Creating log folder {HELHEIMR_LOG_DIR}.')
os.makedirs(HELHEIMR_LOG_DIR)
else:
_logger.info(f'All logs will be stored to {HELHEIMR_LOG_DIR}.')
if not os.path.exists(HELHEIMR_STATE_DIR):
_logger.info(f'Creating state storage folder {HELHEIMR_STATE_DIR}.')
os.makedirs(HELHEIMR_STATE_DIR)
else:
_logger.info(f'Heating system states will be stored to {HELHEIMR_STATE_DIR}.')
| 15,849
|
async def test_pydelijn():
"""Example usage of pydelijn."""
subscriptionkey = "<put your data.delijn.be subscriptionkey here>"
stopid = 200551
maxpassages = 10
custom_session = aiohttp.ClientSession()
delijndata = Passages(
LOOP, stopid, maxpassages, subscriptionkey, custom_session, True
)
await delijndata.get_passages()
print_data(delijndata)
stopname = await delijndata.get_stopname()
print("----------------------------------------")
print("Stop Name: %s" % stopname)
await custom_session.close()
| 15,850
|
def gather_inputs(headers, test_suites, inputs_class=Inputs):
"""Read the list of inputs to test psa_constant_names with."""
inputs = inputs_class()
for header in headers:
inputs.parse_header(header)
for test_cases in test_suites:
inputs.parse_test_cases(test_cases)
inputs.gather_arguments()
return inputs
| 15,851
|
def load_institute(adapter, internal_id, display_name, sanger_recipients=None, loqusdb_id=None):
"""Load a institute into the database
Args:
adapter(MongoAdapter)
internal_id(str)
display_name(str)
sanger_recipients(list(email))
loqusdb_id(str)
"""
institute_obj = build_institute(
internal_id=internal_id,
display_name=display_name,
sanger_recipients=sanger_recipients,
loqusdb_id=loqusdb_id,
)
adapter.add_institute(institute_obj)
| 15,852
|
def key_create(adapter_id):
"""Creates a key using a certain adapter."""
adapter = get_adapter(adapter_id)
if not adapter:
return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
if not adapter.do_verify(request.headers):
return output.failure("Credential verification failed. Please check your credentials and try again.", 401)
result = adapter.do_key_create(request.headers, request.json)
if 'error' in result:
return output.failure(result['error'], result['status'])
return output.success(result['data'], result['status'])
| 15,853
|
def run_jsonhook(hook, spec, res, dsarg=None):
"""Execute a hook on a given result
A hook definition's 'call' specification may contain placeholders that
will be expanded using matching values in the given result record. In
addition to keys in the result a '{dsarg}' placeholder is supported.
The characters '{' and '}' in the 'call' specification that are not part
of format() placeholders have to be escaped as '{{' and '}}'. Example
'call' specification to execute the DataLad ``unlock`` command::
unlock {{"dataset": "{dsarg}", "path": "{path}"}}
Parameters
----------
hook : str
Name of the hook
spec : dict
Hook definition as returned by `get_hooks_from_config()`
res : dict
Result records that were found to match the hook definition.
dsarg : Dataset or str or None, optional
Value to substitute a {dsarg} placeholder in a hook 'call' specification
with. Non-string values are automatically converted.
Yields
------
dict
Any result yielded by the command executed as hook.
"""
import datalad.api as dl
cmd_name = spec['cmd']
if not hasattr(dl, cmd_name):
# TODO maybe a proper error result?
lgr.warning(
'Hook %s requires unknown command %s, skipped',
hook, cmd_name)
return
cmd = getattr(dl, cmd_name)
# apply potential substitutions on the string form of the args
# for this particular result
# take care of proper JSON encoding for each value
enc = json.JSONEncoder().encode
# we have to ensure JSON encoding of all values (some might be Path instances),
# we are taking off the outer quoting, to enable flexible combination
# of individual items in supplied command and argument templates
args = spec['args'].format(
# we cannot use a dataset instance directly but must take the
# detour over the path location in order to have string substitution
# be possible
dsarg='' if dsarg is None else enc(dsarg.path).strip('"')
if isinstance(dsarg, dl.Dataset) else enc(dsarg).strip('"'),
# skip any present logger that we only carry for internal purposes
**{k: enc(str(v)).strip('"') for k, v in res.items() if k != 'logger'})
# now load
try:
args = json.loads(args)
except Exception as e:
from datalad.dochelpers import exc_str
lgr.warning(
'Invalid argument specification for hook %s '
'(after parameter substitutions): %s [%s], '
'hook will be skipped',
hook, args, exc_str(e))
return
# only debug level, the hook can issue its own results and communicate
# through them
lgr.debug('Running hook %s: %s%s', hook, cmd_name, args)
for r in cmd(**args):
yield r
| 15,854
|
def is_right(side1, side2, side3):
    """
    Takes three side lengths and returns true if triangle is right
    :param side1: int or float
    :param side2: int or float
    :param side3: int or float
    :return: bool
    """
    a, b, c = sorted((side1, side2, side3))
    # a right triangle satisfies the Pythagorean relation for its longest side
    return abs(a ** 2 + b ** 2 - c ** 2) < 1e-9
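# Usage sketch for the Pythagorean check above:
print(is_right(3, 4, 5))    # -> True
print(is_right(2, 3, 4))    # -> False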
| 15,855
|
def parser_electron_number(electron_line):
"""
function of parser for electron information
Args:
electron_line (str): line
Returns:
list: electron information
"""
electron_list = parser_split_line_by_length(electron_line.rstrip(), CPF_FORMAT["ELECTRON"]["length"], "int")
return electron_list
| 15,856
|
def summary(t, rtol=1e-5, atol=1e-8):
"""
    Summarize the spacing of a time vector.
    Parameters
    ----------
    t : array_like of time stamps
    rtol, atol : tolerances passed to np.allclose when testing for constant steps
    Returns
    -------
    dt, the unique (or all) step values, and an info string ('' if steps are constant)
"""
deltas = np.diff(t)
if np.allclose(deltas, deltas[0], rtol, atol):
# constant time steps
return deltas[0], deltas, ''
# non-constant time steps!
unqdt = np.unique(deltas)
mode = stats.mode(deltas)
dt = mode.mode
if len(unqdt) > 5:
        info = f'{len(unqdt)} unique values between {deltas.min()} and {deltas.max()}'
else:
info = str(unqdt)
return dt, unqdt, f'Non-constant time steps: {info}'
| 15,857
|
def download(local_qanta_prefix, retrieve_paragraphs):
"""
Run once to download qanta data to data/. Runs inside the docker container, but results save to host machine
"""
#print("\n\n\tLocal Qanta Prefix: %s" %local_qanta_prefix)
#print("\n\n\tRetrieve Paragraphs: %s" %retrieve_paragraphs)
util.download(local_qanta_prefix, retrieve_paragraphs)
| 15,858
|
def test_assert_correct_version():
"""
yep. update it every time.
This is a dumb test, but I want it here to remind future committers that if
you make a change to the package, your change should probably include a
test which verifies why the change was made.
If you want your changes to be seen in PyPI, you have to rev the version.
If you rev the version without updating the test suite, you will break the
tests.
"""
assert "0.0.7" == sqlalchemy_bigquery.__version__
| 15,859
|
def masked_huber(input, target, lengths):
"""
Always mask the first (non-batch dimension) -> usually time
:param input:
:param target:
:param lengths:
:return:
"""
m = mask(input.shape, lengths, dim=1).float().to(input.device)
return F.smooth_l1_loss(input * m, target * m, reduction='sum') / m.sum()
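# The `mask` helper used above is not shown in this snippet. A minimal sketch of
# one way such a length mask could be built (the real helper may differ),
# assuming dim == 1 indexes time and `lengths` holds one length per batch item:
import torch

def length_mask(shape, lengths, dim=1):
    # True for positions whose index along `dim` is < length, False for padding.
    max_len = shape[dim]
    steps = torch.arange(max_len).view(1, max_len, *([1] * (len(shape) - 2)))
    lengths = torch.as_tensor(lengths).view(-1, 1, *([1] * (len(shape) - 2)))
    return (steps < lengths).expand(*shape)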
| 15,860
|
def calcProbabilisticResiduals(
coords_actual,
coords_desired,
covariances_actual
):
"""
Calculate the probabilistic residual.
Parameters
----------
coords_actual : `~numpy.ndarray` (N, M)
Actual N coordinates in M dimensions.
coords_desired : `~numpy.ndarray` (N, M)
The desired N coordinates in M dimensions.
covariances_actual : list of N `~numpy.ndarray`s (M, M)
The covariance matrix in M dimensions for each
actual observation if available.
Returns
-------
p : `~numpy.ndarray` (N)
The probability that the actual coordinates given their uncertainty
belong to the same multivariate normal distribution as the desired
coordinates.
d : `~numpy.ndarray` (N)
The Mahalanobis distance of each coordinate compared to the desired
coordinates.
"""
d = np.zeros(len(coords_actual))
p = np.zeros(len(coords_actual))
for i, (actual, desired, covar) in enumerate(zip(coords_actual, coords_desired, covariances_actual)):
# Calculate the degrees of freedom
k = len(actual)
# Calculate the mahalanobis distance between the two coordinates
d_i = mahalanobis(
actual,
desired,
np.linalg.inv(covar)
)
# Calculate the probability that both sets of coordinates are drawn from
# the same multivariate normal
p_i = 1 - chi2.cdf(d_i, k)
# Append results
d[i] = d_i
p[i] = p_i
return p, d
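# Usage sketch (assuming numpy plus the scipy imports the function relies on,
# i.e. scipy.spatial.distance.mahalanobis and scipy.stats.chi2):
actual = np.array([[1.0, 2.0]])
desired = np.array([[1.5, 2.5]])
covariances = [np.eye(2) * 0.25]   # 0.5-sigma uncertainty on each axis
p, d = calcProbabilisticResiduals(actual, desired, covariances)
# d[0] is the Mahalanobis distance (sqrt(2) here), p[0] the chi-squared tail probability.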
| 15,861
|
def split_words_and_quoted_text(text):
"""Split string text by space unless it is
wrapped inside double quotes, returning a list
of the elements.
For example
if text =
'Should give "3 elements only"'
the resulting list would be:
['Should', 'give', '3 elements only']
"""
# using shlex
# return shlex.split(text)
# using re
result = list()
pattern = re.findall(r'\w+\s*|\".+?\"', text)
for char in pattern:
result.append(char.strip().replace('"', ''))
return result
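# Usage sketch reproducing the docstring example (assuming `re` is imported):
print(split_words_and_quoted_text('Should give "3 elements only"'))
# -> ['Should', 'give', '3 elements only']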
| 15,862
|
def scheduler(epoch):
"""Generating learning rate value for a given epoch.
inputs:
epoch = number of current epoch
outputs:
learning_rate = float learning rate value
"""
if epoch < 100:
return 1e-3
elif epoch < 125:
return 1e-4
else:
return 1e-5
| 15,863
|
def plot_predictions(device, test_loader, net):
"""Plot the predictions of 1D regression tasks.
Args:
(....): See docstring of function :func:`main.test`.
"""
net.eval()
data = test_loader.dataset
assert(data.inputs.shape[1] == 1 and data.outputs.shape[1] == 1)
inputs = data.inputs.detach().cpu().numpy()
targets = data.outputs.detach().cpu().numpy()
with torch.no_grad():
# Note, for simplicity, we assume that the dataset is small and we
# don't have to collect the predictions by iterating over mini-batches.
predictions = net.forward(data.inputs).detach().cpu().numpy()
plt.figure(figsize=(10, 6))
plt.title("Predictions in 1D regression task", size=20)
plt.plot(inputs, targets, color='k', label='Target function',
linestyle='dashed', linewidth=.5)
plt.scatter(inputs, predictions, color='r', label='Predictions')
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.show()
| 15,864
|
def external_search(query, feature_type, url):
""" Makes an external search request to a specified URL. The url will have the search
text appended to it. Returns geojson matches with extra data for the geocoder.
"""
logger.info("using external API for feature lookup: %s", url + query)
req = ExternalAPIRequest(
url=url + query,
layer=feature_type,
q={},
paginate=False
)
# Fetch features.
feature_collection = fetch_geojson_features([req])
features = feature_collection[0].geojson['features']
geocoder_features = []
for feature in features:
feature['layer'] = feature_type
feature['center'] = (feature.geometry.coordinates[0],
feature.geometry.coordinates[1])
feature['place_name'] = str(feature.properties['well_tag_number'])
geocoder_features.append(feature)
return geocoder_features
| 15,865
|
def has_supervisor() -> bool:
"""Return true if supervisor is available."""
return "SUPERVISOR" in os.environ
| 15,866
|
def setup(*args, **kwds):
"""
Compatibility wrapper.
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
return setup(*args, **kwds)
| 15,867
|
def joinpath(base, end):
"""Like Path.joinpath(), but ensures the result is inside `base`.
Should be used for user-supplied `end`.
"""
result = (base / end).resolve()
if base not in result.parents:
print(base, end, result)
raise ValueError(end)
return result
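# Usage sketch (assuming pathlib.Path; the paths here are illustrative):
from pathlib import Path

base = Path("/srv/data").resolve()
print(joinpath(base, "reports/2021.csv"))       # a path under /srv/data
# joinpath(base, "../../etc/passwd") would raise ValueError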
| 15,868
|
def multicolored_line_collection(x, y, z, colors):
""" Color a 2D line based on which state it is in
:param x: data x-axis values
:param y: data y-axis values
    :param z: values that determine the color of each (x, y) pair
    :param colors: array of RGBA colors, one row per state
    :return: a LineCollection whose segments are colored according to z
    """
nstates = colors.shape[0]
# come up with color map and normalization (i.e. boundaries of colors)
cmap = ListedColormap(colors)
bounds = np.arange(-1, nstates) + 0.1
    norm = BoundaryNorm(bounds, cmap.N)  # map state values to discrete color bins
# create line segments to color individually
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
# Set the values used for colormapping
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(z)
lc.set_linewidth(2)
return lc
| 15,869
|
def foldr(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None):
"""foldr on the list of tensors unpacked from `elems` on dimension 0.
This foldr operator repeatedly applies the callable `fn` to a sequence
of elements from last to first. The elements are made of the tensors
unpacked from `elems`. The callable fn takes two tensors as arguments.
The first argument is the accumulated value computed from the preceding
invocation of fn. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from last
to first.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldr(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldr", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally and not
# issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (
tensor_shape.dimension_value(elems_flat[0].shape[0]) or
array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
i = n - 1
a = nest.map_structure(lambda elem: elem.read(i), elems_ta)
else:
i = n
a = initializer
def compute(i, a):
i -= 1
elem = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a_out = fn(a, elem)
return [i, a_out]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i > 0,
compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
| 15,870
|
def featurise_distances(diagram):
"""Create feature vector by distance-to-diagonal calculation.
Creates a feature vector by calculating distances to the diagonal
for every point in the diagram and returning a sorted vector. The
representation is *stable* but might not be discriminative.
Parameters
----------
diagram : `PersistenceDiagram`
Persistence diagram to featurise. Can also be a generic 2D
container for iterating over tuples.
Returns
-------
Sorted vector of distances to diagonal. The vector is sorted in
descending order, such that high persistence points precede the
ones of low persistence.
"""
distances = [_persistence(x, y) for x, y in diagram]
return sorted(distances, reverse=True)
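
# The `_persistence` helper is not part of this snippet. A common choice is the
# (scaled) distance of a point to the diagonal; the version below is an
# assumption for illustration, not necessarily the author's implementation.
def _persistence_sketch(x, y):
    import math
    # Euclidean distance from (x, y) to the diagonal y = x
    return abs(y - x) / math.sqrt(2)

# e.g. featurise_distances([(0.1, 0.9), (0.2, 0.3)]) with this helper would
# return the two distances sorted in descending order.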
| 15,871
|
def resize(source, width=None, height=None, filter=None, radius=1,
wrapx=False, wrapy=False):
"""Create a new numpy image with the desired size.
Either width or height can be null, in which case its value
is inferred from the aspect ratio of the source image.
Filter can be HERMITE, TRIANGLE, GAUSSIAN, NEAREST, LANCZOS, or
MITCHELL.
"""
assert len(source.shape) == 3, 'Shape is not rows x cols x channels'
    assert width is not None or height is not None, 'Missing target size'
    aspect = source.shape[1] / source.shape[0]
    if width is None: width = height * aspect
    if height is None: height = width / aspect
    magnifying = width > source.shape[1]
    if filter is None: filter = MITCHELL if magnifying else LANCZOS
return resample(source, width, height, filter, radius, wrapx, wrapy)
| 15,872
|
def load_and_classify_payload(config, service, entity, raw_record):
"""Return a loaded and classified payload."""
# prepare the payloads
payload = load_stream_payload(service, entity, raw_record)
payload = list(payload.pre_parse())[0]
classifier = StreamClassifier(config=config)
classifier.load_sources(service, entity)
classifier.classify_record(payload)
return payload
| 15,873
|
def Mode():
"""Take mode of cryptography."""
if mode.get() == 'e':
Result.set(Encode(private_key.get(), Text.get()))
elif mode.get() == 'd':
Result.set(Decode(private_key.get(), Text.get()))
else:
Result.set('Invalid Mode')
| 15,874
|
def render_text(self, block: str, block_type: str, y: int) -> int:
"""
:param self: MarkdownRenderer
:param block: string of text
:param block_type: type of the text (e.g. headers, ordered/unordered lists, blockquotes, code etc)
:param y: y-coordinate to start rendering on
:return: y-coordinate after rendering is finished
"""
start_of_line_x = self.x
if block_type == 'blockquote':
start_of_line_x += self.indentation_quote
quote_y_start = y
x = start_of_line_x
# Cleanup and stripping
block = block \
.replace('\n', ' ') \
.strip(' ')
if block[:3] == '<p>':
block = block[3:]
if block[-4:] == '</p>':
block = block[:-4]
code_flag = False
bold_flag = False
italic_flag = False
position = None
if block_type in ('h1', 'h2', 'h3'): # insert additional gap in front of h1 or h2 headers
y += self.gap_line
for word in block.split(" "):
# _________ PREPARATION _________ #
# inline code, bold and italic formatting
word, position, code_flag, bold_flag, italic_flag = self.inline_formatting_preparation(word, position, code_flag, bold_flag, italic_flag)
# _________ TEXT BLITTING _________ #
# create surface to get width of the word to identify necessary linebreaks
word = word + " "
word = word.replace(">", ">").replace("<", "<")
if code_flag:
if position == 'first' or position == 'single':
x += self.code_padding
surface = self.get_surface(word, 'code', bold_flag, italic_flag)
else:
surface = self.get_surface(word, block_type, bold_flag, italic_flag)
text_height = surface.get_height() # update for next line
if not(x + surface.get_width() < self.x + self.w): # new line necessary
y = y + text_height + self.gap_line
x = start_of_line_x
if self.is_visible(y) and self.is_visible(y + text_height):
if block_type == 'blockquote': # draw quote-rectangle in front of text
self.draw_quote_rect(y, y + self.get_surface(word, 'blockquote').get_height())
self.draw_code_background(code_flag, word, x, y, position)
self.screen.blit(surface, (x, y))
# Update x for the next word
x = x + surface.get_width()
if code_flag and position in ('single', 'last'):
x -= self.code_padding # reduce empty space by padding.
# _________ FORMATTING RESET FOR NEXT WORD _________ #
bold_flag = False if bold_flag and position == 'last' else bold_flag
code_flag = False if code_flag and (position == 'last' or position == 'single') else code_flag
italic_flag = False if italic_flag and position == 'last' else italic_flag
position = 'Middle' if position == 'first' else position
if block_type in ('h1', 'h2'):
y = y + text_height * 0.5 # add an additional margin below h1 and h2 headers
if block_type == 'h1': # insert subline below h1 headers
y = y + text_height * 0.5 # add an additional margin below h1 headers for the subheader line
y = self.draw_subheader_line(y)
return y
| 15,875
|
def tvadam_reconstructor(dataset='ellipses', name=None):
"""
:param dataset: Can be 'ellipses' or 'lodopab'
:return: TV reconstructor for the specified dataset
"""
try:
params = Params.load('{}_tvadam'.format(dataset))
standard_dataset = load_standard_dataset(dataset)
if name is None:
name = 'TV-Adam'
reconstructor = TVAdamReconstructor(standard_dataset.ray_trafo,
hyper_params=params.dict,
name=name)
return reconstructor
    except Exception as e:
        raise Exception('The reconstructor doesn\'t exist') from e
| 15,876
|
def process_spf_data(res, data):
"""
This function will take the text info of a TXT or SPF record, extract the
IPv4, IPv6 addresses and ranges, request process include records and return
a list of IP Addresses for the records specified in the SPF Record.
"""
# Declare lists that will be used in the function.
ipv4 = []
ipv6 = []
includes = []
ip_list = []
    # check first if it is an SPF record
    if not re.search(r'v=spf', data):
        return
    # Parse the record for IPv4 Ranges, individual IPs and include TXT Records.
    ipv4.extend(re.findall(r'ip4:(\S*) ', "".join(data)))
    ipv6.extend(re.findall(r'ip6:(\S*)', "".join(data)))
# Create a list of IPNetwork objects.
for ip in ipv4:
for i in IPNetwork(ip):
ip_list.append(i)
for ip in ipv6:
for i in IPNetwork(ip):
ip_list.append(i)
# Extract and process include values.
    includes.extend(re.findall(r'include:(\S*)', "".join(data)))
for inc_ranges in includes:
for spr_rec in res.get_txt(inc_ranges):
spf_data = process_spf_data(res, spr_rec[2])
if spf_data is not None:
ip_list.extend(spf_data)
# Return a list of IP Addresses
return [str(ip) for ip in ip_list]
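
# Hedged usage sketch: with no include: terms the resolver argument is never
# used, so None suffices here; the SPF string is illustrative and the sketch
# assumes netaddr's IPNetwork is the one imported by the enclosing module.
def _process_spf_example():
    record = "v=spf1 ip4:192.0.2.0/30 -all"
    print(process_spf_data(None, record))
    # -> ['192.0.2.0', '192.0.2.1', '192.0.2.2', '192.0.2.3']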
| 15,877
|
def label_attack(dataset, index_start, index_end, components):
"""Label one attack on a dataset
Parameters
----------
dataset: dataset, the dataset to label the attacks on
index_start: int, the first index of the attack
index_end: int, the last index of the attack
components: list, the list of components affected by the attack
Returns
-------
None, labeling occurs in place
"""
dataset.loc[index_start:index_end, 'ATT_FLAG'] = 1
for component in components:
dataset.loc[index_start:index_end, component] = 1
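
# Hedged usage sketch with a tiny made-up DataFrame; the column names are invented.
def _label_attack_example():
    import pandas as pd
    df = pd.DataFrame({'PUMP_1': 0, 'ATT_FLAG': 0}, index=range(10))
    label_attack(df, 3, 5, ['PUMP_1'])
    print(df.loc[2:6, ['ATT_FLAG', 'PUMP_1']])   # rows 3-5 are flagged with 1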
| 15,878
|
def create_knight():
"""
Creates a new knight according to player input.
Checks the knights module for how many points are to spend,
and which attributes are available. It then asks the player
for a name for the knight and to spend their points on the
available attributes.
Returns:
A knight instance with the player's values
"""
knight_class = get_class()
# get the constants from the knights module
max_attr_points = knights.MAX_ATTRIBUTE_POINTS
attributes = knights.ATTRIBUTES
knight = None # this will be the instance to be returned
name = input("What is your name?\n")
    # repeat until the input was correct and a knight was created
while not knight:
# display the attributes and how many points are to be spent
spent_points = input(
f"You have {max_attr_points} points to spend on "
f"the attributes: { ', '.join(attributes) }.\n"
"Submit your points separated either by commas or by spaces, "
"like the list above with numbers instead of attribute names. "
"Points must be integers.\n"
)
try:
# we allow to use commas or spaces, so we check what was used
# we cast all input attribute points to integer since
# attribute points are integer numbers
if "," in spent_points:
points = [int(val) for val in spent_points.split(",")]
else:
points = [int(val) for val in spent_points.split(" ")]
# if not enough attributes were inputted, repeat the loop
if len(points) != len(attributes): continue
# knight the knight! Since knights take attributes as
# one parameter each, we unzip the input list into the call
knight = knight_class(name, *points)
except ValueError:
# When the casting to integer fails
print("Could not parse. Were the points all integer?")
continue
except knights.KnightError as e:
# a special error from the knights module that occurs when
# there are errors in knighting a new knight
print(f"Could not knight the knight: {str(e)}")
continue
return knight
| 15,879
|
async def get_bank_name(guild: discord.Guild = None) -> str:
"""Get the current bank name.
Parameters
----------
guild : `discord.Guild`, optional
The guild to get the bank name for (required if bank is
guild-specific).
Returns
-------
str
The bank's name.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
return await bank.get_bank_name(guild)
| 15,880
|
def main():
"""
Execute the nose test runner.
Drop privileges and alter the system argument to remove the
userid and group id arguments that are only required for the test.
"""
if len(sys.argv) < 2:
        print(
            u'Run the test suite using the drop-privileges username as the '
            u'first argument. Use "-" if you do not want elevated mode.')
sys.exit(1)
# Delay import after coverage is started.
from chevah.compat.testing.nose_memory_usage import MemoryUsage
from chevah.compat.testing.nose_test_timer import TestTimer
from chevah.compat.testing.nose_run_reporter import RunReporter
from chevah.compat.testing import ChevahTestCase
drop_user = sys.argv[1].encode('utf-8')
ChevahTestCase.initialize(drop_user=drop_user)
ChevahTestCase.dropPrivileges()
new_argv = ['chevah-test-runner']
new_argv.extend(sys.argv[2:])
sys.argv = new_argv
plugins = [
TestTimer(),
RunReporter(),
MemoryUsage(),
]
try:
nose_main(addplugins=plugins)
except SystemExit as error:
if cov:
cov.stop()
cov.save()
import threading
print("Max RSS: %s" % ChevahTestCase.getPeakMemoryUsage())
threads = threading.enumerate()
if len(threads) < 2:
# No running threads, other than main so we can exit as normal.
sys.exit(error.code)
else:
print("There are still active threads: %s" % threads)
# We do a brute force exit here, since sys.exit will wait for
# unjoined threads.
# We have to do some manual work to compensate for skipping sys.exit()
sys.exitfunc()
# Don't forget to flush the toilet.
sys.stdout.flush()
sys.stderr.flush()
os._exit(error.code)
| 15,881
|
def build_cli_lib(to_save_location: Optional[str] = None, render_kwargs: Optional[Dict[str, Any]] = None) -> str:
"""Create project-specific cli.fif lib"""
if not to_save_location:
to_save_location: str = tempfile.mkstemp(suffix='.fif')[1]
logger.info(f"👽 Save ton-cli to {to_save_location}")
loader = FileSystemLoader(f"{project_root}/modules/fift")
env = Environment(
loader=loader,
autoescape=select_autoescape()
)
template = env.get_template(f"cli.fif.template")
render_kwargs = {} if render_kwargs is None else render_kwargs
if 'is_project' not in render_kwargs:
render_kwargs['is_project'] = 0
rendered = template.render(**render_kwargs)
with open(to_save_location, 'w', encoding='utf-8') as f:
f.write(rendered)
return to_save_location
| 15,882
|
def match(A, S, trueS):
"""Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)"""
cov = np.cov(trueS, S)
k = S.shape[0]
corr = np.zeros([k, k])
for i in range(k):
for j in range(k):
corr[i][j] = cov[i + k][j] / np.sqrt(cov[i + k][i + k] * cov[j][j])
arrangement = linear_sum_assignment(-corr)
resS = np.zeros_like(S)
resAT = np.zeros_like(A.T)
for t in range(k):
resS[arrangement[1][t]] = S[arrangement[0][t]]
resAT[arrangement[1][t]] = A.T[arrangement[0][t]]
return resAT.T, resS
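
# Hedged usage sketch with tiny made-up matrices: S holds the same sources as
# trueS but with its rows swapped, so match() should restore the ordering
# (assumes numpy and scipy's linear_sum_assignment are imported by the module).
def _match_example():
    import numpy as np
    trueS = np.array([[1.0, 2.0, 3.0],
                      [9.0, 7.0, 5.0]])
    S = trueS[::-1].copy()          # rows swapped
    A = np.eye(2)
    A_matched, S_matched = match(A, S, trueS)
    print(np.allclose(S_matched, trueS))   # True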
| 15,883
|
def test_check_time_data(times):
""" Tests the function "check_time_data" from heartRateMonitor.py
:param times: List of time data
:returns: passes if exceptions raised when necessary, fails
otherwise
"""
from heartRateMonitor import check_time_data
with pytest.raises(ValueError):
check_time_data(times)
| 15,884
|
def image_field_data(request, include_empty_option=False):
"""Returns a list of tuples of all images.
Generates a sorted list of images available. And returns a list of
(id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include a empty tuple in the front of
the list
:return: list of (id, name) tuples
"""
try:
images = get_available_images(request, request.user.project_id)
except Exception:
exceptions.handle(request, _('Unable to retrieve images'))
images.sort(key=lambda c: c.name)
images_list = [('', _('Select Image'))]
for image in images:
image_label = u"{} ({})".format(image.name,
sizeformat.diskgbformat(image.size))
images_list.append((image.id, image_label))
if not images:
return [("", _("No images available")), ]
return images_list
| 15,885
|
def plot_education_against_tv(adult_data: pd.DataFrame) -> None:
"""PLots combined barchart of the pop count and the mean tv in the sample
Source for combined barchart for sns:
https://python.tutorialink.com/how-can-i-plot-a-secondary-y-axis-with-seaborns-barplot/
* Notice how I link the stackoverflow resource for future me, who will change the script somehow and run into the
exact error above
    Optional exercise for the reader:
How could we improve this?
    Well, e.g. the chart complexity could be reduced by grouping together the categories below HS-grad. This would get rid
    of category-ordering issues on the chart, create a combined category that is relevant in size. Fewer bars but more
    equal group sizes with a very minimal loss of information is a great tradeoff.
How does this help us?
First of all,
they are very similar groups, so by grouping them together we reduce modeling complexity as well. This is why
    you need good visuals: they help convince the reader and also benefit modeling efforts if you understand what's going
on.
"""
width_scale = 0.5
plot_series = adult_data.groupby("education").agg(
{"target_encoded": ["mean", "count"]}
)
plot_series.columns = ["mean", "count"]
plot_series.sort_values(by="mean", inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax2 = ax.twinx()
sns.barplot(
x=plot_series.index, y="mean", ax=ax, data=plot_series, color="blue", label="TV"
)
sns.barplot(
x=plot_series.index,
y="count",
ax=ax2,
data=plot_series,
color="grey",
label="COUNT",
)
offset_bars_on_double_barplot(ax, ax2, width_scale)
fig.suptitle("Education against Income")
ax.set_ylabel("Mean tv per group")
ax.set_xlabel("Education level")
rotate_ax_ticklabels(ax)
# create legend
grey_patch = mpatches.Patch(color="grey", label="Population size")
blue_patch = mpatches.Patch(color="blue", label="Income >50k $")
plt.legend(handles=[blue_patch, grey_patch], loc=2)
| 15,886
|
def _retry_for_entity_delay(func, timeout=60):
"""
Retry the given function a few times if it raises a DoesNotExistAWSError
with an increasing delay.
It sometimes takes AWS a bit for new entities to be available, so this
helper retries an AWS call a few times allowing for any of the errors
that indicate that an entity is not available, which may be a temporary
state after adding it.
"""
timeout = timedelta(seconds=timeout)
start_time = datetime.now()
attempt = 0
while datetime.now() - start_time < timeout:
attempt += 1
try:
func()
break
except (DoesNotExistAWSError, NotReadyAWSError) as e:
log(e.message)
delay = min(10 * attempt, 30)
log('Retrying in {} seconds', delay)
sleep(delay)
else:
raise TimeoutAWSError()
| 15,887
|
def posts_completed(scraped_posts, limit):
"""Returns true if the amount of posts scraped from
profile has reached its limit.
"""
if len(scraped_posts) == limit:
return True
else:
return False
| 15,888
|
def mag(x):
"""Returns the absolute value squared of the input"""
return np.abs(x)**2
| 15,889
|
def get_zero_columns(matrix):
""" Returns a list of the columns which are all 0 """
rows = matrix.shape[0]
columns = matrix.shape[1]
result = []
for j in range(columns):
is_zero_column = True
for i in range(rows):
is_zero_column = is_zero_column and matrix[i, j] == 0.0
result.append(is_zero_column)
return result
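
# Hedged usage sketch; the second print shows an equivalent vectorized NumPy
# expression (returning numpy bools instead of Python bools).
def _get_zero_columns_example():
    import numpy as np
    m = np.array([[0.0, 1.0, 0.0],
                  [0.0, 2.0, 0.0]])
    print(get_zero_columns(m))   # [True, False, True]
    print(~m.any(axis=0))        # array([ True, False,  True])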
| 15,890
|
def traditional_constants_icr_equation_empty_fixed(fixed_params, X_col):
""" Traditional ICR equation with constants from ACE consensus """
a = 450
tdd = X_col[0]
return a / tdd
| 15,891
|
def sort_completions_key(completion):
"""
sort completions according to their type
Args:
completion (jedi.api.classes.Completion): completion
Returns:
int: sorting order
"""
if completion.type == "function":
return 2
elif completion.type == "instance":
return 1
else:
return 3
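
# Hedged usage sketch (assumes a recent jedi where Script.complete() exists);
# the snippet being completed is arbitrary.
def _sorted_completions_example():
    import jedi
    completions = jedi.Script("import os\nos.pa").complete(2, 5)
    return sorted(completions, key=sort_completions_key)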
| 15,892
|
def _is_git_url_mismatch(mismatch_item):
"""Returns whether the given mismatch item is for a GitHub URL."""
_, (required, _) = mismatch_item
return required.startswith('git')
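
# Hedged usage sketch: the (name, (required, installed)) structure is inferred
# from the unpacking above, and the values are invented.
def _git_mismatch_example():
    item = ("somepkg", ("git+https://github.com/org/somepkg@v1.2", "1.1"))
    print(_is_git_url_mismatch(item))   # True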
| 15,893
|
def from_url(url, output_path=None, options=None):
"""
Convert file of files from URLs to PDF document
:param url: URL or list of URLs to be saved
:param output_path: (optional) path to output PDF file. If not provided, PDF will be returned as string
:param options: (optional) dict to configure pyppeteer page.pdf action
Returns: output_path if provided else PDF Binary
"""
return async_to_sync(api_async.from_url)(url, output_path, options)
| 15,894
|
def parse_wmic_output(wmic_output: str) -> Dict[str, str]:
"""Parse output of wmic query
See test cases.
@param wmic_output: Output from wmic tool
@return Dictionary with key/value from wmic"""
try:
non_blank_lines = [s for s in wmic_output.splitlines() if s]
parsed = {non_blank_lines[0].rstrip(' '): non_blank_lines[1].rstrip(' ')}
logger.debug("Parsed wmic output: {}".format(str(parsed)))
except IndexError as error:
logger.error(f"Failed to parse {wmic_output}")
return {"": ""}
return parsed
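
# Hedged usage sketch with a made-up two-line wmic output; assumes the
# module-level `logger` used by the function is already configured.
def _parse_wmic_example():
    sample = "Manufacturer  \nLENOVO      \n"
    print(parse_wmic_output(sample))   # {'Manufacturer': 'LENOVO'}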
| 15,895
|
def parse_identifier(stream: TokenStream) -> expression.Identifier:
"""Read an identifier from the token stream.
<ident>.<ident>
<ident>["<ident>"]
<ident>["<ident>"].<ident>
<ident>[<ident --> int/str>]
<ident>[<ident>.<ident --> int/str>]
<ident>[<int>]
<ident>[<int>].<ident>
"""
path: expression.IdentifierPath = []
while stream.current.type in IDENTIFIER_TOKENS:
if stream.current.type == TOKEN_IDENTIFIER:
path.append(IdentifierPathElement(stream.current.value))
elif stream.current.type == TOKEN_INTEGER:
path.append(IdentifierPathElement(int(stream.current.value)))
elif stream.current.type == TOKEN_LBRACKET:
stream.next_token() # Eat open bracket
if stream.current.type == TOKEN_STRING:
path.append(IdentifierPathElement(stream.current.value))
elif stream.current.type == TOKEN_NEGATIVE:
expect_peek(stream, TOKEN_INTEGER)
stream.next_token()
path.append(IdentifierPathElement(-int(stream.current.value)))
elif stream.current.type == TOKEN_INTEGER:
path.append(IdentifierPathElement(int(stream.current.value)))
elif stream.current.type == TOKEN_IDENTIFIER:
# Recursive call to parse_identifier. If it's not a string or
# integer, anything inside a pair of square brackets could be
# another identifier that resolves to a string or integer.
path.append(parse_identifier(stream))
else:
raise LiquidSyntaxError(
f"invalid identifier, found {stream.current.type}"
)
expect_peek(stream, TOKEN_RBRACKET)
stream.next_token() # Eat close bracket
elif stream.current.type == TOKEN_DOT:
pass
else:
raise LiquidSyntaxError(f"invalid identifier, found {stream.current.type}")
stream.next_token()
stream.push(stream.current)
return expression.Identifier(path)
| 15,896
|
def generate(json_file):
"""Read the JSON_FILE and write the PBS files"""
json_file = click.format_filename(json_file)
settings = read_jsonfile(json_file)
simulation = Simulation(settings)
click.echo('Job length = {}'.format(simulation.job_length))
simulation.writeSimulationFiles()
| 15,897
|
def cubemap_projection_matrices(from_point: Vector3D, far_plane: float) -> List[np.ndarray]:
"""
Create the required Cubemap projection matrices.
This method is suitable for generating a Shadow Map.
Simply speaking, this method generates 6 different camera matrices from the center of
an imaginary cube and covers all surfaces without conflicting.
Keyword arguments;
from_point -- Imaginary camera location
far_plane -- How far the camera is capable of seeing. (Effects performance!)
"""
def a2np(a: List[float]) -> np.ndarray:
return np.array(a, dtype=np.float32)
shadow_proj = pyrr.matrix44.create_perspective_projection(90.0, 1.0, 0.01, far_plane, np.float32)
lightpos = np.array(list(from_point), dtype=np.float32)[:3]
nx = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([-1.0, 0, 0]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
px = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([1, 0, 0]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
ny = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, -1, 0]),
dtype=np.float32,
),
a2np([0, 0, -1.0]),
dtype=np.float32,
)
py = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, 1, 0]),
dtype=np.float32,
),
a2np([0, 0, 1.0]),
dtype=np.float32,
)
pz = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, 0, 1]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
nz = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, 0, -1]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
return [
px.dot(shadow_proj),
nx.dot(shadow_proj),
py.dot(shadow_proj),
ny.dot(shadow_proj),
pz.dot(shadow_proj),
nz.dot(shadow_proj),
]
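
# Hedged usage sketch: a plain 3-tuple stands in for Vector3D, which works
# because the function converts `from_point` with list(); values are arbitrary
# and pyrr/numpy are assumed to be importable as in the snippet above.
def _cubemap_example():
    mats = cubemap_projection_matrices((0.0, 5.0, 0.0), far_plane=100.0)
    print(len(mats), mats[0].shape)   # 6 faces, each a 4x4 matrix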
| 15,898
|
def loadOptionsFile():
"""Find the .buildbot/FILENAME file. Crawl from the current directory up
towards the root, and also look in ~/.buildbot . The first directory
that's owned by the user and has the file we're looking for wins. Windows
skips the owned-by-user test.
@rtype: dict
@return: a dictionary of names defined in the options file. If no options
file was found, return an empty dict.
"""
here = os.path.abspath(os.getcwd())
if runtime.platformType == 'win32':
# never trust env-vars, use the proper API
from win32com.shell import shellcon, shell
appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
home = os.path.join(appdata, "buildbot")
else:
home = os.path.expanduser("~/.buildbot")
searchpath = []
toomany = 20
while True:
searchpath.append(os.path.join(here, ".buildbot"))
        parent = os.path.dirname(here)
        if parent == here:
            break # we've hit the root
        here = parent
toomany -= 1 # just in case
if toomany == 0:
raise ValueError("Hey, I seem to have wandered up into the "
"infinite glories of the heavens. Oops.")
searchpath.append(home)
localDict = {}
for d in searchpath:
if os.path.isdir(d):
if runtime.platformType != 'win32':
if os.stat(d)[stat.ST_UID] != os.getuid():
print "skipping %s because you don't own it" % d
continue # security, skip other people's directories
optfile = os.path.join(d, "options")
if os.path.exists(optfile):
try:
f = open(optfile, "r")
options = f.read()
exec options in localDict
except:
print "error while reading %s" % optfile
raise
break
    for k in list(localDict.keys()):
        if k.startswith("__"):
            del localDict[k]
return localDict
| 15,899
|