| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def cleanup(number_of_days, path, pattern=".*", delete=True):
"""
Removes files from the passed in path that are older than or equal
to the number_of_days
"""
logger.info("Running cleanup with params: days:{},path: {}, pattern: {},delete:{}".format(number_of_days, path, pattern, delete))
time_in_secs = time.time() - (number_of_days * 24 * 60 * 60)
for root, dirs, files in os.walk(path, topdown=False):
for filename in files:
full_path = os.path.join(root, filename)
if re.match(pattern, full_path):
stat = os.stat(full_path)
if stat.st_mtime <= time_in_secs:
remove(full_path, delete=delete)
if not os.listdir(root) and re.match(pattern, root):
remove(root, delete=delete)
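# Usage sketch (added for illustration; not part of the original snippet). It assumes this
# module's `logger` and `remove` helpers are configured, and the path below is hypothetical:
# cleanup(30, "/var/log/myapp", pattern=r".*\.log$", delete=False)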
| 5,340,500
|
def primitive_name(method_name):
"""Given a method_name, returns the corresponding Phylanx primitive.
This primarily used for mapping NumPy mapped_methods to Phylanx primitives,
but there are also other functions in python that would map to primitives
with different name in Phylanx, e.g., `print` is mapped to `cout`.
"""
primitive_name = mapped_methods.get(method_name)
if primitive_name is None:
primitive_name = method_name
return primitive_name
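# Usage sketch (illustrative; assumes a module-level `mapped_methods` dict such as
# {"print": "cout"}):
# primitive_name("print")   # -> "cout"
# primitive_name("argmin")  # -> "argmin" (no mapping, so the name passes through)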
| 5,340,501
|
def _split_pandas_data_with_ratios(data, ratios, seed=SEED, shuffle=False):
"""Helper function to split pandas DataFrame with given ratios
Note:
Implementation referenced from `this source
<https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test>`_.
Args:
data (pd.DataFrame): Pandas data frame to be split.
ratios (list of floats): list of ratios for split. The ratios have to sum to 1.
seed (int): random seed.
shuffle (bool): whether data will be shuffled when being split.
Returns:
list: List of pd.DataFrame split by the given specifications.
"""
if math.fsum(ratios) != 1.0:
raise ValueError("The ratios have to sum to 1")
split_index = np.cumsum(ratios).tolist()[:-1]
if shuffle:
data = data.sample(frac=1, random_state=seed)
splits = np.split(data, [round(x * len(data)) for x in split_index])
# Add split index (this makes splitting by group more efficient).
for i in range(len(ratios)):
splits[i]["split_index"] = i
return splits
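# Usage sketch (illustrative; assumes pandas as pd and the module-level SEED used above):
# df = pd.DataFrame({"x": range(10)})
# train, valid, test = _split_pandas_data_with_ratios(df, [0.8, 0.1, 0.1], shuffle=True)
# Each returned frame carries a "split_index" column (0, 1 and 2 respectively).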
| 5,340,502
|
def get_stock_information(stock, country, as_json=False):
"""
    This function retrieves fundamental financial information for the specified stock. The retrieved
    information is valuable additional data that can be combined with OHLC values to derive
    financial insights about the company behind the specified stock.
Args:
stock (:obj:`str`): symbol of the stock to retrieve its information from.
        country (:obj:`str`): name of the country the stock belongs to.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`dict` or :obj:`json`).
Returns:
:obj:`pandas.DataFrame` or :obj:`dict`- stock_information:
The resulting :obj:`pandas.DataFrame` contains the information fields retrieved from Investing.com
from the specified stock ; it can also be returned as a :obj:`dict`, if argument `as_json=True`.
If any of the information fields could not be retrieved, that field/s will be filled with
None values. If the retrieval process succeeded, the resulting :obj:`dict` will look like::
stock_information = {
"Stock Symbol": "AAPL",
"Prev. Close": 267.25,
"Todays Range": "263.45 - 268.25",
"Revenue": 260170000000.00003,
"Open": 267.27,
"52 wk Range": "142 - 268.25",
"EPS": 11.85,
"Volume": 23693550.0,
"Market Cap": 1173730000000.0,
"Dividend (Yield)": "3.08 (1.15%)",
"Average Vol. (3m)": 25609925.0,
"P/E Ratio": 22.29,
"Beta": 1.23,
"1-Year Change": "47.92%",
"Shares Outstanding": 4443236000.0,
"Next Earnings Date": "04/02/2020"
}
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if `stocks.csv` file was not found or errored.
IOError: raised if `stocks.csv` file is empty or errored.
RuntimeError: raised if scraping process failed while running.
ConnectionError: raised if the connection to Investing.com errored (did not return HTTP 200)
"""
if not stock:
raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock symbol.")
if not isinstance(stock, str):
raise ValueError("ERR#0027: stock argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
country = unidecode(country.strip().lower())
stock = unidecode(stock.strip().lower())
stocks = _get_stock_data_from_csv(country, stock)
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
if country not in get_stock_countries():
raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
if stock not in stocks['symbol'].lower():
raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
tag = stocks['tag']
stock = stocks['symbol']
url = f"https://www.investing.com/equities/{tag}"
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
req = requests.get(url, headers=head)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath("//dl[contains(@class, 'grid')]/div")
result = {}
result['Stock Symbol'] = stock
if not path_:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
if path_:
for elements_ in path_:
title_ = elements_[0].text_content()
value_ = elements_[1].text_content()
if title_ == "Day's Range":
title_ = 'Todays Range'
            if title_ not in result:  # `result` is a dict, so check its keys
                try:
                    result[title_] = float(value_.replace(',', ''))
                    continue
                except ValueError:
                    pass
            try:
                text = value_.strip()
                result[title_] = datetime.strptime(text, "%b %d, %Y").strftime("%d/%m/%Y")
                continue
            except ValueError:
                pass
            try:
                value = value_.strip()
                if 'B' in value:
                    value = float(value.replace('B', '').replace(',', '')) * 1e9
                elif 'T' in value:
                    value = float(value.replace('T', '').replace(',', '')) * 1e12
                result[title_] = value
                continue
            except ValueError:
                pass
return result
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
| 5,340,503
|
def manual_run_change_reporter(accounts):
"""Manual change reporting from the command line"""
app.logger.info("[ ] Executing manual change reporter task...")
try:
for account in accounts:
time1 = time.time()
rep = Reporter(account=account)
for monitor in rep.all_monitors:
if monitor.watcher:
app.logger.info("[ ] Running change finder for "
"account: {} technology: {}".format(account, monitor.watcher.index))
reporter_logic(account, monitor.watcher.index)
time2 = time.time()
app.logger.info('[@] Run Account %s took %0.1f s' % (account, (time2 - time1)))
app.logger.info("[+] Completed manual change reporting.")
except (OperationalError, InvalidRequestError, StatementError) as e:
app.logger.exception("[X] Database error processing cleaning up session.")
db.session.remove()
store_exception("scheduler-run-change-reporter", None, e)
raise e
| 5,340,504
|
def check_export_start_date(export_start_dates, export_end_dates,
export_day_range):
"""
Update export_start_date according to the export_end_date so that it could be export_end_date - EXPORT_DAY_RANGE.
Parameters:
export_start_date: dict
Read from params, values are strings of dates
export_end_date: dict
Calculated according to the data received.
The type of values are datetime.datetime
export_day_range: int
Number of days to report
Returns:
dict: {str: datetime.datetime}
The keys are "covid_ag" or "flu_ag"
The values are dates until when we export data
"""
for test_type in TEST_TYPES:
if export_start_dates[test_type] == "":
export_start_dates[test_type] = datetime(2020, 5, 26)
else:
export_start_dates[test_type] = datetime.strptime(
export_start_dates[test_type], '%Y-%m-%d')
# Only export data from -45 days to -5 days
export_start_dates[test_type] = compare_dates(
export_end_dates[test_type] - timedelta(days=export_day_range),
export_start_dates[test_type], "l")
if test_type == "covid_ag":
export_start_dates[test_type] = compare_dates(
export_start_dates[test_type], datetime(2020, 5, 26), "l")
return export_start_dates
| 5,340,505
|
def set_verbosity(module_name: str, verbose: bool = False, very_verbose: bool = False) -> logging.Logger:
"""
Used to set the verbosity of the logger.
:param module_name: Name of the module, e.g. ``__name__``.
:type module_name: str
:param verbose: Enables DEBUG level.
:type verbose: bool
:param very_verbose: Enables DEBUG level and the loggers from imported libraries.
:type very_verbose: bool
:return: A configured logger, which can be used throughout the code via ``logging.{LEVEL}()``.
:rtype: logging.Logger
"""
if very_verbose:
configure_console_logger(logging_level=logging.DEBUG, disable_external_lib_loggers=False)
elif verbose:
configure_console_logger(logging_level=logging.DEBUG)
else:
configure_console_logger(logging_level=logging.INFO)
return logging.getLogger(module_name)
| 5,340,506
|
def openie6_run(document_file, output, config=NLP_CONFIG, no_entity_filter=False):
"""
Initializes OpenIE6. Will generate the corresponding input file, reads the output and converts it to our
internal OpenIE format
:param document_file: input file with documents to generate
:param output: the output file
:param config: the nlp config
:param no_entity_filter: if true only sentences with two tags will be processed by OpenIE
:return: None
"""
# Read config
with open(config) as f:
conf = json.load(f)
openie6_dir = conf["openie6"]
# Prepare files
doc_count = count_documents(document_file)
logging.info('{} documents counted'.format(doc_count))
logging.info('Init spacy nlp...')
spacy_nlp = English() # just the language with no model
spacy_nlp.add_pipe("sentencizer")
doc2sentences = {}
if no_entity_filter:
for document_content in read_pubtator_documents(document_file):
doc = TaggedDocument(from_str=document_content, spacy_nlp=spacy_nlp)
if doc:
doc2sentences[doc.id] = [s.text for s in doc.sentence_by_id.values()]
else:
doc2sentences, doc2tags = filter_document_sentences_without_tags(doc_count, document_file, spacy_nlp)
doc_count = len(doc2tags)
openie6_input_file = f'{output}_pubtator'
openie6_raw_extractions = f'{output}_extractions'
if doc_count == 0:
print('no files to process - stopping')
else:
start = datetime.now()
# Process output
openie6_generate_openie6_input(doc2sentences, openie6_input_file)
# invoke OpenIE 6
openie6_invoke_toolkit(openie6_dir, openie6_input_file, openie6_raw_extractions)
# extract tuples
openie6_extract_tuples(openie6_raw_extractions, output)
print(f'removing temp file: {openie6_input_file}')
os.remove(openie6_input_file)
print(f'removing temp file: {openie6_raw_extractions}')
os.remove(openie6_raw_extractions)
print(" done in {}".format(datetime.now() - start))
| 5,340,507
|
def walk_attrs(module: ModuleType, attr_name, converter=Converter()) -> str:
"""
    Create stubs for the given class, including all of its attributes.
    :param module: the module that contains the attribute.
    :param attr_name: the name of the attribute (class) to create stubs for.
    :param converter: the type-name converter used to map types in signatures.
    :return: the generated stub source, or an empty string for dunder attributes.
"""
buf = StringList(convert_indents=True)
buf.indent_type = " "
if not is_dunder(attr_name):
obj = getattr(module, attr_name)
# TODO: case where obj is not a class
if not isinstance(obj, FunctionType):
bases = []
for base in obj.__bases__:
if base not in {System.Object, object}:
if base.__name__ in converter.type_mapping:
bases.append(converter.type_mapping[base.__name__])
else:
bases.append(base.__name__)
            bases = list(filter(lambda x: x is not Any, bases))
if bases:
buf.append(f"class {attr_name}({', '.join(bases)}):\n")
else:
buf.append(f"class {attr_name}:\n")
for child_attr_name in get_child_attrs(obj):
try:
child_obj = getattr(obj, child_attr_name)
except TypeError as e:
if str(e) in {
"instance property must be accessed through a class instance",
"property cannot be read",
}:
make_property(buf, child_attr_name)
continue
elif str(e) == "instance attribute must be accessed through a class instance":
print(f"{e.__class__.__name__}: '{e}' occurred for {attr_name}.{child_attr_name}")
continue
else:
raise e
# TODO: if isinstance(child_obj, FunctionType):
return_type, arguments = get_signature(child_obj, child_attr_name, converter)
with buf.with_indent_size(buf.indent_size + 1):
if arguments is not None and arguments:
signature = []
for idx, argument in enumerate(arguments.split(", ")):
signature.append(f"{'_' * (idx + 1)}: {converter.convert_type(argument)}")
line = f"def {child_attr_name}(self, {', '.join(signature)}) -> {return_type}: ..."
if len(line) > 88:
buf.blankline(ensure_single=True)
buf.append(f"def {child_attr_name}(")
with buf.with_indent_size(buf.indent_size + 2):
buf.append("self,")
for line in signature:
buf.append(f"{line},")
buf.append(f") -> {return_type}: ...\n")
else:
buf.append(line)
elif arguments is None:
buf.append(f"def {child_attr_name}(self, *args, **kwargs) -> {return_type}: ...")
elif not arguments:
# i.e. takes no arguments
buf.append(f"def {child_attr_name}(self) -> {return_type}: ...")
buf.blankline(ensure_single=True)
return str(buf)
return ''
| 5,340,508
|
def reformat_adata(
adata: AnnData, brain_region: str, num_seq_lanes: int, transgenes_list: str
):
"""
    Takes the user-specified inputs from the data_reformat script and transforms the
    DataFrame input into a usable AnnData output with group cell-count labels (df_obs).
    It also keeps genes in the index, since multiple Ensembl IDs can map onto the same gene.
"""
for i in range(1, num_seq_lanes + 1):
adata = obs_rename(adata, i, brain_region)
obs_seq_lanes_keys = [
int(seq_lane[1]) for seq_lane in adata.obs.index.str.split("_")
]
obs_seq_lanes_df = pd.DataFrame(
obs_seq_lanes_keys, index=adata.obs.index, columns=["seq_lane_number"]
)
print("Num seq_lanes parsed...")
# create bit labels for each transgene and its possible combinations.
gene_presence_df, _, cell_gene_flags, _ = gene_list_to_flag(adata, transgenes_list)
adata.obs[[col.upper() for col in gene_presence_df.columns]] = gene_presence_df
adata.obs["which_transgenes"] = cell_gene_flags
adata.obs["transgene_present"] = (
adata.obs["which_transgenes"].notnull().astype("str")
)
group_cell_count_labels = adata.obs["which_transgenes"].value_counts(dropna=False)
adata.obs["seq_lane"] = obs_seq_lanes_df
print("Group cell count labels generated")
if adata.var.index.has_duplicates:
print(f"Duplicate gene names in index (T/F): {adata.var.index.has_duplicates}")
adata.var = uniquify(adata.var)
else:
print(f"Duplicate gene names in index (T/F): {adata.var.index.has_duplicates}")
adata, __ = gene_mask(
adata, stringify_list(transgenes_list), col_name="transgene_mask"
)
adata, ribo_mask = gene_mask(adata, "^rp[sl][0-9]", col_name="ribo_mask")
adata, mito_mask = gene_mask(adata, "^mt*-", col_name="mito_mask")
adata.obs["percent_ribo"] = np.sum(adata[:, ribo_mask].X, axis=1) / np.sum(
adata.X, axis=1
)
adata.obs["percent_mito"] = np.sum(adata[:, mito_mask].X, axis=1) / np.sum(
adata.X, axis=1
)
adata.obs = adata.obs.drop(
columns=adata.obs.columns[adata.obs.columns.str.contains("temp")]
)
return (group_cell_count_labels, adata)
| 5,340,509
|
def chunks(list_, num_items):
"""break list_ into n-sized chunks..."""
results = []
for i in range(0, len(list_), num_items):
results.append(list_[i:i+num_items])
return results
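# Usage sketch (illustrative):
assert chunks(list(range(7)), 3) == [[0, 1, 2], [3, 4, 5], [6]]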
| 5,340,510
|
def form_requires_input(form):
"""
Returns True if the form has at least one question that requires input
"""
for question in form.get_questions([]):
if question["tag"] not in ("trigger", "label", "hidden"):
return True
return False
| 5,340,511
|
def read_dino_waterlvl_csv(fname, to_mnap=True, read_series=True):
"""Read dino waterlevel data from a dinoloket csv file.
Parameters
----------
fname : str
to_mnap : boolean, optional
if True a column with 'stand_m_tov_nap' is added to the dataframe
read_series : boolean, optional
if False only metadata is read, default is True
"""
logging.info(f"reading -> {os.path.split(fname)[-1]}")
p_meta = re.compile(
"Locatie,Externe aanduiding,X-coordinaat,Y-coordinaat, Startdatum, Einddatum"
)
p_data = re.compile(r"Locatie,Peildatum,Stand \(cm t.o.v. NAP\),Bijzonderheid")
with open(fname, "r") as f:
line = f.readline()
while line != "":
line = f.readline()
if p_meta.match(line):
meta = _read_dino_waterlvl_metadata(f, line)
if meta:
meta["metadata_available"] = True
else:
meta["metadata_available"] = False
meta["filename"] = fname
elif p_data.match(line):
if read_series:
measurements = _read_dino_waterlvl_measurements(f, line)
if to_mnap and measurements is not None:
measurements["stand_m_tov_nap"] = (
measurements["stand_cm_tov_nap"] / 100.0
)
else:
measurements = None
return measurements, meta
| 5,340,512
|
def random_range():
"""
    Encode 100 random values from the 32-bit integer space and print the sorted encodings.
"""
numbers_to_test = 100
integer_space = 4294967295.0
list_of_bins = []
for _ in range(numbers_to_test):
multiple = random.random()
value = multiple * integer_space
ipbin = encode(value)
list_of_bins.append(ipbin)
list_of_bins.sort()
for ip_bin in list_of_bins:
print(ip_bin)
another = '10.86.41.92'
number = iptoint(another)
another_bin = encode(number)
print(another_bin)
| 5,340,513
|
def on_same_fs(request):
"""
Accept a POST request to check access to a FS available by a client.
:param request:
`django.http.HttpRequest` object, containing mandatory parameters
filename and checksum.
"""
filename = request.POST['filename']
checksum_in = request.POST['checksum']
checksum = 0
try:
data = open(filename, 'rb').read(32)
checksum = zlib.adler32(data, checksum) & 0xffffffff
if checksum == int(checksum_in):
return HttpResponse(content=json.dumps({'success': True}),
content_type=JSON, status=200)
except (IOError, ValueError):
pass
return HttpResponse(content=json.dumps({'success': False}),
content_type=JSON, status=200)
| 5,340,514
|
def get_memo(expense_group: ExpenseGroup, payment_type: str=None) -> str:
"""
Get the memo from the description of the expense group.
:param expense_group: The expense group to get the memo from.
:param payment_type: The payment type to use in the memo.
:return: The memo.
"""
expense_fund_source = 'Reimbursable expense' if expense_group.fund_source == 'PERSONAL' \
else 'Corporate Credit Card expense'
unique_number = None
if 'settlement_id' in expense_group.description and expense_group.description['settlement_id']:
# Grouped by payment
reimbursement = Reimbursement.objects.filter(
settlement_id=expense_group.description['settlement_id']
).values('payment_number').first()
if reimbursement and reimbursement['payment_number']:
unique_number = reimbursement['payment_number']
else:
unique_number = expense_group.description['settlement_id']
elif 'claim_number' in expense_group.description and expense_group.description['claim_number']:
# Grouped by expense report
unique_number = expense_group.description['claim_number']
if payment_type:
# Payments sync
return 'Payment for {0} - {1}'.format(payment_type, unique_number)
elif unique_number:
memo = '{} - {}'.format(expense_fund_source, unique_number)
expense_group_settings: ExpenseGroupSettings = ExpenseGroupSettings.objects.get(
workspace_id=expense_group.workspace_id
)
if expense_group.fund_source == 'CCC':
if expense_group_settings.ccc_export_date_type != 'current_date':
date = get_transaction_date(expense_group)
date = (datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')).strftime('%d/%m/%Y')
memo = '{} - {}'.format(memo, date)
else:
if expense_group_settings.reimbursable_export_date_type != 'current_date':
date = get_transaction_date(expense_group)
date = (datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')).strftime('%d/%m/%Y')
memo = '{} - {}'.format(memo, date)
return memo
else:
# Safety addition
return 'Reimbursable expenses by {0}'.format(expense_group.description.get('employee_email')) \
if expense_group.fund_source == 'PERSONAL' \
else 'Credit card expenses by {0}'.format(expense_group.description.get('employee_email'))
| 5,340,515
|
def get_prefix_for_google_proxy_groups():
"""
Return a string prefix for Google proxy groups based on configuration.
Returns:
str: prefix for proxy groups
"""
prefix = config.get("GOOGLE_GROUP_PREFIX")
if not prefix:
raise NotSupported(
"GOOGLE_GROUP_PREFIX must be set in the configuration. "
"This namespaces the Google groups for security and safety."
)
return prefix
| 5,340,516
|
def client(tmpdir):
"""Test client for the API."""
tmpdir.chdir()
views.app.catchall = False
return webtest.TestApp(views.app)
| 5,340,517
|
def print(*args, **kwargs) -> None:
"""Shadows python print method in order to use typer.echo instead."""
typer.echo(*args, **kwargs)
| 5,340,518
|
def list_extract(items, arg):
"""Extract items from a list of containers
Uses Django template lookup rules: tries list index / dict key lookup first, then
tries to getattr. If the result is callable, calls with no arguments and uses the return
    value.
Usage: {{ list_of_lists|list_extract:1 }} (gets elt 1 from each item in list)
{{ list_of_dicts|list_extract:'key' }} (gets value of 'key' from each dict in list)
"""
def _extract(item):
try:
return item[arg]
except TypeError:
pass
attr = getattr(item, arg, None)
return attr() if callable(attr) else attr
return [_extract(item) for item in items]
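# Usage sketch (illustrative, outside of a Django template):
assert list_extract([{"k": 1}, {"k": 2}], "k") == [1, 2]
assert list_extract([[10, 20], [30, 40]], 1) == [20, 40]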
| 5,340,519
|
def get_links(url):
"""Scan the text for http URLs and return a set
of URLs found, without duplicates"""
# look for any http URL in the page
links = set()
text = get_page(url)
soup = BeautifulSoup(text, "lxml")
for link in soup.find_all('a'):
if 'href' in link.attrs:
newurl = link.attrs['href']
# resolve relative URLs
if newurl.startswith('/'):
newurl = urljoin(url, newurl)
# ignore any URL that doesn't now start with http
if newurl.startswith('http'):
links.add(newurl)
return links
| 5,340,520
|
def get_merkle_root(*leaves: Tuple[str]) -> MerkleNode:
"""Builds a Merkle tree and returns the root given some leaf values."""
if len(leaves) % 2 == 1:
leaves = leaves + (leaves[-1],)
def find_root(nodes):
newlevel = [
MerkleNode(sha256d(i1.val + i2.val), children=[i1, i2])
for [i1, i2] in _chunks(nodes, 2)
]
return find_root(newlevel) if len(newlevel) > 1 else newlevel[0]
return find_root([MerkleNode(sha256d(l)) for l in leaves])
| 5,340,521
|
def BeginBlock(layer_to_call: torch.nn.Module,
user_id: str = None,
ipu_id: int = None) -> torch.nn.Module:
"""
Define a block by modifying an existing PyTorch module.
You can use this with an existing PyTorch module instance, as follows:
>>> poptorch.BeginBlock(myModel.a_layer)
>>> poptorch.BeginBlock(MyNewLayer())
    The wrapped module and all sub-modules will be part of this block until
    a sub-module is similarly modified to be another block. In addition, if an IPU
    is specified, the module and its submodules will run on the specified IPU.
    You can combine multiple blocks into a stage.
:param layer_to_call: PyTorch module to assign to the block.
:param user_id: A user defined identifier for the block.
Blocks with the same id are considered as being a single block.
Block identifiers are also used to manually specify pipelines or
phases.
:param ipu_id: The id of the IPU to run on.
Note that the ``ipu_id`` is an index in a multi-IPU device
within PopTorch, and is separate and distinct from the device
ids used by ``gc-info``.
.. seealso:: :py:meth:`poptorch.Options.setExecutionStrategy`
"""
if not isinstance(layer_to_call, torch.nn.Module):
# Previously, the function returned a new model so would work for any
# callable. This was never documented but should still be permitted to
# work.
if callable(layer_to_call):
return LegacyBeginBlockFn(layer_to_call, user_id, ipu_id)
raise _impl.createPoptorchError(
"module is not an instance of torch.nn.Module or " + "function.")
class BlockModule(type(layer_to_call)):
def __call__(self, *input, **kwargs):
if Block._stages_manager is not None:
if self._user_id is None:
self.__dict__['_user_id'] = (
Block._stages_manager.nextAutoId())
Block._stages_manager.beginStage(self._user_id, self._ipu_id)
return super().__call__(*input, **kwargs)
if str(layer_to_call.__class__) == str(BlockModule):
raise _impl.createPoptorchError(
"module has already been assigned to a block.")
BlockModule.__name__ = type(layer_to_call).__name__
layer_to_call.__class__ = BlockModule
layer_to_call.__dict__['_user_id'] = user_id
layer_to_call.__dict__['_ipu_id'] = ipu_id
# Register custom function to copy / serialize wrappers
copyreg.pickle(BlockModule, _pickle_reduce_block)
# There is no need to return as it is passed by reference, but this is for
# backward compatibility
return layer_to_call
| 5,340,522
|
def set_attrs_via_get(obj, attr_names):
"""Sets attrs `attrs` for `obj` by sending a GET request."""
retrieved_obj = get_obj(obj)
for attr_name in attr_names:
retrieved_attr_value = getattr(retrieved_obj, attr_name)
setattr(obj, attr_name, retrieved_attr_value)
| 5,340,523
|
def print_samples_of_text_by_label(labeled_text, num_labels, num_samples):
"""Print random sample of documents from each label
Parameters
----------
labeled_text : pandas.DataFrame
Rows are documents, should have 'text' column and 'label' column
num_labels : int
Number of labels from which to draw samples.
num_samples : int
Number of samples to draw from each label group.
"""
label_counts = labeled_text.label.value_counts().nlargest(num_labels)
groups = labeled_text.groupby('label')
for label in label_counts.index:
print(f'\nLabel {label} containing {label_counts[label]} samples:')
print('\n'.join(groups.get_group(label).sample(num_samples).text))
| 5,340,524
|
def unbatch_nested_tensor(nested_tensor):
"""Squeeze the first (batch) dimension of each entry in ``nested_tensor``."""
return map_structure(lambda x: torch.squeeze(x, dim=0), nested_tensor)
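# Usage sketch (illustrative; assumes torch and the nest `map_structure` helper imported above):
# batched = {"obs": torch.zeros(1, 4), "reward": torch.zeros(1)}
# unbatch_nested_tensor(batched)   # -> shapes become (4,) and ()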
| 5,340,525
|
def DLXCPP(rows):
"""
Solves the Exact Cover problem by using the Dancing Links algorithm
described by Knuth.
Consider a matrix M with entries of 0 and 1, and compute a subset
of the rows of this matrix which sum to the vector of all 1's.
The dancing links algorithm works particularly well for sparse
matrices, so the input is a list of lists of the form::
[
[i_11,i_12,...,i_1r]
...
[i_m1,i_m2,...,i_ms]
]
where M[j][i_jk] = 1.
The first example below corresponds to the matrix::
1110
1010
0100
0001
which is exactly covered by::
1110
0001
and
::
1010
0100
0001
If soln is a solution given by DLXCPP(rows) then
[ rows[soln[0]], rows[soln[1]], ... rows[soln[len(soln)-1]] ]
is an exact cover.
Solutions are given as a list.
EXAMPLES::
sage: rows = [[0,1,2]]
sage: rows+= [[0,2]]
sage: rows+= [[1]]
sage: rows+= [[3]]
sage: [x for x in DLXCPP(rows)]
[[3, 0], [3, 1, 2]]
"""
if not rows:
return
x = dlx_solver(rows)
while x.search():
yield x.get_solution()
| 5,340,526
|
def row2dict(cursor, row):
""" タプル型の行データを辞書型に変換
@param cursor: カーソルオブジェクト
@param row: 行データ(tuple)
@return: 行データ(dict)
@see: http://docs.python.jp/3.3/library/sqlite3.html
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
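# Usage sketch (illustrative):
import sqlite3
_conn = sqlite3.connect(":memory:")
_cur = _conn.execute("SELECT 1 AS a, 'x' AS b")
assert row2dict(_cur, _cur.fetchone()) == {"a": 1, "b": "x"}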
| 5,340,527
|
def test_correct_auth_post(operation, test_client, constants, put_allowed_class_names,
headers_with_correct_pass_and_id):
"""
GIVEN a Flask application
WHEN a collection endpoint has a PUT request with correct user credentials
THEN check that a '401' status code is not returned
"""
API_NAME = constants["API_NAME"]
response = test_client.get(f"/{API_NAME}")
endpoints = json.loads(response.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint in put_allowed_class_names:
response = test_client.get(endpoints[endpoint])
x_auth = 'X-Authentication'
headers_with_correct_pass_and_id[x_auth] = response.headers[x_auth]
response_op = getattr(test_client, operation)(endpoints[endpoint],
headers=headers_with_correct_pass_and_id,
data=json.dumps(dict(foo="bar")))
assert response_op.status_code != 401
| 5,340,528
|
def merge(left, right):
"""this is used for merging two halves """
# print('inside Merge ')
result = [];
leftIndex = 0;
rightIndex = 0;
while leftIndex < len(left) and rightIndex < len(right):
if left[leftIndex] < right[rightIndex]:
result.append(left[leftIndex])
leftIndex += 1
else:
result.append(right[rightIndex])
rightIndex += 1
# print('merge', left, right)
# print('result', result)
# print('left elements ->', left[leftIndex:] + right[rightIndex:])
# Checking if any element was left
return result + left[leftIndex:] + right[rightIndex:]
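# Usage sketch (illustrative):
assert merge([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]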
| 5,340,529
|
def parse_query(query):
"""Parse the given query, returning a tuple of strings list (include, exclude)."""
exclude = re.compile(r'(?<=-")[^"]+?(?=")|(?<=-)\w+').findall(query)
for w in sorted(exclude, key=lambda i: len(i), reverse=True):
query = query.replace(w, '')
query = " " + query
return re.compile(r'(?<=[+ ]")[^"]+?(?=")|(?<=[+ ])\w+').findall(query), exclude
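# Usage sketch (illustrative): '-' terms (and -"..." phrases) are excluded, the rest included.
assert parse_query('apple -banana "red fruit"') == (['apple', 'red fruit'], ['banana'])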
| 5,340,530
|
def _m_verify_mg(state, method_name, multigoal, depth, verbose=0):
"""
Pyhop 2 uses this method to check whether a multigoal-method has achieved
the multigoal that it promised to achieve.
"""
goal_dict = _goals_not_achieved(state,multigoal)
if goal_dict:
raise Exception(f"depth {depth}: method {method_name} " + \
f"didn't achieve {multigoal}]")
if verbose >= 3:
print(f"depth {depth}: method {method_name} achieved {multigoal}")
return []
| 5,340,531
|
def eggs_attribute_decorator(eggs_style):
"""Applies the eggs style attribute to the function"""
def decorator(f):
f.eggs = eggs_style
@wraps(f)
def decorated_function(*args, **kwargs):
return f(*args, **kwargs)
return decorated_function
return decorator
| 5,340,532
|
def byte_size(num, suffix='B'):
"""
Return a formatted string indicating the size in bytes, with the proper
unit, e.g. KB, MB, GB, TB, etc.
    :arg num: The number of bytes
    :arg suffix: An arbitrary suffix, like `Bytes`
    :rtype: str
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
| 5,340,533
|
def cdsCoverage(genome_coverage, dict_cds, datatype, coverage):
"""Return Mean Coverage or Raw Counts for each CDS, or their promotor regions for tss and chip"""
genome_coverage = [map(int, genome_coverage[0]), map(int, genome_coverage[1])]
# CDS coverage is calculated from genome coverage on the entire gene
if datatype != 'tss' and datatype != 'chip':
for cds_id in dict_cds:
# Strand plus
plus = sum(genome_coverage[0][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
# Strand minus
minus = sum(genome_coverage[1][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
return dict_cds
# CDS coverage is calculated from genome coverage on the region [-250:ATG:+100]
else:
for cds_id in dict_cds:
# Strand plus
if int(dict_cds[cds_id][4]) == 1:
start = int(dict_cds[cds_id][2]) - 250
# Test position out of the first base
if start < 1:
start = 1
stop = int(dict_cds[cds_id][2]) + 2 + 100
# Test position out of the last base
if stop > len(genome_coverage[0]):
stop = len(genome_coverage[0])
plus = sum(genome_coverage[0][start-1:stop])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][start-1:stop])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
minus = sum(genome_coverage[1][start-1:stop])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][start-1:stop])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
# Strand minus: strand is set at -1
else:
start = int(dict_cds[cds_id][3]) + 250
# Test position out of the last base
if start > len(genome_coverage[0]):
start = len(genome_coverage[0])
stop = int(dict_cds[cds_id][3]) - 2 - 100
# Test position out of the first base
if stop < 1:
stop = 1
plus = sum(genome_coverage[0][stop-1:start])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][stop-1:start])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
minus = sum(genome_coverage[1][stop-1:start])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][stop-1:start])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
return dict_cds
| 5,340,534
|
def div88():
"""
    Returns the divider88 separator string.
:return: divider88
"""
return divider88
| 5,340,535
|
def laplace_noise(epsilon, shape, dtype, args):
"""
    Similar to foolbox, but a batched version.
    :param epsilon: strength of the noise
    :param shape: the output shape
    :param dtype: the output type
    :param args: provides the image bounds via ``args.min`` and ``args.max``
    :return: the noise for images
"""
scale = epsilon / np.sqrt(3) * (args.max - args.min)
noise = nprng.laplace(scale=scale, size=shape)
noise = noise.astype(dtype)
return noise
| 5,340,536
|
async def forecast(ctx, *args):
"""Reply sender with forecast."""
await forecast_controller(ctx, format_darksky_forecast, *args)
| 5,340,537
|
def while_u():
""" Lower case Alphabet letter 'u' pattern using Python while loop"""
row = 0
while row<4:
col = 0
while col<4:
if col%3==0 and row<3 or row==3 and col>0:
print('*', end = ' ')
else:
print(' ', end = ' ')
col += 1
print()
row += 1
| 5,340,538
|
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImproperlyConfigured if something goes wrong.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
raise ImproperlyConfigured("%s%s doesn't look like a module path" % (
error_prefix, dotted_path))
try:
module = import_module(module_path)
except ImportError as e:
msg = '%sError importing module %s: "%s"' % (
error_prefix, module_path, e)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
try:
attr = getattr(module, class_name)
except AttributeError:
raise ImproperlyConfigured('%sModule "%s" does not define a '
'"%s" attribute/class' %
(error_prefix, module_path, class_name))
return attr
| 5,340,539
|
def test_group_advanced_true_invisible_false_field_advanced_true_invisible_true_active_true(
sdk_client_fs: ADCMClient, path, app, login):
"""Field invisible
:param sdk_client_fs:
:param path:
:param app:
:param login:
:return:
"""
bundle = sdk_client_fs.upload_from_fs(path)
group_name = path.split("/")[-1]
cluster_name = "_".join(path.split("/")[-2:])
cluster = bundle.cluster_create(name=cluster_name)
app.driver.get("{}/cluster/{}/config".format
(app.adcm.url, cluster.cluster_id))
config = Configuration(app.driver)
fields = config.get_field_groups()
for field in fields:
assert not field.is_displayed(), field.get_attribute("class")
group_names = config.get_group_elements()
assert not group_names, group_names
if not config.advanced:
config.click_advanced()
assert config.advanced
group_active = config.group_is_active_by_name(group_name)
assert group_active
group_names = config.get_group_elements()
assert group_names, group_names
assert len(group_names) == 1
assert group_names[0].text == group_name
fields = config.get_field_groups()
for field in fields:
assert not field.is_displayed(), field.get_attribute("class")
| 5,340,540
|
def grid_convergence(lat, lon, radians=False):
"""
Given the latitude and longitude of a position, calculate the grid convergence
Args:
lat: latitude (degrees or radians)
lon: longitude (degrees or radians)
radians: true if lat/lon in radians
Returns: gamma, the grid convergence angle in radians or degrees
"""
lon0, lat0, _ = utm_origin_lla(lat, lon, radians=radians)
if radians:
return atan(tan(lon - lon0)*sin(lat))
else:
return rad2deg(atan(tand(lon - lon0)*sind(lat)))
| 5,340,541
|
def _make_index_item(resource_type):
""" """
id_prefix = "2c1|"
uuid_ = uuid.uuid4().hex
tpl = {
"access_roles": [
"guillotina.Reader",
"guillotina.Reviewer",
"guillotina.Owner",
"guillotina.Editor",
"guillotina.ContainerAdmin",
],
"access_users": ["root"],
"depth": 2,
"elastic_index": "{0}__{1}-{2}".format(
ES_INDEX_NAME, resource_type.lower(), uuid_
),
"id": None,
"uuid": id_prefix + uuid_,
}
with open(str(FHIR_EXAMPLE_RESOURCES / (resource_type + ".json")), "r") as fp:
data = json.load(fp)
tpl["id"] = data["id"]
tpl[resource_type.lower() + "_resource"] = data
return tpl
| 5,340,542
|
def mol_to_graph(mol):
"""
Converts Mol object to a graph compatible with Pytorch-Geometric
Args:
mol (Mol): RDKit Mol object
Returns:
        node_feats (FloatTensor): features for each node, one-hot encoded by element
        edges (LongTensor): edges in COO format
        edge_feats (FloatTensor): bond order for each edge
        node_pos (FloatTensor): x-y-z coordinates of each node
"""
node_pos = torch.FloatTensor(dt.get_coordinates_of_conformer(mol))
bonds = dt.get_bonds_matrix(mol)
edge_tuples = np.argwhere(bonds)
edges = torch.LongTensor(edge_tuples).t().contiguous()
node_feats = torch.FloatTensor([one_of_k_encoding_unk(a.GetSymbol(), mol_atoms) for a in mol.GetAtoms()])
# edge_feats = torch.FloatTensor([one_of_k_encoding(bonds[i,j], [1.0, 2.0, 3.0, 1.5]) for i,j in edge_tuples])
edge_feats = torch.FloatTensor([bonds[i, j] for i, j in edge_tuples]).view(-1, 1)
return node_feats, edges, edge_feats, node_pos
| 5,340,543
|
def _tournament(evaluated_population: List[Eval], tournament_size: int = 5,
previous_winner: Chromosome = None) -> Chromosome:
"""Selects tournament_size number of chromosomes to 'compete' against each other. The chromosome with the highest
fitness score 'wins' the tournament.
Params:
- evaluated_population (list<tuple<list<int>,float>>): The evaluated population
- tournament_size (int): Specifies the size of the tournament. When equal to 1, the
method is equivalent to random selection. The higher the tournament size, the higher the
bias towards the fitter individuals.
- previous_winner (list<int>): The winner of the previous tournament. If the same chromosome wins both tournaments,
then the runner-up to the current tournament is chosen.
Returns:
- winner (list<int>): The chromosome with the highest score in the tournament
"""
tournament = random.sample(evaluated_population, tournament_size)
    # Sort descending so the highest-scoring chromosome comes first, as the docstring promises.
    tournament.sort(key=lambda evaluated_chromosome: evaluated_chromosome[1], reverse=True)
winner = tournament[0][0] # pylint: disable=E1136
if winner == previous_winner:
winner = tournament[1][0] # pylint: disable=E1136
return winner
| 5,340,544
|
def interpolate_drift_table(table, start=0, skip=0, smooth=10):
"""
Smooth and interpolate a table
:param table: fxyz (nm) array
:param start: in case of renumbering needed : first frame
:param skip: how many frame were skipped
:param smooth: gaussian smoothing sigma
:return: interpolated table
"""
w = table.shape[1]
if smooth > 0:
table = smooth_drift_table(table, sigma=smooth)
table = update_frame_number(table, start=start, skip=skip)
time = table[:, 0]
# print(time.shape)
time_new = np.arange(1, max(time) + 1)
new_table = np.zeros((len(time_new), w))
new_table[:, 0] = time_new
for col in range(1, w):
y = table[:, col]
# print(y.shape)
f = interpolate.interp1d(time, y, fill_value='extrapolate')
ynew = f(time_new)
new_table[:, col] = ynew
logger.info(f'interpolating from {len(time)} to {len(ynew)} frames')
return new_table
| 5,340,545
|
def on_mrsim_config_change():
"""Update the mrsim.config dict. Only includes density, volume, and #sidebands"""
existing_data = ctx.states["local-mrsim-data.data"]
fields = ["integration_density", "integration_volume", "number_of_sidebands"]
# if existing_data is not None:
print(existing_data["config"])
existing_data["trigger"] = {"simulate": True, "method_index": None}
for item in fields:
existing_data["config"][item] = ctx.states[f"{item}.value"]
return prep_valid_data_for_simulation(existing_data)
| 5,340,546
|
def createuser(name, email, password, role):
"""
Creates a new user with specified roles
:return:
"""
from middleman.models import Role
user = user_controller.create(current_app.config['SECRET_KEY'], name, email, password)
role = db.session.query(Role).filter(Role.name == role).one()
user_controller.set_roles(user, role)
db.session.add(user)
db.session.commit()
| 5,340,547
|
def decrypt(encrypted, passphrase):
"""takes encrypted message in base64 and key, returns decrypted string without spaces on the left
IMPORTANT: key must be a multiple of 16.
Finaly, the strip function is used to remove the spaces from the left of the message"""
aes = AES.new(passphrase, AES.MODE_ECB)
return aes.decrypt(base64.b64decode(encrypted)).lstrip().decode('utf-8')
| 5,340,548
|
def automig(name):
"""
Create auto south migration and apply it to database.
"""
api.local('./manage.py schemamigration %s --auto' % name)
api.local('./manage.py migrate %s' % name)
| 5,340,549
|
def multi_ale_plot_1d(
features,
title=None,
xlabel=None,
ylabel=None,
x_rotation=20,
markers=("o", "v", "^", "<", ">", "x", "+"),
colors=plt.rcParams["axes.prop_cycle"].by_key()["color"],
zorders=None,
xlabel_skip=2,
format_xlabels=True,
show_full=True,
margin=0.03,
rngs=None,
**kwargs,
):
"""Plots ALE function of multiple specified features based on training set.
Multiple first-order (1D) ALE plots will be computed and plotted on the same plot.
Note that currently, only concave hull plotting of Monte-Carlo replicas is
supported.
Parameters
----------
features : iterable of column label
Features for which to plot the 1D ALE plot.
title : str or None
Figure title.
xlabel : str or None
Figure x-label.
ylabel : str or None
Figure y-label.
    x_rotation : int or float
        Rotation of the x-axis labels, in degrees.
markers : iterable of str
Matplotlib markers used to differentiate the different features.
colors : iterable
Matplotlib colors used to differentiate the different features.
zorders : iterable of int or None
zorder used for each feature, with the hull (if applicable) having the same
zorder as the ALE line plot. By default, the last feature will have the
highest, and the first feature the lowest zorder.
xlabel_skip : int
Only plot an xlabel marker every `xlabel_skip` label.
format_xlabels : bool
If True, apply xlabel formatting according to the above options.
show_full : bool
If True, display the ALE plot generated using all the data, as opposed to
simply the bootstrap uncertainties.
margin : float
Fraction by which to multiply the plotted coordinate range to yield the
corresponding margin. This is applied separately for x and y.
rngs : iterable of numpy Generator or None
If given, the number of items given should match the number of features given.
Other Parameters
----------------
**kwargs : See alepython.ale_plot.
"""
if "quantile_axis" in kwargs:
raise NotImplementedError("'quantile_axis' is not implemented yet.")
if zorders is None:
zorders = list(range(2, 2 + len(features)))
if rngs is not None:
if len(rngs) != len(features):
raise ValueError("Number of `rngs` should match number of `features`.")
else:
rng = kwargs.get("rng")
rngs = [rng] * len(features)
quantile_list = []
ale_list = []
mc_data_list = []
for feature, rng in zip(
tqdm(
features,
desc="Calculating feature ALEs",
disable=not kwargs.get("verbose", False),
),
rngs,
):
out = ale_plot(
**{
**kwargs,
# Override certain kwargs essential to this function.
**dict(
features=feature,
rng=rng,
quantile_axis=False,
return_data=True,
return_mc_data=True,
fig=plt.figure(), # Create dummy figure.
ax=None,
),
}
)
if len(out) == 3:
temp_fig, _, (quantiles, ale) = out
mc_data = None
else:
temp_fig, _, (quantiles, ale), mc_data = out
# Close the unneeded temporary figure.
plt.close(temp_fig)
# Record the generated data for this feature.
quantile_list.append(quantiles)
ale_list.append(ale)
mc_data_list.append(mc_data)
# Construct quantiles from the individual quantiles, minimising the amount of interpolation.
combined_quantiles = np.vstack([quantiles[None] for quantiles in quantile_list])
final_quantiles = np.mean(combined_quantiles, axis=0)
mod_quantiles = np.arange(len(quantiles))
if kwargs.get("grid_kwargs") is None:
grid_kwargs = {}
if kwargs.get("hull_polygon_kwargs") is None:
hull_polygon_kwargs = {}
else:
hull_polygon_kwargs = kwargs["hull_polygon_kwargs"]
if "alpha" not in hull_polygon_kwargs:
hull_polygon_kwargs["alpha"] = 0.2
fig = kwargs.get("fig")
ax = kwargs.get("ax")
if fig is None and ax is None:
logger.debug("Getting current figure and axis.")
fig, ax = plt.gcf(), plt.gca()
elif fig is not None and ax is None:
logger.debug("Creating axis from figure {}.", fig)
ax = fig.add_subplot(111)
x_lims = [np.inf, -np.inf]
y_lims = [np.inf, -np.inf]
def update_lims(v, lims):
v_min = np.min(v)
v_max = np.max(v)
if v_min < lims[0]:
lims[0] = v_min
if v_max > lims[1]:
lims[1] = v_max
for feature, quantiles, ale, marker, color, zorder, mc_data in zip(
features,
quantile_list,
ale_list,
markers,
colors,
zorders,
mc_data_list,
):
if mc_data is not None:
# Compute the hull and plot it as a Polygon.
mod_mc_data = tuple(
(np.interp(mc_quantiles, final_quantiles, mod_quantiles), mc_ale)
for mc_quantiles, mc_ale in mc_data
)
mc_hull_points = _compute_mc_hull_poly_points(
mod_mc_data,
np.linspace(
np.min([mc_quantiles[0] for mc_quantiles, mc_ale in mod_mc_data]),
np.max([mc_quantiles[-1] for mc_quantiles, mc_ale in mod_mc_data]),
kwargs.get("monte_carlo_hull_points", 300) // 2,
),
)
ax.add_patch(
Polygon(
mc_hull_points,
**{
**hull_polygon_kwargs,
**dict(
facecolor=color,
zorder=zorder,
label=feature if not show_full else None,
),
},
)
)
# Update plot limits.
update_lims(mc_hull_points[:, 0], x_lims)
update_lims(mc_hull_points[:, 1], y_lims)
if show_full:
# Interpolate each of the quantiles relative to the accumulated final quantiles.
interp_quantiles = np.interp(quantiles, final_quantiles, mod_quantiles)
ax.plot(
interp_quantiles,
ale,
marker=marker,
label=feature,
c=color,
zorder=zorder,
)
# Update plot limits.
update_lims(interp_quantiles, x_lims)
update_lims(ale, y_lims)
# Set plot limits.
x_margin = margin * (x_lims[1] - x_lims[0])
ax.set_xlim(x_lims[0] - x_margin, x_lims[1] + x_margin)
y_margin = margin * (y_lims[1] - y_lims[0])
ax.set_ylim(y_lims[0] - y_margin, y_lims[1] + y_margin)
ax.legend(loc="best", ncol=2)
if format_xlabels:
ax.set_xticks(mod_quantiles[::xlabel_skip])
ax.set_xticklabels(_sci_format(final_quantiles[::xlabel_skip], scilim=0.6))
ax.xaxis.set_tick_params(rotation=x_rotation)
else:
ax.set_xticks(mod_quantiles)
ax.set_xticklabels(final_quantiles[::xlabel_skip])
if title is None:
mc_string = (
kwargs.get("monte_carlo_rep", 50) if kwargs.get("monte_carlo") else "False"
)
_ax_title(
ax,
f"First-order ALE of features '{', '.join(map(str, features))}'",
f"Bins : {len(quantile_list[0]) - 1} - Monte-Carlo : {mc_string}",
)
else:
fig.suptitle(title)
ax.set_xlabel(xlabel, va="center_baseline")
ax.set_ylabel(ylabel)
if "linestyle" not in grid_kwargs:
grid_kwargs["linestyle"] = "--"
if "alpha" not in grid_kwargs:
grid_kwargs["alpha"] = 0.4
if grid_kwargs:
ax.grid(**grid_kwargs)
return fig, ax, final_quantiles, quantile_list, ale_list, mc_data_list
| 5,340,550
|
def test_array_field_exact_no_match(Query):
"""
Test exact filter on a array field of string.
"""
schema = Schema(query=Query)
query = """
query {
events (tags: ["concert", "music"]) {
edges {
node {
name
}
}
}
}
"""
result = schema.execute(query)
assert not result.errors
assert result.data["events"]["edges"] == []
| 5,340,551
|
def load_ckpt(ckpt):
"""
    :param ckpt: a checkpoint directory or a .pb file
"""
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.2
if os.path.isdir(ckpt):
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(config=config)
restore_ckpt(sess, os.path.abspath(ckpt))
elif os.path.isfile(ckpt) and ckpt.endswith('.pb'):
graph = load_graph(ckpt)
with graph.as_default():
sess = tf.Session(graph=graph, config=config)
else:
print("Load ckpt failed")
exit(-1)
return sess, graph
| 5,340,552
|
async def total_conversations(request: HistoryQuery = HistoryQuery(month=6),
collection: str = Depends(Authentication.authenticate_and_get_collection)):
"""Fetches the counts of conversations of the bot for previous months."""
range_value, message = HistoryProcessor.total_conversation_range(
collection, request.month
)
return {"data": range_value, "message": message}
| 5,340,553
|
def tiger2dot(tiger_filename, dot_filename):
"""
    Generates a file in the Graphviz DOT format containing the abstract syntax
    tree corresponding to a Tiger program.
    The helper function C{syntactic_analysis} is used to perform the lexical and
    syntactic analysis, during which any error in the Tiger program is reported.
    Then, the helper function C{generate_dot} is used to write the abstract
    syntax tree to the DOT file.
    @type tiger_filename: C{str}
    @param tiger_filename: Absolute path to the file containing the source code
        of the Tiger program.
    @type dot_filename: C{str}
    @param dot_filename: Absolute path to the file where the resulting DOT file
        will be generated. If a file already exists at that path, it will be
        overwritten.
    @raise PyTiger2CError: Besides the exceptions raised by each of the helper
        functions, this function can raise this exception when an error occurs
        while reading the file containing the Tiger program to be translated or
        while writing the resulting abstract syntax tree to the specified DOT file.
"""
try:
with codecs.open(tiger_filename, encoding='utf-8', mode='rb') as input_fd:
ast = syntactic_analysis(input_fd)
except IOError:
raise PyTiger2CError(message='Could not open the Tiger input file')
try:
with codecs.open(dot_filename, encoding='utf-8', mode='wb') as output_fd:
generate_dot(ast, output_fd)
except IOError:
        raise PyTiger2CError(message='Could not open the output file')
| 5,340,554
|
def update_app_trending():
"""
Update trending for all apps.
Spread these tasks out successively by 15 seconds so they don't hit
Monolith all at once.
"""
chunk_size = 50
seconds_between = 15
all_ids = list(Webapp.objects.filter(status=amo.STATUS_PUBLIC)
.values_list('id', flat=True))
countdown = 0
for ids in chunked(all_ids, chunk_size):
update_trending.delay(ids, countdown=countdown)
countdown += seconds_between
| 5,340,555
|
def log_pool_worker_start(metric_name, worker_name, data, args):
"""
Logging method for processing pool workers.
"""
logging.debug('{0} :: {1}\n'
'\tData = {2} rows,'
'\tArgs = {3},'
'\tPID = {4}'.format(metric_name, worker_name, len(data),
str(args), getpid()))
| 5,340,556
|
def restler_fuzzable_datetime(*args, **kwargs) :
""" datetime primitive
@param args: The argument with which the primitive is defined in the block
of the request to which it belongs to. This is a date-time
primitive and therefore the arguments will be added to the
existing candidate values for date-time mutations.
@type args: Tuple
@param kwargs: Optional keyword arguments.
@type kwargs: Dict
@return: A tuple of the primitive's name and its default value or its tag
both passed as arguments via the restler grammar.
@rtype : Tuple
"""
field_name = args[0]
quoted = False
if QUOTED_ARG in kwargs:
quoted = kwargs[QUOTED_ARG]
examples=[]
if EXAMPLES_ARG in kwargs:
examples = kwargs[EXAMPLES_ARG]
param_name = None
if PARAM_NAME_ARG in kwargs:
param_name = kwargs[PARAM_NAME_ARG]
writer_variable = None
if WRITER_VARIABLE_ARG in kwargs:
writer_variable = kwargs[WRITER_VARIABLE_ARG]
return sys._getframe().f_code.co_name, field_name, quoted, examples, param_name, writer_variable
| 5,340,557
|
def intersect(table_dfs, col_key):
""" intsc tables by column
"""
col_key_vals = list(unique_everseen(chain(*(
table_df[col_key] for table_df in table_dfs))))
lookup_dcts = [lookup_dictionary(table_df, col_key)
for table_df in table_dfs]
intscd_rows = []
for val in col_key_vals:
row = {}
if val and all(val in lookup_dct for lookup_dct in lookup_dcts):
for lookup_dct in lookup_dcts:
row.update(lookup_dct[val])
intscd_rows.append(row)
intscd_col_keys = list(unique_everseen(chain(*table_dfs)))
intscd_df = pandas.DataFrame.from_dict(intscd_rows)[intscd_col_keys]
return intscd_df
| 5,340,558
|
def p_class_def(p):
"""
class_def : CLASS TYPE INHERITS TYPE OCUR feature_list CCUR
| CLASS TYPE OCUR feature_list CCUR
"""
if len(p) == 8:
p[0] = ast.ClassDeclarationNode(p[2], p[6], p[4])
else:
p[0] = ast.ClassDeclarationNode(p[2], p[4])
p[0].set_pos(p.lineno(2), find_column(p.lexer.lexdata, p.lexpos(2)))
| 5,340,559
|
def ask_peer(peer_addr, req_type, body_dict, return_json=True):
"""
Makes request to peer, sending request_msg
:param peer_addr: (IP, port) of peer
:param req_type: type of request for request header
:param body_dict: dictionary of body
:param return_json: determines if json or string response should be returned
:return: string response of peer
"""
request_msg = create_request({"type": req_type}, body_dict)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client.settimeout(5)
try:
client.connect(peer_addr)
client.sendall(request_msg.encode())
data = client.recv(1024).decode()
except (socket.error, socket.timeout):
return None
if not data:
return None
return data if not return_json else json.loads(data)
| 5,340,560
|
def generate_invoices(based_on_date=None):
"""
Generates all invoices for the past month.
"""
today = based_on_date or datetime.date.today()
invoice_start, invoice_end = get_previous_month_date_range(today)
log_accounting_info("Starting up invoices for %(start)s - %(end)s" % {
'start': invoice_start.strftime(USER_DATE_FORMAT),
'end': invoice_end.strftime(USER_DATE_FORMAT),
})
all_domain_ids = [d['id'] for d in Domain.get_all(include_docs=False)]
for domain_doc in iter_docs(Domain.get_db(), all_domain_ids):
domain_obj = Domain.wrap(domain_doc)
if not domain_obj.is_active:
continue
try:
invoice_factory = DomainInvoiceFactory(invoice_start, invoice_end, domain_obj)
invoice_factory.create_invoices()
log_accounting_info("Sent invoices for domain %s" % domain_obj.name)
except CreditLineError as e:
log_accounting_error(
"There was an error utilizing credits for "
"domain %s: %s" % (domain_obj.name, e),
show_stack_trace=True,
)
except InvoiceError as e:
log_accounting_error(
"Could not create invoice for domain %s: %s" % (domain_obj.name, e),
show_stack_trace=True,
)
except Exception as e:
log_accounting_error(
"Error occurred while creating invoice for "
"domain %s: %s" % (domain_obj.name, e),
show_stack_trace=True,
)
all_customer_billing_accounts = BillingAccount.objects.filter(is_customer_billing_account=True)
for account in all_customer_billing_accounts:
try:
if account.invoicing_plan == InvoicingPlan.QUARTERLY:
customer_invoice_start = invoice_start - relativedelta(months=2)
elif account.invoicing_plan == InvoicingPlan.YEARLY:
customer_invoice_start = invoice_start - relativedelta(months=11)
else:
customer_invoice_start = invoice_start
invoice_factory = CustomerAccountInvoiceFactory(
account=account,
date_start=customer_invoice_start,
date_end=invoice_end
)
invoice_factory.create_invoice()
        except CreditLineError as e:
            log_accounting_error(
                "There was an error utilizing credits for "
                "customer account %s: %s" % (account.name, e),
                show_stack_trace=True,
            )
        except InvoiceError as e:
            log_accounting_error(
                "Could not create invoice for customer account %s: %s" % (account.name, e),
                show_stack_trace=True,
            )
        except Exception as e:
            log_accounting_error(
                "Error occurred while creating invoice for "
                "customer account %s: %s" % (account.name, e),
                show_stack_trace=True,
            )
if not settings.UNIT_TESTING:
_invoicing_complete_soft_assert(False, "Invoicing is complete!")
| 5,340,561
|
def describe_instances_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"ec2Id",
"Type",
"State",
"ec2Name",
"PrivateIPAddress",
"PublicIPAddress",
"KeyPair"
))
| 5,340,562
|
def ordered_scaffold_split(dataset, lengths, chirality=True):
"""
Split a dataset into new datasets with non-overlapping scaffolds and sorted w.r.t. number of each scaffold.
Parameters:
dataset (Dataset): dataset to split
        lengths (list of int): expected length for each split. Note that the current
            implementation uses a fixed 0.8/0.1/0.1 train/valid/test split, so the
            results may differ from ``lengths`` due to rounding.
"""
frac_train, frac_valid, frac_test = 0.8, 0.1, 0.1
scaffold2id = defaultdict(list)
for idx, smiles in enumerate(dataset.smiles_list):
scaffold = MurckoScaffold.MurckoScaffoldSmiles(smiles=smiles, includeChirality=chirality)
scaffold2id[scaffold].append(idx)
scaffold2id = {key: sorted(value) for key, value in scaffold2id.items()}
scaffold_sets = [
scaffold_set for (scaffold, scaffold_set) in sorted(
scaffold2id.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
]
train_cutoff = frac_train * len(dataset)
valid_cutoff = (frac_train + frac_valid) * len(dataset)
train_idx, valid_idx, test_idx = [], [], []
for scaffold_set in scaffold_sets:
if len(train_idx) + len(scaffold_set) > train_cutoff:
if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff:
test_idx.extend(scaffold_set)
else:
valid_idx.extend(scaffold_set)
else:
train_idx.extend(scaffold_set)
return torch_data.Subset(dataset, train_idx), torch_data.Subset(dataset, valid_idx), torch_data.Subset(dataset, test_idx)
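# Usage sketch (hedged): `dataset` is assumed to expose a `smiles_list` attribute and be indexable,
# as required above. The Murcko scaffold groups molecules sharing a ring/linker framework, e.g.
# MurckoScaffold.MurckoScaffoldSmiles(smiles='c1ccccc1CC(=O)O', includeChirality=True) -> 'c1ccccc1'.
# train_set, valid_set, test_set = ordered_scaffold_split(dataset, lengths=None)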
| 5,340,563
|
def read_plain_byte_array(file_obj, count):
"""Read `count` byte arrays using the plain encoding."""
return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0]) for i in range(count)]
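# Minimal self-contained check (assumption: this mirrors Parquet's PLAIN encoding for BYTE_ARRAY,
# where each value is a 4-byte little-endian length prefix followed by the raw bytes).
import io
import struct

_demo_buf = io.BytesIO(struct.pack(b"<i", 3) + b"foo" + struct.pack(b"<i", 2) + b"ok")
assert read_plain_byte_array(_demo_buf, 2) == [b"foo", b"ok"]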
| 5,340,564
|
def test_product_create_without_model_permissions(api_client, user, list_url, product_data):
"""
Tests that a user without permissions cannot create a product.
"""
response = api_client.post(list_url, data=product_data)
assert response.status_code == 401
api_client.force_authenticate(user=user)
response = api_client.post(list_url, data=product_data)
assert response.status_code == 403
| 5,340,565
|
def find_django_migrations_module(module_name):
""" Tries to locate <module_name>.migrations_django (without actually importing it).
Appends either ".migrations_django" or ".migrations" to module_name.
For details why:
https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
"""
import imp
try:
module_info = imp.find_module(module_name)
module = imp.load_module(module_name, *module_info)
imp.find_module('migrations_django', module.__path__)
return module_name + '.migrations_django'
except ImportError:
return module_name + '.migrations'
| 5,340,566
|
def test_io_import_bom_rf3_shape():
"""Test the importer Bom RF3."""
root_path = pysteps.rcparams.data_sources["bom"]["root_path"]
rel_path = os.path.join("prcp-cscn", "2", "2018", "06", "16")
filename = os.path.join(root_path, rel_path, "2_20180616_100000.prcp-cscn.nc")
precip, _, _ = pysteps.io.import_bom_rf3(filename)
assert precip.shape == (512, 512)
| 5,340,567
|
def big_number(int_in):
"""Converts a potentially big number into a lisible string.
Example:
- big_number(10000000) returns '10 000 000'.
"""
s = str(int_in)
position = len(s)
counter = 0
out = ''
while position != 0:
counter += 1
position -= 1
out = s[position] + out
if counter % 3 == 0 and position != 0:
out = " " + out
    return out
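# Quick illustrative checks of the grouping-by-thousands behaviour:
assert big_number(10000000) == '10 000 000'
assert big_number(1234) == '1 234'
assert big_number(12) == '12'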
| 5,340,568
|
def setup(app): # lint-amnesty, pylint: disable=redefined-outer-name
"""Sphinx extension: run sphinx-apidoc."""
event = 'builder-inited'
app.connect(event, on_init)
| 5,340,569
|
def action_list_to_string(action_list):
"""Util function for turning an action list into pretty string"""
action_list_string = ""
for idx, action in enumerate(action_list):
action_list_string += f"{action['name']} ({action['action']['class_name']})"
if idx == len(action_list) - 1:
continue
action_list_string += " => "
return action_list_string
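# Illustrative call with hypothetical action dicts shaped like the ones this util expects:
_demo_actions = [
    {"name": "validate", "action": {"class_name": "ValidationAction"}},
    {"name": "store", "action": {"class_name": "StoreResultAction"}},
]
assert action_list_to_string(_demo_actions) == "validate (ValidationAction) => store (StoreResultAction)"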
| 5,340,570
|
def select_demands_matrix(vrp_params, name=None):
"""
Selects a demands matrix subject to being loaded, for specified VRP.
This is an interactive function, where the name of
the text file is requested.
:param vrp_params: VRP parameters.
:param name: Data file name.
"""
if name is None:
matrix_name = input("Target Node Demands Matrix Name > ")
elif name == "None":
vrp_params.cvrp_node_demand = None
vrp_params.node_demands_name = name
return
else:
matrix_name = name
temp_data = load_data(matrix_name, "node_demands")
if temp_data is not None:
vrp_params.cvrp_node_demand = temp_data
vrp_params.node_demands_name = matrix_name
| 5,340,571
|
def segment(img_fpath, bbox_, new_size=None):
""" Runs grabcut """
printDBG('[segm] segment(img_fpath=%r, bbox=%r)>' % (img_fpath, bbox_))
num_iters = 5
bgd_model = np.zeros((1, 13 * 5), np.float64)
fgd_model = np.zeros((1, 13 * 5), np.float64)
mode = cv2.GC_INIT_WITH_MASK
# Initialize
# !!! CV2 READS (H,W) !!!
# WH Unsafe
img_resz, bbox_resz = resize_img_and_bbox(img_fpath, bbox_, new_size=new_size)
# WH Unsafe
(img_h, img_w) = img_resz.shape[:2] # Image Shape
printDBG(' * img_resz.shape=%r' % ((img_h, img_w),))
# WH Safe
tlbr = ut.xywh_to_tlbr(bbox_resz, (img_w, img_h)) # Rectangle ANNOTATION
(x1, y1, x2, y2) = tlbr
rect = tuple(bbox_resz) # Initialize: rect
printDBG(' * rect=%r' % (rect,))
printDBG(' * tlbr=%r' % (tlbr,))
# WH Unsafe
_mask = np.zeros((img_h, img_w), dtype=np.uint8) # Initialize: mask
_mask[y1:y2, x1:x2] = cv2.GC_PR_FGD # Set ANNOTATION to cv2.GC_PR_FGD
# Grab Cut
tt = ut.Timer(' * cv2.grabCut()', verbose=DEBUG_SEGM)
cv2.grabCut(img_resz, _mask, rect, bgd_model, fgd_model, num_iters, mode=mode)
tt.toc()
img_mask = np.where((_mask == cv2.GC_FGD) + (_mask == cv2.GC_PR_FGD), 255, 0).astype('uint8')
# Crop
chip = img_resz[y1:y2, x1:x2]
chip_mask = img_mask[y1:y2, x1:x2]
chip_mask = clean_mask(chip_mask)
    chip_mask = np.array(chip_mask, np.float64) / 255.0  # np.float alias was removed in NumPy 1.24
# Mask the value of HSV
chip_hsv = cv2.cvtColor(chip, cv2.COLOR_RGB2HSV)
    chip_hsv = np.array(chip_hsv, dtype=np.float64) / 255.0
chip_hsv[:, :, 2] *= chip_mask
chip_hsv = np.array(np.round(chip_hsv * 255.0), dtype=np.uint8)
seg_chip = cv2.cvtColor(chip_hsv, cv2.COLOR_HSV2RGB)
return seg_chip, img_mask
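# Usage sketch (hypothetical path and xywh bbox; new_size is the target (width, height)):
# seg_chip, img_mask = segment('/path/to/image.jpg', (50, 40, 200, 160), new_size=(400, 300))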
| 5,340,572
|
def initialize_db():
"""Ensure that the database has the CURRENT_KEY key and exists"""
db = sqlite3.connect(DATABASE)
cur = db.cursor()
cur.execute(
"""CREATE TABLE IF NOT EXISTS survey_results (id INTEGER PRIMARY KEY AUTOINCREMENT, data BLOB);""")
cur.close()
db.close()
| 5,340,573
|
def test_pandigital_9(*args):
"""
Test if args together contain the digits 1 through 9 uniquely
"""
digits = set()
digit_count = 0
for a in args:
while a > 0:
digits.add(a % 10)
digit_count += 1
a //= 10
return digit_count == 9 and len(digits) == 9 and 0 not in digits
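# Illustrative checks: the digits 1-9 may be spread across several arguments.
assert test_pandigital_9(123456789)
assert test_pandigital_9(1234, 56789)
assert not test_pandigital_9(1234, 56780)  # contains a zero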
| 5,340,574
|
def WMT14(
root,
split,
language_pair=("de", "en"),
train_set="train.tok.clean.bpe.32000",
valid_set="newstest2013.tok.bpe.32000",
test_set="newstest2014.tok.bpe.32000",
):
"""WMT14 Dataset
The available datasets include following:
**Language pairs**:
+-----+-----+-----+
| |'en' |'de' |
+-----+-----+-----+
|'en' | | x |
+-----+-----+-----+
|'de' | x | |
+-----+-----+-----+
Args:
root: Directory where the datasets are saved. Default: ".data"
split: split or splits to be returned. Can be a string or tuple of strings. Default: (‘train’, ‘valid’, ‘test’)
language_pair: tuple or list containing src and tgt language
train_set: A string to identify train set.
valid_set: A string to identify validation set.
test_set: A string to identify test set.
Examples:
>>> from torchtext.datasets import WMT14
>>> train_iter, valid_iter, test_iter = WMT14()
>>> src_sentence, tgt_sentence = next(train_iter)
"""
supported_language = ["en", "de"]
supported_train_set = [s for s in NUM_LINES if "train" in s]
supported_valid_set = [s for s in NUM_LINES if "test" in s]
supported_test_set = [s for s in NUM_LINES if "test" in s]
assert len(language_pair) == 2, "language_pair must contain only 2 elements: src and tgt language respectively"
if language_pair[0] not in supported_language:
raise ValueError(
"Source language '{}' is not supported. Valid options are {}".format(language_pair[0], supported_language)
)
if language_pair[1] not in supported_language:
raise ValueError(
"Target language '{}' is not supported. Valid options are {}".format(language_pair[1], supported_language)
)
if train_set not in supported_train_set:
raise ValueError(
"'{}' is not a valid train set identifier. valid options are {}".format(train_set, supported_train_set)
)
if valid_set not in supported_valid_set:
raise ValueError(
"'{}' is not a valid valid set identifier. valid options are {}".format(valid_set, supported_valid_set)
)
if test_set not in supported_test_set:
raise ValueError(
"'{}' is not a valid valid set identifier. valid options are {}".format(test_set, supported_test_set)
)
train_filenames = "{}.{}".format(train_set, language_pair[0]), "{}.{}".format(train_set, language_pair[1])
valid_filenames = "{}.{}".format(valid_set, language_pair[0]), "{}.{}".format(valid_set, language_pair[1])
test_filenames = "{}.{}".format(test_set, language_pair[0]), "{}.{}".format(test_set, language_pair[1])
if split == "train":
src_file, tgt_file = train_filenames
elif split == "valid":
src_file, tgt_file = valid_filenames
else:
src_file, tgt_file = test_filenames
dataset_tar = download_from_url(URL, root=root, hash_value=MD5, path=os.path.join(root, _PATH), hash_type="md5")
extracted_files = extract_archive(dataset_tar)
data_filenames = {
split: _construct_filepaths(extracted_files, src_file, tgt_file),
}
for key in data_filenames:
if len(data_filenames[key]) == 0 or data_filenames[key] is None:
raise FileNotFoundError("Files are not found for data type {}".format(key))
assert data_filenames[split][0] is not None, "Internal Error: File not found for reading"
assert data_filenames[split][1] is not None, "Internal Error: File not found for reading"
src_data_iter = _read_text_iterator(data_filenames[split][0])
tgt_data_iter = _read_text_iterator(data_filenames[split][1])
def _iter(src_data_iter, tgt_data_iter):
for item in zip(src_data_iter, tgt_data_iter):
yield item
return _RawTextIterableDataset(
DATASET_NAME, NUM_LINES[os.path.splitext(src_file)[0]], _iter(src_data_iter, tgt_data_iter)
)
| 5,340,575
|
def volume_attached(context, volume_id, instance_id, mountpoint):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
| 5,340,576
|
def findby1email(session, email):
"""<comment-ja>
メールアドレスを指定して1件のユーザ情報を取得します
@param session: Session
@type session: sqlalchemy.orm.session.Session
@param email: メールアドレス
@type email: str
@return: karesansui.db.model.user.User
</comment-ja>
<comment-en>
TODO: English Comment
</comment-en>
"""
return session.query(User).filter(User.email == email).first()
| 5,340,577
|
def _onJobSave(event):
"""
If a job is finalized (i.e. success or failure status) and contains
a temp token, we remove the token.
"""
params = event.info['params']
job = event.info['job']
if 'itemTaskTempToken' in job and params['status'] in (JobStatus.ERROR, JobStatus.SUCCESS):
token = ModelImporter.model('token').load(
job['itemTaskTempToken'], objectId=False, force=True)
if token:
ModelImporter.model('token').remove(token)
# Remove the itemTaskTempToken field from the job
ModelImporter.model('job', 'jobs').update({'_id': job['_id']}, update={
'$unset': {'itemTaskTempToken': True}
}, multi=False)
del job['itemTaskTempToken']
| 5,340,578
|
def _startswith(
self: str | ir.StringValue, start: str | ir.StringValue
) -> ir.BooleanValue:
"""Determine whether `self` starts with `end`.
Parameters
----------
self
String expression
start
prefix to check for
Examples
--------
>>> import ibis
>>> text = ibis.literal('Ibis project')
>>> text.startswith('Ibis')
Returns
-------
BooleanValue
Boolean indicating whether `self` starts with `start`
"""
return ops.StartsWith(self, start).to_expr()
| 5,340,579
|
def convert_fields_in_iterator(iterator, fields):
"""Rename field names and eventually add fields with decoded values as defined in the fields structure."""
encoding_map, encoding_field_names = encoding(fields)
yield from add_decoded_fields_in_iterator(
rename_fields_in_iterator(iterator, fields), encoding_map, encoding_field_names
)
| 5,340,580
|
def get_models(modelType=None, modelId=None, nextToken=None, maxResults=None):
"""
Gets all of the models for the AWS account, or the specified model type, or gets a single model for the specified model type, model ID combination.
See also: AWS API Documentation
Exceptions
:example: response = client.get_models(
modelType='ONLINE_FRAUD_INSIGHTS',
modelId='string',
nextToken='string',
maxResults=123
)
:type modelType: string
:param modelType: The model type.
:type modelId: string
:param modelId: The model ID.
:type nextToken: string
:param nextToken: The next token for the request.
:type maxResults: integer
:param maxResults: The maximum results to return for the request.
:rtype: dict
ReturnsResponse Syntax
{
'nextToken': 'string',
'models': [
{
'modelId': 'string',
'modelType': 'ONLINE_FRAUD_INSIGHTS',
'description': 'string',
'trainingDataSource': {
'dataLocation': 'string',
'dataAccessRoleArn': 'string'
},
'modelVariables': [
{
'name': 'string',
'index': 123
},
],
'labelSchema': {
'labelKey': 'string',
'labelMapper': {
'string': [
'string',
]
}
},
'lastUpdatedTime': 'string',
'createdTime': 'string'
},
]
}
Response Structure
(dict) --
nextToken (string) --
The next token for subsequent requests.
models (list) --
The returned models.
(dict) --
The model.
modelId (string) --
The model ID.
modelType (string) --
The model type.
description (string) --
The model description.
trainingDataSource (dict) --
The model training data source in Amazon S3.
dataLocation (string) --
The data location of the training data source.
dataAccessRoleArn (string) --
The data access role ARN for the training data source.
modelVariables (list) --
The model input variables.
(dict) --
    The model variable.
name (string) --
    The model variable's name.
index (integer) --
    The model variable's index.
labelSchema (dict) --
The model label schema.
labelKey (string) --
The label key.
labelMapper (dict) --
The label mapper maps the Amazon Fraud Detector supported label to the appropriate source labels. For example, if "FRAUD" and "LEGIT" are Amazon Fraud Detector supported labels, this mapper could be: {"FRAUD" => ["0"] , "LEGIT" => ["1"]} or {"FRAUD" => ["false"], "LEGIT" => ["true"]} or {"FRAUD" => ["fraud", "abuse"], "LEGIT" => ["legit", "safe"]} . The value part of the mapper is a list, because you may have multiple variants for a single Amazon Fraud Detector label.
(string) --
(list) --
(string) --
lastUpdatedTime (string) --
Timestamp of last time the model was updated.
createdTime (string) --
Timestamp of when the model was created.
Exceptions
FraudDetector.Client.exceptions.ValidationException
FraudDetector.Client.exceptions.ResourceNotFoundException
FraudDetector.Client.exceptions.InternalServerException
FraudDetector.Client.exceptions.ThrottlingException
:return: {
'nextToken': 'string',
'models': [
{
'modelId': 'string',
'modelType': 'ONLINE_FRAUD_INSIGHTS',
'description': 'string',
'trainingDataSource': {
'dataLocation': 'string',
'dataAccessRoleArn': 'string'
},
'modelVariables': [
{
'name': 'string',
'index': 123
},
],
'labelSchema': {
'labelKey': 'string',
'labelMapper': {
'string': [
'string',
]
}
},
'lastUpdatedTime': 'string',
'createdTime': 'string'
},
]
}
:returns:
(string) --
(list) --
(string) --
"""
pass
| 5,340,581
|
def read_XMLs(input_path):
"""Reads the building XMLs to list of `BuildingInfo` objects
Parameters
----------
input_path : str
Path where the XMLs are located
Returns
-------
info_list: list
A list of `BuildingInfo` objects with information about each building
"""
info_list = []
for file in os.listdir(input_path):
if file.endswith(".xml"):
print(file)
this_building = BuildingInfo()
this_XML = open(os.path.join(input_path, file), 'r')
tree = ET.parse(this_XML)
root = tree.getroot()
info = root.find('Allgemein')
this_building.year_of_construction = int(info.find('Baujahr').text)
usage_type = info.find('Gebaeudetyp').text
if usage_type == 'Buerogebaeude':
this_building.usage_type = 'office'
elif usage_type == 'Wohngebaeude':
this_building.usage_type = 'single_family_dwelling'
elif usage_type == 'Institut Allgemein':
this_building.usage_type = 'institute'
elif usage_type == 'Institut 4':
this_building.usage_type = 'institute4'
elif usage_type == 'Institut 8':
this_building.usage_type = 'institute8'
this_building.building_number = info.find('Gebaeude').text
this_building.floors = int(info.find('Geschosszahl').text)
this_building.area = float(info.find('Nettoflaeche').text)
this_building.weight = 'light'
this_building.height_of_floors = float(info.find(
'Geschosshoehe').text)
this_building.office_layout = 0
this_XML.close()
info_list.append(this_building)
return info_list
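# Assumed per-building XML layout, inferred from the tags read above (the root tag name is a guess):
# <Gebaeude>
#   <Allgemein>
#     <Baujahr>1985</Baujahr>
#     <Gebaeudetyp>Buerogebaeude</Gebaeudetyp>
#     <Gebaeude>4120</Gebaeude>
#     <Geschosszahl>3</Geschosszahl>
#     <Nettoflaeche>2500.0</Nettoflaeche>
#     <Geschosshoehe>3.0</Geschosshoehe>
#   </Allgemein>
# </Gebaeude>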
| 5,340,582
|
def _get_all_scopes(blocks):
"""Get all block-local scopes from an IR.
"""
all_scopes = []
for label, block in blocks.items():
if not (block.scope in all_scopes):
all_scopes.append(block.scope)
return all_scopes
| 5,340,583
|
def get_equinox_type(date):
"""Returns a string representing the type of equinox based on what month
the equinox occurs on. It is assumed the date being passed has been
confirmed to be a equinox.
Keyword arguments:
date -- a YYYY-MM-DD string.
"""
month = datetime.strptime(date, '%Y-%m-%d').month
if month == 3:
return 'march'
elif month == 9:
return 'september'
else:
return None
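# Illustrative calls (assumes `datetime` is imported as in the original module, and that
# the dates passed in are known equinoxes, per the docstring):
assert get_equinox_type('2021-03-20') == 'march'
assert get_equinox_type('2021-09-22') == 'september'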
| 5,340,584
|
def skip_test_module_over_backend_topologies(request, tbinfo):
"""Skip testcases in the test module if the topo is storage backend."""
if "backend" in tbinfo["topo"]["name"]:
module_filename = request.module.__name__.split(".")[-1]
pytest.skip("Skip %s. Unsupported topology %s." % (module_filename, tbinfo["topo"]["name"]))
| 5,340,585
|
def mcs_worker(k, mols, n_atms):
"""Get per-molecule MCS distance vector."""
dists_k = []
    n_incomp = 0  # Number of searches canceled by the timeout (incomplete MCS)
for l in range(k + 1, len(mols)):
# Set timeout to halt exhaustive search, which could take minutes
result = FindMCS([mols[k], mols[l]], completeRingsOnly=True,
ringMatchesRingOnly=True, timeout=10)
dists_k.append(1. - result.numAtoms /
((n_atms[k] + n_atms[l]) / 2))
if result.canceled:
n_incomp += 1
return np.array(dists_k), n_incomp
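# Worked example of the distance above (hedged): if molecules k and l have 10 and 14 heavy atoms
# and their maximum common substructure covers 8 atoms, the distance is
# 1 - 8 / ((10 + 14) / 2) = 1 - 8 / 12 ≈ 0.333.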
| 5,340,586
|
def rows_as_dicts(cur):
"""returns rows as dictionaries in a generator when
provided cx_Oracle Cursor"""
columns = column_names_from_cursor(cur)
for row in cur:
yield dict(zip(columns, row))
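# Usage sketch (assumes an open cx_Oracle connection `conn` and the column_names_from_cursor
# helper referenced above):
# with conn.cursor() as cur:
#     cur.execute("SELECT employee_id, last_name FROM employees")
#     for row in rows_as_dicts(cur):
#         print(row["LAST_NAME"])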
| 5,340,587
|
def test_html_blacklist_label():
"""The label element defines the label for another element."""
check_html_has_no_output("""
<label><input type=radio name=size/>Large</label>
""")
| 5,340,588
|
def get_tally_sort_key(code, status):
"""
Get a tally sort key
The sort key can be used to sort candidates and other tabulation
categories, for example the status and tally collections returned by
rcv.Tabulation().tabulate().
The sort codes will sort candidates before other tabulation
categories; elected candidates before defeated candidates; elected
candidates by increasing round of election, then by decreasing votes;
defeated candidates by decreasing round of election, then by
decreasing votes; any remaining ties are broken by the sort order of
candidate names and labels for other tabulation categories.
Arguments
=========
code
A string representing a candidate name or label of another
tabulation category.
status
A dictionary of tabulation result statuses, as given by the second
item of the return value from rcv.Tabulation().tabulate().
Returns
=======
A sort key in the form of a tuple of integers and/or strings.
"""
    if code in status:
        nbr_round = status[code].nbr_round
        votes = status[code].votes
        if status[code].status == 'elected':
            sort_key = (1, 1, nbr_round, -votes, code)
        else:
            sort_key = (1, 2, -nbr_round, -votes, code)
    else:
        sort_key = (2, code)
# print('code =', code, ' sort_key =', sort_key)
return sort_key
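# Minimal runnable sketch (the namedtuple is a hypothetical stand-in for the real status records,
# which expose .status, .nbr_round and .votes as used above):
from collections import namedtuple

_Status = namedtuple("_Status", "status nbr_round votes")
_demo_status = {"Alice": _Status("elected", 1, 120), "Bob": _Status("defeated", 2, 80)}
# Elected candidates sort first, then defeated ones, then other tabulation categories:
assert sorted(["Overvotes", "Bob", "Alice"],
              key=lambda c: get_tally_sort_key(c, _demo_status)) == ["Alice", "Bob", "Overvotes"]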
| 5,340,589
|
def plot_corelevel_spectra(coreleveldict,
natom_typesdict,
exp_references=None,
scale_to=-1,
show_single=True,
show_ref=True,
energy_range=None,
title='',
fwhm_g=0.6,
fwhm_l=0.1,
energy_grid=0.2,
peakfunction='voigt',
linestyle_spec='-',
marker_spec='o',
color_spec='k',
color_single='g',
xlabel='Binding energy [eV]',
ylabel='Intensity [arb] (natoms*nelectrons)',
saveas=None,
xspec=None,
alpha_l=1.0,
beta_l=1.0,
**kwargs):
"""
Plotting function of corelevel in the form of a spectrum.
Convention: Binding energies are positiv!
Args:
coreleveldict: dict of corelevels with a list of corelevel energy of atomstypes
# (The given corelevel accounts for a weight (number of electrons for full occupied corelevel) in the plot.)
natom_typesdict: dict with number of atom types for each entry
Kwargs:
exp_references: dict with experimental refereces, will be ploted as vertical lines
show_single (bool): plot all single peaks.
scale_to float: the maximum 'intensity' will be scaled to this value (useful for experimental comparisons)
title (string): something for labeling
fwhm (float): full width half maximum of peaks (gaus, lorentz or voigt_profile)
energy_grid (float): energy resolution
linetyp_spec : linetype for spectrum
peakfunction (string): what the peakfunction should be {'voigt', 'pseudo-voigt', 'lorentz', 'gaus'}
example:
coreleveldict = {u'Be': {'1s1/2' : [-1.0220669053033051, -0.3185614920138805,-0.7924091040092139]}}
n_atom_types_Be12Ti = {'Be' : [4,4,4]}
"""
#show_compound=True, , compound_info={} compound_info dict: dict that can be used to specify what component should be shown together compound_info = {'Be12Ti' : {'Be' : 4, 'Ti' : 1}, 'BeTi' : {'Be' : 1, 'Ti' : 1}}
# TODO feature to make singles of different compounds a different color
if energy_range is None:
energy_range = (None, None)
if exp_references is None:
exp_references = {}
[xdata_spec, ydata_spec, ydata_single_all, xdata_all, ydata_all,
xdatalabel] = construct_corelevel_spectrum(coreleveldict,
natom_typesdict,
exp_references=exp_references,
scale_to=scale_to,
fwhm_g=fwhm_g,
fwhm_l=fwhm_l,
energy_range=energy_range,
xspec=xspec,
energy_grid=energy_grid,
peakfunction=peakfunction,
alpha_l=alpha_l,
beta_l=beta_l)
xmin = min(xdata_all) - 2 #0.5
xmax = max(xdata_all) + 2 #0.5
if energy_range[0]:
xmin = energy_range[0]
if energy_range[1]:
xmax = energy_range[1]
xdata = xdata_all
ydata = ydata_all
ymax2 = max(ydata_spec) + 1
ymin = -0.3
ymax = max(ydata) + 1
limits = {'x': (xmin, xmax), 'y': (ymin, ymax)}
limits_spec = {'x': (xmin, xmax), 'y': (ymin, ymax2)}
#title = title #'Spectrum of {}'.format(compound)
"""
# ToDo redesign to use multiple_scatterplot
axis = multiple_scatterplots(ydata, xdata, xlabel, ylabel, title, plot_labels,
linestyle='', marker='o', markersize=markersize_g, legend=legend_g,
legend_option={}, saveas='mscatterplot',
limits=limits, scale=[None, None],
axis=None, xerr=None, yerr=None, colors=[], linewidth=[], xticks=[], title=title, xlabel=xlabel, ylabel=ylabel, **kwargs)
"""
#print len(xdata), len(ydata)
if 'plot_label' not in kwargs:
kwargs['plot_label'] = 'corelevel shifts'
if 'linestyle' not in kwargs:
kwargs['linestyle'] = ''
if saveas is None:
saveas = f'XPS_theo_{fwhm_g}_{title}'
saveas1 = f'XPS_theo_2_{fwhm_g}_{title}'
else:
saveas1 = saveas[1]
saveas = saveas[0]
####################################
##### PLOT 1, plot raw datapoints
if not plot_params['show']:
return [xdata_spec, ydata_spec, ydata_single_all, xdata_all, ydata_all, xdatalabel]
states = []
if show_ref and exp_references:
for elm, ref_list_dict in exp_references.items():
for state, ref_list in ref_list_dict.items():
states.extend(ref_list)
ax = single_scatterplot(xdata_all,
ydata_all,
xlabel=xlabel,
ylabel=ylabel,
title=title,
line_options={
'color': 'k',
'linestyle': '-',
'linewidth': 2
},
lines={'vertical': {
'pos': states,
'ymin': 0,
'ymax': 0.1
}},
limits=limits,
saveas=saveas,
**kwargs)
''' TODO
for j,y in enumerate(ydata_all):
for i,x in enumerate(xdata):
lenx = xmax-xmin
length = 0.5/lenx
offset = 0.5/lenx
xminline = x/lenx + offset - length/2
xmaxline = x/lenx + offset + length/2
plt.axhline(y=y[i], xmin=xminline, xmax=xmaxline, linewidth=2, color='k')
text = r'{}'.format(y[i])
plt.text(x-0.25, y[i]+0.3, text, fontdict=font)
'''
##############################################################
##### PLOT 2, plot spectra, voigts around datapoints #########
kwargs.pop('linestyle', None)
kwargs.pop('marker', None)
kwargs.pop('color', None)
kwargs.pop('save', None)
kwargs.pop('save_plots', None)
ax2 = single_scatterplot(xdata_spec,
ydata_spec,
xlabel=xlabel,
ylabel=ylabel,
title=title,
marker=marker_spec,
linestyle=linestyle_spec,
color=color_spec,
line_options={
'color': 'k',
'linestyle': '-',
'linewidth': 2
},
lines={'vertical': {
'pos': states,
'ymin': 0,
'ymax': 0.1
}},
show=False,
save_plots=False,
limits=limits_spec,
**kwargs)
if show_single:
ax2 = multiple_scatterplots([xdata_spec] * len(ydata_single_all),
ydata_single_all,
xlabel=xlabel,
ylabel=ylabel,
title=title,
show=False,
save_plots=False,
axis=ax2,
linestyle='-',
color=color_single,
limits=limits_spec,
**kwargs)
'''TODO
if show_compound and compound_info:
for i,compound_data in enumerate(ydata_compound):
plotlabel = compound_plot_label[i]
plt.plot(xdata_spec, compound_data, '-', label=plotlabel, color = color,
linewidth=linewidth_g1, markersize = markersize_g)
'''
plot_params.save_plot(saveas1)
# for plotting or file writting
return [xdata_spec, ydata_spec, ydata_single_all, xdata_all, ydata_all, xdatalabel, ax, ax2]
| 5,340,590
|
def get_lidar_point_cloud(sample_name, frame_calib, velo_dir, intensity=False):
"""Gets the lidar point cloud in cam0 frame.
Args:
sample_name: Sample name
frame_calib: FrameCalib
velo_dir: Velodyne directory
Returns:
(3, N) point_cloud in the form [[x,...][y,...][z,...]]
"""
xyzi = read_lidar(velo_dir, sample_name)
# Calculate the point cloud
points_in_lidar_frame = xyzi[:, 0:3]
points = calib_utils.lidar_to_cam_frame(points_in_lidar_frame, frame_calib)
if intensity:
return points.T, xyzi[:, 3]
return points.T
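# Usage sketch (hypothetical sample name, calibration object and velodyne directory):
# points = get_lidar_point_cloud('000123', frame_calib, '/data/kitti/velodyne')  # -> (3, N)
# points, intensities = get_lidar_point_cloud('000123', frame_calib, '/data/kitti/velodyne',
#                                             intensity=True)                    # -> (3, N), (N,)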
| 5,340,591
|
def url2filename(url):
"""Return basename corresponding to url.
>>> print(url2filename('http://example.com/path/to/file%C3%80?opt=1'))
    fileÀ
>>> print(url2filename('http://example.com/slash%2fname')) # '/' in name
Traceback (most recent call last):
...
ValueError
"""
urlpath = urlsplit(url).path
basename = posixpath.basename(unquote(urlpath))
if (os.path.basename(basename) != basename or
unquote(posixpath.basename(urlpath)) != basename):
raise ValueError # reject '%2f' or 'dir%5Cbasename.ext' on Windows
return basename
| 5,340,592
|
def test_Timer():
"""Unit tests for Timer class"""
with Timer(verbose=True):
sleep(0.1)
logging.info('<< PASS : test_Timer >>')
| 5,340,593
|
def make_mlp(dim_list, activation_list, batch_norm=False, dropout=0):
"""
Generates MLP network:
Parameters
----------
dim_list : list, list of number for each layer
activation_list : list, list containing activation function for each layer
batch_norm : boolean, use batchnorm at each layer, default: False
dropout : float [0, 1], dropout probability applied on each layer (except last layer)
Returns
-------
nn.Sequential with layers
"""
layers = []
index = 0
for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):
activation = activation_list[index]
layers.append(nn.Linear(dim_in, dim_out))
if batch_norm:
layers.append(nn.BatchNorm1d(dim_out))
if activation == 'relu':
layers.append(nn.ReLU())
elif activation == 'tanh':
layers.append(nn.Tanh())
elif activation == 'leakyrelu':
layers.append(nn.LeakyReLU())
elif activation == 'sigmoid':
layers.append(nn.Sigmoid())
if dropout > 0 and index < len(dim_list) - 2:
layers.append(nn.Dropout(p=dropout))
index += 1
return nn.Sequential(*layers)
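# Usage sketch (assumes torch / torch.nn are imported as in the surrounding module):
# mlp = make_mlp([16, 64, 32, 1], ['relu', 'relu', 'sigmoid'], batch_norm=False, dropout=0.1)
# out = mlp(torch.randn(8, 16))  # -> tensor of shape (8, 1)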
| 5,340,594
|
def pddobj_video_file_path(instance, filename):
"""Generate file path for new video file"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/videos/', filename)
| 5,340,595
|
def browse_files():
"""
# Function for opening the file explorer window
:return:
"""
filename = filedialog.askopenfilename(
initialdir='/',
title='Selecionar arquivo zipado (.zip)',
filetypes=(('Zip files', '*.zip'), ('All files', '*.*'))
)
# Change label contents
label_7.configure(text='File Opened: {}'.format(filename))
| 5,340,596
|
def load_and_prep_image(filename):
"""
Reads an image from filename, turns it into a tensor
and reshapes it to (img_shape, img_shape, colour_channel).
"""
image = cv2.imread(filename)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier(haarcascade)
faces = face_cascade.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = image[y:y + h, x:x + w]
gray = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(gray, (48, 48)), -1), 0)
return cropped_img
| 5,340,597
|
def DoH(im, canvas, max_sigma=30, threshold=0.1, display=True):
""" Difference of Hessian blob detector
:param im: grayscale image
:param max_sigma: maximum sigma of Gaussian kernel
:param threshold: absolute lower bound Local maxima smaller than threshold ignore
"""
blobs = blob_doh(im, max_sigma=30, threshold=.1)
for blob in blobs:
y, x, r = blob
cv2.circle(canvas, (int(x), int(y)), int(r), (0, 0, 255), 2)
if display:
cv2.imshow('Difference of Hessian', canvas)
cv2.waitKey(0)
return blobs
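# Usage sketch (hypothetical image path): run the detector on a grayscale image and draw the
# blobs on a colour copy without opening a window.
# gray = cv2.imread('coins.png', cv2.IMREAD_GRAYSCALE)
# canvas = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
# blobs = DoH(gray, canvas, max_sigma=30, threshold=0.01, display=False)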
| 5,340,598
|
def setup_communityservice(ts3bot):
"""
    Set up the community bot.
:return:
"""
global bot
global ts3conn
bot = ts3bot
ts3conn = bot.ts3conn
global channel_config
global channels_configured
    channel_config = {int(k): v for k, v in config["channel_config"].items()}
channels_configured = list(channel_config.keys())
name = config["name"]
global bots_home
bots_home = config["botshome"]
| 5,340,599
|