content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def S_id(v):
    """Fingerprint a potential value as a string identifier.

    The value is negated, scaled by 1e5, rounded to the nearest integer
    and zero-padded to at least 7 digits.
    """
    scaled = int(round(-v * 1e5))
    return 'S{:07d}'.format(scaled)
import hmac
import hashlib
def calculate_answer(nonce, password_hash):
    """
    Calculate the challenge answer on the server:
    Answer = HMAC-SHA256(key=password_hash, msg=nonce)

    :param nonce: challenge nonce (str)
    :param password_hash: stored password hash used as the HMAC key (str)
    :return: hex digest of the HMAC (str)
    """
    # Fixed the docstring: it previously documented a nonexistent
    # ``password`` parameter instead of ``password_hash``.
    answer = hmac.new(key=password_hash.encode(), msg=nonce.encode(),
                      digestmod=hashlib.sha256)
    return answer.hexdigest()
def get_nodes_from_xpath(xpath, nodes):
    """If the selector is longer than 0 chars, then return the children
    of nodes that match xpath. Otherwise, return all the nodes.
    :param str xpath: The xpath to match.
    :param etree nodes: LXML etree object of nodes to search.
    :return list: The matched nodes, as ElementStringResult objects.
    """
    # Empty/blank selector -> the node itself is the result.
    # NOTE(review): the second clause evaluates the xpath against the parent
    # ("../" + xpath) and short-circuits to [nodes] when the node itself is
    # among those sibling matches -- presumably to avoid descending when the
    # node already satisfies the selector; confirm against callers.
    if len(xpath.strip()) == 0 or nodes in nodes.xpath("../" + xpath):
        return [nodes]
    return nodes.xpath(xpath)
def gtk_menu_get_item_by_label(menu, label):
    """
    Retrieve a menu item from a menu by its label. If more than one item
    shares the same label, only the first is returned.
    :param menu: The menu to search for the item in.
    :type menu: :py:class:`Gtk.Menu`
    :param str label: The label to search for in *menu*.
    :return: The identified menu item if it could be found, otherwise None is returned.
    :rtype: :py:class:`Gtk.MenuItem`
    """
    matches = (entry for entry in menu if entry.get_label() == label)
    return next(matches, None)
import re
def extract_id(url):
    """Extract the tournament id of the tournament from its name or URL.

    :param url: a challonge.com tournament URL
    :return: ``<tourney>`` or ``<subdomain>-<tourney>`` for tournaments
        hosted on a subdomain
    :raises ValueError: if the URL does not look like a Challonge URL
    """
    # The dot in "challonge.com" is now escaped -- previously any character
    # matched there (e.g. "challongeXcom" was accepted).
    match = re.search(r'(\w+)?\.?challonge\.com/([^/]+)', url)
    if match is None or match.group(2) is None:
        raise ValueError(f'Invalid Challonge URL: {url}')
    subdomain, tourney = match.groups()
    if subdomain is None:
        return tourney
    return f'{subdomain}-{tourney}'
def degrees_to_angle(value):
    """Convert degrees to angle units (1 degree = 60000 angles)."""
    angles = value * 60000
    return int(round(angles))
def read_list(file_path: str):
    """Read a newline-separated file into a list of stripped lines.

    Args:
        file_path: Path to file.
    Returns:
        List of lines with surrounding whitespace removed.
    """
    with open(file_path, 'r') as handle:
        return [line.strip() for line in handle]
def mouv(m_):
    """
    Convert a SINGLE human-readable movement into (face, cw, r180).

    Parameters
    ----------
    m_ : str
        The movement <F B L R U D> optionally suffixed with "'" or "2".

    Returns
    -------
    f : str
        The face to move <F B L R U D>
    cw : boolean
        True = rotates clockwise
    r180 : boolean
        True = rotates twice

    Returns None for any malformed movement string.
    """
    tokens = list(m_)
    # a movement is one face letter plus at most one modifier
    if not 1 <= len(tokens) <= 2:
        return None
    face = tokens[0]
    if face not in "FBLRUD":
        return None
    clockwise, half_turn = True, False
    if len(tokens) == 2:
        clockwise = False
        suffix = tokens[1]
        if suffix == "'":
            half_turn = False
        elif suffix == "2":
            half_turn = True
        else:
            return None
    return (face, clockwise, half_turn)
def seconds_to_minutes(seconds):
    """
    Convert seconds to minutes and seconds.

    Parameters:
    -----------
    seconds : float

    Returns:
    --------
    (float, float)
        Minutes and seconds left as a tuple.
    """
    # divmod(s, 60) == (s // 60, s % 60)
    return divmod(seconds, 60)
import torch
def abs(input, *args, **kwargs):
    """Element-wise absolute value; thin wrapper around ``torch.abs``.

    Any extra positional/keyword arguments are forwarded unchanged to
    ``torch.abs``.
    """
    return torch.abs(input, *args, **kwargs)
from pathlib import Path
def check_file_exist(file_path: Path) -> bool:
    """Return True when *file_path* points at an existing regular file."""
    path = Path(file_path)
    return path.is_file()
def _le_to_uint(val):
"""Returns the unsigned integer represented by the given byte array in little-endian format.
Args:
val: Byte array in little-endian format that represents an unsigned integer.
Returns:
The unsigned integer represented by the byte array ``val``.
"""
return int.from_bytes(val, byteorder='little') | 63672a183492d823c02a547d500b8d426dc2b5b1 | 93,358 |
def xor(s1, s2):
    """
    Exclusive-or of two equal-length byte strings.

    Args:
        s1 (bytes): first set of bytes
        s2 (bytes): second set of bytes
    Returns:
        (bytes) s1 ^ s2
    Raises:
        ValueError if s1 and s2 lengths don't match
    """
    if len(s1) != len(s2):
        raise ValueError('Input not equal length: %d %d' % (len(s1), len(s2)))
    return bytes(left ^ right for left, right in zip(s1, s2))
def replace(arr, mask, val):
    """Return a copy of *arr* with the masked elements set to *val*.

    Parameters
    ----------
    arr: ndarray
        array of values to replace
    mask: ndarray
        boolean mask of values to replace
    val: float
        value replacing the elements on the mask

    Returns
    -------
    ndarray
        a copy of the array with the values replaced; *arr* is untouched.
    """
    out = arr.copy()
    out[mask] = val
    return out
def standard_deviation(x):
    """
    Population standard deviation of ``x`` (no bias correction).

    inputs
    ------
    x: list of floats/integers

    returns:
        float standard deviation, or None when fewer than 2 values

    raises:
        TypeError when any element is not an int or float
    """
    # Idiomatic isinstance check replaces "isinstance(...) != True" chains.
    for elem in x:
        if not isinstance(elem, (int, float)):
            raise TypeError("List elements must be float or int")
    if len(x) < 2:
        return None
    n = len(x)
    mean = sum(x) / n
    ssq = sum((x_i - mean) ** 2 for x_i in x)
    return (ssq / n) ** 0.5
def _unescape_specification(specification):
# type: (str) -> str
"""
Unescapes the interface string: replaces '%2F' by slashes '/'
:param specification: Specification name
:return: The unescaped name
"""
return specification.replace("%2F", "/") | fee7492fedde134af72d3259a016b752f70f1053 | 93,365 |
import re
def get_params(line):
    """
    Gets the parameters from a line.
    @ In, line, string, The line to parse
    @ Out, (name,params), (string,string or list), The name of the parameter
      and either a single parameter or a list of parameters.
    """
    # Raw string: "\[", "\." and "\]" are regex escapes, not (invalid)
    # string escapes -- avoids DeprecationWarning/SyntaxWarning.
    start_match = re.search(r"\[\./([a-zA-Z_]+)\]", line)
    if start_match is not None:
        return "[./]", start_match.group(1)
    if " = " not in line:
        return None, None
    equalsIndex = line.index("=")
    name = line[:equalsIndex].strip()
    if line.rstrip().endswith('"'):
        # quoted value -> strip quotes and split into a list
        params = line[equalsIndex + 1:].strip().strip('"').split()
        return name, params
    else:
        # unquoted value -> return as a single string
        return name, line[equalsIndex + 1:].strip()
def open_text_file(file_path, mode, encoding):
    """Open *file_path* in text mode with *encoding*.

    Python 2's ``open`` has no ``encoding`` keyword (TypeError), so fall
    back to binary mode there.
    """
    try:  # Python 3.5+
        return open(file_path, mode + 't', encoding=encoding)
    except TypeError:  # pragma: no cover
        # Python 2 fallback
        return open(file_path, mode + 'b')  # pylint: disable=unspecified-encoding
def _fileOpen(fileModel, file):
"""
Open a file using the local file path if possible, since that will be more
efficient than opening it through some assetstores.
:param fileModel: the Girder file model instance.
:param file: file document.
:return: A file-like object containing the bytes of the file.
"""
try:
return open(fileModel.getLocalFilePath(file), 'rb')
except Exception:
return fileModel.open(file, 'rb') | d33704d8ff5c179017efac617e21b0b83346ddf4 | 93,374 |
import hashlib
def GetChecksumsFromFile(filename, hash_fns=None):
    """Computes MD5 and/or other checksums of a file.

    Args:
      filename: Name of the file.
      hash_fns: Mapping of hash names to hash constructors.
        Default is {'md5': hashlib.md5}
    Returns:
      Mapping of hash names to hexdigest strings.
      { <hashname>: <hexdigest>, ... }
    """
    constructors = hash_fns or {'md5': hashlib.md5}
    digests = {name: make() for name, make in constructors.items()}
    with open(filename, 'rb') as stream:
        # read in 1 MiB chunks until EOF
        for chunk in iter(lambda: stream.read(1048576), b''):
            for digest in digests.values():
                digest.update(chunk)
    return {name: digest.hexdigest() for name, digest in digests.items()}
def responsePeaks(peaks, sensor, energyScale):
    """Attach the sensor response to every peak (in place).

    Args:
        peaks: List of found peaks.
        sensor: Sensor model used for responding the peaks.
        energyScale: Energy scale.
    Returns:
        The same list, with each peak's ``response`` attribute set.
    """
    for peak in peaks:
        peak.response = sensor.getResponse(peak.energy, 1, energyScale.getEdges())
    return peaks
def lzip(*args):
    """
    Like :func:`zip`, but eagerly returns a list instead of an iterator.
    """
    return [pair for pair in zip(*args)]
from typing import Tuple
def options_help_message(options: Tuple[str, ...], message: str) -> str:
    """Help message for groups option

    :param Tuple[str, ...] options: Options to list
    :param str message: Help message
    :return: Formatted options help message, or ``message`` alone when
        ``options`` is None/empty/blank/invalid
    """
    # Guard order matters: ``not options`` catches None and empty before we
    # materialise ``list(options)``.  ``list(options) == ['']`` matches both
    # the tuple ('',) the signature promises and a list [''] -- the original
    # list-only comparison could never match a tuple argument.
    if (
        not options
        or list(options) == ['']
        or not all(isinstance(n, str) for n in options)
    ):
        return message
    help_message = '''
    {0}:
    {1}
    '''
    return help_message.format(message, ', '.join(options))
import time
def get_season(date):
    """
    Get the hockey season a date belongs to.
    :param date: "YYYY-MM-DD" date string
    :return: season start year (e.g. 2016 for the 2016-2017 season)
    """
    year = int(date[:4])
    parsed = time.strptime(date, "%Y-%m-%d")
    jan_first = time.strptime("%d-01-01" % year, "%Y-%m-%d")
    jul_first = time.strptime("%d-07-01" % year, "%Y-%m-%d")
    sep_first = time.strptime("%d-09-01" % year, "%Y-%m-%d")
    if parsed > jan_first:
        # Jan..Aug belongs to the previous season; Sep..Dec starts a new one
        return year - 1 if parsed < sep_first else year
    # exactly Jan 1st (or earlier) -> previous season unless after Jul 1st
    return year if parsed > jul_first else year - 1
def highlight_max(s):
    """
    Styling helper: yellow background on the maximum of a Pandas Series.
    """
    peak = s.max()
    return ["background-color: yellow" if v == peak else "" for v in s]
def get_column_names(max_element_size, num_extra_columns):
    """Generate column names for Pandas to parse mesh element data.

    Args:
        max_element_size(int, required):
            the maximum element size in the mesh elements (e.g. 3 for E3T
            only, 4 when E4Q elements are present too).
        num_extra_columns(int, required):
            The number of extra columns after the element nodes and material.
    Returns:
        List of column names: 'row_type' followed by 'cmp0'..'cmpN'.
    """
    total = max_element_size + num_extra_columns + 1  # +1 for element material
    return ['row_type'] + ['cmp{}'.format(i) for i in range(total)]
def build_atom_aliases(body_atoms):
    """Alias each body atom as "<first letter, lowercased><position index>".

    Args:
        body_atoms:
            the list of atoms of the rule body (dicts with a 'name' key)
    Return:
        List of alias strings, one per atom, in order.
    """
    return [atom['name'][0].lower() + str(index)
            for index, atom in enumerate(body_atoms)]
def get_exclude_seg_ids(exclude_grp, all_segs):
    """
    Resolve the segments to exclude from an exclude-group definition.
    :param exclude_grp: [dict] dictionary representing the exclude group from
    the exclude yml file
    :param all_segs: [array] all of the segments; needed for a reverse
    exclusion
    :return: [list like] the segments to exclude
    """
    if "seg_id_nats_ex" in exclude_grp:
        # explicit exclusion list
        return exclude_grp["seg_id_nats_ex"]
    if "seg_id_nats_in" in exclude_grp:
        # reverse exclusion: everything *except* the listed segments
        return all_segs[~all_segs.isin(exclude_grp["seg_id_nats_in"])]
    return all_segs
import re
def alias_tpl(data):
    """Generate a Mantle alias line, camel-casing snake_case names.

    Output example:
        @"postTime": @"post_time",
    """
    original = data['original_name']
    if re.search(r'_\w', original):
        # snake_case -> camelCase
        camel = re.sub(r'_(\w)', lambda m: m.group(1).upper(), original)
    else:
        camel = data['name']
    return '@"{}": @"{}",'.format(camel, original)
def resize_quota_delta(context, new_flavor, old_flavor, sense, compare):
    """Calculate any quota adjustment required at a particular point
    in the resize cycle.

    :param context: the request context
    :param new_flavor: the target instance type
    :param old_flavor: the original instance type
    :param sense: 1 for a forward adjustment, -1 to reverse a prior one
    :param compare: 1 to keep positive deltas, -1 to keep negative deltas
    """
    def delta_for(resource):
        # signed difference between target and original flavor
        return sense * (new_flavor[resource] - old_flavor[resource])

    deltas = {}
    cores = delta_for('vcpus')
    if compare * cores > 0:
        deltas['cores'] = cores
    ram = delta_for('memory_mb')
    if compare * ram > 0:
        deltas['ram'] = ram
    return deltas
import random
def generate_key(word_list: list, words: int):
    """
    Generate a key: ``words`` randomly chosen words, space separated.

    Parameters
    ----------
    word_list : list
        The candidate words.
    words : int
        The number of words to include in the key.
    """
    chosen = random.choices(word_list, k=words)
    return " ".join(chosen)
import yaml
def load_yaml_config(filename):
    """Read a YAML configuration file into a dictionary.

    Arguments:
        filename {string} -- The location of the YAML configuration file.
    Returns:
        Dictionary -- parsed file contents; {} on parse error (the error is
        printed).
    """
    with open(filename) as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
            return {}
from typing import Optional
import re
def parse_host(link: str) -> Optional[str]:
    """
    Parse the host name in a given link.
    Args:
        link: Link to a website
    Returns:
        str: the host name, or None when the link does not match
    """
    # Raw string so "\." is a regex escape, and [a-zA-Z0-9] replaces the
    # buggy [a-zA-z] range (which also matched "[", "\\", "]", "^", "_"
    # and the backquote).
    pattern = re.compile(r"^http[s]?://([a-z0-9]*\.[a-z]*)[/]?[a-zA-Z0-9]*?$")
    matches = pattern.match(link)
    if matches:
        return matches.group(1)  # it's the only possible group
    return None
import re
def normalize(string, force_underscore=False):
    """Replace invalid characters with underscores.

    A leading digit gets an extra "_" prefix.  Invalid characters are
    punctuation and most symbols; hyphens survive unless *force_underscore*
    is set.

    Args:
        string(str): A string to normalize (coerced with str()).
        force_underscore(bool): Convert hyphens to underscores too.

    Returns:
        str: Normalized string.
    """
    text = str(string)
    if re.match(r"^[0-9]", text):
        text = "_" + text
    invalid = "[^A-Za-z0-9_]" if force_underscore else "[^A-Za-z0-9_-]"
    return re.sub(invalid, "_", text)
def first_sample_of_frame(frame,
                          frame_shift_in_samples,
                          window_size_in_samples):
    """
    Return the sample index of the first sample of frame ``frame``.
    Caution: this may be negative; out-of-range samples are treated as zero.
    Analogous to kaldi10's FirstSampleOfFrame in feat/feature-window.h.

    Args:
        frame (int): The frame index >= 0.
        frame_shift_in_samples (int): The frame shift in samples.
        window_size_in_samples (int): The window size in samples.
    Returns:
        int: The first sample of this frame (may be negative).
    """
    midpoint = frame * frame_shift_in_samples + frame_shift_in_samples // 2
    start = midpoint - window_size_in_samples // 2
    assert isinstance(start, int)  # indirectly checks the inputs were ints
    return start
def is_analytics_type(odk_type):
    """Return True when *odk_type* is suitable for tracking in analytics.

    Empty strings and a fixed denylist of metadata/computed type prefixes
    are excluded.

    Args:
        odk_type (str): The type to test
    """
    if odk_type == "":
        return False
    denylist = (
        "type",
        "calculate",
        "hidden",
        "start",
        "end",
        "begin ",
        "deviceid",
        "simserial",
        "phonenumber",
    )
    # str.startswith accepts a tuple of prefixes
    return not odk_type.startswith(denylist)
def alc_calc_scans_n(inpt_mos_data_dict, inpt_selected_samples):
    """Total number of scans across the selected samples.

    Parameters
    ----------
    inpt_mos_data_dict : dict
        Per-sample project data; each value carries a 'Scan_dict' mapping
        (one entry per scan loaded from the .scancsv file).
    inpt_selected_samples : list(str)
        Sample names selected by the user; should be keys of the dict.

    Returns
    -------
    int
        Total number of scans/sub-images that will be processed.
    """
    return sum(
        len(inpt_mos_data_dict[sample]['Scan_dict'])
        for sample in inpt_selected_samples
        if sample in inpt_mos_data_dict
    )
import math
def ComputeDistAtom(dAtom1, dAtom2):
    """Compute the Euclidean distance between two atoms.

    Input: dAtom1, dAtom2, dicts with "x", "y", "z" keys
    (for instance dAtom = dPDB[resnumber][atom]).
    Output: distance (float)
    """
    squared = sum((dAtom1[axis] - dAtom2[axis]) ** 2 for axis in ("x", "y", "z"))
    return math.sqrt(squared)
def interpolation_linear(x, x1, x2, y1, y2):
    """
    Linear interpolation:
    returns (y2 - y1) / (x2 - x1) * (x - x1) + y1
    """
    slope = (y2 - y1) / (x2 - x1)
    return slope * (x - x1) + y1
def getNeumes(seq, counter):
    """ Given a list of MEI note elements, return a string of the names of the neumes seperated by underscores.
    """
    # Each note's grandparent (parent.parent) is its neume element; start
    # with the neume name of the first note.
    neumes = str(seq[0].parent.parent.getAttribute('name').value)
    # Walk the first `counter` notes and append a neume name only when a
    # note belongs to a *different* neume than its predecessor (grandparent
    # ids differ), so consecutive notes of the same neume collapse into one
    # name.
    for k in range(1, counter):
        if seq[k].parent.parent.id != seq[k-1].parent.parent.id:
            neumes = neumes + '_' + str(seq[k].parent.parent.getAttribute('name').value)
    return neumes
def fetch_parameter(kwargs, param):
    """Pop *param* from a keyword-argument dict, failing loudly when absent.

    Specifically for enforcing "stricter" typing in functions or methods
    that use **kwargs to allow different sets of arguments.

    Raises:
        TypeError: when *param* is missing from *kwargs*.
    """
    if param not in kwargs:
        raise TypeError('Missing keyword argument {param}'.format(param=param))
    return kwargs.pop(param)
import _struct
def from_native_int8(raw_bytes, offset):
    """Read a native 8-bit signed integer from *raw_bytes* at *offset*.

    Returns a (value, next_offset) tuple.
    """
    value = _struct.unpack_from("=b", raw_bytes, offset)[0]
    return value, offset + 1
def get_datastore_state(target, device):
    """Apply datastore rules according to device and desired datastore.

    - If no target is passed in and device has candidate, choose candidate.
    - If candidate is chosen, allow commit.
    - If candidate is chosen and writable-running exists, allow lock on
      running prior to commit.
    - If running/startup, allow lock, no commit.
    - If intent/operational, read only: no lock, no commit.
    - Default target: running.

    Args:
        target (str): Target datastore for YANG interaction.
        device (rpcverify.RpcVerify): Class containing runtime capabilities.
    Returns:
        (tuple): target datastore (str) and datastore state (dict) mapping
        each store to its allowed operations (commit / lock_ok /
        lock_running).
    """
    target_state = {}
    for store in device.datastore:
        if store == 'candidate':
            if not target:
                target = 'candidate'
            target_state['candidate'] = ['commit', 'lock_ok']
            # running was seen earlier -> lock running before commit
            if 'running' in target_state:
                target_state['candidate'].append('lock_running')
        elif store == 'running':
            # candidate was seen earlier -> lock running before commit
            if 'candidate' in target_state:
                target_state['candidate'].append('lock_running')
            target_state['running'] = ['lock_ok']
        elif store == 'startup':
            target_state['startup'] = ['lock_ok']
        elif store in ('intent', 'operational'):
            # read only stores
            target_state[store] = []
    if not target:
        target = 'running'
    return target, target_state
def filter_df(summary_df, country_list, feat_list, year_range):
    """
    Filter summary_df to the given countries, feature columns and year range.
    Always keeps "country", "happiness_score", "year" and "country_code"
    for downstream tasks; an empty country list means all countries.
    """
    years = list(range(min(year_range), max(year_range) + 1))
    countries = country_list
    if countries == []:
        countries = summary_df.country.unique().tolist()
    row_mask = summary_df.country.isin(countries) & summary_df.year.isin(years)
    columns = feat_list + ["country", "happiness_score", "year", "country_code"]
    return summary_df.loc[row_mask, columns]
def balanced_parentheses(string: str) -> bool:
    """
    Check whether the parentheses in *string* are balanced, ignoring all
    non-parenthesis characters. E.g. true for "(x())yz", false for ")(" or "(".
    """
    depth = 0
    for ch in string:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
            if depth < 0:  # closed more than were opened
                return False
    return depth == 0
import importlib
def import_class(modname, classname):
    """Function equivalent to ``from <modname> import <classname>``."""
    module = importlib.import_module(modname)
    return getattr(module, classname)
def hpa_to_mmhg(hpa):
    """
    Convert hectopascal to millimetres of mercury [0 °C], truncated to int.
    """
    mmhg = hpa / 1.3332239
    return int(mmhg)
import struct
def encode_time(t):
    """
    Encode a datetime in the ZKTeco get/set time wire format.

    :param t: Datetime object with the date.
    :return: Bytearray with the time stored in little-endian format.
    """
    # days since the device epoch, counted on a flat 12x31 calendar
    days = (t.year % 100) * 12 * 31 + (t.month - 1) * 31 + t.day - 1
    total_seconds = days * 86400 + (t.hour * 60 + t.minute) * 60 + t.second
    return bytearray(struct.pack('<I', total_seconds))
import typing
def id_class_name(value: typing.Any) -> str:
    """Provide the class name of *value* for a test identifier."""
    return value.__class__.__name__
from typing import List
def get_lines(ls, params) -> List[str]:
    """Get all text lines of the document referenced by *params*."""
    document = ls.workspace.get_document(params.text_document.uri)
    return document.source.splitlines()
from typing import Union
from typing import Iterable
from typing import Set
import pkg_resources
def _safe_parse_requirements(
    requirements: Union[str, Iterable[str]]
) -> Set[pkg_resources.Requirement]:
    """Safely parse a requirement or set of requirements. This effectively replaces
    pkg_resources.parse_requirements, which blows up with a ValueError as soon as it
    encounters a requirement it cannot parse (e.g. `-r requirements.txt`). This way
    we can still extract all the parseable requirements out of a set containing some
    unparseable requirements.
    """
    parseable_requirements = set()
    # yield_lines flattens the input and skips blank lines.
    for requirement in pkg_resources.yield_lines(requirements):
        try:
            parseable_requirements.add(pkg_resources.Requirement.parse(requirement))
        except ValueError:
            # Unparseable entries (e.g. "-r file.txt") are skipped on purpose.
            continue
    return parseable_requirements
def count(iterable):
    """Number of entries (left) in *iterable*; consumes iterators."""
    total = 0
    for _ in iterable:
        total += 1
    return total
def db_list_tables(con):
    """Return the names of all tables in the SQLite database."""
    cursor = con.cursor()
    cursor.execute("select name from sqlite_master where type='table';")
    return [row[0] for row in cursor.fetchall()]
def next_perm(v):
    """
    Generates the next permutation with a given amount of set bits,
    given the previous lexicographical value.
    Taken from http://graphics.stanford.edu/~seander/bithacks.html

    :param v: positive int, the previous permutation.
    :return: the smallest int greater than ``v`` with the same popcount.
    """
    t = (v | (v - 1)) + 1
    # Integer division ("//") is required here: the original "/" produced a
    # float in Python 3, and float >> int raises TypeError.
    w = t | ((((t & -t) // (v & -v)) >> 1) - 1)
    return w
import torch
def quad_kl_div(pi, gamma, ref):
    """Quadratic entropy KL^otimes(pi otimes gamma | ref) with full plans.

    Parameters
    ----------
    pi: first input, torch.Tensor of size [Batch, size_X, size_Y]
    gamma: second input, torch.Tensor of size [Batch, size_X, size_Y]
    ref: reference plan of the KL entropy to compare (pi otimes gamma) with

    Returns
    -------
    div: torch.Tensor of size [Batch]
        Quadratic KL divergence between each batch.
    """
    mass_pi = pi.sum()
    mass_gamma = gamma.sum()
    # 1e-10 guards the log against zeros in the plans
    entropy_pi = torch.sum(pi * (pi / ref + 1e-10).log())
    entropy_gamma = torch.sum(gamma * (gamma / ref + 1e-10).log())
    return (
        mass_gamma * entropy_pi
        + mass_pi * entropy_gamma
        - mass_pi * mass_gamma
        + ref.sum() ** 2
    )
def rgb_intensity(rgb):
    """Intensity (luma, BT.601 weights) of an RGB triple."""
    red, green, blue = rgb[0], rgb[1], rgb[2]
    return red * 0.299 + green * 0.587 + blue * 0.114
def chunked(data, chunksize):
    """
    Split *data* into a list of chunks of at most ``chunksize`` elements.

    Raises ValueError when ``chunksize`` is smaller than 1 or not integral.
    """
    if chunksize < 1:
        raise ValueError("Chunksize must be at least 1!")
    if int(chunksize) != chunksize:
        raise ValueError("Chunksize needs to be an integer")
    chunks = []
    current = []
    for element in data:
        current.append(element)
        if len(current) == chunksize:
            chunks.append(current)
            current = []
    if current:  # trailing partial chunk
        chunks.append(current)
    return chunks
def get_employee_vacation_days(employee, month):
    """Delegate to the month's vacation-day lookup for *employee*."""
    return month.get_employee_vacation_days(employee)
def dias_para_segundos(dias, horas, minutos, segundos):
    """Convert a duration given as days, hours, minutes and seconds into
    total seconds, rounded to 2 decimal places.
    """
    # Removed the unreachable second "return" and the redundant "* 1".
    total = dias * 86400 + horas * 3600 + minutos * 60 + segundos
    return round(total, 2)
def labels_to_string(labels):
    """
    Join labels into a "k=v,k=v" string matching the labelselector pattern;
    returns "*" when *labels* is empty or None.
    """
    if not labels:
        return "*"
    pairs = ["%s=%s" % (str(k), str(v)) for k, v in labels.items()]
    return ",".join(pairs)
from datetime import datetime
import click
import json
def write_json_file(file_name: str, results: list) -> str:
    """Write *results* as pretty-printed JSON to a timestamped file.

    The output path is ``file_name`` + "_M-D-YYYY_H-M.json"; the path is
    echoed to the console in green and returned.
    """
    now = datetime.now()
    # NOTE(review): month/day/hour/minute are not zero-padded, so the
    # resulting filenames do not sort chronologically.
    timestamp = f"_{now.month}-{now.day}-{now.year}_{now.hour}-{now.minute}.json"
    file_path = file_name + timestamp
    click.secho(f"[*] Writing results to {file_path}", fg="green")
    with open(file_path, "w") as f:
        json.dump(results, f, indent=4)
    return file_path
def get_evaluation_args(eval_py: str, train_logdir_local: str,
                        dataset_dir_local: str, eval_logdir: str, tfdl_config):
    """Build the argv needed to run the TF DeepLab eval script.

    Args:
        eval_py: The URI of the eval script.
        train_logdir_local: The directory in which checkpoints can be found.
        dataset_dir_local: The directory in which the records are found.
        eval_logdir: The directory where evaluation events should be logged.
        tfdl_config: google.protobuf.Struct with fields from
            rv.protos.deeplab.train.proto containing TF Deeplab training
            configuration.
    Returns:
        A list of arguments suitable for running the eval script.
    """
    scalar_fields = [
        'dataset',
        'output_stride',
        'decoder_output_stride',
        'model_variant',
        'eval_split',
    ]
    repeated_fields = [
        'atrous_rates',
        'eval_crop_size',
    ]
    args = [
        'python', eval_py,
        '--checkpoint_dir={}'.format(train_logdir_local),
        '--eval_logdir={}'.format(eval_logdir),
        '--dataset_dir={}'.format(dataset_dir_local),
    ]
    for field in repeated_fields:
        args.extend('--{}={}'.format(field, item)
                    for item in getattr(tfdl_config, field))
    for field in scalar_fields:
        value = getattr(tfdl_config, field)
        # empty-string values are skipped; everything else is forwarded
        if not (isinstance(value, str) and len(value) == 0):
            args.append('--{}={}'.format(field, value))
    return args
def df_to_string_list(df):
    """
    Render each row of *df* as the str() of its record dict; suitable for
    use as popups in a map. This is a utility function.
    """
    records = df.to_dict(orient='records')
    return [str(record) for record in records]
def _short_repr(value):
"""
Return a shortened ``repr`` output of value for use in ``__repr__`` methods.
"""
if isinstance(value, str):
chars = len(value)
threshold = 30
if chars > threshold:
return "{0!r}...+{1}".format(value[:threshold], chars-threshold)
return repr(value) | 22e827fe9415b7d5000c3b5337cae9e6f1d15635 | 93,524 |
def list_same_len(*lists):
    """
    Confirm all given lists have the same length.
    """
    first_len = len(lists[0])
    return all(len(item) == first_len for item in lists)
def _nominal_metric(v1, v2, **_kwargs):
"""Metric for nominal data."""
return v1 != v2 | a10d80868ecac0023edf0afbd5d23ebca7b2e63b | 93,538 |
def survivor(probabilities, t):
    """Survivor function S(t): 1 minus the cumulative probability up to t."""
    remaining = 1 - probabilities[0]
    for index in range(1, t + 1):
        remaining -= probabilities[index]
    return remaining
def get_work_directory(config):
    """Return the aiida work directory to use (None when unset)."""
    workdir = config.getoption("lammps_workdir")
    if workdir is not None:
        return workdir
    return None
def generate_bond_indices(natoms):
    """
    Row-wise bond index pairs of an interatomic distance matrix:
    [[0,1], [0,2], [1,2], [0,3], [1,3], [2,3], ..., [natoms-2, natoms-1]]

    Parameters
    ----------
    natoms: int
        The number of atoms

    Returns
    ----------
    bond_indices : list
        A list of [i, j] pairs (i < j) giving the subscripts of each
        interatomic distance, e.g. r_01, r_02, r_12, ...
    """
    return [[i, j] for j in range(1, natoms) for i in range(j)]
def normalize_file_name(fn):
    """
    Normalize a file name string by replacing '\\' with '/'. Useful when
    writing file names to control files.

    :param fn: file name
    :returns: normalized file name

    .. versionadded:: 9.2
    """
    return "/".join(fn.split('\\'))
def row_to_edge(row):
    """
    Democratic edge (Dem minus Rep, parsed as floats) for an election
    result row or poll data row.
    """
    dem = float(row["Dem"])
    rep = float(row["Rep"])
    return dem - rep
import unicodedata
def strip_string(string):
    """Cleans a string based on a whitelist of printable unicode categories.

    You can find a full list of categories here:
    http://www.fileformat.info/info/unicode/category/index.htm
    """
    # Letters, numbers, marks, punctuation, symbols and the plain space
    # category; everything else (controls, separators, ...) is dropped.
    allowed = frozenset((
        "LC", "Ll", "Lm", "Lo", "Lt", "Lu",
        "Nd", "Nl", "No",
        "Mc", "Me", "Mn",
        "Pc", "Pd", "Pe", "Pf", "Pi", "Po", "Ps",
        "Sc", "Sk", "Sm", "So",
        "Zs",
    ))
    return "".join(c for c in string if unicodedata.category(c) in allowed)
import re
def has_repeat_char_with_one_between(chars):
    """
    It contains at least one letter which repeats with exactly one letter
    between them, like xyx, abcdefeghi (efe), or even aaa.
    """
    # The previous pattern r'(\w)[^\1]\1' was broken: inside a character
    # class, \1 is not a backreference but the octal escape for chr(1),
    # so [^\1] just meant "any char except \x01". A backreference with a
    # single wildcard between the pair is what's intended.
    if re.search(r'(\w).\1', chars):
        return True
    return False
import re
def _validate_pattern(pattern):
"""Check that regex pattern conforms to type and format requirements."""
if not isinstance(pattern, str):
raise TypeError("Pattern must be a string.")
if pattern.count("(#") != 1:
raise ValueError("Pattern must contain exactly one anchor group.")
if pattern.startswith(" ") or pattern.endswith(" "):
raise ValueError("Pattern cannot start or end with a whitespace.")
if 2 * " " in pattern:
raise ValueError("Pattern cannot contain consecutive whitespaces.")
return re.compile(pattern) | 9222423d7fe9f723c33ce76aa1a88b39235b78de | 93,556 |
import re
def normalize_response_key(key: str) -> str:
    """
    Convert camel case `ParamA` to snake case `param_a`
    """
    parts = re.findall(r'[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))', key)
    if not parts:
        # No capitalized segments found; just lower-case the whole key.
        return key.lower()
    return '_'.join(part.lower() for part in parts)
from typing import Tuple
from typing import List
from typing import Dict
import csv
def parse_csv(csvfile: str) -> Tuple[List[str], List[str], List[str]]:
    """Open and parse a csv with image/label/note columns.

    Args:
        csvfile: path to csvfile
    Returns:
        a tuple of lists for images, labels, and notes
    """
    columns: Dict[str, list] = {"image": [], "label": [], "note": []}
    with open(csvfile) as fp:
        for row in csv.DictReader(fp):
            # Header names are matched case-insensitively.
            for key, val in row.items():
                columns[key.lower()].append(val)
    return columns["image"], columns["label"], columns["note"]
from typing import Any
import torch
def default_label_extractor(x: Any) -> torch.Tensor:
    """Default callable for getting the label out of batch data.

    Dict batches yield ``x["label"]``; sequence batches (e.g. an
    ``(input, label)`` tuple) yield ``x[1]``.
    """
    if isinstance(x, dict):
        label: torch.Tensor = x["label"]
    else:
        label = x[1]
    return label
def escape_quotes(string):
    """Escape double and single quotes so the string is safe to pass on
    a command line (each quote gets a preceding backslash)."""
    return string.translate(str.maketrans({'"': r'\"', "'": r"\'"}))
def normalizeRhythms(rhy):
    """Return rhythm energies min-max normalized into [0, 100].

    Parameters: rhy: -1xn array- Rhythms energy
    Returns:    rhy_n: -1xn array- Normalized Rhythms in [0, 100]
    """
    # The previous formula (rhy - min)/max * 100 only lands in [0, 100]
    # when min(rhy) == 0; divide by the full min-max span instead.
    lo = min(rhy)
    span = max(rhy) - lo
    if span == 0:
        # Constant signal: everything maps to 0 (avoids division by zero).
        return (rhy - lo) * 0.0
    return (rhy - lo) / span * 100
import re
def remove_urls(text):
    """
    Remove twitter short URLs (matched by the prefix 'https://t.co')
    from a string and return the result. Any run of whitespace in the
    output is collapsed to a single space.
    """
    without_urls = re.sub(r"https://t.co\S+", "", text)
    return re.sub(r"\s+", " ", without_urls)
from typing import List
from typing import Tuple
from typing import Dict
from typing import Counter
def create_cardinality_dict_for_doc_sentences(
    list_of_tuples: List[Tuple[str]],
) -> Dict[Tuple[str], int]:
    """Count how often each tuple occurs in the input list.

    Args:
        list_of_tuples (List[Tuple[str]]): A list of tuples (of strings)
    Returns:
        Dict[Tuple[str], int]: mapping from each distinct tuple to its
        number of occurrences in the list.
    """
    cardinalities = Counter(list_of_tuples)
    return cardinalities
def extract_data(raw_data, data_list, price_dataset, region):
    """
    Extract data on data_list from raw_data.
    Return data will be a dictionary, including three key-value pairs:
    (1) 'workdays'
        a DataController, the workdays data of the trading region.
    (2) 'price'
        a Dataset, the price dataset of the trading region.
    (3) 'others'
        a dict of Dataset, other data that users specify in data_list.
    """
    region_data = raw_data[region]
    # Names in data_list missing from the region's data are skipped.
    others = {}
    for name in data_list:
        if name in region_data:
            others[name] = region_data[name]
    return {
        'workdays': raw_data['workdays'][region],
        'price': region_data[price_dataset],
        'others': others,
    }
import math
def get_half(n: int) -> int:
    """Gets the half point of an integer, rounded up.

    Parameters
    ----------
    n : int
        The number to divide in half.
    Returns
    -------
    :rtype: int
    """
    # Exact integer ceiling division. math.ceil(n / 2) routes through
    # float division, which silently rounds for |n| > 2**53.
    return -(-n // 2)
import calendar
def days_in_month(date):
    """Number of days in the month `date` belongs to."""
    # monthrange returns (weekday of first day, number of days).
    _, last_day = calendar.monthrange(date.year, date.month)
    return last_day
def f(X):
    """Given a scalar X, return some value (a real number)."""
    Y = (X - 1.5) ** 2 + 0.5
    print(f"X = {X}, Y = {Y}")  # trace for debugging
    return Y
def get_start_button_payload(poll, action):
    """Compile the /start action payload for a certain action."""
    # Drop the four hyphens to compress the uuid a little.
    compact_uuid = str(poll.uuid).replace("-", "")
    return "-".join([compact_uuid, str(action.value)])
def date_format(date_str):
    """Formats a 'mm/yyyy' or 'yyyy' date to yyyy-mm-dd; returns a string."""
    parts = date_str.split('/')
    if len(parts) >= 2:
        month, year = parts
    else:
        # Month not supplied: default to January.
        month, year = '01', parts[0]
    # The day is never supplied in the raw data; always use the 1st.
    return f'{year}-{month}-01'
def commit_veto(request, response):  # unused request arg pylint: disable=W0613
    """
    Strict commit veto to use with the transaction manager.
    Unlike the default commit veto supplied with the transaction manager,
    this will veto all commits for HTTP status codes other than 2xx unless
    a commit is explicitly requested by setting the "x-tm" response header to
    "commit". As with the default commit veto, the commit is always vetoed if
    the "x-tm" response header is set to anything other than "commit".
    """
    tm_header = response.headers.get('x-tm')
    if tm_header is not None:
        # Explicit instruction from the app: veto unless it says "commit".
        return tm_header != 'commit'
    # No header present: veto everything except 2xx responses.
    return not response.status.startswith('2')
from typing import Any
from typing import List
from typing import Dict
def remove_dict_from_list(
    value: Any, target_list: List[Dict[str, Any]], key_name: str
) -> List[Dict[str, Any]]:
    """Return a copy of the list without dicts matching value at key_name.

    :param value: value to search for and remove
    :type value: Any
    :param target_list: list that needs to be updated
    :type target_list: List[Dict[str, Any]]
    :param key_name: key to match the value
    :type key_name: str
    :return: updated list (the input list is left untouched)
    :rtype: list
    """
    # Dicts lacking key_name compare their .get() result (None) to value.
    return [item for item in target_list if item.get(key_name) != value]
from typing import Any
import json
def format_data(data: Any, indent: int = 4) -> str:
    """Format the data dictionary returned by any class of
    the cmc-py modules.

    Args:
        data (Any): Data to be formatted; anything that is not exactly a
            dict is first converted via its ``.dict()`` method.
        indent (int, optional): Indentation of the data. Defaults to 4.
    Returns:
        str: Formatted data.
    """
    payload = data if type(data) is dict else data.dict()
    # default=str stringifies non-JSON-serializable values (dates, etc.).
    return json.dumps(payload, indent=indent, default=str)
def calc_sc_carter(slr, ecc, x):
    """
    Compute the Carter constant for the Schwarzschild (a = 0) case.
    Parameters:
        slr (float): semi-latus rectum [6, inf)
        ecc (float): eccentricity [0, 1)
        x (float): inclination value given by cos(theta_inc) (0, 1]
                   negative x -> retrograde, positive x -> prograde
    Returns:
        Q (float): Carter constant
    """
    numerator = slr * slr * (-1 + x * x)
    denominator = 3 + ecc * ecc - slr
    return numerator / denominator
def asbool(s):
    """
    Convert a string to its boolean value.

    Accepts 'true'/'false' (any case) or a string of digits (nonzero is
    True); anything else raises ValueError.
    """
    lowered = s.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    if s.isdigit():
        return bool(int(s))
    raise ValueError('must be integer or boolean: %r' % s)
def sqr(x):
    """Return the conjugate of x multiplied by x (|x|**2 for complex x)."""
    conjugate = x.conjugate()
    return conjugate * x
def tokenizer(sentence):
    """Split header text into space-separated tokens, with each newline
    turned into its own standalone token."""
    with_markers = sentence.replace("\n", " \n ")
    return with_markers.split(" ")
def fieldSorter(fieldname, forcedOrder=None):
    """Build a sort-clause factory for a field.

    Returns a callable that takes a sort order and produces the clause
    ``{fieldname: {"order": order}}``. When *forcedOrder* is given it
    overrides whatever order the callable receives.
    """
    def handleSort(order):
        effective = forcedOrder if forcedOrder is not None else order
        return {fieldname: {"order": effective}}
    return handleSort
def get_specific_mouse_files(mouse_ids, files):
    """Return the sub-list of `files` whose names contain an id in `mouse_ids`.
    """
    matches = []
    for name in files:
        if any(mouse_id in name for mouse_id in mouse_ids):
            matches.append(name)
    return matches
def _field_access_description(target, provider, field):
"""Returns a string denoting field access to a provider on a target.
This function is used to generate a pretty string that can be used in
assertion failure messages, of the form
`<//package:target>[ProviderInfo].some.field.path`.
Args:
target: The target whose provider is being accessed.
provider: The name of the provider being accessed.
field: The field name or dotted field path being accessed.
Returns:
A string describing the field access that can be used in assertion
failure messages.
"""
return "<{}>[{}].{}".format(target.label, provider, field) | 3e6fb2eae77a237d30f32a131da66632cc791cb3 | 93,604 |
import json
def parse_posted_message(m):
    """Decode a Mattermost "posted" websocket message.

    The raw message is a JSON string whose ``data`` section carries the
    ``mentions``, ``post`` and ``broadcast`` entries as nested JSON
    strings; those are decoded into real Python objects as well, so the
    caller gets a fully-parsed dict.
    """
    message = json.loads(m)
    data = message['data']
    # Each of these keys, when present, holds a JSON-encoded string.
    for key in ("mentions", "post", "broadcast"):
        if key in data:
            data[key] = json.loads(data.pop(key))
    return message
def topology_3_bonds_apart(covalent_bond_dict):
    """
    Map atom connectivity EXACTLY 3 bonds apart.
    See Amber20 manual Figure 14.1.
    Parameters
    ----------
    covalent_bond_dict : dict
        Per residue covalent connectivity.
    Returns
    -------
    dict
        Per residue 3 bonds apart connectivity.
    """
    x_bonds_apart = {}
    # Visited-atom set, reused (cleared) for every starting atom so the
    # walk never doubles back on an atom it already passed through.
    prevs = set()
    for res, residue_atoms in covalent_bond_dict.items():
        res_d = x_bonds_apart.setdefault(res, {})
        # for all atoms in the residue
        for atom in residue_atoms:
            xba = res_d.setdefault(atom, set())
            prevs.clear()
            prevs.add(atom)
            # Depth-3 walk; each level skips atoms visited on any earlier
            # branch, so cross-branch revisits are excluded as well.
            # for CA, H in N... one bond
            for subatom1 in set(residue_atoms[atom]).difference(prevs):
                prevs.add(subatom1)
                # for CB, HA in CA... two bonds
                for subatom2 in set(residue_atoms[subatom1]).difference(prevs):
                    prevs.add(subatom2)
                    # for HB1, HB2 in CB... three bonds
                    # (the level-3 atoms are collected but not marked visited)
                    for subatom3 in set(residue_atoms[subatom2]).difference(prevs):
                        xba.add(subatom3)
    return x_bonds_apart
def create_pids2idxs(data_source):
    """Creates a mapping between pids and indexes of images for that pid.

    Returns:
        dict mapping each pid (target) to the list of dataset indexes
        whose image carries that pid. (The old docstring said "2D List",
        but a dict has always been returned.)
    """
    pid2imgs = {}
    # data_source.imgs yields (image, target, ...) records; only the
    # position and the target are needed here.
    for idx, (_img, target, _) in enumerate(data_source.imgs):
        pid2imgs.setdefault(target, []).append(idx)
    return pid2imgs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.