content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import List
def get_pairs_of_int_two_from_text(text: str, space: str) -> List[List[int]]:
    """Map consecutive character pairs of ``text`` to their indices in ``space``.

    :param text: Text we want to create pairs from
    :type text: str
    :param space: Target space whose character indices are looked up
    :type space: str
    :return: Pairs of two in list form
    :rtype: List(List(int))
    """
    # Pair characters (0,1), (2,3), ...; a trailing odd character is dropped.
    firsts = text[0::2]
    seconds = text[1::2]
    return [[space.index(a), space.index(b)] for a, b in zip(firsts, seconds)]
def verify_parenthesis(bracket_seq):
    """Return True if the parentheses in *bracket_seq* are balanced and
    correctly nested, False otherwise.

    Only comparing counts is not enough: ')(' has equal counts of '(' and
    ')' but is not a correct sequence, so the running depth is tracked.
    """
    depth = 0
    for char in bracket_seq:
        if char == '(':
            depth += 1
        elif char == ')':
            depth -= 1
            if depth < 0:  # a closer with no matching opener
                return False
    return depth == 0
from typing import Tuple
import os
def split_filepath(filepath: str) -> Tuple[str, str, str]:
    """Return directory, filename and extension (without the leading dot)."""
    directory, basename = os.path.split(filepath)
    stem, extension = os.path.splitext(basename)
    if extension:
        extension = extension[1:]  # drop the leading '.'
    return directory, stem, extension
def ordinal_form(n):
    """Convert a non-negative integer to its English ordinal form.

    Parameters
    ----------
    n : :class:`~python:int`

    Returns
    -------
    :class:`~python:str`

    Examples
    --------
    >>> [ordinal_form(i) for i in (0, 1, 2, 3, 4, 11, 12, 13, 21, 22, 23)]
    ['0th', '1st', '2nd', '3rd', '4th', '11th', '12th', '13th', '21st', '22nd', '23rd']
    >>> [ordinal_form(i) for i in (100, 101, 111, 112, 113, 121)]
    ['100th', '101st', '111th', '112th', '113th', '121st']
    """
    # 11, 12 and 13 (in any hundred) take 'th', unlike other numbers ending
    # in 1, 2 or 3.  The previous implementation keyed the teens only for
    # n < 20 and therefore produced '111st', '112nd', '113rd'.
    if n % 100 in (11, 12, 13):
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return '{}{}'.format(n, suffix)
def mapping():
    """
    Field mapping applied to records with the schema below: each triple is
    (target column, source path, conversion/type tag).

    root
     |-- _corrupt_record: string (nullable = true)
     |-- attributes: struct (nullable = true)
     |    |-- birthday: string (nullable = true)
     |    |-- email: string (nullable = true)
     |    |-- first_name: string (nullable = true)
     |    |-- friends: array (nullable = true)
     |    |    |-- element: struct (containsNull = true)
     |    |    |    |-- first_name: string (nullable = true)
     |    |    |    |-- id: long (nullable = true)
     |    |    |    |-- last_name: string (nullable = true)
     |    |-- gender: string (nullable = true)
     |    |-- ip_address: string (nullable = true)
     |    |-- last_name: string (nullable = true)
     |    |-- university: string (nullable = true)
     |-- guid: string (nullable = true)
     |-- id: long (nullable = true)
     |-- location: struct (nullable = true)
     |    |-- latitude: string (nullable = true)
     |    |-- longitude: string (nullable = true)
     |-- meta: struct (nullable = true)
     |    |-- created_at_ms: long (nullable = true)
     |    |-- created_at_sec: long (nullable = true)
     |    |-- version: long (nullable = true)
     |-- birthday: timestamp (nullable = true)
    """
    # NOTE(review): the third element mixes spellings ("IntegerType",
    # "IntegerType()", "StringType()") and free-form tags ("as_is",
    # "no_change", "json_string", "StringBoolean") — presumably interpreted
    # by the consumer of this mapping; confirm the expected form there.
    return [
        ("id", "id", "IntegerType"),
        ("guid", "guid", "StringType()"),
        ("created_at", "meta.created_at_sec", "timestamp_s_to_s"),
        ("created_at_ms", "meta.created_at_ms", "timestamp_ms_to_ms"),
        ("version", "meta.version", "IntegerType()"),
        ("birthday", "birthday", "TimestampType"),
        ("location_struct", "location", "as_is"),
        ("latitude", "location.latitude", "DoubleType"),
        ("longitude", "location.longitude", "DoubleType"),
        ("birthday_str", "attributes.birthday", "StringType"),
        ("email", "attributes.email", "StringType"),
        ("myspace", "attributes.myspace", "StringType"),
        ("first_name", "attributes.first_name", "StringBoolean"),
        ("last_name", "attributes.last_name", "StringBoolean"),
        ("gender", "attributes.gender", "StringType"),
        ("ip_address", "attributes.ip_address", "StringType"),
        ("university", "attributes.university", "StringType"),
        ("friends", "attributes.friends", "no_change"),
        ("friends_json", "attributes.friends", "json_string"),
    ] | 3109b68399e3ec17bbe73e4b17f588c34f1ab2b2 | 46,556 |
def jolt_differences(content):
"""Differeces of integer list."""
jolts = sorted(content)
jolts.insert(0, 0)
jolts.append(jolts[-1] + 3)
diff = [j-i for i, j in zip(jolts[:-1], jolts[1:])]
return diff | 8fba8d8c5f34552910ab295fe1d04e9b32c021e3 | 46,557 |
import os
import pkgutil
def list_package_modules(package_name):
    """returns the list of a package modules
    :param package_name: a package folder name (path to a directory,
        relative to the cwd or absolute)
    :return: a list of modules if succes, None if an exception arises
    """
    directory = str(package_name)
    # The package must exist on disk as a directory; otherwise emit a
    # diagnostic and bail out.
    if not (os.path.isdir(directory) and os.path.exists(directory)):
        print(f"Invalid package {package_name}.")
        return None
    search_path = [directory]
    # pkgutil.iter_modules yields (finder, name, ispkg); keep only names.
    all_modules = [x[1] for x in pkgutil.iter_modules(path=search_path)]
    print(all_modules)
    return all_modules | 9afd3beae58705fc992c2eb8f82e568a44267349 | 46,558 |
def format_image_size(size):
    """Formats the given image size to a two-element tuple.

    A valid image size can be an integer, indicating both the height and the
    width, OR can be a two-element list or tuple. Both height and width are
    assumed to be positive integer.

    Args:
        size: The input size to format.

    Returns:
        A two-elements tuple, indicating the height and the width, respectively.

    Raises:
        ValueError: If the input size is invalid.
    """
    if not isinstance(size, (int, tuple, list)):
        raise ValueError(f'Input size must be an integer, a tuple, or a list, '
                         f'but `{type(size)}` received!')
    if isinstance(size, int):
        height, width = size, size
    else:
        if len(size) == 1:
            size = (size[0], size[0])
        if len(size) != 2:
            raise ValueError(f'Input size is expected to have two numbers at '
                             f'most, but `{len(size)}` numbers received!')
        height, width = size[0], size[1]
    if not isinstance(height, int) or height < 0:
        raise ValueError(f'The height is expected to be a non-negative '
                         f'integer, but `{height}` received!')
    if not isinstance(width, int) or width < 0:
        raise ValueError(f'The width is expected to be a non-negative '
                         f'integer, but `{width}` received!')
    return (height, width)
def next_player(table, from_seat, step=1, hascards=False):
    """ Attempts to find the index of the next valid player from the from_seat.
    If step is -1 it will search backwards on the table. Step can only be
    1 or -1. We can also specify to search for the next player with cards
    by setting hascards to True. If no player is found after searching
    the length of the table, an exception is raised.
    """
    # -1 is allowed so a search can start "before" seat 0.
    if from_seat < -1 or from_seat >= len(table):
        raise ValueError('from_seat is out of bounds!')
    if abs(step) != 1:
        raise ValueError('step needs to be 1 or -1.')
    length = len(table)
    # Walk at most one full lap around the table, wrapping via modulo.
    for i in range(1, length + 1):
        _seat = (from_seat + (i * step)) % length
        s = table.seats[_seat]
        if s.vacant():
            continue
        elif hascards and not s.has_hand():
            continue
        return _seat
    raise Exception('Error finding player!') | 9606b7b340b25d3dc277d0cfb1328e1e4d32cbb9 | 46,560 |
import os
def get_toplevel_dirpath(path):
    """
    Provide the toplevel directory for the given path.
    The toplevel directory will be the one containing ``controls.json`` file.
    This function returns ``None`` if toplevel path could not be calculated.
    :param path: absolute or relative path to file or dir.
    """
    # '' must terminate the recursion: os.path.dirname('') == '', so the
    # previous version recursed forever on relative paths with no match.
    if path is None or path in ('', '/'):
        return None
    if os.path.exists(os.path.join(path, 'controls.json')):
        return path
    return get_toplevel_dirpath(os.path.dirname(path))
def strip_namespace(obj):
    """
    Returns the given object name after striping the namespace
    :param obj: str, object to strip namespace from
    :return: str
    """
    # Everything after the last ':'; names without a namespace pass through.
    return obj.rpartition(':')[2]
def convert_mothur_bool(item):
    """Converts python bool into a format that is compatible with mothur.

    The identity checks (``is``) are deliberate: non-bool values such as
    1 or 0 must pass through unchanged rather than being coerced.
    """
    if item is True:
        return 'T'
    if item is False:
        return 'F'
    return item
import logging
import sys
def setup_logging(name, f, console=True):
    """Set up a logging system.

    :param name: logger name (passed to ``logging.getLogger``)
    :param f: open stream that log records are written to
    :param console: when True, also echo records to stdout
    :return: the configured logger, level set to INFO
    """
    log_format = logging.Formatter('%(asctime)s : %(message)s')
    logger = logging.getLogger(name)
    # Drop handlers from any earlier call so records are not duplicated.
    logger.handlers = []
    file_handler = logging.StreamHandler(f)
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)
    if console:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(log_format)
        logger.addHandler(console_handler)
    logger.setLevel(logging.INFO)
    return logger | 7397ebca3d6980184d12fe18b0b9001e7a579928 | 46,567 |
def get_fhir_type_name(type_):
    """Return the FHIR type name for *type_*.

    Prefers the type's own ``fhir_type_name()`` hook; falls back to the
    special cases below when that attribute is missing, and re-raises the
    AttributeError otherwise.
    """
    try:
        return type_.fhir_type_name()
    except AttributeError:
        if type_ is bool:
            return "boolean"
        type_str = str(type_)
        # Optional[...] unions that include the primitive-extension type
        # collapse to the FHIRPrimitiveExtension element.
        if (
            type_str.startswith("typing.Union[")
            and "fhirtypes.FHIRPrimitiveExtensionType" in type_str
        ):
            return "FHIRPrimitiveExtension"
        raise | 06c7d8d345c237e18d4a56bf60dca370425aa83e | 46,569 |
def case_normalizer(word, dictionary):
    """In case the word is not available in the vocabulary, try several
    case-normalizing procedures and pick the candidate with the lowest
    index, which is equivalent to the most frequent alternative.

    Obtained from
    https://nbviewer.jupyter.org/gist/aboSamoor/6046170

    Args:
        word (str): The word.
        dictionary (dict): Maps words to their (frequency) index.

    Returns:
        (str): The case-normalized word, or *word* itself when no cased
        variant is present in the dictionary.
    """
    candidates = [
        (dictionary.get(variant, 1e12), variant)
        for variant in (word.lower(), word.upper(), word.title())
    ]
    index, best = min(candidates)
    return best if index != 1e12 else word
def workingOnFileObject(function):
    """A decorator that ensures that function 'function' is called upon a
    file object rather than a string.

    If the first argument is a path string, the file is opened (text mode,
    read-only) and closed automatically; otherwise the argument is passed
    through untouched.
    """
    import functools  # local import keeps this snippet self-contained

    @functools.wraps(function)  # preserve the wrapped function's metadata
    def convert_if_needed(f, *args, **kwargs):
        if isinstance(f, str):
            with open(f, 'r') as infile:
                return function(infile, *args, **kwargs)
        return function(f, *args, **kwargs)
    return convert_if_needed
def get_bbox(x_start, y_start, x_end, y_end):
    """
    This method returns the bounding box of a face.

    Parameters:
    -------------
    x_start: the x value of top-left corner of bounding box
    y_start: the y value of top-left corner of bounding box
    x_end  : the x value of bottom-right corner of bounding box
    y_end  : the y value of bottom-right corner of bounding box

    returns:
    --------------
    [x1, y1, x2, y2, x3, y3, x4, y4]
    the list of x and y values starting from the top-left corner and going
    clockwise: top-left, top-right, bottom-right, bottom-left.
    """
    # corners in order: top-left, top-right, bottom-right, bottom-left
    return [x_start, y_start,
            x_end, y_start,
            x_end, y_end,
            x_start, y_end]
import yaml
def load_config(cfg):
    """
    Read a YAML configuration file.

    Parameters
    ----------
    cfg : str, configuration filename

    Returns
    -------
    dict
    """
    with open(cfg, 'rt') as yaml_file:
        return yaml.safe_load(yaml_file)
def _needs_scope_expansion(filter_, filter_value, sub_scope):
"""
Check if there is a requirements to expand the `group` scope to individual `user` scopes.
Assumptions:
filter_ != Scope.ALL
"""
if not (filter_ == 'user' and 'group' in sub_scope):
return False
if 'user' in sub_scope:
return filter_value not in sub_scope['user']
else:
return True | 63b3ae54509d9350b78f187cb6cfd306c250ccfe | 46,575 |
import os
import argparse
def extant_file(f):
    """
    Argparse type for files that exist.
    """
    if os.path.isfile(f):
        return f
    raise argparse.ArgumentTypeError('%s does not exist' % f)
def throttle_delay_set(session, delay):
    """Assign a new throttle delay to a session. The throttle delay
    value, in seconds, is added to each request to avoid hitting
    RESTCONF throttling limit.

    :param session: Dictionary of session returned by :func:`login`.
    :param delay: New delay to be assigned to the session, in seconds.
    :rtype: str. Always the literal ``"Success"`` (the session dict is
        mutated in place).
    """
    session['throttle_delay'] = delay
    return "Success" | 33000032575fb56fbbe48b41719feab8949c9677 | 46,577 |
def create_matrix(rows, columns):
    """Builds new matrix.

    Args:
        rows (int): number of rows.
        columns (int): number of columns.

    Returns:
        [[]]: matrix of zeros containing data with float type.
    """
    # Each row is a fresh list, so mutating one row never affects another.
    return [[0.0] * columns for _ in range(rows)]
def avoids(word, forbidden):
    """Returns True if word does not contain any letter in forbidden string
    >>> avoids('yummy', 'abcdefg')
    True
    >>> avoids('dictionary', 'abcdefg')
    False
    >>> avoids('crypt', 'aeiou')
    True
    >>> avoids('tangible', 'aeiou')
    False
    """
    return all(letter not in forbidden for letter in word)
import re
def analyze(code):
    """Analyze Code

    Args:
        code (string): Code

    Returns:
        tuple of:
            length of code,
            number of lines,
            amount of variables,
            amount of functions,
            amount of classes
    """
    code = code.strip()
    chunk = code
    var_count = 0
    func_count = 0
    class_count = 0
    # Consume the code from the front: every successful match removes the
    # matched text so it cannot be double-counted; otherwise advance one
    # character.  Strings and ints are matched first so their contents are
    # never mistaken for assignments or definitions.
    while chunk:
        # string
        res = re.match(r"('''(\\'|[^'])*''')", chunk)
        if res:
            chunk = chunk[res.end():]
            continue
        res = re.match(r'("""(\\"|[^"])*""")', chunk)
        if res:
            chunk = chunk[res.end():]
            continue
        res = re.match(r'("(\\"|[^\n\r"])*")', chunk)
        if res:
            chunk = chunk[res.end():]
            continue
        res = re.match(r"('(\\'|[^\n\r'])*')", chunk)
        if res:
            chunk = chunk[res.end():]
            continue
        # int
        res = re.match(r'(\d+)', chunk)
        if res:
            chunk = chunk[res.end():]
            continue
        # variable (optionally dotted / annotated, followed by '=')
        res = re.match(
            r'((\w+\s*([^\n\r]*\))?\s*\.\s*)*\w+\s*(:\s*\w+)?\s*=\s*.+)', chunk)
        if res:
            var_count += 1
            chunk = chunk[res.end():]
            continue
        # function
        res = re.match(r'(def\s*\w+\s*\([^\n\r]*\)\s*:)', chunk)
        if res:
            func_count += 1
            chunk = chunk[res.end():]
            continue
        # class
        res = re.match(r'(class\s*\w+(\s*\([^\n\r]*\))?\s*:)', chunk)
        if res:
            class_count += 1
            chunk = chunk[res.end():]
            continue
        chunk = chunk[1:]
    return len(code), len(code.split('\n')), var_count, func_count, class_count | a6d5b2fb81dda0515ddd587a053f8d4654cd9739 | 46,581 |
def unlock_time(web3):
    """UNIX timestamp to unlock tokens 180 days in the future.

    :param web3: connected Web3 instance; the pending block's timestamp
        is used as "now".
    """
    return web3.eth.getBlock('pending').timestamp + 180 * 24 * 60 * 60 | fe8888b663efc101ba79fc7200a342c09f44257c | 46,582 |
def create_information_store():
    """
    Create object to store learned information.

    Each call returns fresh, independent lists so stores never share state.
    """
    keys = ("pos_known_pos", "pos_known_neg", "is_present", "eliminated")
    return {key: [] for key in keys}
def define_result(result: str, index: int) -> str:
    """
    Rather than having the Team decode this, what if we just told it
    whether it had a win, loss, or draw? Then it wouldn't matter how
    we came to know this, we would just have to inform the team.
    The less that the Team knows about how we determine the match results,
    the less likely it will be that the Team will need to be modified if
    we modify how we represent match results.

    :param result: Game Outcome ('win', 'loss' or 'draw'; whitespace and
        case are tolerated)
    :param index: Team Index (0 = first team, 1 = second team)
    :return: the outcome from the indexed team's perspective
    """
    # normalise: remove surrounding spaces, lower-case
    result = result.strip().lower()
    if result not in ('win', 'loss', 'draw'):
        raise ValueError("ERROR: this is invalid game outcome: {}".format(result))
    # Outcome seen from each side; any other combination is a draw.
    perspective = {
        ('win', 0): 'win',
        ('win', 1): 'loss',
        ('loss', 0): 'loss',
        ('loss', 1): 'win',
    }
    return perspective.get((result, index), 'draw')
from datetime import datetime
def askbookingInfoTime():
    """
    This function asks the user for the prefered time of booking, following the format: mm/dd/YY, HH

    Parameters
    ----------
    (none — the value is read interactively from stdin)

    Returns
    -------
    date_entry : datetime
        The parsed booking time, guaranteed to fall within 2022.
    """
    # Loop until the user supplies a parseable timestamp inside 2022.
    while True:
        try:
            date_entry = input("Enter a date in mm/dd/YYYY, HH format: \n")
            date_entry=datetime.strptime(date_entry,'%m/%d/%Y, %H')
            # Accepted range: the whole of 2022, with hour precision.
            date_first = '01/01/2022, 00'
            date_first = datetime.strptime(date_first, '%m/%d/%Y, %H')
            date_last = '12/31/2022, 23'
            date_last = datetime.strptime(date_last, '%m/%d/%Y, %H')
        except ValueError:
            print("Incorrect format, please input \"month/day/year, hour \" e.g.: 12/01/2022, 10")
            continue
        if date_entry<date_first or date_entry>date_last:
            print("You can only pick dates from 01/01/2022, 00 till 12/31/2022, 23")
            continue
        else:
            return date_entry | e4c73a4af96773619b6e8ef0da3df2ff761f4648 | 46,587 |
def IsValueValid(value):
    """Validate the value.

    An invalid value is either an empty string or any string containing '+'.

    Args:
        value: string. The value in raw result.

    Returns:
        A boolean indicates if the value is valid or not.
    """
    return value != '' and '+' not in value
import os
import sys
def size():
    """return size in bytes of a stdin"""
    # fstat on the stdin descriptor; st_size is meaningful when stdin is
    # redirected from a regular file (for a tty/pipe it is not a byte
    # count of pending input).
    return os.fstat(sys.stdin.fileno()).st_size | ae90c826af0000dd49edff13ae191f151bfcfce3 | 46,589 |
def is_predicate(logic, start, end):
    """Returns T/F depending on if logic string is predicate and index of predicate operator (<=)

    :param logic: logic string; must contain '=' (str.index raises
        ValueError otherwise)
    :param start: start index of the region being examined
    :param end: end index of the region being examined
    :return: (True, index_of_first_equals) when the first '=' lies outside
        [start, end]; (False, -1) otherwise
    """
    # NOTE(review): the docstring mentions '<=' but only the position of
    # the first '=' is located — confirm intent against the callers.
    equals_ind = logic.index("=")
    if equals_ind>end or equals_ind<start:
        return True, equals_ind
    return False, -1 | 254527bc4a5604fa9ad4f3470adcce4a846fa44f | 46,591 |
import re
def trim_stopwords(s, stop_words):
    """Case-insensitive removal of stop phrases/words from a string.

    Stop words are expected in lower case.

    >>> trim_stopwords('Depártment de Testing Test royale', ['depártment de', 'royale'])
    'Testing Test'
    """
    for stop in stop_words:
        if ' ' in stop:  # phrase: remove as a substring
            # re.escape keeps regex metacharacters in the phrase from being
            # interpreted as a pattern.
            s = re.sub(re.escape(stop), '', s, flags=re.IGNORECASE)
        else:  # individual word
            # Rebuild instead of pop()-ing while iterating, which skipped
            # consecutive occurrences of the stop word.
            s = ' '.join(w for w in s.split() if w.lower() != stop)
    return s.strip()
def get_number_of_colors(image):
    """Does what it says on the tin.

    This attempts to return the number of POSSIBLE colors in the image, not the
    number of colors actually used. In the case of a paletted image, PIL is
    often limited to only returning the actual number of colors. But that's
    usually what we mean for palettes, so eh.

    But full-color images will always return 16777216. Alpha doesn't count, so
    RGBA is still 24-bit color.

    Returns an int; raises ValueError for an unrecognized mode.
    """
    # See http://www.pythonware.com/library/pil/handbook/concepts.htm for list
    # of all possible PIL modes
    mode = image.mode
    if mode == '1':
        return 2
    elif mode == 'L':
        return 256
    elif mode == 'P':
        # This is sort of (a) questionable and (b) undocumented, BUT:
        # palette.getdata() returns a tuple of mode and raw bytes. The raw
        # bytes are rgb encoded as three bytes each, so its length is three
        # times the number of palette entries.
        palmode, paldata = image.palette.getdata()
        # Integer division: '/' returned a float under Python 3.
        return len(paldata) // 3
    elif mode in ('RGB', 'RGBA', 'CMYK', 'YCbCr', 'I', 'F',
                  'LA', 'RGBX', 'RGBa'):
        return 2 ** 24
    else:
        raise ValueError("Unknown palette mode, argh!")
def read(filename):
    """ reads a Horn SAT formula from a text file

    :file format:
        # comment
        A              # clause with unique positive literal
        :- A           # clause with unique negative literal
        A :- B, C, D   # clause where A is positive and B,C,D negative
        # variables are strings without spaces

    :return: list of (positive_variable_or_None, [negative_variables])
    """
    formula = []
    # 'with' guarantees the file handle is closed even on a parse error;
    # the previous version leaked the handle.
    with open(filename, 'r') as infile:
        for line in infile:
            line = line.strip()
            # Skip blank lines (the old `line[0]` check raised IndexError
            # on them) and comments.
            if not line or line.startswith("#"):
                continue
            lit = line.split(":-")
            if len(lit) == 1:
                # clause with a single positive literal
                posvar = lit[0]
                negvars = []
            else:
                assert len(lit) == 2
                posvar = lit[0].strip()
                if posvar == '':
                    posvar = None
                negvars = [v.strip() for v in lit[1].split(',')]
            formula.append((posvar, negvars))
    return formula
import requests
def fetch_file_content(resource_url):
    """
    Fetch the XML content of a resource via its URL

    :param resource_url: URL of the resource to download
    :return: the text content of the file, None if the status code was not 200
    """
    # Explicitly ask the server for an XML representation.
    response = requests.get(resource_url, headers={'accept': 'application/xml'})
    if response.status_code != 200:
        return None
    return response.text | 98b24b181eeef1e923900c4540c41b1b9e743ee0 | 46,597 |
def value_to_string(value, fmt):
    """
    Convert numerical value to string with a specific format

    ``fmt`` is a format-spec suffix (e.g. ``'6'`` or ``'.2f'``); the value
    is right-aligned by it and surrounding whitespace is stripped.
    """
    rendered = f"{value:>{fmt}}"
    return rendered.strip()
def str_rgb(rgb):
    """
    Returns the string representation of an RGB object without alpha

    Parameter rgb: the color object to display
    Precondition: rgb is an RGB object
    """
    return f'({rgb.red}, {rgb.green}, {rgb.blue})'
from typing import Optional
from pathlib import Path
def get_home_path() -> Optional[str]:
    """get home directory path, return None if failed"""
    # Path.home raises RuntimeError when the home dir cannot be resolved.
    try:
        home = Path.home()
    except RuntimeError:
        return None
    return str(home)
def oct_to_bin_str(oct_number, n):
    """
    Render an integer as a binary string, zero-padded to width ``n``.

    :param oct_number: integer value to convert
    :param n: minimum width of the resulting string
    :return: binary digit string (no '0b' prefix)
    """
    return bin(oct_number)[2:].zfill(n)
def seq_3mers(sequence):
    """Takes a sequence to overlapping 3-mers"""
    # Slice a three-character window at every position where one fits;
    # sequences shorter than 3 yield an empty list.
    return [sequence[i:i + 3] for i in range(len(sequence) - 2)]
import os
def getRelPathToBIDS(filepath, bids_root):
    """
    This function returns a relative file link that is relative to the BIDS root directory.

    :param filepath: absolute path + file
    :param bids_root: absolute path to BIDS directory
    :return: relative path to file, relative to BIDS root
    """
    path,file = os.path.split(filepath)
    # NOTE(review): str.replace removes EVERY occurrence of bids_root, not
    # just a leading prefix — assumes bids_root appears once in the path.
    relpath = path.replace(bids_root,"")
    return(os.path.join(relpath,file)) | d334a8978e76c7075924bd6a1561a8e3e9c355f0 | 46,605 |
def refresh_voter_primary_email_cached_information_by_voter_we_vote_id(voter_we_vote_id):
    """
    Make sure this voter record has accurate cached email information.

    Currently a stub: always reports failure with a TO_BE_IMPLEMENTED status.

    :param voter_we_vote_id: identifier of the voter to refresh
    :return: dict with 'success' (bool) and 'status' (str) keys
    """
    results = {
        'success': False,
        'status': "TO_BE_IMPLEMENTED",
    }
    return results | 179cf1a0eeb49efc058d6e9d36d694a8bcfecc5c | 46,606 |
def ccdata_getattribute_with_coverage(self, attr):
    """A bookkeeping version of __getattribute__ for ccData objects.

    Counts in ``self.coverage`` how often each attribute listed in
    ``self._attrlist`` is read, then delegates to the default lookup.
    """
    # '_attrlist' itself must be excluded or the membership test below
    # would recurse through this very method.
    if attr != '_attrlist' and attr in self._attrlist:
        if not hasattr(self, 'coverage'):
            self.coverage = {}
        self.coverage[attr] = self.coverage.get(attr, 0) + 1
    return object.__getattribute__(self, attr) | bc3fc21e168977315bc65082eded68954eb7231e | 46,608 |
from typing import Dict
def unparse_connection(dsn: Dict[str, str]) -> str:
    """Return connection string for pretty printing or copying when starting psql."""
    values = dict(dsn)  # copy so the caller's dict is untouched
    values.setdefault("user", "<default>")
    values.setdefault("port", "<default>")
    return "host={host} port={port} dbname={database} user={user} password=***".format_map(values)
import re
def extract_chunks_from_sentence(tagged_sentence, include_cardinal=True, include_other_spatial=True, include_types=True):
    """
    Custom NER chunker, basically grabbing consecutive sequences of tagged terms from
    a list in a format returned by NLTK's Stanford NER wrapper. We also concatenate
    commas, parentheses, and two-letter capital abbreviations (usually states) with an
    already-found chunk.

    :param tagged_sentence: list of (word, tag) pairs (tags such as
        'LOCATION', 'ORGANIZATION', 'PERSON', 'O')
    :param include_cardinal: keep cardinal-direction words in chunks
    :param include_other_spatial: keep 'along'/'near'/'at' in chunks
    :param include_types: keep feature-type words (e.g. 'river') in chunks
    :return: list of chunks, each a list of (word, tag) tuples
    """
    chunk_tokens = [] # for the (str, type) tuples
    tokens = []       # tokens of the chunk currently being built
    concatenate = False  # True while we are inside a chunk
    previous_token = ('', 'O')
    cardinal_direction_tokens = ['east', 'west', 'south', 'north', 'eastern',
                                 'western', 'southern', 'northern', 'central',
                                 'northeast', 'northwest', 'southeast', 'southwest',
                                 'northeastern', 'northwestern', 'southeastern', 'southwestern']
    spatial_language_tokens = ['along', 'near', 'at']
    feature_type_tokens = ['region', 'regions', 'county', 'counties', 'park', 'parks',
                           'coast', 'coasts', 'town', 'city', 'state', 'states', 'river', 'rivers']
    for token in tagged_sentence:
        word = token[0]
        tag = token[1]
        # start with a loc, org, or person (we include person bc of NER errors)
        if tag == 'LOCATION' or tag == 'ORGANIZATION' or tag == 'PERSON':
            if concatenate:
                tokens.append(token)
            else:
                # new chunk: but include a previous "("
                if previous_token[0] == '(':
                    tokens.append(previous_token)
                # also keep stuff like 'north', 'south' before a relevant NER token
                if include_cardinal and previous_token[0] in cardinal_direction_tokens:
                    tokens.append(previous_token)
                tokens.append(token)
                concatenate = True
        # handle commas and other punctuation we want to keep when already in a chunk
        elif concatenate and re.match(r'[,()]|\'s', word):
            tokens.append(token)
        # handle 2-letter abbreviations with a RE (usually states)
        elif concatenate and re.match(r'\b[A-Z][A-Z]\b', word):
            tokens.append(token)
        # handle some prepositions we want to keep, some occur within placenames
        elif concatenate and re.match(r'\bin\b|\bthe\b|\bupon\b|\bof\b', word.lower()):
            tokens.append(token)
        # keep cardinal direction tokens
        elif concatenate and include_cardinal and word.lower() in cardinal_direction_tokens:
            tokens.append(token)
        # keep other spatial language tokens
        elif concatenate and include_other_spatial and word.lower() in spatial_language_tokens:
            tokens.append(token)
        # keep feature type tokens
        elif concatenate and include_types and word.lower() in feature_type_tokens:
            tokens.append(token)
        # keep 'et' because this is part of 'et al.' and we should reject these chunks down the road
        elif concatenate and re.match(r'^et$', word):
            tokens.append(token)
        else:
            # end of chunk!
            if tokens:
                chunk_tokens.append(tokens.copy())
            # re-set things
            concatenate = False
            tokens.clear()
        previous_token = token
    # this is to catch a chunk which includes the very last token in a sentence (e.g. titles)
    if concatenate:
        chunk_tokens.append(tokens.copy())
    #print("FINAL chunk_tokens: %s" %chunk_tokens)
    return chunk_tokens | 155fea6d522b9c0951e7efd64d5f3d172e3956ea | 46,611 |
import yaml
def load_yaml(filepath):
    """Load the content of a YAML file to a dictionary.

    :param filepath: path of the YAML file to read
    :return: the parsed document (typically a dict)
    """
    with open(filepath, "r") as m_file:
        return yaml.safe_load(m_file)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
    """
    is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
    Calculates which of the given dates are valid days, and which are not.
    .. versionadded:: 1.7.0
    Parameters
    ----------
    dates : array_like of datetime64[D]
        The array of dates to process.
    weekmask : str or array_like of bool, optional
        A seven-element array indicating which of Monday through Sunday are
        valid days. May be specified as a length-seven list or array, like
        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
        weekdays, optionally separated by white space. Valid abbreviations
        are: Mon Tue Wed Thu Fri Sat Sun
    holidays : array_like of datetime64[D], optional
        An array of dates to consider as invalid dates. They may be
        specified in any order, and NaT (not-a-time) dates are ignored.
        This list is saved in a normalized form that is suited for
        fast calculations of valid days.
    busdaycal : busdaycalendar, optional
        A `busdaycalendar` object which specifies the valid days. If this
        parameter is provided, neither weekmask nor holidays may be
        provided.
    out : array of bool, optional
        If provided, this array is filled with the result.
    Returns
    -------
    out : array of bool
        An array with the same shape as ``dates``, containing True for
        each valid day, and False for each invalid day.
    See Also
    --------
    busdaycalendar: An object that specifies a custom set of valid days.
    busday_offset : Applies an offset counted in valid days.
    busday_count : Counts how many valid days are in a half-open date range.
    Examples
    --------
    >>> # The weekdays are Friday, Saturday, and Monday
    ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
    ...                 holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
    array([False, False, True])
    """
    # NOTE(review): this is a stub carrying numpy's docstring — it simply
    # returns its arguments as a tuple (and silently drops `busdaycal`);
    # the documented behavior lives in numpy.is_busday.
    return (dates, weekmask, holidays, out) | 4774e5d862e8b7c3f65d9d62d1beb3669ad7587f | 46,614 |
import heapq
def search (rmap, start, end):
    """Searches `RiskMap` `rmap` (breadth-first) to find the least risky
    path from `start` to `end`.  Returns the total risk of that path.

    Requires `rmap` to support indexing (`rmap[pos]` -> risk) and a
    `neighbors(pos)` method.
    """
    risk = 0
    # Seed the frontier with every neighbor of the start; the start's own
    # risk is not counted.
    queue = [ (rmap[p], p) for p in rmap.neighbors(start) ]
    visited = { start }
    heapq.heapify(queue)
    # Dijkstra-style expansion: the heap always yields the lowest-risk
    # frontier position first, so when `end` is popped its risk is minimal.
    while len(queue) > 0:
        risk, current = heapq.heappop(queue)
        if current == end:
            break
        for pos in rmap.neighbors(current):
            if pos not in visited:
                heapq.heappush( queue, ((rmap[pos] + risk), pos) )
                visited.add(pos)
    return risk | b814d6ce3d27c8623a175592355f744208d37f3e | 46,615 |
def pythonize_camelcase(origstr):
    """ Turns camelCase into underscore_style """
    converted = ''.join(
        '_' + ch.lower() if ch.isupper() else ch
        for ch in origstr
    )
    # kludge, because calc_f_f_t is silly.
    converted = converted.replace("_f_f_t", "_fft")
    # kludge for 3d calls
    return converted.replace("3d", "_3d")
def construct_res_tuple(res):
    """
    Build the BigDFT fragment tuple given the residue of the structure

    Args:
        res(Residue): A residue Class on the Biopython package

    Returns:
        tuple: the (chain,fragname,fragno) tuple
    """
    # Biopython full_id layout:
    # (structure_id, model_id, chain_id, (hetfield, resseq, icode))
    chain = res.full_id[2]
    # Blank chain identifiers default to 'A'.
    if len(chain.lstrip(' ')) == 0:
        chain = 'A'
    resname = res.resname
    position = res.full_id[3][1]
    return chain, resname, position | 6091470e4a16f7b04ee9d3fcf5bf64f30f5a1b58 | 46,617 |
def ahash(a):
    """
    Hex-string hash of a numpy array's data buffer.
    * http://stackoverflow.com/questions/16589791/most-efficient-property-to-hash-for-numpy-array
    ::
        hash(a.data)
        Out[7]: 7079931724019902235
        In [8]: "%x" % hash(a.data)
        Out[8]: '6240f8645439a71b'
    """
    # Hashing requires an immutable buffer, so the array is frozen here —
    # note the side effect: `a` becomes read-only for the caller.
    a.flags.writeable = False
    # NOTE(review): relies on `a.data` being hashable; confirm against the
    # numpy version in use.
    return "%x" % hash(a.data) | 184ca7b6f36999071bd30ea57948dadef60d80a4 | 46,618 |
def get_id_from_name(name):
    """
    Takes the lowercase name of a component and removes '-' and redundant
    spaces by splitting and then rejoining on spaces. Spaces and underscores
    are finally replaced with '-'.
    """
    collapsed = ' '.join(name.lower().replace('-', '').split())
    return collapsed.replace(' ', '-').replace('_', '-')
def with_siblings(graph, outputs):
    """Include all missing siblings.

    :param graph: object exposing ``siblings(node) -> set``
    :param outputs: iterable of nodes
    :return: the union of every node's sibling sets
    """
    siblings = set()
    for node in outputs:
        siblings |= graph.siblings(node)
    return siblings | 74b16a45853c983e6dbc05b50c64586fa007a590 | 46,622 |
def get_prop_t(pid, i):
    """
    Gets a value of 'P585' (point in time) from a Wikidata property

    :param pid: list of Wikidata claim dicts for one property
    :param i: index of the claim to read
    :return: the time string of the claim's first P585 qualifier
    """
    return pid[i]["qualifiers"]["P585"][0]["datavalue"]["value"]["time"] | fabaf5885fa2d5543182c5f19c818da43b533886 | 46,623 |
def deleteSubtree(tree, keypath):
    """Delete the content at a dot-separated *keypath* and return *tree*.

    A missing intermediate key was already tolerated (the tree is returned
    unchanged); a missing leaf key is now a no-op too, instead of raising
    KeyError, making the two cases consistent.

    :param tree: nested dict to modify in place.
    :param keypath: dot-separated path, e.g. ``"a.b.c"``.
    :raises Exception: if *keypath* is empty.
    :return: the (possibly modified) *tree*.
    """
    if not keypath:
        raise Exception("You must provide a keypath")
    node = tree
    parts = keypath.split('.')
    for part in parts[:-1]:
        node = node.get(part)
        # if we don't have a tree to update it's not there anyway, go home
        if node is None:
            return tree
    node.pop(parts[-1], None)
    return tree
def is_valid(n: str) -> bool:
    """Return True if *n* parses as a number (int or float).

    The previous ``float(n) or int(n)`` form was buggy: when ``float(n)``
    was 0.0 (e.g. ``"0.0"``) it fell through to ``int("0.0")``, which
    raises ValueError, so zero-valued floats were reported invalid.
    ``float()`` alone already accepts every int literal.
    """
    try:
        float(n)
    except ValueError:
        return False
    return True
def check_if_vertically_overlapped(box_a, box_b):
    """
    Return whether box_b intersects box_a vertically.

    True when either box's edge falls strictly inside the other's vertical
    span, or when one box vertically contains the other (edges inclusive).

    :param box_a: dict with 'y_min' / 'y_max' keys
    :param box_b: dict with 'y_min' / 'y_max' keys
    :return: True if intersected, False otherwise
    """
    a_min, a_max = box_a['y_min'], box_a['y_max']
    b_min, b_max = box_b['y_min'], box_b['y_max']
    # Either of b's edges strictly inside a's span.
    if a_min < b_min < a_max or a_min < b_max < a_max:
        return True
    # a contained in b (inclusive).
    if a_min >= b_min and a_max <= b_max:
        return True
    # b contained in a (inclusive).
    return a_min <= b_min and a_max >= b_max
def scan(backend, timeout=10):
    """Scan for mithermometer devices.

    Note: this must be run as root!

    The original built ``result`` but never appended to it, so an empty
    list was always returned; each discovered (mac, name) pair is now
    collected as well as printed.

    :param backend: object exposing ``scan_for_devices(timeout)`` yielding
        (mac, name) pairs.
    :param timeout: scan duration passed through to the backend.
    :return: list of (mac, name) tuples found during the scan.
    """
    result = []
    for (mac, name) in backend.scan_for_devices(timeout):
        print(mac + " " + name)
        result.append((mac, name))
    return result
import re
def orgunit_cleanup_name(name_str):
    """
    Convert a health-facility name to DHIS2 standard form and fix any
    whitespace issues (leading, trailing or repeated).
    """
    if not name_str:
        # None / empty string passes straight through.
        return name_str
    # Remove leading/trailing whitespace, then compress internal runs.
    name_str = re.sub(r'\s+', ' ', name_str.strip())
    # Ordered substitutions; ' HCIV' must run before ' HCII' so that the
    # latter (which also matches HC II / HC III) doesn't mangle it.
    for old, new in (
        (' H/C I', ' HC I'),            # matches ' HC II', ' HC III' and ' HC IV'
        (' Health Centre I', ' HC I'),
        (' Health Center I', ' HC I'),
        (' HCIV', ' HC IV'),
        (' HCII', ' HC II'),            # matches HC II and HC III
    ):
        name_str = name_str.replace(old, new)
    return name_str
def ms2smp(ms, fs):
    """Convert a duration in milliseconds to a sample count.

    Parameters
    ----------
    ms: float
        Time in milliseconds
    fs: float
        Sampling rate in Hz.

    Returns
    -------
    int
        Corresponding length in samples (truncated toward zero).
    """
    samples = float(fs) * float(ms) / 1000.0
    return int(samples)
import collections
def DictFilter(alist, bits):
    """Translates bits from EDID into an ordered mapping of feature flags.

    Args:
      alist: A list of (bitmask, label) tuples.
      bits: The bits from EDID that indicate whether each label is supported
        by this EDID or not.

    Returns:
      An OrderedDict mapping each label to a bool, in the order of *alist*.
    """
    return collections.OrderedDict(
        (label, bool(bits & mask)) for mask, label in alist
    )
import re
def is_html(first_bytes):
    """Detect whether a file contains HTML by examining its first bytes.

    Decodes using the encoding implied by a leading BOM (falling back to
    UTF-8 with replacement) and returns the ``re.match`` object (truthy)
    when the text starts with optional whitespace followed by '<', else
    None.
    """
    BOMS = (
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    )
    decoded = None
    for bom, encoding in BOMS:
        if first_bytes.startswith(bom):
            decoded = first_bytes[len(bom):].decode(encoding, 'replace')
            break
    if decoded is None:
        decoded = first_bytes.decode('utf-8', 'replace')
    return re.match(r'^\s*<', decoded)
def unit_round_off(t=23):
    """
    :param t:
        number of significand bits
    :return:
        unit round off based on nearest interpolation, for reference see [1]
    """
    # 0.5 * 2**(1 - t) simplifies exactly to 2**(-t) (both are powers of two).
    return 2.0 ** -t
import sqlite3
def get_setting(path, name):
    """Fetch a single setting value from the settings SQLite database.

    The connection is now closed explicitly (previously it was leaked).

    :param path: filesystem path of the SQLite database.
    :param name: key of the setting to look up.
    :return: the stored value, or None when the key does not exist.
    """
    db = sqlite3.connect(path)
    try:
        row = db.execute(
            "SELECT value FROM setting WHERE key = ?", (name,)
        ).fetchone()
    finally:
        db.close()
    return row[0] if row else None
def contact_get_message_string(user_name, contact_name, contact_email,
                               contact_message):
    """
    Return the contact message string; the user name line is only included
    when the visitor is signed in (user_name is truthy).
    """
    parts = ["Message from FreeFrom \nName: " + contact_name]
    if user_name:
        parts.append("User Name: " + user_name)
    parts.append("Email Address: " + contact_email)
    parts.append("Message: " + contact_message)
    return "\n".join(parts)
def all_chars_to_hex(source):
    """
    Convert every character of *source* into a ``\\xNN`` escape sequence.

    :param source: input string.
    :return: string of concatenated two-digit (minimum) hex escapes.
    """
    return "".join("\\x{:02x}".format(ord(ch)) for ch in source)
def build_data_dict(stats_arr, prefix):
    """
    Build a data dictionary with columns named according to (k, z) bins, a
    threshold value, and some prefix.

    Column keys look like ``tot_<prefix>_h<thres>_k<kbin>_z<zbin>`` (all
    1-based) and each maps to the 1-D slice over samples.

    Assumes that stats_arr has shape: (N_samp, N_thres, N_z, N_kbins)
    """
    _, n_thres, n_z, n_kbins = stats_arr.shape
    return {
        "tot_%s_h%d_k%d_z%d" % (prefix, thres + 1, kbin + 1, zbin + 1):
            stats_arr[:, thres, zbin, kbin]
        for thres in range(n_thres)
        for zbin in range(n_z)
        for kbin in range(n_kbins)
    }
def subspace(T, i, clqs, j=None):
    """
    If T is a matrix, projects out T_ij : \\Omega_i -> \\Omega_j from
    T : \\Omega -> \\Omega (j = i by default).
    Else, if T is a vector, projects out T_i \\in \\Omega_i from
    T \\in \\Omega.
    """
    dim = clqs.dim
    row_offset = sum(dim(k) for k in range(i))
    col_offset = row_offset if j is None else sum(dim(k) for k in range(j))
    n_rows = dim(i)
    n_cols = n_rows if j is None else dim(j)
    if len(T.shape) == 2:
        return T[row_offset:row_offset + n_rows,
                 col_offset:col_offset + n_cols]
    if j is None:
        return T[row_offset:row_offset + n_rows]
    # A vector with an explicit j makes no sense.
    raise Exception('Unsupported arguments')
def end_marker(data):
    """Return True when *data* ends with '}' followed by a line feed.

    Go always outputs a line feed at the end of output, and just to be
    sure we check that '}' is the next-to-last character as expected.
    This seems somewhat brittle but it works better in practice than
    using short timeouts, since some database queries will break that
    very easily.

    Previously this indexed ``data[-1]``/``data[-2]`` (IndexError on
    strings shorter than 2) and implicitly returned None in the negative
    case; ``str.endswith`` handles both and always returns a bool.
    """
    return data.endswith('}\n')
def text_align(keyword):
    """``text-align`` property validation."""
    return keyword in {'left', 'right', 'center', 'justify'}
def is_nested(ctx, rp1_uuid, rp2_uuid):
    """Returns True if the two resource providers are related with a :CONTAINS
    relationship. The direction of the relationship doesn't matter.

    :param ctx: request context exposing ``tx.run()`` for executing a Cypher
        query (presumably a neo4j transaction -- TODO confirm).
    :param rp1_uuid: UUID string of the first resource provider.
    :param rp2_uuid: UUID string of the second resource provider.
    :returns: True when any undirected :CONTAINS* path links the two nodes.
    """
    # NOTE(review): the UUIDs are %-interpolated straight into the Cypher
    # text; if they can ever come from untrusted input this should use
    # driver query parameters instead.
    query = """
    MATCH p=(rp1 {uuid: '%s'})-[:CONTAINS*]-(rp2 {uuid: '%s'})
    RETURN p
    """ % (rp1_uuid, rp2_uuid)
    # data() materialises the matched paths; empty list => no relationship.
    result = ctx.tx.run(query).data()
    return bool(result)
import random
def ChooseFrom(choice_list):
    """Return one element of *choice_list* picked uniformly at random.

    Args:
      choice_list: non-empty sequence of candidate values.

    Returns:
      A single element drawn from choice_list.
    """
    chosen = random.choice(choice_list)
    return chosen
import copy
def merge_items(new_content, curr_contents):
    """
    Merge a dict of partial or full items into a deep copy of the current
    contents to produce the target contents.

    Missing sections are created; existing sections are updated with the
    values from *new_content* (a None/empty value updates nothing).

    :param new_content: dict to merge into curr_contents
    :type: dict
    :param curr_contents: current dict content for the conf file
    :type: dict
    :return: the merged dict and a changed flag (always True here)
    :rtype: tuple[dict, bool]
    """
    merged = copy.deepcopy(curr_contents)
    for section, content in new_content.items():
        if section in merged:
            merged[section].update(content or {})
        else:
            merged[section] = content
    return merged, True
from re import match as re_match
def event_nonum_args(e):
    """
    Given an EventAnnotation, returns its arguments grouped by role with
    trailing numbers stripped (e.g. "Theme1" -> "Theme").
    """
    grouped = {}
    for role, aid in e.args:
        match = re_match(r'^(.*?)\d*$', role)
        if match:
            role = match.group(1)
        grouped.setdefault(role, []).append(aid)
    return grouped
def validate_rule_group_type(rule_group_type):
    """
    Validate Type for RuleGroup
    Property: RuleGroup.Type

    Raises ValueError for anything other than STATEFUL / STATELESS and
    returns the value unchanged otherwise.
    """
    allowed = ("STATEFUL", "STATELESS")
    if rule_group_type not in allowed:
        raise ValueError(
            "RuleGroup Type must be one of %s" % ", ".join(allowed)
        )
    return rule_group_type
def model_tavg(tmin=0.0,
               tmax=0.0):
    """Tavg (STICS, v1.0, time step 1): mean air temperature.

    Part of the snow/weather recalculation model.
    Reference: doi:10.1016/j.agrformet.2014.05.002 (INRA).

    :param tmin: current minimum air temperature (degC, 0-500).
    :param tmax: current maximum air temperature (degC, 0-100).
    :return: tavg, the mean of tmin and tmax (degC).
    """
    return (tmin + tmax) / 2
def _GenerateUniqueNames(lu, exts):
    """Generate a suitable LV name for each extension in *exts*.

    A fresh unique ID is drawn from the cluster config for every
    extension, then suffixed with that extension.
    """
    ec_id = lu.proc.GetECId()
    return ["%s%s" % (lu.cfg.GenerateUniqueID(ec_id), ext) for ext in exts]
def timecode_values_are_same(timecodes):
    """
    A SourceClip can have multiple timecode objects (for example an auxTC24
    value that got added via the Avid Bin column). As long as they share the
    same Start and Length values they can be treated as equivalent.
    """
    if len(timecodes) == 1:
        return True
    starts = {tc.getvalue('Start') for tc in timecodes}
    lengths = {tc.getvalue('Length') for tc in timecodes}
    # One distinct start and one distinct length => all equivalent.
    return len(starts) == 1 and len(lengths) == 1
def _collect_neg_cols(cols):
    """Join the negation columns (index 7..end) of one token line.

    Elements of the same negation instance are separated by spaces and
    distinct instances by tabs; a lone "***" in column 7 marks the absence
    of negation. Returns "" when the line has no column 7.
    """
    last = len(cols) - 1
    joined = ""
    for i in range(7, last + 1):
        if i == 7 and cols[i] == "***":
            # no negation on this token
            joined = cols[i]
        elif i == 7:
            # first negation column: space-separate within an instance
            joined = cols[i] + " "
        elif i == last:
            # last column: no trailing separator
            joined += cols[i]
        elif i % 3 == 0:
            # end of one negation instance: tab-separate instances
            joined += cols[i] + "\t"
        else:
            joined += cols[i] + " "
    return joined


def get_info_sentence(st_tmp_lineg, st_tmp_linep):
    """
    Get the negation info of one token line from the gold and system files.

    Takes two tab-separated strings (st_tmp_lineg = gold line,
    st_tmp_linep = system line) and returns (tmp_neg_g, tmp_neg_p, POS_tag)
    where the negation columns (7 to end) of each line are joined as
    described in _collect_neg_cols and POS_tag is column 5 of the gold line.

    The two previously duplicated 20-line loops are now a shared helper.
    """
    gold_cols = st_tmp_lineg.split("\t")
    sys_cols = st_tmp_linep.split("\t")
    pos_tag = gold_cols[5]
    return _collect_neg_cols(gold_cols), _collect_neg_cols(sys_cols), pos_tag
import torch
def get_semantic_segmentation(sem):
    """
    Post-processing for the semantic segmentation branch.

    Arguments:
        sem: A Tensor of shape [N, C, H, W]; only N=1 is supported.

    Returns:
        A Tensor of shape [1, H, W] holding the per-pixel argmax over C
        (to be gathered by distributed data parallel).

    Raises:
        ValueError, if batch size is not 1.
    """
    if sem.size(0) != 1:
        raise ValueError('Only supports inference for batch size = 1')
    # Drop the batch dim, then take the channel-wise argmax keeping a
    # singleton leading dim.
    return torch.argmax(sem[0], dim=0, keepdim=True)
import torch
def getKneighborsConnections(affinity_mat: torch.Tensor, p_value: int):
    """
    Binarize the top-p values of each row of the given affinity matrix.

    Note: for row ``r`` the top-p indices are written into COLUMN ``r`` of
    the output (``binarized[indices, r] = 1``), i.e. the marks land
    transposed relative to the input rows.
    """
    binarized = torch.zeros_like(affinity_mat).int()
    for row_idx in range(affinity_mat.shape[0]):
        row = affinity_mat[row_idx, :]
        top_idx = torch.argsort(row, descending=True)[:p_value]
        binarized[top_idx, row_idx] = (
            torch.ones(top_idx.shape[0]).to(affinity_mat.device).int()
        )
    return binarized
def crop_center(img, size):
    """
    Crop out the center of *img* along its trailing dimensions according to
    the given *size* tuple; dimensions where the requested size is not
    smaller than the image are left untouched.
    """
    trailing = img.shape[-len(size):]
    index = []
    for extent, wanted in zip(trailing, size):
        if wanted < extent:
            index.append(slice((extent - wanted) // 2,
                               (extent + wanted) // 2))
        else:
            index.append(slice(None))
    return img[(Ellipsis,) + tuple(index)]
def get_furthest_point(matrix, point):
    """
    Given a distance matrix and a point index, return the smallest index of
    the point at maximum distance from it. O(n) runtime.
    """
    row = matrix[point, ]
    target = max(row)
    for idx, dist in enumerate(row):
        if dist == target:
            # first hit == smallest index
            return idx
    return -1
def create_questions_images_ids(questions_captions):
    """
    Returns:
        questions: set of question ids.
        image_ids: set of image ids.
    """
    questions = {entry['question_id'] for entry in questions_captions}
    image_ids = {entry['image_id'] for entry in questions_captions}
    return questions, image_ids
import json
def GetDownloadSerializationData(src_obj_metadata, progress=0):
    """Returns download serialization data as a JSON string.

    There are four entries:
      auto_transfer: JSON-specific field, always 'False'.
      progress: how much of the download has already been completed.
      total_size: total object size.
      url: implementation-specific field used for saving a metadata get
        call (for JSON, the download URL of the object; for XML, a pickled
        boto key).

    Args:
      src_obj_metadata: object to be downloaded (exposes .size/.mediaLink).
      progress: see above.

    Returns:
      Serialization data for use with Cloud API GetObjectMedia.
    """
    return json.dumps({
        'auto_transfer': 'False',
        'progress': progress,
        'total_size': src_obj_metadata.size,
        'url': src_obj_metadata.mediaLink,
    })
def create_crumb(title, url=None, active=False):
    """
    Helper that renders a single breadcrumb <li> element.

    The title is wrapped in an anchor when *url* is given, and the item
    gets class="active" when *active* is set.
    """
    attrs = ' class="active"' if active else ""
    if url:
        inner = '<a href="%s">%s</a>' % (url, title)
    else:
        inner = "%s" % title
    return "<li%s>%s</li>" % (attrs, inner)
import pickle
def is_picklable(obj: object) -> bool:
    """Return True when *obj* can be pickled.

    ``pickle.dumps`` signals unpicklable objects with PickleError or
    AttributeError, but also with TypeError (e.g. generators, some C
    objects); TypeError previously escaped and crashed the caller, so it
    is now treated as "not picklable" too.
    """
    try:
        pickle.dumps(obj)
        return True
    except (pickle.PickleError, AttributeError, TypeError):
        return False
def verifica_expresia(paranteze):
    """Check that all brackets in the expression are balanced.

    (Docstring translated from Romanian.) Verifies that every '(' / '['
    is closed by the matching ')' / ']' in the correct order.

    Fixes over the original: leftover open brackets are now rejected
    (previously "((" returned True because the remaining stack was never
    checked), and non-bracket characters are ignored instead of being
    pushed onto the stack (previously "(a)" was reported invalid).

    :param paranteze: expression string to validate.
    :return: True when the brackets are correctly nested, False otherwise.
    """
    pairs = {')': '(', ']': '['}
    stack = []
    for ch in paranteze:
        if ch in '([':
            stack.append(ch)
        elif ch in ')]':
            # A closer must match the most recent unclosed opener.
            if not stack or stack.pop() != pairs[ch]:
                return False
    # Valid only when every opener was closed.
    return not stack
import json
def toJson(val):
    """Return a JSON string for *val*.

    Supported datatypes:
        sqlalchemy.engine.result.RowProxy (converted to a plain dict first)
        anything json.dumps already accepts (dict, list, str, ...)

    Args:
        val: value that gets converted to a JSON string.
    """
    # RowProxy is detected by its repr'd type to avoid importing sqlalchemy.
    if str(type(val)) == "<class 'sqlalchemy.engine.result.RowProxy'>":
        val = dict(val.items())
    return json.dumps(val)
import requests
def _is_ssl_issue(jwks_url: str) -> bool:
    """Helper to verify whether auth failed because of an SSL issue when
    looking up the JWKS endpoint.

    Args:
        jwks_url: url to the jwks endpoint

    Returns:
        True if fetching the URL raises an SSL error; False for anything
        else (success or any other failure).
    """
    result = False
    try:
        requests.get(jwks_url)
    except requests.exceptions.SSLError:
        result = True
    except Exception:
        # Deliberate best-effort: any non-SSL outcome means "not an SSL
        # issue". Narrowed from a bare ``except`` so that SystemExit and
        # KeyboardInterrupt still propagate.
        pass
    return result
import math
def xp_to_level(xp: float):
    """
    Convert an XP amount to a level.

    :type xp: float
    :param xp: XP
    :return: level (0 for any raw value at or below zero, otherwise the
        floor of the raw level)
    """
    raw_level = math.sqrt(xp + 80) / math.sqrt(5) - 6
    if raw_level <= 0:
        return 0
    return math.floor(raw_level)
from collections import OrderedDict
def cols2dict(headers, iterable):
    """Create an ordered dictionary from parallel sequences of keys and values.

    Changes: the error message typo ("amd" -> "and") is fixed, and the
    deprecated ``typing.OrderedDict`` import is replaced by the real
    ``collections.OrderedDict``.

    :param headers: sequence of hashable items used as keys.
    :param iterable: sequence of values; must match *headers* in length.
    :return: OrderedDict pairing each header with the value at its position.
    :raises ValueError: when the two sequences differ in length.
    """
    if len(headers) != len(iterable):
        raise ValueError('headers and iterable sequences are not of equal length')
    return OrderedDict(zip(headers, iterable))
def get_jsonl_rows(assignments, submissions, reviewers, db_papers):
    """
    Get the list of data to write to the output jsonl file.

    Args:
        assignments (list[dict]): The per-submission assignment data
        submissions (list[dict]): jsonl-style dicts with the submission data
        reviewers (list[dict]): jsonl-style dicts with the reviewer data
        db_papers: jsonl-style dicts with the ACL-Anthology paper data

    Returns:
        list[dict]: jsonl-style rows containing the reviewer assignment
    """
    def _paper_entry(paper_idx, score):
        # Summarize one similar paper together with its similarity score.
        paper = db_papers[paper_idx]
        return {
            'title': paper['title'],
            'paperAbstract': paper['paperAbstract'],
            'score': score,
        }

    def _reviewer_entry(reviewer_idx, score):
        # Copy the reviewer record and attach the similarity score.
        entry = dict(reviewers[reviewer_idx])
        entry['score'] = score
        return entry

    rows = []
    for sub_idx, assignment in enumerate(assignments):
        row = dict(submissions[sub_idx])
        # Top similar papers.
        row['similarPapers'] = [
            _paper_entry(i, s)
            for i, s in zip(assignment['similar_paper_global_ids'],
                            assignment['similar_paper_scores'])
        ]
        # Top similar reviewers.
        row['topSimReviewers'] = [
            _reviewer_entry(i, s)
            for i, s in zip(assignment['similar_reviewer_global_ids'],
                            assignment['similar_reviewer_scores'])
        ]
        # Reviewers actually assigned.
        row['assignedReviewers'] = [
            _reviewer_entry(i, s)
            for i, s in zip(assignment['assigned_reviewer_global_ids'],
                            assignment['assigned_reviewer_scores'])
        ]
        rows.append(row)
    return rows
def list_difference(l1, l2):
    """
    Return the elements of l1 that are not in l2 (set semantics, so
    duplicates collapse and ordering is unspecified).

    :param l1: first list
    :param l2: second list
    :return: list of set(l1) - set(l2)
    """
    return list(set(l1).difference(l2))
import codecs
def write_string_to_html_file(string_to_write, filename):
    """Write *string_to_write* to *filename* as UTF-8 with a BOM.

    Args:
        string_to_write (str): text to be saved.
        filename (str): path to the file.

    Returns:
        Whatever the underlying codecs write call returns.
    """
    with codecs.open(filename, "w", "utf-8-sig") as html_file:
        written = html_file.write(string_to_write)
    return written
import argparse
def parse_arguments():
    """Parse the command-line arguments for the training script."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("out_dir",
                        help="path to save the plots and results")
    parser.add_argument("model_choice", default=None,
                        choices=['LSTM', 'BiLSTM'], type=str,
                        help='Choice of training architecture')
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True ("-subword_info False" is truthy) -- confirm whether
    # action='store_true' was intended.
    parser.add_argument("-subword_info", default=None, type=bool,
                        help='use pre-embeddings with subword information')
    return parser.parse_args()
def _parse_grid_origin(grid_origin, radars):
    """Parse the grid origin parameter, defaulting to the first radar's
    (latitude, longitude) when none is given."""
    if grid_origin is not None:
        return grid_origin
    first_radar = radars[0]
    return (float(first_radar.latitude['data']),
            float(first_radar.longitude['data']))
import argparse
def arguments_handler():
    """Validate and parse the script arguments.

    Returns:
        Namespace: parsed arguments object with ``pr_number`` and
        ``github_token`` attributes.
    """
    parser = argparse.ArgumentParser(
        description='Check if the contribution form needs to be filled.')
    parser.add_argument(
        '-p', '--pr_number',
        help='The PR number to check if the contribution form needs to be filled.')
    parser.add_argument(
        '-g', '--github_token',
        help='The GitHub token to authenticate the GitHub client.')
    return parser.parse_args()
def remove_empty_right_end(mps):
    """
    Removes any extra empty indices on the last site of the mps

    :param mps: sequence of tensor-like site objects exposing ``legs``,
        ``unmerge_ind`` and ``remove_empty_ind`` (project tensor type --
        TODO confirm exact contract).
    :returns: the same ``mps`` sequence, with its first element rebuilt.
    """
    # Number of sub-indices merged into index 0 of the first site.
    # NOTE(review): despite the docstring saying "last site", the code
    # operates on mps[0] -- confirm whether the chain is stored reversed.
    nleft = len(mps[0].legs[0])
    if nleft > 1:
        mps[0].unmerge_ind(0)
        # Remove from highest index down so earlier removals do not shift
        # the positions of the indices still to be removed.
        for i in reversed(range(nleft-1)):
            mps[0] = mps[0].remove_empty_ind(i)
    return mps
def calc_Flesh_Kincaid(n_syllabes, n_words, n_sent):
    """Flesch-Kincaid readability score for English text.

    (Docstring translated from Russian: "Flesh Kincaid metric for the
    English language".)
    """
    words_per_sentence = float(n_words) / n_sent
    syllables_per_word = float(n_syllabes) / n_words
    return 206.835 - 1.015 * words_per_sentence - 84.6 * syllables_per_word
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.