| content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
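# Quick check (illustrative, not from the source): four little-endian bytes -> int.
# >>> _r_long(b'\x01\x00\x00\x00')
# 1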
|
160a4fcf1acb9831baed8f9ee9307359a9690965
| 38,977
|
def search_for_transcript(edge_IDs, transcript_dict):
""" Given the edge IDs (in set data structure) that make up a query
transcript, look for a match in the transcript dict.
    Return the gene ID and transcript entry if found, and (None, None) if not. """
try:
transcript = transcript_dict[edge_IDs]
gene_ID = transcript["gene_ID"]
return gene_ID, transcript
    except (KeyError, TypeError):  # missing key, or edge_IDs not hashable (e.g. a plain set)
return None, None
|
bbbf54e7a1c6c47d5fec7d2fcd424bd885b710bc
| 38,978
|
def coinify(atoms):
"""
Convert the smallest unit of a coin into its coin value.
Args:
atoms (int): 1e8 division of a coin.
Returns:
float: The coin value.
"""
return round(atoms / 1e8, 8)
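# For instance (illustrative value): 150,000,000 atoms is 1.5 coins.
# >>> coinify(150000000)
# 1.5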
|
3acc1771168ba7b990282ccfe3bb6ce3adfbdd7b
| 38,979
|
def transform_input_data(data):
"""Transform input data dictionary into format ready to use with
model.predict"""
return {key: [value] for key, value in data.items()}
|
8edf2d47f0bc009896950d6c78edbf3c9e8a5b37
| 38,980
|
import requests
import configparser
def conf_from_url(url):
"""Read conf file from an URL.
Parameters
----------
url : str
conf file url (in a repo, make sure the "raw" url is passed)
Returns
-------
conf object
"""
text = requests.get(url).text
config = configparser.ConfigParser()
config.read_string(text)
return config
|
94eda1351b0ff38593bcc0ad485a8b43ad41fb09
| 38,987
|
import hashlib
def _fingerprint(path):
"""Fingerprint a file."""
    # Read as bytes so binary files hash correctly too
    with open(path, 'rb') as fil:
        return hashlib.md5(fil.read()).hexdigest()
|
65d200af8f8e2425f44cff5deeb60656ff572eb9
| 38,996
|
def movieTitle_to_id(movies_titles_list,movies):
"""
Convert a list of movie titles to a list of movie ids
"""
    movies_list = []
    for title in movies_titles_list:
        movie_id = movies['movieId'][movies['title'] == title].iloc[0]
        movies_list.append(movie_id)
    return movies_list
|
9e594f8bc590b29b9a09b68b197477ca66df918e
| 38,997
|
def validate_submission(y_pred_file):
"""
Validate that y_pred file is a valid prediction file.
Args:
y_pred_file: Predicted values object in sample_submission format
Returns:
bool: Validation succeeded, if false a detailed error message will be
supplied
Raises:
Any exception will be caught and str(exc) presented to the students
"""
    if len(y_pred_file.columns) != 2:
        raise ValueError("Make sure you have only two columns in your dataset "
                         "with names User-ID and ISBN!")
    if not all(y_pred_file.columns == ['User-ID', 'ISBN']):
        raise ValueError("Make sure you have only two columns in your dataset "
                         "with names User-ID and ISBN!")
    if y_pred_file.groupby('User-ID').count().ISBN.unique()[0] != 10:
        raise ValueError("You have to submit 10 (and only 10) books per user!")
    if len(y_pred_file.groupby('User-ID').count().ISBN.unique()) != 1:
        raise ValueError("You have to submit 10 (and only 10) books per user!")
    if len(y_pred_file['User-ID'].unique()) != 589:
        raise ValueError("Make sure you have all test users in your submission!")
return True
|
b4795ea086ce493f15ca6453dbdefca53d36ad05
| 38,999
|
def extract_indices_from_dependencies(dependencies):
""" Extract all tokens from dependencies
Input example:
[[8, 'cop', 7], [8, 'nsubj', 6]]
Output example:
[6, 7, 8]
"""
word_positions = set()
for governor_pos, _, dependent_pos in dependencies:
word_positions.add(governor_pos)
word_positions.add(dependent_pos)
    return sorted(word_positions)
|
ed171902f6d5b9d3f28a56d866ff6a3011f2ec4e
| 39,006
|
def dfdb(B, E):
"""
B is the base
E is the exponent
f = B^E
partial df/dB = E * B**(E-1)
"""
out = E * (B**(E-1))
return out
|
89047a198028320ecd2cbfac26064db6af8a784b
| 39,008
|
def insert_doc(doc, new_items):
"""Insert ``new_items`` into the beginning of the ``doc``
Docstrings in ``new_items`` will be inserted right after the
*Parameters* header but before the existing docs.
Parameters
----------
doc : str
        The existing docstring we're inserting documentation into.
new_items : list
List of strings to be inserted in the ``doc``.
Examples
--------
>>> from nipype.utils.docparse import insert_doc
>>> doc = '''Parameters
... ----------
... outline :
... something about an outline'''
>>> new_items = ['infile : str', ' The name of the input file']
>>> new_items.extend(['outfile : str', ' The name of the output file'])
>>> newdoc = insert_doc(doc, new_items)
>>> print(newdoc)
Parameters
----------
infile : str
The name of the input file
outfile : str
The name of the output file
outline :
something about an outline
"""
# Insert new_items after the Parameters header
doclist = doc.split('\n')
tmpdoc = doclist[:2]
# Add new_items
tmpdoc.extend(new_items)
# Add rest of documents
tmpdoc.extend(doclist[2:])
    # Join with newlines
    return '\n'.join(tmpdoc)
|
6b729e9066c2690801d7a749fd366e828bc8cd18
| 39,012
|
def _get_encoder_dimension(encoder):
"""Find dimensionality of encoded vectors.
Args:
encoder: Object implementing the encode() method which takes
a list of strings as input and returns a list of
numpy vectors as output.
Returns:
dimension: Integer size of the encoded vectors.
"""
vector = encoder.encode(['test sentence'])
dimension = vector[0].shape[0]
return dimension
|
cef789252f4dd9975f1e1bddc5175bbdd49d9220
| 39,013
|
def change_current_lang_and_return(lang):
"""Change current language of text/speech recognition and return it"""
global CURRENT_LANG
CURRENT_LANG = lang
return CURRENT_LANG
|
3f4b377ad9eab98622890cff5296436e08926f38
| 39,033
|
def create_ngrams(kw_iterable, max_n=None):
"""takes a list of keywords and computes all possible ngrams e.g.
in> ['nice', 'red', 'wine']
out> [
('nice',),
('red',),
('wine',),
('nice', 'red'),
('red', 'wine'),
('nice', 'red', 'wine')
]
"""
    kwCount = len(kw_iterable)
    output = []
    if max_n is None:  # no cap given: allow n-grams up to the full length
        max_n = kwCount
    for n in range(1, kwCount + 1):  # ascending n, matching the docstring order
        if n <= max_n:
            for tokenIndex in range(kwCount - n + 1):
                output.append(tuple(kw_iterable[tokenIndex:tokenIndex + n]))
    return output
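# A hedged illustration of the max_n cap (values are made up):
# >>> create_ngrams(['nice', 'red', 'wine'], max_n=2)
# [('nice',), ('red',), ('wine',), ('nice', 'red'), ('red', 'wine')]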
|
16a2dab3240214162e1b386e30e912a38e564458
| 39,041
|
def arr_to_json_fixturen(array):
"""Convert given iterable to dict for json transformation.
    Desired format for django
{
"id" : None,
"model" : "ocdb.Sector",
"fields" : {
"name" : <mandatory>
"fk_sector" : <optional> *
}
}
*e.g. not for elements with no more parents like continents
"""
data = {}
data["id"] = None
data["model"] = "ocdb.Sector"
data["fields"] = {
"name": array[0],
}
    parent = array[1:]
    if tuple(parent) != (None, None):  # skip top-level elements with no parent
        data["fields"]["fk_sector"] = parent
return data
|
45956ae37638342c3929adda3952f16d87f02025
| 39,042
|
def get_station_daily_path(station_id):
"""
Get path to a station daily file.
:param station_id:
:return:
"""
return "/pub/data/ghcn/daily/all/{0}.dly".format(station_id)
|
24060b3e6393318b6d7ad8a7dfe2d2d32d934079
| 39,050
|
def sites_difference(site1, site2):
"""Return minimal sequence of nucleotides that should be added at the end
of site1 to make site2 appear."""
for i in range(len(site2), -1, -1):
if site2[:i] == site1[-i:]:
return site2[i:]
return site2
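# Sketch with hypothetical sites: the longest suffix of site1 that is a
# prefix of site2 is "GT", so only the remainder of site2 is returned.
# >>> sites_difference("ACGT", "GTAC")
# 'AC'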
|
3bcd7d4eda4fd23d253b3a5f77255663c13c2ab1
| 39,055
|
def format_includes(includes):
"""
Format includes for the api query (to {'include' : <foo>,<bar>,<bat>})
:param includes: str or list: can be None, related resources to include
:return: dict: the formatted includes
"""
result = None
if isinstance(includes, str):
result = includes
elif isinstance(includes, list):
result = ','.join(includes)
return {'include': result} if result is not None else {}
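# Illustrative calls (resource names are made up):
# >>> format_includes(['authors', 'tags'])
# {'include': 'authors,tags'}
# >>> format_includes(None)
# {}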
|
9f15ac9b767b6612794bec7b14427b6f90d4a734
| 39,067
|
def top_k_predictions(model, probs, k: int):
""" Returns the top `k` most probable classes from our model
After training a fastai Learner on a multi-label classification problem,
return the label and probabilities associated with the top `k` most
probable classes.
Args:
model [Learner]: Fastai Learner trained to do multi-label classification
probs [Tensor]: Tensor of class probabilities
k [int]: Number of classes/probabilities to return
Returns:
List [str]: Top k classes
        List [tensor]: Probabilities associated with the top k classes
"""
# Determine all the potential classes our model will choose from
classes = model.data.classes
num_classes = len(classes)
# Limit k to the total number of classes
if k > num_classes:
k = num_classes
# Get the indices of the `k` classes with highest probabilities
top_k_indices = probs.argsort(descending=True)[:k]
labels = []
probabilities = []
for i in top_k_indices:
labels.append(classes[i])
probabilities.append(probs[i])
return labels, probabilities
|
abedd8ac1b5d90f4485dda8d7c0bae966d57d328
| 39,071
|
def sparse_batch_mm(m1, m2):
"""
https://github.com/pytorch/pytorch/issues/14489
m1: sparse matrix of size N x M
m2: dense matrix of size B x M x K
returns m1@m2 matrix of size B x N x K
"""
batch_size = m2.shape[0]
    # stack m2 into columns: (B x M x K) -> (M, B, K) -> (M, B * K)
m2_stack = m2.transpose(0, 1).reshape(m1.shape[1], -1)
result = m1.mm(m2_stack).reshape(m1.shape[0], batch_size, -1) \
.transpose(1, 0)
return result
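# Shape sketch (assumes torch is imported; the tensors are arbitrary, just to
# show B x M x K -> B x N x K):
# m1 = torch.eye(3).to_sparse()   # N x M sparse, N = M = 3
# m2 = torch.randn(5, 3, 4)       # B x M x K
# sparse_batch_mm(m1, m2).shape   # torch.Size([5, 3, 4])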
|
6fe7a5f4b407d27b71b646872d43a78154c594e8
| 39,073
|
def summarizekeys(d, counts=None, base=''):
    """Summarizes keys in the given dict, recursively.
    This means counting how many fields exist at each level.
    Returns keys of the form ``key0.key1`` and values of ints.
    Checks if `d` is instance of dict before doing anything.
    """
    if counts is None:  # avoid sharing one mutable default dict across calls
        counts = {}
    if not isinstance(d, dict): return counts
for k, v in d.items():
k = '.'.join((base, k)) if base else k
if k not in counts:
counts[k] = 0
counts[k] += 1
summarizekeys(v, counts=counts, base=k)
return counts
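# Quick illustration: nested keys are counted under dotted paths.
# >>> summarizekeys({'a': {'b': 1}, 'c': 2})
# {'a': 1, 'a.b': 1, 'c': 1}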
|
714fc56cba1d63178902987623aa0a246c20aa6d
| 39,075
|
import hashlib
def hash_password(pw_in):
"""Take password as input, hash it in SHA-1, and split it for use later on"""
hash = hashlib.sha1()
hash.update(str.encode(pw_in))
digest = hash.hexdigest()
return [digest[:5], digest[5:]]
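# The 5/35 split mirrors k-anonymity range queries a la HaveIBeenPwned
# (an assumption about intent; the source doesn't say). Well-known SHA-1 example:
# >>> hash_password('password')
# ['5baa6', '1e4c9b93f3f0682250b6cf8331b7ee68fd8']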
|
f3950dcda6053f5be3aaa05e11fb8b147abf5eb5
| 39,077
|
def NumGems(gems):
"""Returns the number of gems in the defaultdict(int)."""
return sum(gems.values())
|
60afed06af7c45daa1f8ef4f1097658cfc31e74d
| 39,078
|
def tree_attribute(identifier):
"""
Predicate that returns True for custom attributes added to AttrTrees
that are not methods, properties or internal attributes.
These custom attributes start with a capitalized character when
applicable (not applicable to underscore or certain unicode characters)
"""
    if not identifier[0].upper().isupper() and identifier[0] != '_':
        return True
    return identifier[0].isupper()
|
a2e55597dc6df6a897f87a819e6d1dce2580923f
| 39,081
|
def get_sim_name(obj):
"""
Get an in-simulation object name: if ``obj`` has attribute
``__sim_name__``, it will be returned, otherwise ``__name__``
standard attribute.
Args:
obj: an object to get name of
Returns: object name
"""
if hasattr(obj, '__sim_name__') and obj.__sim_name__ is not None:
return obj.__sim_name__
else:
return obj.__name__
|
80c7b29047f09d5ca1f3cdb2d326d1f2e75d996b
| 39,082
|
def scoreSorter(order):
"""Sort by score value
"""
    if order == 'asc':
        return {"_score": {"order": "asc"}}  # explicit ascending sort
    return "_score"  # plain _score sorts descending by default in Elasticsearch
|
142e532bd744d7bbcd6568c8e027a441ab7a4e04
| 39,084
|
def braced(s):
"""Wrap the given string in braces, which is awkward with str.format"""
return '{' + s + '}'
|
360bfd93ab70ae8393563e25de3d437e1aebfb85
| 39,087
|
import math
def __get_views(views):
"""Convert viewcount to human readable format"""
    views = int(views)
    if views == 0:
return '0'
millnames = ['', 'k', 'M', 'Billion', 'Trillion']
millidx = max(0, min(len(millnames) - 1,
int(math.floor(math.log10(abs(views)) / 3.0))))
return '%.0f%s' % (views / 10 ** (3 * millidx), millnames[millidx])
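# e.g. (illustrative counts): __get_views(5400) -> '5k', __get_views(1234567) -> '1M'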
|
6229801d09a703f860a1ba5e285d76b199cfdffb
| 39,096
|
def EXACT(string1, string2):
"""
    Tests whether two strings are identical. Same as `string1 == string2`.
>>> EXACT("word", "word")
True
>>> EXACT("Word", "word")
False
>>> EXACT("w ord", "word")
False
"""
return string1 == string2
|
d6feb4c40bc93fa1ec5426f394a47c5d42c21dfd
| 39,097
|
import json
def putTableAll(obj):
"""
Returns table as string showing standings of league with all data
Parameters:
-----------
obj: dict
JSON object of league standings obtained from API/cache
Returns:
--------
str
Standings as a text code block (to get monospaced text) showing all data
"""
    try:
        assert isinstance(obj, dict)
        with open('source/teamcodes.json', 'r') as fin:
            mapper = json.load(fin)
        str_re = '```\nLEAGUE: ' + str(obj['competition']['name']) +\
            ' ' * (45 - 2 - 8 - 10 - len(str(obj['competition']['name']))) +\
            'MATCHDAY: ' + str(obj['season']['currentMatchday']) + '\n'
        str_re += '╔════╤══════╤════╤════╤════╤════╤═════╤═════╗\n'
        str_re += '║ SN │ TEAM │ M  │ W  │ D  │ L  │ PTS │ GD  ║\n'
        str_re += '╠════╪══════╪════╪════╪════╪════╪═════╪═════╣\n'
        for team in obj['standings'][0]['table']:
            text = '║ %-2d │ %-4s │ %-2d │ %-2d │ %-2d │ %-2d │ %-3d │ %+-3d ║\n'\
                % (team['position'], mapper.get(team['team']['name'], team['team']['name'][:4])[:4], team['playedGames'], team['won'],
                   team['draw'], team['lost'], team['points'], team['goalDifference'])
            str_re += text
        str_re += '╚════╧══════╧════╧════╧════╧════╧═════╧═════╝```'
        return str_re
    except AssertionError:
        return 'Error!'
|
ae46f33be6200363ab2876fd4d95a1217d719305
| 39,098
|
def _fully_qualified_name(t: type) -> str:
"""Retrieves the fully qualified name of the provided type.
Args:
t (type): The type whose fully qualified name shall be retrieved.
Returns:
str: The fully qualified name of ``t``.
Raises:
TypeError: If ``t`` is not an instance of ``type``.
"""
if not isinstance(t, type):
raise TypeError(
"The parameter <t> has to be a type, but is an instance of {}!".format(_fully_qualified_name(type(t)))
)
prefix = ""
if hasattr(t, "__module__"):
prefix = t.__module__ + "."
return prefix + t.__name__
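# e.g. on Python 3: _fully_qualified_name(int) -> 'builtins.int'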
|
9a3a6795231b36184ce175c37224aa27e7c6b665
| 39,101
|
def version_string(version):
"""Returns a string from version tuple or list"""
if isinstance(version, str):
return version
return ".".join([f"{v}" for v in version])
|
8688b70a940febebeb16ee2e1f389968aea5a6ea
| 39,108
|
def spritecollideany(sprite, group, collided=None):
"""finds any sprites in a group that collide with the given sprite
pygame.sprite.spritecollideany(sprite, group): return sprite
    Given a sprite and a group of sprites, this will return any single
    sprite that collides with the given sprite. If there are no
collisions, then this returns None.
If you don't need all the features of the spritecollide function, this
function will be a bit quicker.
Collided is a callback function used to calculate if two sprites are
colliding. It should take two sprites as values and return a bool value
indicating if they are colliding. If collided is not passed, then all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if collided:
for s in group:
if collided(sprite, s):
return s
else:
# Special case old behaviour for speed.
spritecollide = sprite.rect.colliderect
for s in group:
if spritecollide(s.rect):
return s
return None
|
4f80e045cec8e8641bc74de15f82adc65394da1a
| 39,113
|
import re
def fixture_readme_code_result(readme_content: str) -> str:
"""Get the README.md example result content.
:param readme_content: plain text content of README.md
:return: example result
"""
match = re.search(r"```text([^`]*)```", readme_content, flags=re.DOTALL)
assert match
return match.group(1).lstrip()
|
ca8fb13bb5013409c91d0ab979848f4745ae6246
| 39,115
|
def divide_list(lst, perc=0.5):
    """
    Divide a list into two new lists. perc is the first list's share: if
    perc=0.6 then the first new list will have 60 percent of the original list.
    example : f,s = divide_list([1,2,3,4,5,6,7], perc=0.7)
    """
    origLen = len(lst)
    lim = int(perc * origLen)
    firstList = lst[:lim]
    secondList = lst[lim:]
    return firstList, secondList
|
afe1068a0cdc4f3125df26b821527838d5ef189e
| 39,116
|
def out_path(tmp_path):
"""Create a temporary output folder"""
out_dir = tmp_path / "output"
out_dir.mkdir()
return out_dir
|
dcd9cadc0d11ba3bb7bd415feaa45bcf093b551f
| 39,124
|
def _get_ftrack_secure_key(hostname, key):
"""Secure item key for entered hostname."""
return "/".join(("ftrack", hostname, key))
|
7513cd5807b1c1697e6c055de40f7873b2bf9d0a
| 39,125
|
def dec_hex(val):
"""
    Format integer to hex
:param val: integer value
:return: string of hexadecimal representation
"""
return '0x%X' % val
|
fbccb1fa66ffef7746e365e1448d164ba9c3a4e6
| 39,131
|
import random
def powerRandomInt(max_val):
"""Returns a random integer from the interval [0, max_val],
using a power-law distribution.
The underlying probability distribution is given by:
P(X >= n) = (c/(n+c))^4, for n>=0 an integer, and where we use c=20.
But if X > max_val is generated then max_val is returned.
Assuming max_val is sufficiently large the distribution should look
approximately like the following. We display all values of n for
which P(n) >= 1%
P(0) = 0.177
P(1) = 0.139
P(2) = 0.111
P(3) = 0.089
P(4) = 0.072
P(5) = 0.059
P(6) = 0.049
P(7) = 0.040
P(8) = 0.034
P(9) = 0.028
P(10) = 0.024
P(11) = 0.020
P(12) = 0.017
P(13) = 0.015
P(14) = 0.013
P(15) = 0.011
P(16) = 0.009
    The mean is approximately 6 and the variance is approximately 4.4.
Args:
max_val {number} A positive number. All returned values will be less than
this.
Returns:
{int} A random integer in the range [0, max_val].
"""
x = int(20*random.paretovariate(4) - 20)
# Ensure the value is in the range [0, max_val]
return max(0, min(x, max_val))
|
b003a0ae90e254afa5bd2afe4f44e8369131bb6e
| 39,134
|
from typing import List
from typing import Union
def _parse_field_names(fields: List[Union[str, dict]]) -> List[str]:
"""Parse field names.
Args:
fields: Either field names or field descriptors with a `name` key.
Returns:
Field names.
"""
return [field if isinstance(field, str) else field["name"] for field in fields]
|
28ab3032c747ffdc6f2197c62374baf4f94e2ce5
| 39,136
|
from typing import Dict
from typing import Any
def check_action(reply: Dict[str, Any]) -> Dict[str, Any]:
"""Check that the reply contains a message of success."""
if not reply["success"]:
raise RuntimeError(f"Error communicating with the large object storage:\n{reply['error']}")
return reply
|
21170d0fb0d417e1ca525cf8540e630aa79a7e72
| 39,141
|
import json
def _parseNative(logs, needle):
"""Parse console logs from Chrome and get decoded JSON.
Args:
logs: Chrome log object
needle (str): the string leading the actual JSON.
Example:
>>> _parseNative([{'message':'a=b'},{'message':'ac={"a":[1,2]}'}],'c=')
{u'a': [1, 2]}
"""
ret = None
for log in logs:
message = log['message']
loc = message.find(needle)
if loc >= 0:
ret = json.loads(message[loc+len(needle):])
return ret
|
2d6af33aaecf4e2d41e83540c337b79475ba3c4c
| 39,142
|
def add_round_key(block, key):
"""Performs a bitwise XOR between a state block and the key.
Parameters
----------
block : np.ndarray
4x4 column major block matrix.
key : np.ndarray
4x4 column major block matrix.
Returns
-------
np.ndarray
4x4 result block matrix
"""
return block ^ key.T
|
1edcf1a28777099a3f7c8bc641606b4297ab8704
| 39,143
|
import requests
def check_status(address, text):
"""Sends request to address and checks if text is present in reponse
Args:
address (str): site address
text (str): text to be checked in responce
Returns:
(status, elapsed): (tuple (str, int)) with status, and responce time
"""
elapsed = 0
try:
r = requests.get(address)
except requests.ConnectionError:
status = "Error: CONNECTION_ERROR"
except Exception as e:
status = 'Error: {}'.format(str(e))
else:
if r.status_code == 200:
status = 'OK' if text in r.text else 'Error: CON_OK_MISSING_TEXT'
else:
status = 'Error: {}'.format(r.status_code)
elapsed = r.elapsed.total_seconds()
return status, elapsed
|
0c15e75f35f3ac65dad66b9ac45f09d2a043e2d0
| 39,145
|
def get_passed_tests(log_file_path):
"""Gets passed tests with OK status"""
ok_test_line_pattern = "[ OK ] "
ok_tests = []
with open(log_file_path) as log_file_obj:
for line in log_file_obj.readlines():
if ok_test_line_pattern in line:
ok_tests.append(line.split(ok_test_line_pattern)[1])
return ok_tests
|
97fdeab4a2330864f57cd7ac70e8d5f81eaeaa78
| 39,146
|
import json
def safeJson(result, path=None):
"""Take the result of a requests call and format it into a structured dict."""
if result.status_code != 200:
output = {'code': result.status_code, 'content': result.text}
print("ConfigManager: get secret failed (token expired?)")
print(json.dumps(output, indent=2))
return output
output = result.json()
if path:
for level in path:
try:
output = output[level]
            except (KeyError, IndexError, TypeError):
return {'code': 200, 'content': "path not found: {0}".format(path)}
return output
|
1a5af140ef8d517a0236eaebfc9c676260a76a14
| 39,152
|
def reference_value_for_covariate_mean_all_values(cov_df):
"""
Strategy for choosing reference value for country covariate.
This one takes the mean of all incoming covariate values.
"""
return float(cov_df["mean_value"].mean())
|
57935c7b39e2f02f059e7f4b4a835bbe84a67081
| 39,153
|
def determine(hand):
"""Returns a list of values, a set of values, a list of suits, and a list of cards within a hand."""
values, vset, suits, all_cards = [], set(), [], []
    for card in hand:
        values.append(int(card))
        vset.add(int(card))
        suits.append(card.suit)
        all_cards.append(card)
return sorted(values, reverse=True), vset, suits, all_cards
|
60318bf9c9259f0741caaadb0246d2d8d66ca4f5
| 39,155
|
def osm_zoom_level_to_pixels_per_meter(
zoom_level: float, equator_length: float
) -> float:
"""
Convert OSM zoom level to pixels per meter on Equator. See
https://wiki.openstreetmap.org/wiki/Zoom_levels
:param zoom_level: integer number usually not bigger than 20, but this
function allows any non-negative float value
:param equator_length: celestial body equator length in meters
"""
return 2.0**zoom_level / equator_length * 256.0
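# Worked example (Earth's equator is about 40075016.686 m):
# osm_zoom_level_to_pixels_per_meter(0, 40075016.686) -> ~6.39e-06 px/m,
# and each +1 zoom level doubles it.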
|
7351c289dde4bc42a46e343efcd3e750841b2c8c
| 39,157
|
def subtract(minuend, subtrahend):
"""Subtracts minuend from subtrahend
This function subtracts minuend from subtrahend only when they are
either integers or floats
Args:
minuend(int/float): The quantity or number from which another
is to be subtracted
subtrahend(int/float): The quantity or number to be subtracted
from another
>>> subtract(10, 6)
4
Returns:
The difference of subtrahend subtracted by the minuend
Raises:
TypeError if subtrahend or minuend is neither an integer or
float
"""
if not isinstance(subtrahend, (int, float)):
raise TypeError(f"{subtrahend} is not an integer or float")
if not isinstance(minuend, (int, float)):
raise TypeError(f"{minuend} is not an integer or float")
return minuend - subtrahend
|
cdd43cd65cef485f929093cf8c5e481b6cc91148
| 39,159
|
import operator
def strip(listed):
"""Strip a list of string"""
return map(operator.methodcaller("strip"), listed)
|
503c7a745c34f211160f45aa7234e96875ddd8fb
| 39,162
|
import hashlib
def get_email_id(row):
"""
Creates a unique identifier for each email based on the
date received and a header ID
"""
text = row['X-GM-THRID'] + str(row['Date'])
return hashlib.md5(text.encode('utf-8')).hexdigest()
|
eddcea303947ef6d1b14b7b0dfb912fcc267f9f1
| 39,164
|
def split_by_unescaped_sep(text, sep=':'):
"""Split string at sep but only if not escaped."""
def remerge(s):
# s is a list of strings.
for i in range(len(s) - 1):
n_esc = len(s[i]) - len(s[i].rstrip('\\'))
if n_esc % 2 == 0:
continue
else:
new_s = s[:i] + [s[i] + sep + s[i + 1]] + s[i + 2:]
return remerge(new_s)
return s
# split by every sep (even unescaped ones)
# then re-merge strings that end in an uneven number of escape chars:
return remerge(text.split(sep))
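# Hedged example (made-up input): the escaped colon is kept intact.
# >>> split_by_unescaped_sep('a\\:b:c')
# ['a\\:b', 'c']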
|
6eb360787a3ba5e08f499d2a3352d1170d2fcf89
| 39,166
|
def sovatoms_to_tokens(sovatoms: int) -> float:
"""Convert sovatoms to tokens."""
return sovatoms / 100000000
|
5a683213ccc916396af9e7424e315d640fdaf3a3
| 39,171
|
def get_seqname(
locus_text: str) -> str:
"""
Args:
locus_text:
The LOCUS (i.e. header section) of genbank file
Returns:
The seqname of chromosome
"""
line1 = locus_text[:locus_text.find('\n')]
return line1[len('LOCUS'):].lstrip().split(' '*3)[0]
|
d97939f43ac8937a5fdd694f3575ebe28333718d
| 39,177
|
def overlap(interval1, interval2):
"""
Returns the total amount of overlap between two intervals in the format of (x,y)
Example:
input: (0,10) , (5,10)
returns: 5
"""
return max(0, min(interval1[1], interval2[1]) - max(interval1[0], interval2[0]))
|
3a859fd12181cbfe278f298c8dcf7133eadd59e3
| 39,180
|
def filt_dct(dct):
"""Filter None values from dict."""
    return {k: v for k, v in dct.items() if v is not None}
|
1d2803a9c7fd143b53206e7e5ac63a955d38b0c9
| 39,182
|
from typing import Sequence
def is_same_shape(a: Sequence, b: Sequence) -> bool:
"""
Compares two shapes a and b, returning True if they are the same
(their ranks and corresponding lengths match) and False otherwise.
"""
return tuple(a) == tuple(b)
|
27dbacaa36d600631f89081bb8085d9b2b7f9f58
| 39,183
|
def has_n_leading_zeroes(num_zeroes, digest):
"""Check if the given digest has the required number of leading zeroes."""
return digest[:num_zeroes] == '0'*num_zeroes
|
fe96941bbeb6325a36452e7b0730face0af54c54
| 39,192
|
import torch
def tangent_vectors(normals):
"""Returns a pair of vector fields u and v to complete the orthonormal basis [n,u,v].
normals -> uv
(N, 3) or (N, S, 3) -> (N, 2, 3) or (N, S, 2, 3)
This routine assumes that the 3D "normal" vectors are normalized.
It is based on the 2017 paper from Pixar, "Building an orthonormal basis, revisited".
Args:
normals (Tensor): (N,3) or (N,S,3) normals `n_i`, i.e. unit-norm 3D vectors.
Returns:
(Tensor): (N,2,3) or (N,S,2,3) unit vectors `u_i` and `v_i` to complete
        the tangent coordinate systems `[n_i, u_i, v_i]`.
"""
x, y, z = normals[..., 0], normals[..., 1], normals[..., 2]
s = (2 * (z >= 0)) - 1.0 # = z.sign(), but =1. if z=0.
a = -1 / (s + z)
b = x * y * a
uv = torch.stack((1 + s * x * x * a, s * b, -s * x, b, s + y * y * a, -y), dim=-1)
uv = uv.view(uv.shape[:-1] + (2, 3))
return uv
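# Shape sketch (torch is imported above; the normals here are random, not from
# the paper):
# n = torch.nn.functional.normalize(torch.randn(10, 3), dim=-1)
# tangent_vectors(n).shape   # torch.Size([10, 2, 3])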
|
60282a0f1efc63e0ab04e12685f35337b8185f3a
| 39,195
|
from typing import List
from typing import Dict
def _tags_as_dict(tags: List[Dict]) -> Dict[str, str]:
"""
Convert a list of tags to a dictionary
:param tags: the list of tags
:return: the dictionary of tags
"""
return {tag["Key"]: tag.get("Value", "") for tag in tags}
|
f909f1f4fb6773cf3b1e32067d963738b6e7023d
| 39,196
|
def select_color_marker(i):
""" Return index-based marker/color format for plotting """
colors = ['b', 'g', 'r', 'c', 'y', 'k']
style = ['-', '--', '-.', ':']
ci = i % len(colors)
si = (i // len(colors)) % len(style)
return '%s%s' % (colors[ci], style[si])
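# e.g. select_color_marker(0) -> 'b-', select_color_marker(6) -> 'b--'
# (the line style advances once the six colors are exhausted)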
|
9b06ecc6de31e8c2a0dbf423233640752fe09110
| 39,198
|
import pickle
def get_mct_frame(site):
"""Serves up a dataframe of micro-CT SSA data for a given site"""
# Load a dictionary of dataframes stored as a pickle file
# The pickle file itself is generated by notebook: CheckOutCT.ipynb
frames = pickle.load(open('../data/microCT/processed_mCT.p', 'rb'))
# Select your dataframe from the dictionary by using the site code as the key
mct_df = frames[site]
# Drop duplicate samples that have the same average height
mct_df = mct_df.drop_duplicates(subset='height_ave (cm)',keep="first")
mct_df.set_index('height_ave (cm)', inplace=True)
return mct_df
|
ace2697ff9833ca0392815e16741b77d416de3ef
| 39,200
|
def get_memory_limit(component_limit, overall_limit):
"""
Return the minimum of the component and overall limits or None if neither is set.
"""
limits = [limit for limit in [component_limit, overall_limit] if limit is not None]
return min(limits) if limits else None
|
50c8372dca1bacaa3e408abaf5e03659d01e1eea
| 39,203
|
def is_tag(t):
"""Is `t` a tag?
"""
return t.strip().startswith('{%')
|
a7e4f55925844d8e6e881dcad8d20bfc2f5968bf
| 39,208
|
def parse_cdr_annotations_pubtator(entity_type, subset):
"""Get each annotation in the BC5CDR corpus with documents in PubTator format.
Requires:
entity_type: is str, either "Chemical" or "Disease"
subset: is str, either "train", "dev", "test" or "all"
Ensures:
annotations: is dict, each key is document str, values are list with all the annotations in document
"""
corpus_dir = "BioCreative-V-CDR-Corpus/CDR_Data/CDR.Corpus.v010516/"
annotations = dict()
filenames = list()
if subset == "train":
filenames.append("CDR_TrainingSet.PubTator.txt")
elif subset == "dev":
filenames.append("CDR_DevelopmentSet.PubTator.txt")
elif subset == "test":
filenames.append("CDR_TestSet.PubTator.txt")
elif subset == "all":
filenames.append("CDR_TrainingSet.PubTator.txt")
filenames.append("CDR_DevelopmentSet.PubTator.txt")
filenames.append("CDR_TestSet.PubTator.txt")
for filename in filenames:
        with open(corpus_dir + filename, 'r') as corpus_file:
            data = corpus_file.readlines()
        for line in data:
            line_data = line.split("\t")
            document_id = line_data[0]
            if len(line_data) == 6 and line_data[4] == entity_type:
                mesh_id = line_data[5].strip("\n")
                annotation_text = line_data[3]
                annotation = (mesh_id, annotation_text)
                annotations.setdefault(document_id, []).append(annotation)
return annotations
|
0476628a01db6cd936132838d025ceb3838a9c30
| 39,219
|
import copy
def deeplist(x: list) -> list:
"""
Deep copy a list. This is needed because list() by itself only makes a shallow copy.
See https://stackoverflow.com/questions/5105517/deep-copy-of-a-dict-in-python
Convenience function.
:param x: List to copy
:return: Deep copy of the list provided by copy.deepcopy().
"""
return copy.deepcopy(x)
|
c4161510ddf150e7b57c4d9681a8221b8325b312
| 39,222
|
def get_layer_index(model, name):
"""Get index of layer by name"""
for idx, layer in enumerate(model.layers):
if layer.name == name:
return idx
|
5c57fad4cbb28ab8b6605be669e4cb75024ee977
| 39,224
|
import gzip
def encode_gzip(data, compresslevel=6):
"""Encode the passed in data with gzip."""
return gzip.compress(data, compresslevel=compresslevel)
|
2693956c15924194e1151d2c04eadf97167cc08b
| 39,226
|
def isImage(url):
"""Check if url is related to an image
Args:
url (string): string
Returns:
boolean: return true if url is referring to an image
"""
return \
url.endswith(".png") or \
url.endswith(".jpg") or \
url.endswith(".jpeg") or \
url.endswith(".svg")
|
f55b0ba05fa115b8a7d8d85219bd05e465b18854
| 39,228
|
def _get_separator(num, sep_title, sep_character, sep_length):
"""Get a row separator for row *num*."""
left_divider_length = right_divider_length = sep_length
if isinstance(sep_length, tuple):
left_divider_length, right_divider_length = sep_length
left_divider = sep_character * left_divider_length
right_divider = sep_character * right_divider_length
title = sep_title.format(n=num + 1)
return "{left_divider}[ {title} ]{right_divider}\n".format(
left_divider=left_divider, right_divider=right_divider, title=title
)
|
0e10658e11580264a7722f59390a9dfcfaf0a71b
| 39,229
|
def parse_pages(pages):
"""
Give a string possibly containing a start and end page, return the start and end page if any
:param pages:
:return: list with start and end pages
"""
if '-' in pages:
k = pages.find('-')
start = pages[0:k]
end = pages[k + 1:]
else:
start = pages
end = ''
return [start, end]
|
4ccf0dd8409d50c89dde3951eadd679e3009ffd8
| 39,230
|
def _ll_subvoxel_overlap(xs, x1, x2):
"""For an interval [x1, x2], return the index of the lower limit of the
overlapping subvoxels whose borders are defined by the elements of xs."""
xmin = min(x1, x2)
if xmin <= xs[0]:
return 0
elif xmin >= xs[-1]:
ll = len(xs) - 1
return ll
else:
for i, x in enumerate(xs):
if x > xmin:
ll = i - 1
return ll
ll = 0
return ll
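# Illustration on a hypothetical grid xs = [0, 1, 2, 3]:
# _ll_subvoxel_overlap([0, 1, 2, 3], 1.5, 2.5) -> 1
# (the lower end of the interval falls in the subvoxel [1, 2])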
|
5563642767d626f9bc516b90930432b2d5692442
| 39,238
|
import json
def _read_notebook_data_dict(notebook_path: str) -> dict:
"""
Read a dictionary of notebook data.
Parameters
----------
notebook_path : str
Path of target notebook.
Returns
-------
notebook_data_dict : dict
A dictionary of notebook data.
"""
with open(notebook_path, 'r') as f:
notebook_data_str: str = f.read()
notebook_data_dict: dict = json.loads(notebook_data_str)
return notebook_data_dict
|
c74fabb3ad1ff7d0e5d002791b1aef08a353199a
| 39,241
|
import itertools
def _binary_count(n):
"""Count `n` binary digits from [0...0] to [1...1]."""
return list(itertools.product([0, 1], repeat=n))
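# e.g. _binary_count(2) -> [(0, 0), (0, 1), (1, 0), (1, 1)]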
|
0b21fc49763a7c09bd1ac84c4c823a0239a31db9
| 39,246
|
def bdev_compress_get_orphans(client, name=None):
"""Get a list of comp bdevs that do not have a pmem file (aka orphaned).
Args:
name: comp bdev name to query (optional; if omitted, query all comp bdevs)
Returns:
List of comp bdev names.
"""
params = {}
if name:
params['name'] = name
return client.call('bdev_compress_get_orphans', params)
|
4723929303c27388870ed7d9a2339e7e832b41d1
| 39,248
|
import json
def message_to_json(message):
"""
    This function transforms the string message to a JSON
    string, this is to make all REST responses
    be in JSON format and easier to implement
    in a consistent way.
    """
    # if message is already valid JSON then do not do anything
    try:
        message_dict = json.loads(message)
except ValueError:
message_dict = {
"msg": message
}
return json.dumps(message_dict)
|
bf20d028068d2716c5b40807e6b17a6ffb8b1073
| 39,249
|
def recursive_replace(steps: int, to_expand: str, rules: dict) -> str:
""" Replace the given string with a new replacement string, according to the rules.
Args:
steps (int): How many iterations. Decremented with each recursion. Recursion ends when step=0.
        to_expand (str): Input str. The str to be replaced at this recursion depth
rules (dict): Map of XY: Z, such that XY becomes XZY """
res = to_expand # E.g. NNCB first iteration, NCN on second iteration, NBC on third...
chars_inserted = 0 # This grows as we insert horizontally, to allow indexing
for i in range(len(to_expand)-1): # sliding window of 2 chars; stop at len-1
pair = to_expand[i:i+2] # E.g. CB
if pair in rules: # if this pair has a valid replacement str
replacement = pair[0] + rules[pair] + pair[1] # E.g. CH -> CBH
insertion_point = i + chars_inserted
if steps > 1: # Deeper recursions to go
# Now recurse into the pair we've just expanded
replacement = recursive_replace(steps-1, replacement, rules)
res = res[:insertion_point] + replacement + res[insertion_point+2:]
# Because replacement is recursive, XY could be replaced by a long str
chars_inserted += len(replacement)-len(pair)
return res
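# One-step sketch with hypothetical pair-insertion rules (AoC 2021 day 14 style):
# >>> recursive_replace(1, "NNCB", {"NN": "C", "NC": "B", "CB": "H"})
# 'NCNBCHB'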
|
2104ccfbc89d60aa09235a2933aee0bf15dea0a3
| 39,251
|
def nonzeros(u):
"""Return number of non-zero items in list `u`."""
return len([val for val in u if val != 0])
|
77180e06c9e82bcb4ca19a289514a92334ad62cd
| 39,254
|
def match_fields(exp_fields, fields):
"""
Check field names and values match the expected ones.
- exp_fields:
A list of dictionaries with field name/value pairs.
- fields:
SPARKL event fields as returned by the listener.
[
{'attr': {'name':'n', 'value':3}},
{'attr': {'name': 'div', 'value':2}}]
"""
# Reformat event fields to contain only name and value. If value is not
# given (in case of FLAG fields), value is None.
fields = [{field['attr']['name']: field['attr']['value']}
if 'value' in field['attr']
else {field['attr']['name']: None}
for field in fields]
return exp_fields == fields
|
1a260f344ca42c480069b6951e037320fb6a63aa
| 39,255
|
def merge_xml(xmls,output_file):
"""
merge xml files
Parameters:
-----------
xmls: list
List of paths of the xml files
output_file: str
Path of the merged xml
"""
    if len(xmls) < 2:
raise Exception("Need two or more xml files to merge")
xmls = " ".join(xmls)
msg = f"scxmlmerge {xmls} > {output_file}"
# os.system(msg)
return msg
|
b574f3edb777f4a48f5209173baf7f74a465377e
| 39,261
|
def isNewPhase(ds1, ds2):
"""
Check if two dynamicsState have the same contacts
:param ds1:
:param ds2:
    :return: True if the contacts differ (i.e. a new phase begins), False if they are the same
    """
    assert ds1.effNum() == ds2.effNum(), "The two dynamic states do not come from the same model."
for i in range(ds1.effNum()):
if ds1.effActivation(i) != ds2.effActivation(i):
return True
return False
|
b6bf21106024991256a3a53b887bf73f83e7c037
| 39,264
|
def create_events_model(areas, virus_states):
"""Create events for the model.
Parameters
----------
virus_states : list of strings
List containing the names of all virus variants.
Returns
-------
events: dict
Dictionary that contains the event names as keys
        and dictionaries that contain the event ids, and formulas
as values.
"""
events = {}
for index1 in areas:
for index2 in virus_states:
keys = f"event_{index1}_{index2}"
assignee = f"infectious_{index1}_vac0_{index2}"
trigger_time_par = f"{index2}_{index1}_appears_time"
trigger_quantity = f"{index2}_{index1}_appears_quantity"
events[keys] = {
"trigger_formula": f"geq(time, {trigger_time_par})",
"assignee_id": assignee,
"assign_formula": trigger_quantity,
}
return events
|
b1e8394f2a57e89372844cbb2b456d702d7b5c59
| 39,266
|
def is_valid(box):
"""Check that a bounding box has valid coordinates"""
return (box[..., 2] > box[..., 0]) and (box[..., 3] > box[..., 1])
|
ca622196ac6710494fc052682dca5c2dde4af4ee
| 39,277
|
def mask_last_dim(tensor, binary_mask):
"""Pick the elements of tensor in the last dimension according to binary_mask."""
return tensor[..., 0] * binary_mask + tensor[..., 1] * (1 - binary_mask)
|
ca1b40d8b90184c18979443483361987e49ec370
| 39,280
|
def minimum(ints):
"""
    Return the minimum in a list of integers, where the list is encoded
    as nested (head, tail) pairs and () is the empty list. If the list
    is empty, return None.
    """
    if ints == ():
        return None
    head, tail = ints
    tail_min = minimum(tail)
    if tail_min is None or head <= tail_min:
        return head
    return tail_min
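# The encoding in action (illustrative): (3, (1, (2, ()))) is the list [3, 1, 2].
# >>> minimum((3, (1, (2, ()))))
# 1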
|
ff4e4eab86e2efa0a01b7e254e7d10556284a6d3
| 39,281
|
def clean_name(name: str):
"""Clean name by stripping whitespace and newline."""
return name.splitlines()[0].strip()
|
1df5c654bc52ecbe33b98fe9a32eb812abec4e0f
| 39,282
|
def datetime_to_list(date):
"""
convert a datetime object into a list [year,month,day,hour,minute,second]
Arguments
---------
date: datetime object
"""
    return [date.year, date.month, date.day, date.hour, date.minute, date.second]
|
5a581680793172bede720aa9c144203e843beb31
| 39,286
|
def ignore_formatter(error):
"""
Formatter that emits nothing, regardless of the error.
"""
return ''
|
1d2d3b145e43d9d5840ad5dc3851331d5d67a23d
| 39,288
|
import six
def _encode_metadata(metadata):
"""
UTF8 encode any unicode keys or values in given metadata dict.
:param metadata: a dict
"""
def encode_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
|
4b953a42b714f9729ae3ca41c413b295f012d72e
| 39,292
|
import torch
def get_means(tensors_list):
"""
Calculate the mean of a list of tensors for each tensor in the list. In our case the list typically contains
a tensor for each class, such as the per class z values.
Parameters:
tensors_list (list): List of Tensors
Returns:
list: List of Tensors containing mean vectors
"""
means = []
for i in range(len(tensors_list)):
if isinstance(tensors_list[i], torch.Tensor):
means.append(torch.mean(tensors_list[i], dim=0))
else:
means.append([])
return means
|
b99b0dc2f0ab19c5ae55170d59b69b6f714f3db2
| 39,297
|
def deltatime_format(a, b):
""" Compute and format the time elapsed between two points in time.
Args:
a Earlier point-in-time
b Later point-in-time
Returns:
        Elapsed time (in s),
Formatted elapsed time string (human-readable way)
"""
# Elapsed time (in seconds)
t = b - a
    # Elapsed time (formatted from whole seconds)
    d = int(t)
s = d % 60
d //= 60
m = d % 60
d //= 60
h = d % 24
d //= 24
# Return elapsed time
return t, f"{d} day(s), {h} hour(s), {m} min(s), {s} sec(s)"
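# e.g. deltatime_format(0, 3661) -> (3661, '0 day(s), 1 hour(s), 1 min(s), 1 sec(s)')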
|
0478dd50d7d8e4673058b4096cb0247352a80f6f
| 39,299
|
def pks_from_iterable(iterable, unique_output=False):
"""
Return pks list based on iterable
:param iterable: list of django model objects OR django queryset
:param unique_output: if True returned list will be unique
:return: list of int
"""
pks = list()
for obj in iterable:
try:
pks.append(int(getattr(obj, 'pk', obj)))
except (TypeError, ValueError):
raise TypeError("Iterable %s is not any of Queryset, list with django Model objects or ints" % iterable)
return list(set(pks)) if unique_output else pks
|
7112b2da95d09fb7fc54626b0aa374d3304af87d
| 39,302
|
import hashlib
def sha1_hash_from_text(text: str) -> str:
"""Return sha1 hex digest as string for text.
Parameters
----------
text: str
The text to be hashed
Returns
-------
str
the hash of the text
"""
return hashlib.sha1(text.encode()).hexdigest()
|
999a00131adbc207af990a80404887694845da86
| 39,303
|
import click
def os_options(f):
"""Aggregate multiple common options into one.
This decorator should be used by CLI commands that need an
Openstack client."""
f = click.option('--os-username', help='Openstack Username', required=True,
envvar='OS_USERNAME')(f)
f = click.option('--os-password', help='Openstack Password', required=True,
envvar='OS_PASSWORD')(f)
f = click.option('--os-project-id', help='Openstack Project ID',
required=True, envvar='OS_TENANT_NAME')(f)
f = click.option('--os-auth-url', help='Keystone auth URL',
envvar='OS_AUTH_URL')(f)
f = click.option('--os-region-name', help='Keystone region name',
default='RegionOne', envvar='OS_REGION_NAME')(f)
f = click.option('--os-service-type',
help='Defaults to env[OS_NETWORK_SERVICE_TYPE] '
'or network.',
default='network', envvar='OS_NETWORK_SERVICE_TYPE')(f)
f = click.option('--os-endpoint-type', envvar='OS_ENDPOINT_TYPE',
default='public',
help='Defaults to env[OS_ENDPOINT_TYPE] or public.')(f)
f = click.option('--os-tenant-name', envvar='OS_TENANT_NAME',
                     help='Authentication tenant name, defaults to '
'env[OS_TENANT_NAME].')(f)
f = click.option('--os-project-name', envvar='OS_PROJECT_NAME',
help='Another way to specify tenant name. '
'This option is mutually exclusive with '
'--os-tenant-name. '
'Defaults to env[OS_PROJECT_NAME].')(f)
f = click.option('--os-tenant-id', envvar='OS_TENANT_ID', default='',
help='Authentication tenant ID, defaults to '
'env[OS_TENANT_ID].')(f)
f = click.option('--insecure', default=False,
envvar='NEUTRONCLIENT_INSECURE',
help="Explicitly allow neutronclient to perform "
"\"insecure\" SSL (https) requests. The server's "
"certificate will not be verified against any "
"certificate authorities. This option should be "
"used with caution.")(f)
f = click.option('--os-token', envvar='OS_TOKEN', default='',
help='Authentication token, defaults to '
'env[OS_TOKEN].')(f)
f = click.option('--os-url', envvar='OS_URL', default='',
help='Defaults to env[OS_URL].')(f)
f = click.option('--os-key', envvar='OS_KEY', default='',
help="Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your certificate "
"file. Defaults to env[OS_KEY].")(f)
f = click.option('--os-project-domain-id',
envvar='OS_PROJECT_DOMAIN_ID', default='',
help='Defaults to env[OS_PROJECT_DOMAIN_ID].')(f)
f = click.option('--os-project-domain-name',
envvar='OS_PROJECT_DOMAIN_NAME', default='',
help='Defaults to env[OS_PROJECT_DOMAIN_NAME].')(f)
f = click.option('--os-cert', envvar='OS_CERT', default='',
help="Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key. Defaults "
"to env[OS_CERT].")(f)
f = click.option('--os-cacert', envvar='OS_CACERT',
help="Specify a CA bundle file to use in "
"verifying a TLS (https) server certificate. "
"Defaults to env[OS_CACERT].")(f)
f = click.option('--os-user-domain-name', envvar='OS_USER_DOMAIN_NAME',
default='',
help='OpenStack user domain name. '
'Defaults to env[OS_USER_DOMAIN_NAME].')(f)
f = click.option('--os-user-domain-id', envvar='OS_USER_DOMAIN_ID',
default='',
help='OpenStack user domain ID. '
'Defaults to env[OS_USER_DOMAIN_ID].')(f)
f = click.option('--os-user-id', envvar='OS_USER_ID', default='',
help='Authentication user ID (Env: OS_USER_ID)')(f)
f = click.option('--http-timeout', envvar='OS_NETWORK_TIMEOUT',
default=None, type=click.FLOAT,
help='Timeout in seconds to wait for an HTTP response. '
'Defaults to env[OS_NETWORK_TIMEOUT] or None if not '
'specified.')(f)
f = click.option('--os-cloud', envvar='OS_CLOUD', default=None,
help='Defaults to env[OS_CLOUD].')(f)
return f
|
f38a646a45055d4b23e22d887b9d703fb804c868
| 39,305
|
def is_ob_site_html(html):
"""Check if some HTML looks like it is from the overcomingbias site.
Parameters
----------
html : bs4.BeautifulSoup
An HTML page, possibly from the overcomingbias site.
Returns
-------
is_ob_site_html : bool
True if the input HTML "looks like" it is from the
overcomingbias site, and False otherwise.
"""
site_title = html.find(id="site-title")
if site_title is not None:
return "www.overcomingbias.com" in site_title.a["href"]
return False
|
ae3e21320858044772532e74fe94d0cfd0e1cb1b
| 39,307
|
def _2DprintInxRow(inxRow, lSpacesIndR):
"""
Function prints one index of a row of a 2D array
Input:
    - 1 **inxRow** (*int*) Index of the row to be printed
- 2 **lSpacesIndR** (*list*) A list with spaces which should be added
to indices of rows
Output:
- 1 **strArray** (*string*) The string with printed requested index of
a row
"""
# Print index of the row
strRowInx = ('%d:') % inxRow
# Pick up a correct space which is added before the index
strSpaceBefore = lSpacesIndR[len(strRowInx) - 1]
# Connect the above together
    strArray = strSpaceBefore + strRowInx + ' '
return strArray
|
f108007a9fcb25a6aa32e22297a654d6d262e247
| 39,312
|
import math
def SSphere(r):
"""
Surface of a sphere of radius r.
"""
return 4. * math.pi * r * r
|
161b43f95ccf02349b66ac5035457dd962e3ba11
| 39,315
|
def create(hdf5, name, dtype, shape=(None,), compression=None,
fillvalue=0, attrs=None):
"""
:param hdf5: a h5py.File object
:param name: an hdf5 key string
:param dtype: dtype of the dataset (usually composite)
:param shape: shape of the dataset (can be extendable)
:param compression: None or 'gzip' are recommended
    :param fillvalue: fill value for fixed-shape datasets
    :param attrs: dictionary of attributes of the dataset
:returns: a HDF5 dataset
"""
    if shape[0] is None:  # extendable dataset
        dset = hdf5.create_dataset(
            name, (0,) + shape[1:], dtype, chunks=True, maxshape=shape,
            compression=compression)
    else:  # fixed-shape dataset
        dset = hdf5.create_dataset(name, shape, dtype, fillvalue=fillvalue,
                                   compression=compression)
if attrs:
for k, v in attrs.items():
dset.attrs[k] = v
return dset
|
b9000aa26a0f1ebcb86ba61704e8634e081d29c6
| 39,316
|
def retrieve_positions(position_file):
"""
This function returns a list of strings in the right format representing
the positions that will be read out. [spatialfrequency,xi,xf,y].
Args:
position_file (str): The path of the position file.
Returns:
positions (list,str): List representing the positions that will be read.
"""
    with open(position_file, 'r') as position_fh:
        position_lines = position_fh.readlines()
    positions = [line.split() for line in position_lines]
return positions
|
832d77e894c92b70edbcf330ad37bbaaf0cc3ed2
| 39,318
|